CGExpr.cpp
1//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "ABIInfoImpl.h"
14#include "CGCUDARuntime.h"
15#include "CGCXXABI.h"
16#include "CGCall.h"
17#include "CGCleanup.h"
18#include "CGDebugInfo.h"
19#include "CGHLSLRuntime.h"
20#include "CGObjCRuntime.h"
21#include "CGOpenMPRuntime.h"
22#include "CGRecordLayout.h"
23#include "CodeGenFunction.h"
24#include "CodeGenModule.h"
25#include "CodeGenPGO.h"
26#include "ConstantEmitter.h"
27#include "TargetInfo.h"
29#include "clang/AST/ASTLambda.h"
30#include "clang/AST/Attr.h"
31#include "clang/AST/DeclObjC.h"
32#include "clang/AST/NSAPI.h"
36#include "clang/Basic/Module.h"
38#include "llvm/ADT/STLExtras.h"
39#include "llvm/ADT/ScopeExit.h"
40#include "llvm/ADT/StringExtras.h"
41#include "llvm/IR/DataLayout.h"
42#include "llvm/IR/Intrinsics.h"
43#include "llvm/IR/LLVMContext.h"
44#include "llvm/IR/MDBuilder.h"
45#include "llvm/IR/MatrixBuilder.h"
46#include "llvm/Support/ConvertUTF.h"
47#include "llvm/Support/Endian.h"
48#include "llvm/Support/MathExtras.h"
49#include "llvm/Support/Path.h"
50#include "llvm/Support/xxhash.h"
51#include "llvm/Transforms/Utils/SanitizerStats.h"
52
53#include <numeric>
54#include <optional>
55#include <string>
56
57using namespace clang;
58using namespace CodeGen;
59
60namespace clang {
61// TODO: consider deprecating ClSanitizeGuardChecks; functionality is subsumed
62// by -fsanitize-skip-hot-cutoff
63llvm::cl::opt<bool> ClSanitizeGuardChecks(
64 "ubsan-guard-checks", llvm::cl::Optional,
65 llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));
66
67} // namespace clang
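// Note: as an LLVM cl::opt, this flag is reached from the clang driver as
// '-mllvm -ubsan-guard-checks'; when enabled, emitted UBSan checks are guarded
// by the llvm.allow.ubsan.check() intrinsic so they can be dropped later in
// hot code.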
68
69//===--------------------------------------------------------------------===//
70// Defines for metadata
71//===--------------------------------------------------------------------===//
72
73// These values must be kept identical to the ones in the UBSan runtime library.
74enum VariableTypeDescriptorKind : uint16_t {
75 /// An integer type.
76 TK_Integer = 0x0000,
77 /// A floating-point type.
78 TK_Float = 0x0001,
79 /// An _BitInt(N) type.
80 TK_BitInt = 0x0002,
81 /// Any other type. The value representation is unspecified.
82 TK_Unknown = 0xffff
83};
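// (These kinds mirror the UBSan runtime's TypeDescriptor. For TK_Integer the
// accompanying type info roughly encodes (log2(bit width) << 1) | signedness,
// so, for example, a signed 32-bit int is described as (5 << 1) | 1 = 0x0b.)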
84
85//===--------------------------------------------------------------------===//
86// Miscellaneous Helper Methods
87//===--------------------------------------------------------------------===//
88
89static llvm::StringRef GetUBSanTrapForHandler(SanitizerHandler ID) {
90 switch (ID) {
91#define SANITIZER_CHECK(Enum, Name, Version, Msg) \
92 case SanitizerHandler::Enum: \
93 return Msg;
94 LIST_SANITIZER_CHECKS
95#undef SANITIZER_CHECK
96 }
97 llvm_unreachable("unhandled switch case");
98}
99
100/// CreateTempAllocaWithoutCast - This creates an alloca and inserts it into
101/// the entry block, without casting it to the default address space.
102RawAddress
103CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
104 const Twine &Name,
105 llvm::Value *ArraySize) {
106 auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
107 Alloca->setAlignment(Align.getAsAlign());
108 return RawAddress(Alloca, Ty, Align, KnownNonNull);
109}
110
111RawAddress CodeGenFunction::MaybeCastStackAddressSpace(RawAddress Alloca,
112 LangAS DestLangAS,
113 llvm::Value *ArraySize) {
114
115 llvm::Value *V = Alloca.getPointer();
116 // Alloca always returns a pointer in the alloca address space, which may
117 // be different from the address space defined by the language. For example,
118 // in C++, automatic variables are in the default address space. Therefore,
119 // cast the alloca to the destination address space when necessary.
120
121 unsigned DestAddrSpace = getContext().getTargetAddressSpace(DestLangAS);
122 if (DestAddrSpace != Alloca.getAddressSpace()) {
123 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
124 // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
125 // otherwise alloca is inserted at the current insertion point of the
126 // builder.
127 if (!ArraySize)
128 Builder.SetInsertPoint(getPostAllocaInsertPoint());
129 V = getTargetHooks().performAddrSpaceCast(
130 *this, V, getASTAllocaAddressSpace(), Builder.getPtrTy(DestAddrSpace),
131 /*IsNonNull=*/true);
132 }
133
134 return RawAddress(V, Alloca.getElementType(), Alloca.getAlignment(),
135 KnownNonNull);
136}
137
138RawAddress CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, LangAS DestLangAS,
139 CharUnits Align, const Twine &Name,
140 llvm::Value *ArraySize,
141 RawAddress *AllocaAddr) {
142 RawAddress Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
143 if (AllocaAddr)
144 *AllocaAddr = Alloca;
145 return MaybeCastStackAddressSpace(Alloca, DestLangAS, ArraySize);
146}
147
148/// CreateTempAlloca - This creates an alloca and inserts it into the entry
149/// block if \p ArraySize is nullptr, otherwise inserts it at the current
150/// insertion point of the builder.
151llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
152 const Twine &Name,
153 llvm::Value *ArraySize) {
154 llvm::AllocaInst *Alloca;
155 if (ArraySize)
156 Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
157 else
158 Alloca =
159 new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
160 ArraySize, Name, AllocaInsertPt->getIterator());
161 if (SanOpts.Mask & SanitizerKind::Address) {
162 Alloca->addAnnotationMetadata({"alloca_name_altered", Name.str()});
163 }
164 if (Allocas) {
165 Allocas->Add(Alloca);
166 }
167 return Alloca;
168}
169
170/// CreateDefaultAlignTempAlloca - This creates an alloca with the
171/// default alignment of the corresponding LLVM type, which is *not*
172/// guaranteed to be related in any way to the expected alignment of
173/// an AST type that might have been lowered to Ty.
174RawAddress CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
175 const Twine &Name) {
176 CharUnits Align =
177 CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
178 return CreateTempAlloca(Ty, Align, Name);
179}
180
181RawAddress CodeGenFunction::CreateIRTemp(QualType Ty, CharUnits Align,
182 const Twine &Name) {
183 return CreateTempAlloca(ConvertType(Ty), Align, Name);
184}
185
186RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
187 RawAddress *Alloca) {
188 // FIXME: Should we prefer the preferred type alignment here?
189 return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
190}
191
192RawAddress CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
193 const Twine &Name,
194 RawAddress *Alloca) {
195 RawAddress Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
196 /*ArraySize=*/nullptr, Alloca);
197
198 if (Ty->isConstantMatrixType()) {
199 auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
200 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
201 ArrayTy->getNumElements());
202
203 Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
204 KnownNonNull);
205 }
206 return Result;
207}
208
209RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
210 CharUnits Align,
211 const Twine &Name) {
212 return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
213}
214
215RawAddress CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
216 const Twine &Name) {
217 return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
218 Name);
219}
220
221/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
222/// expression and compare the result against zero, returning an Int1Ty value.
223llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
224 PGO->setCurrentStmt(E);
225 if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
226 llvm::Value *MemPtr = EmitScalarExpr(E);
227 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
228 }
229
230 QualType BoolTy = getContext().BoolTy;
231 SourceLocation Loc = E->getExprLoc();
232 CGFPOptionsRAII FPOptsRAII(*this, E);
233 if (!E->getType()->isAnyComplexType())
234 return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
235
236 return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
237 Loc);
238}
239
240/// EmitIgnoredExpr - Emit code to compute the specified expression,
241/// ignoring the result.
242void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
243 if (E->isPRValue())
244 return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);
245
246 // If this is a conditional operator that produces a bitfield, special-case
247 // its emission. The normal 'EmitLValue' version of this is particularly
248 // difficult to codegen for, since creating a single "LValue" for two
249 // different sized arguments here is not particularly doable.
250 if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
251 E->IgnoreParens())) {
252 if (CondOp->getObjectKind() == OK_BitField)
253 return EmitIgnoredConditionalOperator(CondOp);
254 }
255
256 // Just emit it as an l-value and drop the result.
257 EmitLValue(E);
258}
259
260/// EmitAnyExpr - Emit code to compute the specified expression which
261/// can have any type. The result is returned as an RValue struct.
262/// If this is an aggregate expression, AggSlot indicates where the
263/// result should be returned.
264RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
265 AggValueSlot aggSlot,
266 bool ignoreResult) {
267 switch (getEvaluationKind(E->getType())) {
268 case TEK_Scalar:
269 return RValue::get(EmitScalarExpr(E, ignoreResult));
270 case TEK_Complex:
271 return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
272 case TEK_Aggregate:
273 if (!ignoreResult && aggSlot.isIgnored())
274 aggSlot = CreateAggTemp(E->getType(), "agg-temp");
275 EmitAggExpr(E, aggSlot);
276 return aggSlot.asRValue();
277 }
278 llvm_unreachable("bad evaluation kind");
279}
280
281/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
282/// always be accessible even if no aggregate location is provided.
283RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
284 AggValueSlot AggSlot = AggValueSlot::ignored();
285
286 if (hasAggregateEvaluationKind(E->getType()))
287 AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
288 return EmitAnyExpr(E, AggSlot);
289}
290
291/// EmitAnyExprToMem - Evaluate an expression into a given memory
292/// location.
293void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
294 Address Location,
295 Qualifiers Quals,
296 bool IsInit) {
297 // FIXME: This function should take an LValue as an argument.
298 switch (getEvaluationKind(E->getType())) {
299 case TEK_Complex:
300 EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
301 /*isInit*/ false);
302 return;
303
304 case TEK_Aggregate: {
305 EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
306 AggValueSlot::IsDestructed_t(IsInit),
307 AggValueSlot::DoesNotNeedGCBarriers,
308 AggValueSlot::IsAliased_t(!IsInit),
309 AggValueSlot::MayOverlap));
310 return;
311 }
312
313 case TEK_Scalar: {
314 RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
315 LValue LV = MakeAddrLValue(Location, E->getType());
316 EmitStoreThroughLValue(RV, LV);
317 return;
318 }
319 }
320 llvm_unreachable("bad evaluation kind");
321}
322
323void CodeGenFunction::EmitInitializationToLValue(
324 const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed) {
325 QualType Type = LV.getType();
326 switch (getEvaluationKind(Type)) {
327 case TEK_Complex:
328 EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
329 return;
330 case TEK_Aggregate:
331 EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
332 AggValueSlot::DoesNotNeedGCBarriers,
333 AggValueSlot::IsNotAliased,
334 AggValueSlot::MayOverlap, IsZeroed));
335 return;
336 case TEK_Scalar:
337 if (LV.isSimple())
338 EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
339 else
340 EmitStoreThroughLValue(RValue::get(EmitScalarExpr(E)), LV);
341 return;
342 }
343 llvm_unreachable("bad evaluation kind");
344}
345
346static void
347pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
348 const Expr *E, Address ReferenceTemporary) {
349 // Objective-C++ ARC:
350 // If we are binding a reference to a temporary that has ownership, we
351 // need to perform retain/release operations on the temporary.
352 //
353 // FIXME: This should be looking at E, not M.
354 if (auto Lifetime = M->getType().getObjCLifetime()) {
355 switch (Lifetime) {
358 // Carry on to normal cleanup handling.
359 break;
360
362 // Nothing to do; cleaned up by an autorelease pool.
363 return;
364
367 switch (StorageDuration Duration = M->getStorageDuration()) {
368 case SD_Static:
369 // Note: we intentionally do not register a cleanup to release
370 // the object on program termination.
371 return;
372
373 case SD_Thread:
374 // FIXME: We should probably register a cleanup in this case.
375 return;
376
377 case SD_Automatic:
381 if (Lifetime == Qualifiers::OCL_Strong) {
382 const ValueDecl *VD = M->getExtendingDecl();
383 bool Precise = isa_and_nonnull<VarDecl>(VD) &&
384 VD->hasAttr<ObjCPreciseLifetimeAttr>();
388 } else {
389 // __weak objects always get EH cleanups; otherwise, exceptions
390 // could cause really nasty crashes instead of mere leaks.
393 }
394 if (Duration == SD_FullExpression)
395 CGF.pushDestroy(CleanupKind, ReferenceTemporary,
396 M->getType(), *Destroy,
398 else
399 CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
400 M->getType(),
401 *Destroy, CleanupKind & EHCleanup);
402 return;
403
404 case SD_Dynamic:
405 llvm_unreachable("temporary cannot have dynamic storage duration");
406 }
407 llvm_unreachable("unknown storage duration");
408 }
409 }
410
412 if (DK != QualType::DK_none) {
413 switch (M->getStorageDuration()) {
414 case SD_Static:
415 case SD_Thread: {
416 CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
417 if (const auto *ClassDecl =
419 ClassDecl && !ClassDecl->hasTrivialDestructor())
420 // Get the destructor for the reference temporary.
421 ReferenceTemporaryDtor = ClassDecl->getDestructor();
422
423 if (!ReferenceTemporaryDtor)
424 return;
425
426 llvm::FunctionCallee CleanupFn;
427 llvm::Constant *CleanupArg;
428 if (E->getType()->isArrayType()) {
430 ReferenceTemporary, E->getType(), CodeGenFunction::destroyCXXObject,
431 CGF.getLangOpts().Exceptions,
432 dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
433 CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
434 } else {
435 CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
436 GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
437 CleanupArg =
438 cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
439 }
441 CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
442 } break;
444 CGF.pushDestroy(DK, ReferenceTemporary, E->getType());
445 break;
446 case SD_Automatic:
447 CGF.pushLifetimeExtendedDestroy(DK, ReferenceTemporary, E->getType());
448 break;
449 case SD_Dynamic:
450 llvm_unreachable("temporary cannot have dynamic storage duration");
451 }
452 }
453}
454
455static RawAddress createReferenceTemporary(CodeGenFunction &CGF,
456 const MaterializeTemporaryExpr *M,
457 const Expr *Inner,
458 RawAddress *Alloca = nullptr) {
459 auto &TCG = CGF.getTargetHooks();
460 switch (M->getStorageDuration()) {
462 case SD_Automatic: {
463 // If we have a constant temporary array or record try to promote it into a
464 // constant global under the same rules a normal constant would've been
465 // promoted. This is easier on the optimizer and generally emits fewer
466 // instructions.
467 QualType Ty = Inner->getType();
468 if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
469 (Ty->isArrayType() || Ty->isRecordType()) &&
470 Ty.isConstantStorage(CGF.getContext(), true, false))
471 if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
472 auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
473 auto *GV = new llvm::GlobalVariable(
474 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
475 llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
476 llvm::GlobalValue::NotThreadLocal,
478 CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
479 GV->setAlignment(alignment.getAsAlign());
480 llvm::Constant *C = GV;
481 if (AS != LangAS::Default)
482 C = TCG.performAddrSpaceCast(
483 CGF.CGM, GV, AS,
484 llvm::PointerType::get(
485 CGF.getLLVMContext(),
487 // FIXME: Should we put the new global into a COMDAT?
488 return RawAddress(C, GV->getValueType(), alignment);
489 }
490 return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
491 }
492 case SD_Thread:
493 case SD_Static:
494 return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
495
496 case SD_Dynamic:
497 llvm_unreachable("temporary can't have dynamic storage duration");
498 }
499 llvm_unreachable("unknown storage duration");
500}
501
502/// Helper method to check if the underlying ABI is AAPCS
503static bool isAAPCS(const TargetInfo &TargetInfo) {
504 return TargetInfo.getABI().starts_with("aapcs");
505}
506
507LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
508 const MaterializeTemporaryExpr *M) {
509 const Expr *E = M->getSubExpr();
510
511 assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
512 !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
513 "Reference should never be pseudo-strong!");
514
515 // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
516 // as that will cause the lifetime adjustment to be lost for ARC
517 auto ownership = M->getType().getObjCLifetime();
518 if (ownership != Qualifiers::OCL_None &&
519 ownership != Qualifiers::OCL_ExplicitNone) {
520 RawAddress Object = createReferenceTemporary(*this, M, E);
521 if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
522 llvm::Type *Ty = ConvertTypeForMem(E->getType());
523 Object = Object.withElementType(Ty);
524
525 // createReferenceTemporary will promote the temporary to a global with a
526 // constant initializer if it can. It can only do this to a value of
527 // ARC-manageable type if the value is global and therefore "immune" to
528 // ref-counting operations. Therefore we have no need to emit either a
529 // dynamic initialization or a cleanup and we can just return the address
530 // of the temporary.
531 if (Var->hasInitializer())
532 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
533
534 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
535 }
536 LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
538
539 switch (getEvaluationKind(E->getType())) {
540 default: llvm_unreachable("expected scalar or aggregate expression");
541 case TEK_Scalar:
542 EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
543 break;
544 case TEK_Aggregate: {
546 E->getType().getQualifiers(),
551 break;
552 }
553 }
554
555 pushTemporaryCleanup(*this, M, E, Object);
556 return RefTempDst;
557 }
558
561 E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
562
563 for (const auto &Ignored : CommaLHSs)
564 EmitIgnoredExpr(Ignored);
565
566 if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
567 if (opaque->getType()->isRecordType()) {
568 assert(Adjustments.empty());
569 return EmitOpaqueValueLValue(opaque);
570 }
571 }
572
573 // Create and initialize the reference temporary.
574 RawAddress Alloca = Address::invalid();
575 RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
576 if (auto *Var = dyn_cast<llvm::GlobalVariable>(
577 Object.getPointer()->stripPointerCasts())) {
578 llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
579 Object = Object.withElementType(TemporaryType);
580 // If the temporary is a global and has a constant initializer or is a
581 // constant temporary that we promoted to a global, we may have already
582 // initialized it.
583 if (!Var->hasInitializer()) {
584 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
586 if (RefType.getPointerAuth()) {
587 // Use the qualifier of the reference temporary to sign the pointer.
588 LValue LV = MakeRawAddrLValue(Object.getPointer(), RefType,
589 Object.getAlignment());
590 EmitScalarInit(E, M->getExtendingDecl(), LV, false);
591 } else {
592 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/ true);
593 }
594 }
595 } else {
596 switch (M->getStorageDuration()) {
597 case SD_Automatic:
598 if (EmitLifetimeStart(Alloca.getPointer())) {
600 Alloca);
601 }
602 break;
603
604 case SD_FullExpression: {
605 if (!ShouldEmitLifetimeMarkers)
606 break;
607
608 // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
609 // marker. Instead, start the lifetime of a conditional temporary earlier
610 // so that it's unconditional. Don't do this with sanitizers which need
611 // more precise lifetime marks. However when inside an "await.suspend"
612 // block, we should always avoid conditional cleanup because it creates
613 // boolean marker that lives across await_suspend, which can destroy coro
614 // frame.
615 ConditionalEvaluation *OldConditional = nullptr;
616 CGBuilderTy::InsertPoint OldIP;
618 ((!SanOpts.has(SanitizerKind::HWAddress) &&
619 !SanOpts.has(SanitizerKind::Memory) &&
620 !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
621 inSuspendBlock())) {
622 OldConditional = OutermostConditional;
623 OutermostConditional = nullptr;
624
625 OldIP = Builder.saveIP();
626 llvm::BasicBlock *Block = OldConditional->getStartingBlock();
627 Builder.restoreIP(CGBuilderTy::InsertPoint(
628 Block, llvm::BasicBlock::iterator(Block->back())));
629 }
630
631 if (EmitLifetimeStart(Alloca.getPointer())) {
633 }
634
635 if (OldConditional) {
636 OutermostConditional = OldConditional;
637 Builder.restoreIP(OldIP);
638 }
639 break;
640 }
641
642 default:
643 break;
644 }
645 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
646 }
647 pushTemporaryCleanup(*this, M, E, Object);
648
649 // Perform derived-to-base casts and/or field accesses, to get from the
650 // temporary object we created (and, potentially, for which we extended
651 // the lifetime) to the subobject we're binding the reference to.
652 for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
653 switch (Adjustment.Kind) {
655 Object =
656 GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
657 Adjustment.DerivedToBase.BasePath->path_begin(),
658 Adjustment.DerivedToBase.BasePath->path_end(),
659 /*NullCheckValue=*/ false, E->getExprLoc());
660 break;
661
664 LV = EmitLValueForField(LV, Adjustment.Field);
665 assert(LV.isSimple() &&
666 "materialized temporary field is not a simple lvalue");
667 Object = LV.getAddress();
668 break;
669 }
670
672 llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
674 E, Object, Ptr, Adjustment.Ptr.MPT, /*IsInBounds=*/true);
675 break;
676 }
677 }
678 }
679
680 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
681}
682
683RValue
684CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
685 // Emit the expression as an lvalue.
686 LValue LV = EmitLValue(E);
687 assert(LV.isSimple());
688 llvm::Value *Value = LV.getPointer(*this);
689
691 // C++11 [dcl.ref]p5 (as amended by core issue 453):
692 // If a glvalue to which a reference is directly bound designates neither
693 // an existing object or function of an appropriate type nor a region of
694 // storage of suitable size and alignment to contain an object of the
695 // reference's type, the behavior is undefined.
696 QualType Ty = E->getType();
698 }
699
700 return RValue::get(Value);
701}
702
703
704/// getAccessedFieldNo - Given an encoded value and a result number, return the
705/// input field number being accessed.
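/// For example, for an ext_vector swizzle such as 'v.yx' the encoded constant
/// is the vector <1, 0>, so result element 0 maps back to input field 1.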
706unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
707 const llvm::Constant *Elts) {
708 return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
709 ->getZExtValue();
710}
711
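// Mixes a pointer-sized value into an accumulator hash. The constant
// 0xbf58476d1ce4e5b9 and the xor-shift step resemble a splitmix64-style mixing
// round, spreading the vptr bits before the UBSan type-cache lookup below.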
712static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
713 llvm::Value *Ptr) {
714 llvm::Value *A0 =
715 Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));
716 llvm::Value *A1 =
717 Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));
718 return Builder.CreateXor(Acc, A1);
719}
720
725
728 return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
729 (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
732}
733
735 return SanOpts.has(SanitizerKind::Null) ||
736 SanOpts.has(SanitizerKind::Alignment) ||
737 SanOpts.has(SanitizerKind::ObjectSize) ||
738 SanOpts.has(SanitizerKind::Vptr);
739}
740
742 llvm::Value *Ptr, QualType Ty,
743 CharUnits Alignment,
744 SanitizerSet SkippedChecks,
745 llvm::Value *ArraySize) {
747 return;
748
749 // Don't check pointers outside the default address space. The null check
750 // isn't correct, the object-size check isn't supported by LLVM, and we can't
751 // communicate the addresses to the runtime handler for the vptr check.
752 if (Ptr->getType()->getPointerAddressSpace())
753 return;
754
755 // Don't check pointers to volatile data. The behavior here is implementation-
756 // defined.
757 if (Ty.isVolatileQualified())
758 return;
759
760 // Quickly determine whether we have a pointer to an alloca. It's possible
761 // to skip null checks, and some alignment checks, for these pointers. This
762 // can reduce compile-time significantly.
763 auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());
764
765 llvm::Value *IsNonNull = nullptr;
766 bool IsGuaranteedNonNull =
767 SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
768
769 llvm::BasicBlock *Done = nullptr;
770 bool DoneViaNullSanitize = false;
771
772 {
773 auto CheckHandler = SanitizerHandler::TypeMismatch;
774 SanitizerDebugLocation SanScope(this,
775 {SanitizerKind::SO_Null,
776 SanitizerKind::SO_ObjectSize,
777 SanitizerKind::SO_Alignment},
778 CheckHandler);
779
781 Checks;
782
783 llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
784 bool AllowNullPointers = isNullPointerAllowed(TCK);
785 if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
786 !IsGuaranteedNonNull) {
787 // The glvalue must not be an empty glvalue.
788 IsNonNull = Builder.CreateIsNotNull(Ptr);
789
790 // The IR builder can constant-fold the null check if the pointer points
791 // to a constant.
792 IsGuaranteedNonNull = IsNonNull == True;
793
794 // Skip the null check if the pointer is known to be non-null.
795 if (!IsGuaranteedNonNull) {
796 if (AllowNullPointers) {
797 // When performing pointer casts, it's OK if the value is null.
798 // Skip the remaining checks in that case.
799 Done = createBasicBlock("null");
800 DoneViaNullSanitize = true;
801 llvm::BasicBlock *Rest = createBasicBlock("not.null");
802 Builder.CreateCondBr(IsNonNull, Rest, Done);
803 EmitBlock(Rest);
804 } else {
805 Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::SO_Null));
806 }
807 }
808 }
809
810 if (SanOpts.has(SanitizerKind::ObjectSize) &&
811 !SkippedChecks.has(SanitizerKind::ObjectSize) &&
812 !Ty->isIncompleteType()) {
813 uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
814 llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
815 if (ArraySize)
816 Size = Builder.CreateMul(Size, ArraySize);
817
818 // Degenerate case: new X[0] does not need an objectsize check.
819 llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
820 if (!ConstantSize || !ConstantSize->isNullValue()) {
821 // The glvalue must refer to a large enough storage region.
822 // FIXME: If Address Sanitizer is enabled, insert dynamic
823 // instrumentation
824 // to check this.
825 // FIXME: Get object address space
826 llvm::Type *Tys[2] = {IntPtrTy, Int8PtrTy};
827 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
828 llvm::Value *Min = Builder.getFalse();
829 llvm::Value *NullIsUnknown = Builder.getFalse();
830 llvm::Value *Dynamic = Builder.getFalse();
831 llvm::Value *LargeEnough = Builder.CreateICmpUGE(
832 Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
833 Checks.push_back(
834 std::make_pair(LargeEnough, SanitizerKind::SO_ObjectSize));
835 }
836 }
837
838 llvm::MaybeAlign AlignVal;
839 llvm::Value *PtrAsInt = nullptr;
840
841 if (SanOpts.has(SanitizerKind::Alignment) &&
842 !SkippedChecks.has(SanitizerKind::Alignment)) {
843 AlignVal = Alignment.getAsMaybeAlign();
844 if (!Ty->isIncompleteType() && !AlignVal)
845 AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
846 /*ForPointeeType=*/true)
847 .getAsMaybeAlign();
848
849 // The glvalue must be suitably aligned.
850 if (AlignVal && *AlignVal > llvm::Align(1) &&
851 (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
852 PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
853 llvm::Value *Align = Builder.CreateAnd(
854 PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
855 llvm::Value *Aligned =
856 Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
857 if (Aligned != True)
858 Checks.push_back(
859 std::make_pair(Aligned, SanitizerKind::SO_Alignment));
860 }
861 }
862
863 if (Checks.size() > 0) {
864 llvm::Constant *StaticData[] = {
866 llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
867 llvm::ConstantInt::get(Int8Ty, TCK)};
868 EmitCheck(Checks, CheckHandler, StaticData, PtrAsInt ? PtrAsInt : Ptr);
869 }
870 }
871
872 // If possible, check that the vptr indicates that there is a subobject of
873 // type Ty at offset zero within this object.
874 //
875 // C++11 [basic.life]p5,6:
876 // [For storage which does not refer to an object within its lifetime]
877 // The program has undefined behavior if:
878 // -- the [pointer or glvalue] is used to access a non-static data member
879 // or call a non-static member function
880 if (SanOpts.has(SanitizerKind::Vptr) &&
881 !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
882 SanitizerDebugLocation SanScope(this, {SanitizerKind::SO_Vptr},
883 SanitizerHandler::DynamicTypeCacheMiss);
884
885 // Ensure that the pointer is non-null before loading it. If there is no
886 // compile-time guarantee, reuse the run-time null check or emit a new one.
887 if (!IsGuaranteedNonNull) {
888 if (!IsNonNull)
889 IsNonNull = Builder.CreateIsNotNull(Ptr);
890 if (!Done)
891 Done = createBasicBlock("vptr.null");
892 llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
893 Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
894 EmitBlock(VptrNotNull);
895 }
896
897 // Compute a deterministic hash of the mangled name of the type.
898 SmallString<64> MangledName;
899 llvm::raw_svector_ostream Out(MangledName);
900 CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
901 Out);
902
903 // Contained in NoSanitizeList based on the mangled type.
904 if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
905 Out.str())) {
906 // Load the vptr, and mix it with TypeHash.
907 llvm::Value *TypeHash =
908 llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));
909
910 llvm::Type *VPtrTy = llvm::PointerType::get(getLLVMContext(), 0);
911 Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
912 llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
913 Ty->getAsCXXRecordDecl(),
915 VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);
916
917 llvm::Value *Hash =
918 emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
919 Hash = Builder.CreateTrunc(Hash, IntPtrTy);
920
921 // Look the hash up in our cache.
922 const int CacheSize = 128;
923 llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
924 llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
925 "__ubsan_vptr_type_cache");
926 llvm::Value *Slot = Builder.CreateAnd(Hash,
927 llvm::ConstantInt::get(IntPtrTy,
928 CacheSize-1));
929 llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
930 llvm::Value *CacheVal = Builder.CreateAlignedLoad(
931 IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
933
934 // If the hash isn't in the cache, call a runtime handler to perform the
935 // hard work of checking whether the vptr is for an object of the right
936 // type. This will either fill in the cache and return, or produce a
937 // diagnostic.
938 llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
939 llvm::Constant *StaticData[] = {
942 CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
943 llvm::ConstantInt::get(Int8Ty, TCK)
944 };
945 llvm::Value *DynamicData[] = { Ptr, Hash };
946 EmitCheck(std::make_pair(EqualHash, SanitizerKind::SO_Vptr),
947 SanitizerHandler::DynamicTypeCacheMiss, StaticData,
948 DynamicData);
949 }
950 }
951
952 if (Done) {
953 SanitizerDebugLocation SanScope(
954 this,
955 {DoneViaNullSanitize ? SanitizerKind::SO_Null : SanitizerKind::SO_Vptr},
956 DoneViaNullSanitize ? SanitizerHandler::TypeMismatch
957 : SanitizerHandler::DynamicTypeCacheMiss);
958 Builder.CreateBr(Done);
959 EmitBlock(Done);
960 }
961}
962
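// As an illustration (hypothetical declaration), given a parameter such as
//
//   void fill(char *buf __attribute__((pass_object_size(0))), int n);
//
// Clang passes an implicit byte-size argument alongside 'buf'. The helper
// below loads that implicit size and divides it by the element size to
// recover an element count usable as an array bound.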
963llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
964 QualType EltTy) {
965 ASTContext &C = getContext();
966 uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
967 if (!EltSize)
968 return nullptr;
969
970 auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
971 if (!ArrayDeclRef)
972 return nullptr;
973
974 auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
975 if (!ParamDecl)
976 return nullptr;
977
978 auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
979 if (!POSAttr)
980 return nullptr;
981
982 // Don't load the size if it's a lower bound.
983 int POSType = POSAttr->getType();
984 if (POSType != 0 && POSType != 1)
985 return nullptr;
986
987 // Find the implicit size parameter.
988 auto PassedSizeIt = SizeArguments.find(ParamDecl);
989 if (PassedSizeIt == SizeArguments.end())
990 return nullptr;
991
992 const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
993 assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
994 Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
995 llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
996 C.getSizeType(), E->getExprLoc());
997 llvm::Value *SizeOfElement =
998 llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
999 return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
1000}
1001
1002/// If Base is known to point to the start of an array, return the length of
1003/// that array. Return 0 if the length cannot be determined.
1005 const Expr *Base,
1006 QualType &IndexedType,
1008 StrictFlexArraysLevel) {
1009 // For the vector indexing extension, the bound is the number of elements.
1010 if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
1011 IndexedType = Base->getType();
1012 return CGF.Builder.getInt32(VT->getNumElements());
1013 }
1014
1015 Base = Base->IgnoreParens();
1016
1017 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1018 if (CE->getCastKind() == CK_ArrayToPointerDecay &&
1019 !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
1020 StrictFlexArraysLevel)) {
1021 CodeGenFunction::SanitizerScope SanScope(&CGF);
1022
1023 IndexedType = CE->getSubExpr()->getType();
1024 const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
1025 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
1026 return CGF.Builder.getInt(CAT->getSize());
1027
1028 if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
1029 return CGF.getVLASize(VAT).NumElts;
1030 // Ignore pass_object_size here. It's not applicable on decayed pointers.
1031 }
1032 }
1033
1034 CodeGenFunction::SanitizerScope SanScope(&CGF);
1035
1036 QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
1037 if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
1038 IndexedType = Base->getType();
1039 return POS;
1040 }
1041
1042 return nullptr;
1043}
1044
1045namespace {
1046
1047/// \p StructAccessBase returns the base \p Expr of a field access. It returns
1048/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
1049///
1050/// p in p-> a.b.c
1051///
1052/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
1053/// looking for:
1054///
1055/// struct s {
1056/// struct s *ptr;
1057/// int count;
1058/// char array[] __attribute__((counted_by(count)));
1059/// };
1060///
1061/// If we have an expression like \p p->ptr->array[index], we want the
1062/// \p MemberExpr for \p p->ptr instead of \p p.
1063class StructAccessBase
1064 : public ConstStmtVisitor<StructAccessBase, const Expr *> {
1065 const RecordDecl *ExpectedRD;
1066
1067 bool IsExpectedRecordDecl(const Expr *E) const {
1068 QualType Ty = E->getType();
1069 if (Ty->isPointerType())
1070 Ty = Ty->getPointeeType();
1071 return ExpectedRD == Ty->getAsRecordDecl();
1072 }
1073
1074public:
1075 StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}
1076
1077 //===--------------------------------------------------------------------===//
1078 // Visitor Methods
1079 //===--------------------------------------------------------------------===//
1080
1081 // NOTE: If we build C++ support for counted_by, then we'll have to handle
1082 // horrors like this:
1083 //
1084 // struct S {
1085 // int x, y;
1086 // int blah[] __attribute__((counted_by(x)));
1087 // } s;
1088 //
1089 // int foo(int index, int val) {
1090 // int (S::*IHatePMDs)[] = &S::blah;
1091 // (s.*IHatePMDs)[index] = val;
1092 // }
1093
1094 const Expr *Visit(const Expr *E) {
1095 return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E);
1096 }
1097
1098 const Expr *VisitStmt(const Stmt *S) { return nullptr; }
1099
1100 // These are the types we expect to return (in order of most to least
1101 // likely):
1102 //
1103 // 1. DeclRefExpr - This is the expression for the base of the structure.
1104 // It's exactly what we want to build an access to the \p counted_by
1105 // field.
1106 // 2. MemberExpr - This is the expression that has the same \p RecordDecl
1107// as the flexible array member's lexical enclosing \p RecordDecl. This
1108 // allows us to catch things like: "p->p->array"
1109 // 3. CompoundLiteralExpr - This is for people who create something
1110 // heretical like (struct foo has a flexible array member):
1111 //
1112 // (struct foo){ 1, 2 }.blah[idx];
1113 const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
1114 return IsExpectedRecordDecl(E) ? E : nullptr;
1115 }
1116 const Expr *VisitMemberExpr(const MemberExpr *E) {
1117 if (IsExpectedRecordDecl(E) && E->isArrow())
1118 return E;
1119 const Expr *Res = Visit(E->getBase());
1120 return !Res && IsExpectedRecordDecl(E) ? E : Res;
1121 }
1122 const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
1123 return IsExpectedRecordDecl(E) ? E : nullptr;
1124 }
1125 const Expr *VisitCallExpr(const CallExpr *E) {
1126 return IsExpectedRecordDecl(E) ? E : nullptr;
1127 }
1128
1129 const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1130 if (IsExpectedRecordDecl(E))
1131 return E;
1132 return Visit(E->getBase());
1133 }
1134 const Expr *VisitCastExpr(const CastExpr *E) {
1135 if (E->getCastKind() == CK_LValueToRValue)
1136 return IsExpectedRecordDecl(E) ? E : nullptr;
1137 return Visit(E->getSubExpr());
1138 }
1139 const Expr *VisitParenExpr(const ParenExpr *E) {
1140 return Visit(E->getSubExpr());
1141 }
1142 const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
1143 return Visit(E->getSubExpr());
1144 }
1145 const Expr *VisitUnaryDeref(const UnaryOperator *E) {
1146 return Visit(E->getSubExpr());
1147 }
1148};
1149
1150} // end anonymous namespace
1151
1152using RecIndicesTy = SmallVector<llvm::Value *, 8>;
1153
1154static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD,
1155 const FieldDecl *Field,
1156 RecIndicesTy &Indices) {
1157 const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
1158 int64_t FieldNo = -1;
1159 for (const FieldDecl *FD : RD->fields()) {
1160 if (!Layout.containsFieldDecl(FD))
1161 // This could happen if the field has a struct type that's empty. I don't
1162 // know why either.
1163 continue;
1164
1165 FieldNo = Layout.getLLVMFieldNo(FD);
1166 if (FD == Field) {
1167 Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1168 return true;
1169 }
1170
1171 QualType Ty = FD->getType();
1172 if (Ty->isRecordType()) {
1173 if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
1174 if (RD->isUnion())
1175 FieldNo = 0;
1176 Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1177 return true;
1178 }
1179 }
1180 }
1181
1182 return false;
1183}
1184
1185llvm::Value *CodeGenFunction::GetCountedByFieldExprGEP(
1186 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1187 const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();
1188
1189 // Find the base struct expr (i.e. p in p->a.b.c.d).
1190 const Expr *StructBase = StructAccessBase(RD).Visit(Base);
1191 if (!StructBase || StructBase->HasSideEffects(getContext()))
1192 return nullptr;
1193
1194 llvm::Value *Res = nullptr;
1195 if (StructBase->getType()->isPointerType()) {
1196 LValueBaseInfo BaseInfo;
1197 TBAAAccessInfo TBAAInfo;
1198 Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
1199 Res = Addr.emitRawPointer(*this);
1200 } else if (StructBase->isLValue()) {
1201 LValue LV = EmitLValue(StructBase);
1202 Address Addr = LV.getAddress();
1203 Res = Addr.emitRawPointer(*this);
1204 } else {
1205 return nullptr;
1206 }
1207
1208 RecIndicesTy Indices;
1209 getGEPIndicesToField(*this, RD, CountDecl, Indices);
1210 if (Indices.empty())
1211 return nullptr;
1212
1213 Indices.push_back(Builder.getInt32(0));
1214 CanQualType T = CGM.getContext().getCanonicalTagType(RD);
1215 return Builder.CreateInBoundsGEP(ConvertType(T), Res,
1216 RecIndicesTy(llvm::reverse(Indices)),
1217 "counted_by.gep");
1218}
1219
1220/// This method is typically called in contexts where we can't generate
1221/// side-effects, like in __builtin_dynamic_object_size. When finding
1222/// expressions, only choose those that have either already been emitted or can
1223/// be loaded without side-effects.
1224///
1225/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
1226/// within the top-level struct.
1227/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
1228llvm::Value *CodeGenFunction::EmitLoadOfCountedByField(
1229 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1230 if (llvm::Value *GEP = GetCountedByFieldExprGEP(Base, FAMDecl, CountDecl))
1231 return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), GEP,
1232 getIntAlign(), "counted_by.load");
1233 return nullptr;
1234}
1235
1236void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
1237 llvm::Value *Index, QualType IndexType,
1238 bool Accessed) {
1239 assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
1240 "should not be called unless adding bounds checks");
1241 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1242 getLangOpts().getStrictFlexArraysLevel();
1243 QualType IndexedType;
1244 llvm::Value *Bound =
1245 getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);
1246
1247 EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
1248}
1249
1250void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
1251 llvm::Value *Index,
1252 QualType IndexType,
1253 QualType IndexedType, bool Accessed) {
1254 if (!Bound)
1255 return;
1256
1257 auto CheckKind = SanitizerKind::SO_ArrayBounds;
1258 auto CheckHandler = SanitizerHandler::OutOfBounds;
1259 SanitizerDebugLocation SanScope(this, {CheckKind}, CheckHandler);
1260
1261 bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
1262 llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
1263 llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
1264
1265 llvm::Constant *StaticData[] = {
1266 EmitCheckSourceLocation(E->getExprLoc()),
1267 EmitCheckTypeDescriptor(IndexedType),
1268 EmitCheckTypeDescriptor(IndexType)
1269 };
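 // For example, with 'int a[10]' an access 'a[i]' must satisfy i < 10 (ULT),
 // whereas merely forming the past-the-end address '&a[10]' is valid, so a
 // non-accessed index is only checked with i <= 10 (ULE).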
1270 llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
1271 : Builder.CreateICmpULE(IndexVal, BoundVal);
1272 EmitCheck(std::make_pair(Check, CheckKind), CheckHandler, StaticData, Index);
1273}
1274
1275static bool
1278 bool &IncompleteType) {
1279 QualType CanonicalType = T.getCanonicalType();
1280 if (CanonicalType->isPointerType())
1281 return true; // base case
1282
1283 // Look through typedef chain to check for special types.
1284 for (QualType CurrentT = T; const auto *TT = CurrentT->getAs<TypedefType>();
1285 CurrentT = TT->getDecl()->getUnderlyingType()) {
1286 const IdentifierInfo *II = TT->getDecl()->getIdentifier();
1287 // Special case: syntactically, uintptr_t is not a pointer type, but it is
1288 // very likely used to hold pointer values. Therefore, classify uintptr_t
1289 // as a pointer, too.
1290 if (II && II->isStr("uintptr_t"))
1291 return true;
1292 }
1293
1294 // The type is an array; check the element type.
1295 if (const ArrayType *AT = dyn_cast<ArrayType>(CanonicalType))
1296 return typeContainsPointer(AT->getElementType(), VisitedRD, IncompleteType);
1297 // The type is a struct, class, or union.
1298 if (const RecordDecl *RD = CanonicalType->getAsRecordDecl()) {
1299 if (!RD->isCompleteDefinition()) {
1300 IncompleteType = true;
1301 return false;
1302 }
1303 if (!VisitedRD.insert(RD).second)
1304 return false; // already visited
1305 // Check all fields.
1306 for (const FieldDecl *Field : RD->fields()) {
1307 if (typeContainsPointer(Field->getType(), VisitedRD, IncompleteType))
1308 return true;
1309 }
1310 // For C++ classes, also check base classes.
1311 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1312 // Polymorphic types require a vptr.
1313 if (CXXRD->isDynamicClass())
1314 return true;
1315 for (const CXXBaseSpecifier &Base : CXXRD->bases()) {
1316 if (typeContainsPointer(Base.getType(), VisitedRD, IncompleteType))
1317 return true;
1318 }
1319 }
1320 }
1321 return false;
1322}
1323
1324void CodeGenFunction::EmitAllocToken(llvm::CallBase *CB, QualType AllocType) {
1325 assert(SanOpts.has(SanitizerKind::AllocToken) &&
1326 "Only needed with -fsanitize=alloc-token");
1327
1328 llvm::MDBuilder MDB(getLLVMContext());
1329
1330 // Get unique type name.
1331 PrintingPolicy Policy(CGM.getContext().getLangOpts());
1332 Policy.SuppressTagKeyword = true;
1333 Policy.FullyQualifiedName = true;
1335 llvm::raw_svector_ostream TypeNameOS(TypeName);
1336 AllocType.getCanonicalType().print(TypeNameOS, Policy);
1337 auto *TypeNameMD = MDB.createString(TypeNameOS.str());
1338
1339 // Check if QualType contains a pointer. Implements a simple DFS to
1340 // recursively check if a type contains a pointer type.
1342 bool IncompleteType = false;
1343 const bool ContainsPtr =
1344 typeContainsPointer(AllocType, VisitedRD, IncompleteType);
1345 if (!ContainsPtr && IncompleteType)
1346 return;
1347 auto *ContainsPtrC = Builder.getInt1(ContainsPtr);
1348 auto *ContainsPtrMD = MDB.createConstant(ContainsPtrC);
1349
1350 // Format: !{<type-name>, <contains-pointer>}
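 // For instance, allocating a record type 'S' that contains a pointer member
 // would be tagged roughly as: !alloc_token !{!"S", i1 true}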
1351 auto *MDN =
1352 llvm::MDNode::get(CGM.getLLVMContext(), {TypeNameMD, ContainsPtrMD});
1353 CB->setMetadata(llvm::LLVMContext::MD_alloc_token, MDN);
1354}
1355
1358 bool isInc, bool isPre) {
1359 ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());
1360
1361 llvm::Value *NextVal;
1362 if (isa<llvm::IntegerType>(InVal.first->getType())) {
1363 uint64_t AmountVal = isInc ? 1 : -1;
1364 NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
1365
1366 // Add the inc/dec to the real part.
1367 NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1368 } else {
1369 QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
1370 llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
1371 if (!isInc)
1372 FVal.changeSign();
1373 NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
1374
1375 // Add the inc/dec to the real part.
1376 NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1377 }
1378
1379 ComplexPairTy IncVal(NextVal, InVal.second);
1380
1381 // Store the updated result through the lvalue.
1382 EmitStoreOfComplex(IncVal, LV, /*init*/ false);
1383 if (getLangOpts().OpenMP)
1384 CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
1385 E->getSubExpr());
1386
1387 // If this is a postinc, return the value read from memory, otherwise use the
1388 // updated value.
1389 return isPre ? IncVal : InVal;
1390}
1391
1392void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
1393 CodeGenFunction *CGF) {
1394 // Bind VLAs in the cast type.
1395 if (CGF && E->getType()->isVariablyModifiedType())
1396 CGF->EmitVariablyModifiedType(E->getType());
1397
1398 if (CGDebugInfo *DI = getModuleDebugInfo())
1399 DI->EmitExplicitCastType(E->getType());
1400}
1401
1402//===----------------------------------------------------------------------===//
1403// LValue Expression Emission
1404//===----------------------------------------------------------------------===//
1405
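// A rough worked example: for an array whose base is 16-byte aligned and whose
// elements are 4 bytes wide, a constant index of 3 gives offset 12 and hence a
// 4-byte-aligned element, while an unknown index conservatively falls back to
// the 4-byte worst-case element alignment.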
1406static CharUnits getArrayElementAlign(CharUnits arrayAlign, llvm::Value *idx,
1407 CharUnits eltSize) {
1408 // If we have a constant index, we can use the exact offset of the
1409 // element we're accessing.
1410 if (auto *constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
1411 CharUnits offset = constantIdx->getZExtValue() * eltSize;
1412 return arrayAlign.alignmentAtOffset(offset);
1413 }
1414
1415 // Otherwise, use the worst-case alignment for any element.
1416 return arrayAlign.alignmentOfArrayElement(eltSize);
1417}
1418
1419/// Emit pointer + index arithmetic.
1421 const BinaryOperator *BO,
1422 LValueBaseInfo *BaseInfo,
1423 TBAAAccessInfo *TBAAInfo,
1424 KnownNonNull_t IsKnownNonNull) {
1425 assert(BO->isAdditiveOp() && "Expect an addition or subtraction.");
1426 Expr *pointerOperand = BO->getLHS();
1427 Expr *indexOperand = BO->getRHS();
1428 bool isSubtraction = BO->getOpcode() == BO_Sub;
1429
1430 Address BaseAddr = Address::invalid();
1431 llvm::Value *index = nullptr;
1432 // In a subtraction, the LHS is always the pointer.
1433 // Note: do not change the evaluation order.
1434 if (!isSubtraction && !pointerOperand->getType()->isAnyPointerType()) {
1435 std::swap(pointerOperand, indexOperand);
1436 index = CGF.EmitScalarExpr(indexOperand);
1437 BaseAddr = CGF.EmitPointerWithAlignment(pointerOperand, BaseInfo, TBAAInfo,
1439 } else {
1440 BaseAddr = CGF.EmitPointerWithAlignment(pointerOperand, BaseInfo, TBAAInfo,
1442 index = CGF.EmitScalarExpr(indexOperand);
1443 }
1444
1445 llvm::Value *pointer = BaseAddr.getBasePointer();
1446 llvm::Value *Res = CGF.EmitPointerArithmetic(
1447 BO, pointerOperand, pointer, indexOperand, index, isSubtraction);
1448 QualType PointeeTy = BO->getType()->getPointeeType();
1449 CharUnits Align =
1451 CGF.getContext().getTypeSizeInChars(PointeeTy));
1452 return Address(Res, CGF.ConvertTypeForMem(PointeeTy), Align,
1454 /*Offset=*/nullptr, IsKnownNonNull);
1455}
1456
1457static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
1458 TBAAAccessInfo *TBAAInfo,
1459 KnownNonNull_t IsKnownNonNull,
1460 CodeGenFunction &CGF) {
1461 // We allow this with ObjC object pointers because of fragile ABIs.
1462 assert(E->getType()->isPointerType() ||
1464 E = E->IgnoreParens();
1465
1466 // Casts:
1467 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
1468 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
1469 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
1470
1471 switch (CE->getCastKind()) {
1472 // Non-converting casts (but not C's implicit conversion from void*).
1473 case CK_BitCast:
1474 case CK_NoOp:
1475 case CK_AddressSpaceConversion:
1476 if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
1477 if (PtrTy->getPointeeType()->isVoidType())
1478 break;
1479
1480 LValueBaseInfo InnerBaseInfo;
1481 TBAAAccessInfo InnerTBAAInfo;
1483 CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
1484 if (BaseInfo) *BaseInfo = InnerBaseInfo;
1485 if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
1486
1487 if (isa<ExplicitCastExpr>(CE)) {
1488 LValueBaseInfo TargetTypeBaseInfo;
1489 TBAAAccessInfo TargetTypeTBAAInfo;
1491 E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
1492 if (TBAAInfo)
1493 *TBAAInfo =
1494 CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
1495 // If the source l-value is opaque, honor the alignment of the
1496 // casted-to type.
1497 if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
1498 if (BaseInfo)
1499 BaseInfo->mergeForCast(TargetTypeBaseInfo);
1500 Addr.setAlignment(Align);
1501 }
1502 }
1503
1504 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
1505 CE->getCastKind() == CK_BitCast) {
1506 if (auto PT = E->getType()->getAs<PointerType>())
1507 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
1508 /*MayBeNull=*/true,
1510 CE->getBeginLoc());
1511 }
1512
1513 llvm::Type *ElemTy =
1515 Addr = Addr.withElementType(ElemTy);
1516 if (CE->getCastKind() == CK_AddressSpaceConversion)
1518 Addr, CGF.ConvertType(E->getType()), ElemTy);
1519
1520 return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
1521 CE->getType());
1522 }
1523 break;
1524
1525 // Array-to-pointer decay.
1526 case CK_ArrayToPointerDecay:
1527 return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
1528
1529 // Derived-to-base conversions.
1530 case CK_UncheckedDerivedToBase:
1531 case CK_DerivedToBase: {
1532 // TODO: Support accesses to members of base classes in TBAA. For now, we
1533 // conservatively pretend that the complete object is of the base class
1534 // type.
1535 if (TBAAInfo)
1536 *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
1538 CE->getSubExpr(), BaseInfo, nullptr,
1539 (KnownNonNull_t)(IsKnownNonNull ||
1540 CE->getCastKind() == CK_UncheckedDerivedToBase));
1541 auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
1542 return CGF.GetAddressOfBaseClass(
1543 Addr, Derived, CE->path_begin(), CE->path_end(),
1544 CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
1545 }
1546
1547 // TODO: Is there any reason to treat base-to-derived conversions
1548 // specially?
1549 default:
1550 break;
1551 }
1552 }
1553
1554 // Unary &.
1555 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
1556 if (UO->getOpcode() == UO_AddrOf) {
1557 LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
1558 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1559 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1560 return LV.getAddress();
1561 }
1562 }
1563
1564 // std::addressof and variants.
1565 if (auto *Call = dyn_cast<CallExpr>(E)) {
1566 switch (Call->getBuiltinCallee()) {
1567 default:
1568 break;
1569 case Builtin::BIaddressof:
1570 case Builtin::BI__addressof:
1571 case Builtin::BI__builtin_addressof: {
1572 LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
1573 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1574 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1575 return LV.getAddress();
1576 }
1577 }
1578 }
1579
1580 // Pointer arithmetic: pointer +/- index.
1581 if (auto *BO = dyn_cast<BinaryOperator>(E)) {
1582 if (BO->isAdditiveOp())
1583 return emitPointerArithmetic(CGF, BO, BaseInfo, TBAAInfo, IsKnownNonNull);
1584 }
1585
1586 // TODO: conditional operators, comma.
1587
1588 // Otherwise, use the alignment of the type.
1591 /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
1592}
1593
1594/// EmitPointerWithAlignment - Given an expression of pointer type, try to
1595/// derive a more accurate bound on the alignment of the pointer.
1597 const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
1598 KnownNonNull_t IsKnownNonNull) {
1599 Address Addr =
1600 ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
1601 if (IsKnownNonNull && !Addr.isKnownNonNull())
1602 Addr.setKnownNonNull();
1603 return Addr;
1604}
1605
1607 llvm::Value *V = RV.getScalarVal();
1608 if (auto MPT = T->getAs<MemberPointerType>())
1609 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
1610 return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
1611}
1612
1614 if (Ty->isVoidType())
1615 return RValue::get(nullptr);
1616
1617 switch (getEvaluationKind(Ty)) {
1618 case TEK_Complex: {
1619 llvm::Type *EltTy =
1621 llvm::Value *U = llvm::UndefValue::get(EltTy);
1622 return RValue::getComplex(std::make_pair(U, U));
1623 }
1624
1625 // If this is a use of an undefined aggregate type, the aggregate must have an
1626 // identifiable address. Just because the contents of the value are undefined
1627 // doesn't mean that the address can't be taken and compared.
1628 case TEK_Aggregate: {
1629 Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
1630 return RValue::getAggregate(DestPtr);
1631 }
1632
1633 case TEK_Scalar:
1634 return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
1635 }
1636 llvm_unreachable("bad evaluation kind");
1637}
1638
1640 const char *Name) {
1641 ErrorUnsupported(E, Name);
1642 return GetUndefRValue(E->getType());
1643}
1644
1646 const char *Name) {
1647 ErrorUnsupported(E, Name);
1648 llvm::Type *ElTy = ConvertType(E->getType());
1649 llvm::Type *Ty = UnqualPtrTy;
1650 return MakeAddrLValue(
1651 Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
1652}
1653
1653
1654bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
1655 const Expr *Base = Obj;
1656 while (!isa<CXXThisExpr>(Base)) {
1657 // The result of a dynamic_cast can be null.
1658 if (isa<CXXDynamicCastExpr>(Base))
1659 return false;
1660
1661 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1662 Base = CE->getSubExpr();
1663 } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1664 Base = PE->getSubExpr();
1665 } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1666 if (UO->getOpcode() == UO_Extension)
1667 Base = UO->getSubExpr();
1668 else
1669 return false;
1670 } else {
1671 return false;
1672 }
1673 }
1674 return true;
1675}
1676
1678 LValue LV;
1679 if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
1680 LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
1681 else
1682 LV = EmitLValue(E);
1683 if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
1684 SanitizerSet SkippedChecks;
1685 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
1686 bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
1687 if (IsBaseCXXThis)
1688 SkippedChecks.set(SanitizerKind::Alignment, true);
1689 if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
1690 SkippedChecks.set(SanitizerKind::Null, true);
1691 }
1692 EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
1693 }
1694 return LV;
1695}
1696
1697/// EmitLValue - Emit code to compute a designator that specifies the location
1698/// of the expression.
1699///
1700/// This can return one of two things: a simple address or a bitfield reference.
1701/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1702/// an LLVM pointer type.
1703///
1704/// If this returns a bitfield reference, nothing about the pointee type of the
1705/// LLVM value is known: For example, it may not be a pointer to an integer.
1706///
1707/// If this returns a normal address, and if the lvalue's C type is fixed size,
1708/// this method guarantees that the returned pointer type will point to an LLVM
1709/// type of the same size of the lvalue's type. If the lvalue has a variable
1710/// length type, this is not possible.
1711///
1713 KnownNonNull_t IsKnownNonNull) {
1714 // Run with sufficient stack space so that deeply nested expressions don't
1715 // cause a stack overflow.
1716 LValue LV;
1717 CGM.runWithSufficientStackSpace(
1718 E->getExprLoc(), [&] { LV = EmitLValueHelper(E, IsKnownNonNull); });
1719
1720 if (IsKnownNonNull && !LV.isKnownNonNull())
1721 LV.setKnownNonNull();
1722 return LV;
1723}
1724
1726 const ASTContext &Ctx) {
1727 const Expr *SE = E->getSubExpr()->IgnoreImplicit();
1728 if (isa<OpaqueValueExpr>(SE))
1729 return SE->getType();
1730 return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
1731}
1732
1733LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
1734 KnownNonNull_t IsKnownNonNull) {
1735 ApplyDebugLocation DL(*this, E);
1736 switch (E->getStmtClass()) {
1737 default: return EmitUnsupportedLValue(E, "l-value expression");
1738
1739 case Expr::ObjCPropertyRefExprClass:
1740 llvm_unreachable("cannot emit a property reference directly");
1741
1742 case Expr::ObjCSelectorExprClass:
1744 case Expr::ObjCIsaExprClass:
1746 case Expr::BinaryOperatorClass:
1748 case Expr::CompoundAssignOperatorClass: {
1749 QualType Ty = E->getType();
1750 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1751 Ty = AT->getValueType();
1752 if (!Ty->isAnyComplexType())
1755 }
1756 case Expr::CallExprClass:
1757 case Expr::CXXMemberCallExprClass:
1758 case Expr::CXXOperatorCallExprClass:
1759 case Expr::UserDefinedLiteralClass:
1761 case Expr::CXXRewrittenBinaryOperatorClass:
1762 return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
1763 IsKnownNonNull);
1764 case Expr::VAArgExprClass:
1766 case Expr::DeclRefExprClass:
1768 case Expr::ConstantExprClass: {
1769 const ConstantExpr *CE = cast<ConstantExpr>(E);
1770 if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
1771 QualType RetType = getConstantExprReferredType(CE, getContext());
1772 return MakeNaturalAlignAddrLValue(Result, RetType);
1773 }
1774 return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
1775 }
1776 case Expr::ParenExprClass:
1777 return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
1778 case Expr::GenericSelectionExprClass:
1779 return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
1780 IsKnownNonNull);
1781 case Expr::PredefinedExprClass:
1783 case Expr::StringLiteralClass:
1785 case Expr::ObjCEncodeExprClass:
1787 case Expr::PseudoObjectExprClass:
1789 case Expr::InitListExprClass:
1791 case Expr::CXXTemporaryObjectExprClass:
1792 case Expr::CXXConstructExprClass:
1794 case Expr::CXXBindTemporaryExprClass:
1796 case Expr::CXXUuidofExprClass:
1798 case Expr::LambdaExprClass:
1799 return EmitAggExprToLValue(E);
1800
1801 case Expr::ExprWithCleanupsClass: {
1802 const auto *cleanups = cast<ExprWithCleanups>(E);
1803 RunCleanupsScope Scope(*this);
1804 LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
1805 if (LV.isSimple()) {
1806 // Defend against branches out of GNU statement expressions surrounded by
1807 // cleanups.
1808 Address Addr = LV.getAddress();
1809 llvm::Value *V = Addr.getBasePointer();
1810 Scope.ForceCleanup({&V});
1811 Addr.replaceBasePointer(V);
1812 return LValue::MakeAddr(Addr, LV.getType(), getContext(),
1813 LV.getBaseInfo(), LV.getTBAAInfo());
1814 }
1815 // FIXME: Is it possible to create an ExprWithCleanups that produces a
1816 // bitfield lvalue or some other non-simple lvalue?
1817 return LV;
1818 }
1819
1820 case Expr::CXXDefaultArgExprClass: {
1821 auto *DAE = cast<CXXDefaultArgExpr>(E);
1822 CXXDefaultArgExprScope Scope(*this, DAE);
1823 return EmitLValue(DAE->getExpr(), IsKnownNonNull);
1824 }
1825 case Expr::CXXDefaultInitExprClass: {
1826 auto *DIE = cast<CXXDefaultInitExpr>(E);
1827 CXXDefaultInitExprScope Scope(*this, DIE);
1828 return EmitLValue(DIE->getExpr(), IsKnownNonNull);
1829 }
1830 case Expr::CXXTypeidExprClass:
1832
1833 case Expr::ObjCMessageExprClass:
1835 case Expr::ObjCIvarRefExprClass:
1837 case Expr::StmtExprClass:
1839 case Expr::UnaryOperatorClass:
1841 case Expr::ArraySubscriptExprClass:
1843 case Expr::MatrixSubscriptExprClass:
1845 case Expr::ArraySectionExprClass:
1847 case Expr::ExtVectorElementExprClass:
1849 case Expr::CXXThisExprClass:
1851 case Expr::MemberExprClass:
1853 case Expr::CompoundLiteralExprClass:
1855 case Expr::ConditionalOperatorClass:
1857 case Expr::BinaryConditionalOperatorClass:
1859 case Expr::ChooseExprClass:
1860 return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
1861 case Expr::OpaqueValueExprClass:
1863 case Expr::SubstNonTypeTemplateParmExprClass:
1864 return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
1865 IsKnownNonNull);
1866 case Expr::ImplicitCastExprClass:
1867 case Expr::CStyleCastExprClass:
1868 case Expr::CXXFunctionalCastExprClass:
1869 case Expr::CXXStaticCastExprClass:
1870 case Expr::CXXDynamicCastExprClass:
1871 case Expr::CXXReinterpretCastExprClass:
1872 case Expr::CXXConstCastExprClass:
1873 case Expr::CXXAddrspaceCastExprClass:
1874 case Expr::ObjCBridgedCastExprClass:
1875 return EmitCastLValue(cast<CastExpr>(E));
1876
1877 case Expr::MaterializeTemporaryExprClass:
1879
1880 case Expr::CoawaitExprClass:
1882 case Expr::CoyieldExprClass:
1884 case Expr::PackIndexingExprClass:
1885 return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
1886 case Expr::HLSLOutArgExprClass:
1887 llvm_unreachable("cannot emit a HLSL out argument directly");
1888 }
1889}
1890
1891/// Given an object of the given canonical type, can we safely copy a
1892/// value out of it based on its initializer?
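/// For instance, a `const int` object qualifies, while a `volatile const int`
/// or a class type with mutable members does not.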
1894 assert(type.isCanonical());
1895 assert(!type->isReferenceType());
1896
1897 // Must be const-qualified but non-volatile.
1898 Qualifiers qs = type.getLocalQualifiers();
1899 if (!qs.hasConst() || qs.hasVolatile()) return false;
1900
1901 // Otherwise, all object types satisfy this except C++ classes with
1902 // mutable subobjects or non-trivial copy/destroy behavior.
1903 if (const auto *RT = dyn_cast<RecordType>(type))
1904 if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getOriginalDecl())) {
1905 RD = RD->getDefinitionOrSelf();
1906 if (RD->hasMutableFields() || !RD->isTrivial())
1907 return false;
1908 }
1909
1910 return true;
1911}
1912
1913/// Can we constant-emit a load of a reference to a variable of the
1914/// given type? This is different from predicates like
1915/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1916/// in situations that don't necessarily satisfy the language's rules
1917 /// for this (e.g. C++'s ODR-use rules). For example, we want to be able
1918/// to do this with const float variables even if those variables
1919/// aren't marked 'constexpr'.
1927 type = type.getCanonicalType();
1928 if (const auto *ref = dyn_cast<ReferenceType>(type)) {
1929 if (isConstantEmittableObjectType(ref->getPointeeType()))
1931 return CEK_AsReferenceOnly;
1932 }
1934 return CEK_AsValueOnly;
1935 return CEK_None;
1936}
1937
1938/// Try to emit a reference to the given value without producing it as
1939/// an l-value. This is just an optimization, but it avoids us needing
1940/// to emit global copies of variables if they're named without triggering
1941/// a formal use in a context where we can't emit a direct reference to them,
1942/// for instance if a block or lambda or a member of a local class uses a
1943/// const int variable or constexpr variable from an enclosing function.
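/// A minimal sketch of the case this covers:
///   void f() {
///     const int N = 4;
///     auto g = [] { return N; };  // N is named without an odr-use, so no
///   }                             // capture; the use is emitted as the constant 4.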
1946 const ValueDecl *Value = RefExpr->getDecl();
1947
1948 // The value needs to be an enum constant or a constant variable.
1950 if (isa<ParmVarDecl>(Value)) {
1951 CEK = CEK_None;
1952 } else if (const auto *var = dyn_cast<VarDecl>(Value)) {
1953 CEK = checkVarTypeForConstantEmission(var->getType());
1954 } else if (isa<EnumConstantDecl>(Value)) {
1955 CEK = CEK_AsValueOnly;
1956 } else {
1957 CEK = CEK_None;
1958 }
1959 if (CEK == CEK_None) return ConstantEmission();
1960
1961 Expr::EvalResult result;
1962 bool resultIsReference;
1963 QualType resultType;
1964
1965 // It's best to evaluate all the way as an r-value if that's permitted.
1966 if (CEK != CEK_AsReferenceOnly &&
1967 RefExpr->EvaluateAsRValue(result, getContext())) {
1968 resultIsReference = false;
1969 resultType = RefExpr->getType().getUnqualifiedType();
1970
1971 // Otherwise, try to evaluate as an l-value.
1972 } else if (CEK != CEK_AsValueOnly &&
1973 RefExpr->EvaluateAsLValue(result, getContext())) {
1974 resultIsReference = true;
1975 resultType = Value->getType();
1976
1977 // Failure.
1978 } else {
1979 return ConstantEmission();
1980 }
1981
1982 // In any case, if the initializer has side-effects, abandon ship.
1983 if (result.HasSideEffects)
1984 return ConstantEmission();
1985
1986 // In CUDA/HIP device compilation, a lambda may capture a reference variable
1987 // referencing a global host variable by copy. In this case the lambda should
1988 // make a copy of the value of the global host variable. The DRE of the
1989 // captured reference variable cannot be emitted as a compile-time constant
1990 // load from the host global variable, since the host variable is not
1991 // accessible on the device. The DRE of the captured reference variable has
1992 // to be loaded from the captures.
1993 if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
1995 auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
1996 if (isLambdaMethod(MD) && MD->getOverloadedOperator() == OO_Call) {
1997 const APValue::LValueBase &base = result.Val.getLValueBase();
1998 if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
1999 if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
2000 if (!VD->hasAttr<CUDADeviceAttr>()) {
2001 return ConstantEmission();
2002 }
2003 }
2004 }
2005 }
2006 }
2007
2008 // Emit as a constant.
2009 llvm::Constant *C = ConstantEmitter(*this).emitAbstract(
2010 RefExpr->getLocation(), result.Val, resultType);
2011
2012 // Make sure we emit a debug reference to the global variable.
2013 // This should probably fire even for
2014 if (isa<VarDecl>(Value)) {
2015 if (!getContext().DeclMustBeEmitted(cast<VarDecl>(Value)))
2016 EmitDeclRefExprDbgValue(RefExpr, result.Val);
2017 } else {
2019 EmitDeclRefExprDbgValue(RefExpr, result.Val);
2020 }
2021
2022 // If we emitted a reference constant, we need to dereference that.
2023 if (resultIsReference)
2025
2027}
2028
2030 const MemberExpr *ME) {
2031 if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
2032 // Try to emit static variable member expressions as DREs.
2033 return DeclRefExpr::Create(
2035 /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
2036 ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
2037 }
2038 return nullptr;
2039}
2040
2044 return tryEmitAsConstant(DRE);
2045 return ConstantEmission();
2046}
2047
2049 const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
2050 assert(Constant && "not a constant");
2051 if (Constant.isReference())
2052 return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
2053 E->getExprLoc())
2054 .getScalarVal();
2055 return Constant.getValue();
2056}
2057
2059 SourceLocation Loc) {
2060 return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
2061 lvalue.getType(), Loc, lvalue.getBaseInfo(),
2062 lvalue.getTBAAInfo(), lvalue.isNontemporal());
2063}
2064
2066 llvm::APInt &Min, llvm::APInt &End,
2067 bool StrictEnums, bool IsBool) {
2068 const auto *ED = Ty->getAsEnumDecl();
2069 bool IsRegularCPlusPlusEnum =
2070 CGF.getLangOpts().CPlusPlus && StrictEnums && ED && !ED->isFixed();
2071 if (!IsBool && !IsRegularCPlusPlusEnum)
2072 return false;
2073
2074 if (IsBool) {
2075 Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
2076 End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
2077 } else {
2078 ED->getValueRange(End, Min);
2079 }
2080 return true;
2081}
2082
2083llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
2084 llvm::APInt Min, End;
2085 if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
2086 Ty->hasBooleanRepresentation() && !Ty->isVectorType()))
2087 return nullptr;
2088
2089 llvm::MDBuilder MDHelper(getLLVMContext());
2090 return MDHelper.createRange(Min, End);
2091}
2092
2094 SourceLocation Loc) {
2095 if (EmitScalarRangeCheck(Load, Ty, Loc)) {
2096 // In order to prevent the optimizer from throwing away the check, don't
2097 // attach range metadata to the load.
2098 } else if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2099 if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
2100 Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
2101 Load->setMetadata(llvm::LLVMContext::MD_noundef,
2102 llvm::MDNode::get(CGM.getLLVMContext(), {}));
2103 }
2104 }
2105}
2106
2108 SourceLocation Loc) {
2109 bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
2110 bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
2111 if (!HasBoolCheck && !HasEnumCheck)
2112 return false;
2113
2114 bool IsBool = (Ty->hasBooleanRepresentation() && !Ty->isVectorType()) ||
2115 NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
2116 bool NeedsBoolCheck = HasBoolCheck && IsBool;
2117 bool NeedsEnumCheck = HasEnumCheck && Ty->isEnumeralType();
2118 if (!NeedsBoolCheck && !NeedsEnumCheck)
2119 return false;
2120
2121 // Single-bit booleans don't need to be checked. Special-case this to avoid
2122 // a bit width mismatch when handling bitfield values. This is handled by
2123 // EmitFromMemory for the non-bitfield case.
2124 if (IsBool &&
2125 cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
2126 return false;
2127
2128 if (NeedsEnumCheck &&
2129 getContext().isTypeIgnoredBySanitizer(SanitizerKind::Enum, Ty))
2130 return false;
2131
2132 llvm::APInt Min, End;
2133 if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
2134 return true;
2135
2137 NeedsEnumCheck ? SanitizerKind::SO_Enum : SanitizerKind::SO_Bool;
2138
2139 auto &Ctx = getLLVMContext();
2140 auto CheckHandler = SanitizerHandler::LoadInvalidValue;
2141 SanitizerDebugLocation SanScope(this, {Kind}, CheckHandler);
2142 llvm::Value *Check;
2143 --End;
2144 if (!Min) {
2145 Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
2146 } else {
2147 llvm::Value *Upper =
2148 Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
2149 llvm::Value *Lower =
2150 Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
2151 Check = Builder.CreateAnd(Upper, Lower);
2152 }
2153 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
2155 EmitCheck(std::make_pair(Check, Kind), CheckHandler, StaticArgs, Value);
2156 return true;
2157}
2158
2160 QualType Ty,
2161 SourceLocation Loc,
2162 LValueBaseInfo BaseInfo,
2163 TBAAAccessInfo TBAAInfo,
2164 bool isNontemporal) {
2165 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2166 if (GV->isThreadLocal())
2167 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2169
2170 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2171 // Boolean vectors use `iN` as storage type.
2172 if (ClangVecTy->isPackedVectorBoolType(getContext())) {
2173 llvm::Type *ValTy = ConvertType(Ty);
2174 unsigned ValNumElems =
2175 cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2176 // Load the `iP` storage object (P is the padded vector size).
2177 auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
2178 const auto *RawIntTy = RawIntV->getType();
2179 assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
2180 // Bitcast iP --> <P x i1>.
2181 auto *PaddedVecTy = llvm::FixedVectorType::get(
2182 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2183 llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
2184 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2185 V = emitBoolVecConversion(V, ValNumElems, "extractvec");
2186
2187 return EmitFromMemory(V, Ty);
2188 }
2189
2190 // Handles vectors of sizes that are likely to be expanded to a larger size
2191 // to optimize performance.
2192 auto *VTy = cast<llvm::FixedVectorType>(Addr.getElementType());
2193 auto *NewVecTy =
2194 CGM.getABIInfo().getOptimalVectorMemoryType(VTy, getLangOpts());
2195
2196 if (VTy != NewVecTy) {
2197 Address Cast = Addr.withElementType(NewVecTy);
2198 llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVecN");
2199 unsigned OldNumElements = VTy->getNumElements();
2200 SmallVector<int, 16> Mask(OldNumElements);
2201 std::iota(Mask.begin(), Mask.end(), 0);
2202 V = Builder.CreateShuffleVector(V, Mask, "extractVec");
2203 return EmitFromMemory(V, Ty);
2204 }
2205 }
2206
2207 // Atomic operations have to be done on integral types.
2208 LValue AtomicLValue =
2209 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2210 if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
2211 return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
2212 }
2213
2214 Addr =
2215 Addr.withElementType(convertTypeForLoadStore(Ty, Addr.getElementType()));
2216
2217 llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
2218 if (isNontemporal) {
2219 llvm::MDNode *Node = llvm::MDNode::get(
2220 Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2221 Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2222 }
2223
2224 CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
2225
2226 maybeAttachRangeForLoad(Load, Ty, Loc);
2227
2228 return EmitFromMemory(Load, Ty);
2229}
2230
2231/// Converts a scalar value from its primary IR type (as returned
2232/// by ConvertType) to its load/store type (as returned by
2233/// convertTypeForLoadStore).
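/// For example, a C or C++ `bool` has primary IR type `i1` but is stored in
/// memory as `i8`, so this widens `i1` to `i8` before a store.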
2234llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
2235 if (auto *AtomicTy = Ty->getAs<AtomicType>())
2236 Ty = AtomicTy->getValueType();
2237
2238 if (Ty->isExtVectorBoolType()) {
2239 llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2240 if (StoreTy->isVectorTy() && StoreTy->getScalarSizeInBits() >
2241 Value->getType()->getScalarSizeInBits())
2242 return Builder.CreateZExt(Value, StoreTy);
2243
2244 // Expand to the memory bit width.
2245 unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
2246 // <N x i1> --> <P x i1>.
2247 Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
2248 // <P x i1> --> iP.
2249 Value = Builder.CreateBitCast(Value, StoreTy);
2250 }
2251
2252 if (Ty->hasBooleanRepresentation() || Ty->isBitIntType()) {
2253 llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2255 return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
2256 }
2257
2258 return Value;
2259}
2260
2261/// Converts a scalar value from its load/store type (as returned
2262/// by convertTypeForLoadStore) to its primary IR type (as returned
2263/// by ConvertType).
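/// For example, a `bool` loaded from memory as `i8` is truncated back to `i1`
/// here, mirroring the widening done in EmitToMemory.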
2264llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
2265 if (auto *AtomicTy = Ty->getAs<AtomicType>())
2266 Ty = AtomicTy->getValueType();
2267
2269 const auto *RawIntTy = Value->getType();
2270
2271 // Bitcast iP --> <P x i1>.
2272 auto *PaddedVecTy = llvm::FixedVectorType::get(
2273 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2274 auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
2275 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2276 llvm::Type *ValTy = ConvertType(Ty);
2277 unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2278 return emitBoolVecConversion(V, ValNumElems, "extractvec");
2279 }
2280
2281 llvm::Type *ResTy = ConvertType(Ty);
2282 if (Ty->hasBooleanRepresentation() || Ty->isBitIntType() ||
2283 Ty->isExtVectorBoolType())
2284 return Builder.CreateTrunc(Value, ResTy, "loadedv");
2285
2286 return Value;
2287}
2288
2289// Convert the pointer of \p Addr to a pointer to a vector (the value type of
2290 // MatrixType), if it points to an array (the memory type of MatrixType).
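// For example (a sketch, not tied to a particular target): a 2x2 float matrix
// is manipulated as the value type <4 x float> but laid out in memory as the
// array type [4 x float], so the address may need its element type rewritten.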
2292 CodeGenFunction &CGF,
2293 bool IsVector = true) {
2294 auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
2295 if (ArrayTy && IsVector) {
2296 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
2297 ArrayTy->getNumElements());
2298
2299 return Addr.withElementType(VectorTy);
2300 }
2301 auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
2302 if (VectorTy && !IsVector) {
2303 auto *ArrayTy = llvm::ArrayType::get(
2304 VectorTy->getElementType(),
2305 cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
2306
2307 return Addr.withElementType(ArrayTy);
2308 }
2309
2310 return Addr;
2311}
2312
2313 // Emit a store of a matrix LValue. This may require casting the original
2314 // pointer to the memory type (ArrayType) to a pointer to the value type
2315 // (VectorType).
2316static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
2317 bool isInit, CodeGenFunction &CGF) {
2318 Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
2319 value->getType()->isVectorTy());
2320 CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
2321 lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
2322 lvalue.isNontemporal());
2323}
2324
2326 bool Volatile, QualType Ty,
2327 LValueBaseInfo BaseInfo,
2328 TBAAAccessInfo TBAAInfo,
2329 bool isInit, bool isNontemporal) {
2330 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2331 if (GV->isThreadLocal())
2332 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2334
2335 // Handles vectors of sizes that are likely to be expanded to a larger size
2336 // to optimize performance.
2337 llvm::Type *SrcTy = Value->getType();
2338 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2339 if (auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2340 auto *NewVecTy =
2341 CGM.getABIInfo().getOptimalVectorMemoryType(VecTy, getLangOpts());
2342 if (!ClangVecTy->isPackedVectorBoolType(getContext()) &&
2343 VecTy != NewVecTy) {
2344 SmallVector<int, 16> Mask(NewVecTy->getNumElements(), -1);
2345 std::iota(Mask.begin(), Mask.begin() + VecTy->getNumElements(), 0);
2346 Value = Builder.CreateShuffleVector(Value, Mask, "extractVec");
2347 SrcTy = NewVecTy;
2348 }
2349 if (Addr.getElementType() != SrcTy)
2350 Addr = Addr.withElementType(SrcTy);
2351 }
2352 }
2353
2354 Value = EmitToMemory(Value, Ty);
2355
2356 LValue AtomicLValue =
2357 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2358 if (Ty->isAtomicType() ||
2359 (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
2360 EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
2361 return;
2362 }
2363
2364 llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
2366
2367 if (isNontemporal) {
2368 llvm::MDNode *Node =
2369 llvm::MDNode::get(Store->getContext(),
2370 llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2371 Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2372 }
2373
2374 CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
2375}
2376
2377void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
2378 bool isInit) {
2379 if (lvalue.getType()->isConstantMatrixType()) {
2380 EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
2381 return;
2382 }
2383
2384 EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
2385 lvalue.getType(), lvalue.getBaseInfo(),
2386 lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
2387}
2388
2389 // Emit a load of an LValue of matrix type. This may require casting the pointer
2390 // to the memory type (ArrayType) to a pointer to the value type (VectorType).
2392 CodeGenFunction &CGF) {
2393 assert(LV.getType()->isConstantMatrixType());
2394 Address Addr = MaybeConvertMatrixAddress(LV.getAddress(), CGF);
2395 LV.setAddress(Addr);
2396 return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
2397}
2398
2400 SourceLocation Loc) {
2401 QualType Ty = LV.getType();
2402 switch (getEvaluationKind(Ty)) {
2403 case TEK_Scalar:
2404 return EmitLoadOfLValue(LV, Loc);
2405 case TEK_Complex:
2406 return RValue::getComplex(EmitLoadOfComplex(LV, Loc));
2407 case TEK_Aggregate:
2408 EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
2409 return Slot.asRValue();
2410 }
2411 llvm_unreachable("bad evaluation kind");
2412}
2413
2414/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
2415/// method emits the address of the lvalue, then loads the result as an rvalue,
2416/// returning the rvalue.
2418 // Load from __ptrauth.
2419 if (PointerAuthQualifier PtrAuth = LV.getQuals().getPointerAuth()) {
2421 llvm::Value *Value = EmitLoadOfLValue(LV, Loc).getScalarVal();
2422 return RValue::get(EmitPointerAuthUnqualify(PtrAuth, Value, LV.getType(),
2423 LV.getAddress(),
2424 /*known nonnull*/ false));
2425 }
2426
2427 if (LV.isObjCWeak()) {
2428 // load of a __weak object.
2429 Address AddrWeakObj = LV.getAddress();
2430 return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
2431 AddrWeakObj));
2432 }
2434 // In MRC mode, we do a load+autorelease.
2435 if (!getLangOpts().ObjCAutoRefCount) {
2437 }
2438
2439 // In ARC mode, we load retained and then consume the value.
2440 llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
2441 Object = EmitObjCConsumeObject(LV.getType(), Object);
2442 return RValue::get(Object);
2443 }
2444
2445 if (LV.isSimple()) {
2446 assert(!LV.getType()->isFunctionType());
2447
2448 if (LV.getType()->isConstantMatrixType())
2449 return EmitLoadOfMatrixLValue(LV, Loc, *this);
2450
2451 // Everything needs a load.
2452 return RValue::get(EmitLoadOfScalar(LV, Loc));
2453 }
2454
2455 if (LV.isVectorElt()) {
2456 llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
2457 LV.isVolatileQualified());
2458 return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
2459 "vecext"));
2460 }
2461
2462 // If this is a reference to a subset of the elements of a vector, either
2463 // shuffle the input or extract/insert them as appropriate.
2464 if (LV.isExtVectorElt()) {
2466 }
2467
2468 // Global register variables always invoke intrinsics
2469 if (LV.isGlobalReg())
2470 return EmitLoadOfGlobalRegLValue(LV);
2471
2472 if (LV.isMatrixElt()) {
2473 llvm::Value *Idx = LV.getMatrixIdx();
2474 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2475 const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
2476 llvm::MatrixBuilder MB(Builder);
2477 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2478 }
2479 llvm::LoadInst *Load =
2480 Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
2481 return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
2482 }
2483
2484 assert(LV.isBitField() && "Unknown LValue type!");
2485 return EmitLoadOfBitfieldLValue(LV, Loc);
2486}
2487
2489 SourceLocation Loc) {
2490 const CGBitFieldInfo &Info = LV.getBitFieldInfo();
2491
2492 // Get the output type.
2493 llvm::Type *ResLTy = ConvertType(LV.getType());
2494
2495 Address Ptr = LV.getBitFieldAddress();
2496 llvm::Value *Val =
2497 Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
2498
2499 bool UseVolatile = LV.isVolatileQualified() &&
2500 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2501 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2502 const unsigned StorageSize =
2503 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2504 if (Info.IsSigned) {
2505 assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
2506 unsigned HighBits = StorageSize - Offset - Info.Size;
2507 if (HighBits)
2508 Val = Builder.CreateShl(Val, HighBits, "bf.shl");
2509 if (Offset + HighBits)
2510 Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
2511 } else {
2512 if (Offset)
2513 Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
2514 if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
2515 Val = Builder.CreateAnd(
2516 Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
2517 }
2518 Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
2519 EmitScalarRangeCheck(Val, LV.getType(), Loc);
2520 return RValue::get(Val);
2521}
2522
2523// If this is a reference to a subset of the elements of a vector, create an
2524// appropriate shufflevector.
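// E.g. reading `v.yx` from a 4-element float ext vector loads the whole
// <4 x float> and emits a shufflevector with mask <1, 0>.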
2526 llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
2527 LV.isVolatileQualified());
2528
2529 // HLSL allows treating scalars as one-element vectors. Converting the scalar
2530 // IR value to a vector here allows the rest of codegen to behave as normal.
2531 if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
2532 llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
2533 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2534 Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
2535 }
2536
2537 const llvm::Constant *Elts = LV.getExtVectorElts();
2538
2539 // If the result of the expression is a non-vector type, we must be extracting
2540 // a single element. Just codegen as an extractelement.
2541 const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2542 if (!ExprVT) {
2543 unsigned InIdx = getAccessedFieldNo(0, Elts);
2544 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2545
2546 llvm::Value *Element = Builder.CreateExtractElement(Vec, Elt);
2547
2548 llvm::Type *LVTy = ConvertType(LV.getType());
2549 if (Element->getType()->getPrimitiveSizeInBits() >
2550 LVTy->getPrimitiveSizeInBits())
2551 Element = Builder.CreateTrunc(Element, LVTy);
2552
2553 return RValue::get(Element);
2554 }
2555
2556 // Always use shuffle vector to try to retain the original program structure
2557 unsigned NumResultElts = ExprVT->getNumElements();
2558
2560 for (unsigned i = 0; i != NumResultElts; ++i)
2561 Mask.push_back(getAccessedFieldNo(i, Elts));
2562
2563 Vec = Builder.CreateShuffleVector(Vec, Mask);
2564
2565 if (LV.getType()->isExtVectorBoolType())
2566 Vec = Builder.CreateTrunc(Vec, ConvertType(LV.getType()), "truncv");
2567
2568 return RValue::get(Vec);
2569}
2570
2571/// Generates lvalue for partial ext_vector access.
2573 Address VectorAddress = LV.getExtVectorAddress();
2574 QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2575 llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2576
2577 Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);
2578
2579 const llvm::Constant *Elts = LV.getExtVectorElts();
2580 unsigned ix = getAccessedFieldNo(0, Elts);
2581
2582 Address VectorBasePtrPlusIx =
2583 Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2584 "vector.elt");
2585
2586 return VectorBasePtrPlusIx;
2587}
2588
2589 /// Loads of global named registers are always calls to intrinsics.
2591 assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2592 "Bad type for register variable");
2593 llvm::MDNode *RegName = cast<llvm::MDNode>(
2594 cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2595
2596 // We accept integer and pointer types only
2597 llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2598 llvm::Type *Ty = OrigTy;
2599 if (OrigTy->isPointerTy())
2600 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2601 llvm::Type *Types[] = { Ty };
2602
2603 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2604 llvm::Value *Call = Builder.CreateCall(
2605 F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2606 if (OrigTy->isPointerTy())
2607 Call = Builder.CreateIntToPtr(Call, OrigTy);
2608 return RValue::get(Call);
2609}
2610
2611/// EmitStoreThroughLValue - Store the specified rvalue into the specified
2612 /// lvalue, where both are guaranteed to have the same type, and that type
2613/// is 'Ty'.
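/// For non-simple destinations this becomes a read-modify-write; e.g. storing
/// to a single vector element loads the vector, inserts the new element, and
/// stores the whole vector back.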
2615 bool isInit) {
2616 if (!Dst.isSimple()) {
2617 if (Dst.isVectorElt()) {
2618 // Read/modify/write the vector, inserting the new element.
2619 llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2620 Dst.isVolatileQualified());
2621 llvm::Type *VecTy = Vec->getType();
2622 llvm::Value *SrcVal = Src.getScalarVal();
2623
2624 if (SrcVal->getType()->getPrimitiveSizeInBits() <
2625 VecTy->getScalarSizeInBits())
2626 SrcVal = Builder.CreateZExt(SrcVal, VecTy->getScalarType());
2627
2628 auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
2629 if (IRStoreTy) {
2630 auto *IRVecTy = llvm::FixedVectorType::get(
2631 Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
2632 Vec = Builder.CreateBitCast(Vec, IRVecTy);
2633 // iN --> <N x i1>.
2634 }
2635
2636 // Allow inserting `<1 x T>` into an `<N x T>`. It can happen with scalar
2637 // types which are mapped to vector LLVM IR types (e.g. for implementing
2638 // an ABI).
2639 if (auto *EltTy = dyn_cast<llvm::FixedVectorType>(SrcVal->getType());
2640 EltTy && EltTy->getNumElements() == 1)
2641 SrcVal = Builder.CreateBitCast(SrcVal, EltTy->getElementType());
2642
2643 Vec = Builder.CreateInsertElement(Vec, SrcVal, Dst.getVectorIdx(),
2644 "vecins");
2645 if (IRStoreTy) {
2646 // <N x i1> --> <iN>.
2647 Vec = Builder.CreateBitCast(Vec, IRStoreTy);
2648 }
2649
2650 auto *I = Builder.CreateStore(Vec, Dst.getVectorAddress(),
2651 Dst.isVolatileQualified());
2653 return;
2654 }
2655
2656 // If this is an update of extended vector elements, insert them as
2657 // appropriate.
2658 if (Dst.isExtVectorElt())
2660
2661 if (Dst.isGlobalReg())
2662 return EmitStoreThroughGlobalRegLValue(Src, Dst);
2663
2664 if (Dst.isMatrixElt()) {
2665 llvm::Value *Idx = Dst.getMatrixIdx();
2666 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2667 const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2668 llvm::MatrixBuilder MB(Builder);
2669 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2670 }
2671 llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2672 llvm::Value *Vec =
2673 Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
2674 auto *I = Builder.CreateStore(Vec, Dst.getMatrixAddress(),
2675 Dst.isVolatileQualified());
2677 return;
2678 }
2679
2680 assert(Dst.isBitField() && "Unknown LValue type");
2681 return EmitStoreThroughBitfieldLValue(Src, Dst);
2682 }
2683
2684 // Handle __ptrauth qualification by re-signing the value.
2685 if (PointerAuthQualifier PointerAuth = Dst.getQuals().getPointerAuth()) {
2686 Src = RValue::get(EmitPointerAuthQualify(PointerAuth, Src.getScalarVal(),
2687 Dst.getType(), Dst.getAddress(),
2688 /*known nonnull*/ false));
2689 }
2690
2691 // There's special magic for assigning into an ARC-qualified l-value.
2692 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2693 switch (Lifetime) {
2695 llvm_unreachable("present but none");
2696
2698 // nothing special
2699 break;
2700
2702 if (isInit) {
2703 Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2704 break;
2705 }
2706 EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2707 return;
2708
2710 if (isInit)
2711 // Initialize and then skip the primitive store.
2713 else
2715 /*ignore*/ true);
2716 return;
2717
2720 Src.getScalarVal()));
2721 // fall into the normal path
2722 break;
2723 }
2724 }
2725
2726 if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2727 // load of a __weak object.
2728 Address LvalueDst = Dst.getAddress();
2729 llvm::Value *src = Src.getScalarVal();
2730 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
2731 return;
2732 }
2733
2734 if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2735 // load of a __strong object.
2736 Address LvalueDst = Dst.getAddress();
2737 llvm::Value *src = Src.getScalarVal();
2738 if (Dst.isObjCIvar()) {
2739 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2740 llvm::Type *ResultType = IntPtrTy;
2742 llvm::Value *RHS = dst.emitRawPointer(*this);
2743 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2744 llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
2745 ResultType, "sub.ptr.lhs.cast");
2746 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
2747 CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
2748 } else if (Dst.isGlobalObjCRef()) {
2749 CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
2750 Dst.isThreadLocalRef());
2751 }
2752 else
2753 CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2754 return;
2755 }
2756
2757 assert(Src.isScalar() && "Can't emit an agg store with this method");
2758 EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2759}
2760
2762 llvm::Value **Result) {
2763 const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2764 llvm::Type *ResLTy = convertTypeForLoadStore(Dst.getType());
2765 Address Ptr = Dst.getBitFieldAddress();
2766
2767 // Get the source value, truncated to the width of the bit-field.
2768 llvm::Value *SrcVal = Src.getScalarVal();
2769
2770 // Cast the source to the storage type and shift it into place.
2771 SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2772 /*isSigned=*/false);
2773 llvm::Value *MaskedVal = SrcVal;
2774
2775 const bool UseVolatile =
2776 CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
2777 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2778 const unsigned StorageSize =
2779 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2780 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2781 // See if there are other bits in the bitfield's storage we'll need to load
2782 // and mask together with the source before storing.
2783 if (StorageSize != Info.Size) {
2784 assert(StorageSize > Info.Size && "Invalid bitfield size.");
2785 llvm::Value *Val =
2786 Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2787
2788 // Mask the source value as needed.
2789 if (!Dst.getType()->hasBooleanRepresentation())
2790 SrcVal = Builder.CreateAnd(
2791 SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
2792 "bf.value");
2793 MaskedVal = SrcVal;
2794 if (Offset)
2795 SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
2796
2797 // Mask out the original value.
2798 Val = Builder.CreateAnd(
2799 Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
2800 "bf.clear");
2801
2802 // Or together the unchanged values and the source value.
2803 SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2804 } else {
2805 assert(Offset == 0);
2806 // According to the AAPCS:
2807 // When a volatile bit-field is written, and its container does not overlap
2808 // with any non-bit-field member, its container must be read exactly once
2809 // and written exactly once using the access width appropriate to the type
2810 // of the container. The two accesses are not atomic.
2811 if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
2812 CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2813 Builder.CreateLoad(Ptr, true, "bf.load");
2814 }
2815
2816 // Write the new value back out.
2817 auto *I = Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2818 addInstToCurrentSourceAtom(I, SrcVal);
2819
2820 // Return the new value of the bit-field, if requested.
2821 if (Result) {
2822 llvm::Value *ResultVal = MaskedVal;
2823
2824 // Sign extend the value if needed.
2825 if (Info.IsSigned) {
2826 assert(Info.Size <= StorageSize);
2827 unsigned HighBits = StorageSize - Info.Size;
2828 if (HighBits) {
2829 ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2830 ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2831 }
2832 }
2833
2834 ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2835 "bf.result.cast");
2836 *Result = EmitFromMemory(ResultVal, Dst.getType());
2837 }
2838}
2839
2841 LValue Dst) {
2842 llvm::Value *SrcVal = Src.getScalarVal();
2843 Address DstAddr = Dst.getExtVectorAddress();
2844 if (DstAddr.getElementType()->getScalarSizeInBits() >
2845 SrcVal->getType()->getScalarSizeInBits())
2846 SrcVal = Builder.CreateZExt(
2847 SrcVal, convertTypeForLoadStore(Dst.getType(), SrcVal->getType()));
2848
2849 // HLSL allows storing to scalar values through ExtVector component LValues.
2850 // To support this we need to handle the case where the destination address is
2851 // a scalar.
2852 if (!DstAddr.getElementType()->isVectorTy()) {
2853 assert(!Dst.getType()->isVectorType() &&
2854 "this should only occur for non-vector l-values");
2855 Builder.CreateStore(SrcVal, DstAddr, Dst.isVolatileQualified());
2856 return;
2857 }
2858
2859 // This access turns into a read/modify/write of the vector. Load the input
2860 // value now.
2861 llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
2862 llvm::Type *VecTy = Vec->getType();
2863 const llvm::Constant *Elts = Dst.getExtVectorElts();
2864
2865 if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2866 unsigned NumSrcElts = VTy->getNumElements();
2867 unsigned NumDstElts = cast<llvm::FixedVectorType>(VecTy)->getNumElements();
2868 if (NumDstElts == NumSrcElts) {
2869 // Use a shuffle vector if the src and destination have the same number of
2870 // elements; invert the access mask since it is applied on the side that
2871 // will be stored.
2872 SmallVector<int, 4> Mask(NumDstElts);
2873 for (unsigned i = 0; i != NumSrcElts; ++i)
2874 Mask[getAccessedFieldNo(i, Elts)] = i;
2875
2876 Vec = Builder.CreateShuffleVector(SrcVal, Mask);
2877 } else if (NumDstElts > NumSrcElts) {
2878 // Extend the source vector to the same length and then shuffle it
2879 // into the destination.
2880 // FIXME: since we're shuffling with undef, can we just use the indices
2881 // into that? This could be simpler.
2882 SmallVector<int, 4> ExtMask;
2883 for (unsigned i = 0; i != NumSrcElts; ++i)
2884 ExtMask.push_back(i);
2885 ExtMask.resize(NumDstElts, -1);
2886 llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
2887 // Build the identity mask.
2889 for (unsigned i = 0; i != NumDstElts; ++i)
2890 Mask.push_back(i);
2891
2892 // When the vector size is odd and .odd or .hi is used, the last element
2893 // of the Elts constant array will be one past the size of the vector.
2894 // Ignore the last element here, if it is greater than the mask size.
2895 if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2896 NumSrcElts--;
2897
2898 // Modify the entries that get shuffled in.
2899 for (unsigned i = 0; i != NumSrcElts; ++i)
2900 Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
2901 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
2902 } else {
2903 // We should never shorten the vector
2904 llvm_unreachable("unexpected shorten vector length");
2905 }
2906 } else {
2907 // If the Src is a scalar (not a vector) and the target is a vector, it must
2908 // be updating one element.
2909 unsigned InIdx = getAccessedFieldNo(0, Elts);
2910 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2911
2912 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2913 }
2914
2915 Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
2916 Dst.isVolatileQualified());
2917}
2918
2919 /// Stores of global named registers are always calls to intrinsics.
2921 assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2922 "Bad type for register variable");
2923 llvm::MDNode *RegName = cast<llvm::MDNode>(
2924 cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2925 assert(RegName && "Register LValue is not metadata");
2926
2927 // We accept integer and pointer types only
2928 llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2929 llvm::Type *Ty = OrigTy;
2930 if (OrigTy->isPointerTy())
2931 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2932 llvm::Type *Types[] = { Ty };
2933
2934 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2935 llvm::Value *Value = Src.getScalarVal();
2936 if (OrigTy->isPointerTy())
2937 Value = Builder.CreatePtrToInt(Value, Ty);
2938 Builder.CreateCall(
2939 F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2940}
2941
2942 // setObjCGCLValueClass - Set the class of the lvalue for the purpose of
2943 // generating the write-barrier API. It is currently a global, an ivar,
2944 // or neither.
2945static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2946 LValue &LV,
2947 bool IsMemberAccess=false) {
2948 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2949 return;
2950
2951 if (isa<ObjCIvarRefExpr>(E)) {
2952 QualType ExpTy = E->getType();
2953 if (IsMemberAccess && ExpTy->isPointerType()) {
2954 // If the ivar is a structure pointer, assigning to a field of
2955 // this struct follows gcc's behavior and conservatively makes it a
2956 // non-ivar write-barrier.
2957 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2958 if (ExpTy->isRecordType()) {
2959 LV.setObjCIvar(false);
2960 return;
2961 }
2962 }
2963 LV.setObjCIvar(true);
2964 auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2965 LV.setBaseIvarExp(Exp->getBase());
2966 LV.setObjCArray(E->getType()->isArrayType());
2967 return;
2968 }
2969
2970 if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2971 if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2972 if (VD->hasGlobalStorage()) {
2973 LV.setGlobalObjCRef(true);
2974 LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2975 }
2976 }
2977 LV.setObjCArray(E->getType()->isArrayType());
2978 return;
2979 }
2980
2981 if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2982 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2983 return;
2984 }
2985
2986 if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2987 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2988 if (LV.isObjCIvar()) {
2989 // If cast is to a structure pointer, follow gcc's behavior and make it
2990 // a non-ivar write-barrier.
2991 QualType ExpTy = E->getType();
2992 if (ExpTy->isPointerType())
2993 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2994 if (ExpTy->isRecordType())
2995 LV.setObjCIvar(false);
2996 }
2997 return;
2998 }
2999
3000 if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
3001 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
3002 return;
3003 }
3004
3005 if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
3006 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
3007 return;
3008 }
3009
3010 if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
3011 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
3012 return;
3013 }
3014
3015 if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
3016 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
3017 return;
3018 }
3019
3020 if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
3021 setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
3022 if (LV.isObjCIvar() && !LV.isObjCArray())
3023 // Using array syntax to assign to what an ivar points to is not the
3024 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
3025 LV.setObjCIvar(false);
3026 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
3027 // Using array syntax to assign to what a global points to is not the
3028 // same as assigning to the global itself. {id *G;} G[i] = 0;
3029 LV.setGlobalObjCRef(false);
3030 return;
3031 }
3032
3033 if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
3034 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
3035 // We don't know if member is an 'ivar', but this flag is looked at
3036 // only in the context of LV.isObjCIvar().
3037 LV.setObjCArray(E->getType()->isArrayType());
3038 return;
3039 }
3040}
3041
3043 CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
3044 llvm::Type *RealVarTy, SourceLocation Loc) {
3045 if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
3047 CGF, VD, Addr, Loc);
3048 else
3049 Addr =
3050 CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
3051
3052 Addr = Addr.withElementType(RealVarTy);
3054}
3055
3057 const VarDecl *VD, QualType T) {
3058 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
3059 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
3060 // Return an invalid address if variable is MT_To (or MT_Enter starting with
3061 // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
3062 // and MT_To (or MT_Enter) with unified memory, return a valid address.
3063 if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
3064 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
3066 return Address::invalid();
3067 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
3068 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
3069 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
3071 "Expected link clause OR to clause with unified memory enabled.");
3072 QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
3074 return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
3075}
3076
3077Address
3079 LValueBaseInfo *PointeeBaseInfo,
3080 TBAAAccessInfo *PointeeTBAAInfo) {
3081 llvm::LoadInst *Load =
3082 Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
3083 CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
3084 QualType PTy = RefLVal.getType()->getPointeeType();
3085 CharUnits Align = CGM.getNaturalTypeAlignment(
3086 PTy, PointeeBaseInfo, PointeeTBAAInfo, /*ForPointeeType=*/true);
3087 if (!PTy->isIncompleteType()) {
3088 llvm::LLVMContext &Ctx = getLLVMContext();
3089 llvm::MDBuilder MDB(Ctx);
3090 // Emit !nonnull metadata
3091 if (CGM.getTypes().getTargetAddressSpace(PTy) == 0 &&
3092 !CGM.getCodeGenOpts().NullPointerIsValid)
3093 Load->setMetadata(llvm::LLVMContext::MD_nonnull,
3094 llvm::MDNode::get(Ctx, {}));
3095 // Emit !align metadata
3096 if (PTy->isObjectType()) {
3097 auto AlignVal = Align.getQuantity();
3098 if (AlignVal > 1) {
3099 Load->setMetadata(
3100 llvm::LLVMContext::MD_align,
3101 llvm::MDNode::get(Ctx, MDB.createConstant(llvm::ConstantInt::get(
3102 Builder.getInt64Ty(), AlignVal))));
3103 }
3104 }
3105 }
3106 return makeNaturalAddressForPointer(Load, PTy, Align,
3107 /*ForPointeeType=*/true, PointeeBaseInfo,
3108 PointeeTBAAInfo);
3109}
3110
3112 LValueBaseInfo PointeeBaseInfo;
3113 TBAAAccessInfo PointeeTBAAInfo;
3114 Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
3115 &PointeeTBAAInfo);
3116 return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
3117 PointeeBaseInfo, PointeeTBAAInfo);
3118}
3119
3121 const PointerType *PtrTy,
3122 LValueBaseInfo *BaseInfo,
3123 TBAAAccessInfo *TBAAInfo) {
3124 llvm::Value *Addr = Builder.CreateLoad(Ptr);
3125 return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
3126 CharUnits(), /*ForPointeeType=*/true,
3127 BaseInfo, TBAAInfo);
3128}
3129
3131 const PointerType *PtrTy) {
3132 LValueBaseInfo BaseInfo;
3133 TBAAAccessInfo TBAAInfo;
3134 Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
3135 return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
3136}
3137
3139 const Expr *E, const VarDecl *VD) {
3140 QualType T = E->getType();
3141
3142 // If it's thread_local, emit a call to its wrapper function instead.
3143 if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
3145 return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
3146 // Check if the variable is marked as declare target with link clause in
3147 // device codegen.
3148 if (CGF.getLangOpts().OpenMPIsTargetDevice) {
3150 if (Addr.isValid())
3152 }
3153
3154 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
3155
3156 if (VD->getTLSKind() != VarDecl::TLS_None)
3157 V = CGF.Builder.CreateThreadLocalAddress(V);
3158
3159 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
3160 CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
3161 Address Addr(V, RealVarTy, Alignment);
3162 // Emit reference to the private copy of the variable if it is an OpenMP
3163 // threadprivate variable.
3164 if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
3165 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3166 return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
3167 E->getExprLoc());
3168 }
3169 LValue LV = VD->getType()->isReferenceType() ?
3173 setObjCGCLValueClass(CGF.getContext(), E, LV);
3174 return LV;
3175}
3176
3178 llvm::Type *Ty) {
3179 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
3180 if (FD->hasAttr<WeakRefAttr>()) {
3182 return aliasee.getPointer();
3183 }
3184
3185 llvm::Constant *V = GetAddrOfFunction(GD, Ty);
3186 return V;
3187}
3188
3189static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
3190 GlobalDecl GD) {
3191 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
3192 llvm::Constant *V = CGF.CGM.getFunctionPointer(GD);
3193 QualType ETy = E->getType();
3195 if (auto *GV = dyn_cast<llvm::GlobalValue>(V))
3196 V = llvm::NoCFIValue::get(GV);
3197 }
3198 CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
3199 return CGF.MakeAddrLValue(V, ETy, Alignment, AlignmentSource::Decl);
3200}
3201
3203 llvm::Value *ThisValue) {
3204
3205 return CGF.EmitLValueForLambdaField(FD, ThisValue);
3206}
3207
3208/// Named Registers are named metadata pointing to the register name
3209/// which will be read from/written to as an argument to the intrinsic
3210/// @llvm.read/write_register.
3211/// So far, only the name is being passed down, but other options such as
3212/// register type, allocation type or even optimization options could be
3213/// passed down via the metadata node.
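/// As a rough example, a global `register unsigned long current_sp asm("sp");`
/// is read through `@llvm.read_register` and written through
/// `@llvm.write_register`, each taking `metadata !"sp"` as the register name.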
3214static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
3215 SmallString<64> Name("llvm.named.register.");
3216 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
3217 assert(Asm->getLabel().size() < 64-Name.size() &&
3218 "Register name too big");
3219 Name.append(Asm->getLabel());
3220 llvm::NamedMDNode *M =
3221 CGM.getModule().getOrInsertNamedMetadata(Name);
3222 if (M->getNumOperands() == 0) {
3223 llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
3224 Asm->getLabel());
3225 llvm::Metadata *Ops[] = {Str};
3226 M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
3227 }
3228
3229 CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
3230
3231 llvm::Value *Ptr =
3232 llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
3233 return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
3234}
3235
3236/// Determine whether we can emit a reference to \p VD from the current
3237/// context, despite not necessarily having seen an odr-use of the variable in
3238/// this context.
3240 const DeclRefExpr *E,
3241 const VarDecl *VD) {
3242 // For a variable declared in an enclosing scope, do not emit a spurious
3243 // reference even if we have a capture, as that will emit an unwarranted
3244 // reference to our capture state, and will likely generate worse code than
3245 // emitting a local copy.
3247 return false;
3248
3249 // For a local declaration declared in this function, we can always reference
3250 // it even if we don't have an odr-use.
3251 if (VD->hasLocalStorage()) {
3252 return VD->getDeclContext() ==
3253 dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
3254 }
3255
3256 // For a global declaration, we can emit a reference to it if we know
3257 // for sure that we are able to emit a definition of it.
3258 VD = VD->getDefinition(CGF.getContext());
3259 if (!VD)
3260 return false;
3261
3262 // Don't emit a spurious reference if it might be to a variable that only
3263 // exists on a different device / target.
3264 // FIXME: This is unnecessarily broad. Check whether this would actually be a
3265 // cross-target reference.
3266 if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
3267 CGF.getLangOpts().OpenCL) {
3268 return false;
3269 }
3270
3271 // We can emit a spurious reference only if the linkage implies that we'll
3272 // be emitting a non-interposable symbol that will be retained until link
3273 // time.
3274 switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
3275 case llvm::GlobalValue::ExternalLinkage:
3276 case llvm::GlobalValue::LinkOnceODRLinkage:
3277 case llvm::GlobalValue::WeakODRLinkage:
3278 case llvm::GlobalValue::InternalLinkage:
3279 case llvm::GlobalValue::PrivateLinkage:
3280 return true;
3281 default:
3282 return false;
3283 }
3284}
3285
3287 const NamedDecl *ND = E->getDecl();
3288 QualType T = E->getType();
3289
3290 assert(E->isNonOdrUse() != NOUR_Unevaluated &&
3291 "should not emit an unevaluated operand");
3292
3293 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3294 // Global named registers are accessed via intrinsics only.
3295 if (VD->getStorageClass() == SC_Register &&
3296 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
3297 return EmitGlobalNamedRegister(VD, CGM);
3298
3299 // If this DeclRefExpr does not constitute an odr-use of the variable,
3300 // we're not permitted to emit a reference to it in general, and it might
3301 // not be captured if capture would be necessary for a use. Emit the
3302 // constant value directly instead.
3303 if (E->isNonOdrUse() == NOUR_Constant &&
3304 (VD->getType()->isReferenceType() ||
3305 !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
3306 VD->getAnyInitializer(VD);
3307 llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
3308 E->getLocation(), *VD->evaluateValue(), VD->getType());
3309 assert(Val && "failed to emit constant expression");
3310
3311 Address Addr = Address::invalid();
3312 if (!VD->getType()->isReferenceType()) {
3313 // Spill the constant value to a global.
3314 Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
3315 getContext().getDeclAlign(VD));
3316 llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
3317 auto *PTy = llvm::PointerType::get(
3318 getLLVMContext(), getTypes().getTargetAddressSpace(VD->getType()));
3319 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
3320 } else {
3321 // Should we be using the alignment of the constant pointer we emitted?
3322 CharUnits Alignment =
3323 CGM.getNaturalTypeAlignment(E->getType(),
3324 /* BaseInfo= */ nullptr,
3325 /* TBAAInfo= */ nullptr,
3326 /* forPointeeType= */ true);
3327 Addr = makeNaturalAddressForPointer(Val, T, Alignment);
3328 }
3329 return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
3330 }
3331
3332 // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
3333
3334 // Check for captured variables.
3335 if (E->refersToEnclosingVariableOrCapture()) {
3336 VD = VD->getCanonicalDecl();
3337 if (auto *FD = LambdaCaptureFields.lookup(VD))
3338 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3339 if (CapturedStmtInfo) {
3340 auto I = LocalDeclMap.find(VD);
3341 if (I != LocalDeclMap.end()) {
3342 LValue CapLVal;
3343 if (VD->getType()->isReferenceType())
3344 CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
3345 AlignmentSource::Decl);
3346 else
3347 CapLVal = MakeAddrLValue(I->second, T);
3348 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3349 // in simd context.
3350 if (getLangOpts().OpenMP &&
3351 CGM.getOpenMPRuntime().isNontemporalDecl(VD))
3352 CapLVal.setNontemporal(/*Value=*/true);
3353 return CapLVal;
3354 }
3355 LValue CapLVal =
3356 EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
3357 CapturedStmtInfo->getContextValue());
3358 Address LValueAddress = CapLVal.getAddress();
3359 CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
3360 LValueAddress.getElementType(),
3361 getContext().getDeclAlign(VD)),
3362 CapLVal.getType(),
3363 LValueBaseInfo(AlignmentSource::Decl),
3364 CapLVal.getTBAAInfo());
3365 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3366 // in simd context.
3367 if (getLangOpts().OpenMP &&
3368 CGM.getOpenMPRuntime().isNontemporalDecl(VD))
3369 CapLVal.setNontemporal(/*Value=*/true);
3370 return CapLVal;
3371 }
3372
3373 assert(isa<BlockDecl>(CurCodeDecl));
3374 Address addr = GetAddrOfBlockDecl(VD);
3375 return MakeAddrLValue(addr, T, AlignmentSource::Decl);
3376 }
3377 }
3378
3379 // FIXME: We should be able to assert this for FunctionDecls as well!
3380 // FIXME: We should be able to assert this for all DeclRefExprs, not just
3381 // those with a valid source location.
3382 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
3383 !E->getLocation().isValid()) &&
3384 "Should not use decl without marking it used!");
3385
3386 if (ND->hasAttr<WeakRefAttr>()) {
3387 const auto *VD = cast<ValueDecl>(ND);
3388 ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
3389 return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
3390 }
3391
3392 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3393 // Check if this is a global variable.
3394 if (VD->hasLinkage() || VD->isStaticDataMember())
3395 return EmitGlobalVarDeclLValue(*this, E, VD);
3396
3397 Address addr = Address::invalid();
3398
3399 // The variable should generally be present in the local decl map.
3400 auto iter = LocalDeclMap.find(VD);
3401 if (iter != LocalDeclMap.end()) {
3402 addr = iter->second;
3403
3404 // Otherwise, it might be a static local we haven't emitted yet for
3405 // some reason; most likely, because it's in an outer function.
3406 } else if (VD->isStaticLocal()) {
3407 llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
3408 *VD, CGM.getLLVMLinkageVarDefinition(VD));
3409 addr = Address(
3410 var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
3411
3412 // No other cases for now.
3413 } else {
3414 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
3415 }
3416
3417 // Handle threadlocal function locals.
3418 if (VD->getTLSKind() != VarDecl::TLS_None)
3419 addr = addr.withPointer(
3420 Builder.CreateThreadLocalAddress(addr.getBasePointer()),
3421 NotKnownNonNull);
3422
3423 // Check for OpenMP threadprivate variables.
3424 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
3425 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3426 return EmitThreadPrivateVarDeclLValue(
3427 *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
3428 E->getExprLoc());
3429 }
3430
3431 // Drill into block byref variables.
3432 bool isBlockByref = VD->isEscapingByref();
3433 if (isBlockByref) {
3434 addr = emitBlockByrefAddress(addr, VD);
3435 }
3436
3437 // Drill into reference types.
3438 LValue LV = VD->getType()->isReferenceType() ?
3439 EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
3440 MakeAddrLValue(addr, T, AlignmentSource::Decl);
3441
3442 bool isLocalStorage = VD->hasLocalStorage();
3443
3444 bool NonGCable = isLocalStorage &&
3445 !VD->getType()->isReferenceType() &&
3446 !isBlockByref;
3447 if (NonGCable) {
3448 LV.getQuals().removeObjCGCAttr();
3449 LV.setNonGC(true);
3450 }
3451
3452 bool isImpreciseLifetime =
3453 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
3454 if (isImpreciseLifetime)
3455 LV.setARCPreciseLifetime(ARCImpreciseLifetime);
3456 setObjCGCLValueClass(getContext(), E, LV);
3457 return LV;
3458 }
3459
3460 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
3461 return EmitFunctionDeclLValue(*this, E, FD);
3462
3463 // FIXME: While we're emitting a binding from an enclosing scope, all other
3464 // DeclRefExprs we see should be implicitly treated as if they also refer to
3465 // an enclosing scope.
3466 if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
3467 if (E->refersToEnclosingVariableOrCapture()) {
3468 auto *FD = LambdaCaptureFields.lookup(BD);
3469 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3470 }
3471 // Suppress debug location updates when visiting the binding, since the
3472 // binding may emit instructions that would otherwise be associated with the
3473 // binding itself, rather than the expression referencing the binding. (this
3474 // leads to jumpy debug stepping behavior where the location/debugger jumps
3475 // back to the binding declaration, then back to the expression referencing
3476 // the binding)
3478 return EmitLValue(BD->getBinding(), NotKnownNonNull);
3479 }
3480
3481 // We can form DeclRefExprs naming GUID declarations when reconstituting
3482 // non-type template parameters into expressions.
3483 if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
3484 return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
3485 AlignmentSource::Decl);
3486
3487 if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
3488 auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
3489 auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());
3490
3491 if (AS != T.getAddressSpace()) {
3492 auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
3493 auto PtrTy = llvm::PointerType::get(CGM.getLLVMContext(), TargetAS);
3494 auto ASC = getTargetHooks().performAddrSpaceCast(CGM, ATPO.getPointer(),
3495 AS, PtrTy);
3496 ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
3497 }
3498
3499 return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
3500 }
3501
3502 llvm_unreachable("Unhandled DeclRefExpr");
3503}
3504
3505LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
3506 // __extension__ doesn't affect lvalue-ness.
3507 if (E->getOpcode() == UO_Extension)
3508 return EmitLValue(E->getSubExpr());
3509
3510 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
3511 switch (E->getOpcode()) {
3512 default: llvm_unreachable("Unknown unary operator lvalue!");
3513 case UO_Deref: {
3514 QualType T = E->getSubExpr()->getType()->getPointeeType();
3515 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
3516
3517 LValueBaseInfo BaseInfo;
3518 TBAAAccessInfo TBAAInfo;
3519 Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
3520 &TBAAInfo);
3521 LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
3522 LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
3523
3524 // We should not generate __weak write barrier on indirect reference
3525 // of a pointer to object; as in void foo (__weak id *param); *param = 0;
3526 // But, we continue to generate __strong write barrier on indirect write
3527 // into a pointer to object.
3528 if (getLangOpts().ObjC &&
3529 getLangOpts().getGC() != LangOptions::NonGC &&
3530 LV.isObjCWeak())
3531 LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
3532 return LV;
3533 }
3534 case UO_Real:
3535 case UO_Imag: {
3536 LValue LV = EmitLValue(E->getSubExpr());
3537 assert(LV.isSimple() && "real/imag on non-ordinary l-value");
3538
3539 // __real is valid on scalars. This is a faster way of testing that.
3540 // __imag can only produce an rvalue on scalars.
3541 if (E->getOpcode() == UO_Real &&
3542 !LV.getAddress().getElementType()->isStructTy()) {
3543 assert(E->getSubExpr()->getType()->isArithmeticType());
3544 return LV;
3545 }
3546
3547 QualType T = ExprTy->castAs<ComplexType>()->getElementType();
3548
3549 Address Component =
3550 (E->getOpcode() == UO_Real
3551 ? emitAddrOfRealComponent(LV.getAddress(), LV.getType())
3552 : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
3553 LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
3554 CGM.getTBAAInfoForSubobject(LV, T));
3555 ElemLV.getQuals().addQualifiers(LV.getQuals());
3556 return ElemLV;
3557 }
3558 case UO_PreInc:
3559 case UO_PreDec: {
3560 LValue LV = EmitLValue(E->getSubExpr());
3561 bool isInc = E->getOpcode() == UO_PreInc;
3562
3563 if (E->getType()->isAnyComplexType())
3564 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
3565 else
3566 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
3567 return LV;
3568 }
3569 }
3570}
3571
3572LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
3573 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
3574 E->getType(), AlignmentSource::Decl);
3575}
3576
3577LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
3578 return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
3579 E->getType(), AlignmentSource::Decl);
3580}
3581
3582LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
3583 auto SL = E->getFunctionName();
3584 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
3585 StringRef FnName = CurFn->getName();
3586 FnName.consume_front("\01");
3587 StringRef NameItems[] = {
3588 PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
3589 std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
3590 if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
3591 std::string Name = std::string(SL->getString());
3592 if (!Name.empty()) {
3593 unsigned Discriminator =
3594 CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
3595 if (Discriminator)
3596 Name += "_" + Twine(Discriminator + 1).str();
3597 auto C = CGM.GetAddrOfConstantCString(Name, GVName);
3598 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3599 } else {
3600 auto C = CGM.GetAddrOfConstantCString(std::string(FnName), GVName);
3601 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3602 }
3603 }
3604 auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
3605 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3606}
3607
3608/// Emit a type description suitable for use by a runtime sanitizer library. The
3609/// format of a type descriptor is
3610///
3611/// \code
3612/// { i16 TypeKind, i16 TypeInfo }
3613/// \endcode
3614///
3615/// followed by an array of i8 containing the type name with extra information
3616/// for BitInt. TypeKind is TK_Integer(0) for an integer, TK_Float(1) for a
3617/// floating point value, TK_BitInt(2) for BitInt and TK_Unknown(0xFFFF) for
3618/// anything else.
3619llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
3620 // Only emit each type's descriptor once.
3621 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
3622 return C;
3623
3624 uint16_t TypeKind = TK_Unknown;
3625 uint16_t TypeInfo = 0;
3626 bool IsBitInt = false;
3627
3628 if (T->isIntegerType()) {
3629 TypeKind = TK_Integer;
3630 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3631 (T->isSignedIntegerType() ? 1 : 0);
3632 // Follow suggestion from discussion of issue 64100.
3633 // So we can write the exact amount of bits in TypeName after '\0'
3634 // making it <diagnostic-like type name>.'\0'.<32-bit width>.
3635 if (T->isSignedIntegerType() && T->getAs<BitIntType>()) {
3636 // Do a sanity check as we are using a 32-bit type to store the bit length.
3637 assert(getContext().getTypeSize(T) > 0 &&
3638 " non positive amount of bits in __BitInt type");
3639 assert(getContext().getTypeSize(T) <= 0xFFFFFFFF &&
3640 " too many bits in __BitInt type");
3641
3642 // Redefine TypeKind with the actual __BitInt type if we have signed
3643 // BitInt.
3644 TypeKind = TK_BitInt;
3645 IsBitInt = true;
3646 }
3647 } else if (T->isFloatingType()) {
3648 TypeKind = TK_Float;
3649 TypeInfo = getContext().getTypeSize(T);
3650 }
3651
3652 // Format the type name as if for a diagnostic, including quotes and
3653 // optionally an 'aka'.
3654 SmallString<32> Buffer;
3655 CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
3656 (intptr_t)T.getAsOpaquePtr(), StringRef(),
3657 StringRef(), {}, Buffer, {});
3658
3659 if (IsBitInt) {
3660 // The structure is: 0 to end the string, a 32-bit unsigned integer in target
3661 // endianness, zero.
3662 char S[6] = {'\0', '\0', '\0', '\0', '\0', '\0'};
3663 const auto *EIT = T->castAs<BitIntType>();
3664 uint32_t Bits = EIT->getNumBits();
3665 llvm::support::endian::write32(S + 1, Bits,
3666 getTarget().isBigEndian()
3667 ? llvm::endianness::big
3668 : llvm::endianness::little);
3669 StringRef Str = StringRef(S, sizeof(S) / sizeof(decltype(S[0])));
3670 Buffer.append(Str);
3671 }
3672
3673 llvm::Constant *Components[] = {
3674 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3675 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3676 };
3677 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3678
3679 auto *GV = new llvm::GlobalVariable(
3680 CGM.getModule(), Descriptor->getType(),
3681 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3682 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3683 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
3684
3685 // Remember the descriptor for this type.
3686 CGM.setTypeDescriptorInMap(T, GV);
3687
3688 return GV;
3689}
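// For illustration (approximate, not verbatim output): for a plain 32-bit
// signed 'int' the descriptor built above looks roughly like
//   @desc = private unnamed_addr constant { i16, i16, [6 x i8] }
//           { i16 0, i16 11, [6 x i8] c"'int'\00" }
// where i16 0 is TK_Integer and 11 == (log2(32) << 1) | 1 encodes width and
// signedness; a signed _BitInt additionally appends its 32-bit bit width after
// the terminating '\0' as described above.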
3690
3691llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3692 llvm::Type *TargetTy = IntPtrTy;
3693
3694 if (V->getType() == TargetTy)
3695 return V;
3696
3697 // Floating-point types which fit into intptr_t are bitcast to integers
3698 // and then passed directly (after zero-extension, if necessary).
3699 if (V->getType()->isFloatingPointTy()) {
3700 unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
3701 if (Bits <= TargetTy->getIntegerBitWidth())
3702 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
3703 Bits));
3704 }
3705
3706 // Integers which fit in intptr_t are zero-extended and passed directly.
3707 if (V->getType()->isIntegerTy() &&
3708 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
3709 return Builder.CreateZExt(V, TargetTy);
3710
3711 // Pointers are passed directly, everything else is passed by address.
3712 if (!V->getType()->isPointerTy()) {
3713 RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
3714 Builder.CreateStore(V, Ptr);
3715 V = Ptr.getPointer();
3716 }
3717 return Builder.CreatePtrToInt(V, TargetTy);
3718}
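// For illustration (assuming a 64-bit intptr_t target): a 'double' is bitcast
// to i64 and passed directly, an 'i8' is zero-extended to i64, a pointer is
// passed via ptrtoint, and anything wider than intptr_t (e.g. fp128) is
// spilled to a temporary alloca whose address is passed instead.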
3719
3720/// Emit a representation of a SourceLocation for passing to a handler
3721/// in a sanitizer runtime library. The format for this data is:
3722/// \code
3723/// struct SourceLocation {
3724/// const char *Filename;
3725/// int32_t Line, Column;
3726/// };
3727/// \endcode
3728/// For an invalid SourceLocation, the Filename pointer is null.
3729llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
3730 llvm::Constant *Filename;
3731 int Line, Column;
3732
3733 PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
3734 if (PLoc.isValid()) {
3735 StringRef FilenameString = PLoc.getFilename();
3736
3737 int PathComponentsToStrip =
3738 CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
3739 if (PathComponentsToStrip < 0) {
3740 assert(PathComponentsToStrip != INT_MIN);
3741 int PathComponentsToKeep = -PathComponentsToStrip;
3742 auto I = llvm::sys::path::rbegin(FilenameString);
3743 auto E = llvm::sys::path::rend(FilenameString);
3744 while (I != E && --PathComponentsToKeep)
3745 ++I;
3746
3747 FilenameString = FilenameString.substr(I - E);
3748 } else if (PathComponentsToStrip > 0) {
3749 auto I = llvm::sys::path::begin(FilenameString);
3750 auto E = llvm::sys::path::end(FilenameString);
3751 while (I != E && PathComponentsToStrip--)
3752 ++I;
3753
3754 if (I != E)
3755 FilenameString =
3756 FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
3757 else
3758 FilenameString = llvm::sys::path::filename(FilenameString);
3759 }
3760
3761 auto FilenameGV =
3762 CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
3763 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
3764 cast<llvm::GlobalVariable>(
3765 FilenameGV.getPointer()->stripPointerCasts()));
3766 Filename = FilenameGV.getPointer();
3767 Line = PLoc.getLine();
3768 Column = PLoc.getColumn();
3769 } else {
3770 Filename = llvm::Constant::getNullValue(Int8PtrTy);
3771 Line = Column = 0;
3772 }
3773
3774 llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
3775 Builder.getInt32(Column)};
3776
3777 return llvm::ConstantStruct::getAnon(Data);
3778}
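// For illustration (approximate): a valid location is emitted as the constant
// { ptr @.src, i32 Line, i32 Column }, where @.src is the null-terminated
// (possibly path-stripped) file name; an invalid location uses a null Filename
// and zero Line/Column.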
3779
3780namespace {
3781/// Specify under what conditions this check can be recovered
3782enum class CheckRecoverableKind {
3783 /// Always terminate program execution if this check fails.
3784 Unrecoverable,
3785 /// Check supports recovering, runtime has both fatal (noreturn) and
3786 /// non-fatal handlers for this check.
3787 Recoverable,
3788 /// Runtime conditionally aborts, always need to support recovery.
3789 AlwaysRecoverable
3790};
3791}
3792
3793static CheckRecoverableKind
3794getRecoverableKind(SanitizerKind::SanitizerOrdinal Ordinal) {
3795 if (Ordinal == SanitizerKind::SO_Vptr)
3796 return CheckRecoverableKind::AlwaysRecoverable;
3797 else if (Ordinal == SanitizerKind::SO_Return ||
3798 Ordinal == SanitizerKind::SO_Unreachable)
3799 return CheckRecoverableKind::Unrecoverable;
3800 else
3801 return CheckRecoverableKind::Recoverable;
3802}
3803
3804namespace {
3805struct SanitizerHandlerInfo {
3806 char const *const Name;
3807 unsigned Version;
3808};
3809}
3810
3811const SanitizerHandlerInfo SanitizerHandlers[] = {
3812#define SANITIZER_CHECK(Enum, Name, Version, Msg) {#Name, Version},
3813 LIST_SANITIZER_CHECKS
3814#undef SANITIZER_CHECK
3815};
3816
3817static void emitCheckHandlerCall(CodeGenFunction &CGF,
3818 llvm::FunctionType *FnType,
3819 ArrayRef<llvm::Value *> FnArgs,
3820 SanitizerHandler CheckHandler,
3821 CheckRecoverableKind RecoverKind, bool IsFatal,
3822 llvm::BasicBlock *ContBB, bool NoMerge) {
3823 assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
3824 std::optional<ApplyDebugLocation> DL;
3825 if (!CGF.Builder.getCurrentDebugLocation()) {
3826 // Ensure that the call has at least an artificial debug location.
3827 DL.emplace(CGF, SourceLocation());
3828 }
3829 bool NeedsAbortSuffix =
3830 IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
3831 bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
3832 const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
3833 const StringRef CheckName = CheckInfo.Name;
3834 std::string FnName = "__ubsan_handle_" + CheckName.str();
3835 if (CheckInfo.Version && !MinimalRuntime)
3836 FnName += "_v" + llvm::utostr(CheckInfo.Version);
3837 if (MinimalRuntime)
3838 FnName += "_minimal";
3839 if (NeedsAbortSuffix)
3840 FnName += "_abort";
3841 bool MayReturn =
3842 !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
3843
3844 llvm::AttrBuilder B(CGF.getLLVMContext());
3845 if (!MayReturn) {
3846 B.addAttribute(llvm::Attribute::NoReturn)
3847 .addAttribute(llvm::Attribute::NoUnwind);
3848 }
3849 B.addUWTableAttr(llvm::UWTableKind::Default);
3850
3851 llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
3852 FnType, FnName,
3853 llvm::AttributeList::get(CGF.getLLVMContext(),
3854 llvm::AttributeList::FunctionIndex, B),
3855 /*Local=*/true);
3856 llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
3857 NoMerge = NoMerge || !CGF.CGM.getCodeGenOpts().OptimizationLevel ||
3858 (CGF.CurCodeDecl && CGF.CurCodeDecl->hasAttr<OptimizeNoneAttr>());
3859 if (NoMerge)
3860 HandlerCall->addFnAttr(llvm::Attribute::NoMerge);
3861 if (!MayReturn) {
3862 HandlerCall->setDoesNotReturn();
3863 CGF.Builder.CreateUnreachable();
3864 } else {
3865 CGF.Builder.CreateBr(ContBB);
3866 }
3867}
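// For illustration: the handler symbol is assembled from the check name plus
// optional suffixes. A fatal but recoverable type-mismatch check with handler
// version 1 calls __ubsan_handle_type_mismatch_v1_abort; the same check under
// -fsanitize-minimal-runtime calls __ubsan_handle_type_mismatch_minimal_abort.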
3868
3869void CodeGenFunction::EmitCheck(
3870 ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>> Checked,
3871 SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
3872 ArrayRef<llvm::Value *> DynamicArgs, const TrapReason *TR) {
3873 assert(IsSanitizerScope);
3874 assert(Checked.size() > 0);
3875 assert(CheckHandler >= 0 &&
3876 size_t(CheckHandler) < std::size(SanitizerHandlers));
3877 const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
3878
3879 llvm::Value *FatalCond = nullptr;
3880 llvm::Value *RecoverableCond = nullptr;
3881 llvm::Value *TrapCond = nullptr;
3882 bool NoMerge = false;
3883 // Expand checks into:
3884 // (Check1 || !allow_ubsan_check) && (Check2 || !allow_ubsan_check) ...
3885 // We need separate allow_ubsan_check intrinsics because they have separately
3886 // specified cutoffs.
3887 // This expression looks expensive but will be simplified after
3888 // LowerAllowCheckPass.
3889 for (auto &[Check, Ord] : Checked) {
3890 llvm::Value *GuardedCheck = Check;
3891 if (ClSanitizeGuardChecks ||
3892 (CGM.getCodeGenOpts().SanitizeSkipHotCutoffs[Ord] > 0)) {
3893 llvm::Value *Allow = Builder.CreateCall(
3894 CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
3895 llvm::ConstantInt::get(CGM.Int8Ty, Ord));
3896 GuardedCheck = Builder.CreateOr(Check, Builder.CreateNot(Allow));
3897 }
3898
3899 // -fsanitize-trap= overrides -fsanitize-recover=.
3900 llvm::Value *&Cond = CGM.getCodeGenOpts().SanitizeTrap.has(Ord) ? TrapCond
3901 : CGM.getCodeGenOpts().SanitizeRecover.has(Ord)
3902 ? RecoverableCond
3903 : FatalCond;
3904 Cond = Cond ? Builder.CreateAnd(Cond, GuardedCheck) : GuardedCheck;
3905
3906 if (!CGM.getCodeGenOpts().SanitizeMergeHandlers.has(Ord))
3907 NoMerge = true;
3908 }
3909
3910 if (TrapCond)
3911 EmitTrapCheck(TrapCond, CheckHandler, NoMerge, TR);
3912 if (!FatalCond && !RecoverableCond)
3913 return;
3914
3915 llvm::Value *JointCond;
3916 if (FatalCond && RecoverableCond)
3917 JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
3918 else
3919 JointCond = FatalCond ? FatalCond : RecoverableCond;
3920 assert(JointCond);
3921
3922 CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
3923 assert(SanOpts.has(Checked[0].second));
3924#ifndef NDEBUG
3925 for (int i = 1, n = Checked.size(); i < n; ++i) {
3926 assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
3927 "All recoverable kinds in a single check must be same!");
3928 assert(SanOpts.has(Checked[i].second));
3929 }
3930#endif
3931
3932 llvm::BasicBlock *Cont = createBasicBlock("cont");
3933 llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
3934 llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
3935 // Give hint that we very much don't expect to execute the handler
3936 llvm::MDBuilder MDHelper(getLLVMContext());
3937 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3938 Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
3939 EmitBlock(Handlers);
3940
3941 // Clear arguments for the MinimalRuntime handler.
3942 if (CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
3943 switch (CheckHandler) {
3944 case SanitizerHandler::TypeMismatch:
3945 // Pass value pointer only. It adds minimal overhead.
3946 StaticArgs = {};
3947 assert(DynamicArgs.size() == 1);
3948 break;
3949 default:
3950 // No arguments for other checks.
3951 StaticArgs = {};
3952 DynamicArgs = {};
3953 break;
3954 }
3955 }
3956
3957 // Handler functions take an i8* pointing to the (handler-specific) static
3958 // information block, followed by a sequence of intptr_t arguments
3959 // representing operand values.
3960 SmallVector<llvm::Value *, 4> Args;
3961 SmallVector<llvm::Type *, 4> ArgTypes;
3962
3963 Args.reserve(DynamicArgs.size() + 1);
3964 ArgTypes.reserve(DynamicArgs.size() + 1);
3965
3966 // Emit handler arguments and create handler function type.
3967 if (!StaticArgs.empty()) {
3968 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3969 auto *InfoPtr = new llvm::GlobalVariable(
3970 CGM.getModule(), Info->getType(),
3971 // Non-constant global is used in a handler to deduplicate reports.
3972 // TODO: change deduplication logic and make it constant.
3973 /*isConstant=*/false, llvm::GlobalVariable::PrivateLinkage, Info, "",
3974 nullptr, llvm::GlobalVariable::NotThreadLocal,
3975 CGM.getDataLayout().getDefaultGlobalsAddressSpace());
3976 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3977 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3978 Args.push_back(InfoPtr);
3979 ArgTypes.push_back(Args.back()->getType());
3980 }
3981
3982 for (llvm::Value *DynamicArg : DynamicArgs) {
3983 Args.push_back(EmitCheckValue(DynamicArg));
3984 ArgTypes.push_back(IntPtrTy);
3985 }
3986
3987 llvm::FunctionType *FnType =
3988 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
3989
3990 if (!FatalCond || !RecoverableCond) {
3991 // Simple case: we need to generate a single handler call, either
3992 // fatal, or non-fatal.
3993 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
3994 (FatalCond != nullptr), Cont, NoMerge);
3995 } else {
3996 // Emit two handler calls: first one for set of unrecoverable checks,
3997 // another one for recoverable.
3998 llvm::BasicBlock *NonFatalHandlerBB =
3999 createBasicBlock("non_fatal." + CheckName);
4000 llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
4001 Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
4002 EmitBlock(FatalHandlerBB);
4003 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
4004 NonFatalHandlerBB, NoMerge);
4005 EmitBlock(NonFatalHandlerBB);
4006 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
4007 Cont, NoMerge);
4008 }
4009
4010 EmitBlock(Cont);
4011}
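// For illustration, a rough sketch (not verbatim IR) of what EmitCheck emits
// for a single recoverable check such as signed-add overflow:
//   %ok = ...                                   ; the check condition
//   br i1 %ok, label %cont, label %handler.add_overflow, !prof <likely cont>
// handler.add_overflow:
//   call void @__ubsan_handle_add_overflow(ptr @static_data, i64 %lhs, i64 %rhs)
//   br label %cont
// cont: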
4012
4013void CodeGenFunction::EmitCfiSlowPathCheck(
4014 SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond,
4015 llvm::ConstantInt *TypeId, llvm::Value *Ptr,
4016 ArrayRef<llvm::Constant *> StaticArgs) {
4017 llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
4018
4019 llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
4020 llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
4021
4022 llvm::MDBuilder MDHelper(getLLVMContext());
4023 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
4024 BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
4025
4026 EmitBlock(CheckBB);
4027
4028 bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Ordinal);
4029
4030 llvm::CallInst *CheckCall;
4031 llvm::FunctionCallee SlowPathFn;
4032 if (WithDiag) {
4033 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
4034 auto *InfoPtr =
4035 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
4036 llvm::GlobalVariable::PrivateLinkage, Info);
4037 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
4038 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
4039
4040 SlowPathFn = CGM.getModule().getOrInsertFunction(
4041 "__cfi_slowpath_diag",
4042 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
4043 false));
4044 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
4045 } else {
4046 SlowPathFn = CGM.getModule().getOrInsertFunction(
4047 "__cfi_slowpath",
4048 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
4049 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
4050 }
4051
4052 CGM.setDSOLocal(
4053 cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
4054 CheckCall->setDoesNotThrow();
4055
4056 EmitBlock(Cont);
4057}
4058
4059// Emit a stub for __cfi_check function so that the linker knows about this
4060// symbol in LTO mode.
4061void CodeGenFunction::EmitCfiCheckStub() {
4062 llvm::Module *M = &CGM.getModule();
4063 ASTContext &C = getContext();
4064 QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);
4065
4066 FunctionArgList FnArgs;
4067 ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
4068 ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
4069 ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
4070 ImplicitParamKind::Other);
4071 FnArgs.push_back(&ArgCallsiteTypeId);
4072 FnArgs.push_back(&ArgAddr);
4073 FnArgs.push_back(&ArgCFICheckFailData);
4074 const CGFunctionInfo &FI =
4075 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, FnArgs);
4076
4077 llvm::Function *F = llvm::Function::Create(
4078 llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
4079 llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
4080 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
4081 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
4082 F->setAlignment(llvm::Align(4096));
4083 CGM.setDSOLocal(F);
4084
4085 llvm::LLVMContext &Ctx = M->getContext();
4086 llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
4087 // CrossDSOCFI pass is not executed if there is no executable code.
4088 SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
4089 llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
4090 llvm::ReturnInst::Create(Ctx, nullptr, BB);
4091}
4092
4093// This function is basically a switch over the CFI failure kind, which is
4094// extracted from CFICheckFailData (1st function argument). Each case is either
4095// llvm.trap or a call to one of the two runtime handlers, based on
4096// -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
4097// failure kind) traps, but this should really never happen. CFICheckFailData
4098// can be nullptr if the calling module has -fsanitize-trap behavior for this
4099// check kind; in this case __cfi_check_fail traps as well.
4100void CodeGenFunction::EmitCfiCheckFail() {
4101 auto CheckHandler = SanitizerHandler::CFICheckFail;
4102 // TODO: the SanitizerKind is not yet determined for this check (and might
4103 // not even be available, if Data == nullptr). However, we still want to
4104 // annotate the instrumentation. We approximate this by using all the CFI
4105 // kinds.
4106 SanitizerDebugLocation SanScope(
4107 this,
4108 {SanitizerKind::SO_CFIVCall, SanitizerKind::SO_CFINVCall,
4109 SanitizerKind::SO_CFIDerivedCast, SanitizerKind::SO_CFIUnrelatedCast,
4110 SanitizerKind::SO_CFIICall},
4111 CheckHandler);
4112 FunctionArgList Args;
4113 ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
4114 ImplicitParamKind::Other);
4115 ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
4116 ImplicitParamKind::Other);
4117 Args.push_back(&ArgData);
4118 Args.push_back(&ArgAddr);
4119
4120 const CGFunctionInfo &FI =
4121 CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args);
4122
4123 llvm::Function *F = llvm::Function::Create(
4124 llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
4125 llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
4126
4127 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
4128 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
4129 F->setVisibility(llvm::GlobalValue::HiddenVisibility);
4130
4131 StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
4132 SourceLocation());
4133
4135
4136 // This function is not affected by NoSanitizeList. This function does
4137 // not have a source location, but "src:*" would still apply. Revert any
4138 // changes to SanOpts made in StartFunction.
4139 SanOpts = CGM.getLangOpts().Sanitize;
4140
4141 llvm::Value *Data =
4142 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
4143 CGM.getContext().VoidPtrTy, ArgData.getLocation());
4144 llvm::Value *Addr =
4145 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
4146 CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
4147
4148 // Data == nullptr means the calling module has trap behaviour for this check.
4149 llvm::Value *DataIsNotNullPtr =
4150 Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
4151 // TODO: since there is no data, we don't know the CheckKind, and therefore
4152 // cannot inspect CGM.getCodeGenOpts().SanitizeMergeHandlers. We default to
4153 // NoMerge = false. Users can disable merging by disabling optimization.
4154 EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail,
4155 /*NoMerge=*/false);
4156
4157 llvm::StructType *SourceLocationTy =
4158 llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
4159 llvm::StructType *CfiCheckFailDataTy =
4160 llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
4161
4162 llvm::Value *V = Builder.CreateConstGEP2_32(
4163 CfiCheckFailDataTy, Builder.CreatePointerCast(Data, UnqualPtrTy), 0, 0);
4164
4165 Address CheckKindAddr(V, Int8Ty, getIntAlign());
4166 llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
4167
4168 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
4169 CGM.getLLVMContext(),
4170 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
4171 llvm::Value *ValidVtable = Builder.CreateZExt(
4172 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
4173 {Addr, AllVtables}),
4174 IntPtrTy);
4175
4176 const std::pair<int, SanitizerKind::SanitizerOrdinal> CheckKinds[] = {
4177 {CFITCK_VCall, SanitizerKind::SO_CFIVCall},
4178 {CFITCK_NVCall, SanitizerKind::SO_CFINVCall},
4179 {CFITCK_DerivedCast, SanitizerKind::SO_CFIDerivedCast},
4180 {CFITCK_UnrelatedCast, SanitizerKind::SO_CFIUnrelatedCast},
4181 {CFITCK_ICall, SanitizerKind::SO_CFIICall}};
4182
4183 for (auto CheckKindOrdinalPair : CheckKinds) {
4184 int Kind = CheckKindOrdinalPair.first;
4185 SanitizerKind::SanitizerOrdinal Ordinal = CheckKindOrdinalPair.second;
4186
4187 // TODO: we could apply SanitizerAnnotateDebugInfo(Ordinal) instead of
4188 // relying on the SanitizerScope with all CFI ordinals
4189
4190 llvm::Value *Cond =
4191 Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
4192 if (CGM.getLangOpts().Sanitize.has(Ordinal))
4193 EmitCheck(std::make_pair(Cond, Ordinal), SanitizerHandler::CFICheckFail,
4194 {}, {Data, Addr, ValidVtable});
4195 else
4196 // TODO: we can't rely on CGM.getCodeGenOpts().SanitizeMergeHandlers.
4197 // Although the compiler allows SanitizeMergeHandlers to be set
4198 // independently of CGM.getLangOpts().Sanitize, Driver/SanitizerArgs.cpp
4199 // requires that SanitizeMergeHandlers is a subset of Sanitize.
4200 EmitTrapCheck(Cond, CheckHandler, /*NoMerge=*/false);
4201 }
4202
4203 FinishFunction();
4204 // The only reference to this function will be created during LTO link.
4205 // Make sure it survives until then.
4206 CGM.addUsedGlobal(F);
4207}
4208
4209void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
4210 if (SanOpts.has(SanitizerKind::Unreachable)) {
4211 auto CheckOrdinal = SanitizerKind::SO_Unreachable;
4212 auto CheckHandler = SanitizerHandler::BuiltinUnreachable;
4213 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
4214 EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
4215 CheckOrdinal),
4216 CheckHandler, EmitCheckSourceLocation(Loc), {});
4217 }
4218 Builder.CreateUnreachable();
4219}
4220
4221void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
4222 SanitizerHandler CheckHandlerID,
4223 bool NoMerge, const TrapReason *TR) {
4224 llvm::BasicBlock *Cont = createBasicBlock("cont");
4225
4226 // If we're optimizing, collapse all calls to trap down to just one per
4227 // check-type per function to save on code size.
4228 if ((int)TrapBBs.size() <= CheckHandlerID)
4229 TrapBBs.resize(CheckHandlerID + 1);
4230
4231 llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
4232
4233 llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
4234 llvm::StringRef TrapMessage;
4235 llvm::StringRef TrapCategory;
4236 auto DebugTrapReasonKind = CGM.getCodeGenOpts().getSanitizeDebugTrapReasons();
4237 if (TR && !TR->isEmpty() &&
4238 DebugTrapReasonKind ==
4239 CodeGenOptions::SanitizeDebugTrapReasonKind::Detailed) {
4240 TrapMessage = TR->getMessage();
4241 TrapCategory = TR->getCategory();
4242 } else {
4243 TrapMessage = GetUBSanTrapForHandler(CheckHandlerID);
4244 TrapCategory = "Undefined Behavior Sanitizer";
4245 }
4246
4247 if (getDebugInfo() && !TrapMessage.empty() &&
4248 DebugTrapReasonKind !=
4249 CodeGenOptions::SanitizeDebugTrapReasonKind::None &&
4250 TrapLocation) {
4251 TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
4252 TrapLocation, TrapCategory, TrapMessage);
4253 }
4254
4255 NoMerge = NoMerge || !CGM.getCodeGenOpts().OptimizationLevel ||
4256 (CurCodeDecl && CurCodeDecl->hasAttr<OptimizeNoneAttr>());
4257
4258 llvm::MDBuilder MDHelper(getLLVMContext());
4259 if (TrapBB && !NoMerge) {
4260 auto Call = TrapBB->begin();
4261 assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
4262
4263 Call->applyMergedLocation(Call->getDebugLoc(), TrapLocation);
4264
4265 Builder.CreateCondBr(Checked, Cont, TrapBB,
4266 MDHelper.createLikelyBranchWeights());
4267 } else {
4268 TrapBB = createBasicBlock("trap");
4269 Builder.CreateCondBr(Checked, Cont, TrapBB,
4270 MDHelper.createLikelyBranchWeights());
4271 EmitBlock(TrapBB);
4272
4273 ApplyDebugLocation applyTrapDI(*this, TrapLocation);
4274
4275 llvm::CallInst *TrapCall =
4276 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
4277 llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));
4278
4279 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
4280 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
4281 CGM.getCodeGenOpts().TrapFuncName);
4282 TrapCall->addFnAttr(A);
4283 }
4284 if (NoMerge)
4285 TrapCall->addFnAttr(llvm::Attribute::NoMerge);
4286 TrapCall->setDoesNotReturn();
4287 TrapCall->setDoesNotThrow();
4288 Builder.CreateUnreachable();
4289 }
4290
4291 EmitBlock(Cont);
4292}
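// For illustration: the trap block created above ends in a call to
// @llvm.ubsantrap(i8 <CheckHandlerID>) followed by 'unreachable', carrying a
// "trap-func-name" attribute when -ftrap-function is set; when optimizing,
// later checks of the same kind reuse this single block unless merging is
// suppressed.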
4293
4294llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
4295 llvm::CallInst *TrapCall =
4296 Builder.CreateCall(CGM.getIntrinsic(IntrID));
4297
4298 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
4299 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
4300 CGM.getCodeGenOpts().TrapFuncName);
4301 TrapCall->addFnAttr(A);
4302 }
4303
4304 if (InNoMergeAttributedStmt)
4305 TrapCall->addFnAttr(llvm::Attribute::NoMerge);
4306 return TrapCall;
4307}
4308
4309Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
4310 LValueBaseInfo *BaseInfo,
4311 TBAAAccessInfo *TBAAInfo) {
4312 assert(E->getType()->isArrayType() &&
4313 "Array to pointer decay must have array source type!");
4314
4315 // Expressions of array type can't be bitfields or vector elements.
4316 LValue LV = EmitLValue(E);
4317 Address Addr = LV.getAddress();
4318
4319 // If the array type was an incomplete type, we need to make sure
4320 // the decay ends up being the right type.
4321 llvm::Type *NewTy = ConvertType(E->getType());
4322 Addr = Addr.withElementType(NewTy);
4323
4324 // Note that VLA pointers are always decayed, so we don't need to do
4325 // anything here.
4326 if (!E->getType()->isVariableArrayType()) {
4327 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4328 "Expected pointer to array");
4329 Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4330 }
4331
4332 // The result of this decay conversion points to an array element within the
4333 // base lvalue. However, since TBAA currently does not support representing
4334 // accesses to elements of member arrays, we conservatively represent accesses
4335 // to the pointee object as if it had no base lvalue specified.
4336 // TODO: Support TBAA for member arrays.
4337 QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
4338 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
4339 if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
4340
4341 return Addr.withElementType(ConvertTypeForMem(EltType));
4342}
4343
4344/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
4345/// array to pointer, return the array subexpression.
4346static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
4347 // If this isn't just an array->pointer decay, bail out.
4348 const auto *CE = dyn_cast<CastExpr>(E);
4349 if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
4350 return nullptr;
4351
4352 // If this is a decay from variable width array, bail out.
4353 const Expr *SubExpr = CE->getSubExpr();
4354 if (SubExpr->getType()->isVariableArrayType())
4355 return nullptr;
4356
4357 return SubExpr;
4358}
4359
4360static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
4361 llvm::Type *elemType,
4362 llvm::Value *ptr,
4363 ArrayRef<llvm::Value*> indices,
4364 bool inbounds,
4365 bool signedIndices,
4366 SourceLocation loc,
4367 const llvm::Twine &name = "arrayidx") {
4368 if (inbounds) {
4369 return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
4370 CodeGenFunction::NotSubtraction, loc,
4371 name);
4372 } else {
4373 return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
4374 }
4375}
4376
4377static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
4378 ArrayRef<llvm::Value *> indices,
4379 llvm::Type *elementType, bool inbounds,
4380 bool signedIndices, SourceLocation loc,
4381 CharUnits align,
4382 const llvm::Twine &name = "arrayidx") {
4383 if (inbounds) {
4384 return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
4385 CodeGenFunction::NotSubtraction, loc,
4386 align, name);
4387 } else {
4388 return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
4389 }
4390}
4391
4392static QualType getFixedSizeElementType(const ASTContext &ctx,
4393 const VariableArrayType *vla) {
4394 QualType eltType;
4395 do {
4396 eltType = vla->getElementType();
4397 } while ((vla = ctx.getAsVariableArrayType(eltType)));
4398 return eltType;
4399}
4400
4401static bool hasBPFPreserveStaticOffset(const RecordDecl *D) {
4402 return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
4403}
4404
4405static bool hasBPFPreserveStaticOffset(const Expr *E) {
4406 if (!E)
4407 return false;
4408 QualType PointeeType = E->getType()->getPointeeType();
4409 if (PointeeType.isNull())
4410 return false;
4411 if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
4412 return hasBPFPreserveStaticOffset(BaseDecl);
4413 return false;
4414}
4415
4416// Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
4417static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF,
4418 Address &Addr) {
4419 if (!CGF.getTarget().getTriple().isBPF())
4420 return Addr;
4421
4422 llvm::Function *Fn =
4423 CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
4424 llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
4425 return Address(Call, Addr.getElementType(), Addr.getAlignment());
4426}
4427
4428/// Given an array base, check whether its member access belongs to a record
4429/// with preserve_access_index attribute or not.
4430static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
4431 if (!ArrayBase || !CGF.getDebugInfo())
4432 return false;
4433
4434 // Only support base as either a MemberExpr or DeclRefExpr.
4435 // DeclRefExpr to cover cases like:
4436 // struct s { int a; int b[10]; };
4437 // struct s *p;
4438 // p[1].a
4439 // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
4440 // p->b[5] is a MemberExpr example.
4441 const Expr *E = ArrayBase->IgnoreImpCasts();
4442 if (const auto *ME = dyn_cast<MemberExpr>(E))
4443 return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4444
4445 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
4446 const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
4447 if (!VarDef)
4448 return false;
4449
4450 const auto *PtrT = VarDef->getType()->getAs<PointerType>();
4451 if (!PtrT)
4452 return false;
4453
4454 const auto *PointeeT = PtrT->getPointeeType()
4455 ->getUnqualifiedDesugaredType();
4456 if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
4457 return RecT->getOriginalDecl()
4458 ->getMostRecentDecl()
4459 ->hasAttr<BPFPreserveAccessIndexAttr>();
4460 return false;
4461 }
4462
4463 return false;
4464}
4465
4466static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
4467 ArrayRef<llvm::Value *> indices,
4468 QualType eltType, bool inbounds,
4469 bool signedIndices, SourceLocation loc,
4470 QualType *arrayType = nullptr,
4471 const Expr *Base = nullptr,
4472 const llvm::Twine &name = "arrayidx") {
4473 // All the indices except that last must be zero.
4474#ifndef NDEBUG
4475 for (auto *idx : indices.drop_back())
4476 assert(isa<llvm::ConstantInt>(idx) &&
4477 cast<llvm::ConstantInt>(idx)->isZero());
4478#endif
4479
4480 // Determine the element size of the statically-sized base. This is
4481 // the thing that the indices are expressed in terms of.
4482 if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
4483 eltType = getFixedSizeElementType(CGF.getContext(), vla);
4484 }
4485
4486 // We can use that to compute the best alignment of the element.
4487 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
4488 CharUnits eltAlign =
4489 getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
4490
4491 if (hasBPFPreserveStaticOffset(Base))
4492 addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
4493
4494 llvm::Value *eltPtr;
4495 auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
4496 if (!LastIndex ||
4497 (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {
4498 addr = emitArraySubscriptGEP(CGF, addr, indices,
4499 CGF.ConvertTypeForMem(eltType), inbounds,
4500 signedIndices, loc, eltAlign, name);
4501 return addr;
4502 } else {
4503 // Remember the original array subscript for bpf target
4504 unsigned idx = LastIndex->getZExtValue();
4505 llvm::DIType *DbgInfo = nullptr;
4506 if (arrayType)
4507 DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
4508 eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
4509 addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
4510 idx, DbgInfo);
4511 }
4512
4513 return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
4514}
4515
4516namespace {
4517
4518/// StructFieldAccess is a simple visitor class to grab the first l-value to
4519/// r-value cast Expr.
4520struct StructFieldAccess
4521 : public ConstStmtVisitor<StructFieldAccess, const Expr *> {
4522 const Expr *VisitCastExpr(const CastExpr *E) {
4523 if (E->getCastKind() == CK_LValueToRValue)
4524 return E;
4525 return Visit(E->getSubExpr());
4526 }
4527 const Expr *VisitParenExpr(const ParenExpr *E) {
4528 return Visit(E->getSubExpr());
4529 }
4530};
4531
4532} // end anonymous namespace
4533
4534/// The offset of a field from the beginning of the record.
4535static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD,
4536 const FieldDecl *Field, int64_t &Offset) {
4537 ASTContext &Ctx = CGF.getContext();
4538 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
4539 unsigned FieldNo = 0;
4540
4541 for (const FieldDecl *FD : RD->fields()) {
4542 if (FD == Field) {
4543 Offset += Layout.getFieldOffset(FieldNo);
4544 return true;
4545 }
4546
4547 QualType Ty = FD->getType();
4548 if (Ty->isRecordType())
4549 if (getFieldOffsetInBits(CGF, Ty->getAsRecordDecl(), Field, Offset)) {
4550 Offset += Layout.getFieldOffset(FieldNo);
4551 return true;
4552 }
4553
4554 if (!RD->isUnion())
4555 ++FieldNo;
4556 }
4557
4558 return false;
4559}
4560
4561/// Returns the relative offset difference between \p FD1 and \p FD2.
4562/// \code
4563/// offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
4564/// \endcode
4565/// Both fields must be within the same struct.
4566static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
4567 const FieldDecl *FD1,
4568 const FieldDecl *FD2) {
4569 const RecordDecl *FD1OuterRec =
4570 FD1->getParent()->getOuterLexicalRecordContext();
4571 const RecordDecl *FD2OuterRec =
4572 FD2->getParent()->getOuterLexicalRecordContext();
4573
4574 if (FD1OuterRec != FD2OuterRec)
4575 // Fields must be within the same RecordDecl.
4576 return std::optional<int64_t>();
4577
4578 int64_t FD1Offset = 0;
4579 if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
4580 return std::optional<int64_t>();
4581
4582 int64_t FD2Offset = 0;
4583 if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
4584 return std::optional<int64_t>();
4585
4586 return std::make_optional<int64_t>(FD1Offset - FD2Offset);
4587}
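// Worked example (hypothetical struct, for illustration): given
//   struct S { int count; char tag; int elems[8]; };
// getOffsetDifferenceInBits(CGF, count, elems) returns 0 - 64 = -64 on a
// typical target (count at bit offset 0, elems at bit offset 64 after
// padding), i.e. the signed distance later used to locate the count field.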
4588
4589/// EmitCountedByBoundsChecking - If the array being accessed has a "counted_by"
4590/// attribute, generate bounds checking code. The "count" field is at the top
4591/// level of the struct or in an anonymous struct that's also at the top level.
4592/// Future expansions may allow the "count" to reside at any place in the
4593/// struct, but the value of "counted_by" will be a "simple" path to the count,
4594/// i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
4595/// similar to emit the correct GEP.
4596void CodeGenFunction::EmitCountedByBoundsChecking(
4597 const Expr *E, llvm::Value *Idx, Address Addr, QualType IdxTy,
4598 QualType ArrayTy, bool Accessed, bool FlexibleArray) {
4599 const auto *ME = dyn_cast<MemberExpr>(E->IgnoreImpCasts());
4600 if (!ME || !ME->getMemberDecl()->getType()->isCountAttributedType())
4601 return;
4602
4603 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
4604 getLangOpts().getStrictFlexArraysLevel();
4605 if (FlexibleArray &&
4606 !ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel))
4607 return;
4608
4609 const FieldDecl *FD = cast<FieldDecl>(ME->getMemberDecl());
4610 const FieldDecl *CountFD = FD->findCountedByField();
4611 if (!CountFD)
4612 return;
4613
4614 if (std::optional<int64_t> Diff =
4615 getOffsetDifferenceInBits(*this, CountFD, FD)) {
4616 if (!Addr.isValid()) {
4617 // An invalid Address indicates we're checking a pointer array access.
4618 // Emit the checked L-Value here.
4619 LValue LV = EmitCheckedLValue(E, TCK_MemberAccess);
4620 Addr = LV.getAddress();
4621 }
4622
4623 // FIXME: The 'static_cast' is necessary, otherwise the result turns into a
4624 // uint64_t, which messes things up if we have a negative offset difference.
4625 Diff = *Diff / static_cast<int64_t>(CGM.getContext().getCharWidth());
4626
4627 // Create a GEP with the byte offset between the counted object and the
4628 // count and use that to load the count value.
4629 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Int8PtrTy, Int8Ty);
4630
4631 llvm::Type *CountTy = ConvertType(CountFD->getType());
4632 llvm::Value *Res =
4633 Builder.CreateInBoundsGEP(Int8Ty, Addr.emitRawPointer(*this),
4634 Builder.getInt32(*Diff), ".counted_by.gep");
4635 Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
4636 ".counted_by.load");
4637
4638 // Now emit the bounds checking.
4639 EmitBoundsCheckImpl(E, Res, Idx, IdxTy, ArrayTy, Accessed);
4640 }
4641}
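// For illustration (hypothetical declaration): with
//   struct buf { int count; int elems[] __attribute__((counted_by(count))); };
// an access p->elems[i] under -fsanitize=array-bounds reaches this function,
// which computes the byte offset from 'elems' back to 'count', loads the count
// through the i8 GEP above, and bounds-checks 'i' against the loaded value.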
4642
4643LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
4644 bool Accessed) {
4645 // The index must always be an integer, which is not an aggregate. Emit it
4646 // in lexical order (this complexity is, sadly, required by C++17).
4647 llvm::Value *IdxPre =
4648 (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
4649 bool SignedIndices = false;
4650 auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
4651 auto *Idx = IdxPre;
4652 if (E->getLHS() != E->getIdx()) {
4653 assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
4654 Idx = EmitScalarExpr(E->getIdx());
4655 }
4656
4657 QualType IdxTy = E->getIdx()->getType();
4658 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
4659 SignedIndices |= IdxSigned;
4660
4661 if (SanOpts.has(SanitizerKind::ArrayBounds))
4662 EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
4663
4664 // Extend or truncate the index type to 32 or 64-bits.
4665 if (Promote && Idx->getType() != IntPtrTy)
4666 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
4667
4668 return Idx;
4669 };
4670 IdxPre = nullptr;
4671
4672 // If the base is a vector type, then we are forming a vector element lvalue
4673 // with this subscript.
4674 if (E->getBase()->getType()->isSubscriptableVectorType() &&
4675 !isa<ExtVectorElementExpr>(E->getBase())) {
4676 // Emit the vector as an lvalue to get its address.
4677 LValue LHS = EmitLValue(E->getBase());
4678 auto *Idx = EmitIdxAfterBase(/*Promote*/false);
4679 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
4680 return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
4681 LHS.getBaseInfo(), TBAAAccessInfo());
4682 }
4683
4684 // The HLSL runtime handles the subscript expression on global resource arrays.
4685 if (getLangOpts().HLSL && (E->getType()->isHLSLResourceRecord() ||
4686 E->getType()->isHLSLResourceRecordArray())) {
4687 std::optional<LValue> LV =
4688 CGM.getHLSLRuntime().emitResourceArraySubscriptExpr(E, *this);
4689 if (LV.has_value())
4690 return *LV;
4691 }
4692
4693 // All the other cases basically behave like simple offsetting.
4694
4695 // Handle the extvector case we ignored above.
4696 if (isa<ExtVectorElementExpr>(E->getBase())) {
4697 LValue LV = EmitLValue(E->getBase());
4698 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4699 Address Addr = EmitExtVectorElementLValue(LV);
4700
4701 QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
4702 Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
4703 SignedIndices, E->getExprLoc());
4704 return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
4705 CGM.getTBAAInfoForSubobject(LV, EltType));
4706 }
4707
4708 LValueBaseInfo EltBaseInfo;
4709 TBAAAccessInfo EltTBAAInfo;
4710 Address Addr = Address::invalid();
4711 if (const VariableArrayType *vla =
4712 getContext().getAsVariableArrayType(E->getType())) {
4713 // The base must be a pointer, which is not an aggregate. Emit
4714 // it. It needs to be emitted first in case it's what captures
4715 // the VLA bounds.
4716 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4717 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4718
4719 // The element count here is the total number of non-VLA elements.
4720 llvm::Value *numElements = getVLASize(vla).NumElts;
4721
4722 // Effectively, the multiply by the VLA size is part of the GEP.
4723 // GEP indexes are signed, and scaling an index isn't permitted to
4724 // signed-overflow, so we use the same semantics for our explicit
4725 // multiply. We suppress this if overflow is not undefined behavior.
4726 if (getLangOpts().PointerOverflowDefined) {
4727 Idx = Builder.CreateMul(Idx, numElements);
4728 } else {
4729 Idx = Builder.CreateNSWMul(Idx, numElements);
4730 }
4731
4732 Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
4733 !getLangOpts().PointerOverflowDefined,
4734 SignedIndices, E->getExprLoc());
4735
4736 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
4737 // Indexing over an interface, as in "NSString *P; P[4];"
4738
4739 // Emit the base pointer.
4740 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4741 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4742
4743 CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
4744 llvm::Value *InterfaceSizeVal =
4745 llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
4746
4747 llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
4748
4749 // We don't necessarily build correct LLVM struct types for ObjC
4750 // interfaces, so we can't rely on GEP to do this scaling
4751 // correctly, so we need to cast to i8*. FIXME: is this actually
4752 // true? A lot of other things in the fragile ABI would break...
4753 llvm::Type *OrigBaseElemTy = Addr.getElementType();
4754
4755 // Do the GEP.
4756 CharUnits EltAlign =
4757 getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
4758 llvm::Value *EltPtr =
4759 emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
4760 ScaledIdx, false, SignedIndices, E->getExprLoc());
4761 Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
4762 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4763 // If this is A[i] where A is an array, the frontend will have decayed the
4764 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4765 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4766 // "gep x, i" here. Emit one "gep A, 0, i".
4767 assert(Array->getType()->isArrayType() &&
4768 "Array to pointer decay must have array source type!");
4769 LValue ArrayLV;
4770 // For simple multidimensional array indexing, set the 'accessed' flag for
4771 // better bounds-checking of the base expression.
4772 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4773 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4774 else
4775 ArrayLV = EmitLValue(Array);
4776 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4777
4778 if (SanOpts.has(SanitizerKind::ArrayBounds))
4779 EmitCountedByBoundsChecking(Array, Idx, ArrayLV.getAddress(),
4780 E->getIdx()->getType(), Array->getType(),
4781 Accessed, /*FlexibleArray=*/true);
4782
4783 // Propagate the alignment from the array itself to the result.
4784 QualType arrayType = Array->getType();
4785 Addr = emitArraySubscriptGEP(
4786 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
4787 E->getType(), !getLangOpts().PointerOverflowDefined, SignedIndices,
4788 E->getExprLoc(), &arrayType, E->getBase());
4789 EltBaseInfo = ArrayLV.getBaseInfo();
4790 if (!CGM.getCodeGenOpts().NewStructPathTBAA) {
4791 // Since CodeGenTBAA::getTypeInfoHelper only handles array types for
4792 // new struct path TBAA, we must use a plain access.
4793 EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
4794 } else if (ArrayLV.getTBAAInfo().isMayAlias()) {
4795 EltTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4796 } else if (ArrayLV.getTBAAInfo().isIncomplete()) {
4797 // The array element is complete, even if the array is not.
4798 EltTBAAInfo = CGM.getTBAAAccessInfo(E->getType());
4799 } else {
4800 // The TBAA access info from the array (base) lvalue is ordinary. We will
4801 // adapt it to create access info for the element.
4802 EltTBAAInfo = ArrayLV.getTBAAInfo();
4803
4804 // We retain the TBAA struct path (BaseType and Offset members) from the
4805 // array. In the TBAA representation, we map any array access to the
4806 // element at index 0, as the index is generally a runtime value. This
4807 // element has the same offset in the base type as the array itself.
4808 // If the array lvalue had no base type, there is no point trying to
4809 // generate one, since an array itself is not a valid base type.
4810
4811 // We also retain the access type from the base lvalue, but the access
4812 // size must be updated to the size of an individual element.
4813 EltTBAAInfo.Size =
4814 getContext().getTypeSizeInChars(E->getType());
4815 }
4816 } else {
4817 // The base must be a pointer; emit it with an estimate of its alignment.
4818 Address BaseAddr =
4819 EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4820 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4821 QualType ptrType = E->getBase()->getType();
4822 Addr = emitArraySubscriptGEP(*this, BaseAddr, Idx, E->getType(),
4823 !getLangOpts().PointerOverflowDefined,
4824 SignedIndices, E->getExprLoc(), &ptrType,
4825 E->getBase());
4826
4827 if (SanOpts.has(SanitizerKind::ArrayBounds)) {
4828 StructFieldAccess Visitor;
4829 const Expr *Base = Visitor.Visit(E->getBase());
4830
4831 if (const auto *CE = dyn_cast_if_present<CastExpr>(Base);
4832 CE && CE->getCastKind() == CK_LValueToRValue)
4833 EmitCountedByBoundsChecking(CE->getSubExpr(), Idx, Address::invalid(),
4834 E->getIdx()->getType(), ptrType, Accessed,
4835 /*FlexibleArray=*/false);
4836 }
4837 }
4838
4839 LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
4840
4841 if (getLangOpts().ObjC &&
4842 getLangOpts().getGC() != LangOptions::NonGC) {
4843 LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
4844 setObjCGCLValueClass(getContext(), E, LV);
4845 }
4846 return LV;
4847}
4848
4849 llvm::Value *CodeGenFunction::EmitMatrixIndexExpr(const Expr *E) {
4850 llvm::Value *Idx = EmitScalarExpr(E);
4851 if (Idx->getType() == IntPtrTy)
4852 return Idx;
4853 bool IsSigned = E->getType()->isSignedIntegerOrEnumerationType();
4854 return Builder.CreateIntCast(Idx, IntPtrTy, IsSigned);
4855}
4856
4857 LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
4858 assert(
4859 !E->isIncomplete() &&
4860 "incomplete matrix subscript expressions should be rejected during Sema");
4861 LValue Base = EmitLValue(E->getBase());
4862
4863 // Extend or truncate the index type to 32 or 64-bits if needed.
4864 llvm::Value *RowIdx = EmitMatrixIndexExpr(E->getRowIdx());
4865 llvm::Value *ColIdx = EmitMatrixIndexExpr(E->getColumnIdx());
4866
4867 llvm::Value *NumRows = Builder.getIntN(
4868 RowIdx->getType()->getScalarSizeInBits(),
4869 E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
4870 llvm::Value *FinalIdx =
4871 Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
4872 return LValue::MakeMatrixElt(
4873 MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
4874 E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
4875}
4876
4877 static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
4878 LValueBaseInfo &BaseInfo,
4879 TBAAAccessInfo &TBAAInfo,
4880 QualType BaseTy, QualType ElTy,
4881 bool IsLowerBound) {
4882 LValue BaseLVal;
4883 if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
4884 BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
4885 if (BaseTy->isArrayType()) {
4886 Address Addr = BaseLVal.getAddress();
4887 BaseInfo = BaseLVal.getBaseInfo();
4888
4889 // If the array type was an incomplete type, we need to make sure
4890 // the decay ends up being the right type.
4891 llvm::Type *NewTy = CGF.ConvertType(BaseTy);
4892 Addr = Addr.withElementType(NewTy);
4893
4894 // Note that VLA pointers are always decayed, so we don't need to do
4895 // anything here.
4896 if (!BaseTy->isVariableArrayType()) {
4897 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4898 "Expected pointer to array");
4899 Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4900 }
4901
4902 return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
4903 }
4904 LValueBaseInfo TypeBaseInfo;
4905 TBAAAccessInfo TypeTBAAInfo;
4906 CharUnits Align =
4907 CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
4908 BaseInfo.mergeForCast(TypeBaseInfo);
4909 TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
4910 return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
4911 CGF.ConvertTypeForMem(ElTy), Align);
4912 }
4913 return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
4914}
4915
4916 LValue CodeGenFunction::EmitArraySectionExpr(const ArraySectionExpr *E,
4917 bool IsLowerBound) {
4918
4919 assert(!E->isOpenACCArraySection() &&
4920 "OpenACC Array section codegen not implemented");
4921
4922 QualType BaseTy = ArraySectionExpr::getBaseOriginalType(E->getBase());
4923 QualType ResultExprTy;
4924 if (auto *AT = getContext().getAsArrayType(BaseTy))
4925 ResultExprTy = AT->getElementType();
4926 else
4927 ResultExprTy = BaseTy->getPointeeType();
4928 llvm::Value *Idx = nullptr;
4929 if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
4930 // Requesting lower bound or upper bound, but without provided length and
4931 // without ':' symbol for the default length -> length = 1.
4932 // Idx = LowerBound ?: 0;
4933 if (auto *LowerBound = E->getLowerBound()) {
4934 Idx = Builder.CreateIntCast(
4935 EmitScalarExpr(LowerBound), IntPtrTy,
4936 LowerBound->getType()->hasSignedIntegerRepresentation());
4937 } else
4938 Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
4939 } else {
4940 // Try to emit length or lower bound as constant. If this is possible, 1
4941 // is subtracted from constant length or lower bound. Otherwise, emit LLVM
4942 // IR (LB + Len) - 1.
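// e.g. for the section 'a[2:3]' the lower-bound index is 2 and the
// upper-bound index is 2 + 3 - 1 == 4; with constant operands the value is
// folded here, otherwise the add/sub below is emitted as IR.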
4943 auto &C = CGM.getContext();
4944 auto *Length = E->getLength();
4945 llvm::APSInt ConstLength;
4946 if (Length) {
4947 // Idx = LowerBound + Length - 1;
4948 if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
4949 ConstLength = CL->zextOrTrunc(PointerWidthInBits);
4950 Length = nullptr;
4951 }
4952 auto *LowerBound = E->getLowerBound();
4953 llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
4954 if (LowerBound) {
4955 if (std::optional<llvm::APSInt> LB =
4956 LowerBound->getIntegerConstantExpr(C)) {
4957 ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
4958 LowerBound = nullptr;
4959 }
4960 }
4961 if (!Length)
4962 --ConstLength;
4963 else if (!LowerBound)
4964 --ConstLowerBound;
4965
4966 if (Length || LowerBound) {
4967 auto *LowerBoundVal =
4968 LowerBound
4969 ? Builder.CreateIntCast(
4970 EmitScalarExpr(LowerBound), IntPtrTy,
4971 LowerBound->getType()->hasSignedIntegerRepresentation())
4972 : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
4973 auto *LengthVal =
4974 Length
4975 ? Builder.CreateIntCast(
4976 EmitScalarExpr(Length), IntPtrTy,
4977 Length->getType()->hasSignedIntegerRepresentation())
4978 : llvm::ConstantInt::get(IntPtrTy, ConstLength);
4979 Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
4980 /*HasNUW=*/false,
4981 !getLangOpts().PointerOverflowDefined);
4982 if (Length && LowerBound) {
4983 Idx = Builder.CreateSub(
4984 Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
4985 /*HasNUW=*/false, !getLangOpts().PointerOverflowDefined);
4986 }
4987 } else
4988 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
4989 } else {
4990 // Idx = ArraySize - 1;
4991 QualType ArrayTy = BaseTy->isPointerType()
4992 ? BaseTy->getPointeeType()
4993 : BaseTy;
4994 if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
4995 Length = VAT->getSizeExpr();
4996 if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
4997 ConstLength = *L;
4998 Length = nullptr;
4999 }
5000 } else {
5001 auto *CAT = C.getAsConstantArrayType(ArrayTy);
5002 assert(CAT && "unexpected type for array initializer");
5003 ConstLength = CAT->getSize();
5004 }
5005 if (Length) {
5006 auto *LengthVal = Builder.CreateIntCast(
5007 EmitScalarExpr(Length), IntPtrTy,
5008 Length->getType()->hasSignedIntegerRepresentation());
5009 Idx = Builder.CreateSub(
5010 LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
5011 /*HasNUW=*/false, !getLangOpts().PointerOverflowDefined);
5012 } else {
5013 ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
5014 --ConstLength;
5015 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
5016 }
5017 }
5018 }
5019 assert(Idx);
5020
5021 Address EltPtr = Address::invalid();
5022 LValueBaseInfo BaseInfo;
5023 TBAAAccessInfo TBAAInfo;
5024 if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
5025 // The base must be a pointer, which is not an aggregate. Emit
5026 // it. It needs to be emitted first in case it's what captures
5027 // the VLA bounds.
5028 Address Base =
5029 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
5030 BaseTy, VLA->getElementType(), IsLowerBound);
5031 // The element count here is the total number of non-VLA elements.
5032 llvm::Value *NumElements = getVLASize(VLA).NumElts;
5033
5034 // Effectively, the multiply by the VLA size is part of the GEP.
5035 // GEP indexes are signed, and scaling an index isn't permitted to
5036 // signed-overflow, so we use the same semantics for our explicit
5037 // multiply. We suppress this if overflow is not undefined behavior.
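// e.g. if the element type is the VLA 'double[n]', an element index i is
// scaled to i * n with 'mul nsw' so it behaves like a direct inbounds index;
// the nsw flag is dropped when pointer overflow is defined.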
5038 if (getLangOpts().PointerOverflowDefined)
5039 Idx = Builder.CreateMul(Idx, NumElements);
5040 else
5041 Idx = Builder.CreateNSWMul(Idx, NumElements);
5042 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
5043 !getLangOpts().PointerOverflowDefined,
5044 /*signedIndices=*/false, E->getExprLoc());
5045 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
5046 // If this is A[i] where A is an array, the frontend will have decayed the
5047 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
5048 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
5049 // "gep x, i" here. Emit one "gep A, 0, i".
5050 assert(Array->getType()->isArrayType() &&
5051 "Array to pointer decay must have array source type!");
5052 LValue ArrayLV;
5053 // For simple multidimensional array indexing, set the 'accessed' flag for
5054 // better bounds-checking of the base expression.
5055 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
5056 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
5057 else
5058 ArrayLV = EmitLValue(Array);
5059
5060 // Propagate the alignment from the array itself to the result.
5061 EltPtr = emitArraySubscriptGEP(
5062 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
5063 ResultExprTy, !getLangOpts().PointerOverflowDefined,
5064 /*signedIndices=*/false, E->getExprLoc());
5065 BaseInfo = ArrayLV.getBaseInfo();
5066 TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
5067 } else {
5068 Address Base =
5069 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
5070 ResultExprTy, IsLowerBound);
5071 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
5072 !getLangOpts().PointerOverflowDefined,
5073 /*signedIndices=*/false, E->getExprLoc());
5074 }
5075
5076 return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
5077}
5078
5079 LValue CodeGenFunction::
5080 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
5081 // Emit the base vector as an l-value.
5082 LValue Base;
5083
5084 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
5085 if (E->isArrow()) {
5086 // If it is a pointer to a vector, emit the address and form an lvalue with
5087 // it.
5088 LValueBaseInfo BaseInfo;
5089 TBAAAccessInfo TBAAInfo;
5090 Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
5091 const auto *PT = E->getBase()->getType()->castAs<PointerType>();
5092 Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
5093 Base.getQuals().removeObjCGCAttr();
5094 } else if (E->getBase()->isGLValue()) {
5095 // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
5096 // emit the base as an lvalue.
5097 assert(E->getBase()->getType()->isVectorType());
5098 Base = EmitLValue(E->getBase());
5099 } else {
5100 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
5101 assert(E->getBase()->getType()->isVectorType() &&
5102 "Result must be a vector");
5103 llvm::Value *Vec = EmitScalarExpr(E->getBase());
5104
5105 // Store the vector to memory (because LValue wants an address).
5106 Address VecMem = CreateMemTemp(E->getBase()->getType());
5107 // Need to zero-extend an HLSL boolean vector to store it back to memory.
5108 QualType Ty = E->getBase()->getType();
5109 llvm::Type *LTy = convertTypeForLoadStore(Ty, Vec->getType());
5110 if (LTy->getScalarSizeInBits() > Vec->getType()->getScalarSizeInBits())
5111 Vec = Builder.CreateZExt(Vec, LTy);
5112 Builder.CreateStore(Vec, VecMem);
5114 }
5115
5116 QualType type =
5117 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
5118
5119 // Encode the element access list into a vector of unsigned indices.
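// e.g. the swizzle 'V.xzy' on a float4 encodes to the index list {0, 2, 1}.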
5120 SmallVector<uint32_t, 4> Indices;
5121 E->getEncodedElementAccess(Indices);
5122
5123 if (Base.isSimple()) {
5124 llvm::Constant *CV =
5125 llvm::ConstantDataVector::get(getLLVMContext(), Indices);
5126 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
5127 Base.getBaseInfo(), TBAAAccessInfo());
5128 }
5129 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
5130
5131 llvm::Constant *BaseElts = Base.getExtVectorElts();
5132 SmallVector<llvm::Constant *, 4> CElts;
5133
5134 for (unsigned Index : Indices)
5135 CElts.push_back(BaseElts->getAggregateElement(Index));
5136 llvm::Constant *CV = llvm::ConstantVector::get(CElts);
5137 return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
5138 Base.getBaseInfo(), TBAAAccessInfo());
5139}
5140
5141 bool CodeGenFunction::isUnderlyingBasePointerConstantNull(const Expr *E) {
5142 const Expr *UnderlyingBaseExpr = E->IgnoreParens();
5143 while (auto *BaseMemberExpr = dyn_cast<MemberExpr>(UnderlyingBaseExpr))
5144 UnderlyingBaseExpr = BaseMemberExpr->getBase()->IgnoreParens();
5145 return getContext().isSentinelNullExpr(UnderlyingBaseExpr);
5146}
5147
5148 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
5149 if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
5150 EmitIgnoredExpr(E->getBase());
5151 return EmitDeclRefLValue(DRE);
5152 }
5153
5154 Expr *BaseExpr = E->getBase();
5155 // Check whether the underlying base pointer is a constant null.
5156 // If so, we do not set inbounds flag for GEP to avoid breaking some
5157 // old-style offsetof idioms.
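// e.g. the classic '&((struct S *)0)->member' expansion of offsetof; an
// inbounds GEP on a literal null base would otherwise fold to poison.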
5158 bool IsInBounds = !getLangOpts().PointerOverflowDefined &&
5159 !isUnderlyingBasePointerConstantNull(E);
5160 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
5161 LValue BaseLV;
5162 if (E->isArrow()) {
5163 LValueBaseInfo BaseInfo;
5164 TBAAAccessInfo TBAAInfo;
5165 Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
5166 QualType PtrTy = BaseExpr->getType()->getPointeeType();
5167 SanitizerSet SkippedChecks;
5168 bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
5169 if (IsBaseCXXThis)
5170 SkippedChecks.set(SanitizerKind::Alignment, true);
5171 if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
5172 SkippedChecks.set(SanitizerKind::Null, true);
5173 EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy,
5174 /*Alignment=*/CharUnits::Zero(), SkippedChecks);
5175 BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
5176 } else
5177 BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
5178
5179 NamedDecl *ND = E->getMemberDecl();
5180 if (auto *Field = dyn_cast<FieldDecl>(ND)) {
5181 LValue LV = EmitLValueForField(BaseLV, Field, IsInBounds);
5182 setObjCGCLValueClass(getContext(), E, LV);
5183 if (getLangOpts().OpenMP) {
5184 // If the member was explicitly marked as nontemporal, mark it as
5185 // nontemporal. If the base lvalue is marked as nontemporal, mark access
5186 // to children as nontemporal too.
5187 if ((IsWrappedCXXThis(BaseExpr) &&
5188 CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
5189 BaseLV.isNontemporal())
5190 LV.setNontemporal(/*Value=*/true);
5191 }
5192 return LV;
5193 }
5194
5195 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
5196 return EmitFunctionDeclLValue(*this, E, FD);
5197
5198 llvm_unreachable("Unhandled member declaration!");
5199}
5200
5201/// Given that we are currently emitting a lambda, emit an l-value for
5202/// one of its members.
5203///
5204 LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field,
5205 llvm::Value *ThisValue) {
5206 bool HasExplicitObjectParameter = false;
5207 const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
5208 if (MD) {
5209 HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
5210 assert(MD->getParent()->isLambda());
5211 assert(MD->getParent() == Field->getParent());
5212 }
5213 LValue LambdaLV;
5214 if (HasExplicitObjectParameter) {
5215 const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
5216 auto It = LocalDeclMap.find(D);
5217 assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
5218 Address AddrOfExplicitObject = It->getSecond();
5219 if (D->getType()->isReferenceType())
5220 LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
5221 AlignmentSource::Decl);
5222 else
5223 LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
5224 D->getType().getNonReferenceType());
5225
5226 // Make sure we have an lvalue to the lambda itself and not a derived class.
5227 auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
5228 auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
5229 if (ThisTy != LambdaTy) {
5230 const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
5231 Address Base = GetAddressOfBaseClass(
5232 LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
5233 BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
5235 LambdaLV = MakeAddrLValue(Base, T);
5236 }
5237 } else {
5238 CanQualType LambdaTagType =
5239 getContext().getCanonicalTagType(Field->getParent());
5240 LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
5241 }
5242 return EmitLValueForField(LambdaLV, Field);
5243}
5244
5245 LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
5246 return EmitLValueForLambdaField(Field, CXXABIThisValue);
5247}
5248
5249/// Get the field index in the debug info. The debug info structure/union
5250/// will ignore the unnamed bitfields.
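/// e.g. for 'struct { int a; int : 16; int b; }' the AST field index of 'b'
/// is 2, but its debug info index is 1 because the unnamed bit-field is
/// skipped.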
5251 unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
5252 unsigned FieldIndex) {
5253 unsigned I = 0, Skipped = 0;
5254
5255 for (auto *F : Rec->getDefinition()->fields()) {
5256 if (I == FieldIndex)
5257 break;
5258 if (F->isUnnamedBitField())
5259 Skipped++;
5260 I++;
5261 }
5262
5263 return FieldIndex - Skipped;
5264}
5265
5266/// Get the address of a zero-sized field within a record. The resulting
5267/// address doesn't necessarily have the right type.
5268 static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
5269 const FieldDecl *Field,
5270 bool IsInBounds) {
5271 CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
5272 CGF.getContext().getFieldOffset(Field));
5273 if (Offset.isZero())
5274 return Base;
5275 Base = Base.withElementType(CGF.Int8Ty);
5276 if (!IsInBounds)
5277 return CGF.Builder.CreateConstByteGEP(Base, Offset);
5278 return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
5279}
5280
5281/// Drill down to the storage of a field without walking into
5282/// reference types.
5283///
5284/// The resulting address doesn't necessarily have the right type.
5285 static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
5286 const FieldDecl *field, bool IsInBounds) {
5287 if (isEmptyFieldForLayout(CGF.getContext(), field))
5288 return emitAddrOfZeroSizeField(CGF, base, field, IsInBounds);
5289
5290 const RecordDecl *rec = field->getParent();
5291
5292 unsigned idx =
5293 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
5294
5295 if (!IsInBounds)
5296 return CGF.Builder.CreateConstGEP2_32(base, 0, idx, field->getName());
5297
5298 return CGF.Builder.CreateStructGEP(base, idx, field->getName());
5299}
5300
5301 static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
5302 Address addr, const FieldDecl *field) {
5303 const RecordDecl *rec = field->getParent();
5304 llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
5305 base.getType(), rec->getLocation());
5306
5307 unsigned idx =
5308 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
5309
5310 return CGF.Builder.CreatePreserveStructAccessIndex(
5311 addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
5312}
5313
5314static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
5315 const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
5316 if (!RD)
5317 return false;
5318
5319 if (RD->isDynamicClass())
5320 return true;
5321
5322 for (const auto &Base : RD->bases())
5323 if (hasAnyVptr(Base.getType(), Context))
5324 return true;
5325
5326 for (const FieldDecl *Field : RD->fields())
5327 if (hasAnyVptr(Field->getType(), Context))
5328 return true;
5329
5330 return false;
5331}
5332
5333 LValue CodeGenFunction::EmitLValueForField(LValue base, const FieldDecl *field,
5334 bool IsInBounds) {
5335 LValueBaseInfo BaseInfo = base.getBaseInfo();
5336
5337 if (field->isBitField()) {
5338 const CGRecordLayout &RL =
5339 CGM.getTypes().getCGRecordLayout(field->getParent());
5340 const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
5341 const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
5342 CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
5343 Info.VolatileStorageSize != 0 &&
5344 field->getType()
5345 .withCVRQualifiers(base.getVRQualifiers())
5346 .isVolatileQualified();
5347 Address Addr = base.getAddress();
5348 unsigned Idx = RL.getLLVMFieldNo(field);
5349 const RecordDecl *rec = field->getParent();
5350 if (hasBPFPreserveStaticOffset(rec))
5351 Addr = wrapWithBPFPreserveStaticOffset(*this, Addr);
5352 if (!UseVolatile) {
5353 if (!IsInPreservedAIRegion &&
5354 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
5355 if (Idx != 0) {
5356 // For structs, we GEP to the field that the record layout suggests.
5357 if (!IsInBounds)
5358 Addr = Builder.CreateConstGEP2_32(Addr, 0, Idx, field->getName());
5359 else
5360 Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
5361 }
5362 } else {
5363 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
5364 getContext().getCanonicalTagType(rec), rec->getLocation());
5365 Addr = Builder.CreatePreserveStructAccessIndex(
5366 Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
5367 DbgInfo);
5368 }
5369 }
5370 const unsigned SS =
5371 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
5372 // Get the access type.
5373 llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
5374 Addr = Addr.withElementType(FieldIntTy);
5375 if (UseVolatile) {
5376 const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
5377 if (VolatileOffset)
5378 Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
5379 }
5380
5381 QualType fieldType =
5382 field->getType().withCVRQualifiers(base.getVRQualifiers());
5383 // TODO: Support TBAA for bit fields.
5384 LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
5385 return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
5386 TBAAAccessInfo());
5387 }
5388
5389 // Fields of may-alias structures are may-alias themselves.
5390 // FIXME: this should get propagated down through anonymous structs
5391 // and unions.
5392 QualType FieldType = field->getType();
5393 const RecordDecl *rec = field->getParent();
5394 AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
5395 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
5396 TBAAAccessInfo FieldTBAAInfo;
5397 if (base.getTBAAInfo().isMayAlias() ||
5398 rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
5399 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
5400 } else if (rec->isUnion()) {
5401 // TODO: Support TBAA for unions.
5402 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
5403 } else {
5404 // If no base type has been assigned for the base access, then try to generate
5405 // one for this base lvalue.
5406 FieldTBAAInfo = base.getTBAAInfo();
5407 if (!FieldTBAAInfo.BaseType) {
5408 FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
5409 assert(!FieldTBAAInfo.Offset &&
5410 "Nonzero offset for an access with no base type!");
5411 }
5412
5413 // Adjust offset to be relative to the base type.
5414 const ASTRecordLayout &Layout =
5415 getContext().getASTRecordLayout(field->getParent());
5416 unsigned CharWidth = getContext().getCharWidth();
5417 if (FieldTBAAInfo.BaseType)
5418 FieldTBAAInfo.Offset +=
5419 Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
5420
5421 // Update the final access type and size.
5422 FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
5423 FieldTBAAInfo.Size =
5424 getContext().getTypeSizeInChars(FieldType).getQuantity();
5425 }
5426
5427 Address addr = base.getAddress();
5428 if (hasBPFPreserveStaticOffset(rec))
5429 addr = wrapWithBPFPreserveStaticOffset(*this, addr);
5430 if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
5431 if (CGM.getCodeGenOpts().StrictVTablePointers &&
5432 ClassDef->isDynamicClass()) {
5433 // Getting to any field of dynamic object requires stripping dynamic
5434 // information provided by invariant.group. This is because accessing
5435 // fields may leak the real address of dynamic object, which could result
5436 // in miscompilation when leaked pointer would be compared.
5437 auto *stripped =
5438 Builder.CreateStripInvariantGroup(addr.emitRawPointer(*this));
5439 addr = Address(stripped, addr.getElementType(), addr.getAlignment());
5440 }
5441 }
5442
5443 unsigned RecordCVR = base.getVRQualifiers();
5444 if (rec->isUnion()) {
5445 // For unions, there is no pointer adjustment.
5446 if (CGM.getCodeGenOpts().StrictVTablePointers &&
5447 hasAnyVptr(FieldType, getContext()))
5448 // Because unions can easily skip invariant.barriers, we need to add
5449 // a barrier every time a CXXRecord field with a vptr is referenced.
5450 addr = Builder.CreateLaunderInvariantGroup(addr);
5451
5452 if (IsInPreservedAIRegion ||
5453 (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
5454 // Remember the original union field index
5455 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
5456 rec->getLocation());
5457 addr =
5458 Address(Builder.CreatePreserveUnionAccessIndex(
5459 addr.emitRawPointer(*this),
5460 getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
5461 addr.getElementType(), addr.getAlignment());
5462 }
5463
5464 if (FieldType->isReferenceType())
5465 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5466 } else {
5467 if (!IsInPreservedAIRegion &&
5468 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
5469 // For structs, we GEP to the field that the record layout suggests.
5470 addr = emitAddrOfFieldStorage(*this, addr, field, IsInBounds);
5471 else
5472 // Remember the original struct field index
5473 addr = emitPreserveStructAccess(*this, base, addr, field);
5474 }
5475
5476 // If this is a reference field, load the reference right now.
5477 if (FieldType->isReferenceType()) {
5478 LValue RefLVal =
5479 MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5480 if (RecordCVR & Qualifiers::Volatile)
5481 RefLVal.getQuals().addVolatile();
5482 addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
5483
5484 // Qualifiers on the struct don't apply to the referencee.
5485 RecordCVR = 0;
5486 FieldType = FieldType->getPointeeType();
5487 }
5488
5489 // Make sure that the address is pointing to the right type. This is critical
5490 // for both unions and structs.
5491 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5492
5493 if (field->hasAttr<AnnotateAttr>())
5494 addr = EmitFieldAnnotations(field, addr);
5495
5496 LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5497 LV.getQuals().addCVRQualifiers(RecordCVR);
5498
5499 // __weak attribute on a field is ignored.
5500 if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
5501 LV.getQuals().removeObjCGCAttr();
5502
5503 return LV;
5504}
5505
5506LValue
5508 const FieldDecl *Field) {
5509 QualType FieldType = Field->getType();
5510
5511 if (!FieldType->isReferenceType())
5512 return EmitLValueForField(Base, Field);
5513
5513
5514 Address V = emitAddrOfFieldStorage(
5515 *this, Base.getAddress(), Field,
5516 /*IsInBounds=*/!getLangOpts().PointerOverflowDefined);
5517
5518 // Make sure that the address is pointing to the right type.
5519 llvm::Type *llvmType = ConvertTypeForMem(FieldType);
5520 V = V.withElementType(llvmType);
5521
5522 // TODO: Generate TBAA information that describes this access as a structure
5523 // member access and not just an access to an object of the field's type. This
5524 // should be similar to what we do in EmitLValueForField().
5525 LValueBaseInfo BaseInfo = Base.getBaseInfo();
5526 AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
5527 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
5528 return MakeAddrLValue(V, FieldType, FieldBaseInfo,
5529 CGM.getTBAAInfoForSubobject(Base, FieldType));
5530}
5531
5532 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E) {
5533 if (E->isFileScope()) {
5534 ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
5535 return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
5536 }
5537 if (E->getType()->isVariablyModifiedType())
5538 // make sure to emit the VLA size.
5539 EmitVariablyModifiedType(E->getType());
5540
5541 Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
5542 const Expr *InitExpr = E->getInitializer();
5543 LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);
5544
5545 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
5546 /*Init*/ true);
5547
5548 // Block-scope compound literals are destroyed at the end of the enclosing
5549 // scope in C.
5550 if (!getLangOpts().CPlusPlus)
5551 if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
5552 pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr,
5553 E->getType(), getDestroyer(DtorKind),
5554 DtorKind & EHCleanup);
5555
5556 return Result;
5557}
5558
5559 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
5560 if (!E->isGLValue())
5561 // Initializing an aggregate temporary in C++11: T{...}.
5562 return EmitAggExprToLValue(E);
5563
5564 // An lvalue initializer list must be initializing a reference.
5565 assert(E->isTransparent() && "non-transparent glvalue init list");
5566 return EmitLValue(E->getInit(0));
5567}
5568
5569/// Emit the operand of a glvalue conditional operator. This is either a glvalue
5570/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
5571/// LValue is returned and the current block has been terminated.
5572static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
5573 const Expr *Operand) {
5574 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
5575 CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
5576 return std::nullopt;
5577 }
5578
5579 return CGF.EmitLValue(Operand);
5580}
5581
5582namespace {
5583// Handle the case where the condition is a constant evaluatable simple integer,
5584// which means we don't have to separately handle the true/false blocks.
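// e.g. 'true ? x : y' emits only 'x' (as long as 'y' contains no labels), so
// no branch or extra basic blocks are needed.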
5585std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
5586 CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
5587 const Expr *condExpr = E->getCond();
5588 bool CondExprBool;
5589 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5590 const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
5591 if (!CondExprBool)
5592 std::swap(Live, Dead);
5593
5594 if (!CGF.ContainsLabel(Dead)) {
5595 // If the true case is live, we need to track its region.
5596 if (CondExprBool)
5597 CGF.incrementProfileCounter(E);
5598 CGF.markStmtMaybeUsed(Dead);
5599 // If the live operand is a throw expression, we emit it and return an undefined lvalue
5600 // because it can't be used.
5601 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
5602 CGF.EmitCXXThrowExpr(ThrowExpr);
5603 llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
5604 llvm::Type *Ty = CGF.UnqualPtrTy;
5605 return CGF.MakeAddrLValue(
5606 Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
5607 Dead->getType());
5608 }
5609 return CGF.EmitLValue(Live);
5610 }
5611 }
5612 return std::nullopt;
5613}
5614struct ConditionalInfo {
5615 llvm::BasicBlock *lhsBlock, *rhsBlock;
5616 std::optional<LValue> LHS, RHS;
5617};
5618
5619// Create and generate the 3 blocks for a conditional operator.
5620// Leaves the 'current block' in the continuation basic block.
5621template<typename FuncTy>
5622ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
5623 const AbstractConditionalOperator *E,
5624 const FuncTy &BranchGenFunc) {
5625 ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
5626 CGF.createBasicBlock("cond.false"), std::nullopt,
5627 std::nullopt};
5628 llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");
5629
5630 CodeGenFunction::ConditionalEvaluation eval(CGF);
5631 CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
5632 CGF.getProfileCount(E));
5633
5634 // Any temporaries created here are conditional.
5635 CGF.EmitBlock(Info.lhsBlock);
5636 CGF.incrementProfileCounter(E);
5637 eval.begin(CGF);
5638 Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
5639 eval.end(CGF);
5640 Info.lhsBlock = CGF.Builder.GetInsertBlock();
5641
5642 if (Info.LHS)
5643 CGF.Builder.CreateBr(endBlock);
5644
5645 // Any temporaries created here are conditional.
5646 CGF.EmitBlock(Info.rhsBlock);
5647 eval.begin(CGF);
5648 Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
5649 eval.end(CGF);
5650 Info.rhsBlock = CGF.Builder.GetInsertBlock();
5651 CGF.EmitBlock(endBlock);
5652
5653 return Info;
5654}
5655} // namespace
5656
5657 void CodeGenFunction::EmitIgnoredConditionalOperator(
5658 const AbstractConditionalOperator *E) {
5659 if (!E->isGLValue()) {
5660 // ?: here should be an aggregate.
5661 assert(hasAggregateEvaluationKind(E->getType()) &&
5662 "Unexpected conditional operator!");
5663 return (void)EmitAggExprToLValue(E);
5664 }
5665
5666 OpaqueValueMapping binding(*this, E);
5667 if (HandleConditionalOperatorLValueSimpleCase(*this, E))
5668 return;
5669
5670 EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
5671 CGF.EmitIgnoredExpr(E);
5672 return LValue{};
5673 });
5674}
5675 LValue CodeGenFunction::EmitConditionalOperatorLValue(
5676 const AbstractConditionalOperator *expr) {
5677 if (!expr->isGLValue()) {
5678 // ?: here should be an aggregate.
5679 assert(hasAggregateEvaluationKind(expr->getType()) &&
5680 "Unexpected conditional operator!");
5681 return EmitAggExprToLValue(expr);
5682 }
5683
5684 OpaqueValueMapping binding(*this, expr);
5685 if (std::optional<LValue> Res =
5686 HandleConditionalOperatorLValueSimpleCase(*this, expr))
5687 return *Res;
5688
5689 ConditionalInfo Info = EmitConditionalBlocks(
5690 *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
5691 return EmitLValueOrThrowExpression(CGF, E);
5692 });
5693
5694 if ((Info.LHS && !Info.LHS->isSimple()) ||
5695 (Info.RHS && !Info.RHS->isSimple()))
5696 return EmitUnsupportedLValue(expr, "conditional operator");
5697
5698 if (Info.LHS && Info.RHS) {
5699 Address lhsAddr = Info.LHS->getAddress();
5700 Address rhsAddr = Info.RHS->getAddress();
5701 Address result = mergeAddressesInConditionalExpr(
5702 lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
5703 Builder.GetInsertBlock(), expr->getType());
5704 AlignmentSource alignSource =
5705 std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
5706 Info.RHS->getBaseInfo().getAlignmentSource());
5707 TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
5708 Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
5709 return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
5710 TBAAInfo);
5711 } else {
5712 assert((Info.LHS || Info.RHS) &&
5713 "both operands of glvalue conditional are throw-expressions?");
5714 return Info.LHS ? *Info.LHS : *Info.RHS;
5715 }
5716}
5717
5718/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
5719/// type. If the cast is to a reference, we can have the usual lvalue result,
5720/// otherwise if a cast is needed by the code generator in an lvalue context,
5721/// then it must mean that we need the address of an aggregate in order to
5722/// access one of its members. This can happen for all the reasons that casts
5723/// are permitted with aggregate result, including noop aggregate casts, and
5724/// cast from scalar to union.
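/// For example, a reference cast such as '(Base &)derived' yields an ordinary
/// lvalue, while a GNU cast-to-union '(union U)x' needs the address of an
/// aggregate temporary so a member of the result can be accessed.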
5725 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
5726 switch (E->getCastKind()) {
5727 case CK_ToVoid:
5728 case CK_BitCast:
5729 case CK_LValueToRValueBitCast:
5730 case CK_ArrayToPointerDecay:
5731 case CK_FunctionToPointerDecay:
5732 case CK_NullToMemberPointer:
5733 case CK_NullToPointer:
5734 case CK_IntegralToPointer:
5735 case CK_PointerToIntegral:
5736 case CK_PointerToBoolean:
5737 case CK_IntegralCast:
5738 case CK_BooleanToSignedIntegral:
5739 case CK_IntegralToBoolean:
5740 case CK_IntegralToFloating:
5741 case CK_FloatingToIntegral:
5742 case CK_FloatingToBoolean:
5743 case CK_FloatingCast:
5744 case CK_FloatingRealToComplex:
5745 case CK_FloatingComplexToReal:
5746 case CK_FloatingComplexToBoolean:
5747 case CK_FloatingComplexCast:
5748 case CK_FloatingComplexToIntegralComplex:
5749 case CK_IntegralRealToComplex:
5750 case CK_IntegralComplexToReal:
5751 case CK_IntegralComplexToBoolean:
5752 case CK_IntegralComplexCast:
5753 case CK_IntegralComplexToFloatingComplex:
5754 case CK_DerivedToBaseMemberPointer:
5755 case CK_BaseToDerivedMemberPointer:
5756 case CK_MemberPointerToBoolean:
5757 case CK_ReinterpretMemberPointer:
5758 case CK_AnyPointerToBlockPointerCast:
5759 case CK_ARCProduceObject:
5760 case CK_ARCConsumeObject:
5761 case CK_ARCReclaimReturnedObject:
5762 case CK_ARCExtendBlockObject:
5763 case CK_CopyAndAutoreleaseBlockObject:
5764 case CK_IntToOCLSampler:
5765 case CK_FloatingToFixedPoint:
5766 case CK_FixedPointToFloating:
5767 case CK_FixedPointCast:
5768 case CK_FixedPointToBoolean:
5769 case CK_FixedPointToIntegral:
5770 case CK_IntegralToFixedPoint:
5771 case CK_MatrixCast:
5772 case CK_HLSLVectorTruncation:
5773 case CK_HLSLArrayRValue:
5774 case CK_HLSLElementwiseCast:
5775 case CK_HLSLAggregateSplatCast:
5776 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5777
5778 case CK_Dependent:
5779 llvm_unreachable("dependent cast kind in IR gen!");
5780
5781 case CK_BuiltinFnToFnPtr:
5782 llvm_unreachable("builtin functions are handled elsewhere");
5783
5784 // These are never l-values; just use the aggregate emission code.
5785 case CK_NonAtomicToAtomic:
5786 case CK_AtomicToNonAtomic:
5787 return EmitAggExprToLValue(E);
5788
5789 case CK_Dynamic: {
5790 LValue LV = EmitLValue(E->getSubExpr());
5791 Address V = LV.getAddress();
5792 const auto *DCE = cast<CXXDynamicCastExpr>(E);
5794 }
5795
5796 case CK_ConstructorConversion:
5797 case CK_UserDefinedConversion:
5798 case CK_CPointerToObjCPointerCast:
5799 case CK_BlockPointerToObjCPointerCast:
5800 case CK_LValueToRValue:
5801 return EmitLValue(E->getSubExpr());
5802
5803 case CK_NoOp: {
5804 // CK_NoOp can model a qualification conversion, which can remove an array
5805 // bound and change the IR type.
5806 // FIXME: Once pointee types are removed from IR, remove this.
5807 LValue LV = EmitLValue(E->getSubExpr());
5808 // Propagate the volatile qualifier to LValue, if it exists in E.
5810 LV.getQuals() = E->getType().getQualifiers();
5811 if (LV.isSimple()) {
5812 Address V = LV.getAddress();
5813 if (V.isValid()) {
5814 llvm::Type *T = ConvertTypeForMem(E->getType());
5815 if (V.getElementType() != T)
5816 LV.setAddress(V.withElementType(T));
5817 }
5818 }
5819 return LV;
5820 }
5821
5822 case CK_UncheckedDerivedToBase:
5823 case CK_DerivedToBase: {
5824 auto *DerivedClassDecl = E->getSubExpr()->getType()->castAsCXXRecordDecl();
5825 LValue LV = EmitLValue(E->getSubExpr());
5826 Address This = LV.getAddress();
5827
5828 // Perform the derived-to-base conversion
5829 Address Base = GetAddressOfBaseClass(
5830 This, DerivedClassDecl, E->path_begin(), E->path_end(),
5831 /*NullCheckValue=*/false, E->getExprLoc());
5832
5833 // TODO: Support accesses to members of base classes in TBAA. For now, we
5834 // conservatively pretend that the complete object is of the base class
5835 // type.
5836 return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
5837 CGM.getTBAAInfoForSubobject(LV, E->getType()));
5838 }
5839 case CK_ToUnion:
5840 return EmitAggExprToLValue(E);
5841 case CK_BaseToDerived: {
5842 auto *DerivedClassDecl = E->getType()->castAsCXXRecordDecl();
5843 LValue LV = EmitLValue(E->getSubExpr());
5844
5845 // Perform the base-to-derived conversion
5846 Address Derived = GetAddressOfDerivedClass(
5847 LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
5848 /*NullCheckValue=*/false);
5849
5850 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
5851 // performed and the object is not of the derived type.
5852 if (sanitizePerformTypeCheck())
5853 EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), Derived,
5854 E->getType());
5855
5856 if (SanOpts.has(SanitizerKind::CFIDerivedCast))
5857 EmitVTablePtrCheckForCast(E->getType(), Derived,
5858 /*MayBeNull=*/false, CFITCK_DerivedCast,
5859 E->getBeginLoc());
5860
5861 return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
5862 CGM.getTBAAInfoForSubobject(LV, E->getType()));
5863 }
5864 case CK_LValueBitCast: {
5865 // This must be a reinterpret_cast (or c-style equivalent).
5866 const auto *CE = cast<ExplicitCastExpr>(E);
5867
5868 CGM.EmitExplicitCastExprType(CE, this);
5869 LValue LV = EmitLValue(E->getSubExpr());
5870 Address V = LV.getAddress().withElementType(
5871 ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
5872
5873 if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
5875 /*MayBeNull=*/false, CFITCK_UnrelatedCast,
5876 E->getBeginLoc());
5877
5878 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5879 CGM.getTBAAInfoForSubobject(LV, E->getType()));
5880 }
5881 case CK_AddressSpaceConversion: {
5882 LValue LV = EmitLValue(E->getSubExpr());
5883 QualType DestTy = getContext().getPointerType(E->getType());
5884 llvm::Value *V = getTargetHooks().performAddrSpaceCast(
5885 *this, LV.getPointer(*this),
5886 E->getSubExpr()->getType().getAddressSpace(), ConvertType(DestTy));
5887 return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
5888 LV.getAddress().getAlignment()),
5889 E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
5890 }
5891 case CK_ObjCObjectLValueCast: {
5892 LValue LV = EmitLValue(E->getSubExpr());
5893 Address V = LV.getAddress().withElementType(ConvertType(E->getType()));
5894 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5895 CGM.getTBAAInfoForSubobject(LV, E->getType()));
5896 }
5897 case CK_ZeroToOCLOpaqueType:
5898 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
5899
5900 case CK_VectorSplat: {
5901 // LValue results of vector splats are only supported in HLSL.
5902 if (!getLangOpts().HLSL)
5903 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5904 return EmitLValue(E->getSubExpr());
5905 }
5906 }
5907
5908 llvm_unreachable("Unhandled lvalue cast kind?");
5909}
5910
5915
5916std::pair<LValue, LValue>
5918 // Emitting the casted temporary through an opaque value.
5919 LValue BaseLV = EmitLValue(E->getArgLValue());
5921
5922 QualType ExprTy = E->getType();
5923 Address OutTemp = CreateIRTemp(ExprTy);
5924 LValue TempLV = MakeAddrLValue(OutTemp, ExprTy);
5925
5926 if (E->isInOut())
5928 TempLV);
5929
5931 return std::make_pair(BaseLV, TempLV);
5932}
5933
5935 CallArgList &Args, QualType Ty) {
5936
5937 auto [BaseLV, TempLV] = EmitHLSLOutArgLValues(E, Ty);
5938
5939 llvm::Value *Addr = TempLV.getAddress().getBasePointer();
5940 llvm::Type *ElTy = ConvertTypeForMem(TempLV.getType());
5941
5943
5944 Address TmpAddr(Addr, ElTy, TempLV.getAlignment());
5945 Args.addWriteback(BaseLV, TmpAddr, nullptr, E->getWritebackCast());
5946 Args.add(RValue::get(TmpAddr, *this), Ty);
5947 return TempLV;
5948}
5949
5950LValue
5953
5954 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
5955 it = OpaqueLValues.find(e);
5956
5957 if (it != OpaqueLValues.end())
5958 return it->second;
5959
5960 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
5961 return EmitLValue(e->getSourceExpr());
5962}
5963
5964RValue
5967
5968 llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
5969 it = OpaqueRValues.find(e);
5970
5971 if (it != OpaqueRValues.end())
5972 return it->second;
5973
5974 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
5975 return EmitAnyExpr(e->getSourceExpr());
5976}
5977
5980 return OpaqueLValues.contains(E);
5981 return OpaqueRValues.contains(E);
5982}
5983
5984 RValue CodeGenFunction::EmitRValueForField(LValue LV,
5985 const FieldDecl *FD,
5986 SourceLocation Loc) {
5987 QualType FT = FD->getType();
5988 LValue FieldLV = EmitLValueForField(LV, FD);
5989 switch (getEvaluationKind(FT)) {
5990 case TEK_Complex:
5991 return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
5992 case TEK_Aggregate:
5993 return FieldLV.asAggregateRValue();
5994 case TEK_Scalar:
5995 // This routine is used to load fields one-by-one to perform a copy, so
5996 // don't load reference fields.
5997 if (FD->getType()->isReferenceType())
5998 return RValue::get(FieldLV.getPointer(*this));
5999 // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
6000 // primitive load.
6001 if (FieldLV.isBitField())
6002 return EmitLoadOfLValue(FieldLV, Loc);
6003 return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
6004 }
6005 llvm_unreachable("bad evaluation kind");
6006}
6007
6008//===--------------------------------------------------------------------===//
6009// Expression Emission
6010//===--------------------------------------------------------------------===//
6011
6012 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
6013 ReturnValueSlot ReturnValue,
6014 llvm::CallBase **CallOrInvoke) {
6015 llvm::CallBase *CallOrInvokeStorage;
6016 if (!CallOrInvoke) {
6017 CallOrInvoke = &CallOrInvokeStorage;
6018 }
6019
6020 auto AddCoroElideSafeOnExit = llvm::make_scope_exit([&] {
6021 if (E->isCoroElideSafe()) {
6022 auto *I = *CallOrInvoke;
6023 if (I)
6024 I->addFnAttr(llvm::Attribute::CoroElideSafe);
6025 }
6026 });
6027
6028 // Builtins never have block type.
6029 if (E->getCallee()->getType()->isBlockPointerType())
6030 return EmitBlockCallExpr(E, ReturnValue, CallOrInvoke);
6031
6032 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
6033 return EmitCXXMemberCallExpr(CE, ReturnValue, CallOrInvoke);
6034
6035 if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
6036 return EmitCUDAKernelCallExpr(CE, ReturnValue, CallOrInvoke);
6037
6038 // A CXXOperatorCallExpr is created even for explicit object methods, but
6039 // these should be treated like static function calls.
6040 if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
6041 if (const auto *MD =
6042 dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
6043 MD && MD->isImplicitObjectMemberFunction())
6044 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue, CallOrInvoke);
6045
6046 CGCallee callee = EmitCallee(E->getCallee());
6047
6048 if (callee.isBuiltin()) {
6049 return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
6050 E, ReturnValue);
6051 }
6052
6053 if (callee.isPseudoDestructor()) {
6054 return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
6055 }
6056
6057 return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue,
6058 /*Chain=*/nullptr, CallOrInvoke);
6059}
6060
6061/// Emit a CallExpr without considering whether it might be a subclass.
6062 RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
6063 ReturnValueSlot ReturnValue,
6064 llvm::CallBase **CallOrInvoke) {
6065 CGCallee Callee = EmitCallee(E->getCallee());
6066 return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
6067 /*Chain=*/nullptr, CallOrInvoke);
6068}
6069
6070// Detect the unusual situation where an inline version is shadowed by a
6071// non-inline version. In that case we should pick the external one
6072// everywhere. That's GCC behavior too.
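// e.g. a header provides an 'extern inline' wrapper for a builtin while the
// translation unit also defines a non-inline version; every call should then
// resolve to the external definition.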
6073 static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD) {
6074 for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
6075 if (!PD->isInlineBuiltinDeclaration())
6076 return false;
6077 return true;
6078}
6079
6080 static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
6081 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
6082
6083 if (auto builtinID = FD->getBuiltinID()) {
6084 std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
6085 std::string NoBuiltins = "no-builtins";
6086
6087 StringRef Ident = CGF.CGM.getMangledName(GD);
6088 std::string FDInlineName = (Ident + ".inline").str();
6089
6090 bool IsPredefinedLibFunction =
6091 CGF.getContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
6092 bool HasAttributeNoBuiltin =
6093 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
6094 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
6095
6096 // When directly calling an inline builtin, call it through its mangled
6097 // name to make it clear it's not the actual builtin.
6098 if (CGF.CurFn->getName() != FDInlineName &&
6099 OnlyHasInlineBuiltinDeclaration(FD)) {
6100 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
6101 llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
6102 llvm::Module *M = Fn->getParent();
6103 llvm::Function *Clone = M->getFunction(FDInlineName);
6104 if (!Clone) {
6105 Clone = llvm::Function::Create(Fn->getFunctionType(),
6106 llvm::GlobalValue::InternalLinkage,
6107 Fn->getAddressSpace(), FDInlineName, M);
6108 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
6109 }
6110 return CGCallee::forDirect(Clone, GD);
6111 }
6112
6113 // Replaceable builtins provide their own implementation of a builtin. If we
6114 // are in an inline builtin implementation, avoid trivial infinite
6115 // recursion. Honor __attribute__((no_builtin("foo"))) or
6116 // __attribute__((no_builtin)) on the current function, unless foo is
6117 // not a predefined library function, in which case we must generate the
6118 // builtin no matter what.
6119 else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
6120 return CGCallee::forBuiltin(builtinID, FD);
6121 }
6122
6123 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
6124 if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
6125 FD->hasAttr<CUDAGlobalAttr>())
6126 CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
6127 cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));
6128
6129 return CGCallee::forDirect(CalleePtr, GD);
6130}
6131
6133 if (DeviceKernelAttr::isOpenCLSpelling(FD->getAttr<DeviceKernelAttr>()))
6135 return GlobalDecl(FD);
6136}
6137
6139 E = E->IgnoreParens();
6140
6141 // Look through function-to-pointer decay.
6142 if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
6143 if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
6144 ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
6145 return EmitCallee(ICE->getSubExpr());
6146 }
6147
6148 // Try to remember the original __ptrauth qualifier for loads of
6149 // function pointers.
6150 if (ICE->getCastKind() == CK_LValueToRValue) {
6151 const Expr *SubExpr = ICE->getSubExpr();
6152 if (const auto *PtrType = SubExpr->getType()->getAs<PointerType>()) {
6153 std::pair<llvm::Value *, CGPointerAuthInfo> Result =
6155
6156 QualType FunctionType = PtrType->getPointeeType();
6157 assert(FunctionType->isFunctionType());
6158
6159 GlobalDecl GD;
6160 if (const auto *VD =
6161 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee())) {
6162 GD = GlobalDecl(VD);
6163 }
6164 CGCalleeInfo CalleeInfo(FunctionType->getAs<FunctionProtoType>(), GD);
6165 CGCallee Callee(CalleeInfo, Result.first, Result.second);
6166 return Callee;
6167 }
6168 }
6169
6170 // Resolve direct calls.
6171 } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
6172 if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
6174 }
6175 } else if (auto ME = dyn_cast<MemberExpr>(E)) {
6176 if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
6177 EmitIgnoredExpr(ME->getBase());
6178 return EmitDirectCallee(*this, FD);
6179 }
6180
6181 // Look through template substitutions.
6182 } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
6183 return EmitCallee(NTTP->getReplacement());
6184
6185 // Treat pseudo-destructor calls differently.
6186 } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
6187 return CGCallee::forPseudoDestructor(PDE);
6188 }
6189
6190 // Otherwise, we have an indirect reference.
6191 llvm::Value *calleePtr;
6192 QualType functionType;
6193 if (auto ptrType = E->getType()->getAs<PointerType>()) {
6194 calleePtr = EmitScalarExpr(E);
6195 functionType = ptrType->getPointeeType();
6196 } else {
6197 functionType = E->getType();
6198 calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
6199 }
6200 assert(functionType->isFunctionType());
6201
6202 GlobalDecl GD;
6203 if (const auto *VD =
6204 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
6205 GD = GlobalDecl(VD);
6206
6207 CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
6208 CGPointerAuthInfo pointerAuth = CGM.getFunctionPointerAuthInfo(functionType);
6209 CGCallee callee(calleeInfo, calleePtr, pointerAuth);
6210 return callee;
6211}
6212
6213 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
6214 // Comma expressions just emit their LHS then their RHS as an l-value.
6215 if (E->getOpcode() == BO_Comma) {
6216 EmitIgnoredExpr(E->getLHS());
6217 EnsureInsertPoint();
6218 return EmitLValue(E->getRHS());
6219 }
6220
6221 if (E->getOpcode() == BO_PtrMemD ||
6222 E->getOpcode() == BO_PtrMemI)
6223 return EmitPointerToDataMemberBinaryExpr(E);
6224
6225 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
6226
6227 // Create a Key Instructions source location atom group that covers both
6228 // LHS and RHS expressions. Nested RHS expressions may get subsequently
6229 // separately grouped (1 below):
6230 //
6231 // 1. `a = b = c` -> Two atoms.
6232 // 2. `x = new(1)` -> One atom (for both addr store and value store).
6233 // 3. Complex and agg assignment -> One atom.
6235
6236 // Note that in all of these cases, __block variables need the RHS
6237 // evaluated first just in case the variable gets moved by the RHS.
6238
6239 switch (getEvaluationKind(E->getType())) {
6240 case TEK_Scalar: {
6241 if (PointerAuthQualifier PtrAuth =
6242 E->getLHS()->getType().getPointerAuth()) {
6243 LValue LV = EmitLValue(E->getLHS());
6244 LValue CopiedLV = LV;
6245 CopiedLV.getQuals().removePointerAuth();
6246 llvm::Value *RV =
6247 EmitPointerAuthQualify(PtrAuth, E->getRHS(), CopiedLV.getAddress());
6248 EmitNullabilityCheck(CopiedLV, RV, E->getExprLoc());
6249 EmitStoreThroughLValue(RValue::get(RV), CopiedLV);
6250 return LV;
6251 }
6252
6253 switch (E->getLHS()->getType().getObjCLifetime()) {
6254 case Qualifiers::OCL_Strong:
6255 return EmitARCStoreStrong(E, /*ignored*/ false).first;
6256
6257 case Qualifiers::OCL_Autoreleasing:
6258 return EmitARCStoreAutoreleasing(E).first;
6259
6260 // No reason to do any of these differently.
6264 break;
6265 }
6266
6267 // TODO: Can we de-duplicate this code with the corresponding code in
6268 // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
6269 RValue RV;
6270 llvm::Value *Previous = nullptr;
6271 QualType SrcType = E->getRHS()->getType();
6272 // Check if LHS is a bitfield; if RHS contains an implicit cast expression,
6273 // we want to extract that value and potentially (if the bitfield sanitizer
6274 // is enabled) use it to check for an implicit conversion.
6275 if (E->getLHS()->refersToBitField()) {
6276 llvm::Value *RHS =
6278 RV = RValue::get(RHS);
6279 } else
6280 RV = EmitAnyExpr(E->getRHS());
6281
6283
6284 if (RV.isScalar())
6286
6287 if (LV.isBitField()) {
6288 llvm::Value *Result = nullptr;
6289 // If bitfield sanitizers are enabled we want to use the result
6290 // to check whether a truncation or sign change has occurred.
6291 if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
6293 else
6295
6296 // If the expression contained an implicit conversion, make sure
6297 // to use the value before the scalar conversion.
6298 llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
6299 QualType DstType = E->getLHS()->getType();
6300 EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
6301 LV.getBitFieldInfo(), E->getExprLoc());
6302 } else
6303 EmitStoreThroughLValue(RV, LV);
6304
6305 if (getLangOpts().OpenMP)
6306 CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
6307 E->getLHS());
6308 return LV;
6309 }
6310
6311 case TEK_Complex:
6312 return EmitComplexAssignmentLValue(E);
6313
6314 case TEK_Aggregate:
6315 // If the lang opt is HLSL and the LHS is a constant array
6316 // then we are performing a copy assignment and call a special
6317 // function because EmitAggExprToLValue emits to a temporary LValue.
6318 if (getLangOpts().HLSL && E->getLHS()->getType()->isConstantArrayType())
6319 return EmitHLSLArrayAssignLValue(E);
6320
6321 return EmitAggExprToLValue(E);
6322 }
6323 llvm_unreachable("bad evaluation kind");
6324}
6325
6326// This function implements trivial copy assignment for HLSL's
6327// assignable constant arrays.
6328 LValue CodeGenFunction::EmitHLSLArrayAssignLValue(const BinaryOperator *E) {
6329 // Don't emit an LValue for the RHS because it might not be an LValue
6330 LValue LHS = EmitLValue(E->getLHS());
6331 // In C the RHS of an assignment operator is an RValue.
6332 // EmitAggregateAssign takes an LValue for the RHS. Instead we can call
6333 // EmitInitializationToLValue to emit an RValue into an LValue.
6334 EmitInitializationToLValue(E->getRHS(), LHS);
6335 return LHS;
6336}
6337
6338 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E,
6339 llvm::CallBase **CallOrInvoke) {
6340 RValue RV = EmitCallExpr(E, ReturnValueSlot(), CallOrInvoke);
6341
6342 if (!RV.isScalar())
6343 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
6345
6346 assert(E->getCallReturnType(getContext())->isReferenceType() &&
6347 "Can't have a scalar return unless the return type is a "
6348 "reference type!");
6349
6351}
6352
6354 // FIXME: This shouldn't require another copy.
6355 return EmitAggExprToLValue(E);
6356}
6357
6360 && "binding l-value to type which needs a temporary");
6361 AggValueSlot Slot = CreateAggTemp(E->getType());
6362 EmitCXXConstructExpr(E, Slot);
6364}
6365
6366LValue
6370
6372 return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
6373 .withElementType(ConvertType(E->getType()));
6374}
6375
6380
6381LValue
6389
6392
6393 if (!RV.isScalar())
6394 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
6396
6397 assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
6398 "Can't have a scalar return unless the return type is a "
6399 "reference type!");
6400
6402}
6403
6405 Address V =
6406 CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
6408}
6409
6411 const ObjCIvarDecl *Ivar) {
6412 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
6413}
6414
6415llvm::Value *
6417 const ObjCIvarDecl *Ivar) {
6418 llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
6419 QualType PointerDiffType = getContext().getPointerDiffType();
6420 return Builder.CreateZExtOrTrunc(OffsetValue,
6421 getTypes().ConvertType(PointerDiffType));
6422}
6423
6425 llvm::Value *BaseValue,
6426 const ObjCIvarDecl *Ivar,
6427 unsigned CVRQualifiers) {
6428 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
6429 Ivar, CVRQualifiers);
6430}
6431
6433 // FIXME: A lot of the code below could be shared with EmitMemberExpr.
6434 llvm::Value *BaseValue = nullptr;
6435 const Expr *BaseExpr = E->getBase();
6436 Qualifiers BaseQuals;
6437 QualType ObjectTy;
6438 if (E->isArrow()) {
6439 BaseValue = EmitScalarExpr(BaseExpr);
6440 ObjectTy = BaseExpr->getType()->getPointeeType();
6441 BaseQuals = ObjectTy.getQualifiers();
6442 } else {
6443 LValue BaseLV = EmitLValue(BaseExpr);
6444 BaseValue = BaseLV.getPointer(*this);
6445 ObjectTy = BaseExpr->getType();
6446 BaseQuals = ObjectTy.getQualifiers();
6447 }
6448
6449 LValue LV =
6450 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
6451 BaseQuals.getCVRQualifiers());
6453 return LV;
6454}
6455
6457 // Can only get l-value for message expression returning aggregate type
6458 RValue RV = EmitAnyExprToTemp(E);
6459 return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
6461}
6462
6464 const CGCallee &OrigCallee, const CallExpr *E,
6466 llvm::Value *Chain,
6467 llvm::CallBase **CallOrInvoke,
6468 CGFunctionInfo const **ResolvedFnInfo) {
6469 // Get the actual function type. The callee type will always be a pointer to
6470 // function type or a block pointer type.
6471 assert(CalleeType->isFunctionPointerType() &&
6472 "Call must have function pointer type!");
6473
6474 const Decl *TargetDecl =
6475 OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
6476
6477 assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
6478 !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
6479 "trying to emit a call to an immediate function");
6480
6481 CalleeType = getContext().getCanonicalType(CalleeType);
6482
6483 auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
6484
6485 CGCallee Callee = OrigCallee;
6486
6487 if (SanOpts.has(SanitizerKind::Function) &&
6488 (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
6489 !isa<FunctionNoProtoType>(PointeeType)) {
6490 if (llvm::Constant *PrefixSig =
6491 CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
6492 auto CheckOrdinal = SanitizerKind::SO_Function;
6493 auto CheckHandler = SanitizerHandler::FunctionTypeMismatch;
6494 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6495 auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);
6496
6497 llvm::Type *PrefixSigType = PrefixSig->getType();
6498 llvm::StructType *PrefixStructTy = llvm::StructType::get(
6499 CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
6500
6501 llvm::Value *CalleePtr = Callee.getFunctionPointer();
6502 if (CGM.getCodeGenOpts().PointerAuth.FunctionPointers) {
6503 // Use raw pointer since we are using the callee pointer as data here.
6504 Address Addr =
6505 Address(CalleePtr, CalleePtr->getType(),
6506 CharUnits::fromQuantity(
6507 CalleePtr->getPointerAlignment(CGM.getDataLayout())),
6508 Callee.getPointerAuthInfo(), nullptr);
6509 CalleePtr = Addr.emitRawPointer(*this);
6510 }
6511
6512 // On 32-bit Arm, the low bit of a function pointer indicates whether
6513 // it's using the Arm or Thumb instruction set. The actual first
6514 // instruction lives at the same address either way, so we must clear
6515 // that low bit before using the function address to find the prefix
6516 // structure.
6517 //
6518 // This applies to both Arm and Thumb target triples, because
6519 // either one could be used in an interworking context where it
6520 // might be passed function pointers of both types.
6521 llvm::Value *AlignedCalleePtr;
6522 if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
6523 llvm::Value *CalleeAddress =
6524 Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
6525 llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
6526 llvm::Value *AlignedCalleeAddress =
6527 Builder.CreateAnd(CalleeAddress, Mask);
6528 AlignedCalleePtr =
6529 Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
6530 } else {
6531 AlignedCalleePtr = CalleePtr;
6532 }
6533
6534 llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
6535 llvm::Value *CalleeSigPtr =
6536 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
6537 llvm::Value *CalleeSig =
6538 Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
6539 llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
6540
6541 llvm::BasicBlock *Cont = createBasicBlock("cont");
6542 llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
6543 Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
6544
6545 EmitBlock(TypeCheck);
6546 llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
6547 Int32Ty,
6548 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
6549 getPointerAlign());
6550 llvm::Value *CalleeTypeHashMatch =
6551 Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
6552 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
6553 EmitCheckTypeDescriptor(CalleeType)};
6554 EmitCheck(std::make_pair(CalleeTypeHashMatch, CheckOrdinal), CheckHandler,
6555 StaticData, {CalleePtr});
6556
6557 Builder.CreateBr(Cont);
6558 EmitBlock(Cont);
6559 }
6560 }
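// Illustrative sketch (not part of CGExpr.cpp): the check emitted above
// assumes functions built with -fsanitize=function carry prefix data laid out
// as the packed struct { PrefixSig, i32 TypeHash } immediately before their
// entry point. A call through `void (*fp)(int)` is then guarded roughly like:
//
//   if (fp[-1].sig == PrefixSig) {             // instrumented callee?
//     if (fp[-1].hash != hash("void (int)"))   // hypothetical hash value
//       __ubsan_handle_function_type_mismatch(...);
//   }
//   fp(42);
//
// On 32-bit Arm/Thumb the low bit of fp is masked off first, as noted above.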
6561
6562 const auto *FnType = cast<FunctionType>(PointeeType);
6563
6564 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
6565 FD && DeviceKernelAttr::isOpenCLSpelling(FD->getAttr<DeviceKernelAttr>()))
6566 CGM.getTargetCodeGenInfo().setOCLKernelStubCallingConvention(FnType);
6567
6568 bool CFIUnchecked =
6570
6571 // If we are checking indirect calls and this call is indirect, check that the
6572 // function pointer is a member of the bit set for the function type.
6573 if (SanOpts.has(SanitizerKind::CFIICall) &&
6574 (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) && !CFIUnchecked) {
6575 auto CheckOrdinal = SanitizerKind::SO_CFIICall;
6576 auto CheckHandler = SanitizerHandler::CFICheckFail;
6577 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6578 EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
6579
6580 llvm::Metadata *MD =
6581 CGM.CreateMetadataIdentifierForFnType(QualType(FnType, 0));
6582
6583 llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
6584
6585 llvm::Value *CalleePtr = Callee.getFunctionPointer();
6586 llvm::Value *TypeTest = Builder.CreateCall(
6587 CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});
6588
6589 auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
6590 llvm::Constant *StaticData[] = {
6591 llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
6592 EmitCheckSourceLocation(E->getBeginLoc()),
6593 EmitCheckTypeDescriptor(QualType(FnType, 0)),
6594 };
6595 if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
6596 EmitCfiSlowPathCheck(CheckOrdinal, TypeTest, CrossDsoTypeId, CalleePtr,
6597 StaticData);
6598 } else {
6599 EmitCheck(std::make_pair(TypeTest, CheckOrdinal), CheckHandler,
6600 StaticData, {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
6601 }
6602 }
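// Illustrative sketch (not part of CGExpr.cpp): for -fsanitize=cfi-icall the
// guard emitted above is a type-membership test rather than a prefix load:
//
//   %ok = call i1 @llvm.type.test(ptr %fp, metadata !"fn.type.id")  ; id name is hypothetical
//   br i1 %ok, label %cont, label %cfi.fail      ; fail -> handler or trap
//
// LowerTypeTests later resolves llvm.type.test into a check that %fp points
// into the jump table built for functions of that type; the cross-DSO variant
// calls __cfi_slowpath with the type id instead.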
6603
6604 CallArgList Args;
6605 if (Chain)
6606 Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);
6607
6608 // C++17 requires that we evaluate arguments to a call using assignment syntax
6609 // right-to-left, and that we evaluate arguments to certain other operators
6610 // left-to-right. Note that we allow this to override the order dictated by
6611 // the calling convention on the MS ABI, which means that parameter
6612 // destruction order is not necessarily reverse construction order.
6613 // FIXME: Revisit this based on C++ committee response to unimplementability.
6614 EvaluationOrder Order = EvaluationOrder::Default;
6615 bool StaticOperator = false;
6616 if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
6617 if (OCE->isAssignmentOp())
6618 Order = EvaluationOrder::ForceRightToLeft;
6619 else {
6620 switch (OCE->getOperator()) {
6621 case OO_LessLess:
6622 case OO_GreaterGreater:
6623 case OO_AmpAmp:
6624 case OO_PipePipe:
6625 case OO_Comma:
6626 case OO_ArrowStar:
6627 Order = EvaluationOrder::ForceLeftToRight;
6628 break;
6629 default:
6630 break;
6631 }
6632 }
6633
6634 if (const auto *MD =
6635 dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
6636 MD && MD->isStatic())
6637 StaticOperator = true;
6638 }
6639
6640 auto Arguments = E->arguments();
6641 if (StaticOperator) {
6642 // If we're calling a static operator, we need to emit the object argument
6643 // and ignore it.
6644 EmitIgnoredExpr(E->getArg(0));
6645 Arguments = drop_begin(Arguments, 1);
6646 }
6647 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
6648 E->getDirectCallee(), /*ParamsToSkip=*/0, Order);
6649
6650 const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
6651 Args, FnType, /*ChainCall=*/Chain);
6652
6653 if (ResolvedFnInfo)
6654 *ResolvedFnInfo = &FnInfo;
6655
6656 // HIP function pointer contains kernel handle when it is used in triple
6657 // chevron. The kernel stub needs to be loaded from kernel handle and used
6658 // as callee.
6659 if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
6660 isa<CUDAKernelCallExpr>(E) &&
6661 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
6662 llvm::Value *Handle = Callee.getFunctionPointer();
6663 auto *Stub = Builder.CreateLoad(
6664 Address(Handle, Handle->getType(), CGM.getPointerAlign()));
6665 Callee.setFunctionPointer(Stub);
6666 }
6667 llvm::CallBase *LocalCallOrInvoke = nullptr;
6668 RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &LocalCallOrInvoke,
6669 E == MustTailCall, E->getExprLoc());
6670
6671 // Generate function declaration DISuprogram in order to be used
6672 // in debug info about call sites.
6673 if (CGDebugInfo *DI = getDebugInfo()) {
6674 if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
6675 FunctionArgList Args;
6676 QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
6677 DI->EmitFuncDeclForCallSite(LocalCallOrInvoke,
6678 DI->getFunctionType(CalleeDecl, ResTy, Args),
6679 CalleeDecl);
6680 }
6681 }
6682 if (CallOrInvoke)
6683 *CallOrInvoke = LocalCallOrInvoke;
6684
6685 return Call;
6686}
6687
6688LValue
6689CodeGenFunction::EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
6690 Address BaseAddr = Address::invalid();
6691 if (E->getOpcode() == BO_PtrMemI) {
6692 BaseAddr = EmitPointerWithAlignment(E->getLHS());
6693 } else {
6694 BaseAddr = EmitLValue(E->getLHS()).getAddress();
6695 }
6696
6697 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
6698 const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();
6699
6700 LValueBaseInfo BaseInfo;
6701 TBAAAccessInfo TBAAInfo;
6702 bool IsInBounds = !getLangOpts().PointerOverflowDefined &&
6703 !isUnderlyingBasePointerConstantNull(E->getLHS());
6704 Address MemberAddr = EmitCXXMemberDataPointerAddress(
6705 E, BaseAddr, OffsetV, MPT, IsInBounds, &BaseInfo, &TBAAInfo);
6706
6707 return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
6708}
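// Illustrative note (not part of CGExpr.cpp): under the Itanium C++ ABI a
// pointer to data member is just a byte offset into the class, so
//
//   struct S { int a; double b; };
//   double S::*pm = &S::b;
//   double v = s.*pm;          // BO_PtrMemD
//
// lowers to "load the offset, GEP the base address of s by that many bytes,
// then load". Targets with a different member-pointer representation (e.g.
// the Microsoft ABI) handle the arithmetic inside
// EmitCXXMemberDataPointerAddress via their CGCXXABI implementation.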
6709
6710/// Given the address of a temporary variable, produce an r-value of
6711/// its type.
6712RValue CodeGenFunction::convertTempToRValue(Address addr,
6713 QualType type,
6714 SourceLocation loc) {
6715 LValue lvalue = MakeAddrLValue(addr, type, AlignmentSource::Decl);
6716 switch (getEvaluationKind(type)) {
6717 case TEK_Complex:
6718 return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
6719 case TEK_Aggregate:
6720 return lvalue.asAggregateRValue();
6721 case TEK_Scalar:
6722 return RValue::get(EmitLoadOfScalar(lvalue, loc));
6723 }
6724 llvm_unreachable("bad evaluation kind");
6725}
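// Illustrative note (not part of CGExpr.cpp): the three evaluation kinds map
// onto how values travel through CodeGen, e.g.
//
//   int              -> TEK_Scalar     (a single llvm::Value)
//   _Complex double  -> TEK_Complex    (a real/imaginary pair)
//   struct Point3    -> TEK_Aggregate  (stays in memory; an address is used)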
6726
6727void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
6728 assert(Val->getType()->isFPOrFPVectorTy());
6729 if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
6730 return;
6731
6732 llvm::MDBuilder MDHelper(getLLVMContext());
6733 llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
6734
6735 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
6736}
6737
6738void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
6739 llvm::Type *EltTy = Val->getType()->getScalarType();
6740 if (!EltTy->isFloatTy())
6741 return;
6742
6743 if ((getLangOpts().OpenCL &&
6744 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6745 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6746 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6747 // OpenCL v1.1 s7.4: minimum accuracy of single precision sqrt is 3ulp
6748 //
6749 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6750 // build option allows an application to specify that single precision
6751 // floating-point divide (x/y and 1/x) and sqrt used in the program
6752 // source are correctly rounded.
6753 //
6754 // TODO: CUDA has a prec-sqrt flag
6755 SetFPAccuracy(Val, 3.0f);
6756 }
6757}
6758
6759void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
6760 llvm::Type *EltTy = Val->getType()->getScalarType();
6761 if (!EltTy->isFloatTy())
6762 return;
6763
6764 if ((getLangOpts().OpenCL &&
6765 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6766 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6767 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6768 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
6769 //
6770 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6771 // build option allows an application to specify that single precision
6772 // floating-point divide (x/y and 1/x) and sqrt used in the program
6773 // source are correctly rounded.
6774 //
6775 // TODO: CUDA has a prec-div flag
6776 SetFPAccuracy(Val, 2.5f);
6777 }
6778}
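// Illustrative sketch (not part of CGExpr.cpp): SetFPAccuracy attaches
// !fpmath metadata carrying the maximum allowed error in ulps, so an OpenCL
// single-precision divide built without
// -cl-fp32-correctly-rounded-divide-sqrt comes out roughly as:
//
//   %q = fdiv float %x, %y, !fpmath !0
//   !0 = !{float 2.500000e+00}
//
// Backends may then pick a faster, less accurate expansion for the operation.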
6779
6780namespace {
6781 struct LValueOrRValue {
6782 LValue LV;
6783 RValue RV;
6784 };
6785}
6786
6787static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
6788 const PseudoObjectExpr *E,
6789 bool forLValue,
6790 AggValueSlot slot) {
6791 SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
6792
6793 // Find the result expression, if any.
6794 const Expr *resultExpr = E->getResultExpr();
6795 LValueOrRValue result;
6796
6797 for (PseudoObjectExpr::const_semantics_iterator
6798 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
6799 const Expr *semantic = *i;
6800
6801 // If this semantic expression is an opaque value, bind it
6802 // to the result of its source expression.
6803 if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
6804 // Skip unique OVEs.
6805 if (ov->isUnique()) {
6806 assert(ov != resultExpr &&
6807 "A unique OVE cannot be used as the result expression");
6808 continue;
6809 }
6810
6811 // If this is the result expression, we may need to evaluate
6812 // directly into the slot.
6813 typedef CodeGenFunction::OpaqueValueMappingData OVMA;
6814 OVMA opaqueData;
6815 if (ov == resultExpr && ov->isPRValue() && !forLValue &&
6816 CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
6817 CGF.EmitAggExpr(ov->getSourceExpr(), slot);
6818 LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
6819 AlignmentSource::Decl);
6820 opaqueData = OVMA::bind(CGF, ov, LV);
6821 result.RV = slot.asRValue();
6822
6823 // Otherwise, emit as normal.
6824 } else {
6825 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
6826
6827 // If this is the result, also evaluate the result now.
6828 if (ov == resultExpr) {
6829 if (forLValue)
6830 result.LV = CGF.EmitLValue(ov);
6831 else
6832 result.RV = CGF.EmitAnyExpr(ov, slot);
6833 }
6834 }
6835
6836 opaques.push_back(opaqueData);
6837
6838 // Otherwise, if the expression is the result, evaluate it
6839 // and remember the result.
6840 } else if (semantic == resultExpr) {
6841 if (forLValue)
6842 result.LV = CGF.EmitLValue(semantic);
6843 else
6844 result.RV = CGF.EmitAnyExpr(semantic, slot);
6845
6846 // Otherwise, evaluate the expression in an ignored context.
6847 } else {
6848 CGF.EmitIgnoredExpr(semantic);
6849 }
6850 }
6851
6852 // Unbind all the opaques now.
6853 for (CodeGenFunction::OpaqueValueMappingData &opaque : opaques)
6854 opaque.unbind(CGF);
6855
6856 return result;
6857}
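// Illustrative note (not part of CGExpr.cpp): a PseudoObjectExpr carries both
// the syntactic form the user wrote and the semantic expressions that
// implement it. For an Objective-C property increment such as
//
//   obj.count += 1;
//
// the semantic list is, roughly: an OpaqueValueExpr binding `obj`, an
// OpaqueValueExpr binding the getter message send, and a setter message send
// built from them. The loop above binds each opaque value once, emits
// non-result semantics for their side effects only, and evaluates whichever
// semantic expression E->getResultExpr() designates as the value of the
// whole expression.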
6858
6859RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
6860 AggValueSlot slot) {
6861 return emitPseudoObjectExpr(*this, E, false, slot).RV;
6862}
6863
6864LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
6865 return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
6866}
6867
6869 LValue Val, SmallVectorImpl<LValue> &AccessList) {
6870
6872 std::tuple<LValue, QualType, llvm::SmallVector<llvm::Value *, 4>>, 16>
6873 WorkList;
6874 llvm::IntegerType *IdxTy = llvm::IntegerType::get(getLLVMContext(), 32);
6875 WorkList.push_back({Val, Val.getType(), {llvm::ConstantInt::get(IdxTy, 0)}});
6876
6877 while (!WorkList.empty()) {
6878 auto [LVal, T, IdxList] = WorkList.pop_back_val();
6879 T = T.getCanonicalType().getUnqualifiedType();
6880 assert(!isa<MatrixType>(T) && "Matrix types not yet supported in HLSL");
6881
6882 if (const auto *CAT = dyn_cast<ConstantArrayType>(T)) {
6883 uint64_t Size = CAT->getZExtSize();
6884 for (int64_t I = Size - 1; I > -1; I--) {
6885 llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
6886 IdxListCopy.push_back(llvm::ConstantInt::get(IdxTy, I));
6887 WorkList.emplace_back(LVal, CAT->getElementType(), IdxListCopy);
6888 }
6889 } else if (const auto *RT = dyn_cast<RecordType>(T)) {
6890 const RecordDecl *Record = RT->getOriginalDecl()->getDefinitionOrSelf();
6891 assert(!Record->isUnion() && "Union types not supported in flat cast.");
6892
6893 const CXXRecordDecl *CXXD = dyn_cast<CXXRecordDecl>(Record);
6894
6896 std::tuple<LValue, QualType, llvm::SmallVector<llvm::Value *, 4>>, 16>
6897 ReverseList;
6898 if (CXXD && CXXD->isStandardLayout())
6899 Record = CXXD->getStandardLayoutBaseWithFields();
6900
6901 // deal with potential base classes
6902 if (CXXD && !CXXD->isStandardLayout()) {
6903 if (CXXD->getNumBases() > 0) {
6904 assert(CXXD->getNumBases() == 1 &&
6905 "HLSL doesn't support multiple inheritance.");
6906 auto Base = CXXD->bases_begin();
6907 llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
6908 IdxListCopy.push_back(llvm::ConstantInt::get(
6909 IdxTy, 0)); // base struct should be at index zero
6910 ReverseList.emplace_back(LVal, Base->getType(), IdxListCopy);
6911 }
6912 }
6913
6914 const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(Record);
6915
6916 llvm::Type *LLVMT = ConvertTypeForMem(T);
6917 CharUnits Align = getContext().getTypeAlignInChars(T);
6918 LValue RLValue;
6919 bool createdGEP = false;
6920 for (auto *FD : Record->fields()) {
6921 if (FD->isBitField()) {
6922 if (FD->isUnnamedBitField())
6923 continue;
6924 if (!createdGEP) {
6925 createdGEP = true;
6926 Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList,
6927 LLVMT, Align, "gep");
6928 RLValue = MakeAddrLValue(GEP, T);
6929 }
6930 LValue FieldLVal = EmitLValueForField(RLValue, FD, true);
6931 ReverseList.push_back({FieldLVal, FD->getType(), {}});
6932 } else {
6933 llvm::SmallVector<llvm::Value *, 4> IdxListCopy = IdxList;
6934 IdxListCopy.push_back(
6935 llvm::ConstantInt::get(IdxTy, Layout.getLLVMFieldNo(FD)));
6936 ReverseList.emplace_back(LVal, FD->getType(), IdxListCopy);
6937 }
6938 }
6939
6940 std::reverse(ReverseList.begin(), ReverseList.end());
6941 llvm::append_range(WorkList, ReverseList);
6942 } else if (const auto *VT = dyn_cast<VectorType>(T)) {
6943 llvm::Type *LLVMT = ConvertTypeForMem(T);
6944 CharUnits Align = getContext().getTypeAlignInChars(T);
6945 Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList, LLVMT,
6946 Align, "vector.gep");
6947 LValue Base = MakeAddrLValue(GEP, T);
6948 for (unsigned I = 0, E = VT->getNumElements(); I < E; I++) {
6949 llvm::Constant *Idx = llvm::ConstantInt::get(IdxTy, I);
6950 LValue LV =
6951 LValue::MakeVectorElt(Base.getAddress(), Idx, VT->getElementType(),
6952 Base.getBaseInfo(), TBAAAccessInfo());
6953 AccessList.emplace_back(LV);
6954 }
6955 } else { // a scalar/builtin type
6956 if (!IdxList.empty()) {
6957 llvm::Type *LLVMT = ConvertTypeForMem(T);
6958 CharUnits Align = getContext().getTypeAlignInChars(T);
6959 Address GEP = Builder.CreateInBoundsGEP(LVal.getAddress(), IdxList,
6960 LLVMT, Align, "gep");
6961 AccessList.emplace_back(MakeAddrLValue(GEP, T));
6962 } else // must be a bitfield we already created an lvalue for
6963 AccessList.emplace_back(LVal);
6964 }
6965 }
6966}
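// Illustrative note (not part of CGExpr.cpp): the worklist above flattens an
// aggregate into one LValue per leaf element, in declaration order, which is
// the order HLSL flat casts consume values. For example, assuming
//
//   struct S { int A; float2 B; };   // HLSL source
//
// an lvalue of type S[2] yields six element lvalues:
//
//   [0].A, [0].B.x, [0].B.y, [1].A, [1].B.x, [1].B.y
//
// Arrays are expanded element by element, records field by field (visiting a
// base class before the derived fields), and vectors contribute one
// VectorElt lvalue per lane.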
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition CGCall.cpp:5241
const TargetCodeGenInfo & getTargetHooks() const
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
LValue EmitHLSLArrayAssignLValue(const BinaryOperator *E)
Definition CGExpr.cpp:6328
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
Definition CGExpr.cpp:215
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
LValue EmitVAArgExprLValue(const VAArgExpr *E)
Definition CGExpr.cpp:6353
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside preserved access index region.
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
Definition CGExpr.cpp:283
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
LValue EmitStmtExprLValue(const StmtExpr *E)
Definition CGExpr.cpp:6456
llvm::Value * EmitARCLoadWeakRetained(Address addr)
i8* @objc_loadWeakRetained(i8** addr)
Definition CGObjC.cpp:2643
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block.
Definition CGExpr.cpp:103
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E)
Definition CGExpr.cpp:6432
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2614
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4309
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition CGDecl.cpp:2331
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
Definition CGExpr.cpp:1613
llvm::Instruction * getPostAllocaInsertPoint()
Return PostAllocaInsertPt.
void EmitAllocToken(llvm::CallBase *CB, QualType AllocType)
Emit additional metadata used by the AllocToken instrumentation.
Definition CGExpr.cpp:1324
LValue EmitComplexAssignmentLValue(const BinaryOperator *E)
Emit an l-value for an assignment (simple or compound) of complex type.
LValue EmitCastLValue(const CastExpr *E)
EmitCastLValue - Casts are never lvalues unless that cast is to a reference type.
Definition CGExpr.cpp:5725
llvm::Value * EmitPointerArithmetic(const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer, Expr *indexOperand, llvm::Value *index, bool isSubtraction)
Emit pointer + index arithmetic.
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E)
Definition CGExpr.cpp:508
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy)
Definition CGExpr.cpp:3130
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3691
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
Definition CGExpr.cpp:293
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:264
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E)
Definition CGExpr.cpp:5080
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
RValue EmitUnsupportedRValue(const Expr *E, const char *Name)
EmitUnsupportedRValue - Emit a dummy r-value using the type of E and issue an ErrorUnsupported style ...
Definition CGExpr.cpp:1639
CleanupKind getCleanupKind(QualType::DestructionKind kind)
std::pair< LValue, LValue > EmitHLSLOutArgLValues(const HLSLOutArgExpr *E, QualType Ty)
Definition CGExpr.cpp:5917
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E)
Definition CGExpr.cpp:6404
llvm::Type * ConvertTypeForMem(QualType T)
LValue EmitCallExprLValue(const CallExpr *E, llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:6338
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)
Definition CGExpr.cpp:2488
llvm::Value * EmitARCLoadWeak(Address addr)
i8* @objc_loadWeak(i8** addr) Essentially objc_autorelease(objc_loadWeakRetained(addr)).
Definition CGObjC.cpp:2636
LValue EmitLValueForLambdaField(const FieldDecl *Field)
Definition CGExpr.cpp:5245
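Illustrative note (not from CGExpr.cpp): EmitLValueForLambdaField resolves a use of a captured variable inside a lambda body to the corresponding field of the closure object. A minimal, standalone C++ sketch of the source-level construct involved; the names used here are hypothetical.

#include <cstdio>

int main() {
  int captured = 41;
  // Inside the lambda body, the use of `captured` does not refer to the
  // local variable directly; it is lowered as an access to the implicit
  // closure-object field that holds the by-value capture.
  auto add_one = [captured] { return captured + 1; };
  std::printf("%d\n", add_one()); // 42
  return 0;
}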
void markStmtMaybeUsed(const Stmt *S)
CodeGenTypes & getTypes() const
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
llvm::Value * EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
Definition CGExpr.cpp:6410
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
void FlattenAccessAndTypeLValue(LValue LVal, SmallVectorImpl< LValue > &AccessList)
Definition CGExpr.cpp:6868
LValue EmitCoyieldLValue(const CoyieldExpr *E)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
void EmitCfiSlowPathCheck(SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond, llvm::ConstantInt *TypeId, llvm::Value *Ptr, ArrayRef< llvm::Constant * > StaticArgs)
Emit a slow path cross-DSO CFI check which calls __cfi_slowpath if Cond is false.
Definition CGExpr.cpp:4013
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
Emits all the code to cause the given temporary to be cleaned up.
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
LValue EmitUnaryOpLValue(const UnaryOperator *E)
Definition CGExpr.cpp:3505
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1596
bool LValueIsSuitableForInlineAtomic(LValue Src)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition CGExpr.cpp:1677
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition CGExpr.cpp:186
Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo=nullptr, TBAAAccessInfo *PointeeTBAAInfo=nullptr)
Definition CGExpr.cpp:3078
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
Definition CGExpr.cpp:5984
llvm::Value * EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr)
Definition CGObjC.cpp:2160
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E)
Definition CGExpr.cpp:6382
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
Definition CGExpr.cpp:734
Address EmitExtVectorElementLValue(LValue V)
Generates lvalue for partial ext_vector access.
Definition CGExpr.cpp:2572
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
void EmitInitializationToLValue(const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed=AggValueSlot::IsNotZeroed)
EmitInitializationToLValue - Emit an initializer to an LValue.
Definition CGExpr.cpp:323
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address emitAddrOfRealComponent(Address complex, QualType complexType)
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
RValue EmitLoadOfExtVectorElementLValue(LValue V)
Definition CGExpr.cpp:2525
static bool hasAggregateEvaluationKind(QualType T)
static bool IsWrappedCXXThis(const Expr *E)
Check if E is a C++ "this" pointer wrapped in value-preserving casts.
Definition CGExpr.cpp:1654
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
EmitCallArgs - Emit call arguments for a function.
Definition CGCall.cpp:4681
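Illustrative note (not from CGExpr.cpp): EmitCallArgs takes an EvaluationOrder parameter whose values (Default, ForceRightToLeft) are documented above. A standalone C++ sketch of why the order matters; the function names are hypothetical.

#include <cstdio>

static int traced(const char *label, int value) {
  std::printf("evaluating %s\n", label);
  return value;
}

static void callee(int, int) {}

int main() {
  // C++ leaves the relative order of these two argument evaluations
  // unspecified; a target ABI or language rule may force right-to-left,
  // which is what EvaluationOrder::ForceRightToLeft models.
  callee(traced("first argument", 1), traced("second argument", 2));
  return 0;
}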
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
Definition CGExpr.cpp:4849
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
Definition CGExpr.cpp:4294
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4221
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2264
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
Definition CGExpr.cpp:1228
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
Definition CGExpr.cpp:4209
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E)
Definition CGExpr.cpp:6367
llvm::Function * generateDestroyHelper(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray, const VarDecl *VD)
generateDestroyHelper - Generates a helper function which, when invoked, destroys the given object.
LValue EmitMemberExpr(const MemberExpr *E)
Definition CGExpr.cpp:5148
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr)
Try to emit a reference to the given value without producing it as an l-value.
Definition CGExpr.cpp:1945
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1712
void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst)
Stores to global named registers are always calls to intrinsics.
Definition CGExpr.cpp:2920
bool isOpaqueValueEmitted(const OpaqueValueExpr *E)
isOpaqueValueEmitted - Return true if the opaque value expression has already been emitted.
Definition CGExpr.cpp:5978
std::pair< llvm::Value *, CGPointerAuthInfo > EmitOrigPointerRValue(const Expr *E)
Retrieve a pointer rvalue and its ptrauth info.
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
i8* @objc_storeWeak(i8** addr, i8* value) Returns value.
Definition CGObjC.cpp:2651
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts)
getAccessedFieldNo - Given an encoded value and a result number, return the input field number being ...
Definition CGExpr.cpp:706
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
Definition CGExpr.cpp:1606
void EmitCountedByBoundsChecking(const Expr *E, llvm::Value *Idx, Address Addr, QualType IdxTy, QualType ArrayTy, bool Accessed, bool FlexibleArray)
EmitCountedByBoundsChecking - If the array being accessed has a "counted_by" attribute,...
Definition CGExpr.cpp:4596
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:655
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
llvm::Value * EmitCXXTypeidExpr(const CXXTypeidExpr *E)
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if the type of an expression is a variably modified type.
Definition CGExpr.cpp:1392
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
llvm::Constant * getRawFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return a function pointer for a reference to the given function.
Definition CGExpr.cpp:3177
llvm::FunctionCallee getAddrAndTypeOfCXXStructor(GlobalDecl GD, const CGFunctionInfo *FnInfo=nullptr, llvm::FunctionType *FnType=nullptr, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Definition CGCXX.cpp:217
llvm::Constant * GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty=nullptr, bool ForVTable=false, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Return the address of the given function.
llvm::Constant * getFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return the ABI-correct function pointer value for a reference to the given function.
const LangOptions & getLangOpts() const
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
CGPointerAuthInfo getPointerAuthInfoForPointeeType(QualType type)
llvm::GlobalValue::LinkageTypes getLLVMLinkageVarDefinition(const VarDecl *VD)
Returns LLVM linkage for a declarator.
ConstantAddress GetWeakRefReference(const ValueDecl *VD)
Get a reference to the target of VD.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, TBAAAccessInfo TargetInfo)
mergeTBAAInfoForCast - Get merged TBAA information for the purposes of type casts.
llvm::Constant * GetAddrOfGlobalVar(const VarDecl *D, llvm::Type *Ty=nullptr, ForDefinition_t IsForDefinition=NotForDefinition)
Return the llvm::Constant for the address of the given global variable.
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
ConstantAddress GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E, const Expr *Inner)
Returns a pointer to a global variable representing a temporary with static or thread storage duratio...
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
A specialization of Address that requires the address to be an LLVM Constant.
Definition Address.h:296
llvm::Constant * getPointer() const
Definition Address.h:308
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
AlignmentSource getAlignmentSource() const
Definition CGValue.h:171
LValue - This represents an lvalue reference.
Definition CGValue.h:182
bool isBitField() const
Definition CGValue.h:280
bool isMatrixElt() const
Definition CGValue.h:283
Expr * getBaseIvarExp() const
Definition CGValue.h:332
llvm::Constant * getExtVectorElts() const
Definition CGValue.h:409
static LValue MakeGlobalReg(llvm::Value *V, CharUnits alignment, QualType type)
Definition CGValue.h:478
bool isObjCStrong() const
Definition CGValue.h:324
bool isGlobalObjCRef() const
Definition CGValue.h:306
bool isVectorElt() const
Definition CGValue.h:279
bool isSimple() const
Definition CGValue.h:278
bool isVolatileQualified() const
Definition CGValue.h:285
RValue asAggregateRValue() const
Definition CGValue.h:498
llvm::Value * getPointer(CodeGenFunction &CGF) const
llvm::Value * getMatrixIdx() const
Definition CGValue.h:395
llvm::Value * getGlobalReg() const
Definition CGValue.h:430
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:432
bool isVolatile() const
Definition CGValue.h:328
const Qualifiers & getQuals() const
Definition CGValue.h:338
bool isGlobalReg() const
Definition CGValue.h:282
static LValue MakeExtVectorElt(Address Addr, llvm::Constant *Elts, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:452
bool isObjCWeak() const
Definition CGValue.h:321
Address getAddress() const
Definition CGValue.h:361
unsigned getVRQualifiers() const
Definition CGValue.h:287
LValue setKnownNonNull()
Definition CGValue.h:350
bool isNonGC() const
Definition CGValue.h:303
bool isExtVectorElt() const
Definition CGValue.h:281
llvm::Value * getVectorIdx() const
Definition CGValue.h:382
void setNontemporal(bool Value)
Definition CGValue.h:319
LValueBaseInfo getBaseInfo() const
Definition CGValue.h:346
void setARCPreciseLifetime(ARCPreciseLifetime_t value)
Definition CGValue.h:315
QualType getType() const
Definition CGValue.h:291
const CGBitFieldInfo & getBitFieldInfo() const
Definition CGValue.h:424
bool isThreadLocalRef() const
Definition CGValue.h:309
KnownNonNull_t isKnownNonNull() const
Definition CGValue.h:349
TBAAAccessInfo getTBAAInfo() const
Definition CGValue.h:335
void setNonGC(bool Value)
Definition CGValue.h:304
Address getVectorAddress() const
Definition CGValue.h:370
bool isNontemporal() const
Definition CGValue.h:318
static LValue MakeBitfield(Address Addr, const CGBitFieldInfo &Info, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Create a new object to represent a bit-field access.
Definition CGValue.h:468
bool isObjCIvar() const
Definition CGValue.h:297
static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:442
void setAddress(Address address)
Definition CGValue.h:363
Address getExtVectorAddress() const
Definition CGValue.h:401
static LValue MakeMatrixElt(Address matAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition CGValue.h:488
Address getMatrixAddress() const
Definition CGValue.h:387
Address getBitFieldAddress() const
Definition CGValue.h:415
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:98
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition CGValue.h:125
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition CGValue.h:108
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:83
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:71
An abstract representation of an aligned address.
Definition Address.h:42
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition Address.h:93
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:77
llvm::Value * getPointer() const
Definition Address.h:66
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition Address.h:83
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition CGCall.h:379
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, llvm::Type *DestTy, bool IsNonNull=false) const
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3275
QualType getElementType() const
Definition TypeBase.h:3285
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3539
bool isFileScope() const
Definition Expr.h:3571
const Expr * getInitializer() const
Definition Expr.h:3567
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4373
unsigned getNumRows() const
Returns the number of rows in the matrix.
Definition TypeBase.h:4391
RecordDecl * getOuterLexicalRecordContext()
Retrieve the outermost lexically enclosing record context.
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1270
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition Expr.h:1474
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition Expr.cpp:484
ValueDecl * getDecl()
Definition Expr.h:1338
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:1468
SourceLocation getLocation() const
Definition Expr.h:1346
T * getAttr() const
Definition DeclBase.h:573
SourceLocation getLocation() const
Definition DeclBase.h:439
bool isUsed(bool CheckUsedAttr=true) const
Whether any (re-)declaration of the entity was used, meaning that a definition is required.
Definition DeclBase.cpp:553
DeclContext * getDeclContext()
Definition DeclBase.h:448
bool hasAttr() const
Definition DeclBase.h:577
ExplicitCastExpr - An explicit cast written in the source code.
Definition Expr.h:3862
This represents one expression.
Definition Expr.h:112
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition Expr.cpp:80
bool isGLValue() const
Definition Expr.h:287
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3112
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition Expr.h:444
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3085
Expr * IgnoreImplicit() LLVM_READONLY
Skip past any implicit AST nodes which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3073
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3081
bool EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsLValue - Evaluate an expression to see if we can fold it to an lvalue with link time known ...
bool isPRValue() const
Definition Expr.h:285
bool isLValue() const
isLValue - True if this expression is an "l-value" according to the rules of the current language.
Definition Expr.h:284
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition Expr.cpp:1542
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3665
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3065
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition Expr.h:476
QualType getType() const
Definition Expr.h:144
bool isOBJCGCCandidate(ASTContext &Ctx) const
isOBJCGCCandidate - Return true if this expression may be used in a read/write barrier.
Definition Expr.cpp:2996
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6498
bool isArrow() const
isArrow - Return true if the base expression is a pointer to vector, return false if the base express...
Definition Expr.cpp:4412
void getEncodedElementAccess(SmallVectorImpl< uint32_t > &Elts) const
getEncodedElementAccess - Encode the elements accessed into an llvm aggregate Constant of ConstantInt...
Definition Expr.cpp:4444
const Expr * getBase() const
Definition Expr.h:6515
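Illustrative note (not from CGExpr.cpp): an ExtVectorElementExpr is the element access or swizzle on a Clang ext_vector_type value. A minimal, standalone sketch assuming compilation with Clang, which provides this extension.

// Compile with Clang; ext_vector_type is a Clang extension.
typedef float float4 __attribute__((ext_vector_type(4)));
typedef float float2 __attribute__((ext_vector_type(2)));

int main() {
  float4 v = {1.0f, 2.0f, 3.0f, 4.0f};
  float2 xy = v.xy;   // ExtVectorElementExpr: swizzle reading two elements
  v.w = xy.x + xy.y;  // element write through an ext-vector element l-value
  return (int)v.w;    // 3
}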
Represents a member of a struct/union/class.
Definition Decl.h:3160
bool isBitField() const
Determines whether this field is a bitfield.
Definition Decl.h:3263
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition Decl.cpp:4843
FullExpr - Represents a "full-expression" node.
Definition Expr.h:1049
const Expr * getSubExpr() const
Definition Expr.h:1062
Represents a function declaration or definition.
Definition Decl.h:2000
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition Decl.cpp:3750
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5266
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition TypeBase.h:4462
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
This class represents temporary values used to represent inout and out arguments in HLSL.
Definition Expr.h:7271
const OpaqueValueExpr * getCastedTemporary() const
Definition Expr.h:7322
const OpaqueValueExpr * getOpaqueArgLValue() const
Definition Expr.h:7303
bool isInOut() const
Returns true if the parameter is inout and false if the parameter is out.
Definition Expr.h:7330
const Expr * getWritebackCast() const
Definition Expr.h:7317
const Expr * getArgLValue() const
Return the l-value expression that was written as the argument in source.
Definition Expr.h:7312
One of these records is kept for each identifier that is lexed.
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
Describes an C or C++ initializer list.
Definition Expr.h:5233
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition Expr.cpp:2457
const Expr * getInit(unsigned Init) const
Definition Expr.h:5287
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition ExprCXX.h:4922
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition ExprCXX.h:4947
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4939
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition ExprCXX.h:4972
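Illustrative note (not from CGExpr.cpp): a MaterializeTemporaryExpr models a prvalue written to memory so a reference can bind to it; its storage duration controls how long that temporary lives. A standalone C++ sketch; the type name Tracer is hypothetical.

#include <cstdio>

struct Tracer {
  int value;
  ~Tracer() { std::printf("temporary destroyed\n"); }
};

int main() {
  // Binding a reference to a prvalue materializes a temporary; because the
  // reference lifetime-extends it (automatic storage duration), the Tracer
  // lives until the end of main, not the end of this full-expression.
  const Tracer &r = Tracer{42};
  std::printf("%d\n", r.value);
  return 0;
}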
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition Expr.h:2799
bool isIncomplete() const
Definition Expr.h:2819
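Illustrative note (not from CGExpr.cpp): MatrixSubscriptExpr comes from Clang's matrix extension. A standalone sketch assuming a Clang build with matrix support and the -fenable-matrix flag.

// Compile with: clang++ -fenable-matrix example.cpp
typedef float m2x2_t __attribute__((matrix_type(2, 2)));

int main() {
  m2x2_t m;
  m[0][0] = 1.0f;          // MatrixSubscriptExpr: row index, then column index
  m[1][1] = 2.0f;
  float trace = m[0][0] + m[1][1];
  return (int)trace;       // 3
}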
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3298
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3381
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:3522
Expr * getBase() const
Definition Expr.h:3375
bool isArrow() const
Definition Expr.h:3482
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3493
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3653
bool isObjCBOOLType(QualType T) const
Returns true if T is the Objective-C BOOL type.
Definition NSAPI.cpp:481
This represents a decl that may have a name.
Definition Decl.h:274
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
A C++ nested-name-specifier augmented with source location information.
ObjCEncodeExpr, used for @encode in Objective-C.
Definition ExprObjC.h:409
Represents an ObjC class declaration.
Definition DeclObjC.h:1154
ObjCIvarDecl - Represents an ObjC instance variable.
Definition DeclObjC.h:1952
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition ExprObjC.h:548
ObjCIvarDecl * getDecl()
Definition ExprObjC.h:578
bool isArrow() const
Definition ExprObjC.h:586
const Expr * getBase() const
Definition ExprObjC.h:582
An expression that sends a message to the given Objective-C object or class.
Definition ExprObjC.h:940
const ObjCMethodDecl * getMethodDecl() const
Definition ExprObjC.h:1364
QualType getReturnType() const
Definition DeclObjC.h:329
ObjCSelectorExpr used for @selector in Objective-C.
Definition ExprObjC.h:454
Selector getSelector() const
Definition ExprObjC.h:468
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1178
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1228
bool isUnique() const
Definition Expr.h:1236
const Expr * getSubExpr() const
Definition Expr.h:2199
Pointer-authentication qualifiers.
Definition TypeBase.h:152
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
QualType getPointeeType() const
Definition TypeBase.h:3338
[C99 6.4.2.2] - A predefined identifier such as func.
Definition Expr.h:2005
StringRef getIdentKindName() const
Definition Expr.h:2062
PredefinedIdentKind getIdentKind() const
Definition Expr.h:2040
StringLiteral * getFunctionName()
Definition Expr.h:2049
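Illustrative note (not from CGExpr.cpp): a PredefinedExpr is an identifier such as __func__. A minimal, standalone C++ sketch; the function name report is hypothetical.

#include <cstdio>

void report() {
  // Each of these is a PredefinedExpr; __func__ is the standard form,
  // __PRETTY_FUNCTION__ is a compiler extension.
  std::printf("%s | %s\n", __func__, __PRETTY_FUNCTION__);
}

int main() {
  report();
  return 0;
}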
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
unsigned getLine() const
Return the presumed line number of this location.
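Illustrative note (not from CGExpr.cpp): the "presumed" location can differ from the spelling location because #line directives remap the reported file and line. A standalone C++ sketch; the remapped file name is hypothetical.

#include <cstdio>

int main() {
  std::printf("%s:%d\n", __FILE__, __LINE__); // spelling and presumed agree here
#line 500 "remapped_name.cpp"
  std::printf("%s:%d\n", __FILE__, __LINE__); // presumed: remapped_name.cpp:500
  return 0;
}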
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition Expr.h:6690
semantics_iterator semantics_end()
Definition Expr.h:6755
semantics_iterator semantics_begin()
Definition Expr.h:6751
const Expr *const * const_semantics_iterator
Definition Expr.h:6750
Expr * getResultExpr()
Return the result-bearing expression, or null if there is none.
Definition Expr.h:6738
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8378
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:1453
QualType withoutLocalFastQualifiers() const
Definition TypeBase.h:1214
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8420
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8334
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1438
void print(raw_ostream &OS, const PrintingPolicy &Policy, const Twine &PlaceHolder=Twine(), unsigned Indentation=0) const
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8479
QualType getCanonicalType() const
Definition TypeBase.h:8346
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Definition TypeBase.h:8388
QualType withCVRQualifiers(unsigned CVR) const
Definition TypeBase.h:1179
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1545
bool isConstantStorage(const ASTContext &Ctx, bool ExcludeCtor, bool ExcludeDtor)
Definition TypeBase.h:1036
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
unsigned getCVRQualifiers() const
Definition TypeBase.h:488
GC getObjCGCAttr() const
Definition TypeBase.h:519
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
bool hasConst() const
Definition TypeBase.h:457
void addCVRQualifiers(unsigned mask)
Definition TypeBase.h:502
void removeObjCGCAttr()
Definition TypeBase.h:523
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition TypeBase.h:650
void removePointerAuth()
Definition TypeBase.h:610
void setAddressSpace(LangAS space)
Definition TypeBase.h:591
bool hasVolatile() const
Definition TypeBase.h:467
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:603
ObjCLifetime getObjCLifetime() const
Definition TypeBase.h:545
Represents a struct/union/class.
Definition Decl.h:4312
field_range fields() const
Definition Decl.h:4515
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
Definition Decl.h:4496
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location that a SourceLocation specifies.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition Expr.h:4529
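Illustrative note (not from CGExpr.cpp): a StmtExpr is the GNU statement-expression extension. A minimal, standalone sketch assuming Clang or GCC, which accept this extension in C++.

#include <cstdio>

int main() {
  // GNU statement expression: the value of the last statement, X,
  // becomes the value of the whole ({ ... }) expression.
  int y = ({ int X = 4; X; });
  std::printf("%d\n", y); // 4
  return 0;
}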
StmtClass getStmtClass() const
Definition Stmt.h:1472
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:346
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1799
bool isUnion() const
Definition Decl.h:3922
Exposes information about the current target.
Definition TargetInfo.h:226
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
virtual StringRef getABI() const
Get the ABI currently in use.
The base class of the type hierarchy.
Definition TypeBase.h:1833
bool isBlockPointerType() const
Definition TypeBase.h:8551
bool isVoidType() const
Definition TypeBase.h:8887
bool hasPointeeToToCFIUncheckedCalleeFunctionType() const
Definition TypeBase.h:8583
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2225
bool isPackedVectorBoolType(const ASTContext &ctx) const
Definition Type.cpp:418
bool hasAttr(attr::Kind AK) const
Determine whether this type had the specified attribute applied to it (looking through top-level type...
Definition Type.cpp:1951
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition TypeBase.h:9183
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantArrayType() const
Definition TypeBase.h:8634
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isArrayType() const
Definition TypeBase.h:8630
bool isFunctionPointerType() const
Definition TypeBase.h:8598
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2337
bool isConstantMatrixType() const
Definition TypeBase.h:8692
bool isPointerType() const
Definition TypeBase.h:8531
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8931
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9174
bool isReferenceType() const
Definition TypeBase.h:8555
bool isEnumeralType() const
Definition TypeBase.h:8662
bool isVariableArrayType() const
Definition TypeBase.h:8642
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isExtVectorBoolType() const
Definition TypeBase.h:8678
bool isBitIntType() const
Definition TypeBase.h:8796
bool isAnyComplexType() const
Definition TypeBase.h:8666
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition TypeBase.h:9060
bool isAtomicType() const
Definition TypeBase.h:8713
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2800
bool isObjectType() const
Determine whether this type is an object type.
Definition TypeBase.h:2510
bool isHLSLResourceRecord() const
Definition Type.cpp:5364
EnumDecl * getAsEnumDecl() const
Retrieves the EnumDecl this type refers to.
Definition Type.h:53
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition Type.cpp:2436
bool isFunctionType() const
Definition TypeBase.h:8527
bool isObjCObjectPointerType() const
Definition TypeBase.h:8700
bool isVectorType() const
Definition TypeBase.h:8670
bool isAnyPointerType() const
Definition TypeBase.h:8539
bool isSubscriptableVectorType() const
Definition TypeBase.h:8684
const T * getAs() const
Member-template getAs<specific type>.
Definition TypeBase.h:9107
const Type * getUnqualifiedDesugaredType() const
Return the specified type with any "sugar" removed from the type, removing any typedefs,...
Definition Type.cpp:653
bool isRecordType() const
Definition TypeBase.h:8658
bool isHLSLResourceRecordArray() const
Definition Type.cpp:5368
bool hasBooleanRepresentation() const
Determine whether this type has a boolean representation – i.e., it is a boolean type,...
Definition Type.cpp:2354
bool isCFIUncheckedCalleeFunctionType() const
Definition TypeBase.h:8577
TypedefNameDecl * getDecl() const
Definition TypeBase.h:6111
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
SourceLocation getExprLoc() const
Definition Expr.h:2368
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4891
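Illustrative note (not from CGExpr.cpp): a VAArgExpr represents __builtin_va_arg, which the standard va_arg macro expands to in Clang. A standalone C++ sketch; the function name sum is hypothetical.

#include <cstdarg>
#include <cstdio>

static int sum(int count, ...) {
  va_list ap;
  va_start(ap, count);
  int total = 0;
  for (int i = 0; i < count; ++i)
    total += va_arg(ap, int); // each va_arg use is lowered from a VAArgExpr
  va_end(ap);
  return total;
}

int main() {
  std::printf("%d\n", sum(3, 1, 2, 3)); // 6
  return 0;
}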
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
QualType getType() const
Definition Value.cpp:237
Represents a variable declaration or definition.
Definition Decl.h:926
TLSKind getTLSKind() const
Definition Decl.cpp:2168
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition Decl.cpp:2366
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition Decl.h:1184
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition Decl.h:952
@ TLS_None
Not a TLS variable.
Definition Decl.h:946
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3966
Represents a GCC generic vector type.
Definition TypeBase.h:4175
unsigned getNumElements() const
Definition TypeBase.h:4190
#define INT_MIN
Definition limits.h:55
Definition SPIR.cpp:35
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
Definition CGValue.h:141
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:154
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:145
bool isEmptyFieldForLayout(const ASTContext &Context, const FieldDecl *FD)
isEmptyFieldForLayout - Return true iff the field is "empty", that is, either a zero-width bit-field ...
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
@ ARCImpreciseLifetime
Definition CGValue.h:136
static AlignmentSource getFieldAlignmentSource(AlignmentSource Source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
Definition CGValue.h:159
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const AstTypeMatcher< FunctionType > functionType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
@ OK_BitField
A bitfield object is a bitfield on a C or C++ record.
Definition Specifiers.h:154
@ SC_Register
Definition Specifiers.h:257
Expr * Cond
@ Asm
Assembly: we accept this only so that we can preprocess it.
StorageDuration
The storage duration for an object (per C++ [basic.stc]).
Definition Specifiers.h:339
@ SD_Thread
Thread storage duration.
Definition Specifiers.h:342
@ SD_Static
Static storage duration.
Definition Specifiers.h:343
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition Specifiers.h:340
@ SD_Automatic
Automatic storage duration (most local variables).
Definition Specifiers.h:341
@ SD_Dynamic
Dynamic storage duration.
Definition Specifiers.h:344
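Illustrative note (not from CGExpr.cpp): a standalone C++ sketch mapping the StorageDuration values above onto ordinary declarations; the variable names are hypothetical.

#include <memory>
#include <string>

static int g_static = 1;      // SD_Static: static storage duration
thread_local int g_tls = 2;   // SD_Thread: thread storage duration

int main() {
  int automatic = g_static + g_tls;          // SD_Automatic: block-scope local
  auto dynamic = std::make_unique<int>(4);   // SD_Dynamic: heap allocation
  // The std::string temporary below has SD_FullExpression duration: it is
  // destroyed at the end of this statement, once size() has been read.
  automatic += (int)std::string("abc").size() + *dynamic;
  return automatic;   // 1 + 2 + 3 + 4 = 10
}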
@ Result
The result type of a method or function.
Definition TypeBase.h:905
const FunctionProtoType * T
@ Dtor_Complete
Complete object dtor.
Definition ABI.h:36
LangAS
Defines the address space values used by the address space qualifier of QualType.
llvm::cl::opt< bool > ClSanitizeGuardChecks
SmallVector< CXXBaseSpecifier *, 4 > CXXCastPath
A simple array of base specifiers.
Definition ASTContext.h:149
U cast(CodeGen::Address addr)
Definition Address.h:327
LangAS getLangASFromTargetAS(unsigned TargetAS)
@ Interface
The "__interface" keyword introduces the elaborated-type-specifier.
Definition TypeBase.h:5870
bool isLambdaMethod(const DeclContext *DC)
Definition ASTLambda.h:39
@ Other
Other implicit parameter.
Definition Decl.h:1746
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition Specifiers.h:177
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition Specifiers.h:180
__INTPTR_TYPE__ intptr_t
A signed integer type with the property that any valid pointer to void can be converted to this type,...
Structure with information about how a bitfield should be accessed.
CharUnits VolatileStorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned VolatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned VolatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned IsSigned
Whether the bit-field is signed.
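Illustrative note (not from CGExpr.cpp): a standalone C++ model of the shift arithmetic a bit-field load performs, using the same notions of Offset, Size, StorageSize, and IsSigned as CGBitFieldInfo. This is a sketch assuming a 32-bit storage unit, not the actual CodeGen implementation.

#include <cstdint>
#include <cstdio>

// Extract the field occupying bits [Offset, Offset + Size) of a 32-bit
// storage unit; IsSigned selects sign extension via an arithmetic shift.
static int64_t loadBitField(uint32_t storage, unsigned Offset, unsigned Size,
                            unsigned StorageSize, bool IsSigned) {
  if (IsSigned) {
    int32_t shifted = (int32_t)(storage << (StorageSize - Offset - Size));
    return shifted >> (StorageSize - Size);   // arithmetic shift sign-extends
  }
  uint32_t shifted = storage >> Offset;
  return shifted & ((Size < 32) ? ((1u << Size) - 1) : ~0u);
}

int main() {
  // Field: bits [4, 9) of a 32-bit unit, signed. The 5-bit pattern 0b11110
  // (30) represents -2 when sign-extended.
  uint32_t storage = 30u << 4;
  std::printf("%lld\n", (long long)loadBitField(storage, 4, 5, 32, true)); // -2
  return 0;
}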
static Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
unsigned char PointerWidthInBits
The width of a pointer into the generic address space.
llvm::MDNode * AccessType
AccessType - The final access type.
uint64_t Offset
Offset - The byte offset of the final access within the base one.
static TBAAAccessInfo getMayAliasInfo()
Definition CodeGenTBAA.h:63
uint64_t Size
Size - The size of access, in bytes.
llvm::MDNode * BaseType
BaseType - The base/leading access type.
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition Expr.h:612
Describes how types, statements, expressions, and declarations should be printed.
unsigned FullyQualifiedName
When true, print the fully qualified name of function declarations.
unsigned SuppressTagKeyword
Whether type printing should skip printing the tag keyword.
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition Sanitizers.h:187
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174
An adjustment to be made to the temporary created when emitting a reference binding,...
Definition Expr.h:68