clang 20.0.0git
CGExpr.cpp
Go to the documentation of this file.
1//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "ABIInfoImpl.h"
14#include "CGCUDARuntime.h"
15#include "CGCXXABI.h"
16#include "CGCall.h"
17#include "CGCleanup.h"
18#include "CGDebugInfo.h"
19#include "CGObjCRuntime.h"
20#include "CGOpenMPRuntime.h"
21#include "CGRecordLayout.h"
22#include "CodeGenFunction.h"
23#include "CodeGenModule.h"
24#include "ConstantEmitter.h"
25#include "TargetInfo.h"
27#include "clang/AST/Attr.h"
28#include "clang/AST/DeclObjC.h"
29#include "clang/AST/NSAPI.h"
34#include "llvm/ADT/STLExtras.h"
35#include "llvm/ADT/ScopeExit.h"
36#include "llvm/ADT/StringExtras.h"
37#include "llvm/IR/DataLayout.h"
38#include "llvm/IR/Intrinsics.h"
39#include "llvm/IR/LLVMContext.h"
40#include "llvm/IR/MDBuilder.h"
41#include "llvm/IR/MatrixBuilder.h"
42#include "llvm/Support/ConvertUTF.h"
43#include "llvm/Support/Endian.h"
44#include "llvm/Support/MathExtras.h"
45#include "llvm/Support/Path.h"
46#include "llvm/Support/xxhash.h"
47#include "llvm/Transforms/Utils/SanitizerStats.h"
48
49#include <optional>
50#include <string>
51
52using namespace clang;
53using namespace CodeGen;
54
55namespace clang {
56// TODO: Introduce frontend options to enable this per sanitizer, similar to
57// `fsanitize-trap`.
58llvm::cl::opt<bool> ClSanitizeGuardChecks(
59 "ubsan-guard-checks", llvm::cl::Optional,
60 llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));
61} // namespace clang
62
63//===--------------------------------------------------------------------===//
64// Defines for metadata
65//===--------------------------------------------------------------------===//
66
67// Those values are crucial to be the SAME as in ubsan runtime library.
69 /// An integer type.
70 TK_Integer = 0x0000,
71 /// A floating-point type.
72 TK_Float = 0x0001,
73 /// An _BitInt(N) type.
74 TK_BitInt = 0x0002,
75 /// Any other type. The value representation is unspecified.
76 TK_Unknown = 0xffff
77};
78
79//===--------------------------------------------------------------------===//
80// Miscellaneous Helper Methods
81//===--------------------------------------------------------------------===//
82
83/// CreateTempAlloca - This creates a alloca and inserts it into the entry
84/// block.
86CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
87 const Twine &Name,
88 llvm::Value *ArraySize) {
89 auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
90 Alloca->setAlignment(Align.getAsAlign());
91 return RawAddress(Alloca, Ty, Align, KnownNonNull);
92}
93
94/// CreateTempAlloca - This creates a alloca and inserts it into the entry
95/// block. The alloca is casted to default address space if necessary.
97 const Twine &Name,
98 llvm::Value *ArraySize,
99 RawAddress *AllocaAddr) {
100 auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
101 if (AllocaAddr)
102 *AllocaAddr = Alloca;
103 llvm::Value *V = Alloca.getPointer();
104 // Alloca always returns a pointer in alloca address space, which may
105 // be different from the type defined by the language. For example,
106 // in C++ the auto variables are in the default address space. Therefore
107 // cast alloca to the default address space when necessary.
109 auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
110 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
111 // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
112 // otherwise alloca is inserted at the current insertion point of the
113 // builder.
114 if (!ArraySize)
115 Builder.SetInsertPoint(getPostAllocaInsertPoint());
118 Builder.getPtrTy(DestAddrSpace), /*non-null*/ true);
119 }
120
121 return RawAddress(V, Ty, Align, KnownNonNull);
122}
123
124/// CreateTempAlloca - This creates an alloca and inserts it into the entry
125/// block if \p ArraySize is nullptr, otherwise inserts it at the current
126/// insertion point of the builder.
127llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
128 const Twine &Name,
129 llvm::Value *ArraySize) {
130 llvm::AllocaInst *Alloca;
131 if (ArraySize)
132 Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
133 else
134 Alloca =
135 new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
136 ArraySize, Name, AllocaInsertPt->getIterator());
137 if (Allocas) {
138 Allocas->Add(Alloca);
139 }
140 return Alloca;
141}
142
143/// CreateDefaultAlignTempAlloca - This creates an alloca with the
144/// default alignment of the corresponding LLVM type, which is *not*
145/// guaranteed to be related in any way to the expected alignment of
146/// an AST type that might have been lowered to Ty.
148 const Twine &Name) {
149 CharUnits Align =
150 CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
151 return CreateTempAlloca(Ty, Align, Name);
152}
153
156 return CreateTempAlloca(ConvertType(Ty), Align, Name);
157}
158
160 RawAddress *Alloca) {
161 // FIXME: Should we prefer the preferred type alignment here?
162 return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
163}
164
166 const Twine &Name,
167 RawAddress *Alloca) {
169 /*ArraySize=*/nullptr, Alloca);
170
171 if (Ty->isConstantMatrixType()) {
172 auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
173 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
174 ArrayTy->getNumElements());
175
176 Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
178 }
179 return Result;
180}
181
183 CharUnits Align,
184 const Twine &Name) {
185 return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
186}
187
189 const Twine &Name) {
190 return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
191 Name);
192}
193
194/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
195/// expression and compare the result against zero, returning an Int1Ty value.
196llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
197 PGO.setCurrentStmt(E);
198 if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
199 llvm::Value *MemPtr = EmitScalarExpr(E);
200 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
201 }
202
203 QualType BoolTy = getContext().BoolTy;
205 CGFPOptionsRAII FPOptsRAII(*this, E);
206 if (!E->getType()->isAnyComplexType())
207 return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
208
210 Loc);
211}
212
213/// EmitIgnoredExpr - Emit code to compute the specified expression,
214/// ignoring the result.
216 if (E->isPRValue())
217 return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);
218
219 // if this is a bitfield-resulting conditional operator, we can special case
220 // emit this. The normal 'EmitLValue' version of this is particularly
221 // difficult to codegen for, since creating a single "LValue" for two
222 // different sized arguments here is not particularly doable.
223 if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
225 if (CondOp->getObjectKind() == OK_BitField)
226 return EmitIgnoredConditionalOperator(CondOp);
227 }
228
229 // Just emit it as an l-value and drop the result.
230 EmitLValue(E);
231}
232
233/// EmitAnyExpr - Emit code to compute the specified expression which
234/// can have any type. The result is returned as an RValue struct.
235/// If this is an aggregate expression, AggSlot indicates where the
236/// result should be returned.
238 AggValueSlot aggSlot,
239 bool ignoreResult) {
240 switch (getEvaluationKind(E->getType())) {
241 case TEK_Scalar:
242 return RValue::get(EmitScalarExpr(E, ignoreResult));
243 case TEK_Complex:
244 return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
245 case TEK_Aggregate:
246 if (!ignoreResult && aggSlot.isIgnored())
247 aggSlot = CreateAggTemp(E->getType(), "agg-temp");
248 EmitAggExpr(E, aggSlot);
249 return aggSlot.asRValue();
250 }
251 llvm_unreachable("bad evaluation kind");
252}
253
254/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
255/// always be accessible even if no aggregate location is provided.
258
260 AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
261 return EmitAnyExpr(E, AggSlot);
262}
263
264/// EmitAnyExprToMem - Evaluate an expression into a given memory
265/// location.
267 Address Location,
268 Qualifiers Quals,
269 bool IsInit) {
270 // FIXME: This function should take an LValue as an argument.
271 switch (getEvaluationKind(E->getType())) {
272 case TEK_Complex:
274 /*isInit*/ false);
275 return;
276
277 case TEK_Aggregate: {
278 EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
283 return;
284 }
285
286 case TEK_Scalar: {
287 RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
288 LValue LV = MakeAddrLValue(Location, E->getType());
290 return;
291 }
292 }
293 llvm_unreachable("bad evaluation kind");
294}
295
297 const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed) {
298 QualType Type = LV.getType();
299 switch (getEvaluationKind(Type)) {
300 case TEK_Complex:
301 EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
302 return;
303 case TEK_Aggregate:
307 AggValueSlot::MayOverlap, IsZeroed));
308 return;
309 case TEK_Scalar:
310 if (LV.isSimple())
311 EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
312 else
314 return;
315 }
316 llvm_unreachable("bad evaluation kind");
317}
318
319static void
321 const Expr *E, Address ReferenceTemporary) {
322 // Objective-C++ ARC:
323 // If we are binding a reference to a temporary that has ownership, we
324 // need to perform retain/release operations on the temporary.
325 //
326 // FIXME: This should be looking at E, not M.
327 if (auto Lifetime = M->getType().getObjCLifetime()) {
328 switch (Lifetime) {
331 // Carry on to normal cleanup handling.
332 break;
333
335 // Nothing to do; cleaned up by an autorelease pool.
336 return;
337
340 switch (StorageDuration Duration = M->getStorageDuration()) {
341 case SD_Static:
342 // Note: we intentionally do not register a cleanup to release
343 // the object on program termination.
344 return;
345
346 case SD_Thread:
347 // FIXME: We should probably register a cleanup in this case.
348 return;
349
350 case SD_Automatic:
354 if (Lifetime == Qualifiers::OCL_Strong) {
355 const ValueDecl *VD = M->getExtendingDecl();
356 bool Precise = isa_and_nonnull<VarDecl>(VD) &&
357 VD->hasAttr<ObjCPreciseLifetimeAttr>();
361 } else {
362 // __weak objects always get EH cleanups; otherwise, exceptions
363 // could cause really nasty crashes instead of mere leaks.
366 }
367 if (Duration == SD_FullExpression)
368 CGF.pushDestroy(CleanupKind, ReferenceTemporary,
369 M->getType(), *Destroy,
371 else
372 CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
373 M->getType(),
374 *Destroy, CleanupKind & EHCleanup);
375 return;
376
377 case SD_Dynamic:
378 llvm_unreachable("temporary cannot have dynamic storage duration");
379 }
380 llvm_unreachable("unknown storage duration");
381 }
382 }
383
384 CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
385 if (const RecordType *RT =
387 // Get the destructor for the reference temporary.
388 auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
389 if (!ClassDecl->hasTrivialDestructor())
390 ReferenceTemporaryDtor = ClassDecl->getDestructor();
391 }
392
393 if (!ReferenceTemporaryDtor)
394 return;
395
396 // Call the destructor for the temporary.
397 switch (M->getStorageDuration()) {
398 case SD_Static:
399 case SD_Thread: {
400 llvm::FunctionCallee CleanupFn;
401 llvm::Constant *CleanupArg;
402 if (E->getType()->isArrayType()) {
404 ReferenceTemporary, E->getType(),
406 dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
407 CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
408 } else {
409 CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
410 GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
411 CleanupArg = cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
412 }
414 CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
415 break;
416 }
417
419 CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
421 CGF.getLangOpts().Exceptions);
422 break;
423
424 case SD_Automatic:
426 ReferenceTemporary, E->getType(),
428 CGF.getLangOpts().Exceptions);
429 break;
430
431 case SD_Dynamic:
432 llvm_unreachable("temporary cannot have dynamic storage duration");
433 }
434}
435
438 const Expr *Inner,
439 RawAddress *Alloca = nullptr) {
440 auto &TCG = CGF.getTargetHooks();
441 switch (M->getStorageDuration()) {
443 case SD_Automatic: {
444 // If we have a constant temporary array or record try to promote it into a
445 // constant global under the same rules a normal constant would've been
446 // promoted. This is easier on the optimizer and generally emits fewer
447 // instructions.
448 QualType Ty = Inner->getType();
449 if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
450 (Ty->isArrayType() || Ty->isRecordType()) &&
451 Ty.isConstantStorage(CGF.getContext(), true, false))
452 if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
453 auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
454 auto *GV = new llvm::GlobalVariable(
455 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
456 llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
457 llvm::GlobalValue::NotThreadLocal,
459 CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
460 GV->setAlignment(alignment.getAsAlign());
461 llvm::Constant *C = GV;
462 if (AS != LangAS::Default)
463 C = TCG.performAddrSpaceCast(
464 CGF.CGM, GV, AS, LangAS::Default,
465 llvm::PointerType::get(
466 CGF.getLLVMContext(),
468 // FIXME: Should we put the new global into a COMDAT?
469 return RawAddress(C, GV->getValueType(), alignment);
470 }
471 return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
472 }
473 case SD_Thread:
474 case SD_Static:
475 return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
476
477 case SD_Dynamic:
478 llvm_unreachable("temporary can't have dynamic storage duration");
479 }
480 llvm_unreachable("unknown storage duration");
481}
482
483/// Helper method to check if the underlying ABI is AAPCS
484static bool isAAPCS(const TargetInfo &TargetInfo) {
485 return TargetInfo.getABI().starts_with("aapcs");
486}
487
490 const Expr *E = M->getSubExpr();
491
492 assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
493 !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
494 "Reference should never be pseudo-strong!");
495
496 // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
497 // as that will cause the lifetime adjustment to be lost for ARC
498 auto ownership = M->getType().getObjCLifetime();
499 if (ownership != Qualifiers::OCL_None &&
500 ownership != Qualifiers::OCL_ExplicitNone) {
502 if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
503 llvm::Type *Ty = ConvertTypeForMem(E->getType());
504 Object = Object.withElementType(Ty);
505
506 // createReferenceTemporary will promote the temporary to a global with a
507 // constant initializer if it can. It can only do this to a value of
508 // ARC-manageable type if the value is global and therefore "immune" to
509 // ref-counting operations. Therefore we have no need to emit either a
510 // dynamic initialization or a cleanup and we can just return the address
511 // of the temporary.
512 if (Var->hasInitializer())
513 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
514
515 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
516 }
517 LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
519
520 switch (getEvaluationKind(E->getType())) {
521 default: llvm_unreachable("expected scalar or aggregate expression");
522 case TEK_Scalar:
523 EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
524 break;
525 case TEK_Aggregate: {
532 break;
533 }
534 }
535
536 pushTemporaryCleanup(*this, M, E, Object);
537 return RefTempDst;
538 }
539
542 E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
543
544 for (const auto &Ignored : CommaLHSs)
545 EmitIgnoredExpr(Ignored);
546
547 if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
548 if (opaque->getType()->isRecordType()) {
549 assert(Adjustments.empty());
550 return EmitOpaqueValueLValue(opaque);
551 }
552 }
553
554 // Create and initialize the reference temporary.
555 RawAddress Alloca = Address::invalid();
556 RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
557 if (auto *Var = dyn_cast<llvm::GlobalVariable>(
558 Object.getPointer()->stripPointerCasts())) {
559 llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
560 Object = Object.withElementType(TemporaryType);
561 // If the temporary is a global and has a constant initializer or is a
562 // constant temporary that we promoted to a global, we may have already
563 // initialized it.
564 if (!Var->hasInitializer()) {
565 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
566 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
567 }
568 } else {
569 switch (M->getStorageDuration()) {
570 case SD_Automatic:
571 if (auto *Size = EmitLifetimeStart(
572 CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
573 Alloca.getPointer())) {
574 pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
575 Alloca, Size);
576 }
577 break;
578
579 case SD_FullExpression: {
580 if (!ShouldEmitLifetimeMarkers)
581 break;
582
583 // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
584 // marker. Instead, start the lifetime of a conditional temporary earlier
585 // so that it's unconditional. Don't do this with sanitizers which need
586 // more precise lifetime marks. However when inside an "await.suspend"
587 // block, we should always avoid conditional cleanup because it creates
588 // boolean marker that lives across await_suspend, which can destroy coro
589 // frame.
590 ConditionalEvaluation *OldConditional = nullptr;
591 CGBuilderTy::InsertPoint OldIP;
593 ((!SanOpts.has(SanitizerKind::HWAddress) &&
594 !SanOpts.has(SanitizerKind::Memory) &&
595 !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
596 inSuspendBlock())) {
597 OldConditional = OutermostConditional;
598 OutermostConditional = nullptr;
599
600 OldIP = Builder.saveIP();
601 llvm::BasicBlock *Block = OldConditional->getStartingBlock();
602 Builder.restoreIP(CGBuilderTy::InsertPoint(
603 Block, llvm::BasicBlock::iterator(Block->back())));
604 }
605
606 if (auto *Size = EmitLifetimeStart(
607 CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
608 Alloca.getPointer())) {
609 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
610 Size);
611 }
612
613 if (OldConditional) {
614 OutermostConditional = OldConditional;
615 Builder.restoreIP(OldIP);
616 }
617 break;
618 }
619
620 default:
621 break;
622 }
623 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
624 }
625 pushTemporaryCleanup(*this, M, E, Object);
626
627 // Perform derived-to-base casts and/or field accesses, to get from the
628 // temporary object we created (and, potentially, for which we extended
629 // the lifetime) to the subobject we're binding the reference to.
630 for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
631 switch (Adjustment.Kind) {
633 Object =
634 GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
635 Adjustment.DerivedToBase.BasePath->path_begin(),
636 Adjustment.DerivedToBase.BasePath->path_end(),
637 /*NullCheckValue=*/ false, E->getExprLoc());
638 break;
639
642 LV = EmitLValueForField(LV, Adjustment.Field);
643 assert(LV.isSimple() &&
644 "materialized temporary field is not a simple lvalue");
645 Object = LV.getAddress();
646 break;
647 }
648
650 llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
652 Adjustment.Ptr.MPT);
653 break;
654 }
655 }
656 }
657
658 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
659}
660
661RValue
663 // Emit the expression as an lvalue.
664 LValue LV = EmitLValue(E);
665 assert(LV.isSimple());
666 llvm::Value *Value = LV.getPointer(*this);
667
669 // C++11 [dcl.ref]p5 (as amended by core issue 453):
670 // If a glvalue to which a reference is directly bound designates neither
671 // an existing object or function of an appropriate type nor a region of
672 // storage of suitable size and alignment to contain an object of the
673 // reference's type, the behavior is undefined.
674 QualType Ty = E->getType();
676 }
677
678 return RValue::get(Value);
679}
680
681
682/// getAccessedFieldNo - Given an encoded value and a result number, return the
683/// input field number being accessed.
684unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
685 const llvm::Constant *Elts) {
686 return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
687 ->getZExtValue();
688}
689
690static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
691 llvm::Value *Ptr) {
692 llvm::Value *A0 =
693 Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));
694 llvm::Value *A1 =
695 Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));
696 return Builder.CreateXor(Acc, A1);
697}
698
699bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
700 return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
702}
703
704bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
706 return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
707 (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
710}
711
713 return SanOpts.has(SanitizerKind::Null) ||
714 SanOpts.has(SanitizerKind::Alignment) ||
715 SanOpts.has(SanitizerKind::ObjectSize) ||
716 SanOpts.has(SanitizerKind::Vptr);
717}
718
720 llvm::Value *Ptr, QualType Ty,
721 CharUnits Alignment,
722 SanitizerSet SkippedChecks,
723 llvm::Value *ArraySize) {
725 return;
726
727 // Don't check pointers outside the default address space. The null check
728 // isn't correct, the object-size check isn't supported by LLVM, and we can't
729 // communicate the addresses to the runtime handler for the vptr check.
730 if (Ptr->getType()->getPointerAddressSpace())
731 return;
732
733 // Don't check pointers to volatile data. The behavior here is implementation-
734 // defined.
735 if (Ty.isVolatileQualified())
736 return;
737
738 SanitizerScope SanScope(this);
739
741 Checks;
742 llvm::BasicBlock *Done = nullptr;
743
744 // Quickly determine whether we have a pointer to an alloca. It's possible
745 // to skip null checks, and some alignment checks, for these pointers. This
746 // can reduce compile-time significantly.
747 auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());
748
749 llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
750 llvm::Value *IsNonNull = nullptr;
751 bool IsGuaranteedNonNull =
752 SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
753 bool AllowNullPointers = isNullPointerAllowed(TCK);
754 if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
755 !IsGuaranteedNonNull) {
756 // The glvalue must not be an empty glvalue.
757 IsNonNull = Builder.CreateIsNotNull(Ptr);
758
759 // The IR builder can constant-fold the null check if the pointer points to
760 // a constant.
761 IsGuaranteedNonNull = IsNonNull == True;
762
763 // Skip the null check if the pointer is known to be non-null.
764 if (!IsGuaranteedNonNull) {
765 if (AllowNullPointers) {
766 // When performing pointer casts, it's OK if the value is null.
767 // Skip the remaining checks in that case.
768 Done = createBasicBlock("null");
769 llvm::BasicBlock *Rest = createBasicBlock("not.null");
770 Builder.CreateCondBr(IsNonNull, Rest, Done);
771 EmitBlock(Rest);
772 } else {
773 Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::SO_Null));
774 }
775 }
776 }
777
778 if (SanOpts.has(SanitizerKind::ObjectSize) &&
779 !SkippedChecks.has(SanitizerKind::ObjectSize) &&
780 !Ty->isIncompleteType()) {
782 llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
783 if (ArraySize)
784 Size = Builder.CreateMul(Size, ArraySize);
785
786 // Degenerate case: new X[0] does not need an objectsize check.
787 llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
788 if (!ConstantSize || !ConstantSize->isNullValue()) {
789 // The glvalue must refer to a large enough storage region.
790 // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
791 // to check this.
792 // FIXME: Get object address space
793 llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
794 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
795 llvm::Value *Min = Builder.getFalse();
796 llvm::Value *NullIsUnknown = Builder.getFalse();
797 llvm::Value *Dynamic = Builder.getFalse();
798 llvm::Value *LargeEnough = Builder.CreateICmpUGE(
799 Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
800 Checks.push_back(
801 std::make_pair(LargeEnough, SanitizerKind::SO_ObjectSize));
802 }
803 }
804
805 llvm::MaybeAlign AlignVal;
806 llvm::Value *PtrAsInt = nullptr;
807
808 if (SanOpts.has(SanitizerKind::Alignment) &&
809 !SkippedChecks.has(SanitizerKind::Alignment)) {
810 AlignVal = Alignment.getAsMaybeAlign();
811 if (!Ty->isIncompleteType() && !AlignVal)
812 AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
813 /*ForPointeeType=*/true)
815
816 // The glvalue must be suitably aligned.
817 if (AlignVal && *AlignVal > llvm::Align(1) &&
818 (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
819 PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
820 llvm::Value *Align = Builder.CreateAnd(
821 PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
822 llvm::Value *Aligned =
823 Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
824 if (Aligned != True)
825 Checks.push_back(std::make_pair(Aligned, SanitizerKind::SO_Alignment));
826 }
827 }
828
829 if (Checks.size() > 0) {
830 llvm::Constant *StaticData[] = {
832 llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
833 llvm::ConstantInt::get(Int8Ty, TCK)};
834 EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
835 PtrAsInt ? PtrAsInt : Ptr);
836 }
837
838 // If possible, check that the vptr indicates that there is a subobject of
839 // type Ty at offset zero within this object.
840 //
841 // C++11 [basic.life]p5,6:
842 // [For storage which does not refer to an object within its lifetime]
843 // The program has undefined behavior if:
844 // -- the [pointer or glvalue] is used to access a non-static data member
845 // or call a non-static member function
846 if (SanOpts.has(SanitizerKind::Vptr) &&
847 !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
848 // Ensure that the pointer is non-null before loading it. If there is no
849 // compile-time guarantee, reuse the run-time null check or emit a new one.
850 if (!IsGuaranteedNonNull) {
851 if (!IsNonNull)
852 IsNonNull = Builder.CreateIsNotNull(Ptr);
853 if (!Done)
854 Done = createBasicBlock("vptr.null");
855 llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
856 Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
857 EmitBlock(VptrNotNull);
858 }
859
860 // Compute a deterministic hash of the mangled name of the type.
861 SmallString<64> MangledName;
862 llvm::raw_svector_ostream Out(MangledName);
864 Out);
865
866 // Contained in NoSanitizeList based on the mangled type.
867 if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
868 Out.str())) {
869 // Load the vptr, and mix it with TypeHash.
870 llvm::Value *TypeHash =
871 llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));
872
873 llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
874 Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
875 llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
876 Ty->getAsCXXRecordDecl(),
878 VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);
879
880 llvm::Value *Hash =
881 emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
882 Hash = Builder.CreateTrunc(Hash, IntPtrTy);
883
884 // Look the hash up in our cache.
885 const int CacheSize = 128;
886 llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
887 llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
888 "__ubsan_vptr_type_cache");
889 llvm::Value *Slot = Builder.CreateAnd(Hash,
890 llvm::ConstantInt::get(IntPtrTy,
891 CacheSize-1));
892 llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
893 llvm::Value *CacheVal = Builder.CreateAlignedLoad(
894 IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
896
897 // If the hash isn't in the cache, call a runtime handler to perform the
898 // hard work of checking whether the vptr is for an object of the right
899 // type. This will either fill in the cache and return, or produce a
900 // diagnostic.
901 llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
902 llvm::Constant *StaticData[] = {
906 llvm::ConstantInt::get(Int8Ty, TCK)
907 };
908 llvm::Value *DynamicData[] = { Ptr, Hash };
909 EmitCheck(std::make_pair(EqualHash, SanitizerKind::SO_Vptr),
910 SanitizerHandler::DynamicTypeCacheMiss, StaticData,
911 DynamicData);
912 }
913 }
914
915 if (Done) {
916 Builder.CreateBr(Done);
917 EmitBlock(Done);
918 }
919}
920
922 QualType EltTy) {
924 uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
925 if (!EltSize)
926 return nullptr;
927
928 auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
929 if (!ArrayDeclRef)
930 return nullptr;
931
932 auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
933 if (!ParamDecl)
934 return nullptr;
935
936 auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
937 if (!POSAttr)
938 return nullptr;
939
940 // Don't load the size if it's a lower bound.
941 int POSType = POSAttr->getType();
942 if (POSType != 0 && POSType != 1)
943 return nullptr;
944
945 // Find the implicit size parameter.
946 auto PassedSizeIt = SizeArguments.find(ParamDecl);
947 if (PassedSizeIt == SizeArguments.end())
948 return nullptr;
949
950 const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
951 assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
952 Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
953 llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
954 C.getSizeType(), E->getExprLoc());
955 llvm::Value *SizeOfElement =
956 llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
957 return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
958}
959
960/// If Base is known to point to the start of an array, return the length of
961/// that array. Return 0 if the length cannot be determined.
962static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
963 const Expr *Base,
964 QualType &IndexedType,
966 StrictFlexArraysLevel) {
967 // For the vector indexing extension, the bound is the number of elements.
968 if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
969 IndexedType = Base->getType();
970 return CGF.Builder.getInt32(VT->getNumElements());
971 }
972
973 Base = Base->IgnoreParens();
974
975 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
976 if (CE->getCastKind() == CK_ArrayToPointerDecay &&
977 !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
978 StrictFlexArraysLevel)) {
979 CodeGenFunction::SanitizerScope SanScope(&CGF);
980
981 IndexedType = CE->getSubExpr()->getType();
982 const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
983 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
984 return CGF.Builder.getInt(CAT->getSize());
985
986 if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
987 return CGF.getVLASize(VAT).NumElts;
988 // Ignore pass_object_size here. It's not applicable on decayed pointers.
989 }
990 }
991
992 CodeGenFunction::SanitizerScope SanScope(&CGF);
993
994 QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
995 if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
996 IndexedType = Base->getType();
997 return POS;
998 }
999
1000 return nullptr;
1001}
1002
1003namespace {
1004
1005/// \p StructAccessBase returns the base \p Expr of a field access. It returns
1006/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
1007///
1008/// p in p-> a.b.c
1009///
1010/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
1011/// looking for:
1012///
1013/// struct s {
1014/// struct s *ptr;
1015/// int count;
1016/// char array[] __attribute__((counted_by(count)));
1017/// };
1018///
1019/// If we have an expression like \p p->ptr->array[index], we want the
1020/// \p MemberExpr for \p p->ptr instead of \p p.
1021class StructAccessBase
1022 : public ConstStmtVisitor<StructAccessBase, const Expr *> {
1023 const RecordDecl *ExpectedRD;
1024
1025 bool IsExpectedRecordDecl(const Expr *E) const {
1026 QualType Ty = E->getType();
1027 if (Ty->isPointerType())
1028 Ty = Ty->getPointeeType();
1029 return ExpectedRD == Ty->getAsRecordDecl();
1030 }
1031
1032public:
1033 StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}
1034
1035 //===--------------------------------------------------------------------===//
1036 // Visitor Methods
1037 //===--------------------------------------------------------------------===//
1038
1039 // NOTE: If we build C++ support for counted_by, then we'll have to handle
1040 // horrors like this:
1041 //
1042 // struct S {
1043 // int x, y;
1044 // int blah[] __attribute__((counted_by(x)));
1045 // } s;
1046 //
1047 // int foo(int index, int val) {
1048 // int (S::*IHatePMDs)[] = &S::blah;
1049 // (s.*IHatePMDs)[index] = val;
1050 // }
1051
1052 const Expr *Visit(const Expr *E) {
1054 }
1055
1056 const Expr *VisitStmt(const Stmt *S) { return nullptr; }
1057
1058 // These are the types we expect to return (in order of most to least
1059 // likely):
1060 //
1061 // 1. DeclRefExpr - This is the expression for the base of the structure.
1062 // It's exactly what we want to build an access to the \p counted_by
1063 // field.
1064 // 2. MemberExpr - This is the expression that has the same \p RecordDecl
1065 // as the flexble array member's lexical enclosing \p RecordDecl. This
1066 // allows us to catch things like: "p->p->array"
1067 // 3. CompoundLiteralExpr - This is for people who create something
1068 // heretical like (struct foo has a flexible array member):
1069 //
1070 // (struct foo){ 1, 2 }.blah[idx];
1071 const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
1072 return IsExpectedRecordDecl(E) ? E : nullptr;
1073 }
1074 const Expr *VisitMemberExpr(const MemberExpr *E) {
1075 if (IsExpectedRecordDecl(E) && E->isArrow())
1076 return E;
1077 const Expr *Res = Visit(E->getBase());
1078 return !Res && IsExpectedRecordDecl(E) ? E : Res;
1079 }
1080 const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
1081 return IsExpectedRecordDecl(E) ? E : nullptr;
1082 }
1083 const Expr *VisitCallExpr(const CallExpr *E) {
1084 return IsExpectedRecordDecl(E) ? E : nullptr;
1085 }
1086
1087 const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1088 if (IsExpectedRecordDecl(E))
1089 return E;
1090 return Visit(E->getBase());
1091 }
1092 const Expr *VisitCastExpr(const CastExpr *E) {
1093 if (E->getCastKind() == CK_LValueToRValue)
1094 return IsExpectedRecordDecl(E) ? E : nullptr;
1095 return Visit(E->getSubExpr());
1096 }
1097 const Expr *VisitParenExpr(const ParenExpr *E) {
1098 return Visit(E->getSubExpr());
1099 }
1100 const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
1101 return Visit(E->getSubExpr());
1102 }
1103 const Expr *VisitUnaryDeref(const UnaryOperator *E) {
1104 return Visit(E->getSubExpr());
1105 }
1106};
1107
1108} // end anonymous namespace
1109
1111
1113 const FieldDecl *Field,
1114 RecIndicesTy &Indices) {
1115 const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
1116 int64_t FieldNo = -1;
1117 for (const FieldDecl *FD : RD->fields()) {
1118 if (!Layout.containsFieldDecl(FD))
1119 // This could happen if the field has a struct type that's empty. I don't
1120 // know why either.
1121 continue;
1122
1123 FieldNo = Layout.getLLVMFieldNo(FD);
1124 if (FD == Field) {
1125 Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1126 return true;
1127 }
1128
1129 QualType Ty = FD->getType();
1130 if (Ty->isRecordType()) {
1131 if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
1132 if (RD->isUnion())
1133 FieldNo = 0;
1134 Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
1135 return true;
1136 }
1137 }
1138 }
1139
1140 return false;
1141}
1142
1144 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1145 const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();
1146
1147 // Find the base struct expr (i.e. p in p->a.b.c.d).
1148 const Expr *StructBase = StructAccessBase(RD).Visit(Base);
1149 if (!StructBase || StructBase->HasSideEffects(getContext()))
1150 return nullptr;
1151
1152 llvm::Value *Res = nullptr;
1153 if (StructBase->getType()->isPointerType()) {
1154 LValueBaseInfo BaseInfo;
1155 TBAAAccessInfo TBAAInfo;
1156 Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
1157 Res = Addr.emitRawPointer(*this);
1158 } else if (StructBase->isLValue()) {
1159 LValue LV = EmitLValue(StructBase);
1160 Address Addr = LV.getAddress();
1161 Res = Addr.emitRawPointer(*this);
1162 } else {
1163 return nullptr;
1164 }
1165
1166 RecIndicesTy Indices;
1167 getGEPIndicesToField(*this, RD, CountDecl, Indices);
1168 if (Indices.empty())
1169 return nullptr;
1170
1171 Indices.push_back(Builder.getInt32(0));
1173 ConvertType(QualType(RD->getTypeForDecl(), 0)), Res,
1174 RecIndicesTy(llvm::reverse(Indices)), "counted_by.gep");
1175}
1176
1177/// This method is typically called in contexts where we can't generate
1178/// side-effects, like in __builtin_dynamic_object_size. When finding
1179/// expressions, only choose those that have either already been emitted or can
1180/// be loaded without side-effects.
1181///
1182/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
1183/// within the top-level struct.
1184/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
1186 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1187 if (llvm::Value *GEP = GetCountedByFieldExprGEP(Base, FAMDecl, CountDecl))
1188 return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), GEP,
1189 getIntAlign(), "counted_by.load");
1190 return nullptr;
1191}
1192
1193void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
1194 llvm::Value *Index, QualType IndexType,
1195 bool Accessed) {
1196 assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
1197 "should not be called unless adding bounds checks");
1198 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1199 getLangOpts().getStrictFlexArraysLevel();
1200 QualType IndexedType;
1201 llvm::Value *Bound =
1202 getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);
1203
1204 EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
1205}
1206
1207void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
1208 llvm::Value *Index,
1209 QualType IndexType,
1210 QualType IndexedType, bool Accessed) {
1211 if (!Bound)
1212 return;
1213
1214 SanitizerScope SanScope(this);
1215
1216 bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
1217 llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
1218 llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
1219
1220 llvm::Constant *StaticData[] = {
1222 EmitCheckTypeDescriptor(IndexedType),
1223 EmitCheckTypeDescriptor(IndexType)
1224 };
1225 llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
1226 : Builder.CreateICmpULE(IndexVal, BoundVal);
1227 EmitCheck(std::make_pair(Check, SanitizerKind::SO_ArrayBounds),
1228 SanitizerHandler::OutOfBounds, StaticData, Index);
1229}
1230
1233 bool isInc, bool isPre) {
1235
1236 llvm::Value *NextVal;
1237 if (isa<llvm::IntegerType>(InVal.first->getType())) {
1238 uint64_t AmountVal = isInc ? 1 : -1;
1239 NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
1240
1241 // Add the inc/dec to the real part.
1242 NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1243 } else {
1244 QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
1245 llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
1246 if (!isInc)
1247 FVal.changeSign();
1248 NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
1249
1250 // Add the inc/dec to the real part.
1251 NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1252 }
1253
1254 ComplexPairTy IncVal(NextVal, InVal.second);
1255
1256 // Store the updated result through the lvalue.
1257 EmitStoreOfComplex(IncVal, LV, /*init*/ false);
1258 if (getLangOpts().OpenMP)
1260 E->getSubExpr());
1261
1262 // If this is a postinc, return the value read from memory, otherwise use the
1263 // updated value.
1264 return isPre ? IncVal : InVal;
1265}
1266
1268 CodeGenFunction *CGF) {
1269 // Bind VLAs in the cast type.
1270 if (CGF && E->getType()->isVariablyModifiedType())
1272
1273 if (CGDebugInfo *DI = getModuleDebugInfo())
1274 DI->EmitExplicitCastType(E->getType());
1275}
1276
1277//===----------------------------------------------------------------------===//
1278// LValue Expression Emission
1279//===----------------------------------------------------------------------===//
1280
1282 TBAAAccessInfo *TBAAInfo,
1283 KnownNonNull_t IsKnownNonNull,
1284 CodeGenFunction &CGF) {
1285 // We allow this with ObjC object pointers because of fragile ABIs.
1286 assert(E->getType()->isPointerType() ||
1288 E = E->IgnoreParens();
1289
1290 // Casts:
1291 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
1292 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
1293 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
1294
1295 switch (CE->getCastKind()) {
1296 // Non-converting casts (but not C's implicit conversion from void*).
1297 case CK_BitCast:
1298 case CK_NoOp:
1299 case CK_AddressSpaceConversion:
1300 if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
1301 if (PtrTy->getPointeeType()->isVoidType())
1302 break;
1303
1304 LValueBaseInfo InnerBaseInfo;
1305 TBAAAccessInfo InnerTBAAInfo;
1307 CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
1308 if (BaseInfo) *BaseInfo = InnerBaseInfo;
1309 if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
1310
1311 if (isa<ExplicitCastExpr>(CE)) {
1312 LValueBaseInfo TargetTypeBaseInfo;
1313 TBAAAccessInfo TargetTypeTBAAInfo;
1315 E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
1316 if (TBAAInfo)
1317 *TBAAInfo =
1318 CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
1319 // If the source l-value is opaque, honor the alignment of the
1320 // casted-to type.
1321 if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
1322 if (BaseInfo)
1323 BaseInfo->mergeForCast(TargetTypeBaseInfo);
1324 Addr.setAlignment(Align);
1325 }
1326 }
1327
1328 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
1329 CE->getCastKind() == CK_BitCast) {
1330 if (auto PT = E->getType()->getAs<PointerType>())
1331 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
1332 /*MayBeNull=*/true,
1334 CE->getBeginLoc());
1335 }
1336
1337 llvm::Type *ElemTy =
1339 Addr = Addr.withElementType(ElemTy);
1340 if (CE->getCastKind() == CK_AddressSpaceConversion)
1341 Addr = CGF.Builder.CreateAddrSpaceCast(
1342 Addr, CGF.ConvertType(E->getType()), ElemTy);
1343 return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
1344 CE->getType());
1345 }
1346 break;
1347
1348 // Array-to-pointer decay.
1349 case CK_ArrayToPointerDecay:
1350 return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
1351
1352 // Derived-to-base conversions.
1353 case CK_UncheckedDerivedToBase:
1354 case CK_DerivedToBase: {
1355 // TODO: Support accesses to members of base classes in TBAA. For now, we
1356 // conservatively pretend that the complete object is of the base class
1357 // type.
1358 if (TBAAInfo)
1359 *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
1361 CE->getSubExpr(), BaseInfo, nullptr,
1362 (KnownNonNull_t)(IsKnownNonNull ||
1363 CE->getCastKind() == CK_UncheckedDerivedToBase));
1364 auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
1365 return CGF.GetAddressOfBaseClass(
1366 Addr, Derived, CE->path_begin(), CE->path_end(),
1367 CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
1368 }
1369
1370 // TODO: Is there any reason to treat base-to-derived conversions
1371 // specially?
1372 default:
1373 break;
1374 }
1375 }
1376
1377 // Unary &.
1378 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
1379 if (UO->getOpcode() == UO_AddrOf) {
1380 LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
1381 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1382 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1383 return LV.getAddress();
1384 }
1385 }
1386
1387 // std::addressof and variants.
1388 if (auto *Call = dyn_cast<CallExpr>(E)) {
1389 switch (Call->getBuiltinCallee()) {
1390 default:
1391 break;
1392 case Builtin::BIaddressof:
1393 case Builtin::BI__addressof:
1394 case Builtin::BI__builtin_addressof: {
1395 LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
1396 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1397 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1398 return LV.getAddress();
1399 }
1400 }
1401 }
1402
1403 // TODO: conditional operators, comma.
1404
1405 // Otherwise, use the alignment of the type.
1408 /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
1409}
1410
1411/// EmitPointerWithAlignment - Given an expression of pointer type, try to
1412/// derive a more accurate bound on the alignment of the pointer.
1414 const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
1415 KnownNonNull_t IsKnownNonNull) {
1416 Address Addr =
1417 ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
1418 if (IsKnownNonNull && !Addr.isKnownNonNull())
1419 Addr.setKnownNonNull();
1420 return Addr;
1421}
1422
1424 llvm::Value *V = RV.getScalarVal();
1425 if (auto MPT = T->getAs<MemberPointerType>())
1426 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
1427 return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
1428}
1429
1431 if (Ty->isVoidType())
1432 return RValue::get(nullptr);
1433
1434 switch (getEvaluationKind(Ty)) {
1435 case TEK_Complex: {
1436 llvm::Type *EltTy =
1438 llvm::Value *U = llvm::UndefValue::get(EltTy);
1439 return RValue::getComplex(std::make_pair(U, U));
1440 }
1441
1442 // If this is a use of an undefined aggregate type, the aggregate must have an
1443 // identifiable address. Just because the contents of the value are undefined
1444 // doesn't mean that the address can't be taken and compared.
1445 case TEK_Aggregate: {
1446 Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
1447 return RValue::getAggregate(DestPtr);
1448 }
1449
1450 case TEK_Scalar:
1451 return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
1452 }
1453 llvm_unreachable("bad evaluation kind");
1454}
1455
1457 const char *Name) {
1458 ErrorUnsupported(E, Name);
1459 return GetUndefRValue(E->getType());
1460}
1461
1463 const char *Name) {
1464 ErrorUnsupported(E, Name);
1465 llvm::Type *ElTy = ConvertType(E->getType());
1466 llvm::Type *Ty = UnqualPtrTy;
1467 return MakeAddrLValue(
1468 Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
1469}
1470
1471bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
1472 const Expr *Base = Obj;
1473 while (!isa<CXXThisExpr>(Base)) {
1474 // The result of a dynamic_cast can be null.
1475 if (isa<CXXDynamicCastExpr>(Base))
1476 return false;
1477
1478 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1479 Base = CE->getSubExpr();
1480 } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1481 Base = PE->getSubExpr();
1482 } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1483 if (UO->getOpcode() == UO_Extension)
1484 Base = UO->getSubExpr();
1485 else
1486 return false;
1487 } else {
1488 return false;
1489 }
1490 }
1491 return true;
1492}
1493
1494LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
1495 LValue LV;
1496 if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
1497 LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
1498 else
1499 LV = EmitLValue(E);
1500 if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
1501 SanitizerSet SkippedChecks;
1502 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
1503 bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
1504 if (IsBaseCXXThis)
1505 SkippedChecks.set(SanitizerKind::Alignment, true);
1506 if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
1507 SkippedChecks.set(SanitizerKind::Null, true);
1508 }
1509 EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
1510 }
1511 return LV;
1512}
1513
1514/// EmitLValue - Emit code to compute a designator that specifies the location
1515/// of the expression.
1516///
1517/// This can return one of two things: a simple address or a bitfield reference.
1518/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1519/// an LLVM pointer type.
1520///
1521/// If this returns a bitfield reference, nothing about the pointee type of the
1522/// LLVM value is known: For example, it may not be a pointer to an integer.
1523///
1524/// If this returns a normal address, and if the lvalue's C type is fixed size,
1525/// this method guarantees that the returned pointer type will point to an LLVM
1526/// type of the same size of the lvalue's type. If the lvalue has a variable
1527/// length type, this is not possible.
1528///
1530 KnownNonNull_t IsKnownNonNull) {
1531 // Running with sufficient stack space to avoid deeply nested expressions
1532 // cause a stack overflow.
1533 LValue LV;
1535 E->getExprLoc(), [&] { LV = EmitLValueHelper(E, IsKnownNonNull); });
1536
1537 if (IsKnownNonNull && !LV.isKnownNonNull())
1538 LV.setKnownNonNull();
1539 return LV;
1540}
1541
1543 const ASTContext &Ctx) {
1544 const Expr *SE = E->getSubExpr()->IgnoreImplicit();
1545 if (isa<OpaqueValueExpr>(SE))
1546 return SE->getType();
1547 return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
1548}
1549
1550LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
1551 KnownNonNull_t IsKnownNonNull) {
1552 ApplyDebugLocation DL(*this, E);
1553 switch (E->getStmtClass()) {
1554 default: return EmitUnsupportedLValue(E, "l-value expression");
1555
1556 case Expr::ObjCPropertyRefExprClass:
1557 llvm_unreachable("cannot emit a property reference directly");
1558
1559 case Expr::ObjCSelectorExprClass:
1560 return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
1561 case Expr::ObjCIsaExprClass:
1562 return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
1563 case Expr::BinaryOperatorClass:
1564 return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
1565 case Expr::CompoundAssignOperatorClass: {
1566 QualType Ty = E->getType();
1567 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1568 Ty = AT->getValueType();
1569 if (!Ty->isAnyComplexType())
1570 return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1571 return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1572 }
1573 case Expr::CallExprClass:
1574 case Expr::CXXMemberCallExprClass:
1575 case Expr::CXXOperatorCallExprClass:
1576 case Expr::UserDefinedLiteralClass:
1577 return EmitCallExprLValue(cast<CallExpr>(E));
1578 case Expr::CXXRewrittenBinaryOperatorClass:
1579 return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
1580 IsKnownNonNull);
1581 case Expr::VAArgExprClass:
1582 return EmitVAArgExprLValue(cast<VAArgExpr>(E));
1583 case Expr::DeclRefExprClass:
1584 return EmitDeclRefLValue(cast<DeclRefExpr>(E));
1585 case Expr::ConstantExprClass: {
1586 const ConstantExpr *CE = cast<ConstantExpr>(E);
1587 if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
1589 return MakeNaturalAlignAddrLValue(Result, RetType);
1590 }
1591 return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
1592 }
1593 case Expr::ParenExprClass:
1594 return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
1595 case Expr::GenericSelectionExprClass:
1596 return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
1597 IsKnownNonNull);
1598 case Expr::PredefinedExprClass:
1599 return EmitPredefinedLValue(cast<PredefinedExpr>(E));
1600 case Expr::StringLiteralClass:
1601 return EmitStringLiteralLValue(cast<StringLiteral>(E));
1602 case Expr::ObjCEncodeExprClass:
1603 return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
1604 case Expr::PseudoObjectExprClass:
1605 return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
1606 case Expr::InitListExprClass:
1607 return EmitInitListLValue(cast<InitListExpr>(E));
1608 case Expr::CXXTemporaryObjectExprClass:
1609 case Expr::CXXConstructExprClass:
1610 return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
1611 case Expr::CXXBindTemporaryExprClass:
1612 return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
1613 case Expr::CXXUuidofExprClass:
1614 return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
1615 case Expr::LambdaExprClass:
1616 return EmitAggExprToLValue(E);
1617
1618 case Expr::ExprWithCleanupsClass: {
1619 const auto *cleanups = cast<ExprWithCleanups>(E);
1620 RunCleanupsScope Scope(*this);
1621 LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
1622 if (LV.isSimple()) {
1623 // Defend against branches out of gnu statement expressions surrounded by
1624 // cleanups.
1625 Address Addr = LV.getAddress();
1626 llvm::Value *V = Addr.getBasePointer();
1627 Scope.ForceCleanup({&V});
1628 Addr.replaceBasePointer(V);
1629 return LValue::MakeAddr(Addr, LV.getType(), getContext(),
1630 LV.getBaseInfo(), LV.getTBAAInfo());
1631 }
1632 // FIXME: Is it possible to create an ExprWithCleanups that produces a
1633 // bitfield lvalue or some other non-simple lvalue?
1634 return LV;
1635 }
1636
1637 case Expr::CXXDefaultArgExprClass: {
1638 auto *DAE = cast<CXXDefaultArgExpr>(E);
1639 CXXDefaultArgExprScope Scope(*this, DAE);
1640 return EmitLValue(DAE->getExpr(), IsKnownNonNull);
1641 }
1642 case Expr::CXXDefaultInitExprClass: {
1643 auto *DIE = cast<CXXDefaultInitExpr>(E);
1644 CXXDefaultInitExprScope Scope(*this, DIE);
1645 return EmitLValue(DIE->getExpr(), IsKnownNonNull);
1646 }
1647 case Expr::CXXTypeidExprClass:
1648 return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
1649
1650 case Expr::ObjCMessageExprClass:
1651 return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
1652 case Expr::ObjCIvarRefExprClass:
1653 return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
1654 case Expr::StmtExprClass:
1655 return EmitStmtExprLValue(cast<StmtExpr>(E));
1656 case Expr::UnaryOperatorClass:
1657 return EmitUnaryOpLValue(cast<UnaryOperator>(E));
1658 case Expr::ArraySubscriptExprClass:
1659 return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
1660 case Expr::MatrixSubscriptExprClass:
1661 return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
1662 case Expr::ArraySectionExprClass:
1663 return EmitArraySectionExpr(cast<ArraySectionExpr>(E));
1664 case Expr::ExtVectorElementExprClass:
1665 return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
1666 case Expr::CXXThisExprClass:
1668 case Expr::MemberExprClass:
1669 return EmitMemberExpr(cast<MemberExpr>(E));
1670 case Expr::CompoundLiteralExprClass:
1671 return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
1672 case Expr::ConditionalOperatorClass:
1673 return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
1674 case Expr::BinaryConditionalOperatorClass:
1675 return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
1676 case Expr::ChooseExprClass:
1677 return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
1678 case Expr::OpaqueValueExprClass:
1679 return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
1680 case Expr::SubstNonTypeTemplateParmExprClass:
1681 return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
1682 IsKnownNonNull);
1683 case Expr::ImplicitCastExprClass:
1684 case Expr::CStyleCastExprClass:
1685 case Expr::CXXFunctionalCastExprClass:
1686 case Expr::CXXStaticCastExprClass:
1687 case Expr::CXXDynamicCastExprClass:
1688 case Expr::CXXReinterpretCastExprClass:
1689 case Expr::CXXConstCastExprClass:
1690 case Expr::CXXAddrspaceCastExprClass:
1691 case Expr::ObjCBridgedCastExprClass:
1692 return EmitCastLValue(cast<CastExpr>(E));
1693
1694 case Expr::MaterializeTemporaryExprClass:
1695 return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
1696
1697 case Expr::CoawaitExprClass:
1698 return EmitCoawaitLValue(cast<CoawaitExpr>(E));
1699 case Expr::CoyieldExprClass:
1700 return EmitCoyieldLValue(cast<CoyieldExpr>(E));
1701 case Expr::PackIndexingExprClass:
1702 return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
1703 case Expr::HLSLOutArgExprClass:
1704 llvm_unreachable("cannot emit a HLSL out argument directly");
1705 }
1706}
1707
1708/// Given an object of the given canonical type, can we safely copy a
1709/// value out of it based on its initializer?
1711 assert(type.isCanonical());
1712 assert(!type->isReferenceType());
1713
1714 // Must be const-qualified but non-volatile.
1715 Qualifiers qs = type.getLocalQualifiers();
1716 if (!qs.hasConst() || qs.hasVolatile()) return false;
1717
1718 // Otherwise, all object types satisfy this except C++ classes with
1719 // mutable subobjects or non-trivial copy/destroy behavior.
1720 if (const auto *RT = dyn_cast<RecordType>(type))
1721 if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1722 if (RD->hasMutableFields() || !RD->isTrivial())
1723 return false;
1724
1725 return true;
1726}
1727
1728/// Can we constant-emit a load of a reference to a variable of the
1729/// given type? This is different from predicates like
1730/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1731/// in situations that don't necessarily satisfy the language's rules
1732/// for this (e.g. C++'s ODR-use rules). For example, we want to able
1733/// to do this with const float variables even if those variables
1734/// aren't marked 'constexpr'.
1742 type = type.getCanonicalType();
1743 if (const auto *ref = dyn_cast<ReferenceType>(type)) {
1744 if (isConstantEmittableObjectType(ref->getPointeeType()))
1746 return CEK_AsReferenceOnly;
1747 }
1749 return CEK_AsValueOnly;
1750 return CEK_None;
1751}
1752
1753/// Try to emit a reference to the given value without producing it as
1754/// an l-value. This is just an optimization, but it avoids us needing
1755/// to emit global copies of variables if they're named without triggering
1756/// a formal use in a context where we can't emit a direct reference to them,
1757/// for instance if a block or lambda or a member of a local class uses a
1758/// const int variable or constexpr variable from an enclosing function.
1759CodeGenFunction::ConstantEmission
1761 ValueDecl *value = refExpr->getDecl();
1762
1763 // The value needs to be an enum constant or a constant variable.
1765 if (isa<ParmVarDecl>(value)) {
1766 CEK = CEK_None;
1767 } else if (auto *var = dyn_cast<VarDecl>(value)) {
1768 CEK = checkVarTypeForConstantEmission(var->getType());
1769 } else if (isa<EnumConstantDecl>(value)) {
1770 CEK = CEK_AsValueOnly;
1771 } else {
1772 CEK = CEK_None;
1773 }
1774 if (CEK == CEK_None) return ConstantEmission();
1775
1776 Expr::EvalResult result;
1777 bool resultIsReference;
1778 QualType resultType;
1779
1780 // It's best to evaluate all the way as an r-value if that's permitted.
1781 if (CEK != CEK_AsReferenceOnly &&
1782 refExpr->EvaluateAsRValue(result, getContext())) {
1783 resultIsReference = false;
1784 resultType = refExpr->getType();
1785
1786 // Otherwise, try to evaluate as an l-value.
1787 } else if (CEK != CEK_AsValueOnly &&
1788 refExpr->EvaluateAsLValue(result, getContext())) {
1789 resultIsReference = true;
1790 resultType = value->getType();
1791
1792 // Failure.
1793 } else {
1794 return ConstantEmission();
1795 }
1796
1797 // In any case, if the initializer has side-effects, abandon ship.
1798 if (result.HasSideEffects)
1799 return ConstantEmission();
1800
1801 // In CUDA/HIP device compilation, a lambda may capture a reference variable
1802 // referencing a global host variable by copy. In this case the lambda should
1803 // make a copy of the value of the global host variable. The DRE of the
1804 // captured reference variable cannot be emitted as load from the host
1805 // global variable as compile time constant, since the host variable is not
1806 // accessible on device. The DRE of the captured reference variable has to be
1807 // loaded from captures.
1808 if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
1810 auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
1811 if (MD && MD->getParent()->isLambda() &&
1812 MD->getOverloadedOperator() == OO_Call) {
1813 const APValue::LValueBase &base = result.Val.getLValueBase();
1814 if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
1815 if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
1816 if (!VD->hasAttr<CUDADeviceAttr>()) {
1817 return ConstantEmission();
1818 }
1819 }
1820 }
1821 }
1822 }
1823
1824 // Emit as a constant.
1825 auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
1826 result.Val, resultType);
1827
1828 // Make sure we emit a debug reference to the global variable.
1829 // This should probably fire even for
1830 if (isa<VarDecl>(value)) {
1831 if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
1832 EmitDeclRefExprDbgValue(refExpr, result.Val);
1833 } else {
1834 assert(isa<EnumConstantDecl>(value));
1835 EmitDeclRefExprDbgValue(refExpr, result.Val);
1836 }
1837
1838 // If we emitted a reference constant, we need to dereference that.
1839 if (resultIsReference)
1841
1843}
1844
1846 const MemberExpr *ME) {
1847 if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
1848 // Try to emit static variable member expressions as DREs.
1849 return DeclRefExpr::Create(
1851 /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
1852 ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
1853 }
1854 return nullptr;
1855}
1856
1857CodeGenFunction::ConstantEmission
1860 return tryEmitAsConstant(DRE);
1861 return ConstantEmission();
1862}
1863
1865 const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
1866 assert(Constant && "not a constant");
1867 if (Constant.isReference())
1868 return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
1869 E->getExprLoc())
1870 .getScalarVal();
1871 return Constant.getValue();
1872}
1873
1874llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
1876 return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
1877 lvalue.getType(), Loc, lvalue.getBaseInfo(),
1878 lvalue.getTBAAInfo(), lvalue.isNontemporal());
1879}
1880
1882 if (Ty->isBooleanType())
1883 return true;
1884
1885 if (const EnumType *ET = Ty->getAs<EnumType>())
1886 return ET->getDecl()->getIntegerType()->isBooleanType();
1887
1888 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1889 return hasBooleanRepresentation(AT->getValueType());
1890
1891 return false;
1892}
1893
1895 llvm::APInt &Min, llvm::APInt &End,
1896 bool StrictEnums, bool IsBool) {
1897 const EnumType *ET = Ty->getAs<EnumType>();
1898 bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
1899 ET && !ET->getDecl()->isFixed();
1900 if (!IsBool && !IsRegularCPlusPlusEnum)
1901 return false;
1902
1903 if (IsBool) {
1904 Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
1905 End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
1906 } else {
1907 const EnumDecl *ED = ET->getDecl();
1908 ED->getValueRange(End, Min);
1909 }
1910 return true;
1911}
1912
1913llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
1914 llvm::APInt Min, End;
1915 if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
1917 return nullptr;
1918
1919 llvm::MDBuilder MDHelper(getLLVMContext());
1920 return MDHelper.createRange(Min, End);
1921}
1922
1925 bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
1926 bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
1927 if (!HasBoolCheck && !HasEnumCheck)
1928 return false;
1929
1930 bool IsBool = hasBooleanRepresentation(Ty) ||
1932 bool NeedsBoolCheck = HasBoolCheck && IsBool;
1933 bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
1934 if (!NeedsBoolCheck && !NeedsEnumCheck)
1935 return false;
1936
1937 // Single-bit booleans don't need to be checked. Special-case this to avoid
1938 // a bit width mismatch when handling bitfield values. This is handled by
1939 // EmitFromMemory for the non-bitfield case.
1940 if (IsBool &&
1941 cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
1942 return false;
1943
1944 if (NeedsEnumCheck &&
1945 getContext().isTypeIgnoredBySanitizer(SanitizerKind::Enum, Ty))
1946 return false;
1947
1948 llvm::APInt Min, End;
1949 if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
1950 return true;
1951
1952 auto &Ctx = getLLVMContext();
1953 SanitizerScope SanScope(this);
1954 llvm::Value *Check;
1955 --End;
1956 if (!Min) {
1957 Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
1958 } else {
1959 llvm::Value *Upper =
1960 Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
1961 llvm::Value *Lower =
1962 Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
1963 Check = Builder.CreateAnd(Upper, Lower);
1964 }
1965 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
1968 NeedsEnumCheck ? SanitizerKind::SO_Enum : SanitizerKind::SO_Bool;
1969 EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
1970 StaticArgs, EmitCheckValue(Value));
1971 return true;
1972}
1973
1974llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
1975 QualType Ty,
1977 LValueBaseInfo BaseInfo,
1978 TBAAAccessInfo TBAAInfo,
1979 bool isNontemporal) {
1980 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
1981 if (GV->isThreadLocal())
1982 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
1984
1985 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
1986 // Boolean vectors use `iN` as storage type.
1987 if (ClangVecTy->isExtVectorBoolType()) {
1988 llvm::Type *ValTy = ConvertType(Ty);
1989 unsigned ValNumElems =
1990 cast<llvm::FixedVectorType>(ValTy)->getNumElements();
1991 // Load the `iP` storage object (P is the padded vector size).
1992 auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
1993 const auto *RawIntTy = RawIntV->getType();
1994 assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
1995 // Bitcast iP --> <P x i1>.
1996 auto *PaddedVecTy = llvm::FixedVectorType::get(
1997 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
1998 llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
1999 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2000 V = emitBoolVecConversion(V, ValNumElems, "extractvec");
2001
2002 return EmitFromMemory(V, Ty);
2003 }
2004
2005 // Handle vectors of size 3 like size 4 for better performance.
2006 const llvm::Type *EltTy = Addr.getElementType();
2007 const auto *VTy = cast<llvm::FixedVectorType>(EltTy);
2008
2009 if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {
2010
2011 llvm::VectorType *vec4Ty =
2012 llvm::FixedVectorType::get(VTy->getElementType(), 4);
2013 Address Cast = Addr.withElementType(vec4Ty);
2014 // Now load value.
2015 llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
2016
2017 // Shuffle vector to get vec3.
2018 V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
2019 return EmitFromMemory(V, Ty);
2020 }
2021 }
2022
2023 // Atomic operations have to be done on integral types.
2024 LValue AtomicLValue =
2025 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2026 if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
2027 return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
2028 }
2029
2030 Addr =
2032
2033 llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
2034 if (isNontemporal) {
2035 llvm::MDNode *Node = llvm::MDNode::get(
2036 Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2037 Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2038 }
2039
2040 CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
2041
2042 if (EmitScalarRangeCheck(Load, Ty, Loc)) {
2043 // In order to prevent the optimizer from throwing away the check, don't
2044 // attach range metadata to the load.
2045 } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
2046 if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
2047 Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
2048 Load->setMetadata(llvm::LLVMContext::MD_noundef,
2049 llvm::MDNode::get(getLLVMContext(), {}));
2050 }
2051
2052 return EmitFromMemory(Load, Ty);
2053}
2054
2055/// Converts a scalar value from its primary IR type (as returned
2056/// by ConvertType) to its load/store type (as returned by
2057/// convertTypeForLoadStore).
2058llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
2059 if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
2060 llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2062 return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
2063 }
2064
2065 if (Ty->isExtVectorBoolType()) {
2066 llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2067 // Expand to the memory bit width.
2068 unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
2069 // <N x i1> --> <P x i1>.
2070 Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
2071 // <P x i1> --> iP.
2072 Value = Builder.CreateBitCast(Value, StoreTy);
2073 }
2074
2075 return Value;
2076}
2077
2078/// Converts a scalar value from its load/store type (as returned
2079/// by convertTypeForLoadStore) to its primary IR type (as returned
2080/// by ConvertType).
2081llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
2082 if (Ty->isExtVectorBoolType()) {
2083 const auto *RawIntTy = Value->getType();
2084 // Bitcast iP --> <P x i1>.
2085 auto *PaddedVecTy = llvm::FixedVectorType::get(
2086 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2087 auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
2088 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2089 llvm::Type *ValTy = ConvertType(Ty);
2090 unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2091 return emitBoolVecConversion(V, ValNumElems, "extractvec");
2092 }
2093
2094 if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
2095 llvm::Type *ResTy = ConvertType(Ty);
2096 return Builder.CreateTrunc(Value, ResTy, "loadedv");
2097 }
2098
2099 return Value;
2100}
2101
2102// Convert the pointer of \p Addr to a pointer to a vector (the value type of
2103// MatrixType), if it points to a array (the memory type of MatrixType).
2105 CodeGenFunction &CGF,
2106 bool IsVector = true) {
2107 auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
2108 if (ArrayTy && IsVector) {
2109 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
2110 ArrayTy->getNumElements());
2111
2112 return Addr.withElementType(VectorTy);
2113 }
2114 auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
2115 if (VectorTy && !IsVector) {
2116 auto *ArrayTy = llvm::ArrayType::get(
2117 VectorTy->getElementType(),
2118 cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
2119
2120 return Addr.withElementType(ArrayTy);
2121 }
2122
2123 return Addr;
2124}
2125
2126// Emit a store of a matrix LValue. This may require casting the original
2127// pointer to memory address (ArrayType) to a pointer to the value type
2128// (VectorType).
2129static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
2130 bool isInit, CodeGenFunction &CGF) {
2131 Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
2132 value->getType()->isVectorTy());
2133 CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
2134 lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
2135 lvalue.isNontemporal());
2136}
2137
2138void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
2139 bool Volatile, QualType Ty,
2140 LValueBaseInfo BaseInfo,
2141 TBAAAccessInfo TBAAInfo,
2142 bool isInit, bool isNontemporal) {
2143 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2144 if (GV->isThreadLocal())
2145 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2147
2148 llvm::Type *SrcTy = Value->getType();
2149 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2150 auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
2151 if (!CGM.getCodeGenOpts().PreserveVec3Type) {
2152 // Handle vec3 special.
2153 if (VecTy && !ClangVecTy->isExtVectorBoolType() &&
2154 cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
2155 // Our source is a vec3, do a shuffle vector to make it a vec4.
2156 Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
2157 "extractVec");
2158 SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
2159 }
2160 if (Addr.getElementType() != SrcTy) {
2161 Addr = Addr.withElementType(SrcTy);
2162 }
2163 }
2164 }
2165
2166 Value = EmitToMemory(Value, Ty);
2167
2168 LValue AtomicLValue =
2169 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2170 if (Ty->isAtomicType() ||
2171 (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
2172 EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
2173 return;
2174 }
2175
2176 llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
2177 if (isNontemporal) {
2178 llvm::MDNode *Node =
2179 llvm::MDNode::get(Store->getContext(),
2180 llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2181 Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2182 }
2183
2184 CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
2185}
2186
2187void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
2188 bool isInit) {
2189 if (lvalue.getType()->isConstantMatrixType()) {
2190 EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
2191 return;
2192 }
2193
2194 EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
2195 lvalue.getType(), lvalue.getBaseInfo(),
2196 lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
2197}
2198
2199// Emit a load of a LValue of matrix type. This may require casting the pointer
2200// to memory address (ArrayType) to a pointer to the value type (VectorType).
2202 CodeGenFunction &CGF) {
2203 assert(LV.getType()->isConstantMatrixType());
2205 LV.setAddress(Addr);
2206 return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
2207}
2208
2211 QualType Ty = LV.getType();
2212 switch (getEvaluationKind(Ty)) {
2213 case TEK_Scalar:
2214 return EmitLoadOfLValue(LV, Loc);
2215 case TEK_Complex:
2217 case TEK_Aggregate:
2218 EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
2219 return Slot.asRValue();
2220 }
2221 llvm_unreachable("bad evaluation kind");
2222}
2223
2224/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
2225/// method emits the address of the lvalue, then loads the result as an rvalue,
2226/// returning the rvalue.
2228 if (LV.isObjCWeak()) {
2229 // load of a __weak object.
2230 Address AddrWeakObj = LV.getAddress();
2232 AddrWeakObj));
2233 }
2235 // In MRC mode, we do a load+autorelease.
2236 if (!getLangOpts().ObjCAutoRefCount) {
2238 }
2239
2240 // In ARC mode, we load retained and then consume the value.
2241 llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
2242 Object = EmitObjCConsumeObject(LV.getType(), Object);
2243 return RValue::get(Object);
2244 }
2245
2246 if (LV.isSimple()) {
2247 assert(!LV.getType()->isFunctionType());
2248
2249 if (LV.getType()->isConstantMatrixType())
2250 return EmitLoadOfMatrixLValue(LV, Loc, *this);
2251
2252 // Everything needs a load.
2253 return RValue::get(EmitLoadOfScalar(LV, Loc));
2254 }
2255
2256 if (LV.isVectorElt()) {
2257 llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
2258 LV.isVolatileQualified());
2259 return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
2260 "vecext"));
2261 }
2262
2263 // If this is a reference to a subset of the elements of a vector, either
2264 // shuffle the input or extract/insert them as appropriate.
2265 if (LV.isExtVectorElt()) {
2267 }
2268
2269 // Global Register variables always invoke intrinsics
2270 if (LV.isGlobalReg())
2271 return EmitLoadOfGlobalRegLValue(LV);
2272
2273 if (LV.isMatrixElt()) {
2274 llvm::Value *Idx = LV.getMatrixIdx();
2275 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2276 const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
2277 llvm::MatrixBuilder MB(Builder);
2278 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2279 }
2280 llvm::LoadInst *Load =
2282 return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
2283 }
2284
2285 assert(LV.isBitField() && "Unknown LValue type!");
2286 return EmitLoadOfBitfieldLValue(LV, Loc);
2287}
2288
2291 const CGBitFieldInfo &Info = LV.getBitFieldInfo();
2292
2293 // Get the output type.
2294 llvm::Type *ResLTy = ConvertType(LV.getType());
2295
2296 Address Ptr = LV.getBitFieldAddress();
2297 llvm::Value *Val =
2298 Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
2299
2300 bool UseVolatile = LV.isVolatileQualified() &&
2301 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2302 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2303 const unsigned StorageSize =
2304 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2305 if (Info.IsSigned) {
2306 assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
2307 unsigned HighBits = StorageSize - Offset - Info.Size;
2308 if (HighBits)
2309 Val = Builder.CreateShl(Val, HighBits, "bf.shl");
2310 if (Offset + HighBits)
2311 Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
2312 } else {
2313 if (Offset)
2314 Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
2315 if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
2316 Val = Builder.CreateAnd(
2317 Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
2318 }
2319 Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
2320 EmitScalarRangeCheck(Val, LV.getType(), Loc);
2321 return RValue::get(Val);
2322}
2323
2324// If this is a reference to a subset of the elements of a vector, create an
2325// appropriate shufflevector.
2327 llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
2328 LV.isVolatileQualified());
2329
2330 // HLSL allows treating scalars as one-element vectors. Converting the scalar
2331 // IR value to a vector here allows the rest of codegen to behave as normal.
2332 if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
2333 llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
2334 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2335 Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
2336 }
2337
2338 const llvm::Constant *Elts = LV.getExtVectorElts();
2339
2340 // If the result of the expression is a non-vector type, we must be extracting
2341 // a single element. Just codegen as an extractelement.
2342 const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2343 if (!ExprVT) {
2344 unsigned InIdx = getAccessedFieldNo(0, Elts);
2345 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2346 return RValue::get(Builder.CreateExtractElement(Vec, Elt));
2347 }
2348
2349 // Always use shuffle vector to try to retain the original program structure
2350 unsigned NumResultElts = ExprVT->getNumElements();
2351
2353 for (unsigned i = 0; i != NumResultElts; ++i)
2354 Mask.push_back(getAccessedFieldNo(i, Elts));
2355
2356 Vec = Builder.CreateShuffleVector(Vec, Mask);
2357 return RValue::get(Vec);
2358}
2359
2360/// Generates lvalue for partial ext_vector access.
2362 Address VectorAddress = LV.getExtVectorAddress();
2363 QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2364 llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2365
2366 Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);
2367
2368 const llvm::Constant *Elts = LV.getExtVectorElts();
2369 unsigned ix = getAccessedFieldNo(0, Elts);
2370
2371 Address VectorBasePtrPlusIx =
2372 Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2373 "vector.elt");
2374
2375 return VectorBasePtrPlusIx;
2376}
2377
2378/// Load of global named registers are always calls to intrinsics.
2380 assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2381 "Bad type for register variable");
2382 llvm::MDNode *RegName = cast<llvm::MDNode>(
2383 cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2384
2385 // We accept integer and pointer types only
2386 llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2387 llvm::Type *Ty = OrigTy;
2388 if (OrigTy->isPointerTy())
2389 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2390 llvm::Type *Types[] = { Ty };
2391
2392 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2393 llvm::Value *Call = Builder.CreateCall(
2394 F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2395 if (OrigTy->isPointerTy())
2396 Call = Builder.CreateIntToPtr(Call, OrigTy);
2397 return RValue::get(Call);
2398}
2399
2400/// EmitStoreThroughLValue - Store the specified rvalue into the specified
2401/// lvalue, where both are guaranteed to the have the same type, and that type
2402/// is 'Ty'.
2404 bool isInit) {
2405 if (!Dst.isSimple()) {
2406 if (Dst.isVectorElt()) {
2407 // Read/modify/write the vector, inserting the new element.
2408 llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2409 Dst.isVolatileQualified());
2410 auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
2411 if (IRStoreTy) {
2412 auto *IRVecTy = llvm::FixedVectorType::get(
2413 Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
2414 Vec = Builder.CreateBitCast(Vec, IRVecTy);
2415 // iN --> <N x i1>.
2416 }
2417 Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
2418 Dst.getVectorIdx(), "vecins");
2419 if (IRStoreTy) {
2420 // <N x i1> --> <iN>.
2421 Vec = Builder.CreateBitCast(Vec, IRStoreTy);
2422 }
2424 Dst.isVolatileQualified());
2425 return;
2426 }
2427
2428 // If this is an update of extended vector elements, insert them as
2429 // appropriate.
2430 if (Dst.isExtVectorElt())
2432
2433 if (Dst.isGlobalReg())
2434 return EmitStoreThroughGlobalRegLValue(Src, Dst);
2435
2436 if (Dst.isMatrixElt()) {
2437 llvm::Value *Idx = Dst.getMatrixIdx();
2438 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2439 const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2440 llvm::MatrixBuilder MB(Builder);
2441 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2442 }
2443 llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2444 llvm::Value *Vec =
2445 Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
2447 Dst.isVolatileQualified());
2448 return;
2449 }
2450
2451 assert(Dst.isBitField() && "Unknown LValue type");
2452 return EmitStoreThroughBitfieldLValue(Src, Dst);
2453 }
2454
2455 // There's special magic for assigning into an ARC-qualified l-value.
2456 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2457 switch (Lifetime) {
2459 llvm_unreachable("present but none");
2460
2462 // nothing special
2463 break;
2464
2466 if (isInit) {
2467 Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2468 break;
2469 }
2470 EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2471 return;
2472
2474 if (isInit)
2475 // Initialize and then skip the primitive store.
2477 else
2479 /*ignore*/ true);
2480 return;
2481
2484 Src.getScalarVal()));
2485 // fall into the normal path
2486 break;
2487 }
2488 }
2489
2490 if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2491 // load of a __weak object.
2492 Address LvalueDst = Dst.getAddress();
2493 llvm::Value *src = Src.getScalarVal();
2494 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
2495 return;
2496 }
2497
2498 if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2499 // load of a __strong object.
2500 Address LvalueDst = Dst.getAddress();
2501 llvm::Value *src = Src.getScalarVal();
2502 if (Dst.isObjCIvar()) {
2503 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2504 llvm::Type *ResultType = IntPtrTy;
2506 llvm::Value *RHS = dst.emitRawPointer(*this);
2507 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2508 llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
2509 ResultType, "sub.ptr.lhs.cast");
2510 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
2511 CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
2512 } else if (Dst.isGlobalObjCRef()) {
2513 CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
2514 Dst.isThreadLocalRef());
2515 }
2516 else
2517 CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2518 return;
2519 }
2520
2521 assert(Src.isScalar() && "Can't emit an agg store with this method");
2522 EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2523}
2524
2526 llvm::Value **Result) {
2527 const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2528 llvm::Type *ResLTy = convertTypeForLoadStore(Dst.getType());
2529 Address Ptr = Dst.getBitFieldAddress();
2530
2531 // Get the source value, truncated to the width of the bit-field.
2532 llvm::Value *SrcVal = Src.getScalarVal();
2533
2534 // Cast the source to the storage type and shift it into place.
2535 SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2536 /*isSigned=*/false);
2537 llvm::Value *MaskedVal = SrcVal;
2538
2539 const bool UseVolatile =
2540 CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
2541 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2542 const unsigned StorageSize =
2543 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2544 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2545 // See if there are other bits in the bitfield's storage we'll need to load
2546 // and mask together with source before storing.
2547 if (StorageSize != Info.Size) {
2548 assert(StorageSize > Info.Size && "Invalid bitfield size.");
2549 llvm::Value *Val =
2550 Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2551
2552 // Mask the source value as needed.
2554 SrcVal = Builder.CreateAnd(
2555 SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
2556 "bf.value");
2557 MaskedVal = SrcVal;
2558 if (Offset)
2559 SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
2560
2561 // Mask out the original value.
2562 Val = Builder.CreateAnd(
2563 Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
2564 "bf.clear");
2565
2566 // Or together the unchanged values and the source value.
2567 SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2568 } else {
2569 assert(Offset == 0);
2570 // According to the AACPS:
2571 // When a volatile bit-field is written, and its container does not overlap
2572 // with any non-bit-field member, its container must be read exactly once
2573 // and written exactly once using the access width appropriate to the type
2574 // of the container. The two accesses are not atomic.
2575 if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
2576 CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2577 Builder.CreateLoad(Ptr, true, "bf.load");
2578 }
2579
2580 // Write the new value back out.
2581 Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2582
2583 // Return the new value of the bit-field, if requested.
2584 if (Result) {
2585 llvm::Value *ResultVal = MaskedVal;
2586
2587 // Sign extend the value if needed.
2588 if (Info.IsSigned) {
2589 assert(Info.Size <= StorageSize);
2590 unsigned HighBits = StorageSize - Info.Size;
2591 if (HighBits) {
2592 ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2593 ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2594 }
2595 }
2596
2597 ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2598 "bf.result.cast");
2599 *Result = EmitFromMemory(ResultVal, Dst.getType());
2600 }
2601}
2602
2604 LValue Dst) {
2605 // HLSL allows storing to scalar values through ExtVector component LValues.
2606 // To support this we need to handle the case where the destination address is
2607 // a scalar.
2608 Address DstAddr = Dst.getExtVectorAddress();
2609 if (!DstAddr.getElementType()->isVectorTy()) {
2610 assert(!Dst.getType()->isVectorType() &&
2611 "this should only occur for non-vector l-values");
2612 Builder.CreateStore(Src.getScalarVal(), DstAddr, Dst.isVolatileQualified());
2613 return;
2614 }
2615
2616 // This access turns into a read/modify/write of the vector. Load the input
2617 // value now.
2618 llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
2619 const llvm::Constant *Elts = Dst.getExtVectorElts();
2620
2621 llvm::Value *SrcVal = Src.getScalarVal();
2622
2623 if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2624 unsigned NumSrcElts = VTy->getNumElements();
2625 unsigned NumDstElts =
2626 cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
2627 if (NumDstElts == NumSrcElts) {
2628 // Use shuffle vector is the src and destination are the same number of
2629 // elements and restore the vector mask since it is on the side it will be
2630 // stored.
2631 SmallVector<int, 4> Mask(NumDstElts);
2632 for (unsigned i = 0; i != NumSrcElts; ++i)
2633 Mask[getAccessedFieldNo(i, Elts)] = i;
2634
2635 Vec = Builder.CreateShuffleVector(SrcVal, Mask);
2636 } else if (NumDstElts > NumSrcElts) {
2637 // Extended the source vector to the same length and then shuffle it
2638 // into the destination.
2639 // FIXME: since we're shuffling with undef, can we just use the indices
2640 // into that? This could be simpler.
2641 SmallVector<int, 4> ExtMask;
2642 for (unsigned i = 0; i != NumSrcElts; ++i)
2643 ExtMask.push_back(i);
2644 ExtMask.resize(NumDstElts, -1);
2645 llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
2646 // build identity
2648 for (unsigned i = 0; i != NumDstElts; ++i)
2649 Mask.push_back(i);
2650
2651 // When the vector size is odd and .odd or .hi is used, the last element
2652 // of the Elts constant array will be one past the size of the vector.
2653 // Ignore the last element here, if it is greater than the mask size.
2654 if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2655 NumSrcElts--;
2656
2657 // modify when what gets shuffled in
2658 for (unsigned i = 0; i != NumSrcElts; ++i)
2659 Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
2660 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
2661 } else {
2662 // We should never shorten the vector
2663 llvm_unreachable("unexpected shorten vector length");
2664 }
2665 } else {
2666 // If the Src is a scalar (not a vector), and the target is a vector it must
2667 // be updating one element.
2668 unsigned InIdx = getAccessedFieldNo(0, Elts);
2669 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2670 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2671 }
2672
2674 Dst.isVolatileQualified());
2675}
2676
2677/// Store of global named registers are always calls to intrinsics.
2679 assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2680 "Bad type for register variable");
2681 llvm::MDNode *RegName = cast<llvm::MDNode>(
2682 cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2683 assert(RegName && "Register LValue is not metadata");
2684
2685 // We accept integer and pointer types only
2686 llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2687 llvm::Type *Ty = OrigTy;
2688 if (OrigTy->isPointerTy())
2689 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2690 llvm::Type *Types[] = { Ty };
2691
2692 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2693 llvm::Value *Value = Src.getScalarVal();
2694 if (OrigTy->isPointerTy())
2695 Value = Builder.CreatePtrToInt(Value, Ty);
2696 Builder.CreateCall(
2697 F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2698}
2699
2700// setObjCGCLValueClass - sets class of the lvalue for the purpose of
2701// generating write-barries API. It is currently a global, ivar,
2702// or neither.
2703static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2704 LValue &LV,
2705 bool IsMemberAccess=false) {
2706 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2707 return;
2708
2709 if (isa<ObjCIvarRefExpr>(E)) {
2710 QualType ExpTy = E->getType();
2711 if (IsMemberAccess && ExpTy->isPointerType()) {
2712 // If ivar is a structure pointer, assigning to field of
2713 // this struct follows gcc's behavior and makes it a non-ivar
2714 // writer-barrier conservatively.
2715 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2716 if (ExpTy->isRecordType()) {
2717 LV.setObjCIvar(false);
2718 return;
2719 }
2720 }
2721 LV.setObjCIvar(true);
2722 auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2723 LV.setBaseIvarExp(Exp->getBase());
2725 return;
2726 }
2727
2728 if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2729 if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2730 if (VD->hasGlobalStorage()) {
2731 LV.setGlobalObjCRef(true);
2732 LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2733 }
2734 }
2736 return;
2737 }
2738
2739 if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2740 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2741 return;
2742 }
2743
2744 if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2745 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2746 if (LV.isObjCIvar()) {
2747 // If cast is to a structure pointer, follow gcc's behavior and make it
2748 // a non-ivar write-barrier.
2749 QualType ExpTy = E->getType();
2750 if (ExpTy->isPointerType())
2751 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2752 if (ExpTy->isRecordType())
2753 LV.setObjCIvar(false);
2754 }
2755 return;
2756 }
2757
2758 if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
2759 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
2760 return;
2761 }
2762
2763 if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
2764 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2765 return;
2766 }
2767
2768 if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
2769 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2770 return;
2771 }
2772
2773 if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
2774 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2775 return;
2776 }
2777
2778 if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
2779 setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
2780 if (LV.isObjCIvar() && !LV.isObjCArray())
2781 // Using array syntax to assigning to what an ivar points to is not
2782 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2783 LV.setObjCIvar(false);
2784 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
2785 // Using array syntax to assigning to what global points to is not
2786 // same as assigning to the global itself. {id *G;} G[i] = 0;
2787 LV.setGlobalObjCRef(false);
2788 return;
2789 }
2790
2791 if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
2792 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
2793 // We don't know if member is an 'ivar', but this flag is looked at
2794 // only in the context of LV.isObjCIvar().
2796 return;
2797 }
2798}
2799
2801 CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
2802 llvm::Type *RealVarTy, SourceLocation Loc) {
2803 if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
2805 CGF, VD, Addr, Loc);
2806 else
2807 Addr =
2808 CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
2809
2810 Addr = Addr.withElementType(RealVarTy);
2811 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2812}
2813
2815 const VarDecl *VD, QualType T) {
2816 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2817 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2818 // Return an invalid address if variable is MT_To (or MT_Enter starting with
2819 // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
2820 // and MT_To (or MT_Enter) with unified memory, return a valid address.
2821 if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2822 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2824 return Address::invalid();
2825 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
2826 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2827 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2829 "Expected link clause OR to clause with unified memory enabled.");
2830 QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
2832 return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
2833}
2834
2835Address
2837 LValueBaseInfo *PointeeBaseInfo,
2838 TBAAAccessInfo *PointeeTBAAInfo) {
2839 llvm::LoadInst *Load =
2840 Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
2842 return makeNaturalAddressForPointer(Load, RefLVal.getType()->getPointeeType(),
2843 CharUnits(), /*ForPointeeType=*/true,
2844 PointeeBaseInfo, PointeeTBAAInfo);
2845}
2846
2848 LValueBaseInfo PointeeBaseInfo;
2849 TBAAAccessInfo PointeeTBAAInfo;
2850 Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
2851 &PointeeTBAAInfo);
2852 return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
2853 PointeeBaseInfo, PointeeTBAAInfo);
2854}
2855
2857 const PointerType *PtrTy,
2858 LValueBaseInfo *BaseInfo,
2859 TBAAAccessInfo *TBAAInfo) {
2860 llvm::Value *Addr = Builder.CreateLoad(Ptr);
2861 return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
2862 CharUnits(), /*ForPointeeType=*/true,
2863 BaseInfo, TBAAInfo);
2864}
2865
2867 const PointerType *PtrTy) {
2868 LValueBaseInfo BaseInfo;
2869 TBAAAccessInfo TBAAInfo;
2870 Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
2871 return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
2872}
2873
2875 const Expr *E, const VarDecl *VD) {
2876 QualType T = E->getType();
2877
2878 // If it's thread_local, emit a call to its wrapper function instead.
2879 if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2881 return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
2882 // Check if the variable is marked as declare target with link clause in
2883 // device codegen.
2884 if (CGF.getLangOpts().OpenMPIsTargetDevice) {
2885 Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
2886 if (Addr.isValid())
2887 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2888 }
2889
2890 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
2891
2892 if (VD->getTLSKind() != VarDecl::TLS_None)
2893 V = CGF.Builder.CreateThreadLocalAddress(V);
2894
2895 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
2896 CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
2897 Address Addr(V, RealVarTy, Alignment);
2898 // Emit reference to the private copy of the variable if it is an OpenMP
2899 // threadprivate variable.
2900 if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
2901 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2902 return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
2903 E->getExprLoc());
2904 }
2905 LValue LV = VD->getType()->isReferenceType() ?
2906 CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
2909 setObjCGCLValueClass(CGF.getContext(), E, LV);
2910 return LV;
2911}
2912
2914 llvm::Type *Ty) {
2915 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2916 if (FD->hasAttr<WeakRefAttr>()) {
2918 return aliasee.getPointer();
2919 }
2920
2921 llvm::Constant *V = GetAddrOfFunction(GD, Ty);
2922 return V;
2923}
2924
2926 GlobalDecl GD) {
2927 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2928 llvm::Constant *V = CGF.CGM.getFunctionPointer(GD);
2929 CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
2930 return CGF.MakeAddrLValue(V, E->getType(), Alignment,
2932}
2933
2935 llvm::Value *ThisValue) {
2936
2937 return CGF.EmitLValueForLambdaField(FD, ThisValue);
2938}
2939
2940/// Named Registers are named metadata pointing to the register name
2941/// which will be read from/written to as an argument to the intrinsic
2942/// @llvm.read/write_register.
2943/// So far, only the name is being passed down, but other options such as
2944/// register type, allocation type or even optimization options could be
2945/// passed down via the metadata node.
2947 SmallString<64> Name("llvm.named.register.");
2948 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
2949 assert(Asm->getLabel().size() < 64-Name.size() &&
2950 "Register name too big");
2951 Name.append(Asm->getLabel());
2952 llvm::NamedMDNode *M =
2953 CGM.getModule().getOrInsertNamedMetadata(Name);
2954 if (M->getNumOperands() == 0) {
2955 llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
2956 Asm->getLabel());
2957 llvm::Metadata *Ops[] = {Str};
2958 M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
2959 }
2960
2961 CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
2962
2963 llvm::Value *Ptr =
2964 llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
2965 return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
2966}
2967
2968/// Determine whether we can emit a reference to \p VD from the current
2969/// context, despite not necessarily having seen an odr-use of the variable in
2970/// this context.
2972 const DeclRefExpr *E,
2973 const VarDecl *VD) {
2974 // For a variable declared in an enclosing scope, do not emit a spurious
2975 // reference even if we have a capture, as that will emit an unwarranted
2976 // reference to our capture state, and will likely generate worse code than
2977 // emitting a local copy.
2978 if (E->refersToEnclosingVariableOrCapture())
2979 return false;
2980
2981 // For a local declaration declared in this function, we can always reference
2982 // it even if we don't have an odr-use.
2983 if (VD->hasLocalStorage()) {
2984 return VD->getDeclContext() ==
2985 dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
2986 }
2987
2988 // For a global declaration, we can emit a reference to it if we know
2989 // for sure that we are able to emit a definition of it.
2990 VD = VD->getDefinition(CGF.getContext());
2991 if (!VD)
2992 return false;
2993
2994 // Don't emit a spurious reference if it might be to a variable that only
2995 // exists on a different device / target.
2996 // FIXME: This is unnecessarily broad. Check whether this would actually be a
2997 // cross-target reference.
2998 if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
2999 CGF.getLangOpts().OpenCL) {
3000 return false;
3001 }
3002
3003 // We can emit a spurious reference only if the linkage implies that we'll
3004 // be emitting a non-interposable symbol that will be retained until link
3005 // time.
3006 switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
3007 case llvm::GlobalValue::ExternalLinkage:
3008 case llvm::GlobalValue::LinkOnceODRLinkage:
3009 case llvm::GlobalValue::WeakODRLinkage:
3010 case llvm::GlobalValue::InternalLinkage:
3011 case llvm::GlobalValue::PrivateLinkage:
3012 return true;
3013 default:
3014 return false;
3015 }
3016}
3017
3019 const NamedDecl *ND = E->getDecl();
3020 QualType T = E->getType();
3021
3022 assert(E->isNonOdrUse() != NOUR_Unevaluated &&
3023 "should not emit an unevaluated operand");
3024
3025 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3026 // Global Named registers access via intrinsics only
3027 if (VD->getStorageClass() == SC_Register &&
3028 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
3029 return EmitGlobalNamedRegister(VD, CGM);
3030
3031 // If this DeclRefExpr does not constitute an odr-use of the variable,
3032 // we're not permitted to emit a reference to it in general, and it might
3033 // not be captured if capture would be necessary for a use. Emit the
3034 // constant value directly instead.
3035 if (E->isNonOdrUse() == NOUR_Constant &&
3036 (VD->getType()->isReferenceType() ||
3037 !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
3038 VD->getAnyInitializer(VD);
3039 llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
3040 E->getLocation(), *VD->evaluateValue(), VD->getType());
3041 assert(Val && "failed to emit constant expression");
3042
3043 Address Addr = Address::invalid();
3044 if (!VD->getType()->isReferenceType()) {
3045 // Spill the constant value to a global.
3046 Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
3047 getContext().getDeclAlign(VD));
3048 llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
3049 auto *PTy = llvm::PointerType::get(
3050 VarTy, getTypes().getTargetAddressSpace(VD->getType()));
3051 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
3052 } else {
3053 // Should we be using the alignment of the constant pointer we emitted?
3054 CharUnits Alignment =
3056 /* BaseInfo= */ nullptr,
3057 /* TBAAInfo= */ nullptr,
3058 /* forPointeeType= */ true);
3059 Addr = makeNaturalAddressForPointer(Val, T, Alignment);
3060 }
3061 return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
3062 }
3063
3064 // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
3065
3066 // Check for captured variables.
3067 if (E->refersToEnclosingVariableOrCapture()) {
3068 VD = VD->getCanonicalDecl();
3069 if (auto *FD = LambdaCaptureFields.lookup(VD))
3070 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3071 if (CapturedStmtInfo) {
3072 auto I = LocalDeclMap.find(VD);
3073 if (I != LocalDeclMap.end()) {
3074 LValue CapLVal;
3075 if (VD->getType()->isReferenceType())
3076 CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
3078 else
3079 CapLVal = MakeAddrLValue(I->second, T);
3080 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3081 // in simd context.
3082 if (getLangOpts().OpenMP &&
3084 CapLVal.setNontemporal(/*Value=*/true);
3085 return CapLVal;
3086 }
3087 LValue CapLVal =
3090 Address LValueAddress = CapLVal.getAddress();
3091 CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
3092 LValueAddress.getElementType(),
3093 getContext().getDeclAlign(VD)),
3094 CapLVal.getType(),
3096 CapLVal.getTBAAInfo());
3097 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3098 // in simd context.
3099 if (getLangOpts().OpenMP &&
3101 CapLVal.setNontemporal(/*Value=*/true);
3102 return CapLVal;
3103 }
3104
3105 assert(isa<BlockDecl>(CurCodeDecl));
3106 Address addr = GetAddrOfBlockDecl(VD);
3107 return MakeAddrLValue(addr, T, AlignmentSource::Decl);
3108 }
3109 }
3110
3111 // FIXME: We should be able to assert this for FunctionDecls as well!
3112 // FIXME: We should be able to assert this for all DeclRefExprs, not just
3113 // those with a valid source location.
3114 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
3115 !E->getLocation().isValid()) &&
3116 "Should not use decl without marking it used!");
3117
3118 if (ND->hasAttr<WeakRefAttr>()) {
3119 const auto *VD = cast<ValueDecl>(ND);
3121 return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
3122 }
3123
3124 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3125 // Check if this is a global variable.
3126 if (VD->hasLinkage() || VD->isStaticDataMember())
3127 return EmitGlobalVarDeclLValue(*this, E, VD);
3128
3129 Address addr = Address::invalid();
3130
3131 // The variable should generally be present in the local decl map.
3132 auto iter = LocalDeclMap.find(VD);
3133 if (iter != LocalDeclMap.end()) {
3134 addr = iter->second;
3135
3136 // Otherwise, it might be static local we haven't emitted yet for
3137 // some reason; most likely, because it's in an outer function.
3138 } else if (VD->isStaticLocal()) {
3139 llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
3141 addr = Address(
3142 var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
3143
3144 // No other cases for now.
3145 } else {
3146 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
3147 }
3148
3149 // Handle threadlocal function locals.
3150 if (VD->getTLSKind() != VarDecl::TLS_None)
3151 addr = addr.withPointer(
3152 Builder.CreateThreadLocalAddress(addr.getBasePointer()),
3154
3155 // Check for OpenMP threadprivate variables.
3156 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
3157 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3159 *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
3160 E->getExprLoc());
3161 }
3162
3163 // Drill into block byref variables.
3164 bool isBlockByref = VD->isEscapingByref();
3165 if (isBlockByref) {
3166 addr = emitBlockByrefAddress(addr, VD);
3167 }
3168
3169 // Drill into reference types.
3170 LValue LV = VD->getType()->isReferenceType() ?
3171 EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
3173
3174 bool isLocalStorage = VD->hasLocalStorage();
3175
3176 bool NonGCable = isLocalStorage &&
3177 !VD->getType()->isReferenceType() &&
3178 !isBlockByref;
3179 if (NonGCable) {
3181 LV.setNonGC(true);
3182 }
3183
3184 bool isImpreciseLifetime =
3185 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
3186 if (isImpreciseLifetime)
3189 return LV;
3190 }
3191
3192 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
3193 return EmitFunctionDeclLValue(*this, E, FD);
3194
3195 // FIXME: While we're emitting a binding from an enclosing scope, all other
3196 // DeclRefExprs we see should be implicitly treated as if they also refer to
3197 // an enclosing scope.
3198 if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
3199 if (E->refersToEnclosingVariableOrCapture()) {
3200 auto *FD = LambdaCaptureFields.lookup(BD);
3201 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3202 }
3203 return EmitLValue(BD->getBinding());
3204 }
3205
3206 // We can form DeclRefExprs naming GUID declarations when reconstituting
3207 // non-type template parameters into expressions.
3208 if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
3211
3212 if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
3213 auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
3214 auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());
3215
3216 if (AS != T.getAddressSpace()) {
3217 auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
3218 auto PtrTy = llvm::PointerType::get(CGM.getLLVMContext(), TargetAS);
3220 CGM, ATPO.getPointer(), AS, T.getAddressSpace(), PtrTy);
3221 ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
3222 }
3223
3224 return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
3225 }
3226
3227 llvm_unreachable("Unhandled DeclRefExpr");
3228}
3229
3231 // __extension__ doesn't affect lvalue-ness.
3232 if (E->getOpcode() == UO_Extension)
3233 return EmitLValue(E->getSubExpr());
3234
3235 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
3236 switch (E->getOpcode()) {
3237 default: llvm_unreachable("Unknown unary operator lvalue!");
3238 case UO_Deref: {
3239 QualType T = E->getSubExpr()->getType()->getPointeeType();
3240 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
3241
3242 LValueBaseInfo BaseInfo;
3243 TBAAAccessInfo TBAAInfo;
3244 Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
3245 &TBAAInfo);
3246 LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
3248
3249 // We should not generate __weak write barrier on indirect reference
3250 // of a pointer to object; as in void foo (__weak id *param); *param = 0;
3251 // But, we continue to generate __strong write barrier on indirect write
3252 // into a pointer to object.
3253 if (getLangOpts().ObjC &&
3254 getLangOpts().getGC() != LangOptions::NonGC &&
3255 LV.isObjCWeak())
3257 return LV;
3258 }
3259 case UO_Real:
3260 case UO_Imag: {
3261 LValue LV = EmitLValue(E->getSubExpr());
3262 assert(LV.isSimple() && "real/imag on non-ordinary l-value");
3263
3264 // __real is valid on scalars. This is a faster way of testing that.
3265 // __imag can only produce an rvalue on scalars.
3266 if (E->getOpcode() == UO_Real &&
3267 !LV.getAddress().getElementType()->isStructTy()) {
3268 assert(E->getSubExpr()->getType()->isArithmeticType());
3269 return LV;
3270 }
3271
3272 QualType T = ExprTy->castAs<ComplexType>()->getElementType();
3273
3274 Address Component =
3275 (E->getOpcode() == UO_Real
3277 : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
3278 LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
3280 ElemLV.getQuals().addQualifiers(LV.getQuals());
3281 return ElemLV;
3282 }
3283 case UO_PreInc:
3284 case UO_PreDec: {
3285 LValue LV = EmitLValue(E->getSubExpr());
3286 bool isInc = E->getOpcode() == UO_PreInc;
3287
3288 if (E->getType()->isAnyComplexType())
3289 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
3290 else
3291 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
3292 return LV;
3293 }
3294 }
3295}
3296
3300}
3301
3305}
3306
3308 auto SL = E->getFunctionName();
3309 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
3310 StringRef FnName = CurFn->getName();
3311 if (FnName.starts_with("\01"))
3312 FnName = FnName.substr(1);
3313 StringRef NameItems[] = {
3314 PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
3315 std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
3316 if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
3317 std::string Name = std::string(SL->getString());
3318 if (!Name.empty()) {
3319 unsigned Discriminator =
3321 if (Discriminator)
3322 Name += "_" + Twine(Discriminator + 1).str();
3323 auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
3325 } else {
3326 auto C =
3327 CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
3329 }
3330 }
3331 auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
3333}
3334
3335/// Emit a type description suitable for use by a runtime sanitizer library. The
3336/// format of a type descriptor is
3337///
3338/// \code
3339/// { i16 TypeKind, i16 TypeInfo }
3340/// \endcode
3341///
3342/// followed by an array of i8 containing the type name with extra information
3343/// for BitInt. TypeKind is TK_Integer(0) for an integer, TK_Float(1) for a
3344/// floating point value, TK_BitInt(2) for BitInt and TK_Unknown(0xFFFF) for
3345/// anything else.
3347 // Only emit each type's descriptor once.
3348 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
3349 return C;
3350
3351 uint16_t TypeKind = TK_Unknown;
3352 uint16_t TypeInfo = 0;
3353 bool IsBitInt = false;
3354
3355 if (T->isIntegerType()) {
3356 TypeKind = TK_Integer;
3357 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3358 (T->isSignedIntegerType() ? 1 : 0);
3359 // Follow suggestion from discussion of issue 64100.
3360 // So we can write the exact amount of bits in TypeName after '\0'
3361 // making it <diagnostic-like type name>.'\0'.<32-bit width>.
3362 if (T->isSignedIntegerType() && T->getAs<BitIntType>()) {
3363 // Do a sanity checks as we are using 32-bit type to store bit length.
3364 assert(getContext().getTypeSize(T) > 0 &&
3365 " non positive amount of bits in __BitInt type");
3366 assert(getContext().getTypeSize(T) <= 0xFFFFFFFF &&
3367 " too many bits in __BitInt type");
3368
3369 // Redefine TypeKind with the actual __BitInt type if we have signed
3370 // BitInt.
3371 TypeKind = TK_BitInt;
3372 IsBitInt = true;
3373 }
3374 } else if (T->isFloatingType()) {
3375 TypeKind = TK_Float;
3377 }
3378
3379 // Format the type name as if for a diagnostic, including quotes and
3380 // optionally an 'aka'.
3381 SmallString<32> Buffer;
3383 (intptr_t)T.getAsOpaquePtr(), StringRef(),
3384 StringRef(), {}, Buffer, {});
3385
3386 if (IsBitInt) {
3387 // The Structure is: 0 to end the string, 32 bit unsigned integer in target
3388 // endianness, zero.
3389 char S[6] = {'\0', '\0', '\0', '\0', '\0', '\0'};
3390 const auto *EIT = T->castAs<BitIntType>();
3391 uint32_t Bits = EIT->getNumBits();
3392 llvm::support::endian::write32(S + 1, Bits,
3393 getTarget().isBigEndian()
3394 ? llvm::endianness::big
3395 : llvm::endianness::little);
3396 StringRef Str = StringRef(S, sizeof(S) / sizeof(decltype(S[0])));
3397 Buffer.append(Str);
3398 }
3399
3400 llvm::Constant *Components[] = {
3401 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3402 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3403 };
3404 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3405
3406 auto *GV = new llvm::GlobalVariable(
3407 CGM.getModule(), Descriptor->getType(),
3408 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3409 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3411
3412 // Remember the descriptor for this type.
3414
3415 return GV;
3416}
3417
3418llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3419 llvm::Type *TargetTy = IntPtrTy;
3420
3421 if (V->getType() == TargetTy)
3422 return V;
3423
3424 // Floating-point types which fit into intptr_t are bitcast to integers
3425 // and then passed directly (after zero-extension, if necessary).
3426 if (V->getType()->isFloatingPointTy()) {
3427 unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
3428 if (Bits <= TargetTy->getIntegerBitWidth())
3429 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
3430 Bits));
3431 }
3432
3433 // Integers which fit in intptr_t are zero-extended and passed directly.
3434 if (V->getType()->isIntegerTy() &&
3435 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
3436 return Builder.CreateZExt(V, TargetTy);
3437
3438 // Pointers are passed directly, everything else is passed by address.
3439 if (!V->getType()->isPointerTy()) {
3440 RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
3441 Builder.CreateStore(V, Ptr);
3442 V = Ptr.getPointer();
3443 }
3444 return Builder.CreatePtrToInt(V, TargetTy);
3445}
3446
3447/// Emit a representation of a SourceLocation for passing to a handler
3448/// in a sanitizer runtime library. The format for this data is:
3449/// \code
3450/// struct SourceLocation {
3451/// const char *Filename;
3452/// int32_t Line, Column;
3453/// };
3454/// \endcode
3455/// For an invalid SourceLocation, the Filename pointer is null.
3457 llvm::Constant *Filename;
3458 int Line, Column;
3459
3461 if (PLoc.isValid()) {
3462 StringRef FilenameString = PLoc.getFilename();
3463
3464 int PathComponentsToStrip =
3465 CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
3466 if (PathComponentsToStrip < 0) {
3467 assert(PathComponentsToStrip != INT_MIN);
3468 int PathComponentsToKeep = -PathComponentsToStrip;
3469 auto I = llvm::sys::path::rbegin(FilenameString);
3470 auto E = llvm::sys::path::rend(FilenameString);
3471 while (I != E && --PathComponentsToKeep)
3472 ++I;
3473
3474 FilenameString = FilenameString.substr(I - E);
3475 } else if (PathComponentsToStrip > 0) {
3476 auto I = llvm::sys::path::begin(FilenameString);
3477 auto E = llvm::sys::path::end(FilenameString);
3478 while (I != E && PathComponentsToStrip--)
3479 ++I;
3480
3481 if (I != E)
3482 FilenameString =
3483 FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
3484 else
3485 FilenameString = llvm::sys::path::filename(FilenameString);
3486 }
3487
3488 auto FilenameGV =
3489 CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
3491 cast<llvm::GlobalVariable>(
3492 FilenameGV.getPointer()->stripPointerCasts()));
3493 Filename = FilenameGV.getPointer();
3494 Line = PLoc.getLine();
3495 Column = PLoc.getColumn();
3496 } else {
3497 Filename = llvm::Constant::getNullValue(Int8PtrTy);
3498 Line = Column = 0;
3499 }
3500
3501 llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
3502 Builder.getInt32(Column)};
3503
3504 return llvm::ConstantStruct::getAnon(Data);
3505}
3506
3507namespace {
3508/// Specify under what conditions this check can be recovered
3509enum class CheckRecoverableKind {
3510 /// Always terminate program execution if this check fails.
3512 /// Check supports recovering, runtime has both fatal (noreturn) and
3513 /// non-fatal handlers for this check.
3514 Recoverable,
3515 /// Runtime conditionally aborts, always need to support recovery.
3517};
3518}
3519
3520static CheckRecoverableKind
3522 if (Ordinal == SanitizerKind::SO_Vptr)
3523 return CheckRecoverableKind::AlwaysRecoverable;
3524 else if (Ordinal == SanitizerKind::SO_Return ||
3525 Ordinal == SanitizerKind::SO_Unreachable)
3526 return CheckRecoverableKind::Unrecoverable;
3527 else
3528 return CheckRecoverableKind::Recoverable;
3529}
3530
namespace {
/// Static description of one UBSan runtime handler entry point: \c Name is
/// appended to "__ubsan_handle_" to form the runtime function's name, and a
/// nonzero \c Version adds a "_vN" suffix to select the handler's ABI
/// revision.
struct SanitizerHandlerInfo {
  char const *const Name;
  unsigned Version;
};
} // namespace
3537
3538const SanitizerHandlerInfo SanitizerHandlers[] = {
3539#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
3541#undef SANITIZER_CHECK
3542};
3543
3545 llvm::FunctionType *FnType,
3547 SanitizerHandler CheckHandler,
3548 CheckRecoverableKind RecoverKind, bool IsFatal,
3549 llvm::BasicBlock *ContBB, bool NoMerge) {
3550 assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
3551 std::optional<ApplyDebugLocation> DL;
3552 if (!CGF.Builder.getCurrentDebugLocation()) {
3553 // Ensure that the call has at least an artificial debug location.
3554 DL.emplace(CGF, SourceLocation());
3555 }
3556 bool NeedsAbortSuffix =
3557 IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
3558 bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
3559 const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
3560 const StringRef CheckName = CheckInfo.Name;
3561 std::string FnName = "__ubsan_handle_" + CheckName.str();
3562 if (CheckInfo.Version && !MinimalRuntime)
3563 FnName += "_v" + llvm::utostr(CheckInfo.Version);
3564 if (MinimalRuntime)
3565 FnName += "_minimal";
3566 if (NeedsAbortSuffix)
3567 FnName += "_abort";
3568 bool MayReturn =
3569 !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
3570
3571 llvm::AttrBuilder B(CGF.getLLVMContext());
3572 if (!MayReturn) {
3573 B.addAttribute(llvm::Attribute::NoReturn)
3574 .addAttribute(llvm::Attribute::NoUnwind);
3575 }
3576 B.addUWTableAttr(llvm::UWTableKind::Default);
3577
3578 llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
3579 FnType, FnName,
3580 llvm::AttributeList::get(CGF.getLLVMContext(),
3581 llvm::AttributeList::FunctionIndex, B),
3582 /*Local=*/true);
3583 llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
3584 NoMerge = NoMerge || !CGF.CGM.getCodeGenOpts().OptimizationLevel ||
3585 (CGF.CurCodeDecl && CGF.CurCodeDecl->hasAttr<OptimizeNoneAttr>());
3586 if (NoMerge)
3587 HandlerCall->addFnAttr(llvm::Attribute::NoMerge);
3588 if (!MayReturn) {
3589 HandlerCall->setDoesNotReturn();
3590 CGF.Builder.CreateUnreachable();
3591 } else {
3592 CGF.Builder.CreateBr(ContBB);
3593 }
3594}
3595
3597 ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>> Checked,
3598 SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
3599 ArrayRef<llvm::Value *> DynamicArgs) {
3600 assert(IsSanitizerScope);
3601 assert(Checked.size() > 0);
3602 assert(CheckHandler >= 0 &&
3603 size_t(CheckHandler) < std::size(SanitizerHandlers));
3604 const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
3605
3606 llvm::Value *FatalCond = nullptr;
3607 llvm::Value *RecoverableCond = nullptr;
3608 llvm::Value *TrapCond = nullptr;
3609 bool NoMerge = false;
3610 for (int i = 0, n = Checked.size(); i < n; ++i) {
3611 llvm::Value *Check = Checked[i].first;
3612 // -fsanitize-trap= overrides -fsanitize-recover=.
3613 llvm::Value *&Cond =
3614 CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)
3615 ? TrapCond
3616 : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
3617 ? RecoverableCond
3618 : FatalCond;
3619 Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
3620
3621 if (!CGM.getCodeGenOpts().SanitizeMergeHandlers.has(Checked[i].second))
3622 NoMerge = true;
3623 }
3624
3626 llvm::Value *Allow =
3627 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
3628 llvm::ConstantInt::get(CGM.Int8Ty, CheckHandler));
3629
3630 for (llvm::Value **Cond : {&FatalCond, &RecoverableCond, &TrapCond}) {
3631 if (*Cond)
3632 *Cond = Builder.CreateOr(*Cond, Builder.CreateNot(Allow));
3633 }
3634 }
3635
3636 if (TrapCond)
3637 EmitTrapCheck(TrapCond, CheckHandler, NoMerge);
3638 if (!FatalCond && !RecoverableCond)
3639 return;
3640
3641 llvm::Value *JointCond;
3642 if (FatalCond && RecoverableCond)
3643 JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
3644 else
3645 JointCond = FatalCond ? FatalCond : RecoverableCond;
3646 assert(JointCond);
3647
3648 CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
3649 assert(SanOpts.has(Checked[0].second));
3650#ifndef NDEBUG
3651 for (int i = 1, n = Checked.size(); i < n; ++i) {
3652 assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
3653 "All recoverable kinds in a single check must be same!");
3654 assert(SanOpts.has(Checked[i].second));
3655 }
3656#endif
3657
3658 llvm::BasicBlock *Cont = createBasicBlock("cont");
3659 llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
3660 llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
3661 // Give hint that we very much don't expect to execute the handler
3662 llvm::MDBuilder MDHelper(getLLVMContext());
3663 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3664 Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
3665 EmitBlock(Handlers);
3666
3667 // Handler functions take an i8* pointing to the (handler-specific) static
3668 // information block, followed by a sequence of intptr_t arguments
3669 // representing operand values.
3672 if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
3673 Args.reserve(DynamicArgs.size() + 1);
3674 ArgTypes.reserve(DynamicArgs.size() + 1);
3675
3676 // Emit handler arguments and create handler function type.
3677 if (!StaticArgs.empty()) {
3678 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3679 auto *InfoPtr = new llvm::GlobalVariable(
3680 CGM.getModule(), Info->getType(), false,
3681 llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,
3682 llvm::GlobalVariable::NotThreadLocal,
3683 CGM.getDataLayout().getDefaultGlobalsAddressSpace());
3684 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3686 Args.push_back(InfoPtr);
3687 ArgTypes.push_back(Args.back()->getType());
3688 }
3689
3690 for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
3691 Args.push_back(EmitCheckValue(DynamicArgs[i]));
3692 ArgTypes.push_back(IntPtrTy);
3693 }
3694 }
3695
3696 llvm::FunctionType *FnType =
3697 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
3698
3699 if (!FatalCond || !RecoverableCond) {
3700 // Simple case: we need to generate a single handler call, either
3701 // fatal, or non-fatal.
3702 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
3703 (FatalCond != nullptr), Cont, NoMerge);
3704 } else {
3705 // Emit two handler calls: first one for set of unrecoverable checks,
3706 // another one for recoverable.
3707 llvm::BasicBlock *NonFatalHandlerBB =
3708 createBasicBlock("non_fatal." + CheckName);
3709 llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
3710 Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
3711 EmitBlock(FatalHandlerBB);
3712 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
3713 NonFatalHandlerBB, NoMerge);
3714 EmitBlock(NonFatalHandlerBB);
3715 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
3716 Cont, NoMerge);
3717 }
3718
3719 EmitBlock(Cont);
3720}
3721
    // NOTE(review): this parameter list continues a function header on a line
    // elided from this view (the CFI slow-path check emitter).
    SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond,
    llvm::ConstantInt *TypeId, llvm::Value *Ptr,
    ArrayRef<llvm::Constant *> StaticArgs) {
  // Branch layout: fall through to "cfi.cont" when Cond holds; otherwise take
  // the out-of-line "cfi.slowpath" block.
  llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");

  llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
  llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);

  // Weight the branch so the success edge is treated as the likely one.
  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
  BI->setMetadata(llvm::LLVMContext::MD_prof, Node);

  EmitBlock(CheckBB);

  // Diagnostic variant only when this ordinal is not in trap-only mode.
  bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Ordinal);

  llvm::CallInst *CheckCall;
  llvm::FunctionCallee SlowPathFn;
  if (WithDiag) {
    // Materialize the static diagnostic data as a private unnamed_addr global
    // and call __cfi_slowpath_diag(TypeId, Ptr, Info).
    llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
    auto *InfoPtr =
        new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, Info);
    InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

    SlowPathFn = CGM.getModule().getOrInsertFunction(
        "__cfi_slowpath_diag",
        llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
                                false));
    CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
  } else {
    // Trap-only mode: call the two-argument __cfi_slowpath runtime entry.
    SlowPathFn = CGM.getModule().getOrInsertFunction(
        "__cfi_slowpath",
        llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
    CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
  }

  // NOTE(review): the call consuming this cast begins on a line elided from
  // this view (it applies a module-level setting to the slow-path callee).
      cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
  CheckCall->setDoesNotThrow();

  EmitBlock(Cont);
}
3767
// Emit a stub for __cfi_check function so that the linker knows about this
// symbol in LTO mode.
  // NOTE(review): the function header and the FnArgs container declaration
  // are on lines elided from this view.
  llvm::Module *M = &CGM.getModule();
  ASTContext &C = getContext();
  // The callsite type id parameter is a 64-bit unsigned integer.
  QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);

  // Formal arguments: (CallsiteTypeId, Addr, CFICheckFailData).
  ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
  ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
  ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
  FnArgs.push_back(&ArgCallsiteTypeId);
  FnArgs.push_back(&ArgAddr);
  FnArgs.push_back(&ArgCFICheckFailData);
  // NOTE(review): FI's initializer is on a line elided from this view.
  const CGFunctionInfo &FI =

  // Weak-any linkage: any real __cfi_check produced at LTO link time may
  // replace this stub.
  llvm::Function *F = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
      llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
  F->setAlignment(llvm::Align(4096));
  CGM.setDSOLocal(F);

  llvm::LLVMContext &Ctx = M->getContext();
  llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
  // CrossDSOCFI pass is not executed if there is no executable code.
  // Forward (Data, Addr) into __cfi_check_fail so the stub has a real body.
  SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
  llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
  llvm::ReturnInst::Create(Ctx, nullptr, BB);
}
3801
// This function is basically a switch over the CFI failure kind, which is
// extracted from CFICheckFailData (1st function argument). Each case is either
// llvm.trap or a call to one of the two runtime handlers, based on
// -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
// failure kind) traps, but this should really never happen. CFICheckFailData
// can be nullptr if the calling module has -fsanitize-trap behavior for this
// check kind; in this case __cfi_check_fail traps as well.
  // NOTE(review): the function header and the ImplicitParamDecl declarations
  // for ArgData/ArgAddr are on lines elided from this view.
  SanitizerScope SanScope(this);
  FunctionArgList Args;
  Args.push_back(&ArgData);
  Args.push_back(&ArgAddr);

  // NOTE(review): FI's initializer is on a line elided from this view.
  const CGFunctionInfo &FI =

  // weak_odr + hidden: one copy per DSO, not exported.
  llvm::Function *F = llvm::Function::Create(
      llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
      llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());

  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
  F->setVisibility(llvm::GlobalValue::HiddenVisibility);

  StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
                SourceLocation());

  // This function is not affected by NoSanitizeList. This function does
  // not have a source location, but "src:*" would still apply. Revert any
  // changes to SanOpts made in StartFunction.

  // Load the two pointer arguments back out of their local slots.
  llvm::Value *Data =
      EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
                       CGM.getContext().VoidPtrTy, ArgData.getLocation());
  llvm::Value *Addr =
      EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
                       CGM.getContext().VoidPtrTy, ArgAddr.getLocation());

  // Data == nullptr means the calling module has trap behaviour for this check.
  llvm::Value *DataIsNotNullPtr =
      Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
  EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);

  // Mirror the runtime's CFICheckFailData layout:
  //   { i8 CheckKind, SourceLocation { ptr, i32, i32 }, ptr }.
  llvm::StructType *SourceLocationTy =
      llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
  llvm::StructType *CfiCheckFailDataTy =
      llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);

  // Read the failure-kind byte (field 0) out of the data block.
  llvm::Value *V = Builder.CreateConstGEP2_32(
      CfiCheckFailDataTy, Builder.CreatePointerCast(Data, UnqualPtrTy), 0, 0);

  Address CheckKindAddr(V, Int8Ty, getIntAlign());
  llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);

  // llvm.type.test against the "all-vtables" group; passed through to the
  // diagnostic handler as an extra argument.
  llvm::Value *AllVtables = llvm::MetadataAsValue::get(
      llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
  llvm::Value *ValidVtable = Builder.CreateZExt(
      Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
                         {Addr, AllVtables}),
      IntPtrTy);

  // Map each CFI type-check kind to its sanitizer ordinal.
  const std::pair<int, SanitizerKind::SanitizerOrdinal> CheckKinds[] = {
      {CFITCK_VCall, SanitizerKind::SO_CFIVCall},
      {CFITCK_NVCall, SanitizerKind::SO_CFINVCall},
      {CFITCK_DerivedCast, SanitizerKind::SO_CFIDerivedCast},
      {CFITCK_UnrelatedCast, SanitizerKind::SO_CFIUnrelatedCast},
      {CFITCK_ICall, SanitizerKind::SO_CFIICall}};

  // NOTE(review): the declaration that 'Checks;' completes is on a line
  // elided from this view.
      Checks;
  for (auto CheckKindOrdinalPair : CheckKinds) {
    int Kind = CheckKindOrdinalPair.first;
    SanitizerKind::SanitizerOrdinal Ordinal = CheckKindOrdinalPair.second;
    // Cond is "CheckKind != Kind": diagnose/trap fires on the matching kind.
    llvm::Value *Cond =
        Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
    if (CGM.getLangOpts().Sanitize.has(Ordinal))
      EmitCheck(std::make_pair(Cond, Ordinal), SanitizerHandler::CFICheckFail,
                {}, {Data, Addr, ValidVtable});
    else
      EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
  }

  // The only reference to this function will be created during LTO link.
  // Make sure it survives until then.
  CGM.addUsedGlobal(F);
}
3895
  // With -fsanitize=unreachable, emit an always-failing diagnosable check
  // before the IR-level unreachable terminator.
  // NOTE(review): the function header and the EmitCheck static-args line are
  // elided from this view.
  if (SanOpts.has(SanitizerKind::Unreachable)) {
    SanitizerScope SanScope(this);
    EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
                             SanitizerKind::SO_Unreachable),
              SanitizerHandler::BuiltinUnreachable,
  }
  Builder.CreateUnreachable();
}
3906
/// Emit a branch to a trapping block that fires when \p Checked is false.
/// \param Checked condition that must hold for execution to continue.
/// \param CheckHandlerID identifies the kind of check; encoded into the
///        llvm.ubsantrap immediate.
/// \param NoMerge if true, do not fold this trap with other traps of the
///        same kind in this function.
void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
                                    SanitizerHandler CheckHandlerID,
                                    bool NoMerge) {
  llvm::BasicBlock *Cont = createBasicBlock("cont");

  // If we're optimizing, collapse all calls to trap down to just one per
  // check-type per function to save on code size.
  if ((int)TrapBBs.size() <= CheckHandlerID)
    TrapBBs.resize(CheckHandlerID + 1);

  llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];

  // Merging is disabled at -O0, under optnone, or when explicitly requested.
  NoMerge = NoMerge || !CGM.getCodeGenOpts().OptimizationLevel ||
            (CurCodeDecl && CurCodeDecl->hasAttr<OptimizeNoneAttr>());

  if (TrapBB && !NoMerge) {
    // Reuse the existing trap block; merge debug locations so the shared trap
    // is attributed to all folded check sites.
    auto Call = TrapBB->begin();
    assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");

    Call->applyMergedLocation(Call->getDebugLoc(),
                              Builder.getCurrentDebugLocation());
    Builder.CreateCondBr(Checked, Cont, TrapBB);
  } else {
    TrapBB = createBasicBlock("trap");
    Builder.CreateCondBr(Checked, Cont, TrapBB);
    EmitBlock(TrapBB);

    // llvm.ubsantrap carries the check kind as its immediate argument.
    llvm::CallInst *TrapCall =
        Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
                           llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));

    if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
      // NOTE(review): the attribute's value argument is on a line elided
      // from this view.
      auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
      TrapCall->addFnAttr(A);
    }
    if (NoMerge)
      TrapCall->addFnAttr(llvm::Attribute::NoMerge);
    TrapCall->setDoesNotReturn();
    TrapCall->setDoesNotThrow();
    Builder.CreateUnreachable();
  }

  EmitBlock(Cont);
}
3952
/// Emit a call to the given trap intrinsic, applying the configured
/// trap-func-name attribute when one is set; returns the call instruction.
llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
  llvm::CallInst *TrapCall =
      Builder.CreateCall(CGM.getIntrinsic(IntrID));

  if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
    // NOTE(review): the attribute's value argument is on a line elided from
    // this view.
    auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
    TrapCall->addFnAttr(A);
  }

  // NOTE(review): the condition guarding this NoMerge attribute is on a line
  // elided from this view.
    TrapCall->addFnAttr(llvm::Attribute::NoMerge);
  return TrapCall;
}
3967
                                                // NOTE(review): the function
                                                // header line is elided from
                                                // this view.
                                                LValueBaseInfo *BaseInfo,
                                                TBAAAccessInfo *TBAAInfo) {
  assert(E->getType()->isArrayType() &&
         "Array to pointer decay must have array source type!");

  // Expressions of array type can't be bitfields or vector elements.
  LValue LV = EmitLValue(E);
  Address Addr = LV.getAddress();

  // If the array type was an incomplete type, we need to make sure
  // the decay ends up being the right type.
  llvm::Type *NewTy = ConvertType(E->getType());
  Addr = Addr.withElementType(NewTy);

  // Note that VLA pointers are always decayed, so we don't need to do
  // anything here.
  if (!E->getType()->isVariableArrayType()) {
    assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
           "Expected pointer to array");
    Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
  }

  // The result of this decay conversion points to an array element within the
  // base lvalue. However, since TBAA currently does not support representing
  // accesses to elements of member arrays, we conservatively represent accesses
  // to the pointee object as if it had no any base lvalue specified.
  // TODO: Support TBAA for member arrays.
  // NOTE(review): EltType's declaration is on a line elided from this view.
  if (BaseInfo) *BaseInfo = LV.getBaseInfo();
  if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);

  return Addr.withElementType(ConvertTypeForMem(EltType));
}
4002
4003/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
4004/// array to pointer, return the array subexpression.
4005static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
4006 // If this isn't just an array->pointer decay, bail out.
4007 const auto *CE = dyn_cast<CastExpr>(E);
4008 if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
4009 return nullptr;
4010
4011 // If this is a decay from variable width array, bail out.
4012 const Expr *SubExpr = CE->getSubExpr();
4013 if (SubExpr->getType()->isVariableArrayType())
4014 return nullptr;
4015
4016 return SubExpr;
4017}
4018
                                             // NOTE(review): the function
                                             // header line is elided from
                                             // this view.
                                             llvm::Type *elemType,
                                             llvm::Value *ptr,
                                             ArrayRef<llvm::Value*> indices,
                                             bool inbounds,
                                             bool signedIndices,
                                             SourceLocation loc,
                                             const llvm::Twine &name = "arrayidx") {
  // Use the overflow-checked inbounds GEP when requested; otherwise a plain
  // (possibly out-of-bounds) GEP.
  if (inbounds) {
    // NOTE(review): one argument line of this call is elided from this view.
    return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
                                      name);
  } else {
    return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
  }
}
4035
                                     // NOTE(review): the function header
                                     // lines are elided from this view.
                                     llvm::Type *elementType, bool inbounds,
                                     bool signedIndices, SourceLocation loc,
                                     CharUnits align,
                                     const llvm::Twine &name = "arrayidx") {
  // Address-based variant: same checked/unchecked split as the raw-pointer
  // overload, carrying the result alignment through.
  if (inbounds) {
    // NOTE(review): one argument line of this call is elided from this view.
    return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
                                      align, name);
  } else {
    return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
  }
}
4050
                                       // NOTE(review): the function header
                                       // (including the arrayAlign parameter)
                                       // is on a line elided from this view.
                                       llvm::Value *idx,
                                       CharUnits eltSize) {
  // If we have a constant index, we can use the exact offset of the
  // element we're accessing.
  if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
    CharUnits offset = constantIdx->getZExtValue() * eltSize;
    return arrayAlign.alignmentAtOffset(offset);

  // Otherwise, use the worst-case alignment for any element.
  } else {
    return arrayAlign.alignmentOfArrayElement(eltSize);
  }
}
4065
                                        // NOTE(review): the function header
                                        // (including the ctx parameter) is on
                                        // a line elided from this view.
                                        const VariableArrayType *vla) {
  // Peel nested VLA layers until a non-VLA element type is reached.
  QualType eltType;
  do {
    eltType = vla->getElementType();
  } while ((vla = ctx.getAsVariableArrayType(eltType)));
  return eltType;
}
4074
  // True iff the (possibly null) declaration carries BPF's
  // preserve_static_offset attribute.
  // NOTE(review): the function header line is elided from this view.
  return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
}
4078
4079static bool hasBPFPreserveStaticOffset(const Expr *E) {
4080 if (!E)
4081 return false;
4082 QualType PointeeType = E->getType()->getPointeeType();
4083 if (PointeeType.isNull())
4084 return false;
4085 if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
4086 return hasBPFPreserveStaticOffset(BaseDecl);
4087 return false;
4088}
4089
// Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
                                              // NOTE(review): the function
                                              // header line is elided from
                                              // this view.
                                              Address &Addr) {
  // Only meaningful for the BPF target; pass the address through otherwise.
  if (!CGF.getTarget().getTriple().isBPF())
    return Addr;

  llvm::Function *Fn =
      CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
  llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
  // Keep the original element type and alignment on the wrapped pointer.
  return Address(Call, Addr.getElementType(), Addr.getAlignment());
}
4101
/// Given an array base, check whether its member access belongs to a record
/// with preserve_access_index attribute or not.
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
  // Debug info is required to emit preserve_access_index relocations.
  if (!ArrayBase || !CGF.getDebugInfo())
    return false;

  // Only support base as either a MemberExpr or DeclRefExpr.
  // DeclRefExpr to cover cases like:
  //    struct s { int a; int b[10]; };
  //    struct s *p;
  //    p[1].a
  // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
  // p->b[5] is a MemberExpr example.
  const Expr *E = ArrayBase->IgnoreImpCasts();
  if (const auto *ME = dyn_cast<MemberExpr>(E))
    return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();

  if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
    const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
    if (!VarDef)
      return false;

    const auto *PtrT = VarDef->getType()->getAs<PointerType>();
    if (!PtrT)
      return false;

    // NOTE(review): the continuation of this initializer is on a line elided
    // from this view.
    const auto *PointeeT = PtrT->getPointeeType()
    if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
      return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
    return false;
  }

  return false;
}
4137
                                     // NOTE(review): the function header
                                     // lines are elided from this view.
                                     QualType eltType, bool inbounds,
                                     bool signedIndices, SourceLocation loc,
                                     QualType *arrayType = nullptr,
                                     const Expr *Base = nullptr,
                                     const llvm::Twine &name = "arrayidx") {
  // All the indices except that last must be zero.
#ifndef NDEBUG
  for (auto *idx : indices.drop_back())
    assert(isa<llvm::ConstantInt>(idx) &&
           cast<llvm::ConstantInt>(idx)->isZero());
#endif

  // Determine the element size of the statically-sized base. This is
  // the thing that the indices are expressed in terms of.
  if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
    eltType = getFixedSizeElementType(CGF.getContext(), vla);
  }

  // We can use that to compute the best alignment of the element.
  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
  CharUnits eltAlign =
      getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);

  // NOTE(review): the guard condition for this BPF wrapping is on a line
  // elided from this view.
    addr = wrapWithBPFPreserveStaticOffset(CGF, addr);

  llvm::Value *eltPtr;
  auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
  // NOTE(review): the remainder of this condition is on a line elided from
  // this view; the else branch emits a BPF preserve-access-index GEP.
  if (!LastIndex ||
    addr = emitArraySubscriptGEP(CGF, addr, indices,
                                 CGF.ConvertTypeForMem(eltType), inbounds,
                                 signedIndices, loc, eltAlign, name);
    return addr;
  } else {
    // Remember the original array subscript for bpf target
    unsigned idx = LastIndex->getZExtValue();
    llvm::DIType *DbgInfo = nullptr;
    if (arrayType)
      DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
    eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
        addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
        idx, DbgInfo);
  }

  return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
}
4187
/// The offset of a field from the beginning of the record.
                                // NOTE(review): the function header line is
                                // elided from this view.
                                const FieldDecl *Field, int64_t &Offset) {
  // Walk RD's fields, recursing into nested records, accumulating layout
  // offsets into Offset; returns true once Field is located.
  ASTContext &Ctx = CGF.getContext();
  const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
  unsigned FieldNo = 0;

  for (const FieldDecl *FD : RD->fields()) {
    if (FD == Field) {
      Offset += Layout.getFieldOffset(FieldNo);
      return true;
    }

    QualType Ty = FD->getType();
    if (Ty->isRecordType())
      if (getFieldOffsetInBits(CGF, Ty->getAsRecordDecl(), Field, Offset)) {
        // Found inside a nested record: add that record's own field offset.
        Offset += Layout.getFieldOffset(FieldNo);
        return true;
      }

    // Union members all share field index 0 in the layout.
    if (!RD->isUnion())
      ++FieldNo;
  }

  return false;
}
4214
/// Returns the relative offset difference between \p FD1 and \p FD2.
/// \code
/// offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
/// \endcode
/// Both fields must be within the same struct.
static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
                                                        const FieldDecl *FD1,
                                                        const FieldDecl *FD2) {
  // NOTE(review): the initializers of FD1OuterRec and FD2OuterRec are on
  // lines elided from this view.
  const RecordDecl *FD1OuterRec =
  const RecordDecl *FD2OuterRec =

  if (FD1OuterRec != FD2OuterRec)
    // Fields must be within the same RecordDecl.
    return std::optional<int64_t>();

  int64_t FD1Offset = 0;
  if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
    return std::optional<int64_t>();

  int64_t FD2Offset = 0;
  if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
    return std::optional<int64_t>();

  return std::make_optional<int64_t>(FD1Offset - FD2Offset);
}
4242
                                               // NOTE(review): the function
                                               // header line is elided from
                                               // this view.
                                               bool Accessed) {
  // The index must always be an integer, which is not an aggregate. Emit it
  // in lexical order (this complexity is, sadly, required by C++17).
  llvm::Value *IdxPre =
      (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
  bool SignedIndices = false;
  // Deferred index emission: run after the base so evaluation order matches
  // the source; optionally promote the index to IntPtrTy.
  auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
    auto *Idx = IdxPre;
    if (E->getLHS() != E->getIdx()) {
      assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
      Idx = EmitScalarExpr(E->getIdx());
    }

    QualType IdxTy = E->getIdx()->getType();
    bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
    SignedIndices |= IdxSigned;

    if (SanOpts.has(SanitizerKind::ArrayBounds))
      EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);

    // Extend or truncate the index type to 32 or 64-bits.
    if (Promote && Idx->getType() != IntPtrTy)
      Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");

    return Idx;
  };
  IdxPre = nullptr;

  // If the base is a vector type, then we are forming a vector element lvalue
  // with this subscript.
  if (E->getBase()->getType()->isSubscriptableVectorType() &&
      !isa<ExtVectorElementExpr>(E->getBase())) {
    // Emit the vector as an lvalue to get its address.
    LValue LHS = EmitLValue(E->getBase());
    auto *Idx = EmitIdxAfterBase(/*Promote*/false);
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
    return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
                                 LHS.getBaseInfo(), TBAAAccessInfo());
  }

  // All the other cases basically behave like simple offsetting.

  // Handle the extvector case we ignored above.
  if (isa<ExtVectorElementExpr>(E->getBase())) {
    LValue LV = EmitLValue(E->getBase());
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
    // NOTE(review): the declaration of Addr here is on a line elided from
    // this view.

    QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
                                 SignedIndices, E->getExprLoc());
    return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
                          CGM.getTBAAInfoForSubobject(LV, EltType));
  }

  LValueBaseInfo EltBaseInfo;
  TBAAAccessInfo EltTBAAInfo;
  Address Addr = Address::invalid();
  if (const VariableArrayType *vla =
          getContext().getAsVariableArrayType(E->getType())) {
    // The base must be a pointer, which is not an aggregate. Emit
    // it. It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
    if (getLangOpts().isSignedOverflowDefined()) {
      Idx = Builder.CreateMul(Idx, numElements);
    } else {
      Idx = Builder.CreateNSWMul(Idx, numElements);
    }

    Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
                                 !getLangOpts().isSignedOverflowDefined(),
                                 SignedIndices, E->getExprLoc());

  } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
    // Indexing over an interface, as in "NSString *P; P[4];"

    // Emit the base pointer.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
    llvm::Value *InterfaceSizeVal =
        llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());

    llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);

    // We don't necessarily build correct LLVM struct types for ObjC
    // interfaces, so we can't rely on GEP to do this scaling
    // correctly, so we need to cast to i8*. FIXME: is this actually
    // true? A lot of other things in the fragile ABI would break...
    llvm::Type *OrigBaseElemTy = Addr.getElementType();

    // Do the GEP.
    CharUnits EltAlign =
        getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
    llvm::Value *EltPtr =
        emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
                              ScaledIdx, false, SignedIndices, E->getExprLoc());
    Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast. While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here. Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);

    if (SanOpts.has(SanitizerKind::ArrayBounds)) {
      // If the array being accessed has a "counted_by" attribute, generate
      // bounds checking code. The "count" field is at the top level of the
      // struct or in an anonymous struct, that's also at the top level. Future
      // expansions may allow the "count" to reside at any place in the struct,
      // but the value of "counted_by" will be a "simple" path to the count,
      // i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
      // similar to emit the correct GEP.
      const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
          getLangOpts().getStrictFlexArraysLevel();

      // NOTE(review): one further conjunct of this condition is on a line
      // elided from this view.
      if (const auto *ME = dyn_cast<MemberExpr>(Array);
          ME &&
          ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel) &&
        const FieldDecl *FAMDecl = cast<FieldDecl>(ME->getMemberDecl());
        if (const FieldDecl *CountFD = FAMDecl->findCountedByField()) {
          if (std::optional<int64_t> Diff =
                  getOffsetDifferenceInBits(*this, CountFD, FAMDecl)) {
            CharUnits OffsetDiff = CGM.getContext().toCharUnitsFromBits(*Diff);

            // Create a GEP with a byte offset between the FAM and count and
            // use that to load the count value.
            // NOTE(review): the start of this statement is on a line elided
            // from this view.
                ArrayLV.getAddress(), Int8PtrTy, Int8Ty);

            llvm::Type *CountTy = ConvertType(CountFD->getType());
            llvm::Value *Res = Builder.CreateInBoundsGEP(
                Int8Ty, Addr.emitRawPointer(*this),
                Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep");
            Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
                                            ".counted_by.load");

            // Now emit the bounds checking.
            EmitBoundsCheckImpl(E, Res, Idx, E->getIdx()->getType(),
                                Array->getType(), Accessed);
          }
        }
      }
    }

    // Propagate the alignment from the array itself to the result.
    QualType arrayType = Array->getType();
    Addr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
        E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
        E->getExprLoc(), &arrayType, E->getBase());
    EltBaseInfo = ArrayLV.getBaseInfo();
    EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
  } else {
    // The base must be a pointer; emit it with an estimate of its alignment.
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
    QualType ptrType = E->getBase()->getType();
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
                                 !getLangOpts().isSignedOverflowDefined(),
                                 SignedIndices, E->getExprLoc(), &ptrType,
                                 E->getBase());
  }

  LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);

  // NOTE(review): the ObjC garbage-collection handling inside this block is
  // on lines elided from this view.
  if (getLangOpts().ObjC &&
      getLangOpts().getGC() != LangOptions::NonGC) {
  }
  return LV;
}
4438
4439llvm::Value *CodeGenFunction::EmitMatrixIndexExpr(const Expr *E) {
4440 llvm::Value *Idx = EmitScalarExpr(E);
4441 if (Idx->getType() == IntPtrTy)
4442 return Idx;
4443 bool IsSigned = E->getType()->isSignedIntegerOrEnumerationType();
4444 return Builder.CreateIntCast(Idx, IntPtrTy, IsSigned);
4445}
4446
  // NOTE(review): the function header line is elided from this view.
  assert(
      !E->isIncomplete() &&
      "incomplete matrix subscript expressions should be rejected during Sema");
  LValue Base = EmitLValue(E->getBase());

  // Extend or truncate the index type to 32 or 64-bits if needed.
  llvm::Value *RowIdx = EmitMatrixIndexExpr(E->getRowIdx());
  llvm::Value *ColIdx = EmitMatrixIndexExpr(E->getColumnIdx());

  // Linearize the (row, col) pair as col * NumRows + row.
  llvm::Value *NumRows = Builder.getIntN(
      RowIdx->getType()->getScalarSizeInBits(),
      E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
  llvm::Value *FinalIdx =
      Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
  return LValue::MakeMatrixElt(
      MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
      E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
}
4466
                                       // NOTE(review): the function header
                                       // line is elided from this view.
                                       LValueBaseInfo &BaseInfo,
                                       TBAAAccessInfo &TBAAInfo,
                                       QualType BaseTy, QualType ElTy,
                                       bool IsLowerBound) {
  LValue BaseLVal;
  if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
    // Nested array section: emit the inner section first.
    BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
    if (BaseTy->isArrayType()) {
      Address Addr = BaseLVal.getAddress();
      BaseInfo = BaseLVal.getBaseInfo();

      // If the array type was an incomplete type, we need to make sure
      // the decay ends up being the right type.
      llvm::Type *NewTy = CGF.ConvertType(BaseTy);
      Addr = Addr.withElementType(NewTy);

      // Note that VLA pointers are always decayed, so we don't need to do
      // anything here.
      if (!BaseTy->isVariableArrayType()) {
        assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
               "Expected pointer to array");
        Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
      }

      return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
    }
    // Pointer base: load the pointer value and merge base/TBAA info as for
    // a cast to the element type.
    LValueBaseInfo TypeBaseInfo;
    TBAAAccessInfo TypeTBAAInfo;
    CharUnits Align =
        CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
    BaseInfo.mergeForCast(TypeBaseInfo);
    TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
    return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
                   CGF.ConvertTypeForMem(ElTy), Align);
  }
  // Not a nested section: emit the base pointer with an alignment estimate.
  return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
}
4505
// Emits an l-value for an OpenMP array-section expression (e.g. a[lb:len]).
// When IsLowerBound is true the result designates the section's first
// element; otherwise its last element (lower-bound + length - 1).
// NOTE(review): the signature line and the declaration of BaseTy were lost
// in extraction; gaps are left as-is rather than guessed at.
                                        bool IsLowerBound) {

  assert(!E->isOpenACCArraySection() &&
         "OpenACC Array section codegen not implemented");

  // Element type of the section: the array's element type, or the pointee
  // type when the base is a pointer.
  QualType ResultExprTy;
  if (auto *AT = getContext().getAsArrayType(BaseTy))
    ResultExprTy = AT->getElementType();
  else
    ResultExprTy = BaseTy->getPointeeType();
  llvm::Value *Idx = nullptr;
  if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
    // Requesting lower bound or upper bound, but without provided length and
    // without ':' symbol for the default length -> length = 1.
    // Idx = LowerBound ?: 0;
    if (auto *LowerBound = E->getLowerBound()) {
      Idx = Builder.CreateIntCast(
          EmitScalarExpr(LowerBound), IntPtrTy,
          LowerBound->getType()->hasSignedIntegerRepresentation());
    } else
      Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
  } else {
    // Try to emit length or lower bound as constant. If this is possible, 1
    // is subtracted from constant length or lower bound. Otherwise, emit LLVM
    // IR (LB + Len) - 1.
    auto &C = CGM.getContext();
    auto *Length = E->getLength();
    llvm::APSInt ConstLength;
    if (Length) {
      // Idx = LowerBound + Length - 1;
      if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
        ConstLength = CL->zextOrTrunc(PointerWidthInBits);
        Length = nullptr;
      }
      auto *LowerBound = E->getLowerBound();
      llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
      if (LowerBound) {
        if (std::optional<llvm::APSInt> LB =
                LowerBound->getIntegerConstantExpr(C)) {
          ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
          LowerBound = nullptr;
        }
      }
      // Fold the "- 1" into whichever operand folded to a constant.
      if (!Length)
        --ConstLength;
      else if (!LowerBound)
        --ConstLowerBound;

      if (Length || LowerBound) {
        auto *LowerBoundVal =
            LowerBound
                ? Builder.CreateIntCast(
                      EmitScalarExpr(LowerBound), IntPtrTy,
                      LowerBound->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
        auto *LengthVal =
            Length
                ? Builder.CreateIntCast(
                      EmitScalarExpr(Length), IntPtrTy,
                      Length->getType()->hasSignedIntegerRepresentation())
                : llvm::ConstantInt::get(IntPtrTy, ConstLength);
        Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
                                /*HasNUW=*/false,
                                !getLangOpts().isSignedOverflowDefined());
        // Neither operand was constant, so the "- 1" is emitted as IR too.
        if (Length && LowerBound) {
          Idx = Builder.CreateSub(
              Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
              /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
        }
      } else
        // Both operands folded to constants: compute the index directly.
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
    } else {
      // No explicit length was written, so the upper bound is the last
      // element of the whole array.
      // Idx = ArraySize - 1;
      QualType ArrayTy = BaseTy->isPointerType()
                             ? E->getBase()->IgnoreParenImpCasts()->getType()
                             : BaseTy;
      if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
        Length = VAT->getSizeExpr();
        if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
          ConstLength = *L;
          Length = nullptr;
        }
      } else {
        auto *CAT = C.getAsConstantArrayType(ArrayTy);
        assert(CAT && "unexpected type for array initializer");
        ConstLength = CAT->getSize();
      }
      if (Length) {
        auto *LengthVal = Builder.CreateIntCast(
            EmitScalarExpr(Length), IntPtrTy,
            Length->getType()->hasSignedIntegerRepresentation());
        Idx = Builder.CreateSub(
            LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
            /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
      } else {
        ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
        --ConstLength;
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
      }
    }
  }
  assert(Idx);

  Address EltPtr = Address::invalid();
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
    // The base must be a pointer, which is not an aggregate. Emit
    // it. It needs to be emitted first in case it's what captures
    // the VLA bounds.
    Address Base =
        emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
                                BaseTy, VLA->getElementType(), IsLowerBound);
    // The element count here is the total number of non-VLA elements.
    llvm::Value *NumElements = getVLASize(VLA).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
    if (getLangOpts().isSignedOverflowDefined())
      Idx = Builder.CreateMul(Idx, NumElements);
    else
      Idx = Builder.CreateNSWMul(Idx, NumElements);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
                                   !getLangOpts().isSignedOverflowDefined(),
                                   /*signedIndices=*/false, E->getExprLoc());
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
    // If this is A[i] where A is an array, the frontend will have decayed the
    // base to be a ArrayToPointerDecay implicit cast. While correct, it is
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
    // "gep x, i" here. Emit one "gep A, 0, i".
    assert(Array->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");
    LValue ArrayLV;
    // For simple multidimensional array indexing, set the 'accessed' flag for
    // better bounds-checking of the base expression.
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
    else
      ArrayLV = EmitLValue(Array);

    // Propagate the alignment from the array itself to the result.
    EltPtr = emitArraySubscriptGEP(
        *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
        ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
        /*signedIndices=*/false, E->getExprLoc());
    BaseInfo = ArrayLV.getBaseInfo();
    TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
  } else {
    Address Base =
        emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
                                ResultExprTy, IsLowerBound);
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
                                   !getLangOpts().isSignedOverflowDefined(),
                                   /*signedIndices=*/false, E->getExprLoc());
  }

  return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
}
4668
// Emits an l-value for an ext-vector element access such as V.xyz.
// NOTE(review): the signature line and a few interior declarations (the
// Indices/CElts SmallVectors, one MakeAddrLValue argument) were lost in
// extraction; gaps are left as-is.
  // Emit the base vector as an l-value.
  LValue Base;

  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
  if (E->isArrow()) {
    // If it is a pointer to a vector, emit the address and form an lvalue with
    // it.
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
    const auto *PT = E->getBase()->getType()->castAs<PointerType>();
    Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
    Base.getQuals().removeObjCGCAttr();
  } else if (E->getBase()->isGLValue()) {
    // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
    // emit the base as an lvalue.
    assert(E->getBase()->getType()->isVectorType());
    Base = EmitLValue(E->getBase());
  } else {
    // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
    assert(E->getBase()->getType()->isVectorType() &&
           "Result must be a vector");
    llvm::Value *Vec = EmitScalarExpr(E->getBase());

    // Store the vector to memory (because LValue wants an address).
    Address VecMem = CreateMemTemp(E->getBase()->getType());
    Builder.CreateStore(Vec, VecMem);
    Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
  }

  // The result type carries the CVR qualifiers of the base.
  QualType type =
      E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());

  // Encode the element access list into a vector of unsigned indices.
  E->getEncodedElementAccess(Indices);

  if (Base.isSimple()) {
    llvm::Constant *CV =
        llvm::ConstantDataVector::get(getLLVMContext(), Indices);
    return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
                                    Base.getBaseInfo(), TBAAAccessInfo());
  }
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");

  // The base is itself an ext-vector element l-value (foo.xy.x): compose the
  // two index lists into a single constant shuffle vector.
  llvm::Constant *BaseElts = Base.getExtVectorElts();

  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
    CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
  return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
                                  Base.getBaseInfo(), TBAAAccessInfo());
}
4726
// Emits an l-value for a member expression (s.x or s->x).
// NOTE(review): the signature and the opening of the first if (which
// presumably binds DRE from a member-expr-to-DeclRefExpr conversion —
// confirm against upstream) were lost in extraction; gaps are left as-is.
    // The base is evaluated only for its side effects here.
    EmitIgnoredExpr(E->getBase());
    return EmitDeclRefLValue(DRE);
  }

  Expr *BaseExpr = E->getBase();
  // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
  LValue BaseLV;
  if (E->isArrow()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
    QualType PtrTy = BaseExpr->getType()->getPointeeType();
    SanitizerSet SkippedChecks;
    bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
    // 'this' is suitably aligned, and 'this'/a DeclRef cannot be null, so the
    // corresponding sanitizer checks are skipped.
    if (IsBaseCXXThis)
      SkippedChecks.set(SanitizerKind::Alignment, true);
    if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
      SkippedChecks.set(SanitizerKind::Null, true);
    EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy,
                  /*Alignment=*/CharUnits::Zero(), SkippedChecks);
    BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
  } else
    BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);

  NamedDecl *ND = E->getMemberDecl();
  if (auto *Field = dyn_cast<FieldDecl>(ND)) {
    LValue LV = EmitLValueForField(BaseLV, Field);
    if (getLangOpts().OpenMP) {
      // If the member was explicitly marked as nontemporal, mark it as
      // nontemporal. If the base lvalue is marked as nontemporal, mark access
      // to children as nontemporal too.
      if ((IsWrappedCXXThis(BaseExpr) &&
          BaseLV.isNontemporal())
        LV.setNontemporal(/*Value=*/true);
    }
    return LV;
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
    return EmitFunctionDeclLValue(*this, E, FD);

  llvm_unreachable("Unhandled member declaration!");
}
4774
/// Given that we are currently emitting a lambda, emit an l-value for
/// one of its members.
///
// NOTE(review): the signature line and two interior lines were lost in
// extraction; gaps are left as-is.
                                             llvm::Value *ThisValue) {
  bool HasExplicitObjectParameter = false;
  const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
  if (MD) {
    HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
    assert(MD->getParent()->isLambda());
    assert(MD->getParent() == Field->getParent());
  }
  LValue LambdaLV;
  if (HasExplicitObjectParameter) {
    // Explicit object member function: the lambda object is the first
    // declared parameter rather than an implicit 'this'.
    const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
    auto It = LocalDeclMap.find(D);
    assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
    Address AddrOfExplicitObject = It->getSecond();
    if (D->getType()->isReferenceType())
      LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
    else
      LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
                                D->getType().getNonReferenceType());

    // Make sure we have an lvalue to the lambda itself and not a derived class.
    auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
    auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
    if (ThisTy != LambdaTy) {
      // Convert along the recorded cast path from the object's declared type
      // down to the lambda closure type.
      const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
          LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
          BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
      LambdaLV = MakeAddrLValue(Base, QualType{LambdaTy->getTypeForDecl(), 0});
    }
  } else {
    // Ordinary lambda call operator: form the closure l-value from 'this'.
    QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
    LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
  }
  return EmitLValueForField(LambdaLV, Field);
}
4816
// Convenience overload: emit the lambda-field l-value using the current
// function's captured ABI 'this' value.
// NOTE(review): the signature line of this definition was lost in extraction.
  return EmitLValueForLambdaField(Field, CXXABIThisValue);
}
4820
/// Get the field index in the debug info. The debug info structure/union
/// will ignore the unnamed bitfields.
// NOTE(review): the signature line of this definition was lost in extraction.
                                     unsigned FieldIndex) {
  // Count the unnamed bitfields preceding FieldIndex; debug info skips them,
  // so the debug-info index is the AST index minus that count.
  unsigned I = 0, Skipped = 0;

  for (auto *F : Rec->getDefinition()->fields()) {
    if (I == FieldIndex)
      break;
    if (F->isUnnamedBitField())
      Skipped++;
    I++;
  }

  return FieldIndex - Skipped;
}
4837
/// Get the address of a zero-sized field within a record. The resulting
/// address doesn't necessarily have the right type.
// NOTE(review): the signature line and the start of the offset computation
// were lost in extraction; the dangling call argument below belongs to it.
                                       const FieldDecl *Field) {
      CGF.getContext().getFieldOffset(Field));
  if (Offset.isZero())
    return Base;
  // Byte-GEP from the record base; callers fix up the element type.
  Base = Base.withElementType(CGF.Int8Ty);
  return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
}
4849
/// Drill down to the storage of a field without walking into
/// reference types.
///
/// The resulting address doesn't necessarily have the right type.
// NOTE(review): the signature line of this definition was lost in extraction.
                                      const FieldDecl *field) {
  // Zero-sized fields have no LLVM struct slot; address them by byte offset.
  if (isEmptyFieldForLayout(CGF.getContext(), field))
    return emitAddrOfZeroSizeField(CGF, base, field);

  const RecordDecl *rec = field->getParent();

  // Map the AST field to its LLVM struct index via the CodeGen record layout.
  unsigned idx =
      CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);

  return CGF.Builder.CreateStructGEP(base, idx, field->getName());
}
4866
// Emits a field address annotated with debug info so the original struct
// access index is preserved (used for BPF preserve-access-index regions).
// NOTE(review): the signature and the first line of the final return
// statement were lost in extraction; gaps are left as-is.
                                 Address addr, const FieldDecl *field) {
  const RecordDecl *rec = field->getParent();
  // Standalone debug type for the base record, used to annotate the access.
  llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
      base.getType(), rec->getLocation());

  unsigned idx =
      CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);

      addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
}
4879
4880static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
4881 const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
4882 if (!RD)
4883 return false;
4884
4885 if (RD->isDynamicClass())
4886 return true;
4887
4888 for (const auto &Base : RD->bases())
4889 if (hasAnyVptr(Base.getType(), Context))
4890 return true;
4891
4892 for (const FieldDecl *Field : RD->fields())
4893 if (hasAnyVptr(Field->getType(), Context))
4894 return true;
4895
4896 return false;
4897}
4898
// Emits an l-value for a field access, handling bit-fields, TBAA metadata,
// unions, BPF preserve-access-index regions, and reference-typed fields.
// NOTE(review): the signature and numerous interior lines (record-layout
// lookups, several call openings) were lost in extraction; gaps are left
// as-is rather than guessed at.
                                           const FieldDecl *field) {
  LValueBaseInfo BaseInfo = base.getBaseInfo();

  if (field->isBitField()) {
    const CGRecordLayout &RL =
    const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
    // AAPCS volatile bit-field accesses use the declared container width.
    const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
                             CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
                             Info.VolatileStorageSize != 0 &&
                             field->getType()
    Address Addr = base.getAddress();
    unsigned Idx = RL.getLLVMFieldNo(field);
    const RecordDecl *rec = field->getParent();
      Addr = wrapWithBPFPreserveStaticOffset(*this, Addr);
    if (!UseVolatile) {
      if (!IsInPreservedAIRegion &&
          (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
        if (Idx != 0)
          // For structs, we GEP to the field that the record layout suggests.
          Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
      } else {
        llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
            getContext().getRecordType(rec), rec->getLocation());
            Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
            DbgInfo);
      }
    }
    const unsigned SS =
        UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
    // Get the access type.
    llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
    Addr = Addr.withElementType(FieldIntTy);
    if (UseVolatile) {
      const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
      if (VolatileOffset)
        Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
    }

    QualType fieldType =
        field->getType().withCVRQualifiers(base.getVRQualifiers());
    // TODO: Support TBAA for bit fields.
    LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
    return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
                                TBAAAccessInfo());
  }

  // Fields of may-alias structures are may-alias themselves.
  // FIXME: this should get propagated down through anonymous structs
  // and unions.
  QualType FieldType = field->getType();
  const RecordDecl *rec = field->getParent();
  AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
  TBAAAccessInfo FieldTBAAInfo;
  if (base.getTBAAInfo().isMayAlias() ||
      rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
    FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
  } else if (rec->isUnion()) {
    // TODO: Support TBAA for unions.
    FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
  } else {
    // If no base type been assigned for the base access, then try to generate
    // one for this base lvalue.
    FieldTBAAInfo = base.getTBAAInfo();
    if (!FieldTBAAInfo.BaseType) {
      FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
      assert(!FieldTBAAInfo.Offset &&
             "Nonzero offset for an access with no base type!");
    }

    // Adjust offset to be relative to the base type.
    const ASTRecordLayout &Layout =
    unsigned CharWidth = getContext().getCharWidth();
    if (FieldTBAAInfo.BaseType)
      FieldTBAAInfo.Offset +=
          Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;

    // Update the final access type and size.
    FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
    FieldTBAAInfo.Size =
  }

  Address addr = base.getAddress();
    addr = wrapWithBPFPreserveStaticOffset(*this, addr);
  if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
        ClassDef->isDynamicClass()) {
      // Getting to any field of dynamic object requires stripping dynamic
      // information provided by invariant.group. This is because accessing
      // fields may leak the real address of dynamic object, which could result
      // in miscompilation when leaked pointer would be compared.
      auto *stripped =
      addr = Address(stripped, addr.getElementType(), addr.getAlignment());
    }
  }

  unsigned RecordCVR = base.getVRQualifiers();
  if (rec->isUnion()) {
    // For unions, there is no pointer adjustment.
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
        hasAnyVptr(FieldType, getContext()))
      // Because unions can easily skip invariant.barriers, we need to add
      // a barrier every time CXXRecord field with vptr is referenced.

        (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
      // Remember the original union field index
      llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
          rec->getLocation());
      addr =
              addr.emitRawPointer(*this),
              getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
          addr.getElementType(), addr.getAlignment());
    }

    if (FieldType->isReferenceType())
      addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
  } else {
    if (!IsInPreservedAIRegion &&
        (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
      // For structs, we GEP to the field that the record layout suggests.
      addr = emitAddrOfFieldStorage(*this, addr, field);
    else
      // Remember the original struct field index
      addr = emitPreserveStructAccess(*this, base, addr, field);
  }

  // If this is a reference field, load the reference right now.
  if (FieldType->isReferenceType()) {
    LValue RefLVal =
        MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
    if (RecordCVR & Qualifiers::Volatile)
      RefLVal.getQuals().addVolatile();
    addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);

    // Qualifiers on the struct don't apply to the referencee.
    RecordCVR = 0;
    FieldType = FieldType->getPointeeType();
  }

  // Make sure that the address is pointing to the right type. This is critical
  // for both unions and structs.
  addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));

  if (field->hasAttr<AnnotateAttr>())
    addr = EmitFieldAnnotations(field, addr);

  LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
  LV.getQuals().addCVRQualifiers(RecordCVR);

  // __weak attribute on a field is ignored.

  return LV;
}
5067
LValue
// NOTE(review): the line naming this definition (the field-initialization
// variant, per the EmitLValueForField reference in the comment below) was
// lost in extraction.
                                             const FieldDecl *Field) {
  QualType FieldType = Field->getType();

  // Non-reference fields are just ordinary field l-values.
  if (!FieldType->isReferenceType())
    return EmitLValueForField(Base, Field);

  // For a reference field, return the storage of the reference itself so it
  // can be initialized, rather than loading through it.
  Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);

  // Make sure that the address is pointing to the right type.
  llvm::Type *llvmType = ConvertTypeForMem(FieldType);
  V = V.withElementType(llvmType);

  // TODO: Generate TBAA information that describes this access as a structure
  // member access and not just an access to an object of the field's type. This
  // should be similar to what we do in EmitLValueForField().
  LValueBaseInfo BaseInfo = Base.getBaseInfo();
  AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
  return MakeAddrLValue(V, FieldType, FieldBaseInfo,
                        CGM.getTBAAInfoForSubobject(Base, FieldType));
}
5091
// Emits an l-value for a compound literal: file-scope literals use a global,
// block-scope literals get a stack temporary initialized here and, in C,
// destroyed at the end of the enclosing scope.
// NOTE(review): the signature and several interior lines (global-pointer
// emission, the Result l-value, the cleanup push) were lost in extraction;
// gaps are left as-is.
  if (E->isFileScope()) {
    return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
  }
  // make sure to emit the VLA size.

  Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
  const Expr *InitExpr = E->getInitializer();

  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
                   /*Init*/ true);

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  if (!getLangOpts().CPlusPlus)
                  E->getType(), getDestroyer(DtorKind),
                  DtorKind & EHCleanup);

  return Result;
}
5118
// Emits an l-value for an initializer list.
// NOTE(review): the signature line of this definition was lost in extraction.
  if (!E->isGLValue())
    // Initializing an aggregate temporary in C++11: T{...}.
    return EmitAggExprToLValue(E);

  // An lvalue initializer list must be initializing a reference.
  assert(E->isTransparent() && "non-transparent glvalue init list");
  return EmitLValue(E->getInit(0));
}
5128
5129/// Emit the operand of a glvalue conditional operator. This is either a glvalue
5130/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
5131/// LValue is returned and the current block has been terminated.
5132static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
5133 const Expr *Operand) {
5134 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
5135 CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
5136 return std::nullopt;
5137 }
5138
5139 return CGF.EmitLValue(Operand);
5140}
5141
5142namespace {
5143// Handle the case where the condition is a constant evaluatable simple integer,
5144// which means we don't have to separately handle the true/false blocks.
// Handle the case where the condition is a constant evaluatable simple integer,
// which means we don't have to separately handle the true/false blocks.
std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
// NOTE(review): the parameter line of this definition and one interior line
// were lost in extraction; gaps are left as-is.
  const Expr *condExpr = E->getCond();
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
    if (!CondExprBool)
      std::swap(Live, Dead);

    if (!CGF.ContainsLabel(Dead)) {
      // If the true case is live, we need to track its region.
      if (CondExprBool)
      CGF.markStmtMaybeUsed(Dead);
      // If a throw expression we emit it and return an undefined lvalue
      // because it can't be used.
      if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
        CGF.EmitCXXThrowExpr(ThrowExpr);
        llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
        llvm::Type *Ty = CGF.UnqualPtrTy;
        // Undef pointer of the dead arm's type: must never be dereferenced.
        return CGF.MakeAddrLValue(
            Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
            Dead->getType());
      }
      return CGF.EmitLValue(Live);
    }
  }
  // Not a foldable condition (or the dead arm contains a label).
  return std::nullopt;
}
// Result of emitting both arms of a conditional operator: the final basic
// block of each arm plus the l-value each arm produced (nullopt when an arm
// was a throw-expression and produced no value).
struct ConditionalInfo {
  llvm::BasicBlock *lhsBlock, *rhsBlock;
  std::optional<LValue> LHS, RHS;
};
5178
// Create and generate the 3 blocks for a conditional operator.
// Leaves the 'current block' in the continuation basic block.
// NOTE(review): the second parameter line of this template and one interior
// line were lost in extraction; gaps are left as-is.
template<typename FuncTy>
ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
                                      const FuncTy &BranchGenFunc) {
  ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
                       CGF.createBasicBlock("cond.false"), std::nullopt,
                       std::nullopt};
  llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");

  // Everything emitted in either arm is conditionally evaluated.
  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
                           CGF.getProfileCount(E));

  // Any temporaries created here are conditional.
  CGF.EmitBlock(Info.lhsBlock);
  eval.begin(CGF);
  Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
  eval.end(CGF);
  Info.lhsBlock = CGF.Builder.GetInsertBlock();

  // A throw-expression arm terminated its own block; only branch to the
  // continuation when an l-value was actually produced.
  if (Info.LHS)
    CGF.Builder.CreateBr(endBlock);

  // Any temporaries created here are conditional.
  CGF.EmitBlock(Info.rhsBlock);
  eval.begin(CGF);
  Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
  eval.end(CGF);
  Info.rhsBlock = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(endBlock);

  return Info;
}
5215} // namespace
5216
// Emits a conditional operator whose result is ignored, evaluating the arms
// purely for their side effects.
// NOTE(review): the signature and the opening of the assert below were lost
// in extraction; gaps are left as-is.
  if (!E->isGLValue()) {
    // ?: here should be an aggregate.
           "Unexpected conditional operator!");
    return (void)EmitAggExprToLValue(E);
  }

  OpaqueValueMapping binding(*this, E);
  // A constant-foldable condition means only the live arm gets emitted.
  if (HandleConditionalOperatorLValueSimpleCase(*this, E))
    return;

  EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
    CGF.EmitIgnoredExpr(E);
    return LValue{};
  });
}
// Emits an l-value for a glvalue conditional operator, combining the
// addresses produced by the two arms in the continuation block.
// NOTE(review): the signature and the openings of two calls (the address
// merge and the TBAA merge) were lost in extraction; gaps are left as-is.
  if (!expr->isGLValue()) {
    // ?: here should be an aggregate.
    assert(hasAggregateEvaluationKind(expr->getType()) &&
           "Unexpected conditional operator!");
    return EmitAggExprToLValue(expr);
  }

  OpaqueValueMapping binding(*this, expr);
  // Fast path: a constant condition lets us emit only the live arm.
  if (std::optional<LValue> Res =
          HandleConditionalOperatorLValueSimpleCase(*this, expr))
    return *Res;

  ConditionalInfo Info = EmitConditionalBlocks(
      *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
        return EmitLValueOrThrowExpression(CGF, E);
      });

  // Only simple (address-based) l-values can be merged across the arms.
  if ((Info.LHS && !Info.LHS->isSimple()) ||
      (Info.RHS && !Info.RHS->isSimple()))
    return EmitUnsupportedLValue(expr, "conditional operator");

  if (Info.LHS && Info.RHS) {
    Address lhsAddr = Info.LHS->getAddress();
    Address rhsAddr = Info.RHS->getAddress();
        lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
        Builder.GetInsertBlock(), expr->getType());
    // Take the more conservative of the two arms' alignment sources.
    AlignmentSource alignSource =
        std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
                 Info.RHS->getBaseInfo().getAlignmentSource());
        Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
    return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
                          TBAAInfo);
  } else {
    // Exactly one arm threw; the surviving arm's l-value is the result.
    assert((Info.LHS || Info.RHS) &&
           "both operands of glvalue conditional are throw-expressions?");
    return Info.LHS ? *Info.LHS : *Info.RHS;
  }
}
5277
/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
/// type. If the cast is to a reference, we can have the usual lvalue result,
/// otherwise if a cast is needed by the code generator in an lvalue context,
/// then it must mean that we need the address of an aggregate in order to
/// access one of its members. This can happen for all the reasons that casts
/// are permitted with aggregate result, including noop aggregate casts, and
/// cast from scalar to union.
// NOTE(review): the signature line and the openings of several calls inside
// the case bodies were lost in extraction; gaps are left as-is.
  switch (E->getCastKind()) {
  // None of these cast kinds can ever yield an l-value.
  case CK_ToVoid:
  case CK_BitCast:
  case CK_LValueToRValueBitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToMemberPointer:
  case CK_NullToPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_IntegralCast:
  case CK_BooleanToSignedIntegral:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_DerivedToBaseMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_IntToOCLSampler:
  case CK_FloatingToFixedPoint:
  case CK_FixedPointToFloating:
  case CK_FixedPointCast:
  case CK_FixedPointToBoolean:
  case CK_FixedPointToIntegral:
  case CK_IntegralToFixedPoint:
  case CK_MatrixCast:
  case CK_HLSLVectorTruncation:
  case CK_HLSLArrayRValue:
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");

  case CK_Dependent:
    llvm_unreachable("dependent cast kind in IR gen!");

  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  // These are never l-values; just use the aggregate emission code.
  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic:
    return EmitAggExprToLValue(E);

  case CK_Dynamic: {
    LValue LV = EmitLValue(E->getSubExpr());
    Address V = LV.getAddress();
    const auto *DCE = cast<CXXDynamicCastExpr>(E);
  }

  // These conversions preserve the l-value of the sub-expression.
  case CK_ConstructorConversion:
  case CK_UserDefinedConversion:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_LValueToRValue:
    return EmitLValue(E->getSubExpr());

  case CK_NoOp: {
    // CK_NoOp can model a qualification conversion, which can remove an array
    // bound and change the IR type.
    // FIXME: Once pointee types are removed from IR, remove this.
    LValue LV = EmitLValue(E->getSubExpr());
    // Propagate the volatile qualifer to LValue, if exist in E.
    if (E->changesVolatileQualification())
      LV.getQuals() = E->getType().getQualifiers();
    if (LV.isSimple()) {
      Address V = LV.getAddress();
      if (V.isValid()) {
        llvm::Type *T = ConvertTypeForMem(E->getType());
        if (V.getElementType() != T)
          LV.setAddress(V.withElementType(T));
      }
    }
    return LV;
  }

  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const auto *DerivedClassTy =
        E->getSubExpr()->getType()->castAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());
    Address This = LV.getAddress();

    // Perform the derived-to-base conversion
        This, DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false, E->getExprLoc());

    // TODO: Support accesses to members of base classes in TBAA. For now, we
    // conservatively pretend that the complete object is of the base class
    // type.
    return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
  }
  case CK_ToUnion:
    return EmitAggExprToLValue(E);
  case CK_BaseToDerived: {
    const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());

    LValue LV = EmitLValue(E->getSubExpr());

    // Perform the base-to-derived conversion
        LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
        /*NullCheckValue=*/false);

    // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
        E->getType());

    if (SanOpts.has(SanitizerKind::CFIDerivedCast))
                    /*MayBeNull=*/false, CFITCK_DerivedCast,
                    E->getBeginLoc());

    return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
  }
  case CK_LValueBitCast: {
    // This must be a reinterpret_cast (or c-style equivalent).
    const auto *CE = cast<ExplicitCastExpr>(E);

    CGM.EmitExplicitCastExprType(CE, this);
    LValue LV = EmitLValue(E->getSubExpr());
        ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));

    if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
                    /*MayBeNull=*/false, CFITCK_UnrelatedCast,
                    E->getBeginLoc());

    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
  }
  case CK_AddressSpaceConversion: {
    LValue LV = EmitLValue(E->getSubExpr());
    llvm::Value *V = getTargetHooks().performAddrSpaceCast(
        *this, LV.getPointer(*this),
        E->getSubExpr()->getType().getAddressSpace(),
        E->getType().getAddressSpace(), ConvertType(DestTy));
                 LV.getAddress().getAlignment()),
        E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
  }
  case CK_ObjCObjectLValueCast: {
    LValue LV = EmitLValue(E->getSubExpr());
    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
  }
  case CK_ZeroToOCLOpaqueType:
    llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");

  case CK_VectorSplat: {
    // LValue results of vector splats are only supported in HLSL.
    if (!getLangOpts().HLSL)
      return EmitUnsupportedLValue(E, "unexpected cast lvalue");
    return EmitLValue(E->getSubExpr());
  }
  }

  llvm_unreachable("Unhandled lvalue cast kind?");
}
5474
5478}
5479
// Emit the two l-values an HLSL out/inout argument needs: the caller's
// argument l-value (BaseLV) and a fresh IR temporary (TempLV) the callee
// writes through.  Both opaque values of the HLSLOutArgExpr are bound so
// later emission can refer to them.
// NOTE(review): this listing is a lossy extraction — the signature line
// (original line 5481) is missing; confirm against the full source.
5480std::pair<LValue, LValue>
 5482 // Emitting the casted temporary through an opaque value.
 5483 LValue BaseLV = EmitLValue(E->getArgLValue());
 5484 OpaqueValueMappingData::bind(*this, E->getOpaqueArgLValue(), BaseLV);
 5485
 5486 QualType ExprTy = E->getType();
 5487 Address OutTemp = CreateIRTemp(ExprTy);
 5488 LValue TempLV = MakeAddrLValue(OutTemp, ExprTy);
 5489
// For "inout" the temporary must start with the casted current value of the
// argument; for plain "out" it is left uninitialized.
 5490 if (E->isInOut())
 5491 EmitInitializationToLValue(E->getCastedTemporary()->getSourceExpr(),
 5492 TempLV);
 5493
 5494 OpaqueValueMappingData::bind(*this, E->getCastedTemporary(), TempLV);
 5495 return std::make_pair(BaseLV, TempLV);
 5496}
5497
// Emit an HLSL out/inout argument into the call argument list: materialize
// the temporary, start its lifetime, register a writeback to the original
// l-value (applying the writeback cast), and pass the temporary's address.
// NOTE(review): the signature line (original 5498) is missing from this
// extraction — confirm against the full source.
 5499 CallArgList &Args, QualType Ty) {
 5500
 5501 auto [BaseLV, TempLV] = EmitHLSLOutArgLValues(E, Ty);
 5502
 5503 llvm::Value *Addr = TempLV.getAddress().getBasePointer();
 5504 llvm::Type *ElTy = ConvertTypeForMem(TempLV.getType());
 5505
 5506 llvm::TypeSize Sz = CGM.getDataLayout().getTypeAllocSize(ElTy);
 5507
// Lifetime marker is later ended by the writeback machinery (passed below).
 5508 llvm::Value *LifetimeSize = EmitLifetimeStart(Sz, Addr);
 5509
 5510 Address TmpAddr(Addr, ElTy, TempLV.getAlignment());
 5511 Args.addWriteback(BaseLV, TmpAddr, nullptr, E->getWritebackCast(),
 5512 LifetimeSize);
 5513 Args.add(RValue::get(TmpAddr, *this), Ty);
 5514 return TempLV;
 5515}
5516
// Return the LValue previously bound for an OpaqueValueExpr, or — for a
// "unique" OVE that is evaluated exactly once at its use site — emit its
// source expression now.
5517LValue
 5520
 5521 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
 5522 it = OpaqueLValues.find(e);
 5523
 5524 if (it != OpaqueLValues.end())
 5525 return it->second;
 5526
// A non-unique OVE must have been bound by OpaqueValueMapping beforehand.
 5527 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
 5528 return EmitLValue(e->getSourceExpr());
 5529}
5530
// RValue counterpart of getOrCreateOpaqueLValueMapping: return the bound
// RValue for an OpaqueValueExpr, or emit a unique OVE's source on demand.
5531RValue
 5534
 5535 llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
 5536 it = OpaqueRValues.find(e);
 5537
 5538 if (it != OpaqueRValues.end())
 5539 return it->second;
 5540
 5541 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
 5542 return EmitAnyExpr(e->getSourceExpr());
 5543}
5544
// Load the value of one field of an already-emitted record l-value,
// dispatching on the field type's evaluation kind.  Used when copying a
// record field-by-field.
 5546 const FieldDecl *FD,
 5548 QualType FT = FD->getType();
 5549 LValue FieldLV = EmitLValueForField(LV, FD);
 5550 switch (getEvaluationKind(FT)) {
 5551 case TEK_Complex:
 5552 return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
 5553 case TEK_Aggregate:
 5554 return FieldLV.asAggregateRValue();
 5555 case TEK_Scalar:
 5556 // This routine is used to load fields one-by-one to perform a copy, so
 5557 // don't load reference fields.
 5558 if (FD->getType()->isReferenceType())
 5559 return RValue::get(FieldLV.getPointer(*this));
 5560 // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
 5561 // primitive load.
 5562 if (FieldLV.isBitField())
 5563 return EmitLoadOfLValue(FieldLV, Loc);
 5564 return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
 5565 }
 5566 llvm_unreachable("bad evaluation kind");
 5567}
5568
5569//===--------------------------------------------------------------------===//
5570// Expression Emission
5571//===--------------------------------------------------------------------===//
5572
// Top-level dispatch for emitting a CallExpr: routes to the specialized
// emitters (block call, C++ member call, CUDA kernel launch, operator
// member call, builtin, pseudo-destructor) and otherwise falls through to
// the generic EmitCall.  *CallOrInvoke receives the emitted call site.
 5574 ReturnValueSlot ReturnValue,
 5575 llvm::CallBase **CallOrInvoke) {
// Ensure CallOrInvoke always points somewhere so the scope-exit below can
// inspect the emitted call site.
 5576 llvm::CallBase *CallOrInvokeStorage;
 5577 if (!CallOrInvoke) {
 5578 CallOrInvoke = &CallOrInvokeStorage;
 5579 }
 5580
// Tag the call site coro_elide_safe on every return path, whichever
// specialized emitter produced it.
 5581 auto AddCoroElideSafeOnExit = llvm::make_scope_exit([&] {
 5582 if (E->isCoroElideSafe()) {
 5583 auto *I = *CallOrInvoke;
 5584 if (I)
 5585 I->addFnAttr(llvm::Attribute::CoroElideSafe);
 5586 }
 5587 });
 5588
 5589 // Builtins never have block type.
 5590 if (E->getCallee()->getType()->isBlockPointerType())
 5591 return EmitBlockCallExpr(E, ReturnValue, CallOrInvoke);
 5592
 5593 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
 5594 return EmitCXXMemberCallExpr(CE, ReturnValue, CallOrInvoke);
 5595
 5596 if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
 5597 return EmitCUDAKernelCallExpr(CE, ReturnValue, CallOrInvoke);
 5598
 5599 // A CXXOperatorCallExpr is created even for explicit object methods, but
 5600 // these should be treated like static function calls.
 5601 if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
 5602 if (const auto *MD =
 5603 dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
 5604 MD && MD->isImplicitObjectMemberFunction())
 5605 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue, CallOrInvoke);
 5606
 5607 CGCallee callee = EmitCallee(E->getCallee());
 5608
 5609 if (callee.isBuiltin()) {
 5610 return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
 5611 E, ReturnValue);
 5612 }
 5613
 5614 if (callee.isPseudoDestructor()) {
 5616 }
 5617
 5618 return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue,
 5619 /*Chain=*/nullptr, CallOrInvoke);
 5620}
5621
5622/// Emit a CallExpr without considering whether it might be a subclass.
 5624 ReturnValueSlot ReturnValue,
 5625 llvm::CallBase **CallOrInvoke) {
// Skips all the CXXMemberCallExpr/CUDA/builtin dispatch in EmitCallExpr.
 5626 CGCallee Callee = EmitCallee(E->getCallee());
 5627 return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
 5628 /*Chain=*/nullptr, CallOrInvoke);
 5629}
5630
5631// Detect the unusual situation where an inline version is shadowed by a
5632// non-inline version. In that case we should pick the external one
5633// everywhere. That's GCC behavior too.
// Returns true only if every redeclaration of FD is an inline builtin
// declaration.
 5635 for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
 5636 if (!PD->isInlineBuiltinDeclaration())
 5637 return false;
 5638 return true;
 5639}
5640
// Build the CGCallee for a direct call to GD, handling three special cases:
// (1) calls to inline builtins are redirected to an internal ".inline"
// clone, (2) calls to replaceable builtins may be emitted as the builtin
// itself, and (3) on CUDA host side a __global__ callee is replaced by its
// kernel stub.
 5642 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
 5643
 5644 if (auto builtinID = FD->getBuiltinID()) {
 5645 std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
 5646 std::string NoBuiltins = "no-builtins";
 5647
 5648 StringRef Ident = CGF.CGM.getMangledName(GD);
 5649 std::string FDInlineName = (Ident + ".inline").str();
 5650
 5651 bool IsPredefinedLibFunction =
 5653 bool HasAttributeNoBuiltin =
 5654 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
 5655 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
 5656
 5657 // When directly calling an inline builtin, call it through its mangled
 5658 // name to make it clear it's not the actual builtin.
 5659 if (CGF.CurFn->getName() != FDInlineName &&
 5661 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
 5662 llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
 5663 llvm::Module *M = Fn->getParent();
// Lazily create the always_inline internal clone the call is routed to.
 5664 llvm::Function *Clone = M->getFunction(FDInlineName);
 5665 if (!Clone) {
 5666 Clone = llvm::Function::Create(Fn->getFunctionType(),
 5667 llvm::GlobalValue::InternalLinkage,
 5668 Fn->getAddressSpace(), FDInlineName, M);
 5669 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
 5670 }
 5671 return CGCallee::forDirect(Clone, GD);
 5672 }
 5673
 5674 // Replaceable builtins provide their own implementation of a builtin. If we
 5675 // are in an inline builtin implementation, avoid trivial infinite
 5676 // recursion. Honor __attribute__((no_builtin("foo"))) or
 5677 // __attribute__((no_builtin)) on the current function unless foo is
 5678 // not a predefined library function which means we must generate the
 5679 // builtin no matter what.
 5680 else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
 5681 return CGCallee::forBuiltin(builtinID, FD);
 5682 }
 5683
 5684 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
// Host-side CUDA: a kernel is called through its registered stub, not the
// device function itself.
 5685 if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
 5686 FD->hasAttr<CUDAGlobalAttr>())
 5687 CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
 5688 cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));
 5689
 5690 return CGCallee::forDirect(CalleePtr, GD);
 5691}
5692
// Resolve a callee expression to a CGCallee, peeling implicit casts and
// template substitutions, recognizing direct calls (DeclRefExpr /
// MemberExpr to a FunctionDecl) and pseudo-destructors, and otherwise
// emitting an indirect function pointer.
 5694 E = E->IgnoreParens();
 5695
 5696 // Look through function-to-pointer decay.
 5697 if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
 5698 if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
 5699 ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
 5700 return EmitCallee(ICE->getSubExpr());
 5701 }
 5702
 5703 // Resolve direct calls.
 5704 } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
 5705 if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
 5706 return EmitDirectCallee(*this, FD);
 5707 }
 5708 } else if (auto ME = dyn_cast<MemberExpr>(E)) {
 5709 if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
// The base is evaluated for side effects only; the callee is direct.
 5710 EmitIgnoredExpr(ME->getBase());
 5711 return EmitDirectCallee(*this, FD);
 5712 }
 5713
 5714 // Look through template substitutions.
 5715 } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
 5716 return EmitCallee(NTTP->getReplacement());
 5717
 5718 // Treat pseudo-destructor calls differently.
 5719 } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
 5721 }
 5722
 5723 // Otherwise, we have an indirect reference.
 5724 llvm::Value *calleePtr;
 5726 if (auto ptrType = E->getType()->getAs<PointerType>()) {
 5727 calleePtr = EmitScalarExpr(E);
 5728 functionType = ptrType->getPointeeType();
 5729 } else {
// A non-pointer callee (e.g. a function reference) is emitted as an
// l-value whose pointer is known non-null.
 5730 functionType = E->getType();
 5731 calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
 5732 }
 5733 assert(functionType->isFunctionType());
 5734
 5735 GlobalDecl GD;
 5736 if (const auto *VD =
 5737 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
 5738 GD = GlobalDecl(VD);
 5739
 5740 CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
 5742 CGCallee callee(calleeInfo, calleePtr, pointerAuth);
 5743 return callee;
 5744}
5745
// Emit a binary operator whose result is used as an l-value: comma,
// pointer-to-member access, and (the common case) assignment, dispatched
// on the evaluation kind of the assigned type.
 5747 // Comma expressions just emit their LHS then their RHS as an l-value.
 5748 if (E->getOpcode() == BO_Comma) {
 5749 EmitIgnoredExpr(E->getLHS());
 5751 return EmitLValue(E->getRHS());
 5752 }
 5753
 5754 if (E->getOpcode() == BO_PtrMemD ||
 5755 E->getOpcode() == BO_PtrMemI)
 5757
 5758 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
 5759
 5760 // Note that in all of these cases, __block variables need the RHS
 5761 // evaluated first just in case the variable gets moved by the RHS.
 5762
 5763 switch (getEvaluationKind(E->getType())) {
 5764 case TEK_Scalar: {
// ARC-qualified scalar stores have dedicated emitters.
 5765 switch (E->getLHS()->getType().getObjCLifetime()) {
 5767 return EmitARCStoreStrong(E, /*ignored*/ false).first;
 5768
 5770 return EmitARCStoreAutoreleasing(E).first;
 5771
 5772 // No reason to do any of these differently.
 5776 break;
 5777 }
 5778
 5779 // TODO: Can we de-duplicate this code with the corresponding code in
 5780 // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
 5781 RValue RV;
 5782 llvm::Value *Previous = nullptr;
 5783 QualType SrcType = E->getRHS()->getType();
 5784 // Check if LHS is a bitfield, if RHS contains an implicit cast expression
 5785 // we want to extract that value and potentially (if the bitfield sanitizer
 5786 // is enabled) use it to check for an implicit conversion.
 5787 if (E->getLHS()->refersToBitField()) {
 5788 llvm::Value *RHS =
 5790 RV = RValue::get(RHS);
 5791 } else
 5792 RV = EmitAnyExpr(E->getRHS());
 5793
 5794 LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
 5795
 5796 if (RV.isScalar())
 5798
 5799 if (LV.isBitField()) {
 5800 llvm::Value *Result = nullptr;
 5801 // If bitfield sanitizers are enabled we want to use the result
 5802 // to check whether a truncation or sign change has occurred.
 5803 if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
 5805 else
 5807
 5808 // If the expression contained an implicit conversion, make sure
 5809 // to use the value before the scalar conversion.
 5810 llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
 5811 QualType DstType = E->getLHS()->getType();
 5812 EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
 5813 LV.getBitFieldInfo(), E->getExprLoc());
 5814 } else
 5815 EmitStoreThroughLValue(RV, LV);
 5816
// OpenMP needs to observe the store for data-sharing bookkeeping.
 5817 if (getLangOpts().OpenMP)
 5819 E->getLHS());
 5820 return LV;
 5821 }
 5822
 5823 case TEK_Complex:
 5825
 5826 case TEK_Aggregate:
 5827 // If the lang opt is HLSL and the LHS is a constant array
 5828 // then we are performing a copy assignment and call a special
 5829 // function because EmitAggExprToLValue emits to a temporary LValue
 5830 if (getLangOpts().HLSL && E->getLHS()->getType()->isConstantArrayType())
 5832
 5833 return EmitAggExprToLValue(E);
 5834 }
 5835 llvm_unreachable("bad evaluation kind");
 5836}
5837
5838// This function implements trivial copy assignment for HLSL's
5839// assignable constant arrays.
 5841 // Don't emit an LValue for the RHS because it might not be an LValue
 5842 LValue LHS = EmitLValue(E->getLHS());
 5843 // In C the RHS of an assignment operator is an RValue.
 5844 // EmitAggregateAssign takes an LValue for the RHS. Instead we can call
 5845 // EmitInitializationToLValue to emit an RValue into an LValue.
 5846 EmitInitializationToLValue(E->getRHS(), LHS);
 5847 return LHS;
 5848}
5849
// Emit a call whose result is used as an l-value.  A scalar return is only
// legal when the call returns a reference, in which case the returned
// pointer is the l-value.
 5851 llvm::CallBase **CallOrInvoke) {
 5852 RValue RV = EmitCallExpr(E, ReturnValueSlot(), CallOrInvoke);
 5853
 5854 if (!RV.isScalar())
 5857
 5858 assert(E->getCallReturnType(getContext())->isReferenceType() &&
 5859 "Can't have a scalar return unless the return type is a "
 5860 "reference type!");
 5861
 5863}
5864
5866 // FIXME: This shouldn't require another copy.
5867 return EmitAggExprToLValue(E);
5868}
5869
5872 && "binding l-value to type which needs a temporary");
5874 EmitCXXConstructExpr(E, Slot);
5876}
5877
5878LValue
5881}
5882
5884 return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
5886}
5887
5891}
5892
5893LValue
5895 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
5897 EmitAggExpr(E->getSubExpr(), Slot);
5898 EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
5900}
5901
5904
5905 if (!RV.isScalar())
5908
5909 assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
5910 "Can't have a scalar return unless the return type is a "
5911 "reference type!");
5912
5914}
5915
5917 Address V =
5918 CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
5920}
5921
// Delegate ivar-offset computation to the ObjC runtime implementation.
 5923 const ObjCIvarDecl *Ivar) {
 5924 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
 5925}
5926
// Return the ivar offset converted to the target's ptrdiff_t width.
5927llvm::Value *
 5929 const ObjCIvarDecl *Ivar) {
 5930 llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
 5931 QualType PointerDiffType = getContext().getPointerDiffType();
// The runtime may hand back a narrower/wider integer; normalize it.
 5932 return Builder.CreateZExtOrTrunc(OffsetValue,
 5933 getTypes().ConvertType(PointerDiffType));
 5934}
5935
// Form an l-value for an ivar access; the runtime knows the object layout.
 5937 llvm::Value *BaseValue,
 5938 const ObjCIvarDecl *Ivar,
 5939 unsigned CVRQualifiers) {
 5940 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
 5941 Ivar, CVRQualifiers);
 5942}
5943
// Emit an ObjC ivar reference (obj->ivar or obj.ivar) as an l-value:
// evaluate the base, then ask the runtime for the ivar l-value with the
// base's CVR qualifiers applied.
 5945 // FIXME: A lot of the code below could be shared with EmitMemberExpr.
 5946 llvm::Value *BaseValue = nullptr;
 5947 const Expr *BaseExpr = E->getBase();
 5948 Qualifiers BaseQuals;
 5949 QualType ObjectTy;
 5950 if (E->isArrow()) {
 5951 BaseValue = EmitScalarExpr(BaseExpr);
 5952 ObjectTy = BaseExpr->getType()->getPointeeType();
 5953 BaseQuals = ObjectTy.getQualifiers();
 5954 } else {
 5955 LValue BaseLV = EmitLValue(BaseExpr);
 5956 BaseValue = BaseLV.getPointer(*this);
 5957 ObjectTy = BaseExpr->getType();
 5958 BaseQuals = ObjectTy.getQualifiers();
 5959 }
 5960
 5961 LValue LV =
 5962 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
 5963 BaseQuals.getCVRQualifiers());
 5965 return LV;
 5966}
5967
5969 // Can only get l-value for message expression returning aggregate type
5973}
5974
// Generic call emission: given the callee's (pointer-to-)function type and
// a resolved CGCallee, optionally run the UBSan function-type check and the
// CFI indirect-call check, evaluate the argument list in the
// standard-mandated order, and emit the actual call.  *CallOrInvoke
// receives the emitted call site; *ResolvedFnInfo the selected
// CGFunctionInfo.
 5976 const CGCallee &OrigCallee, const CallExpr *E,
 5977 ReturnValueSlot ReturnValue,
 5978 llvm::Value *Chain,
 5979 llvm::CallBase **CallOrInvoke,
 5980 CGFunctionInfo const **ResolvedFnInfo) {
 5981 // Get the actual function type. The callee type will always be a pointer to
 5982 // function type or a block pointer type.
 5983 assert(CalleeType->isFunctionPointerType() &&
 5984 "Call must have function pointer type!");
 5985
 5986 const Decl *TargetDecl =
 5987 OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
 5988
 5989 assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
 5990 !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
 5991 "trying to emit a call to an immediate function");
 5992
 5993 CalleeType = getContext().getCanonicalType(CalleeType);
 5994
 5995 auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
 5996
 5997 CGCallee Callee = OrigCallee;
 5998
// -fsanitize=function: for indirect calls through a prototyped function
// pointer, compare the signature+type-hash prefix stored before the
// callee's entry point against the expected hash of the call's type.
 5999 if (SanOpts.has(SanitizerKind::Function) &&
 6000 (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
 6001 !isa<FunctionNoProtoType>(PointeeType)) {
 6002 if (llvm::Constant *PrefixSig =
 6004 SanitizerScope SanScope(this);
 6005 auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);
 6006
 6007 llvm::Type *PrefixSigType = PrefixSig->getType();
 6008 llvm::StructType *PrefixStructTy = llvm::StructType::get(
 6009 CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
 6010
 6011 llvm::Value *CalleePtr = Callee.getFunctionPointer();
 6013 // Use raw pointer since we are using the callee pointer as data here.
 6014 Address Addr =
 6015 Address(CalleePtr, CalleePtr->getType(),
 6017 CalleePtr->getPointerAlignment(CGM.getDataLayout())),
 6018 Callee.getPointerAuthInfo(), nullptr);
 6019 CalleePtr = Addr.emitRawPointer(*this);
 6020 }
 6021
 6022 // On 32-bit Arm, the low bit of a function pointer indicates whether
 6023 // it's using the Arm or Thumb instruction set. The actual first
 6024 // instruction lives at the same address either way, so we must clear
 6025 // that low bit before using the function address to find the prefix
 6026 // structure.
 6027 //
 6028 // This applies to both Arm and Thumb target triples, because
 6029 // either one could be used in an interworking context where it
 6030 // might be passed function pointers of both types.
 6031 llvm::Value *AlignedCalleePtr;
 6032 if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
 6033 llvm::Value *CalleeAddress =
 6034 Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
 6035 llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
 6036 llvm::Value *AlignedCalleeAddress =
 6037 Builder.CreateAnd(CalleeAddress, Mask);
 6038 AlignedCalleePtr =
 6039 Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
 6040 } else {
 6041 AlignedCalleePtr = CalleePtr;
 6042 }
 6043
// The prefix struct lives immediately before the function entry (-1 GEP).
 6044 llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
 6045 llvm::Value *CalleeSigPtr =
 6046 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
 6047 llvm::Value *CalleeSig =
 6048 Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
 6049 llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
 6050
// Only check the type hash when the signature marker matched; otherwise
// the callee was not instrumented and the check is skipped.
 6051 llvm::BasicBlock *Cont = createBasicBlock("cont");
 6052 llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
 6053 Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
 6054
 6055 EmitBlock(TypeCheck);
 6056 llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
 6057 Int32Ty,
 6058 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
 6059 getPointerAlign());
 6060 llvm::Value *CalleeTypeHashMatch =
 6061 Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
 6062 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
 6063 EmitCheckTypeDescriptor(CalleeType)};
 6064 EmitCheck(std::make_pair(CalleeTypeHashMatch, SanitizerKind::SO_Function),
 6065 SanitizerHandler::FunctionTypeMismatch, StaticData,
 6066 {CalleePtr});
 6067
 6068 Builder.CreateBr(Cont);
 6069 EmitBlock(Cont);
 6070 }
 6071 }
 6072
 6073 const auto *FnType = cast<FunctionType>(PointeeType);
 6074
 6075 // If we are checking indirect calls and this call is indirect, check that the
 6076 // function pointer is a member of the bit set for the function type.
 6077 if (SanOpts.has(SanitizerKind::CFIICall) &&
 6078 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
 6079 SanitizerScope SanScope(this);
 6080 EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
 6081
 6082 llvm::Metadata *MD;
 6083 if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
 6085 else
 6087
 6088 llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
 6089
 6090 llvm::Value *CalleePtr = Callee.getFunctionPointer();
 6091 llvm::Value *TypeTest = Builder.CreateCall(
 6092 CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});
 6093
 6094 auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
 6095 llvm::Constant *StaticData[] = {
 6096 llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
 6099 };
 6100 if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
 6101 EmitCfiSlowPathCheck(SanitizerKind::SO_CFIICall, TypeTest, CrossDsoTypeId,
 6102 CalleePtr, StaticData);
 6103 } else {
 6104 EmitCheck(std::make_pair(TypeTest, SanitizerKind::SO_CFIICall),
 6105 SanitizerHandler::CFICheckFail, StaticData,
 6106 {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
 6107 }
 6108 }
 6109
 6110 CallArgList Args;
 6111 if (Chain)
 6112 Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);
 6113
 6114 // C++17 requires that we evaluate arguments to a call using assignment syntax
 6115 // right-to-left, and that we evaluate arguments to certain other operators
 6116 // left-to-right. Note that we allow this to override the order dictated by
 6117 // the calling convention on the MS ABI, which means that parameter
 6118 // destruction order is not necessarily reverse construction order.
 6119 // FIXME: Revisit this based on C++ committee response to unimplementability.
 6121 bool StaticOperator = false;
 6122 if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
 6123 if (OCE->isAssignmentOp())
 6125 else {
 6126 switch (OCE->getOperator()) {
 6127 case OO_LessLess:
 6128 case OO_GreaterGreater:
 6129 case OO_AmpAmp:
 6130 case OO_PipePipe:
 6131 case OO_Comma:
 6132 case OO_ArrowStar:
 6134 break;
 6135 default:
 6136 break;
 6137 }
 6138 }
 6139
 6140 if (const auto *MD =
 6141 dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
 6142 MD && MD->isStatic())
 6143 StaticOperator = true;
 6144 }
 6145
 6146 auto Arguments = E->arguments();
 6147 if (StaticOperator) {
 6148 // If we're calling a static operator, we need to emit the object argument
 6149 // and ignore it.
 6150 EmitIgnoredExpr(E->getArg(0));
 6151 Arguments = drop_begin(Arguments, 1);
 6152 }
 6153 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
 6154 E->getDirectCallee(), /*ParamsToSkip=*/0, Order);
 6155
 6157 Args, FnType, /*ChainCall=*/Chain);
 6158
 6159 if (ResolvedFnInfo)
 6160 *ResolvedFnInfo = &FnInfo;
 6161
 6162 // HIP function pointer contains kernel handle when it is used in triple
 6163 // chevron. The kernel stub needs to be loaded from kernel handle and used
 6164 // as callee.
 6165 if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
 6166 isa<CUDAKernelCallExpr>(E) &&
 6167 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
 6168 llvm::Value *Handle = Callee.getFunctionPointer();
 6169 auto *Stub = Builder.CreateLoad(
 6170 Address(Handle, Handle->getType(), CGM.getPointerAlign()));
 6171 Callee.setFunctionPointer(Stub);
 6172 }
 6173 llvm::CallBase *LocalCallOrInvoke = nullptr;
 6174 RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &LocalCallOrInvoke,
 6175 E == MustTailCall, E->getExprLoc());
 6176
 6177 // Generate function declaration DISubprogram in order to be used
 6178 // in debug info about call sites.
 6179 if (CGDebugInfo *DI = getDebugInfo()) {
 6180 if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
 6181 FunctionArgList Args;
 6182 QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
 6183 DI->EmitFuncDeclForCallSite(LocalCallOrInvoke,
 6184 DI->getFunctionType(CalleeDecl, ResTy, Args),
 6185 CalleeDecl);
 6186 }
 6187 }
 6188 if (CallOrInvoke)
 6189 *CallOrInvoke = LocalCallOrInvoke;
 6190
 6191 return Call;
 6192}
6193
// Emit `base .* ptr` / `base ->* ptr` for a pointer to a data member:
// evaluate the base address and the member-pointer offset, then let the
// C++ ABI compute the member's address.
 6196 Address BaseAddr = Address::invalid();
 6197 if (E->getOpcode() == BO_PtrMemI) {
 6198 BaseAddr = EmitPointerWithAlignment(E->getLHS());
 6199 } else {
 6200 BaseAddr = EmitLValue(E->getLHS()).getAddress();
 6201 }
 6202
 6203 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
 6204 const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();
 6205
 6206 LValueBaseInfo BaseInfo;
 6207 TBAAAccessInfo TBAAInfo;
 6208 Address MemberAddr =
 6209 EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
 6210 &TBAAInfo);
 6211
 6212 return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
6214
6215/// Given the address of a temporary variable, produce an r-value of
6216/// its type.
 6218 QualType type,
 6219 SourceLocation loc) {
 6221 switch (getEvaluationKind(type)) {
 6222 case TEK_Complex:
 6223 return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
 6224 case TEK_Aggregate:
// Aggregates are returned by address, not loaded.
 6225 return lvalue.asAggregateRValue();
 6226 case TEK_Scalar:
 6227 return RValue::get(EmitLoadOfScalar(lvalue, loc));
 6228 }
 6229 llvm_unreachable("bad evaluation kind");
 6230}
6231
6232void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
6233 assert(Val->getType()->isFPOrFPVectorTy());
6234 if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
6235 return;
6236
6237 llvm::MDBuilder MDHelper(getLLVMContext());
6238 llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
6239
6240 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
6241}
6242
6243void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
6244 llvm::Type *EltTy = Val->getType()->getScalarType();
6245 if (!EltTy->isFloatTy())
6246 return;
6247
6248 if ((getLangOpts().OpenCL &&
6249 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6250 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6251 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6252 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 3ulp
6253 //
6254 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6255 // build option allows an application to specify that single precision
6256 // floating-point divide (x/y and 1/x) and sqrt used in the program
6257 // source are correctly rounded.
6258 //
6259 // TODO: CUDA has a prec-sqrt flag
6260 SetFPAccuracy(Val, 3.0f);
6261 }
6262}
6263
6264void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
6265 llvm::Type *EltTy = Val->getType()->getScalarType();
6266 if (!EltTy->isFloatTy())
6267 return;
6268
6269 if ((getLangOpts().OpenCL &&
6270 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6271 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6272 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6273 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
6274 //
6275 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6276 // build option allows an application to specify that single precision
6277 // floating-point divide (x/y and 1/x) and sqrt used in the program
6278 // source are correctly rounded.
6279 //
6280 // TODO: CUDA has a prec-div flag
6281 SetFPAccuracy(Val, 2.5f);
6282 }
6283}
6284
// Holder for the result of emitPseudoObjectExpr: exactly one of LV or RV is
// meaningful, selected by the forLValue flag at the call site.
6285namespace {
 6286 struct LValueOrRValue {
 6287 LValue LV;
 6288 RValue RV;
 6289 };
 6290}
6291
// Emit a PseudoObjectExpr by walking its semantic expressions in order,
// binding each OpaqueValueExpr to its source, evaluating the designated
// result expression as an l-value or r-value, and unbinding the opaques
// afterwards.
// NOTE(review): this extraction is missing a few lines (e.g. the `opaques`
// vector declaration at original line 6296 and the loop header at 6302);
// confirm against the full source.
6292static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
 6293 const PseudoObjectExpr *E,
 6294 bool forLValue,
 6295 AggValueSlot slot) {
 6297
 6298 // Find the result expression, if any.
 6299 const Expr *resultExpr = E->getResultExpr();
 6300 LValueOrRValue result;
 6301
 6303 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
 6304 const Expr *semantic = *i;
 6305
 6306 // If this semantic expression is an opaque value, bind it
 6307 // to the result of its source expression.
 6308 if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
 6309 // Skip unique OVEs.
 6310 if (ov->isUnique()) {
 6311 assert(ov != resultExpr &&
 6312 "A unique OVE cannot be used as the result expression");
 6313 continue;
 6314 }
 6315
 6316 // If this is the result expression, we may need to evaluate
 6317 // directly into the slot.
 6318 typedef CodeGenFunction::OpaqueValueMappingData OVMA;
 6319 OVMA opaqueData;
 6320 if (ov == resultExpr && ov->isPRValue() && !forLValue &&
 6322 CGF.EmitAggExpr(ov->getSourceExpr(), slot);
 6323 LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
 6325 opaqueData = OVMA::bind(CGF, ov, LV);
 6326 result.RV = slot.asRValue();
 6327
 6328 // Otherwise, emit as normal.
 6329 } else {
 6330 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
 6331
 6332 // If this is the result, also evaluate the result now.
 6333 if (ov == resultExpr) {
 6334 if (forLValue)
 6335 result.LV = CGF.EmitLValue(ov);
 6336 else
 6337 result.RV = CGF.EmitAnyExpr(ov, slot);
 6338 }
 6339 }
 6340
 6341 opaques.push_back(opaqueData);
 6342
 6343 // Otherwise, if the expression is the result, evaluate it
 6344 // and remember the result.
 6345 } else if (semantic == resultExpr) {
 6346 if (forLValue)
 6347 result.LV = CGF.EmitLValue(semantic);
 6348 else
 6349 result.RV = CGF.EmitAnyExpr(semantic, slot);
 6350
 6351 // Otherwise, evaluate the expression in an ignored context.
 6352 } else {
 6353 CGF.EmitIgnoredExpr(semantic);
 6354 }
 6355 }
 6356
 6357 // Unbind all the opaques now.
 6358 for (unsigned i = 0, e = opaques.size(); i != e; ++i)
 6359 opaques[i].unbind(CGF);
 6360
 6361 return result;
 6362}
6363
// R-value entry point: delegate to emitPseudoObjectExpr with forLValue=false.
 6365 AggValueSlot slot) {
 6366 return emitPseudoObjectExpr(*this, E, false, slot).RV;
 6367}
6368
// L-value entry point: no slot is needed, so pass an ignored AggValueSlot.
 6370 return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
 6371}
Defines the clang::ASTContext interface.
#define V(N, I)
Definition: ASTContext.h:3453
DynTypedNode Node
Defines enum values for all the target-independent builtin functions.
CodeGenFunction::ComplexPairTy ComplexPairTy
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, LValue &LV, bool IsMemberAccess=false)
Definition: CGExpr.cpp:2703
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM)
Named Registers are named metadata pointing to the register name which will be read from/written to a...
Definition: CGExpr.cpp:2946
static llvm::Value * emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc, llvm::Value *Ptr)
Definition: CGExpr.cpp:690
static const Expr * isSimpleArrayDecayOperand(const Expr *E)
isSimpleArrayDecayOperand - If the specified expr is a simple decay from an array to pointer,...
Definition: CGExpr.cpp:4005
static bool hasBooleanRepresentation(QualType Ty)
Definition: CGExpr.cpp:1881
static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *Field, int64_t &Offset)
The offset of a field from the beginning of the record.
Definition: CGExpr.cpp:4189
static bool hasBPFPreserveStaticOffset(const RecordDecl *D)
Definition: CGExpr.cpp:4075
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base, const FieldDecl *field)
Drill down to the storage of a field without walking into reference types.
Definition: CGExpr.cpp:4854
ConstantEmissionKind
Can we constant-emit a load of a reference to a variable of the given type? This is different from pr...
Definition: CGExpr.cpp:1735
@ CEK_AsReferenceOnly
Definition: CGExpr.cpp:1737
@ CEK_AsValueOnly
Definition: CGExpr.cpp:1739
@ CEK_None
Definition: CGExpr.cpp:1736
@ CEK_AsValueOrReference
Definition: CGExpr.cpp:1738
static bool isConstantEmittableObjectType(QualType type)
Given an object of the given canonical type, can we safely copy a value out of it based on its initia...
Definition: CGExpr.cpp:1710
static QualType getFixedSizeElementType(const ASTContext &ctx, const VariableArrayType *vla)
Definition: CGExpr.cpp:4066
static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, llvm::Value *ThisValue)
Definition: CGExpr.cpp:2934
static std::optional< LValue > EmitLValueOrThrowExpression(CodeGenFunction &CGF, const Expr *Operand)
Emit the operand of a glvalue conditional operator.
Definition: CGExpr.cpp:5132
static CheckRecoverableKind getRecoverableKind(SanitizerKind::SanitizerOrdinal Ordinal)
Definition: CGExpr.cpp:3521
static llvm::Value * emitArraySubscriptGEP(CodeGenFunction &CGF, llvm::Type *elemType, llvm::Value *ptr, ArrayRef< llvm::Value * > indices, bool inbounds, bool signedIndices, SourceLocation loc, const llvm::Twine &name="arrayidx")
Definition: CGExpr.cpp:4019
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E, GlobalDecl GD)
Definition: CGExpr.cpp:2925
static RawAddress MaybeConvertMatrixAddress(RawAddress Addr, CodeGenFunction &CGF, bool IsVector=true)
Definition: CGExpr.cpp:2104
static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF, const PseudoObjectExpr *E, bool forLValue, AggValueSlot slot)
Definition: CGExpr.cpp:6292
static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF, Address &Addr)
Definition: CGExpr.cpp:4091
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF, const MemberExpr *ME)
Definition: CGExpr.cpp:1845
static llvm::Value * getArrayIndexingBound(CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel)
If Base is known to point to the start of an array, return the length of that array.
Definition: CGExpr.cpp:962
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc, CodeGenFunction &CGF)
Definition: CGExpr.cpp:2201
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type)
Definition: CGExpr.cpp:1741
static QualType getConstantExprReferredType(const FullExpr *E, const ASTContext &Ctx)
Definition: CGExpr.cpp:1542
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, llvm::APInt &Min, llvm::APInt &End, bool StrictEnums, bool IsBool)
Definition: CGExpr.cpp:1894
static std::optional< int64_t > getOffsetDifferenceInBits(CodeGenFunction &CGF, const FieldDecl *FD1, const FieldDecl *FD2)
Returns the relative offset difference between FD1 and FD2.
Definition: CGExpr.cpp:4220
static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD)
Definition: CGExpr.cpp:5641
static LValue EmitThreadPrivateVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr, llvm::Type *RealVarTy, SourceLocation Loc)
Definition: CGExpr.cpp:2800
static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *Field, RecIndicesTy &Indices)
Definition: CGExpr.cpp:1112
static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD)
Definition: CGExpr.cpp:5634
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, const Expr *E, const VarDecl *VD)
Definition: CGExpr.cpp:2874
static bool hasAnyVptr(const QualType Type, const ASTContext &Context)
Definition: CGExpr.cpp:4880
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase)
Given an array base, check whether its member access belongs to a record with preserve_access_index a...
Definition: CGExpr.cpp:4104
static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T)
Definition: CGExpr.cpp:2814
static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF, const DeclRefExpr *E, const VarDecl *VD)
Determine whether we can emit a reference to VD from the current context, despite not necessarily hav...
Definition: CGExpr.cpp:2971
VariableTypeDescriptorKind
Definition: CGExpr.cpp:68
@ TK_Float
A floating-point type.
Definition: CGExpr.cpp:72
@ TK_Unknown
Any other type. The value representation is unspecified.
Definition: CGExpr.cpp:76
@ TK_Integer
An integer type.
Definition: CGExpr.cpp:70
@ TK_BitInt
An _BitInt(N) type.
Definition: CGExpr.cpp:74
static CharUnits getArrayElementAlign(CharUnits arrayAlign, llvm::Value *idx, CharUnits eltSize)
Definition: CGExpr.cpp:4051
static RawAddress createReferenceTemporary(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, const Expr *Inner, RawAddress *Alloca=nullptr)
Definition: CGExpr.cpp:436
static bool isAAPCS(const TargetInfo &TargetInfo)
Helper method to check if the underlying ABI is AAPCS.
Definition: CGExpr.cpp:484
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue, bool isInit, CodeGenFunction &CGF)
Definition: CGExpr.cpp:2129
static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo, KnownNonNull_t IsKnownNonNull, CodeGenFunction &CGF)
Definition: CGExpr.cpp:1281
static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base, Address addr, const FieldDecl *field)
Definition: CGExpr.cpp:4867
const SanitizerHandlerInfo SanitizerHandlers[]
Definition: CGExpr.cpp:3538
static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base, const FieldDecl *Field)
Get the address of a zero-sized field within a record.
Definition: CGExpr.cpp:4840
static void emitCheckHandlerCall(CodeGenFunction &CGF, llvm::FunctionType *FnType, ArrayRef< llvm::Value * > FnArgs, SanitizerHandler CheckHandler, CheckRecoverableKind RecoverKind, bool IsFatal, llvm::BasicBlock *ContBB, bool NoMerge)
Definition: CGExpr.cpp:3544
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, LValueBaseInfo &BaseInfo, TBAAAccessInfo &TBAAInfo, QualType BaseTy, QualType ElTy, bool IsLowerBound)
Definition: CGExpr.cpp:4467
static void pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, const Expr *E, Address ReferenceTemporary)
Definition: CGExpr.cpp:320
const Decl * D
Expr * E
StringRef Filename
Definition: Format.cpp:3051
static const SanitizerMask AlwaysRecoverable
static const SanitizerMask Unrecoverable
static const RecordType * getRecordType(QualType QT)
Checks that the passed in QualType either is of RecordType or points to RecordType.
SourceLocation Loc
Definition: SemaObjC.cpp:759
Defines the SourceManager interface.
static QualType getPointeeType(const MemRegion *R)
StateNode * Previous
const LValueBase getLValueBase() const
Definition: APValue.cpp:973
bool isLValue() const
Definition: APValue.h:448
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:188
SourceManager & getSourceManager()
Definition: ASTContext.h:741
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
uint64_t getFieldOffset(const ValueDecl *FD) const
Get the offset of a FieldDecl or IndirectFieldDecl, in bits.
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
Definition: ASTContext.h:2716
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType VoidPtrTy
Definition: ASTContext.h:1187
Builtin::Context & BuiltinInfo
Definition: ASTContext.h:682
const LangOptions & getLangOpts() const
Definition: ASTContext.h:834
bool isTypeIgnoredBySanitizer(const SanitizerMask &Mask, const QualType &Ty) const
Check if a type can have its sanitizer instrumentation elided based on its presence within an ignorel...
Definition: ASTContext.cpp:854
QualType getPointerDiffType() const
Return the unique type for "ptrdiff_t" (C99 7.17) defined in <stddef.h>.
CanQualType BoolTy
Definition: ASTContext.h:1161
const NoSanitizeList & getNoSanitizeList() const
Definition: ASTContext.h:844
llvm::DenseMap< const CXXMethodDecl *, CXXCastPath > LambdaCastPaths
For capturing lambdas with an explicit object parameter whose type is derived from the lambda type,...
Definition: ASTContext.h:1256
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2482
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CanQualType VoidTy
Definition: ASTContext.h:1160
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2918
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
uint64_t getCharWidth() const
Return the size of the character type, in bits.
Definition: ASTContext.h:2486
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Definition: RecordLayout.h:200
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition: Expr.h:4224
This class represents BOTH the OpenMP Array Section and OpenACC 'subarray', with a boolean differenti...
Definition: Expr.h:6986
static QualType getBaseOriginalType(const Expr *Base)
Return original type of the base expression for array section.
Definition: Expr.cpp:5184
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition: Expr.h:2718
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3577
QualType getElementType() const
Definition: Type.h:3589
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3909
A fixed int type of a specified bitwidth.
Definition: Type.h:7819
bool isPredefinedLibFunction(unsigned ID) const
Determines whether this builtin is a predefined libc/libm function, such as "malloc",...
Definition: Builtins.h:161
Represents binding an expression to a temporary.
Definition: ExprCXX.h:1491
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1546
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2817
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition: DeclCXX.h:1378
bool isDynamicClass() const
Definition: DeclCXX.h:586
bool hasDefinition() const
Definition: DeclCXX.h:572
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition: ExprCXX.h:845
A Microsoft C++ __uuidof expression, which gets the _GUID that corresponds to the supplied type or ex...
Definition: ExprCXX.h:1066
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2874
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3547
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition: CharUnits.h:207
llvm::MaybeAlign getAsMaybeAlign() const
getAsMaybeAlign - Returns Quantity as a valid llvm::Align or std::nullopt, Beware llvm::MaybeAlign as...
Definition: CharUnits.h:194
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
SanitizerSet SanitizeMergeHandlers
Set of sanitizer checks that can merge handlers (smaller code size at the expense of debuggability).
PointerAuthOptions PointerAuth
Configuration for pointer-signing.
SanitizerSet SanitizeTrap
Set of sanitizer checks that trap rather than diagnose.
SanitizerSet SanitizeRecover
Set of sanitizer checks that are non-fatal (i.e.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:128
llvm::Value * getBasePointer() const
Definition: Address.h:193
static Address invalid()
Definition: Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:251
CharUnits getAlignment() const
Definition: Address.h:189
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:207
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Definition: Address.h:259
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:274
KnownNonNull_t isKnownNonNull() const
Whether the pointer is known not to be null.
Definition: Address.h:231
Address setKnownNonNull()
Definition: Address.h:236
void setAlignment(CharUnits Value)
Definition: Address.h:191
void replaceBasePointer(llvm::Value *P)
This function is used in situations where the caller is doing some sort of opaque "laundering" of the...
Definition: Address.h:181
bool isValid() const
Definition: Address.h:177
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:199
An aggregate value slot.
Definition: CGValue.h:504
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition: CGValue.h:572
Address getAddress() const
Definition: CGValue.h:644
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:613
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
Definition: CGValue.h:602
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:587
RValue asRValue() const
Definition: CGValue.h:666
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:858
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:136
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:305
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:292
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition: CGBuilder.h:203
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Definition: CGBuilder.h:331
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ... produce name = getelementptr inbounds addr, i64 0, i64 index where i64 is a...
Definition: CGBuilder.h:241
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:219
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:108
Address CreatePreserveStructAccessIndex(Address Addr, unsigned Index, unsigned FieldIndex, llvm::MDNode *DbgInfo)
Definition: CGBuilder.h:413
Address CreateLaunderInvariantGroup(Address Addr)
Definition: CGBuilder.h:437
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:128
Address CreatePreserveUnionAccessIndex(Address Addr, unsigned FieldIndex, llvm::MDNode *DbgInfo)
Definition: CGBuilder.h:429
Address CreateStripInvariantGroup(Address Addr)
Definition: CGBuilder.h:443
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition: CGBuilder.h:189
Address CreateConstInBoundsGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = T* ... produce name = getelementptr inbounds addr, i64 index where i64 is actually the t...
Definition: CGBuilder.h:261
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition: CGBuilder.h:346
virtual llvm::Function * getKernelStub(llvm::GlobalValue *Handle)=0
Get kernel stub by kernel handle.
virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr)=0
Emit code to force the execution of a destructor during global teardown.
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition: CGCXXABI.cpp:97
virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType LValType)=0
Emit a reference to a non-local thread_local variable (including triggering the initialization of all...
virtual bool usesThreadWrapperFunction(const VarDecl *VD) const =0
MangleContext & getMangleContext()
Gets the mangle context.
Definition: CGCXXABI.h:113
Abstract information about a function or function prototype.
Definition: CGCall.h:41
const GlobalDecl getCalleeDecl() const
Definition: CGCall.h:59
All available information about a concrete callee.
Definition: CGCall.h:63
CGCalleeInfo getAbstractInfo() const
Definition: CGCall.h:180
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition: CGCall.h:172
bool isPseudoDestructor() const
Definition: CGCall.h:169
static CGCallee forBuiltin(unsigned builtinID, const FunctionDecl *builtinDecl)
Definition: CGCall.h:123
unsigned getBuiltinID() const
Definition: CGCall.h:164
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition: CGCall.h:137
bool isBuiltin() const
Definition: CGCall.h:157
const FunctionDecl * getBuiltinDecl() const
Definition: CGCall.h:160
static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E)
Definition: CGCall.h:131
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:58
llvm::DIType * getOrCreateStandaloneType(QualType Ty, SourceLocation Loc)
Emit standalone debug info for a type.
llvm::DIType * getOrCreateRecordType(QualType Ty, SourceLocation L)
Emit record type's standalone debug info.
CGFunctionInfo - Class to encapsulate the information about a function definition.
virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest, llvm::Value *ivarOffset)=0
virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest)=0
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy, llvm::Value *BaseValue, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)=0
virtual llvm::Value * EmitIvarOffset(CodeGen::CodeGenFunction &CGF, const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)=0
virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, Address AddrWeakObj)=0
virtual Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel)=0
Get the address of a selector for the specified name and type values.
virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest)=0
virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest, bool threadlocal=false)=0
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
virtual ConstantAddress getAddrOfDeclareTargetVar(const VarDecl *VD)
Returns the address of the variable marked as declare target with link clause OR as declare target wi...
bool hasRequiresUnifiedSharedMemory() const
Return whether the unified_shared_memory has been specified.
bool isNontemporalDecl(const ValueDecl *VD) const
Checks if the VD variable is marked as nontemporal declaration in current context.
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getLLVMFieldNo(const FieldDecl *FD) const
Return llvm::StructType element number that corresponds to the field FD.
bool containsFieldDecl(const FieldDecl *FD) const
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:274
void add(RValue rvalue, QualType type)
Definition: CGCall.h:305
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse, const Expr *writebackExpr=nullptr, llvm::Value *lifetimeSz=nullptr)
Definition: CGCall.h:326
virtual const FieldDecl * lookup(const VarDecl *VD) const
Lookup the captured field decl for a variable.
static ConstantEmission forValue(llvm::Constant *C)
static ConstantEmission forReference(llvm::Constant *C)
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e)
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e)
RValue EmitLoadOfGlobalRegLValue(LValue LV)
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E)
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base, llvm::Value *memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
LValue EmitCastLValue(const CastExpr *E)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
SanitizerSet SanOpts
Sanitizers enabled for this function.
LValue EmitCoawaitLValue(const CoawaitExpr *E)
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
llvm::Value * EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
CGCapturedStmtInfo * CapturedStmtInfo
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
LValue EmitCallExprLValue(const CallExpr *E, llvm::CallBase **CallOrInvoke=nullptr)
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
LValue EmitHLSLArrayAssignLValue(const BinaryOperator *E)
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Load a pointer with type PtrTy stored at address Ptr.
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args, QualType Ty)
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
llvm::Value * EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
LValue EmitBinaryOperatorLValue(const BinaryOperator *E)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E)
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E)
llvm::Value * EmitARCLoadWeakRetained(Address addr)
const LangOptions & getLangOpts() const
llvm::Value * LoadPassedObjectSize(const Expr *E, QualType EltTy)
If E references a parameter with pass_object_size info or a constant array size modifier,...
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
LValue EmitInitListLValue(const InitListExpr *E)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo=nullptr, TBAAAccessInfo *PointeeTBAAInfo=nullptr)
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Address EmitExtVectorElementLValue(LValue V)
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_DowncastReference
Checking the operand of a static_cast to a derived reference type.
@ TCK_MemberAccess
Checking the object expression in a non-static data member access.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_UpcastToVirtualBase
Checking the operand of a cast to a virtual base object.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
@ TCK_DynamicOperation
Checking the operand of a dynamic_cast or a typeid expression.
@ TCK_ReferenceBinding
Checking the bound value in a reference binding.
@ TCK_Upcast
Checking the operand of a cast to a base object.
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
LValue EmitUnsupportedLValue(const Expr *E, const char *Name)
EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue an ErrorUnsupported style ...
llvm::Type * ConvertTypeForMem(QualType T)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex)
Get the record field index as represented in debug info.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
void markStmtMaybeUsed(const Stmt *S)
@ ForceLeftToRight
! Language semantics require left-to-right evaluation.
@ Default
! No language constraints on evaluation order.
@ ForceRightToLeft
! Language semantics require right-to-left evaluation.
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
void EmitCfiCheckStub()
Emit a stub for the cross-DSO CFI check function.
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr)
llvm::Value * EmitARCLoadWeak(Address addr)
const TargetInfo & getTarget() const
LValue EmitCXXConstructLValue(const CXXConstructExpr *E)
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
LValue EmitVAArgExprLValue(const VAArgExpr *E)
bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, SourceLocation Loc)
Check if the scalar Value is within the valid range for the given type Ty.
llvm::Function * generateDestroyHelper(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray, const VarDecl *VD)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
LValue EmitCoyieldLValue(const CoyieldExpr *E)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Address mergeAddressesInConditionalExpr(Address LHS, Address RHS, llvm::BasicBlock *LHSBlock, llvm::BasicBlock *RHSBlock, llvm::BasicBlock *MergeBlock, QualType MergedType)
LValue EmitUnaryOpLValue(const UnaryOperator *E)
ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitUnsupportedRValue(const Expr *E, const char *Name)
EmitUnsupportedRValue - Emit a dummy r-value using the type of E and issue an ErrorUnsupported style ...
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts)
getAccessedFieldNo - Given an encoded value and a result number, return the input field number being ...
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound, llvm::Value *Index, QualType IndexType, QualType IndexedType, bool Accessed)
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitCfiCheckFail()
Emit a cross-DSO CFI failure handling function.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
RValue EmitLoadOfExtVectorElementLValue(LValue V)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
LValue EmitComplexAssignmentLValue(const BinaryOperator *E)
Emit an l-value for an assignment (simple or compound) of complex type.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E)
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
BuildBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E)
Address emitAddrOfImagComponent(Address complex, QualType complexType)
LValue EmitDeclRefLValue(const DeclRefExpr *E)
const TargetCodeGenInfo & getTargetHooks() const
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
LValue EmitPredefinedLValue(const PredefinedExpr *E)
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst)
RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside preserved access index region.
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
void EmitCfiSlowPathCheck(SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond, llvm::ConstantInt *TypeId, llvm::Value *Ptr, ArrayRef< llvm::Constant * > StaticArgs)
Emit a slow path cross-DSO CFI check which calls __cfi_slowpath if Cond if false.
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
CGCallee EmitCallee(const Expr *E)
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src, ExprValueKind SrcKind)
EmitAggFinalDestCopy - Emit copy of the specified aggregate into destination address.
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy)
std::pair< LValue, LValue > EmitHLSLOutArgLValues(const HLSLOutArgExpr *E, QualType Ty)
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
llvm::Instruction * getPostAllocaInsertPoint()
Return PostAllocaInsertPt.
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
LValue EmitMemberExpr(const MemberExpr *E)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed=false)
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl)
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
CleanupKind getCleanupKind(QualType::DestructionKind kind)
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitCXXTypeidExpr(const CXXTypeidExpr *E)
Address GetAddrOfBlockDecl(const VarDecl *var)
CodeGenTypes & getTypes() const
void EmitARCInitWeak(Address addr, llvm::Value *value)
LValue EmitArraySectionExpr(const ArraySectionExpr *E, bool IsLowerBound=true)
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E)
void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst)
Address EmitCXXUuidofExpr(const CXXUuidofExpr *E)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
LValue EmitStringLiteralLValue(const StringLiteral *E)
static Destroyer destroyARCStrongPrecise
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr)
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E)
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
static bool hasAggregateEvaluationKind(QualType T)
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
void SetFPAccuracy(llvm::Value *Val, float Accuracy)
SetFPAccuracy - Set the minimum required accuracy of the given floating point operation,...
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
LValue EmitLoadOfReferenceLValue(LValue RefLVal)
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
void EmitInitializationToLValue(const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed=AggValueSlot::IsNotZeroed)
EmitInitializationToLValue - Emit an initializer to an LValue.
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E)
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
static bool isNullPointerAllowed(TypeCheckKind TCK)
Determine whether the pointer type check TCK permits null pointers.
static Destroyer destroyARCStrongImprecise
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value *Base, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
LValue EmitStmtExprLValue(const StmtExpr *E)
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural ...
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
bool LValueIsSuitableForInlineAtomic(LValue Src)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
RValue EmitLoadOfAnyValue(LValue V, AggValueSlot Slot=AggValueSlot::ignored(), SourceLocation Loc={})
Like EmitLoadOfLValue but also handles complex and aggregate types.
Address emitAddrOfRealComponent(Address complex, QualType complexType)
static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty)
Determine whether the pointer type check TCK requires a vptr check.
LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E)
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E)
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E)
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
LValue EmitLValueForLambdaField(const FieldDecl *Field)
static bool IsWrappedCXXThis(const Expr *E)
Check if E is a C++ "this" pointer wrapped in value-preserving casts.
This class organizes the cross-function state that is used while generating LLVM code.
ConstantAddress GetAddrOfMSGuidDecl(const MSGuidDecl *GD)
Get the address of a GUID.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition: CGExpr.cpp:1267
void setDSOLocal(llvm::GlobalValue *GV) const
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
CGDebugInfo * getModuleDebugInfo()
ConstantAddress GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E)
Returns a pointer to a constant global variable for the given file-scope compound literal expression.
llvm::ConstantInt * CreateCrossDsoCfiTypeId(llvm::Metadata *MD)
Generate a cross-DSO type identifier for MD.
void setTypeDescriptorInMap(QualType Ty, llvm::Constant *C)
llvm::Constant * getRawFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return a function pointer for a reference to the given function.
Definition: CGExpr.cpp:2913
llvm::FunctionCallee getAddrAndTypeOfCXXStructor(GlobalDecl GD, const CGFunctionInfo *FnInfo=nullptr, llvm::FunctionType *FnType=nullptr, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Definition: CGCXX.cpp:218
llvm::Constant * GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH=false)
Get the address of the RTTI descriptor for the given type.
llvm::Constant * GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty=nullptr, bool ForVTable=false, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Return the address of the given function.
Address createUnnamedGlobalFrom(const VarDecl &D, llvm::Constant *Constant, CharUnits Align)
Definition: CGDecl.cpp:1107
llvm::Constant * getFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return the ABI-correct function pointer value for a reference to the given function.
DiagnosticsEngine & getDiags() const
void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref< void()> Fn)
Run some code with "sufficient" stack space.
const LangOptions & getLangOpts() const
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
const TargetInfo & getTarget() const
llvm::Metadata * CreateMetadataIdentifierForType(QualType T)
Create a metadata identifier for the given type.
llvm::Constant * getTypeDescriptorFromMap(QualType Ty)
void addUsedGlobal(llvm::GlobalValue *GV)
Add a global to a list to be added to the llvm.used metadata.
llvm::MDNode * getTBAABaseTypeInfo(QualType QTy)
getTBAABaseTypeInfo - Get metadata that describes the given base access type.
llvm::GlobalValue::LinkageTypes getLLVMLinkageVarDefinition(const VarDecl *VD)
Returns LLVM linkage for a declarator.
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
ConstantAddress GetWeakRefReference(const ValueDecl *VD)
Get a reference to the target of VD.
CGPointerAuthInfo getFunctionPointerAuthInfo(QualType T)
Return the abstract pointer authentication schema for a pointer to the given function type.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
SanitizerMetadata * getSanitizerMetadata()
llvm::Metadata * CreateMetadataIdentifierGeneralized(QualType T)
Create a metadata identifier for the generalization of the given type.
const llvm::Triple & getTriple() const
llvm::Constant * getOrCreateStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage)
Definition: CGDecl.cpp:245
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
TBAAAccessInfo getTBAAInfoForSubobject(LValue Base, QualType AccessType)
getTBAAInfoForSubobject - Get TBAA information for an access with a given base lvalue.
llvm::Constant * CreateRuntimeVariable(llvm::Type *Ty, StringRef Name)
Create a new runtime global variable with the specified type and name.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ConstantAddress GetAddrOfConstantStringFromLiteral(const StringLiteral *S, StringRef Name=".str")
Return a pointer to a constant array for the given string literal.
ASTContext & getContext() const
ConstantAddress GetAddrOfTemplateParamObject(const TemplateParamObjectDecl *TPO)
Get the address of a template parameter object.
TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, TBAAAccessInfo TargetInfo)
mergeTBAAInfoForCast - Get merged TBAA information for the purposes of type casts.
llvm::Constant * GetAddrOfGlobalVar(const VarDecl *D, llvm::Type *Ty=nullptr, ForDefinition_t IsForDefinition=NotForDefinition)
Return the llvm::Constant for the address of the given global variable.
llvm::MDNode * getTBAATypeInfo(QualType QTy)
getTBAATypeInfo - Get metadata used to describe accesses to objects of the given type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
TBAAAccessInfo mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA, TBAAAccessInfo InfoB)
mergeTBAAInfoForConditionalOperator - Get merged TBAA information for the purposes of conditional ope...
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
CGObjCRuntime & getObjCRuntime()
Return a reference to the configured Objective-C runtime.
ConstantAddress GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E, const Expr *Inner)
Returns a pointer to a global variable representing a temporary with static or thread storage duratio...
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
void SetLLVMFunctionAttributes(GlobalDecl GD, const CGFunctionInfo &Info, llvm::Function *F, bool IsThunk)
Set the LLVM function attributes (sext, zext, etc).
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F)
Set the LLVM function attributes which only apply to a function definition.
ConstantAddress GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *)
Return a pointer to a constant array for the given ObjCEncodeExpr node.
ConstantAddress GetAddrOfConstantCString(const std::string &Str, const char *GlobalName=nullptr)
Returns a pointer to a character array containing the literal and a terminating '\0' character.
void setCurrentStmt(const Stmt *S)
If the execution count for the current statement is known, record that as the current count.
Definition: CodeGenPGO.h:76
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:679
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
const llvm::DataLayout & getDataLayout() const
Definition: CodeGenTypes.h:99
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
Definition: CGCall.cpp:638
A specialization of Address that requires the address to be an LLVM Constant.
Definition: Address.h:294
ConstantAddress withElementType(llvm::Type *ElemTy) const
Definition: Address.h:310
llvm::Constant * getPointer() const
Definition: Address.h:306
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
llvm::Constant * tryEmitConstantExpr(const ConstantExpr *CE)
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:382
void mergeForCast(const LValueBaseInfo &Info)
Definition: CGValue.h:174
AlignmentSource getAlignmentSource() const
Definition: CGValue.h:171
LValue - This represents an lvalue reference.
Definition: CGValue.h:182
bool isBitField() const
Definition: CGValue.h:280
bool isMatrixElt() const
Definition: CGValue.h:283
Expr * getBaseIvarExp() const
Definition: CGValue.h:332
llvm::Constant * getExtVectorElts() const
Definition: CGValue.h:409
static LValue MakeGlobalReg(llvm::Value *V, CharUnits alignment, QualType type)
Definition: CGValue.h:478
void setObjCIvar(bool Value)
Definition: CGValue.h:298
bool isObjCArray() const
Definition: CGValue.h:300
bool isObjCStrong() const
Definition: CGValue.h:324
bool isGlobalObjCRef() const
Definition: CGValue.h:306
bool isVectorElt() const
Definition: CGValue.h:279
void setObjCArray(bool Value)
Definition: CGValue.h:301
bool isSimple() const
Definition: CGValue.h:278
bool isVolatileQualified() const
Definition: CGValue.h:285
RValue asAggregateRValue() const
Definition: CGValue.h:498
CharUnits getAlignment() const
Definition: CGValue.h:343
llvm::Value * getPointer(CodeGenFunction &CGF) const
llvm::Value * getMatrixIdx() const
Definition: CGValue.h:395
llvm::Value * getGlobalReg() const
Definition: CGValue.h:430
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:432
bool isVolatile() const
Definition: CGValue.h:328
const Qualifiers & getQuals() const
Definition: CGValue.h:338
bool isGlobalReg() const
Definition: CGValue.h:282
static LValue MakeExtVectorElt(Address Addr, llvm::Constant *Elts, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:452
bool isObjCWeak() const
Definition: CGValue.h:321
Address getAddress() const
Definition: CGValue.h:361
unsigned getVRQualifiers() const
Definition: CGValue.h:287
void setThreadLocalRef(bool Value)
Definition: CGValue.h:310
LValue setKnownNonNull()
Definition: CGValue.h:350
bool isNonGC() const
Definition: CGValue.h:303
void setGlobalObjCRef(bool Value)
Definition: CGValue.h:307
bool isExtVectorElt() const
Definition: CGValue.h:281
llvm::Value * getVectorIdx() const
Definition: CGValue.h:382
void setNontemporal(bool Value)
Definition: CGValue.h:319
LValueBaseInfo getBaseInfo() const
Definition: CGValue.h:346
void setARCPreciseLifetime(ARCPreciseLifetime_t value)
Definition: CGValue.h:315
QualType getType() const
Definition: CGValue.h:291
const CGBitFieldInfo & getBitFieldInfo() const
Definition: CGValue.h:424
bool isThreadLocalRef() const
Definition: CGValue.h:309
KnownNonNull_t isKnownNonNull() const
Definition: CGValue.h:349
TBAAAccessInfo getTBAAInfo() const
Definition: CGValue.h:335
void setNonGC(bool Value)
Definition: CGValue.h:304
Address getVectorAddress() const
Definition: CGValue.h:370
bool isNontemporal() const
Definition: CGValue.h:318
static LValue MakeBitfield(Address Addr, const CGBitFieldInfo &Info, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Create a new object to represent a bit-field access.
Definition: CGValue.h:468
bool isObjCIvar() const
Definition: CGValue.h:297
static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:442
void setAddress(Address address)
Definition: CGValue.h:363
void setBaseIvarExp(Expr *V)
Definition: CGValue.h:333
Address getExtVectorAddress() const
Definition: CGValue.h:401
static LValue MakeMatrixElt(Address matAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:488
Address getMatrixAddress() const
Definition: CGValue.h:387
Address getBitFieldAddress() const
Definition: CGValue.h:415
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:42
bool isScalar() const
Definition: CGValue.h:64
static RValue get(llvm::Value *V)
Definition: CGValue.h:98
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition: CGValue.h:125
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:108
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:83
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:71
An abstract representation of an aligned address.
Definition: Address.h:42
RawAddress withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:100
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:77
llvm::Value * getPointer() const
Definition: Address.h:66
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:386
void disableSanitizerForGlobal(llvm::GlobalVariable *GV)
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
virtual llvm::Constant * getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const
Return a constant used by UBSan as a signature to identify functions possessing type information,...
Definition: TargetInfo.h:237
Complex values, per C99 6.2.5p11.
Definition: Type.h:3145
QualType getElementType() const
Definition: Type.h:3155
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:3477
ConstStmtVisitor - This class implements a simple visitor for Stmt subclasses.
Definition: StmtVisitor.h:195
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1077
Represents a concrete matrix type with constant number of rows and columns.
Definition: Type.h:4232
unsigned getNumRows() const
Returns the number of rows in the matrix.
Definition: Type.h:4250
RecordDecl * getOuterLexicalRecordContext()
Retrieve the outermost lexically enclosing record context.
Definition: DeclBase.cpp:2016
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1265
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition: Expr.h:1463
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition: Expr.cpp:487
ValueDecl * getDecl()
Definition: Expr.h:1333
SourceLocation getLocation() const
Definition: Expr.h:1341
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
T * getAttr() const
Definition: DeclBase.h:576
SourceLocation getLocation() const
Definition: DeclBase.h:442
bool isUsed(bool CheckUsedAttr=true) const
Whether any (re-)declaration of the entity was used, meaning that a definition is required.
Definition: DeclBase.cpp:549
DeclContext * getDeclContext()
Definition: DeclBase.h:451
bool hasAttr() const
Definition: DeclBase.h:580
void ConvertArgToString(ArgumentKind Kind, intptr_t Val, StringRef Modifier, StringRef Argument, ArrayRef< ArgumentValue > PrevArgs, SmallVectorImpl< char > &Output, ArrayRef< intptr_t > QualTypeVals) const
Converts a diagnostic argument (as an intptr_t) into the string that represents it.
Definition: Diagnostic.h:903
Represents an enum.
Definition: Decl.h:3861
bool isFixed() const
Returns true if this is an Objective-C, C++11, or Microsoft-style enumeration with a fixed underlying...
Definition: Decl.h:4075
void getValueRange(llvm::APInt &Max, llvm::APInt &Min) const
Calculates the [Min,Max) values the enum can store based on the NumPositiveBits and NumNegativeBits.
Definition: Decl.cpp:4998
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums.
Definition: Type.h:6103
EnumDecl * getDecl() const
Definition: Type.h:6110
ExplicitCastExpr - An explicit cast written in the source code.
Definition: Expr.h:3799
This represents one expression.
Definition: Expr.h:110
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition: Expr.cpp:82
bool isGLValue() const
Definition: Expr.h:280
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition: Expr.cpp:3124
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition: Expr.h:437
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition: Expr.cpp:3097
Expr * IgnoreImplicit() LLVM_READONLY
Skip past any implicit AST nodes which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3085
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3093
bool EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsLValue - Evaluate an expression to see if we can fold it to an lvalue with link time known ...
bool isPRValue() const
Definition: Expr.h:278
bool isLValue() const
isLValue - True if this expression is an "l-value" according to the rules of the current language.
Definition: Expr.h:277
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition: Expr.cpp:1550
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition: Expr.cpp:3594
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3077
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:276
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition: Expr.h:469
bool isFlexibleArrayMemberLike(ASTContext &Context, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution=false) const
Check whether this array fits the idiom of a flexible array member, depending on the value of -fstric...
Definition: Expr.cpp:205
QualType getType() const
Definition: Expr.h:142
bool isOBJCGCCandidate(ASTContext &Ctx) const
isOBJCGCCandidate - Return true if this expression may be used in a read/ write barrier.
Definition: Expr.cpp:3008
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition: Expr.h:6354
Represents a member of a struct/union/class.
Definition: Decl.h:3033
bool isBitField() const
Determines whether this field is a bitfield.
Definition: Decl.h:3136
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition: Decl.h:3118
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition: Decl.h:3264
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition: Decl.cpp:4707
FullExpr - Represents a "full-expression" node.
Definition: Expr.h:1044
const Expr * getSubExpr() const
Definition: Expr.h:1057
Represents a function declaration or definition.
Definition: Decl.h:1935
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition: Decl.cpp:3638
Represents a prototype with parameter type info, e.g.
Definition: Type.h:5107
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
const Decl * getDecl() const
Definition: GlobalDecl.h:103
This class represents temporary values used to represent inout and out arguments in HLSL.
Definition: Expr.h:7152
Describes an C or C++ initializer list.
Definition: Expr.h:5088
SanitizerSet Sanitize
Set of enabled sanitizers.
Definition: LangOptions.h:505
virtual void mangleCXXRTTI(QualType T, raw_ostream &)=0
unsigned getBlockId(const BlockDecl *BD, bool Local)
Definition: Mangle.h:84
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition: ExprCXX.h:4734
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition: ExprCXX.h:4759
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition: ExprCXX.h:4751
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition: ExprCXX.h:4784
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition: Expr.h:2796
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition: Expr.h:3236
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition: Expr.h:3319
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why? This is only meaningful if the named memb...
Definition: Expr.h:3460
Expr * getBase() const
Definition: Expr.h:3313
SourceLocation getExprLoc() const LLVM_READONLY
Definition: Expr.h:3431
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition: Type.h:3519
bool isObjCBOOLType(QualType T) const
Returns true if.
Definition: NSAPI.cpp:481
This represents a decl that may have a name.
Definition: Decl.h:253
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition: Decl.h:280
A C++ nested-name-specifier augmented with source location information.
bool containsType(SanitizerMask Mask, StringRef MangledTypeName, StringRef Category=StringRef()) const
ObjCEncodeExpr, used for @encode in Objective-C.
Definition: ExprObjC.h:410
Represents an ObjC class declaration.
Definition: DeclObjC.h:1153
ObjCIvarDecl - Represents an ObjC instance variable.
Definition: DeclObjC.h:1951
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition: ExprObjC.h:549
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:941
Represents a class type in Objective C.
Definition: Type.h:7331
ObjCSelectorExpr used for @selector in Objective-C.
Definition: ExprObjC.h:455
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition: Expr.h:1173
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition: Expr.h:1223
bool isUnique() const
Definition: Expr.h:1231
ParenExpr - This represents a parenthesized expression, e.g.
Definition: Expr.h:2170
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:3198
QualType getPointeeType() const
Definition: Type.h:3208
[C99 6.4.2.2] - A predefined identifier such as func.
Definition: Expr.h:1991
StringRef getIdentKindName() const
Definition: Expr.h:2048
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
bool isValid() const
unsigned getLine() const
Return the presumed line number of this location.
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition: Expr.h:6546
const Expr *const * const_semantics_iterator
Definition: Expr.h:6611
A (possibly-)qualified type.
Definition: Type.h:929
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition: Type.h:8020
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:996
LangAS getAddressSpace() const
Return the address space of this type.
Definition: Type.h:8062
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:7976
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition: Type.h:1433
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Definition: Type.h:8030
QualType withCVRQualifiers(unsigned CVR) const
Definition: Type.h:1174
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition: Type.h:1531
bool isConstantStorage(const ASTContext &Ctx, bool ExcludeCtor, bool ExcludeDtor)
Definition: Type.h:1028
The collection of all-type qualifiers we support.
Definition: Type.h:324
unsigned getCVRQualifiers() const
Definition: Type.h:481
GC getObjCGCAttr() const
Definition: Type.h:512
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition: Type.h:354
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition: Type.h:347
@ OCL_None
There is no lifetime qualification on this type.
Definition: Type.h:343
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition: Type.h:357
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition: Type.h:360
bool hasConst() const
Definition: Type.h:450
void addCVRQualifiers(unsigned mask)
Definition: Type.h:495
void removeObjCGCAttr()
Definition: Type.h:516
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition: Type.h:643
void setAddressSpace(LangAS space)
Definition: Type.h:584
bool hasVolatile() const
Definition: Type.h:460
ObjCLifetime getObjCLifetime() const
Definition: Type.h:538
void addVolatile()
Definition: Type.h:463
Represents a struct/union/class.
Definition: Decl.h:4162
field_range fields() const
Definition: Decl.h:4368
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
Definition: Decl.h:4353
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:6077
decl_type * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Definition: Redeclarable.h:203
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Encodes a location in the source.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location of a SourceLocation specifies.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4466
Stmt - This represents one statement.
Definition: Stmt.h:84
StmtClass getStmtClass() const
Definition: Stmt.h:1380
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.cpp:345
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1778
bool isUnion() const
Definition: Decl.h:3784
Exposes information about the current target.
Definition: TargetInfo.h:220
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
Definition: TargetInfo.h:1262
virtual StringRef getABI() const
Get the ABI currently in use.
Definition: TargetInfo.h:1330
const Type * getTypeForDecl() const
Definition: Decl.h:3409
The type-property cache.
Definition: Type.cpp:4501
The base class of the type hierarchy.
Definition: Type.h:1828
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1916
bool isBlockPointerType() const
Definition: Type.h:8205
bool isVoidType() const
Definition: Type.h:8515
bool isBooleanType() const
Definition: Type.h:8643
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition: Type.cpp:2201
bool hasAttr(attr::Kind AK) const
Determine whether this type had the specified attribute applied to it (looking through top-level type...
Definition: Type.cpp:1933
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition: Type.cpp:2180
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition: Type.h:8814
bool isConstantArrayType() const
Definition: Type.h:8267
bool isArrayType() const
Definition: Type.h:8263
bool isFunctionPointerType() const
Definition: Type.h:8231
bool isCountAttributedType() const
Definition: Type.cpp:727
bool isArithmeticType() const
Definition: Type.cpp:2315
bool isConstantMatrixType() const
Definition: Type.h:8325
bool isPointerType() const
Definition: Type.h:8191
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:8555
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:8805
bool isReferenceType() const
Definition: Type.h:8209
bool isVariableArrayType() const
Definition: Type.h:8275
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:738
bool isExtVectorBoolType() const
Definition: Type.h:8311
bool isBitIntType() const
Definition: Type.h:8429
bool isAnyComplexType() const
Definition: Type.h:8299
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition: Type.h:8686
bool isAtomicType() const
Definition: Type.h:8346
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2724
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition: Type.cpp:2396
bool isFunctionType() const
Definition: Type.h:8187
bool isObjCObjectPointerType() const
Definition: Type.h:8333
bool isVectorType() const
Definition: Type.h:8303
bool isFloatingType() const
Definition: Type.cpp:2283
bool isSubscriptableVectorType() const
Definition: Type.h:8317
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:8736
const Type * getUnqualifiedDesugaredType() const
Return the specified type with any "sugar" removed from the type, removing any typedefs,...
Definition: Type.cpp:638
bool isRecordType() const
Definition: Type.h:8291
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition: Type.cpp:1920
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2232
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4750
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition: Decl.h:671
QualType getType() const
Definition: Decl.h:682
QualType getType() const
Definition: Value.cpp:234
Represents a variable declaration or definition.
Definition: Decl.h:882
TLSKind getTLSKind() const
Definition: Decl.cpp:2157
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition: Decl.cpp:2355
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition: Decl.h:1135
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition: Decl.h:908
@ TLS_None
Not a TLS variable.
Definition: Decl.h:902
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:3808
Represents a GCC generic vector type.
Definition: Type.h:4034
unsigned getNumElements() const
Definition: Type.h:4049
#define INT_MIN
Definition: limits.h:55
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
Definition: CGValue.h:141
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
bool isEmptyFieldForLayout(const ASTContext &Context, const FieldDecl *FD)
isEmptyFieldForLayout - Return true iff the field is "empty", that is, either a zero-width bit-field ...
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:80
@ ARCImpreciseLifetime
Definition: CGValue.h:136
static AlignmentSource getFieldAlignmentSource(AlignmentSource Source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
Definition: CGValue.h:159
@ NotKnownNonNull
Definition: Address.h:33
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const AstTypeMatcher< FunctionType > functionType
Matches FunctionType nodes.
constexpr Variable var(Literal L)
Returns the variable of L.
Definition: CNFFormula.h:64
const void * Store
Store - This opaque type encapsulates an immutable mapping from locations to values.
Definition: StoreRef.h:27
bool This(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2390
bool Zero(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2353
bool IsNonNull(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2378
bool Load(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1693
bool Cast(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2126
The JSON file list parser is used to communicate input to InstallAPI.
@ OpenCL
Definition: LangStandard.h:65
@ CPlusPlus
Definition: LangStandard.h:55
@ OK_BitField
A bitfield object is a bitfield on a C or C++ record.
Definition: Specifiers.h:154
@ SC_Register
Definition: Specifiers.h:257
@ Asm
Assembly: we accept this only so that we can preprocess it.
StorageDuration
The storage duration for an object (per C++ [basic.stc]).
Definition: Specifiers.h:327
@ SD_Thread
Thread storage duration.
Definition: Specifiers.h:330
@ SD_Static
Static storage duration.
Definition: Specifiers.h:331
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition: Specifiers.h:328
@ SD_Automatic
Automatic storage duration (most local variables).
Definition: Specifiers.h:329
@ SD_Dynamic
Dynamic storage duration.
Definition: Specifiers.h:332
@ Result
The result type of a method or function.
@ Dtor_Complete
Complete object dtor.
Definition: ABI.h:35
llvm::cl::opt< bool > ClSanitizeGuardChecks
const FunctionProtoType * T
LangAS getLangASFromTargetAS(unsigned TargetAS)
Definition: AddressSpaces.h:86
@ Interface
The "__interface" keyword introduces the elaborated-type-specifier.
@ Other
Other implicit parameter.
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition: Specifiers.h:177
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition: Specifiers.h:180
unsigned long uint64_t
unsigned int uint32_t
__INTPTR_TYPE__ intptr_t
A signed integer type with the property that any valid pointer to void can be converted to this type,...
Structure with information about how a bitfield should be accessed.
CharUnits VolatileStorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned VolatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned VolatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned IsSigned
Whether the bit-field is signed.
static Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
unsigned char PointerWidthInBits
The width of a pointer into the generic address space.
llvm::MDNode * AccessType
AccessType - The final access type.
Definition: CodeGenTBAA.h:105
uint64_t Offset
Offset - The byte offset of the final access within the base one.
Definition: CodeGenTBAA.h:109
static TBAAAccessInfo getMayAliasInfo()
Definition: CodeGenTBAA.h:63
uint64_t Size
Size - The size of access, in bytes.
Definition: CodeGenTBAA.h:112
llvm::MDNode * BaseType
BaseType - The base/leading access type.
Definition: CodeGenTBAA.h:101
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:642
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:644
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition: Expr.h:609
PointerAuthSchema FunctionPointers
The ABI for C function pointers.
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition: Sanitizers.h:182
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:169
An adjustment to be made to the temporary created when emitting a reference binding,...
Definition: Expr.h:66