clang 20.0.0git
CGExpr.cpp
Go to the documentation of this file.
1//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "ABIInfoImpl.h"
14#include "CGCUDARuntime.h"
15#include "CGCXXABI.h"
16#include "CGCall.h"
17#include "CGCleanup.h"
18#include "CGDebugInfo.h"
19#include "CGObjCRuntime.h"
20#include "CGOpenMPRuntime.h"
21#include "CGRecordLayout.h"
22#include "CodeGenFunction.h"
23#include "CodeGenModule.h"
24#include "ConstantEmitter.h"
25#include "TargetInfo.h"
27#include "clang/AST/ASTLambda.h"
28#include "clang/AST/Attr.h"
29#include "clang/AST/DeclObjC.h"
30#include "clang/AST/NSAPI.h"
35#include "llvm/ADT/STLExtras.h"
36#include "llvm/ADT/ScopeExit.h"
37#include "llvm/ADT/StringExtras.h"
38#include "llvm/IR/DataLayout.h"
39#include "llvm/IR/Intrinsics.h"
40#include "llvm/IR/LLVMContext.h"
41#include "llvm/IR/MDBuilder.h"
42#include "llvm/IR/MatrixBuilder.h"
43#include "llvm/Support/ConvertUTF.h"
44#include "llvm/Support/Endian.h"
45#include "llvm/Support/MathExtras.h"
46#include "llvm/Support/Path.h"
47#include "llvm/Support/xxhash.h"
48#include "llvm/Transforms/Utils/SanitizerStats.h"
49
50#include <numeric>
51#include <optional>
52#include <string>
53
54using namespace clang;
55using namespace CodeGen;
56
namespace clang {
// TODO: Introduce frontend options to enable per-sanitizer guarding, similar
// to `-fsanitize-trap`.
//
// Debug/tuning flag: when set, each UBSan check is wrapped in an
// `llvm.allow.ubsan.check()` intrinsic so the optimizer (or a profile-guided
// pass) may elide individual checks instead of emitting them unconditionally.
llvm::cl::opt<bool> ClSanitizeGuardChecks(
    "ubsan-guard-checks", llvm::cl::Optional,
    llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));
} // namespace clang
64
65//===--------------------------------------------------------------------===//
66// Defines for metadata
67//===--------------------------------------------------------------------===//
68
69// Those values are crucial to be the SAME as in ubsan runtime library.
71 /// An integer type.
72 TK_Integer = 0x0000,
73 /// A floating-point type.
74 TK_Float = 0x0001,
75 /// An _BitInt(N) type.
76 TK_BitInt = 0x0002,
77 /// Any other type. The value representation is unspecified.
78 TK_Unknown = 0xffff
79};
80
81//===--------------------------------------------------------------------===//
82// Miscellaneous Helper Methods
83//===--------------------------------------------------------------------===//
84
85/// CreateTempAlloca - This creates a alloca and inserts it into the entry
86/// block.
88CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
89 const Twine &Name,
90 llvm::Value *ArraySize) {
91 auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
92 Alloca->setAlignment(Align.getAsAlign());
93 return RawAddress(Alloca, Ty, Align, KnownNonNull);
94}
95
96/// CreateTempAlloca - This creates a alloca and inserts it into the entry
97/// block. The alloca is casted to default address space if necessary.
99 const Twine &Name,
100 llvm::Value *ArraySize,
101 RawAddress *AllocaAddr) {
102 auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
103 if (AllocaAddr)
104 *AllocaAddr = Alloca;
105 llvm::Value *V = Alloca.getPointer();
106 // Alloca always returns a pointer in alloca address space, which may
107 // be different from the type defined by the language. For example,
108 // in C++ the auto variables are in the default address space. Therefore
109 // cast alloca to the default address space when necessary.
111 auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
112 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
113 // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
114 // otherwise alloca is inserted at the current insertion point of the
115 // builder.
116 if (!ArraySize)
117 Builder.SetInsertPoint(getPostAllocaInsertPoint());
120 Builder.getPtrTy(DestAddrSpace), /*non-null*/ true);
121 }
122
123 return RawAddress(V, Ty, Align, KnownNonNull);
124}
125
126/// CreateTempAlloca - This creates an alloca and inserts it into the entry
127/// block if \p ArraySize is nullptr, otherwise inserts it at the current
128/// insertion point of the builder.
129llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
130 const Twine &Name,
131 llvm::Value *ArraySize) {
132 llvm::AllocaInst *Alloca;
133 if (ArraySize)
134 Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
135 else
136 Alloca =
137 new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
138 ArraySize, Name, AllocaInsertPt->getIterator());
139 if (Allocas) {
140 Allocas->Add(Alloca);
141 }
142 return Alloca;
143}
144
145/// CreateDefaultAlignTempAlloca - This creates an alloca with the
146/// default alignment of the corresponding LLVM type, which is *not*
147/// guaranteed to be related in any way to the expected alignment of
148/// an AST type that might have been lowered to Ty.
150 const Twine &Name) {
151 CharUnits Align =
152 CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
153 return CreateTempAlloca(Ty, Align, Name);
154}
155
158 return CreateTempAlloca(ConvertType(Ty), Align, Name);
159}
160
162 RawAddress *Alloca) {
163 // FIXME: Should we prefer the preferred type alignment here?
164 return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
165}
166
168 const Twine &Name,
169 RawAddress *Alloca) {
171 /*ArraySize=*/nullptr, Alloca);
172
173 if (Ty->isConstantMatrixType()) {
174 auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
175 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
176 ArrayTy->getNumElements());
177
178 Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
180 }
181 return Result;
182}
183
185 CharUnits Align,
186 const Twine &Name) {
187 return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
188}
189
191 const Twine &Name) {
192 return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
193 Name);
194}
195
196/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
197/// expression and compare the result against zero, returning an Int1Ty value.
198llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
199 PGO.setCurrentStmt(E);
200 if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
201 llvm::Value *MemPtr = EmitScalarExpr(E);
202 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
203 }
204
205 QualType BoolTy = getContext().BoolTy;
207 CGFPOptionsRAII FPOptsRAII(*this, E);
208 if (!E->getType()->isAnyComplexType())
209 return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
210
212 Loc);
213}
214
215/// EmitIgnoredExpr - Emit code to compute the specified expression,
216/// ignoring the result.
218 if (E->isPRValue())
219 return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);
220
221 // if this is a bitfield-resulting conditional operator, we can special case
222 // emit this. The normal 'EmitLValue' version of this is particularly
223 // difficult to codegen for, since creating a single "LValue" for two
224 // different sized arguments here is not particularly doable.
225 if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
227 if (CondOp->getObjectKind() == OK_BitField)
228 return EmitIgnoredConditionalOperator(CondOp);
229 }
230
231 // Just emit it as an l-value and drop the result.
232 EmitLValue(E);
233}
234
235/// EmitAnyExpr - Emit code to compute the specified expression which
236/// can have any type. The result is returned as an RValue struct.
237/// If this is an aggregate expression, AggSlot indicates where the
238/// result should be returned.
240 AggValueSlot aggSlot,
241 bool ignoreResult) {
242 switch (getEvaluationKind(E->getType())) {
243 case TEK_Scalar:
244 return RValue::get(EmitScalarExpr(E, ignoreResult));
245 case TEK_Complex:
246 return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
247 case TEK_Aggregate:
248 if (!ignoreResult && aggSlot.isIgnored())
249 aggSlot = CreateAggTemp(E->getType(), "agg-temp");
250 EmitAggExpr(E, aggSlot);
251 return aggSlot.asRValue();
252 }
253 llvm_unreachable("bad evaluation kind");
254}
255
256/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
257/// always be accessible even if no aggregate location is provided.
260
262 AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
263 return EmitAnyExpr(E, AggSlot);
264}
265
266/// EmitAnyExprToMem - Evaluate an expression into a given memory
267/// location.
269 Address Location,
270 Qualifiers Quals,
271 bool IsInit) {
272 // FIXME: This function should take an LValue as an argument.
273 switch (getEvaluationKind(E->getType())) {
274 case TEK_Complex:
276 /*isInit*/ false);
277 return;
278
279 case TEK_Aggregate: {
280 EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
285 return;
286 }
287
288 case TEK_Scalar: {
289 RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
290 LValue LV = MakeAddrLValue(Location, E->getType());
292 return;
293 }
294 }
295 llvm_unreachable("bad evaluation kind");
296}
297
299 const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed) {
300 QualType Type = LV.getType();
301 switch (getEvaluationKind(Type)) {
302 case TEK_Complex:
303 EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
304 return;
305 case TEK_Aggregate:
309 AggValueSlot::MayOverlap, IsZeroed));
310 return;
311 case TEK_Scalar:
312 if (LV.isSimple())
313 EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false);
314 else
316 return;
317 }
318 llvm_unreachable("bad evaluation kind");
319}
320
321static void
323 const Expr *E, Address ReferenceTemporary) {
324 // Objective-C++ ARC:
325 // If we are binding a reference to a temporary that has ownership, we
326 // need to perform retain/release operations on the temporary.
327 //
328 // FIXME: This should be looking at E, not M.
329 if (auto Lifetime = M->getType().getObjCLifetime()) {
330 switch (Lifetime) {
333 // Carry on to normal cleanup handling.
334 break;
335
337 // Nothing to do; cleaned up by an autorelease pool.
338 return;
339
342 switch (StorageDuration Duration = M->getStorageDuration()) {
343 case SD_Static:
344 // Note: we intentionally do not register a cleanup to release
345 // the object on program termination.
346 return;
347
348 case SD_Thread:
349 // FIXME: We should probably register a cleanup in this case.
350 return;
351
352 case SD_Automatic:
356 if (Lifetime == Qualifiers::OCL_Strong) {
357 const ValueDecl *VD = M->getExtendingDecl();
358 bool Precise = isa_and_nonnull<VarDecl>(VD) &&
359 VD->hasAttr<ObjCPreciseLifetimeAttr>();
363 } else {
364 // __weak objects always get EH cleanups; otherwise, exceptions
365 // could cause really nasty crashes instead of mere leaks.
368 }
369 if (Duration == SD_FullExpression)
370 CGF.pushDestroy(CleanupKind, ReferenceTemporary,
371 M->getType(), *Destroy,
373 else
374 CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
375 M->getType(),
376 *Destroy, CleanupKind & EHCleanup);
377 return;
378
379 case SD_Dynamic:
380 llvm_unreachable("temporary cannot have dynamic storage duration");
381 }
382 llvm_unreachable("unknown storage duration");
383 }
384 }
385
386 CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
387 if (const RecordType *RT =
389 // Get the destructor for the reference temporary.
390 auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
391 if (!ClassDecl->hasTrivialDestructor())
392 ReferenceTemporaryDtor = ClassDecl->getDestructor();
393 }
394
395 if (!ReferenceTemporaryDtor)
396 return;
397
398 // Call the destructor for the temporary.
399 switch (M->getStorageDuration()) {
400 case SD_Static:
401 case SD_Thread: {
402 llvm::FunctionCallee CleanupFn;
403 llvm::Constant *CleanupArg;
404 if (E->getType()->isArrayType()) {
406 ReferenceTemporary, E->getType(),
408 dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
409 CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
410 } else {
411 CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
412 GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
413 CleanupArg = cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
414 }
416 CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
417 break;
418 }
419
421 CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
423 CGF.getLangOpts().Exceptions);
424 break;
425
426 case SD_Automatic:
428 ReferenceTemporary, E->getType(),
430 CGF.getLangOpts().Exceptions);
431 break;
432
433 case SD_Dynamic:
434 llvm_unreachable("temporary cannot have dynamic storage duration");
435 }
436}
437
440 const Expr *Inner,
441 RawAddress *Alloca = nullptr) {
442 auto &TCG = CGF.getTargetHooks();
443 switch (M->getStorageDuration()) {
445 case SD_Automatic: {
446 // If we have a constant temporary array or record try to promote it into a
447 // constant global under the same rules a normal constant would've been
448 // promoted. This is easier on the optimizer and generally emits fewer
449 // instructions.
450 QualType Ty = Inner->getType();
451 if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
452 (Ty->isArrayType() || Ty->isRecordType()) &&
453 Ty.isConstantStorage(CGF.getContext(), true, false))
454 if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
455 auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
456 auto *GV = new llvm::GlobalVariable(
457 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
458 llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
459 llvm::GlobalValue::NotThreadLocal,
461 CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
462 GV->setAlignment(alignment.getAsAlign());
463 llvm::Constant *C = GV;
464 if (AS != LangAS::Default)
465 C = TCG.performAddrSpaceCast(
466 CGF.CGM, GV, AS, LangAS::Default,
467 llvm::PointerType::get(
468 CGF.getLLVMContext(),
470 // FIXME: Should we put the new global into a COMDAT?
471 return RawAddress(C, GV->getValueType(), alignment);
472 }
473 return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
474 }
475 case SD_Thread:
476 case SD_Static:
477 return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
478
479 case SD_Dynamic:
480 llvm_unreachable("temporary can't have dynamic storage duration");
481 }
482 llvm_unreachable("unknown storage duration");
483}
484
485/// Helper method to check if the underlying ABI is AAPCS
486static bool isAAPCS(const TargetInfo &TargetInfo) {
487 return TargetInfo.getABI().starts_with("aapcs");
488}
489
492 const Expr *E = M->getSubExpr();
493
494 assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
495 !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
496 "Reference should never be pseudo-strong!");
497
498 // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
499 // as that will cause the lifetime adjustment to be lost for ARC
500 auto ownership = M->getType().getObjCLifetime();
501 if (ownership != Qualifiers::OCL_None &&
502 ownership != Qualifiers::OCL_ExplicitNone) {
504 if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
505 llvm::Type *Ty = ConvertTypeForMem(E->getType());
506 Object = Object.withElementType(Ty);
507
508 // createReferenceTemporary will promote the temporary to a global with a
509 // constant initializer if it can. It can only do this to a value of
510 // ARC-manageable type if the value is global and therefore "immune" to
511 // ref-counting operations. Therefore we have no need to emit either a
512 // dynamic initialization or a cleanup and we can just return the address
513 // of the temporary.
514 if (Var->hasInitializer())
515 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
516
517 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
518 }
519 LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
521
522 switch (getEvaluationKind(E->getType())) {
523 default: llvm_unreachable("expected scalar or aggregate expression");
524 case TEK_Scalar:
525 EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
526 break;
527 case TEK_Aggregate: {
534 break;
535 }
536 }
537
538 pushTemporaryCleanup(*this, M, E, Object);
539 return RefTempDst;
540 }
541
544 E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
545
546 for (const auto &Ignored : CommaLHSs)
547 EmitIgnoredExpr(Ignored);
548
549 if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
550 if (opaque->getType()->isRecordType()) {
551 assert(Adjustments.empty());
552 return EmitOpaqueValueLValue(opaque);
553 }
554 }
555
556 // Create and initialize the reference temporary.
557 RawAddress Alloca = Address::invalid();
558 RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
559 if (auto *Var = dyn_cast<llvm::GlobalVariable>(
560 Object.getPointer()->stripPointerCasts())) {
561 llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
562 Object = Object.withElementType(TemporaryType);
563 // If the temporary is a global and has a constant initializer or is a
564 // constant temporary that we promoted to a global, we may have already
565 // initialized it.
566 if (!Var->hasInitializer()) {
567 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
568 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
569 }
570 } else {
571 switch (M->getStorageDuration()) {
572 case SD_Automatic:
573 if (auto *Size = EmitLifetimeStart(
574 CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
575 Alloca.getPointer())) {
576 pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
577 Alloca, Size);
578 }
579 break;
580
581 case SD_FullExpression: {
582 if (!ShouldEmitLifetimeMarkers)
583 break;
584
585 // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
586 // marker. Instead, start the lifetime of a conditional temporary earlier
587 // so that it's unconditional. Don't do this with sanitizers which need
588 // more precise lifetime marks. However when inside an "await.suspend"
589 // block, we should always avoid conditional cleanup because it creates
590 // boolean marker that lives across await_suspend, which can destroy coro
591 // frame.
592 ConditionalEvaluation *OldConditional = nullptr;
593 CGBuilderTy::InsertPoint OldIP;
595 ((!SanOpts.has(SanitizerKind::HWAddress) &&
596 !SanOpts.has(SanitizerKind::Memory) &&
597 !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
598 inSuspendBlock())) {
599 OldConditional = OutermostConditional;
600 OutermostConditional = nullptr;
601
602 OldIP = Builder.saveIP();
603 llvm::BasicBlock *Block = OldConditional->getStartingBlock();
604 Builder.restoreIP(CGBuilderTy::InsertPoint(
605 Block, llvm::BasicBlock::iterator(Block->back())));
606 }
607
608 if (auto *Size = EmitLifetimeStart(
609 CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
610 Alloca.getPointer())) {
611 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
612 Size);
613 }
614
615 if (OldConditional) {
616 OutermostConditional = OldConditional;
617 Builder.restoreIP(OldIP);
618 }
619 break;
620 }
621
622 default:
623 break;
624 }
625 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
626 }
627 pushTemporaryCleanup(*this, M, E, Object);
628
629 // Perform derived-to-base casts and/or field accesses, to get from the
630 // temporary object we created (and, potentially, for which we extended
631 // the lifetime) to the subobject we're binding the reference to.
632 for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
633 switch (Adjustment.Kind) {
635 Object =
636 GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
637 Adjustment.DerivedToBase.BasePath->path_begin(),
638 Adjustment.DerivedToBase.BasePath->path_end(),
639 /*NullCheckValue=*/ false, E->getExprLoc());
640 break;
641
644 LV = EmitLValueForField(LV, Adjustment.Field);
645 assert(LV.isSimple() &&
646 "materialized temporary field is not a simple lvalue");
647 Object = LV.getAddress();
648 break;
649 }
650
652 llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
654 Adjustment.Ptr.MPT);
655 break;
656 }
657 }
658 }
659
660 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
661}
662
663RValue
665 // Emit the expression as an lvalue.
666 LValue LV = EmitLValue(E);
667 assert(LV.isSimple());
668 llvm::Value *Value = LV.getPointer(*this);
669
671 // C++11 [dcl.ref]p5 (as amended by core issue 453):
672 // If a glvalue to which a reference is directly bound designates neither
673 // an existing object or function of an appropriate type nor a region of
674 // storage of suitable size and alignment to contain an object of the
675 // reference's type, the behavior is undefined.
676 QualType Ty = E->getType();
678 }
679
680 return RValue::get(Value);
681}
682
683
684/// getAccessedFieldNo - Given an encoded value and a result number, return the
685/// input field number being accessed.
686unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
687 const llvm::Constant *Elts) {
688 return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
689 ->getZExtValue();
690}
691
692static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
693 llvm::Value *Ptr) {
694 llvm::Value *A0 =
695 Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));
696 llvm::Value *A1 =
697 Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));
698 return Builder.CreateXor(Acc, A1);
699}
700
701bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
702 return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
704}
705
706bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
708 return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
709 (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
712}
713
715 return SanOpts.has(SanitizerKind::Null) ||
716 SanOpts.has(SanitizerKind::Alignment) ||
717 SanOpts.has(SanitizerKind::ObjectSize) ||
718 SanOpts.has(SanitizerKind::Vptr);
719}
720
722 llvm::Value *Ptr, QualType Ty,
723 CharUnits Alignment,
724 SanitizerSet SkippedChecks,
725 llvm::Value *ArraySize) {
727 return;
728
729 // Don't check pointers outside the default address space. The null check
730 // isn't correct, the object-size check isn't supported by LLVM, and we can't
731 // communicate the addresses to the runtime handler for the vptr check.
732 if (Ptr->getType()->getPointerAddressSpace())
733 return;
734
735 // Don't check pointers to volatile data. The behavior here is implementation-
736 // defined.
737 if (Ty.isVolatileQualified())
738 return;
739
740 SanitizerScope SanScope(this);
741
743 Checks;
744 llvm::BasicBlock *Done = nullptr;
745
746 // Quickly determine whether we have a pointer to an alloca. It's possible
747 // to skip null checks, and some alignment checks, for these pointers. This
748 // can reduce compile-time significantly.
749 auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());
750
751 llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
752 llvm::Value *IsNonNull = nullptr;
753 bool IsGuaranteedNonNull =
754 SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
755 bool AllowNullPointers = isNullPointerAllowed(TCK);
756 if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
757 !IsGuaranteedNonNull) {
758 // The glvalue must not be an empty glvalue.
759 IsNonNull = Builder.CreateIsNotNull(Ptr);
760
761 // The IR builder can constant-fold the null check if the pointer points to
762 // a constant.
763 IsGuaranteedNonNull = IsNonNull == True;
764
765 // Skip the null check if the pointer is known to be non-null.
766 if (!IsGuaranteedNonNull) {
767 if (AllowNullPointers) {
768 // When performing pointer casts, it's OK if the value is null.
769 // Skip the remaining checks in that case.
770 Done = createBasicBlock("null");
771 llvm::BasicBlock *Rest = createBasicBlock("not.null");
772 Builder.CreateCondBr(IsNonNull, Rest, Done);
773 EmitBlock(Rest);
774 } else {
775 Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::SO_Null));
776 }
777 }
778 }
779
780 if (SanOpts.has(SanitizerKind::ObjectSize) &&
781 !SkippedChecks.has(SanitizerKind::ObjectSize) &&
782 !Ty->isIncompleteType()) {
784 llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
785 if (ArraySize)
786 Size = Builder.CreateMul(Size, ArraySize);
787
788 // Degenerate case: new X[0] does not need an objectsize check.
789 llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
790 if (!ConstantSize || !ConstantSize->isNullValue()) {
791 // The glvalue must refer to a large enough storage region.
792 // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
793 // to check this.
794 // FIXME: Get object address space
795 llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
796 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
797 llvm::Value *Min = Builder.getFalse();
798 llvm::Value *NullIsUnknown = Builder.getFalse();
799 llvm::Value *Dynamic = Builder.getFalse();
800 llvm::Value *LargeEnough = Builder.CreateICmpUGE(
801 Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
802 Checks.push_back(
803 std::make_pair(LargeEnough, SanitizerKind::SO_ObjectSize));
804 }
805 }
806
807 llvm::MaybeAlign AlignVal;
808 llvm::Value *PtrAsInt = nullptr;
809
810 if (SanOpts.has(SanitizerKind::Alignment) &&
811 !SkippedChecks.has(SanitizerKind::Alignment)) {
812 AlignVal = Alignment.getAsMaybeAlign();
813 if (!Ty->isIncompleteType() && !AlignVal)
814 AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
815 /*ForPointeeType=*/true)
817
818 // The glvalue must be suitably aligned.
819 if (AlignVal && *AlignVal > llvm::Align(1) &&
820 (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
821 PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
822 llvm::Value *Align = Builder.CreateAnd(
823 PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
824 llvm::Value *Aligned =
825 Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
826 if (Aligned != True)
827 Checks.push_back(std::make_pair(Aligned, SanitizerKind::SO_Alignment));
828 }
829 }
830
831 if (Checks.size() > 0) {
832 llvm::Constant *StaticData[] = {
834 llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
835 llvm::ConstantInt::get(Int8Ty, TCK)};
836 EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
837 PtrAsInt ? PtrAsInt : Ptr);
838 }
839
840 // If possible, check that the vptr indicates that there is a subobject of
841 // type Ty at offset zero within this object.
842 //
843 // C++11 [basic.life]p5,6:
844 // [For storage which does not refer to an object within its lifetime]
845 // The program has undefined behavior if:
846 // -- the [pointer or glvalue] is used to access a non-static data member
847 // or call a non-static member function
848 if (SanOpts.has(SanitizerKind::Vptr) &&
849 !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
850 // Ensure that the pointer is non-null before loading it. If there is no
851 // compile-time guarantee, reuse the run-time null check or emit a new one.
852 if (!IsGuaranteedNonNull) {
853 if (!IsNonNull)
854 IsNonNull = Builder.CreateIsNotNull(Ptr);
855 if (!Done)
856 Done = createBasicBlock("vptr.null");
857 llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
858 Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
859 EmitBlock(VptrNotNull);
860 }
861
862 // Compute a deterministic hash of the mangled name of the type.
863 SmallString<64> MangledName;
864 llvm::raw_svector_ostream Out(MangledName);
866 Out);
867
868 // Contained in NoSanitizeList based on the mangled type.
869 if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
870 Out.str())) {
871 // Load the vptr, and mix it with TypeHash.
872 llvm::Value *TypeHash =
873 llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));
874
875 llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
876 Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
877 llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
878 Ty->getAsCXXRecordDecl(),
880 VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);
881
882 llvm::Value *Hash =
883 emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
884 Hash = Builder.CreateTrunc(Hash, IntPtrTy);
885
886 // Look the hash up in our cache.
887 const int CacheSize = 128;
888 llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
889 llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
890 "__ubsan_vptr_type_cache");
891 llvm::Value *Slot = Builder.CreateAnd(Hash,
892 llvm::ConstantInt::get(IntPtrTy,
893 CacheSize-1));
894 llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
895 llvm::Value *CacheVal = Builder.CreateAlignedLoad(
896 IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
898
899 // If the hash isn't in the cache, call a runtime handler to perform the
900 // hard work of checking whether the vptr is for an object of the right
901 // type. This will either fill in the cache and return, or produce a
902 // diagnostic.
903 llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
904 llvm::Constant *StaticData[] = {
908 llvm::ConstantInt::get(Int8Ty, TCK)
909 };
910 llvm::Value *DynamicData[] = { Ptr, Hash };
911 EmitCheck(std::make_pair(EqualHash, SanitizerKind::SO_Vptr),
912 SanitizerHandler::DynamicTypeCacheMiss, StaticData,
913 DynamicData);
914 }
915 }
916
917 if (Done) {
918 Builder.CreateBr(Done);
919 EmitBlock(Done);
920 }
921}
922
924 QualType EltTy) {
926 uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
927 if (!EltSize)
928 return nullptr;
929
930 auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
931 if (!ArrayDeclRef)
932 return nullptr;
933
934 auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
935 if (!ParamDecl)
936 return nullptr;
937
938 auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
939 if (!POSAttr)
940 return nullptr;
941
942 // Don't load the size if it's a lower bound.
943 int POSType = POSAttr->getType();
944 if (POSType != 0 && POSType != 1)
945 return nullptr;
946
947 // Find the implicit size parameter.
948 auto PassedSizeIt = SizeArguments.find(ParamDecl);
949 if (PassedSizeIt == SizeArguments.end())
950 return nullptr;
951
952 const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
953 assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
954 Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
955 llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
956 C.getSizeType(), E->getExprLoc());
957 llvm::Value *SizeOfElement =
958 llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
959 return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
960}
961
/// If Base is known to point to the start of an array, return the length of
/// that array. Return 0 if the length cannot be determined.
/// (On failure this actually returns nullptr; callers treat null as "no
/// bound".)
static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
                                          const Expr *Base,
                                          QualType &IndexedType,
                                          LangOptions::StrictFlexArraysLevelKind
                                              StrictFlexArraysLevel) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  // A decayed array of known size gives an exact bound, unless the
  // strict-flex-arrays level says it should be treated as a flexible array
  // member (unknown length).
  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
                                                     StrictFlexArraysLevel)) {
      CodeGenFunction::SanitizerScope SanScope(&CGF);

      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());

      if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).NumElts;
      // Ignore pass_object_size here. It's not applicable on decayed pointers.
    }
  }

  CodeGenFunction::SanitizerScope SanScope(&CGF);

  // Fall back to a pass_object_size bound on a pointer parameter, if any.
  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
    IndexedType = Base->getType();
    return POS;
  }

  return nullptr;
}
1004
namespace {

/// \p StructAccessBase returns the base \p Expr of a field access. It returns
/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
///
///      p in p->a.b.c
///
/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
/// looking for:
///
///      struct s {
///        struct s *ptr;
///        int count;
///        char array[] __attribute__((counted_by(count)));
///      };
///
/// If we have an expression like \p p->ptr->array[index], we want the
/// \p MemberExpr for \p p->ptr instead of \p p.
class StructAccessBase
    : public ConstStmtVisitor<StructAccessBase, const Expr *> {
  // The record that lexically encloses the counted_by field we are looking
  // for; used to recognize which sub-expression is the right base.
  const RecordDecl *ExpectedRD;

  // Returns true if E's type (after stripping one level of pointer) is the
  // expected record.
  bool IsExpectedRecordDecl(const Expr *E) const {
    QualType Ty = E->getType();
    if (Ty->isPointerType())
      Ty = Ty->getPointeeType();
    return ExpectedRD == Ty->getAsRecordDecl();
  }

public:
  StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  // NOTE: If we build C++ support for counted_by, then we'll have to handle
  // horrors like this:
  //
  //     struct S {
  //       int x, y;
  //       int blah[] __attribute__((counted_by(x)));
  //     } s;
  //
  //     int foo(int index, int val) {
  //       int (S::*IHatePMDs)[] = &S::blah;
  //       (s.*IHatePMDs)[index] = val;
  //     }

  const Expr *Visit(const Expr *E) {
    return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E);
  }

  // Any statement kind without an explicit visitor below is not a valid base.
  const Expr *VisitStmt(const Stmt *S) { return nullptr; }

  // These are the types we expect to return (in order of most to least
  // likely):
  //
  //   1. DeclRefExpr - This is the expression for the base of the structure.
  //      It's exactly what we want to build an access to the \p counted_by
  //      field.
  //   2. MemberExpr - This is the expression that has the same \p RecordDecl
  //      as the flexible array member's lexical enclosing \p RecordDecl. This
  //      allows us to catch things like: "p->p->array"
  //   3. CompoundLiteralExpr - This is for people who create something
  //      heretical like (struct foo has a flexible array member):
  //
  //        (struct foo){ 1, 2 }.blah[idx];
  const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitMemberExpr(const MemberExpr *E) {
    // Prefer the innermost arrow access of the expected record; otherwise
    // keep descending through the base expression.
    if (IsExpectedRecordDecl(E) && E->isArrow())
      return E;
    const Expr *Res = Visit(E->getBase());
    return !Res && IsExpectedRecordDecl(E) ? E : Res;
  }
  const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }
  const Expr *VisitCallExpr(const CallExpr *E) {
    return IsExpectedRecordDecl(E) ? E : nullptr;
  }

  const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
    if (IsExpectedRecordDecl(E))
      return E;
    return Visit(E->getBase());
  }
  const Expr *VisitCastExpr(const CastExpr *E) {
    // An lvalue-to-rvalue conversion ends the lvalue chain; only accept it
    // if it already yields the expected record.
    if (E->getCastKind() == CK_LValueToRValue)
      return IsExpectedRecordDecl(E) ? E : nullptr;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitParenExpr(const ParenExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryDeref(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
};

} // end anonymous namespace
1111
/// Recursively locate \p Field inside \p RD (descending into nested record
/// members) and accumulate the LLVM struct field indices leading to it.
/// Indices are collected innermost-first; the caller reverses them before
/// building the GEP. Returns true if the field was found.
static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD,
                                 const FieldDecl *Field,
                                 RecIndicesTy &Indices) {
  const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
  int64_t FieldNo = -1;
  for (const FieldDecl *FD : RD->fields()) {
    if (!Layout.containsFieldDecl(FD))
      // This could happen if the field has a struct type that's empty. I don't
      // know why either.
      continue;

    FieldNo = Layout.getLLVMFieldNo(FD);
    if (FD == Field) {
      Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
      return true;
    }

    QualType Ty = FD->getType();
    if (Ty->isRecordType()) {
      if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
        // Union members all share LLVM field number 0.
        if (RD->isUnion())
          FieldNo = 0;
        Indices.emplace_back(CGF.Builder.getInt32(FieldNo));
        return true;
      }
    }
  }

  return false;
}
1144
/// Build a GEP from the base struct expression of \p Base down to the
/// counted_by field \p CountDecl. Returns null if the base cannot be emitted
/// without side-effects or the field cannot be located in the record layout.
llvm::Value *CodeGenFunction::GetCountedByFieldExprGEP(
    const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
  const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();

  // Find the base struct expr (i.e. p in p->a.b.c.d).
  const Expr *StructBase = StructAccessBase(RD).Visit(Base);
  if (!StructBase || StructBase->HasSideEffects(getContext()))
    return nullptr;

  // Emit a raw pointer to the struct, either through the pointer value or
  // through the lvalue's address.
  llvm::Value *Res = nullptr;
  if (StructBase->getType()->isPointerType()) {
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
    Res = Addr.emitRawPointer(*this);
  } else if (StructBase->isLValue()) {
    LValue LV = EmitLValue(StructBase);
    Address Addr = LV.getAddress();
    Res = Addr.emitRawPointer(*this);
  } else {
    return nullptr;
  }

  RecIndicesTy Indices;
  getGEPIndicesToField(*this, RD, CountDecl, Indices);
  if (Indices.empty())
    return nullptr;

  // Indices were gathered innermost-first; append the leading pointer index
  // and reverse so the GEP sees them outermost-first.
  Indices.push_back(Builder.getInt32(0));
  return Builder.CreateInBoundsGEP(
      ConvertType(QualType(RD->getTypeForDecl(), 0)), Res,
      RecIndicesTy(llvm::reverse(Indices)), "counted_by.gep");
}
1178
/// This method is typically called in contexts where we can't generate
/// side-effects, like in __builtin_dynamic_object_size. When finding
/// expressions, only choose those that have either already been emitted or can
/// be loaded without side-effects.
///
/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
///   within the top-level struct.
/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
llvm::Value *CodeGenFunction::EmitLoadOfCountedByField(
    const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
  // Compute the address of the count field, then load its value.
  if (llvm::Value *GEP = GetCountedByFieldExprGEP(Base, FAMDecl, CountDecl))
    return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), GEP,
                                     getIntAlign(), "counted_by.load");
  return nullptr;
}
1194
1195void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
1196 llvm::Value *Index, QualType IndexType,
1197 bool Accessed) {
1198 assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
1199 "should not be called unless adding bounds checks");
1200 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1201 getLangOpts().getStrictFlexArraysLevel();
1202 QualType IndexedType;
1203 llvm::Value *Bound =
1204 getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);
1205
1206 EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
1207}
1208
/// Shared implementation of the array-bounds check: compare \p Index against
/// \p Bound and emit the out-of-bounds sanitizer diagnostic on failure.
void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
                                          llvm::Value *Index,
                                          QualType IndexType,
                                          QualType IndexedType, bool Accessed) {
  // A null bound means the array length could not be determined; nothing to
  // check.
  if (!Bound)
    return;

  SanitizerScope SanScope(this);

  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);

  llvm::Constant *StaticData[] = {
    EmitCheckSourceLocation(E->getExprLoc()),
    EmitCheckTypeDescriptor(IndexedType),
    EmitCheckTypeDescriptor(IndexType)
  };
  // If the element is actually accessed, the index must be strictly less than
  // the bound; merely forming the address permits the one-past-the-end index.
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
  EmitCheck(std::make_pair(Check, SanitizerKind::SO_ArrayBounds),
            SanitizerHandler::OutOfBounds, StaticData, Index);
}
1232
/// Emit a pre/post increment or decrement of a _Complex lvalue. Only the real
/// part is adjusted by +/-1; the imaginary part is carried through unchanged.
/// Returns the original value for post-inc/dec and the updated value for
/// pre-inc/dec.
ComplexPairTy CodeGenFunction::EmitComplexPrePostIncDec(const UnaryOperator *E,
                                                        LValue LV, bool isInc,
                                                        bool isPre) {
  ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  EmitStoreOfComplex(IncVal, LV, /*init*/ false);
  if (getLangOpts().OpenMP)
    CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                              E->getSubExpr());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}
1268
/// Record the type of an explicit cast for codegen purposes: bind any VLA
/// sizes appearing in the cast type and forward the type to debug info.
void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
                                             CodeGenFunction *CGF) {
  // Bind VLAs in the cast type.
  if (CGF && E->getType()->isVariablyModifiedType())
    CGF->EmitVariablyModifiedType(E->getType());

  if (CGDebugInfo *DI = getModuleDebugInfo())
    DI->EmitExplicitCastType(E->getType());
}
1278
1279//===----------------------------------------------------------------------===//
1280// LValue Expression Emission
1281//===----------------------------------------------------------------------===//
1282
/// Worker for CodeGenFunction::EmitPointerWithAlignment: look through casts,
/// unary '&', and addressof-style builtins to compute both the pointer value
/// of \p E and the best known alignment (and base/TBAA info) for its pointee.
static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
                                        TBAAAccessInfo *TBAAInfo,
                                        KnownNonNull_t IsKnownNonNull,
                                        CodeGenFunction &CGF) {
  // We allow this with ObjC object pointers because of fragile ABIs.
  assert(E->getType()->isPointerType() ||
         E->getType()->isObjCObjectPointerType());
  E = E->IgnoreParens();

  // Casts:
  if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
      CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);

    switch (CE->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
    case CK_AddressSpaceConversion:
      if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
        // void* carries no pointee alignment information; fall through to the
        // generic path below.
        if (PtrTy->getPointeeType()->isVoidType())
          break;

        LValueBaseInfo InnerBaseInfo;
        TBAAAccessInfo InnerTBAAInfo;
        Address Addr = CGF.EmitPointerWithAlignment(
            CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
        if (BaseInfo) *BaseInfo = InnerBaseInfo;
        if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;

        if (isa<ExplicitCastExpr>(CE)) {
          LValueBaseInfo TargetTypeBaseInfo;
          TBAAAccessInfo TargetTypeTBAAInfo;
          CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(
              E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
          if (TBAAInfo)
            *TBAAInfo =
                CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
          // If the source l-value is opaque, honor the alignment of the
          // casted-to type.
          if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
            if (BaseInfo)
              BaseInfo->mergeForCast(TargetTypeBaseInfo);
            Addr.setAlignment(Align);
          }
        }

        if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
            CE->getCastKind() == CK_BitCast) {
          if (auto PT = E->getType()->getAs<PointerType>())
            CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
                                          /*MayBeNull=*/true,
                                          CodeGenFunction::CFITCK_UnrelatedCast,
                                          CE->getBeginLoc());
        }

        llvm::Type *ElemTy =
            CGF.ConvertTypeForMem(E->getType()->getPointeeType());
        Addr = Addr.withElementType(ElemTy);
        if (CE->getCastKind() == CK_AddressSpaceConversion)
          Addr = CGF.Builder.CreateAddrSpaceCast(
              Addr, CGF.ConvertType(E->getType()), ElemTy);
        return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
                                            CE->getType());
      }
      break;

    // Array-to-pointer decay.
    case CK_ArrayToPointerDecay:
      return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);

    // Derived-to-base conversions.
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      // TODO: Support accesses to members of base classes in TBAA. For now, we
      // conservatively pretend that the complete object is of the base class
      // type.
      if (TBAAInfo)
        *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
      Address Addr = CGF.EmitPointerWithAlignment(
          CE->getSubExpr(), BaseInfo, nullptr,
          (KnownNonNull_t)(IsKnownNonNull ||
                           CE->getCastKind() == CK_UncheckedDerivedToBase));
      auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      return CGF.GetAddressOfBaseClass(
          Addr, Derived, CE->path_begin(), CE->path_end(),
          CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
    }

    // TODO: Is there any reason to treat base-to-derived conversions
    // specially?
    default:
      break;
    }
  }

  // Unary &.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() == UO_AddrOf) {
      // Taking the address of an lvalue: the lvalue's own base/TBAA info is
      // exactly what we want.
      LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress();
    }
  }

  // std::addressof and variants.
  if (auto *Call = dyn_cast<CallExpr>(E)) {
    switch (Call->getBuiltinCallee()) {
    default:
      break;
    case Builtin::BIaddressof:
    case Builtin::BI__addressof:
    case Builtin::BI__builtin_addressof: {
      LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress();
    }
    }
  }

  // TODO: conditional operators, comma.

  // Otherwise, use the alignment of the type.
  return CGF.makeNaturalAddressForPointer(
      CGF.EmitScalarExpr(E), E->getType()->getPointeeType(), CharUnits(),
      /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
}
1412
/// EmitPointerWithAlignment - Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
Address CodeGenFunction::EmitPointerWithAlignment(
    const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
    KnownNonNull_t IsKnownNonNull) {
  Address Addr =
      ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
  // Propagate the caller's non-null guarantee onto the computed address.
  if (IsKnownNonNull && !Addr.isKnownNonNull())
    Addr.setKnownNonNull();
  return Addr;
}
1424
/// Emit an i1 that is true when the scalar rvalue \p RV of type \p T is
/// non-null.
llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
  llvm::Value *V = RV.getScalarVal();
  // Member pointers have ABI-specific null representations; let the C++ ABI
  // produce the comparison.
  if (auto MPT = T->getAs<MemberPointerType>())
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
  return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
}
1431
/// Produce an RValue of undefined contents with the correct shape (scalar,
/// complex, or aggregate) for \p Ty.
RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(nullptr);

  switch (getEvaluationKind(Ty)) {
  case TEK_Complex: {
    llvm::Type *EltTy =
        ConvertType(Ty->castAs<ComplexType>()->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have an
  // identifiable address. Just because the contents of the value are undefined
  // doesn't mean that the address can't be taken and compared.
  case TEK_Aggregate: {
    Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  case TEK_Scalar:
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
  llvm_unreachable("bad evaluation kind");
}
1457
/// Diagnose an unsupported rvalue expression, then recover with an undef
/// value of the right type so codegen can continue.
RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}
1463
/// Diagnose an unsupported lvalue expression, then recover with an lvalue
/// backed by an undef pointer so codegen can continue.
LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *ElTy = ConvertType(E->getType());
  llvm::Type *Ty = UnqualPtrTy;
  return MakeAddrLValue(
      Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
}
1472
1473bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
1474 const Expr *Base = Obj;
1475 while (!isa<CXXThisExpr>(Base)) {
1476 // The result of a dynamic_cast can be null.
1477 if (isa<CXXDynamicCastExpr>(Base))
1478 return false;
1479
1480 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1481 Base = CE->getSubExpr();
1482 } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1483 Base = PE->getSubExpr();
1484 } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1485 if (UO->getOpcode() == UO_Extension)
1486 Base = UO->getSubExpr();
1487 else
1488 return false;
1489 } else {
1490 return false;
1491 }
1492 }
1493 return true;
1494}
1495
1496LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
1497 LValue LV;
1498 if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
1499 LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
1500 else
1501 LV = EmitLValue(E);
1502 if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
1503 SanitizerSet SkippedChecks;
1504 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
1505 bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
1506 if (IsBaseCXXThis)
1507 SkippedChecks.set(SanitizerKind::Alignment, true);
1508 if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
1509 SkippedChecks.set(SanitizerKind::Null, true);
1510 }
1511 EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
1512 }
1513 return LV;
1514}
1515
/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size of the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E,
                                   KnownNonNull_t IsKnownNonNull) {
  // Running with sufficient stack space to avoid deeply nested expressions
  // cause a stack overflow.
  LValue LV;
  CGM.runWithSufficientStackSpace(
      E->getExprLoc(), [&] { LV = EmitLValueHelper(E, IsKnownNonNull); });

  // Propagate the caller's non-null guarantee onto the result.
  if (IsKnownNonNull && !LV.isKnownNonNull())
    LV.setKnownNonNull();
  return LV;
}
1543
/// Compute the type to use for the lvalue produced by a ConstantExpr whose
/// stripped sub-expression is either an OpaqueValueExpr or a call.
static QualType getConstantExprReferredType(const FullExpr *E,
                                            const ASTContext &Ctx) {
  const Expr *SE = E->getSubExpr()->IgnoreImplicit();
  if (isa<OpaqueValueExpr>(SE))
    return SE->getType();
  // Otherwise the sub-expression must be a call; use the pointee of its
  // return type.
  return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
}
1551
/// Dispatch on the statement class of \p E to the appropriate lvalue-emission
/// routine. Only called from EmitLValue, which runs it with sufficient stack
/// space to handle deeply nested expressions.
LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
                                         KnownNonNull_t IsKnownNonNull) {
  ApplyDebugLocation DL(*this, E);
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass: {
    // Atomic compound assignments are classified by their underlying value
    // type; complex ones take a dedicated path.
    QualType Ty = E->getType();
    if (const AtomicType *AT = Ty->getAs<AtomicType>())
      Ty = AT->getValueType();
    if (!Ty->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::CXXRewrittenBinaryOperatorClass:
    return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
                      IsKnownNonNull);
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ConstantExprClass: {
    // Prefer emitting the wrapped constant directly; fall back to emitting
    // the sub-expression as a normal lvalue.
    const ConstantExpr *CE = cast<ConstantExpr>(E);
    if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
      QualType RetType = getConstantExprReferredType(CE, getContext());
      return MakeNaturalAlignAddrLValue(Result, RetType);
    }
    return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
  }
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
                      IsKnownNonNull);
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
  case Expr::LambdaExprClass:
    return EmitAggExprToLValue(E);

  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(E);
    RunCleanupsScope Scope(*this);
    LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
    if (LV.isSimple()) {
      // Defend against branches out of gnu statement expressions surrounded by
      // cleanups.
      Address Addr = LV.getAddress();
      llvm::Value *V = Addr.getBasePointer();
      Scope.ForceCleanup({&V});
      Addr.replaceBasePointer(V);
      return LValue::MakeAddr(Addr, LV.getType(), getContext(),
                              LV.getBaseInfo(), LV.getTBAAInfo());
    }
    // FIXME: Is it possible to create an ExprWithCleanups that produces a
    // bitfield lvalue or some other non-simple lvalue?
    return LV;
  }

  case Expr::CXXDefaultArgExprClass: {
    auto *DAE = cast<CXXDefaultArgExpr>(E);
    CXXDefaultArgExprScope Scope(*this, DAE);
    return EmitLValue(DAE->getExpr(), IsKnownNonNull);
  }
  case Expr::CXXDefaultInitExprClass: {
    auto *DIE = cast<CXXDefaultInitExpr>(E);
    CXXDefaultInitExprScope Scope(*this, DIE);
    return EmitLValue(DIE->getExpr(), IsKnownNonNull);
  }
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::MatrixSubscriptExprClass:
    return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
  case Expr::ArraySectionExprClass:
    return EmitArraySectionExpr(cast<ArraySectionExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::CXXThisExprClass:
    return MakeAddrLValue(LoadCXXThisAddress(), E->getType());
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
                      IsKnownNonNull);
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::CXXAddrspaceCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));

  case Expr::CoawaitExprClass:
    return EmitCoawaitLValue(cast<CoawaitExpr>(E));
  case Expr::CoyieldExprClass:
    return EmitCoyieldLValue(cast<CoyieldExpr>(E));
  case Expr::PackIndexingExprClass:
    return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
  case Expr::HLSLOutArgExprClass:
    llvm_unreachable("cannot emit a HLSL out argument directly");
  }
}
1709
/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const auto *RT = dyn_cast<RecordType>(type))
    if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}
1729
/// Can we constant-emit a load of a reference to a variable of the
/// given type? This is different from predicates like
/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules). For example, we want to able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  // References: always emittable as a reference; also as a value when the
  // referent itself is constant-emittable.
  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}
1754
/// Try to emit a reference to the given value without producing it as
/// an l-value. This is just an optimization, but it avoids us needing
/// to emit global copies of variables if they're named without triggering
/// a formal use in a context where we can't emit a direct reference to them,
/// for instance if a block or lambda or a member of a local class uses a
/// const int variable or constexpr variable from an enclosing function.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (auto *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // In CUDA/HIP device compilation, a lambda may capture a reference variable
  // referencing a global host variable by copy. In this case the lambda should
  // make a copy of the value of the global host variable. The DRE of the
  // captured reference variable cannot be emitted as load from the host
  // global variable as compile time constant, since the host variable is not
  // accessible on device. The DRE of the captured reference variable has to be
  // loaded from captures.
  if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
      refExpr->refersToEnclosingVariableOrCapture()) {
    auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
    if (isLambdaMethod(MD) && MD->getOverloadedOperator() == OO_Call) {
      const APValue::LValueBase &base = result.Val.getLValueBase();
      if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
        if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
          if (!VD->hasAttr<CUDADeviceAttr>()) {
            return ConstantEmission();
          }
        }
      }
    }
  }

  // Emit as a constant.
  auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
                                               result.Val, resultType);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, result.Val);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, result.Val);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}
1845
/// If \p ME names a VarDecl member (i.e. a static data member), build an
/// equivalent DeclRefExpr so the DRE-based constant-emission path can handle
/// it. Returns null otherwise.
static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
                                                        const MemberExpr *ME) {
  if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
    // Try to emit static variable member expressions as DREs.
    return DeclRefExpr::Create(
        CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
        /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
        ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
  }
  return nullptr;
}
1857
/// MemberExpr overload: static data member accesses can be constant-emitted
/// through their equivalent DeclRefExpr form.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
    return tryEmitAsConstant(DRE);
  return ConstantEmission();
}
1864
/// Materialize a previously computed ConstantEmission as a scalar value,
/// loading through the reference lvalue when the constant was emitted as a
/// reference.
llvm::Value *CodeGenFunction::emitScalarConstant(
    const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
  assert(Constant && "not a constant");
  if (Constant.isReference())
    return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
                            E->getExprLoc())
        .getScalarVal();
  return Constant.getValue();
}
1874
/// Convenience overload: load a scalar from an already-formed LValue,
/// forwarding its address, volatility, type, and base/TBAA info.
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
                                               SourceLocation Loc) {
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getType(), Loc, lvalue.getBaseInfo(),
                          lvalue.getTBAAInfo(), lvalue.isNontemporal());
}
1881
/// Return true if \p Ty has a boolean representation: bool itself, an enum
/// whose underlying integer type is bool, or an atomic of such a type.
bool CodeGenFunction::hasBooleanRepresentation(QualType Ty) {
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}
1894
// Compute the value range [Min, End) that loads of type Ty may legally
// produce; returns false when no useful range applies.
// NOTE(review): original line 1895 (the signature head, presumably
// "static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,") is
// missing from this extracted view.
1896 llvm::APInt &Min, llvm::APInt &End,
1897 bool StrictEnums, bool IsBool) {
// Unfixed C++ enums only get a range under strict-enums semantics.
1898 const EnumType *ET = Ty->getAs<EnumType>();
1899 bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
1900 ET && !ET->getDecl()->isFixed();
1901 if (!IsBool && !IsRegularCPlusPlusEnum)
1902 return false;
1903
1904 if (IsBool) {
// Booleans are the half-open range [0, 2).
1905 Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
1906 End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
1907 } else {
// For enums, ask the declaration for its representable value range.
1908 const EnumDecl *ED = ET->getDecl();
1909 ED->getValueRange(End, Min);
1910 }
1911 return true;
1912}
1913
// Build !range metadata describing legal loaded values of Ty, or nullptr
// when the type carries no useful range.
// NOTE(review): original line 1917 (the trailing getRangeForType argument,
// presumably the IsBool operand) is missing from this extracted view.
1914llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
1915 llvm::APInt Min, End;
1916 if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
1918 return nullptr;
1919
1920 llvm::MDBuilder MDHelper(getLLVMContext());
1921 return MDHelper.createRange(Min, End);
1922}
1923
// Emit a UBSan bool/enum load check for Value of type Ty. Returns true when
// a check was (or would be) relevant, which callers use to suppress !range
// metadata on the load so the optimizer cannot delete the check.
// NOTE(review): original lines 1924-1925 (the signature, presumably
// CodeGenFunction::EmitScalarRangeCheck(llvm::Value*, QualType,
// SourceLocation)), 1932 (the second IsBool operand) and 1967-1968 (the
// remaining StaticArgs and the SanitizerKind local declaration) are missing
// from this extracted view.
1926 bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
1927 bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
1928 if (!HasBoolCheck && !HasEnumCheck)
1929 return false;
1930
1931 bool IsBool = hasBooleanRepresentation(Ty) ||
1933 bool NeedsBoolCheck = HasBoolCheck && IsBool;
1934 bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
1935 if (!NeedsBoolCheck && !NeedsEnumCheck)
1936 return false;
1937
1938 // Single-bit booleans don't need to be checked. Special-case this to avoid
1939 // a bit width mismatch when handling bitfield values. This is handled by
1940 // EmitFromMemory for the non-bitfield case.
1941 if (IsBool &&
1942 cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
1943 return false;
1944
1945 if (NeedsEnumCheck &&
1946 getContext().isTypeIgnoredBySanitizer(SanitizerKind::Enum, Ty))
1947 return false;
1948
1949 llvm::APInt Min, End;
// No computable range: still report true so no !range metadata is attached.
1950 if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
1951 return true;
1952
1953 auto &Ctx = getLLVMContext();
1954 SanitizerScope SanScope(this);
1955 llvm::Value *Check;
// Convert half-open [Min, End) to inclusive upper bound End-1.
1956 --End;
1957 if (!Min) {
// Range starts at zero: a single unsigned comparison suffices.
1958 Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
1959 } else {
1960 llvm::Value *Upper =
1961 Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
1962 llvm::Value *Lower =
1963 Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
1964 Check = Builder.CreateAnd(Upper, Lower);
1965 }
1966 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
1969 NeedsEnumCheck ? SanitizerKind::SO_Enum : SanitizerKind::SO_Bool;
1970 EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
1971 StaticArgs, EmitCheckValue(Value));
1972 return true;
1973}
1974
// Core scalar load: handles TLS address adjustment, boolean ext-vectors,
// padded vector types, atomics, nontemporal hints, TBAA decoration and
// optional !range/!noundef metadata, then converts the loaded bits to the
// value representation via EmitFromMemory.
// NOTE(review): original lines 1977 (SourceLocation parameter), 1984 (second
// withPointer argument), 2010 (the NewVecTy initializer) and 2031 (the Addr
// element-type rewrite) are missing from this extracted view.
1975llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
1976 QualType Ty,
1978 LValueBaseInfo BaseInfo,
1979 TBAAAccessInfo TBAAInfo,
1980 bool isNontemporal) {
// Thread-local globals must be accessed through the TLS address intrinsic.
1981 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
1982 if (GV->isThreadLocal())
1983 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
1985
1986 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
1987 // Boolean vectors use `iN` as storage type.
1988 if (ClangVecTy->isExtVectorBoolType()) {
1989 llvm::Type *ValTy = ConvertType(Ty);
1990 unsigned ValNumElems =
1991 cast<llvm::FixedVectorType>(ValTy)->getNumElements();
1992 // Load the `iP` storage object (P is the padded vector size).
1993 auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
1994 const auto *RawIntTy = RawIntV->getType();
1995 assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
1996 // Bitcast iP --> <P x i1>.
1997 auto *PaddedVecTy = llvm::FixedVectorType::get(
1998 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
1999 llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
2000 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2001 V = emitBoolVecConversion(V, ValNumElems, "extractvec");
2002
2003 return EmitFromMemory(V, Ty);
2004 }
2005
2006 // Handles vectors of sizes that are likely to be expanded to a larger size
2007 // to optimize performance.
2008 auto *VTy = cast<llvm::FixedVectorType>(Addr.getElementType());
2009 auto *NewVecTy =
2011
// Load at the widened memory type, then shuffle back down to the original
// element count.
2012 if (VTy != NewVecTy) {
2013 Address Cast = Addr.withElementType(NewVecTy);
2014 llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVecN");
2015 unsigned OldNumElements = VTy->getNumElements();
2016 SmallVector<int, 16> Mask(OldNumElements);
2017 std::iota(Mask.begin(), Mask.end(), 0);
2018 V = Builder.CreateShuffleVector(V, Mask, "extractVec");
2019 return EmitFromMemory(V, Ty);
2020 }
2021 }
2022
2023 // Atomic operations have to be done on integral types.
2024 LValue AtomicLValue =
2025 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2026 if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
2027 return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
2028 }
2029
2030 Addr =
2032
2033 llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
2034 if (isNontemporal) {
2035 llvm::MDNode *Node = llvm::MDNode::get(
2036 Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2037 Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2038 }
2039
2040 CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
2041
2042 if (EmitScalarRangeCheck(Load, Ty, Loc)) {
2043 // In order to prevent the optimizer from throwing away the check, don't
2044 // attach range metadata to the load.
2045 } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
2046 if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
2047 Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
2048 Load->setMetadata(llvm::LLVMContext::MD_noundef,
2049 llvm::MDNode::get(getLLVMContext(), {}));
2050 }
2051
2052 return EmitFromMemory(Load, Ty);
2053}
2054
2055/// Converts a scalar value from its primary IR type (as returned
2056/// by ConvertType) to its load/store type (as returned by
2057/// convertTypeForLoadStore).
// NOTE(review): original line 2061 (presumably the declaration of 'Signed',
// used by the IntCast on line 2062) is missing from this extracted view.
2058llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
// Booleans and _BitInts widen to their load/store type in memory.
2059 if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
2060 llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2062 return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
2063 }
2064
// Boolean ext-vectors are packed into a single iP integer for storage.
2065 if (Ty->isExtVectorBoolType()) {
2066 llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2067 // Expand to the memory bit width.
2068 unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
2069 // <N x i1> --> <P x i1>.
2070 Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
2071 // <P x i1> --> iP.
2072 Value = Builder.CreateBitCast(Value, StoreTy);
2073 }
2074
2075 return Value;
2076}
2077
2078/// Converts a scalar value from its load/store type (as returned
2079/// by convertTypeForLoadStore) to its primary IR type (as returned
2080/// by ConvertType).
2081llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
2082 if (Ty->isExtVectorBoolType()) {
2083 const auto *RawIntTy = Value->getType();
2084 // Bitcast iP --> <P x i1>.
2085 auto *PaddedVecTy = llvm::FixedVectorType::get(
2086 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2087 auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
2088 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2089 llvm::Type *ValTy = ConvertType(Ty);
2090 unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2091 return emitBoolVecConversion(V, ValNumElems, "extractvec");
2092 }
2093
2094 if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
2095 llvm::Type *ResTy = ConvertType(Ty);
2096 return Builder.CreateTrunc(Value, ResTy, "loadedv");
2097 }
2098
2099 return Value;
2100}
2101
2102// Convert the pointer of \p Addr to a pointer to a vector (the value type of
2103// MatrixType), if it points to a array (the memory type of MatrixType).
// NOTE(review): original line 2104 (the signature head, presumably
// "static Address MaybeConvertMatrixAddress(Address Addr,") is missing from
// this extracted view.
// Rewrites the element type of Addr between the matrix memory form
// (ArrayType) and value form (VectorType), in whichever direction IsVector
// requests; returns Addr unchanged if no conversion applies.
2105 CodeGenFunction &CGF,
2106 bool IsVector = true) {
2107 auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
// Array in memory, vector wanted: retype [N x T]* as <N x T>*.
2108 if (ArrayTy && IsVector) {
2109 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
2110 ArrayTy->getNumElements());
2111
2112 return Addr.withElementType(VectorTy);
2113 }
// Vector in memory, array wanted: the inverse retyping.
2114 auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
2115 if (VectorTy && !IsVector) {
2116 auto *ArrayTy = llvm::ArrayType::get(
2117 VectorTy->getElementType(),
2118 cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
2119
2120 return Addr.withElementType(ArrayTy);
2121 }
2122
2123 return Addr;
2124}
2125
2126// Emit a store of a matrix LValue. This may require casting the original
2127// pointer to memory address (ArrayType) to a pointer to the value type
2128// (VectorType).
2129static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
2130 bool isInit, CodeGenFunction &CGF) {
2131 Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
2132 value->getType()->isVectorTy());
2133 CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
2134 lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
2135 lvalue.isNontemporal());
2136}
2137
// Core scalar store: mirrors EmitLoadOfScalar -- TLS address adjustment,
// padded vector widening, EmitToMemory conversion, atomic dispatch,
// nontemporal hint and TBAA decoration.
// NOTE(review): original lines 2146 (second withPointer argument) and 2154
// (the NewVecTy initializer) are missing from this extracted view.
2138void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
2139 bool Volatile, QualType Ty,
2140 LValueBaseInfo BaseInfo,
2141 TBAAAccessInfo TBAAInfo,
2142 bool isInit, bool isNontemporal) {
// Thread-local globals must be accessed through the TLS address intrinsic.
2143 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2144 if (GV->isThreadLocal())
2145 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2147
2148 // Handles vectors of sizes that are likely to be expanded to a larger size
2149 // to optimize performance.
2150 llvm::Type *SrcTy = Value->getType();
2151 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2152 if (auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2153 auto *NewVecTy =
// Widen the source with -1 (poison) lanes so it matches the memory type.
2155 if (!ClangVecTy->isExtVectorBoolType() && VecTy != NewVecTy) {
2156 SmallVector<int, 16> Mask(NewVecTy->getNumElements(), -1);
2157 std::iota(Mask.begin(), Mask.begin() + VecTy->getNumElements(), 0);
2158 Value = Builder.CreateShuffleVector(Value, Mask, "extractVec");
2159 SrcTy = NewVecTy;
2160 }
2161 if (Addr.getElementType() != SrcTy)
2162 Addr = Addr.withElementType(SrcTy);
2163 }
2164 }
2165
2166 Value = EmitToMemory(Value, Ty);
2167
// Atomic types (or lvalues suitable for inline atomics, unless this is an
// initialization) take the atomic store path.
2168 LValue AtomicLValue =
2169 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2170 if (Ty->isAtomicType() ||
2171 (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
2172 EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
2173 return;
2174 }
2175
2176 llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
2177 if (isNontemporal) {
2178 llvm::MDNode *Node =
2179 llvm::MDNode::get(Store->getContext(),
2180 llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2181 Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2182 }
2183
2184 CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
2185}
2186
2187void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
2188 bool isInit) {
2189 if (lvalue.getType()->isConstantMatrixType()) {
2190 EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
2191 return;
2192 }
2193
2194 EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
2195 lvalue.getType(), lvalue.getBaseInfo(),
2196 lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
2197}
2198
2199// Emit a load of a LValue of matrix type. This may require casting the pointer
2200// to memory address (ArrayType) to a pointer to the value type (VectorType).
// NOTE(review): original lines 2201 (the signature head, presumably
// "static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,")
// and 2204 (the computation of Addr, presumably via
// MaybeConvertMatrixAddress) are missing from this extracted view.
// Loads a constant-matrix lvalue as a vector rvalue.
2202 CodeGenFunction &CGF) {
2203 assert(LV.getType()->isConstantMatrixType());
2205 LV.setAddress(Addr);
2206 return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
2207}
2208
// NOTE(review): original lines 2209-2210 (the signature, a CodeGenFunction
// member taking LValue LV, an aggregate Slot and a SourceLocation Loc) and
// 2216 (the TEK_Complex case body) are missing from this extracted view.
// Dispatches a load by evaluation kind: scalar, complex or aggregate.
2211 QualType Ty = LV.getType();
2212 switch (getEvaluationKind(Ty)) {
2213 case TEK_Scalar:
2214 return EmitLoadOfLValue(LV, Loc);
2215 case TEK_Complex:
2217 case TEK_Aggregate:
// Aggregates are copied into the provided slot and returned from there.
2218 EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
2219 return Slot.asRValue();
2220 }
2221 llvm_unreachable("bad evaluation kind");
2222}
2223
2224/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
2225/// method emits the address of the lvalue, then loads the result as an rvalue,
2226/// returning the rvalue.
// NOTE(review): original lines 2227 (the signature, presumably
// "RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {"),
// 2231, 2234, 2237 (ObjC weak-load calls), 2266 (ext-vector case body) and
// 2281 (the matrix-element load operand) are missing from this extracted
// view. Dispatches a load over every LValue kind: ObjC weak, simple,
// vector element, ext-vector subset, global register, matrix element and
// bit-field.
2228 if (LV.isObjCWeak()) {
2229 // load of a __weak object.
2230 Address AddrWeakObj = LV.getAddress();
2232 AddrWeakObj));
2233 }
2235 // In MRC mode, we do a load+autorelease.
2236 if (!getLangOpts().ObjCAutoRefCount) {
2238 }
2239
2240 // In ARC mode, we load retained and then consume the value.
2241 llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
2242 Object = EmitObjCConsumeObject(LV.getType(), Object);
2243 return RValue::get(Object);
2244 }
2245
2246 if (LV.isSimple()) {
2247 assert(!LV.getType()->isFunctionType());
2248
2249 if (LV.getType()->isConstantMatrixType())
2250 return EmitLoadOfMatrixLValue(LV, Loc, *this);
2251
2252 // Everything needs a load.
2253 return RValue::get(EmitLoadOfScalar(LV, Loc));
2254 }
2255
// Single element of a vector: load the whole vector and extract.
2256 if (LV.isVectorElt()) {
2257 llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
2258 LV.isVolatileQualified());
2259 return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
2260 "vecext"));
2261 }
2262
2263 // If this is a reference to a subset of the elements of a vector, either
2264 // shuffle the input or extract/insert them as appropriate.
2265 if (LV.isExtVectorElt()) {
2267 }
2268
2269 // Global Register variables always invoke intrinsics
2270 if (LV.isGlobalReg())
2271 return EmitLoadOfGlobalRegLValue(LV);
2272
2273 if (LV.isMatrixElt()) {
2274 llvm::Value *Idx = LV.getMatrixIdx();
// Under optimization, assert the index is in bounds for the optimizer.
2275 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2276 const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
2277 llvm::MatrixBuilder MB(Builder);
2278 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2279 }
2280 llvm::LoadInst *Load =
2282 return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
2283 }
2284
2285 assert(LV.isBitField() && "Unknown LValue type!");
2286 return EmitLoadOfBitfieldLValue(LV, Loc);
2287}
2288
// NOTE(review): original lines 2289-2290 (the signature, presumably
// "RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
// SourceLocation Loc)") are missing from this extracted view.
// Loads a bit-field: load the storage unit, then shift/mask the field's
// bits into place (arithmetic shifts for signed fields so the sign extends).
2291 const CGBitFieldInfo &Info = LV.getBitFieldInfo();
2292
2293 // Get the output type.
2294 llvm::Type *ResLTy = ConvertType(LV.getType());
2295
2296 Address Ptr = LV.getBitFieldAddress();
2297 llvm::Value *Val =
2298 Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
2299
// AAPCS volatile bit-fields use the volatile storage geometry.
2300 bool UseVolatile = LV.isVolatileQualified() &&
2301 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2302 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2303 const unsigned StorageSize =
2304 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2305 if (Info.IsSigned) {
// Shift left so the field's top bit reaches the storage top bit, then
// arithmetic-shift right to sign-extend down into place.
2306 assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
2307 unsigned HighBits = StorageSize - Offset - Info.Size;
2308 if (HighBits)
2309 Val = Builder.CreateShl(Val, HighBits, "bf.shl");
2310 if (Offset + HighBits)
2311 Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
2312 } else {
// Unsigned: logical shift down and mask off the upper bits.
2313 if (Offset)
2314 Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
2315 if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
2316 Val = Builder.CreateAnd(
2317 Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
2318 }
2319 Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
2320 EmitScalarRangeCheck(Val, LV.getType(), Loc);
2321 return RValue::get(Val);
2322}
2323
2324// If this is a reference to a subset of the elements of a vector, create an
2325// appropriate shufflevector.
// NOTE(review): original lines 2326 (the signature, presumably
// "RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {")
// and 2352 (the declaration of Mask used below, presumably a
// SmallVector<int, 4>) are missing from this extracted view.
// Loads an ext-vector component access (e.g. v.xy) via extract or shuffle.
2327 llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
2328 LV.isVolatileQualified());
2329
2330 // HLSL allows treating scalars as one-element vectors. Converting the scalar
2331 // IR value to a vector here allows the rest of codegen to behave as normal.
2332 if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
2333 llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
2334 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2335 Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
2336 }
2337
2338 const llvm::Constant *Elts = LV.getExtVectorElts();
2339
2340 // If the result of the expression is a non-vector type, we must be extracting
2341 // a single element. Just codegen as an extractelement.
2342 const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2343 if (!ExprVT) {
2344 unsigned InIdx = getAccessedFieldNo(0, Elts);
2345 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2346 return RValue::get(Builder.CreateExtractElement(Vec, Elt));
2347 }
2348
2349 // Always use shuffle vector to try to retain the original program structure
2350 unsigned NumResultElts = ExprVT->getNumElements();
2351
2353 for (unsigned i = 0; i != NumResultElts; ++i)
2354 Mask.push_back(getAccessedFieldNo(i, Elts));
2355
2356 Vec = Builder.CreateShuffleVector(Vec, Mask);
2357 return RValue::get(Vec);
2358}
2359
2360/// Generates lvalue for partial ext_vector access.
// NOTE(review): original line 2361 (the signature, presumably
// "Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {") is
// missing from this extracted view.
// Produces the address of the first accessed element of a partial
// ext_vector access by retyping the vector pointer to its element type and
// GEPing to the accessed index.
2362 Address VectorAddress = LV.getExtVectorAddress();
2363 QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2364 llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2365
2366 Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);
2367
// Only the first accessed field number matters for the base address.
2368 const llvm::Constant *Elts = LV.getExtVectorElts();
2369 unsigned ix = getAccessedFieldNo(0, Elts);
2370
2371 Address VectorBasePtrPlusIx =
2372 Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2373 "vector.elt");
2374
2375 return VectorBasePtrPlusIx;
2376}
2377
2378/// Load of global named registers are always calls to intrinsics.
// NOTE(review): original line 2379 (the signature, presumably
// "RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {") is
// missing from this extracted view.
// Reads a global named register via the llvm.read_register intrinsic;
// pointer-typed registers are read as intptr and converted back.
2380 assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2381 "Bad type for register variable");
2382 llvm::MDNode *RegName = cast<llvm::MDNode>(
2383 cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2384
2385 // We accept integer and pointer types only
2386 llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2387 llvm::Type *Ty = OrigTy;
2388 if (OrigTy->isPointerTy())
2389 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2390 llvm::Type *Types[] = { Ty };
2391
2392 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2393 llvm::Value *Call = Builder.CreateCall(
2394 F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2395 if (OrigTy->isPointerTy())
2396 Call = Builder.CreateIntToPtr(Call, OrigTy);
2397 return RValue::get(Call);
2398}
2399
2400/// EmitStoreThroughLValue - Store the specified rvalue into the specified
2401/// lvalue, where both are guaranteed to the have the same type, and that type
2402/// is 'Ty'.
// NOTE(review): original lines 2403 (the signature, presumably
// "void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,"),
// 2430, 2438, 2453 (store/dispatch calls), 2465/2468/2472/2480/2483/2485/
// 2489-2490 (the ObjCLifetime case labels and ARC store calls) and 2512
// (the base-ivar lvalue computation) are missing from this extracted view.
// Stores Src through Dst for every LValue kind: vector element, ext-vector
// subset, global register, matrix element, bit-field, ARC-qualified, ObjC
// GC weak/strong, and finally the plain scalar store.
2404 bool isInit) {
2405 if (!Dst.isSimple()) {
2406 if (Dst.isVectorElt()) {
2407 // Read/modify/write the vector, inserting the new element.
2408 llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2409 Dst.isVolatileQualified());
2410 auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
2411 if (IRStoreTy) {
2412 auto *IRVecTy = llvm::FixedVectorType::get(
2413 Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
2414 Vec = Builder.CreateBitCast(Vec, IRVecTy);
2415 // iN --> <N x i1>.
2416 }
2417 llvm::Value *SrcVal = Src.getScalarVal();
2418 // Allow inserting `<1 x T>` into an `<N x T>`. It can happen with scalar
2419 // types which are mapped to vector LLVM IR types (e.g. for implementing
2420 // an ABI).
2421 if (auto *EltTy = dyn_cast<llvm::FixedVectorType>(SrcVal->getType());
2422 EltTy && EltTy->getNumElements() == 1)
2423 SrcVal = Builder.CreateBitCast(SrcVal, EltTy->getElementType());
2424 Vec = Builder.CreateInsertElement(Vec, SrcVal, Dst.getVectorIdx(),
2425 "vecins");
2426 if (IRStoreTy) {
2427 // <N x i1> --> <iN>.
2428 Vec = Builder.CreateBitCast(Vec, IRStoreTy);
2429 }
2431 Dst.isVolatileQualified());
2432 return;
2433 }
2434
2435 // If this is an update of extended vector elements, insert them as
2436 // appropriate.
2437 if (Dst.isExtVectorElt())
2439
2440 if (Dst.isGlobalReg())
2441 return EmitStoreThroughGlobalRegLValue(Src, Dst);
2442
2443 if (Dst.isMatrixElt()) {
2444 llvm::Value *Idx = Dst.getMatrixIdx();
// Under optimization, assert the index is in bounds for the optimizer.
2445 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2446 const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2447 llvm::MatrixBuilder MB(Builder);
2448 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2449 }
2450 llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2451 llvm::Value *Vec =
2452 Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
2454 Dst.isVolatileQualified());
2455 return;
2456 }
2457
2458 assert(Dst.isBitField() && "Unknown LValue type");
2459 return EmitStoreThroughBitfieldLValue(Src, Dst);
2460 }
2461
2462 // There's special magic for assigning into an ARC-qualified l-value.
2463 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2464 switch (Lifetime) {
2466 llvm_unreachable("present but none");
2467
2469 // nothing special
2470 break;
2471
2473 if (isInit) {
2474 Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2475 break;
2476 }
2477 EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2478 return;
2479
2481 if (isInit)
2482 // Initialize and then skip the primitive store.
2484 else
2486 /*ignore*/ true);
2487 return;
2488
2491 Src.getScalarVal()));
2492 // fall into the normal path
2493 break;
2494 }
2495 }
2496
2497 if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2498 // load of a __weak object.
2499 Address LvalueDst = Dst.getAddress();
2500 llvm::Value *src = Src.getScalarVal();
2501 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
2502 return;
2503 }
2504
2505 if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2506 // load of a __strong object.
2507 Address LvalueDst = Dst.getAddress();
2508 llvm::Value *src = Src.getScalarVal();
2509 if (Dst.isObjCIvar()) {
2510 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2511 llvm::Type *ResultType = IntPtrTy;
// Compute the byte offset of the ivar within the object for the runtime.
2513 llvm::Value *RHS = dst.emitRawPointer(*this);
2514 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2515 llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
2516 ResultType, "sub.ptr.lhs.cast");
2517 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
2518 CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
2519 } else if (Dst.isGlobalObjCRef()) {
2520 CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
2521 Dst.isThreadLocalRef());
2522 }
2523 else
2524 CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2525 return;
2526 }
2527
2528 assert(Src.isScalar() && "Can't emit an agg store with this method");
2529 EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2530}
2531
// NOTE(review): original lines 2532 (the signature head, presumably
// "void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue
// Dst,") and 2560 (a guard before the masking, presumably
// "if (Info.Size < StorageSize)") are missing from this extracted view.
// Read-modify-write store of a bit-field. If Result is non-null, it
// receives the new field value as seen by a subsequent load.
2533 llvm::Value **Result) {
2534 const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2535 llvm::Type *ResLTy = convertTypeForLoadStore(Dst.getType());
2536 Address Ptr = Dst.getBitFieldAddress();
2537
2538 // Get the source value, truncated to the width of the bit-field.
2539 llvm::Value *SrcVal = Src.getScalarVal();
2540
2541 // Cast the source to the storage type and shift it into place.
2542 SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2543 /*isSigned=*/false);
2544 llvm::Value *MaskedVal = SrcVal;
2545
// AAPCS volatile bit-fields use the volatile storage geometry.
2546 const bool UseVolatile =
2547 CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
2548 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2549 const unsigned StorageSize =
2550 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2551 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2552 // See if there are other bits in the bitfield's storage we'll need to load
2553 // and mask together with source before storing.
2554 if (StorageSize != Info.Size) {
2555 assert(StorageSize > Info.Size && "Invalid bitfield size.");
2556 llvm::Value *Val =
2557 Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2558
2559 // Mask the source value as needed.
2561 SrcVal = Builder.CreateAnd(
2562 SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
2563 "bf.value");
2564 MaskedVal = SrcVal;
2565 if (Offset)
2566 SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
2567
2568 // Mask out the original value.
2569 Val = Builder.CreateAnd(
2570 Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
2571 "bf.clear");
2572
2573 // Or together the unchanged values and the source value.
2574 SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2575 } else {
2576 assert(Offset == 0);
2577 // According to the AACPS:
2578 // When a volatile bit-field is written, and its container does not overlap
2579 // with any non-bit-field member, its container must be read exactly once
2580 // and written exactly once using the access width appropriate to the type
2581 // of the container. The two accesses are not atomic.
2582 if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
2583 CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2584 Builder.CreateLoad(Ptr, true, "bf.load");
2585 }
2586
2587 // Write the new value back out.
2588 Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2589
2590 // Return the new value of the bit-field, if requested.
2591 if (Result) {
2592 llvm::Value *ResultVal = MaskedVal;
2593
2594 // Sign extend the value if needed.
2595 if (Info.IsSigned) {
2596 assert(Info.Size <= StorageSize);
2597 unsigned HighBits = StorageSize - Info.Size;
2598 if (HighBits) {
2599 ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2600 ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2601 }
2602 }
2603
2604 ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2605 "bf.result.cast");
2606 *Result = EmitFromMemory(ResultVal, Dst.getType());
2607 }
2608}
2609
// NOTE(review): original lines 2610 (the signature head, presumably
// "void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue
// Src,"), 2654 (the declaration of Mask, presumably a SmallVector<int, 4>)
// and 2680 (the final CreateStore call head) are missing from this
// extracted view. Read-modify-write store through an ext-vector component
// lvalue (e.g. v.xy = ...).
2611 LValue Dst) {
2612 // HLSL allows storing to scalar values through ExtVector component LValues.
2613 // To support this we need to handle the case where the destination address is
2614 // a scalar.
2615 Address DstAddr = Dst.getExtVectorAddress();
2616 if (!DstAddr.getElementType()->isVectorTy()) {
2617 assert(!Dst.getType()->isVectorType() &&
2618 "this should only occur for non-vector l-values");
2619 Builder.CreateStore(Src.getScalarVal(), DstAddr, Dst.isVolatileQualified());
2620 return;
2621 }
2622
2623 // This access turns into a read/modify/write of the vector. Load the input
2624 // value now.
2625 llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
2626 const llvm::Constant *Elts = Dst.getExtVectorElts();
2627
2628 llvm::Value *SrcVal = Src.getScalarVal();
2629
2630 if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2631 unsigned NumSrcElts = VTy->getNumElements();
2632 unsigned NumDstElts =
2633 cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
2634 if (NumDstElts == NumSrcElts) {
2635 // Use shuffle vector is the src and destination are the same number of
2636 // elements and restore the vector mask since it is on the side it will be
2637 // stored.
2638 SmallVector<int, 4> Mask(NumDstElts);
2639 for (unsigned i = 0; i != NumSrcElts; ++i)
2640 Mask[getAccessedFieldNo(i, Elts)] = i;
2641
2642 Vec = Builder.CreateShuffleVector(SrcVal, Mask);
2643 } else if (NumDstElts > NumSrcElts) {
2644 // Extended the source vector to the same length and then shuffle it
2645 // into the destination.
2646 // FIXME: since we're shuffling with undef, can we just use the indices
2647 // into that? This could be simpler.
2648 SmallVector<int, 4> ExtMask;
2649 for (unsigned i = 0; i != NumSrcElts; ++i)
2650 ExtMask.push_back(i);
2651 ExtMask.resize(NumDstElts, -1);
2652 llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
2653 // build identity
2655 for (unsigned i = 0; i != NumDstElts; ++i)
2656 Mask.push_back(i);
2657
2658 // When the vector size is odd and .odd or .hi is used, the last element
2659 // of the Elts constant array will be one past the size of the vector.
2660 // Ignore the last element here, if it is greater than the mask size.
2661 if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2662 NumSrcElts--;
2663
2664 // modify when what gets shuffled in
2665 for (unsigned i = 0; i != NumSrcElts; ++i)
2666 Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
2667 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
2668 } else {
2669 // We should never shorten the vector
2670 llvm_unreachable("unexpected shorten vector length");
2671 }
2672 } else {
2673 // If the Src is a scalar (not a vector), and the target is a vector it must
2674 // be updating one element.
2675 unsigned InIdx = getAccessedFieldNo(0, Elts);
2676 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2677 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2678 }
2679
2681 Dst.isVolatileQualified());
2682}
2683
2684/// Store of global named registers are always calls to intrinsics.
// NOTE(review): original line 2685 (the signature, presumably
// "void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue
// Dst) {") is missing from this extracted view.
// Writes a global named register via the llvm.write_register intrinsic;
// pointer-typed registers are converted to intptr first.
2686 assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2687 "Bad type for register variable");
2688 llvm::MDNode *RegName = cast<llvm::MDNode>(
2689 cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2690 assert(RegName && "Register LValue is not metadata");
2691
2692 // We accept integer and pointer types only
2693 llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2694 llvm::Type *Ty = OrigTy;
2695 if (OrigTy->isPointerTy())
2696 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2697 llvm::Type *Types[] = { Ty };
2698
2699 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2700 llvm::Value *Value = Src.getScalarVal();
2701 if (OrigTy->isPointerTy())
2702 Value = Builder.CreatePtrToInt(Value, Ty);
2703 Builder.CreateCall(
2704 F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2705}
2706
2707// setObjCGCLValueClass - sets class of the lvalue for the purpose of
2708// generating write-barries API. It is currently a global, ivar,
2709// or neither.
// NOTE(review): original lines 2731, 2742 (recursive setObjCGCLValueClass
// calls for the ivar-base and DeclRefExpr cases) and 2802 (the MemberExpr
// ivar flag propagation) are missing from this extracted view.
2710static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2711 LValue &LV,
2712 bool IsMemberAccess=false) {
// Only relevant when ObjC garbage collection is enabled.
2713 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2714 return;
2715
2716 if (isa<ObjCIvarRefExpr>(E)) {
2717 QualType ExpTy = E->getType();
2718 if (IsMemberAccess && ExpTy->isPointerType()) {
2719 // If ivar is a structure pointer, assigning to field of
2720 // this struct follows gcc's behavior and makes it a non-ivar
2721 // writer-barrier conservatively.
2722 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2723 if (ExpTy->isRecordType()) {
2724 LV.setObjCIvar(false);
2725 return;
2726 }
2727 }
2728 LV.setObjCIvar(true);
2729 auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2730 LV.setBaseIvarExp(Exp->getBase());
2732 return;
2733 }
2734
2735 if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2736 if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2737 if (VD->hasGlobalStorage()) {
2738 LV.setGlobalObjCRef(true);
2739 LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2740 }
2741 }
2743 return;
2744 }
2745
// The remaining cases walk through wrapper expressions recursively.
2746 if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2747 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2748 return;
2749 }
2750
2751 if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2752 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2753 if (LV.isObjCIvar()) {
2754 // If cast is to a structure pointer, follow gcc's behavior and make it
2755 // a non-ivar write-barrier.
2756 QualType ExpTy = E->getType();
2757 if (ExpTy->isPointerType())
2758 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2759 if (ExpTy->isRecordType())
2760 LV.setObjCIvar(false);
2761 }
2762 return;
2763 }
2764
2765 if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
2766 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
2767 return;
2768 }
2769
2770 if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
2771 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2772 return;
2773 }
2774
2775 if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
2776 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2777 return;
2778 }
2779
2780 if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
2781 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2782 return;
2783 }
2784
2785 if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
2786 setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
2787 if (LV.isObjCIvar() && !LV.isObjCArray())
2788 // Using array syntax to assigning to what an ivar points to is not
2789 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2790 LV.setObjCIvar(false);
2791 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
2792 // Using array syntax to assigning to what global points to is not
2793 // same as assigning to the global itself. {id *G;} G[i] = 0;
2794 LV.setGlobalObjCRef(false);
2795 return;
2796 }
2797
2798 if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
2799 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
2800 // We don't know if member is an 'ivar', but this flag is looked at
2801 // only in the context of LV.isObjCIvar().
2803 return;
2804 }
2805}
2806
2808 CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
2809 llvm::Type *RealVarTy, SourceLocation Loc) {
2810 if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
2812 CGF, VD, Addr, Loc);
2813 else
2814 Addr =
2815 CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
2816
2817 Addr = Addr.withElementType(RealVarTy);
2818 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2819}
2820
2822 const VarDecl *VD, QualType T) {
2823 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2824 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2825 // Return an invalid address if variable is MT_To (or MT_Enter starting with
2826 // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
2827 // and MT_To (or MT_Enter) with unified memory, return a valid address.
2828 if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2829 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2831 return Address::invalid();
2832 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
2833 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2834 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2836 "Expected link clause OR to clause with unified memory enabled.");
2837 QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
2839 return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
2840}
2841
2842Address
2844 LValueBaseInfo *PointeeBaseInfo,
2845 TBAAAccessInfo *PointeeTBAAInfo) {
2846 llvm::LoadInst *Load =
2847 Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
2849 return makeNaturalAddressForPointer(Load, RefLVal.getType()->getPointeeType(),
2850 CharUnits(), /*ForPointeeType=*/true,
2851 PointeeBaseInfo, PointeeTBAAInfo);
2852}
2853
2855 LValueBaseInfo PointeeBaseInfo;
2856 TBAAAccessInfo PointeeTBAAInfo;
2857 Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
2858 &PointeeTBAAInfo);
2859 return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
2860 PointeeBaseInfo, PointeeTBAAInfo);
2861}
2862
2864 const PointerType *PtrTy,
2865 LValueBaseInfo *BaseInfo,
2866 TBAAAccessInfo *TBAAInfo) {
2867 llvm::Value *Addr = Builder.CreateLoad(Ptr);
2868 return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
2869 CharUnits(), /*ForPointeeType=*/true,
2870 BaseInfo, TBAAInfo);
2871}
2872
2874 const PointerType *PtrTy) {
2875 LValueBaseInfo BaseInfo;
2876 TBAAAccessInfo TBAAInfo;
2877 Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
2878 return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
2879}
2880
2882 const Expr *E, const VarDecl *VD) {
2883 QualType T = E->getType();
2884
2885 // If it's thread_local, emit a call to its wrapper function instead.
2886 if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2888 return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
2889 // Check if the variable is marked as declare target with link clause in
2890 // device codegen.
2891 if (CGF.getLangOpts().OpenMPIsTargetDevice) {
2892 Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
2893 if (Addr.isValid())
2894 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2895 }
2896
2897 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
2898
2899 if (VD->getTLSKind() != VarDecl::TLS_None)
2900 V = CGF.Builder.CreateThreadLocalAddress(V);
2901
2902 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
2903 CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
2904 Address Addr(V, RealVarTy, Alignment);
2905 // Emit reference to the private copy of the variable if it is an OpenMP
2906 // threadprivate variable.
2907 if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
2908 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2909 return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
2910 E->getExprLoc());
2911 }
2912 LValue LV = VD->getType()->isReferenceType() ?
2913 CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
2916 setObjCGCLValueClass(CGF.getContext(), E, LV);
2917 return LV;
2918}
2919
2921 llvm::Type *Ty) {
2922 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2923 if (FD->hasAttr<WeakRefAttr>()) {
2925 return aliasee.getPointer();
2926 }
2927
2928 llvm::Constant *V = GetAddrOfFunction(GD, Ty);
2929 return V;
2930}
2931
2933 GlobalDecl GD) {
2934 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2935 llvm::Constant *V = CGF.CGM.getFunctionPointer(GD);
2936 CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
2937 return CGF.MakeAddrLValue(V, E->getType(), Alignment,
2939}
2940
2942 llvm::Value *ThisValue) {
2943
2944 return CGF.EmitLValueForLambdaField(FD, ThisValue);
2945}
2946
2947/// Named Registers are named metadata pointing to the register name
2948/// which will be read from/written to as an argument to the intrinsic
2949/// @llvm.read/write_register.
2950/// So far, only the name is being passed down, but other options such as
2951/// register type, allocation type or even optimization options could be
2952/// passed down via the metadata node.
2954 SmallString<64> Name("llvm.named.register.");
2955 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
2956 assert(Asm->getLabel().size() < 64-Name.size() &&
2957 "Register name too big");
2958 Name.append(Asm->getLabel());
2959 llvm::NamedMDNode *M =
2960 CGM.getModule().getOrInsertNamedMetadata(Name);
2961 if (M->getNumOperands() == 0) {
2962 llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
2963 Asm->getLabel());
2964 llvm::Metadata *Ops[] = {Str};
2965 M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
2966 }
2967
2968 CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
2969
2970 llvm::Value *Ptr =
2971 llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
2972 return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
2973}
2974
2975/// Determine whether we can emit a reference to \p VD from the current
2976/// context, despite not necessarily having seen an odr-use of the variable in
2977/// this context.
2979 const DeclRefExpr *E,
2980 const VarDecl *VD) {
2981 // For a variable declared in an enclosing scope, do not emit a spurious
2982 // reference even if we have a capture, as that will emit an unwarranted
2983 // reference to our capture state, and will likely generate worse code than
2984 // emitting a local copy.
2985 if (E->refersToEnclosingVariableOrCapture())
2986 return false;
2987
2988 // For a local declaration declared in this function, we can always reference
2989 // it even if we don't have an odr-use.
2990 if (VD->hasLocalStorage()) {
2991 return VD->getDeclContext() ==
2992 dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
2993 }
2994
2995 // For a global declaration, we can emit a reference to it if we know
2996 // for sure that we are able to emit a definition of it.
2997 VD = VD->getDefinition(CGF.getContext());
2998 if (!VD)
2999 return false;
3000
3001 // Don't emit a spurious reference if it might be to a variable that only
3002 // exists on a different device / target.
3003 // FIXME: This is unnecessarily broad. Check whether this would actually be a
3004 // cross-target reference.
3005 if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
3006 CGF.getLangOpts().OpenCL) {
3007 return false;
3008 }
3009
3010 // We can emit a spurious reference only if the linkage implies that we'll
3011 // be emitting a non-interposable symbol that will be retained until link
3012 // time.
3013 switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
3014 case llvm::GlobalValue::ExternalLinkage:
3015 case llvm::GlobalValue::LinkOnceODRLinkage:
3016 case llvm::GlobalValue::WeakODRLinkage:
3017 case llvm::GlobalValue::InternalLinkage:
3018 case llvm::GlobalValue::PrivateLinkage:
3019 return true;
3020 default:
3021 return false;
3022 }
3023}
3024
// CodeGenFunction::EmitDeclRefLValue — emit an lvalue for a DeclRefExpr.
// Handles, in order: global named registers, non-odr-use constant folding,
// lambda/block/captured-statement captures, weakref aliases, global and
// local variables (TLS, OpenMP threadprivate, block-byref, references),
// functions, structured bindings, MSGuidDecl, and template parameter objects.
//
// NOTE(review): this text is a doxygen extraction — the declaration line
// (orig. 3025, presumably
// "LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {") and
// several hyperlinked lines are absent (orig. 3062, 3084, 3090, 3095-3096,
// 3102, 3107, 3127, 3147, 3160, 3165, 3179, 3187, 3194-3195, 3216-3217,
// 3226). Code left byte-identical; restore from llvm-project before use.
3026 const NamedDecl *ND = E->getDecl();
3027 QualType T = E->getType();
3028
3029 assert(E->isNonOdrUse() != NOUR_Unevaluated &&
3030 "should not emit an unevaluated operand");
3031
3032 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3033 // Global Named registers access via intrinsics only
3034 if (VD->getStorageClass() == SC_Register &&
3035 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
3036 return EmitGlobalNamedRegister(VD, CGM);
3037
3038 // If this DeclRefExpr does not constitute an odr-use of the variable,
3039 // we're not permitted to emit a reference to it in general, and it might
3040 // not be captured if capture would be necessary for a use. Emit the
3041 // constant value directly instead.
3042 if (E->isNonOdrUse() == NOUR_Constant &&
3043 (VD->getType()->isReferenceType() ||
3044 !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
3045 VD->getAnyInitializer(VD);
3046 llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
3047 E->getLocation(), *VD->evaluateValue(), VD->getType());
3048 assert(Val && "failed to emit constant expression");
3049
3050 Address Addr = Address::invalid();
3051 if (!VD->getType()->isReferenceType()) {
3052 // Spill the constant value to a global.
3053 Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
3054 getContext().getDeclAlign(VD));
3055 llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
3056 auto *PTy = llvm::PointerType::get(
3057 VarTy, getTypes().getTargetAddressSpace(VD->getType()));
3058 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
3059 } else {
3060 // Should we be using the alignment of the constant pointer we emitted?
3061 CharUnits Alignment =
// NOTE(review): start of the alignment-query call dropped here (orig. 3062).
3063 /* BaseInfo= */ nullptr,
3064 /* TBAAInfo= */ nullptr,
3065 /* forPointeeType= */ true);
3066 Addr = makeNaturalAddressForPointer(Val, T, Alignment);
3067 }
3068 return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
3069 }
3070
3071 // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
3072
3073 // Check for captured variables.
3074 if (E->refersToEnclosingVariableOrCapture()) {
3075 VD = VD->getCanonicalDecl();
3076 if (auto *FD = LambdaCaptureFields.lookup(VD))
3077 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3078 if (CapturedStmtInfo) {
3079 auto I = LocalDeclMap.find(VD);
3080 if (I != LocalDeclMap.end()) {
3081 LValue CapLVal;
3082 if (VD->getType()->isReferenceType())
3083 CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
// NOTE(review): trailing argument line dropped here (orig. 3084).
3085 else
3086 CapLVal = MakeAddrLValue(I->second, T);
3087 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3088 // in simd context.
3089 if (getLangOpts().OpenMP &&
// NOTE(review): second operand of the condition dropped here (orig. 3090).
3091 CapLVal.setNontemporal(/*Value=*/true);
3092 return CapLVal;
3093 }
3094 LValue CapLVal =
// NOTE(review): initializer dropped here (orig. 3095-3096) — presumably an
// EmitCapturedFieldLValue(...) call on the captured-statement context.
3097 Address LValueAddress = CapLVal.getAddress();
3098 CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
3099 LValueAddress.getElementType(),
3100 getContext().getDeclAlign(VD)),
3101 CapLVal.getType(),
// NOTE(review): one argument line dropped here (orig. 3102).
3103 CapLVal.getTBAAInfo());
3104 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3105 // in simd context.
3106 if (getLangOpts().OpenMP &&
// NOTE(review): second operand of the condition dropped here (orig. 3107).
3108 CapLVal.setNontemporal(/*Value=*/true);
3109 return CapLVal;
3110 }
3111
3112 assert(isa<BlockDecl>(CurCodeDecl));
3113 Address addr = GetAddrOfBlockDecl(VD);
3114 return MakeAddrLValue(addr, T, AlignmentSource::Decl);
3115 }
3116 }
3117
3118 // FIXME: We should be able to assert this for FunctionDecls as well!
3119 // FIXME: We should be able to assert this for all DeclRefExprs, not just
3120 // those with a valid source location.
3121 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
3122 !E->getLocation().isValid()) &&
3123 "Should not use decl without marking it used!");
3124
3125 if (ND->hasAttr<WeakRefAttr>()) {
3126 const auto *VD = cast<ValueDecl>(ND);
// NOTE(review): Aliasee initialization dropped here (orig. 3127) —
// presumably ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
3128 return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
3129 }
3130
3131 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3132 // Check if this is a global variable.
3133 if (VD->hasLinkage() || VD->isStaticDataMember())
3134 return EmitGlobalVarDeclLValue(*this, E, VD);
3135
3136 Address addr = Address::invalid();
3137
3138 // The variable should generally be present in the local decl map.
3139 auto iter = LocalDeclMap.find(VD);
3140 if (iter != LocalDeclMap.end()) {
3141 addr = iter->second;
3142
3143 // Otherwise, it might be static local we haven't emitted yet for
3144 // some reason; most likely, because it's in an outer function.
3145 } else if (VD->isStaticLocal()) {
3146 llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
// NOTE(review): argument line dropped here (orig. 3147).
3148 addr = Address(
3149 var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
3150
3151 // No other cases for now.
3152 } else {
3153 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
3154 }
3155
3156 // Handle threadlocal function locals.
3157 if (VD->getTLSKind() != VarDecl::TLS_None)
3158 addr = addr.withPointer(
3159 Builder.CreateThreadLocalAddress(addr.getBasePointer()),
// NOTE(review): closing argument dropped here (orig. 3160).
3161
3162 // Check for OpenMP threadprivate variables.
3163 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
3164 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
// NOTE(review): call head dropped here (orig. 3165) — presumably
// return EmitThreadPrivateVarDeclLValue(
3166 *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
3167 E->getExprLoc());
3168 }
3169
3170 // Drill into block byref variables.
3171 bool isBlockByref = VD->isEscapingByref();
3172 if (isBlockByref) {
3173 addr = emitBlockByrefAddress(addr, VD);
3174 }
3175
3176 // Drill into reference types.
3177 LValue LV = VD->getType()->isReferenceType() ?
3178 EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
// NOTE(review): false-arm MakeAddrLValue(...) dropped here (orig. 3179).
3180
3181 bool isLocalStorage = VD->hasLocalStorage();
3182
3183 bool NonGCable = isLocalStorage &&
3184 !VD->getType()->isReferenceType() &&
3185 !isBlockByref;
3186 if (NonGCable) {
// NOTE(review): one statement dropped here (orig. 3187).
3188 LV.setNonGC(true);
3189 }
3190
3191 bool isImpreciseLifetime =
3192 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
3193 if (isImpreciseLifetime)
// NOTE(review): two statements dropped here (orig. 3194-3195) — presumably
// the ARC imprecise-lifetime marking and setObjCGCLValueClass call.
3196 return LV;
3197 }
3198
3199 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
3200 return EmitFunctionDeclLValue(*this, E, FD);
3201
3202 // FIXME: While we're emitting a binding from an enclosing scope, all other
3203 // DeclRefExprs we see should be implicitly treated as if they also refer to
3204 // an enclosing scope.
3205 if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
3206 if (E->refersToEnclosingVariableOrCapture()) {
3207 auto *FD = LambdaCaptureFields.lookup(BD);
3208 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3209 }
3210 return EmitLValue(BD->getBinding());
3211 }
3212
3213 // We can form DeclRefExprs naming GUID declarations when reconstituting
3214 // non-type template parameters into expressions.
3215 if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
// NOTE(review): return statement dropped here (orig. 3216-3217) —
// presumably return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), ...).
3218
3219 if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
3220 auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
3221 auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());
3222
3223 if (AS != T.getAddressSpace()) {
3224 auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
3225 auto PtrTy = llvm::PointerType::get(CGM.getLLVMContext(), TargetAS);
// NOTE(review): call head dropped here (orig. 3226) — presumably the
// target-hook performAddrSpaceCast(...) producing ASC.
3227 CGM, ATPO.getPointer(), AS, T.getAddressSpace(), PtrTy);
3228 ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
3229 }
3230
3231 return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
3232 }
3233
3234 llvm_unreachable("Unhandled DeclRefExpr");
3235}
3236
3238 // __extension__ doesn't affect lvalue-ness.
3239 if (E->getOpcode() == UO_Extension)
3240 return EmitLValue(E->getSubExpr());
3241
3242 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
3243 switch (E->getOpcode()) {
3244 default: llvm_unreachable("Unknown unary operator lvalue!");
3245 case UO_Deref: {
3246 QualType T = E->getSubExpr()->getType()->getPointeeType();
3247 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
3248
3249 LValueBaseInfo BaseInfo;
3250 TBAAAccessInfo TBAAInfo;
3251 Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
3252 &TBAAInfo);
3253 LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
3255
3256 // We should not generate __weak write barrier on indirect reference
3257 // of a pointer to object; as in void foo (__weak id *param); *param = 0;
3258 // But, we continue to generate __strong write barrier on indirect write
3259 // into a pointer to object.
3260 if (getLangOpts().ObjC &&
3261 getLangOpts().getGC() != LangOptions::NonGC &&
3262 LV.isObjCWeak())
3264 return LV;
3265 }
3266 case UO_Real:
3267 case UO_Imag: {
3268 LValue LV = EmitLValue(E->getSubExpr());
3269 assert(LV.isSimple() && "real/imag on non-ordinary l-value");
3270
3271 // __real is valid on scalars. This is a faster way of testing that.
3272 // __imag can only produce an rvalue on scalars.
3273 if (E->getOpcode() == UO_Real &&
3274 !LV.getAddress().getElementType()->isStructTy()) {
3275 assert(E->getSubExpr()->getType()->isArithmeticType());
3276 return LV;
3277 }
3278
3279 QualType T = ExprTy->castAs<ComplexType>()->getElementType();
3280
3281 Address Component =
3282 (E->getOpcode() == UO_Real
3284 : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
3285 LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
3287 ElemLV.getQuals().addQualifiers(LV.getQuals());
3288 return ElemLV;
3289 }
3290 case UO_PreInc:
3291 case UO_PreDec: {
3292 LValue LV = EmitLValue(E->getSubExpr());
3293 bool isInc = E->getOpcode() == UO_PreInc;
3294
3295 if (E->getType()->isAnyComplexType())
3296 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
3297 else
3298 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
3299 return LV;
3300 }
3301 }
3302}
3303
3307}
3308
3312}
3313
3315 auto SL = E->getFunctionName();
3316 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
3317 StringRef FnName = CurFn->getName();
3318 if (FnName.starts_with("\01"))
3319 FnName = FnName.substr(1);
3320 StringRef NameItems[] = {
3321 PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
3322 std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
3323 if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
3324 std::string Name = std::string(SL->getString());
3325 if (!Name.empty()) {
3326 unsigned Discriminator =
3328 if (Discriminator)
3329 Name += "_" + Twine(Discriminator + 1).str();
3330 auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
3332 } else {
3333 auto C =
3334 CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
3336 }
3337 }
3338 auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
3340}
3341
3342/// Emit a type description suitable for use by a runtime sanitizer library. The
3343/// format of a type descriptor is
3344///
3345/// \code
3346/// { i16 TypeKind, i16 TypeInfo }
3347/// \endcode
3348///
3349/// followed by an array of i8 containing the type name with extra information
3350/// for BitInt. TypeKind is TK_Integer(0) for an integer, TK_Float(1) for a
3351/// floating point value, TK_BitInt(2) for BitInt and TK_Unknown(0xFFFF) for
3352/// anything else.
3354 // Only emit each type's descriptor once.
3355 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
3356 return C;
3357
3358 uint16_t TypeKind = TK_Unknown;
3359 uint16_t TypeInfo = 0;
3360 bool IsBitInt = false;
3361
3362 if (T->isIntegerType()) {
3363 TypeKind = TK_Integer;
3364 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3365 (T->isSignedIntegerType() ? 1 : 0);
3366 // Follow suggestion from discussion of issue 64100.
3367 // So we can write the exact amount of bits in TypeName after '\0'
3368 // making it <diagnostic-like type name>.'\0'.<32-bit width>.
3369 if (T->isSignedIntegerType() && T->getAs<BitIntType>()) {
3370 // Do a sanity checks as we are using 32-bit type to store bit length.
3371 assert(getContext().getTypeSize(T) > 0 &&
3372 " non positive amount of bits in __BitInt type");
3373 assert(getContext().getTypeSize(T) <= 0xFFFFFFFF &&
3374 " too many bits in __BitInt type");
3375
3376 // Redefine TypeKind with the actual __BitInt type if we have signed
3377 // BitInt.
3378 TypeKind = TK_BitInt;
3379 IsBitInt = true;
3380 }
3381 } else if (T->isFloatingType()) {
3382 TypeKind = TK_Float;
3384 }
3385
3386 // Format the type name as if for a diagnostic, including quotes and
3387 // optionally an 'aka'.
3388 SmallString<32> Buffer;
3390 (intptr_t)T.getAsOpaquePtr(), StringRef(),
3391 StringRef(), {}, Buffer, {});
3392
3393 if (IsBitInt) {
3394 // The Structure is: 0 to end the string, 32 bit unsigned integer in target
3395 // endianness, zero.
3396 char S[6] = {'\0', '\0', '\0', '\0', '\0', '\0'};
3397 const auto *EIT = T->castAs<BitIntType>();
3398 uint32_t Bits = EIT->getNumBits();
3399 llvm::support::endian::write32(S + 1, Bits,
3400 getTarget().isBigEndian()
3401 ? llvm::endianness::big
3402 : llvm::endianness::little);
3403 StringRef Str = StringRef(S, sizeof(S) / sizeof(decltype(S[0])));
3404 Buffer.append(Str);
3405 }
3406
3407 llvm::Constant *Components[] = {
3408 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3409 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3410 };
3411 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3412
3413 auto *GV = new llvm::GlobalVariable(
3414 CGM.getModule(), Descriptor->getType(),
3415 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3416 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3418
3419 // Remember the descriptor for this type.
3421
3422 return GV;
3423}
3424
3425llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3426 llvm::Type *TargetTy = IntPtrTy;
3427
3428 if (V->getType() == TargetTy)
3429 return V;
3430
3431 // Floating-point types which fit into intptr_t are bitcast to integers
3432 // and then passed directly (after zero-extension, if necessary).
3433 if (V->getType()->isFloatingPointTy()) {
3434 unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
3435 if (Bits <= TargetTy->getIntegerBitWidth())
3436 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
3437 Bits));
3438 }
3439
3440 // Integers which fit in intptr_t are zero-extended and passed directly.
3441 if (V->getType()->isIntegerTy() &&
3442 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
3443 return Builder.CreateZExt(V, TargetTy);
3444
3445 // Pointers are passed directly, everything else is passed by address.
3446 if (!V->getType()->isPointerTy()) {
3447 RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
3448 Builder.CreateStore(V, Ptr);
3449 V = Ptr.getPointer();
3450 }
3451 return Builder.CreatePtrToInt(V, TargetTy);
3452}
3453
3454/// Emit a representation of a SourceLocation for passing to a handler
3455/// in a sanitizer runtime library. The format for this data is:
3456/// \code
3457/// struct SourceLocation {
3458/// const char *Filename;
3459/// int32_t Line, Column;
3460/// };
3461/// \endcode
3462/// For an invalid SourceLocation, the Filename pointer is null.
3464 llvm::Constant *Filename;
3465 int Line, Column;
3466
3468 if (PLoc.isValid()) {
3469 StringRef FilenameString = PLoc.getFilename();
3470
3471 int PathComponentsToStrip =
3472 CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
3473 if (PathComponentsToStrip < 0) {
3474 assert(PathComponentsToStrip != INT_MIN);
3475 int PathComponentsToKeep = -PathComponentsToStrip;
3476 auto I = llvm::sys::path::rbegin(FilenameString);
3477 auto E = llvm::sys::path::rend(FilenameString);
3478 while (I != E && --PathComponentsToKeep)
3479 ++I;
3480
3481 FilenameString = FilenameString.substr(I - E);
3482 } else if (PathComponentsToStrip > 0) {
3483 auto I = llvm::sys::path::begin(FilenameString);
3484 auto E = llvm::sys::path::end(FilenameString);
3485 while (I != E && PathComponentsToStrip--)
3486 ++I;
3487
3488 if (I != E)
3489 FilenameString =
3490 FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
3491 else
3492 FilenameString = llvm::sys::path::filename(FilenameString);
3493 }
3494
3495 auto FilenameGV =
3496 CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
3498 cast<llvm::GlobalVariable>(
3499 FilenameGV.getPointer()->stripPointerCasts()));
3500 Filename = FilenameGV.getPointer();
3501 Line = PLoc.getLine();
3502 Column = PLoc.getColumn();
3503 } else {
3504 Filename = llvm::Constant::getNullValue(Int8PtrTy);
3505 Line = Column = 0;
3506 }
3507
3508 llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
3509 Builder.getInt32(Column)};
3510
3511 return llvm::ConstantStruct::getAnon(Data);
3512}
3513
namespace {
/// Specify under what conditions this check can be recovered.
///
/// NOTE(review): restored from extraction-damaged doxygen text — the dump
/// dropped the enumerator names (orig. 3518 and 3523); the names below
/// match the uses in getRecoverableKind/emitCheckHandlerCall. Verify
/// against llvm-project.
enum class CheckRecoverableKind {
  /// Always terminate program execution if this check fails.
  Unrecoverable,
  /// Check supports recovering, runtime has both fatal (noreturn) and
  /// non-fatal handlers for this check.
  Recoverable,
  /// Runtime conditionally aborts, always need to support recovery.
  AlwaysRecoverable
};
}
3526
3527static CheckRecoverableKind
3529 if (Ordinal == SanitizerKind::SO_Vptr)
3530 return CheckRecoverableKind::AlwaysRecoverable;
3531 else if (Ordinal == SanitizerKind::SO_Return ||
3532 Ordinal == SanitizerKind::SO_Unreachable)
3533 return CheckRecoverableKind::Unrecoverable;
3534 else
3535 return CheckRecoverableKind::Recoverable;
3536}
3537
3538namespace {
3539struct SanitizerHandlerInfo {
3540 char const *const Name;
3541 unsigned Version;
3542};
3543}
3544
3545const SanitizerHandlerInfo SanitizerHandlers[] = {
3546#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
3548#undef SANITIZER_CHECK
3549};
3550
3552 llvm::FunctionType *FnType,
3554 SanitizerHandler CheckHandler,
3555 CheckRecoverableKind RecoverKind, bool IsFatal,
3556 llvm::BasicBlock *ContBB, bool NoMerge) {
3557 assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
3558 std::optional<ApplyDebugLocation> DL;
3559 if (!CGF.Builder.getCurrentDebugLocation()) {
3560 // Ensure that the call has at least an artificial debug location.
3561 DL.emplace(CGF, SourceLocation());
3562 }
3563 bool NeedsAbortSuffix =
3564 IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
3565 bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
3566 const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
3567 const StringRef CheckName = CheckInfo.Name;
3568 std::string FnName = "__ubsan_handle_" + CheckName.str();
3569 if (CheckInfo.Version && !MinimalRuntime)
3570 FnName += "_v" + llvm::utostr(CheckInfo.Version);
3571 if (MinimalRuntime)
3572 FnName += "_minimal";
3573 if (NeedsAbortSuffix)
3574 FnName += "_abort";
3575 bool MayReturn =
3576 !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
3577
3578 llvm::AttrBuilder B(CGF.getLLVMContext());
3579 if (!MayReturn) {
3580 B.addAttribute(llvm::Attribute::NoReturn)
3581 .addAttribute(llvm::Attribute::NoUnwind);
3582 }
3583 B.addUWTableAttr(llvm::UWTableKind::Default);
3584
3585 llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
3586 FnType, FnName,
3587 llvm::AttributeList::get(CGF.getLLVMContext(),
3588 llvm::AttributeList::FunctionIndex, B),
3589 /*Local=*/true);
3590 llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
3591 NoMerge = NoMerge || !CGF.CGM.getCodeGenOpts().OptimizationLevel ||
3592 (CGF.CurCodeDecl && CGF.CurCodeDecl->hasAttr<OptimizeNoneAttr>());
3593 if (NoMerge)
3594 HandlerCall->addFnAttr(llvm::Attribute::NoMerge);
3595 if (!MayReturn) {
3596 HandlerCall->setDoesNotReturn();
3597 CGF.Builder.CreateUnreachable();
3598 } else {
3599 CGF.Builder.CreateBr(ContBB);
3600 }
3601}
3602
// CodeGenFunction::EmitCheck — emit a group of sanitizer checks: partition
// the conditions into trap / fatal / recoverable sets, optionally guard them
// with llvm.allow.ubsan.check, then emit the trap and the one or two
// __ubsan_handle_* handler calls with the packed static/dynamic arguments.
//
// NOTE(review): this text is a doxygen extraction — the declaration line
// (orig. 3603, presumably "void CodeGenFunction::EmitCheck(") and several
// hyperlinked lines are absent: orig. 3620 (recoverable-cond selector),
// 3625 (SanitizeMergeHandlers test), 3629 (guard-checks condition opening
// the block closed at orig. 3638), 3674-3675 (Args/ArgTypes SmallVector
// declarations) and 3689 (sanitizer-metadata call on InfoPtr). Code left
// byte-identical; restore from llvm-project before use.
3604 ArrayRef<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>> Checked,
3605 SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
3606 ArrayRef<llvm::Value *> DynamicArgs) {
3607 assert(IsSanitizerScope);
3608 assert(Checked.size() > 0);
3609 assert(CheckHandler >= 0 &&
3610 size_t(CheckHandler) < std::size(SanitizerHandlers));
3611 const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
3612
3613 llvm::Value *FatalCond = nullptr;
3614 llvm::Value *RecoverableCond = nullptr;
3615 llvm::Value *TrapCond = nullptr;
3616 bool NoMerge = false;
3617 for (auto &[Check, Ord] : Checked) {
3618 // -fsanitize-trap= overrides -fsanitize-recover=.
3619 llvm::Value *&Cond = CGM.getCodeGenOpts().SanitizeTrap.has(Ord) ? TrapCond
// NOTE(review): middle operand of the conditional dropped here (orig. 3620).
3621 ? RecoverableCond
3622 : FatalCond;
3623 Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
3624
// NOTE(review): condition guarding the NoMerge assignment dropped here
// (orig. 3625).
3626 NoMerge = true;
3627 }
3628
// NOTE(review): opening condition of the guarded block dropped here
// (orig. 3629) — presumably the ClSanitizeGuardChecks option.
3630 llvm::Value *Allow =
3631 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
3632 llvm::ConstantInt::get(CGM.Int8Ty, CheckHandler));
3633
3634 for (llvm::Value **Cond : {&FatalCond, &RecoverableCond, &TrapCond}) {
3635 if (*Cond)
3636 *Cond = Builder.CreateOr(*Cond, Builder.CreateNot(Allow));
3637 }
3638 }
3639
3640 if (TrapCond)
3641 EmitTrapCheck(TrapCond, CheckHandler, NoMerge);
3642 if (!FatalCond && !RecoverableCond)
3643 return;
3644
3645 llvm::Value *JointCond;
3646 if (FatalCond && RecoverableCond)
3647 JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
3648 else
3649 JointCond = FatalCond ? FatalCond : RecoverableCond;
3650 assert(JointCond);
3651
3652 CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
3653 assert(SanOpts.has(Checked[0].second));
3654#ifndef NDEBUG
3655 for (int i = 1, n = Checked.size(); i < n; ++i) {
3656 assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
3657 "All recoverable kinds in a single check must be same!");
3658 assert(SanOpts.has(Checked[i].second));
3659 }
3660#endif
3661
3662 llvm::BasicBlock *Cont = createBasicBlock("cont");
3663 llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
3664 llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
3665 // Give hint that we very much don't expect to execute the handler
3666 llvm::MDBuilder MDHelper(getLLVMContext());
3667 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3668 Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
3669 EmitBlock(Handlers);
3670
3671 // Handler functions take an i8* pointing to the (handler-specific) static
3672 // information block, followed by a sequence of intptr_t arguments
3673 // representing operand values.
// NOTE(review): Args/ArgTypes SmallVector declarations dropped here
// (orig. 3674-3675).
3676 if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
3677 Args.reserve(DynamicArgs.size() + 1);
3678 ArgTypes.reserve(DynamicArgs.size() + 1);
3679
3680 // Emit handler arguments and create handler function type.
3681 if (!StaticArgs.empty()) {
3682 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3683 auto *InfoPtr = new llvm::GlobalVariable(
3684 CGM.getModule(), Info->getType(), false,
3685 llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,
3686 llvm::GlobalVariable::NotThreadLocal,
3687 CGM.getDataLayout().getDefaultGlobalsAddressSpace());
3688 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
// NOTE(review): sanitizer-metadata call on InfoPtr dropped here (orig. 3689).
3690 Args.push_back(InfoPtr);
3691 ArgTypes.push_back(Args.back()->getType());
3692 }
3693
3694 for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
3695 Args.push_back(EmitCheckValue(DynamicArgs[i]));
3696 ArgTypes.push_back(IntPtrTy);
3697 }
3698 }
3699
3700 llvm::FunctionType *FnType =
3701 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
3702
3703 if (!FatalCond || !RecoverableCond) {
3704 // Simple case: we need to generate a single handler call, either
3705 // fatal, or non-fatal.
3706 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
3707 (FatalCond != nullptr), Cont, NoMerge);
3708 } else {
3709 // Emit two handler calls: first one for set of unrecoverable checks,
3710 // another one for recoverable.
3711 llvm::BasicBlock *NonFatalHandlerBB =
3712 createBasicBlock("non_fatal." + CheckName);
3713 llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
3714 Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
3715 EmitBlock(FatalHandlerBB);
3716 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
3717 NonFatalHandlerBB, NoMerge);
3718 EmitBlock(NonFatalHandlerBB);
3719 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
3720 Cont, NoMerge);
3721 }
3722
3723 EmitBlock(Cont);
3724}
3725
// NOTE(review): the declaration line for this function (original line 3726)
// is missing from this extract; from the body this is the CFI slow-path
// emitter — presumably CodeGenFunction::EmitCfiSlowPathCheck — TODO confirm
// against upstream. It branches on a "check passed" condition and, on the
// failing path, calls the out-of-line CFI runtime.
3727 SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond,
3728 llvm::ConstantInt *TypeId, llvm::Value *Ptr,
3729 ArrayRef<llvm::Constant *> StaticArgs) {
3730 llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
3731
3732 llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
3733 llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
3734
// Mark the slow path as unlikely so optimizers keep the hot path straight.
3735 llvm::MDBuilder MDHelper(getLLVMContext());
3736 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3737 BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
3738
3739 EmitBlock(CheckBB);
3740
// When this check is not in trap mode, call the diagnostic runtime entry
// point and pass it the static diagnostic data.
3741 bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Ordinal);
3742
3743 llvm::CallInst *CheckCall;
3744 llvm::FunctionCallee SlowPathFn;
3745 if (WithDiag) {
3746 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3747 auto *InfoPtr =
3748 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3749 llvm::GlobalVariable::PrivateLinkage, Info);
3750 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
// NOTE(review): original line 3751 is missing here (it follows the
// InfoPtr setup; contents not visible in this extract).
3752
3753 SlowPathFn = CGM.getModule().getOrInsertFunction(
3754 "__cfi_slowpath_diag",
3755 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
3756 false));
3757 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
3758 } else {
3759 SlowPathFn = CGM.getModule().getOrInsertFunction(
3760 "__cfi_slowpath",
3761 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
3762 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
3763 }
3764
// NOTE(review): original line 3765 is missing; line 3766 below is the
// argument of a call whose first line was lost in extraction.
3766 cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
3767 CheckCall->setDoesNotThrow();
3768
3769 EmitBlock(Cont);
3770}
3771
3772// Emit a stub for __cfi_check function so that the linker knows about this
3773// symbol in LTO mode.
// NOTE(review): the function declaration line (original 3774) and lines
// 3779/3783/3788/3794 are missing from this extract.
3775 llvm::Module *M = &CGM.getModule();
3776 ASTContext &C = getContext();
3777 QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);
3778
3780 ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
3781 ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
3782 ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
3784 FnArgs.push_back(&ArgCallsiteTypeId);
3785 FnArgs.push_back(&ArgAddr);
3786 FnArgs.push_back(&ArgCFICheckFailData);
3787 const CGFunctionInfo &FI =
3789
// __cfi_check must be weak so each module can provide its own copy; the
// CrossDSOCFI LTO pass fills in the real body later.
3790 llvm::Function *F = llvm::Function::Create(
3791 llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
3792 llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
3793 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3795 F->setAlignment(llvm::Align(4096));
3796 CGM.setDSOLocal(F);
3797
3798 llvm::LLVMContext &Ctx = M->getContext();
3799 llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
3800 // CrossDSOCFI pass is not executed if there is no executable code.
3801 SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
3802 llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
3803 llvm::ReturnInst::Create(Ctx, nullptr, BB);
3804}
3805
3806// This function is basically a switch over the CFI failure kind, which is
3807// extracted from CFICheckFailData (1st function argument). Each case is either
3808// llvm.trap or a call to one of the two runtime handlers, based on
3809// -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
3810// failure kind) traps, but this should really never happen. CFICheckFailData
3811// can be nullptr if the calling module has -fsanitize-trap behavior for this
3812// check kind; in this case __cfi_check_fail traps as well.
// NOTE(review): the declaration line (original 3813) and several interior
// lines (3816-3819, 3824, 3831, 3840, 3866, 3880, 3894) are missing from
// this extract; comments below only describe visible code.
3814 SanitizerScope SanScope(this);
3815 FunctionArgList Args;
3820 Args.push_back(&ArgData);
3821 Args.push_back(&ArgAddr);
3822
3823 const CGFunctionInfo &FI =
3825
3826 llvm::Function *F = llvm::Function::Create(
3827 llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
3828 llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
3829
3830 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3832 F->setVisibility(llvm::GlobalValue::HiddenVisibility);
3833
3834 StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
3835 SourceLocation());
3836
3837 // This function is not affected by NoSanitizeList. This function does
3838 // not have a source location, but "src:*" would still apply. Revert any
3839 // changes to SanOpts made in StartFunction.
3841
// Load the two arguments: the static failure-data pointer and the address
// that failed the CFI check.
3842 llvm::Value *Data =
3843 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
3844 CGM.getContext().VoidPtrTy, ArgData.getLocation());
3845 llvm::Value *Addr =
3846 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
3847 CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
3848
3849 // Data == nullptr means the calling module has trap behaviour for this check.
3850 llvm::Value *DataIsNotNullPtr =
3851 Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
3852 EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);
3853
// Hand-built layout matching the runtime's CFICheckFailData struct:
// { i8 kind, { ptr, i32, i32 } loc, ptr type-descriptor }.
3854 llvm::StructType *SourceLocationTy =
3855 llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
3856 llvm::StructType *CfiCheckFailDataTy =
3857 llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
3858
3859 llvm::Value *V = Builder.CreateConstGEP2_32(
3860 CfiCheckFailDataTy, Builder.CreatePointerCast(Data, UnqualPtrTy), 0, 0);
3861
3862 Address CheckKindAddr(V, Int8Ty, getIntAlign());
3863 llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
3864
3865 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
3867 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
3868 llvm::Value *ValidVtable = Builder.CreateZExt(
3869 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
3870 {Addr, AllVtables}),
3871 IntPtrTy);
3872
3873 const std::pair<int, SanitizerKind::SanitizerOrdinal> CheckKinds[] = {
3874 {CFITCK_VCall, SanitizerKind::SO_CFIVCall},
3875 {CFITCK_NVCall, SanitizerKind::SO_CFINVCall},
3876 {CFITCK_DerivedCast, SanitizerKind::SO_CFIDerivedCast},
3877 {CFITCK_UnrelatedCast, SanitizerKind::SO_CFIUnrelatedCast},
3878 {CFITCK_ICall, SanitizerKind::SO_CFIICall}};
3879
3881 Checks;
// For each kind: if this module enables the sanitizer, emit the recoverable
// handler call; otherwise trap. Cond is "kind != this case", so the handler
// fires only when the kinds match.
3882 for (auto CheckKindOrdinalPair : CheckKinds) {
3883 int Kind = CheckKindOrdinalPair.first;
3884 SanitizerKind::SanitizerOrdinal Ordinal = CheckKindOrdinalPair.second;
3885 llvm::Value *Cond =
3886 Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
3887 if (CGM.getLangOpts().Sanitize.has(Ordinal))
3888 EmitCheck(std::make_pair(Cond, Ordinal), SanitizerHandler::CFICheckFail,
3889 {}, {Data, Addr, ValidVtable});
3890 else
3891 EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
3892 }
3893
3895 // The only reference to this function will be created during LTO link.
3896 // Make sure it survives until then.
3897 CGM.addUsedGlobal(F);
3898}
3899
// NOTE(review): the declaration line (original 3900) and line 3906 are
// missing from this extract. When -fsanitize=unreachable is on, emit a
// diagnostic check that always fires (condition is constant false), then
// terminate the block with an IR `unreachable`.
3901 if (SanOpts.has(SanitizerKind::Unreachable)) {
3902 SanitizerScope SanScope(this);
3903 EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
3904 SanitizerKind::SO_Unreachable),
3905 SanitizerHandler::BuiltinUnreachable,
3907 }
3908 Builder.CreateUnreachable();
3909}
3910
// Emits a branch-to-trap guarded by \p Checked: control continues when the
// condition holds and hits a ubsantrap intrinsic (tagged with the handler id)
// when it does not. Trap blocks are reused per handler id unless merging is
// disabled. NOTE(review): original line 3944 is missing from this extract.
3911void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
3912 SanitizerHandler CheckHandlerID,
3913 bool NoMerge) {
3914 llvm::BasicBlock *Cont = createBasicBlock("cont");
3915
3916 // If we're optimizing, collapse all calls to trap down to just one per
3917 // check-type per function to save on code size.
3918 if ((int)TrapBBs.size() <= CheckHandlerID)
3919 TrapBBs.resize(CheckHandlerID + 1);
3920
3921 llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
3922
// Merging is only done at -O1+ and when the function is not optnone.
3923 NoMerge = NoMerge || !CGM.getCodeGenOpts().OptimizationLevel ||
3924 (CurCodeDecl && CurCodeDecl->hasAttr<OptimizeNoneAttr>());
3925
3926 if (TrapBB && !NoMerge) {
3927 auto Call = TrapBB->begin();
3928 assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
3929
// Merge debug locations so the shared trap is attributed to all call sites.
3930 Call->applyMergedLocation(Call->getDebugLoc(),
3931 Builder.getCurrentDebugLocation());
3932 Builder.CreateCondBr(Checked, Cont, TrapBB);
3933 } else {
3934 TrapBB = createBasicBlock("trap");
3935 Builder.CreateCondBr(Checked, Cont, TrapBB);
3936 EmitBlock(TrapBB);
3937
3938 llvm::CallInst *TrapCall =
3939 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
3940 llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));
3941
3942 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3943 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3945 TrapCall->addFnAttr(A);
3946 }
3947 if (NoMerge)
3948 TrapCall->addFnAttr(llvm::Attribute::NoMerge);
3949 TrapCall->setDoesNotReturn();
3950 TrapCall->setDoesNotThrow();
3951 Builder.CreateUnreachable();
3952 }
3953
3954 EmitBlock(Cont);
3955}
3956
// Emits a bare call to the given trap-like intrinsic, propagating the
// -ftrap-function name attribute when configured. NOTE(review): original
// lines 3963 and 3967 are missing from this extract (3967 presumably guards
// the NoMerge attribute below — TODO confirm against upstream).
3957llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
3958 llvm::CallInst *TrapCall =
3959 Builder.CreateCall(CGM.getIntrinsic(IntrID));
3960
3961 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3962 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3964 TrapCall->addFnAttr(A);
3965 }
3966
3968 TrapCall->addFnAttr(llvm::Attribute::NoMerge);
3969 return TrapCall;
3970}
3971
// NOTE(review): the declaration line (original 3972) and line 4000 (which
// defines EltType used below) are missing from this extract. Emits the
// address produced by array-to-pointer decay of \p E, optionally reporting
// base/TBAA info through the out-parameters.
3973 LValueBaseInfo *BaseInfo,
3974 TBAAAccessInfo *TBAAInfo) {
3975 assert(E->getType()->isArrayType() &&
3976 "Array to pointer decay must have array source type!");
3977
3978 // Expressions of array type can't be bitfields or vector elements.
3979 LValue LV = EmitLValue(E);
3980 Address Addr = LV.getAddress();
3981
3982 // If the array type was an incomplete type, we need to make sure
3983 // the decay ends up being the right type.
3984 llvm::Type *NewTy = ConvertType(E->getType());
3985 Addr = Addr.withElementType(NewTy);
3986
3987 // Note that VLA pointers are always decayed, so we don't need to do
3988 // anything here.
3989 if (!E->getType()->isVariableArrayType()) {
3990 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3991 "Expected pointer to array");
3992 Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
3993 }
3994
3995 // The result of this decay conversion points to an array element within the
3996 // base lvalue. However, since TBAA currently does not support representing
3997 // accesses to elements of member arrays, we conservatively represent accesses
3998 // to the pointee object as if it had no any base lvalue specified.
3999 // TODO: Support TBAA for member arrays.
4001 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
4002 if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
4003
4004 return Addr.withElementType(ConvertTypeForMem(EltType));
4005}
4006
4007/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
4008/// array to pointer, return the array subexpression.
4009static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
4010 // If this isn't just an array->pointer decay, bail out.
4011 const auto *CE = dyn_cast<CastExpr>(E);
4012 if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
4013 return nullptr;
4014
4015 // If this is a decay from variable width array, bail out.
4016 const Expr *SubExpr = CE->getSubExpr();
4017 if (SubExpr->getType()->isVariableArrayType())
4018 return nullptr;
4019
4020 return SubExpr;
4021}
4022
// NOTE(review): the declaration line (original 4023) and line 4033 (an
// argument of the EmitCheckedInBoundsGEP call) are missing from this extract.
// Raw-pointer GEP helper: checked inbounds form when \p inbounds, plain GEP
// otherwise.
4024 llvm::Type *elemType,
4025 llvm::Value *ptr,
4026 ArrayRef<llvm::Value*> indices,
4027 bool inbounds,
4028 bool signedIndices,
4029 SourceLocation loc,
4030 const llvm::Twine &name = "arrayidx") {
4031 if (inbounds) {
4032 return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
4034 name);
4035 } else {
4036 return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
4037 }
4038}
4039
// NOTE(review): the declaration lines (original 4040-4041) and line 4048 are
// missing from this extract. Address-based overload of the GEP helper: emits
// a checked inbounds GEP when requested, otherwise a plain GEP, carrying the
// caller-computed alignment through.
4042 llvm::Type *elementType, bool inbounds,
4043 bool signedIndices, SourceLocation loc,
4044 CharUnits align,
4045 const llvm::Twine &name = "arrayidx") {
4046 if (inbounds) {
4047 return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
4049 align, name);
4050 } else {
4051 return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
4052 }
4053}
4054
// NOTE(review): the declaration line (original 4055) is missing from this
// extract. Computes the alignment of an array element: exact when the index
// is a constant, otherwise the conservative per-element alignment.
4056 llvm::Value *idx,
4057 CharUnits eltSize) {
4058 // If we have a constant index, we can use the exact offset of the
4059 // element we're accessing.
4060 if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
4061 CharUnits offset = constantIdx->getZExtValue() * eltSize;
4062 return arrayAlign.alignmentAtOffset(offset);
4063
4064 // Otherwise, use the worst-case alignment for any element.
4065 } else {
4066 return arrayAlign.alignmentOfArrayElement(eltSize);
4067 }
4068}
4069
// NOTE(review): the declaration line (original 4070) is missing from this
// extract. Strips nested VLA layers off \p vla and returns the innermost
// non-VLA element type.
4071 const VariableArrayType *vla) {
4072 QualType eltType;
4073 do {
4074 eltType = vla->getElementType();
4075 } while ((vla = ctx.getAsVariableArrayType(eltType)));
4076 return eltType;
4077}
4078
// NOTE(review): the declaration line (original 4079) is missing from this
// extract; this is the Decl overload — true when the declaration carries the
// BPF preserve_static_offset attribute.
4080 return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
4081}
4082
4083static bool hasBPFPreserveStaticOffset(const Expr *E) {
4084 if (!E)
4085 return false;
4086 QualType PointeeType = E->getType()->getPointeeType();
4087 if (PointeeType.isNull())
4088 return false;
4089 if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
4090 return hasBPFPreserveStaticOffset(BaseDecl);
4091 return false;
4092}
4093
4094// Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
// NOTE(review): the declaration line (original 4095) is missing from this
// extract. Non-BPF targets are returned unchanged; on BPF the pointer is
// routed through the intrinsic while keeping element type and alignment.
4096 Address &Addr) {
4097 if (!CGF.getTarget().getTriple().isBPF())
4098 return Addr;
4099
4100 llvm::Function *Fn =
4101 CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
4102 llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
4103 return Address(Call, Addr.getElementType(), Addr.getAlignment());
4104}
4105
4106/// Given an array base, check whether its member access belongs to a record
4107/// with preserve_access_index attribute or not.
/// Returns false when there is no base expression or no debug info (the
/// preserve-access-index lowering needs debug type information).
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
4109 if (!ArrayBase || !CGF.getDebugInfo())
4110 return false;
4111
4112 // Only support base as either a MemberExpr or DeclRefExpr.
4113 // DeclRefExpr to cover cases like:
4114 // struct s { int a; int b[10]; };
4115 // struct s *p;
4116 // p[1].a
4117 // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
4118 // p->b[5] is a MemberExpr example.
4119 const Expr *E = ArrayBase->IgnoreImpCasts();
4120 if (const auto *ME = dyn_cast<MemberExpr>(E))
4121 return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4122
4123 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
4124 const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
4125 if (!VarDef)
4126 return false;
4127
4128 const auto *PtrT = VarDef->getType()->getAs<PointerType>();
4129 if (!PtrT)
4130 return false;
4131
// NOTE(review): original line 4133 is missing here (continuation of the
// PointeeT initializer).
4132 const auto *PointeeT = PtrT->getPointeeType()
4134 if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
4135 return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4136 return false;
4137 }
4138
4139 return false;
4140}
4141
// NOTE(review): the declaration lines (original 4142-4143) and lines
// 4167/4173 are missing from this extract. Alignment-computing overload of
// the array-subscript GEP helper; on BPF it can instead lower to the
// preserve.array.access.index intrinsic so relocations keep the original
// subscript.
4144 QualType eltType, bool inbounds,
4145 bool signedIndices, SourceLocation loc,
4146 QualType *arrayType = nullptr,
4147 const Expr *Base = nullptr,
4148 const llvm::Twine &name = "arrayidx") {
4149 // All the indices except that last must be zero.
4150#ifndef NDEBUG
4151 for (auto *idx : indices.drop_back())
4152 assert(isa<llvm::ConstantInt>(idx) &&
4153 cast<llvm::ConstantInt>(idx)->isZero());
4154#endif
4155
4156 // Determine the element size of the statically-sized base. This is
4157 // the thing that the indices are expressed in terms of.
4158 if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
4159 eltType = getFixedSizeElementType(CGF.getContext(), vla);
4160 }
4161
4162 // We can use that to compute the best alignment of the element.
4163 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
4164 CharUnits eltAlign =
4165 getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
4166
// NOTE(review): original line 4167 (the guard for the wrapping below) is
// missing from this extract.
4168 addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
4169
4170 llvm::Value *eltPtr;
4171 auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
// NOTE(review): original line 4173 (remainder of this condition) is missing.
4172 if (!LastIndex ||
4174 addr = emitArraySubscriptGEP(CGF, addr, indices,
4175 CGF.ConvertTypeForMem(eltType), inbounds,
4176 signedIndices, loc, eltAlign, name);
4177 return addr;
4178 } else {
4179 // Remember the original array subscript for bpf target
4180 unsigned idx = LastIndex->getZExtValue();
4181 llvm::DIType *DbgInfo = nullptr;
4182 if (arrayType)
4183 DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
4184 eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
4185 addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
4186 idx, DbgInfo);
4187 }
4188
4189 return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
4190}
4191
4192/// The offset of a field from the beginning of the record.
/// NOTE(review): the declaration line (original 4193) is missing from this
/// extract. Recursively searches \p RD (including nested record members) for
/// \p Field, accumulating the bit offset into \p Offset; returns true on
/// success.
4194 const FieldDecl *Field, int64_t &Offset) {
4195 ASTContext &Ctx = CGF.getContext();
4196 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
4197 unsigned FieldNo = 0;
4198
4199 for (const FieldDecl *FD : RD->fields()) {
4200 if (FD == Field) {
4201 Offset += Layout.getFieldOffset(FieldNo);
4202 return true;
4203 }
4204
// Descend into nested records; on a hit, add this member's own offset too.
4205 QualType Ty = FD->getType();
4206 if (Ty->isRecordType())
4207 if (getFieldOffsetInBits(CGF, Ty->getAsRecordDecl(), Field, Offset)) {
4208 Offset += Layout.getFieldOffset(FieldNo);
4209 return true;
4210 }
4211
// Union members all share field number 0, so only advance for non-unions.
4212 if (!RD->isUnion())
4213 ++FieldNo;
4214 }
4215
4216 return false;
4217}
4218
4219/// Returns the relative offset difference between \p FD1 and \p FD2.
4220/// \code
4221/// offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
4222/// \endcode
4223/// Both fields must be within the same struct.
/// Returns std::nullopt when the fields live in different records or either
/// offset cannot be computed. NOTE(review): original lines 4228 and 4230
/// (the initializers of the two outer-record variables) are missing from
/// this extract.
static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
4225 const FieldDecl *FD1,
4226 const FieldDecl *FD2) {
4227 const RecordDecl *FD1OuterRec =
4229 const RecordDecl *FD2OuterRec =
4231
4232 if (FD1OuterRec != FD2OuterRec)
4233 // Fields must be within the same RecordDecl.
4234 return std::optional<int64_t>();
4235
4236 int64_t FD1Offset = 0;
4237 if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
4238 return std::optional<int64_t>();
4239
4240 int64_t FD2Offset = 0;
4241 if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
4242 return std::optional<int64_t>();
4243
4244 return std::make_optional<int64_t>(FD1Offset - FD2Offset);
4245}
4246
// NOTE(review): the declaration line (original 4247) and interior lines
// 4294, 4387, 4396, 4437-4438 are missing from this extract. Emits an
// lvalue for an array-subscript expression, handling vector, ext-vector,
// VLA, ObjC-interface, decayed-array, and plain pointer bases.
4248 bool Accessed) {
4249 // The index must always be an integer, which is not an aggregate. Emit it
4250 // in lexical order (this complexity is, sadly, required by C++17).
4251 llvm::Value *IdxPre =
4252 (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
4253 bool SignedIndices = false;
4254 auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
4255 auto *Idx = IdxPre;
4256 if (E->getLHS() != E->getIdx()) {
4257 assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
4258 Idx = EmitScalarExpr(E->getIdx());
4259 }
4260
4261 QualType IdxTy = E->getIdx()->getType();
4262 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
4263 SignedIndices |= IdxSigned;
4264
4265 if (SanOpts.has(SanitizerKind::ArrayBounds))
4266 EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
4267
4268 // Extend or truncate the index type to 32 or 64-bits.
4269 if (Promote && Idx->getType() != IntPtrTy)
4270 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
4271
4272 return Idx;
4273 };
4274 IdxPre = nullptr;
4275
4276 // If the base is a vector type, then we are forming a vector element lvalue
4277 // with this subscript.
4278 if (E->getBase()->getType()->isSubscriptableVectorType() &&
4279 !isa<ExtVectorElementExpr>(E->getBase())) {
4280 // Emit the vector as an lvalue to get its address.
4281 LValue LHS = EmitLValue(E->getBase());
4282 auto *Idx = EmitIdxAfterBase(/*Promote*/false);
4283 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
4284 return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
4285 LHS.getBaseInfo(), TBAAAccessInfo());
4286 }
4287
4288 // All the other cases basically behave like simple offsetting.
4289
4290 // Handle the extvector case we ignored above.
4291 if (isa<ExtVectorElementExpr>(E->getBase())) {
4292 LValue LV = EmitLValue(E->getBase());
4293 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
// NOTE(review): original line 4294 (the Addr initialization for this case)
// is missing from this extract.
4295
4296 QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
4297 Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
4298 SignedIndices, E->getExprLoc());
4299 return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
4300 CGM.getTBAAInfoForSubobject(LV, EltType));
4301 }
4302
4303 LValueBaseInfo EltBaseInfo;
4304 TBAAAccessInfo EltTBAAInfo;
4305 Address Addr = Address::invalid();
4306 if (const VariableArrayType *vla =
4307 getContext().getAsVariableArrayType(E->getType())) {
4308 // The base must be a pointer, which is not an aggregate. Emit
4309 // it. It needs to be emitted first in case it's what captures
4310 // the VLA bounds.
4311 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4312 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4313
4314 // The element count here is the total number of non-VLA elements.
4315 llvm::Value *numElements = getVLASize(vla).NumElts;
4316
4317 // Effectively, the multiply by the VLA size is part of the GEP.
4318 // GEP indexes are signed, and scaling an index isn't permitted to
4319 // signed-overflow, so we use the same semantics for our explicit
4320 // multiply. We suppress this if overflow is not undefined behavior.
4321 if (getLangOpts().isSignedOverflowDefined()) {
4322 Idx = Builder.CreateMul(Idx, numElements);
4323 } else {
4324 Idx = Builder.CreateNSWMul(Idx, numElements);
4325 }
4326
4327 Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
4328 !getLangOpts().isSignedOverflowDefined(),
4329 SignedIndices, E->getExprLoc());
4330
4331 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
4332 // Indexing over an interface, as in "NSString *P; P[4];"
4333
4334 // Emit the base pointer.
4335 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4336 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4337
4338 CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
4339 llvm::Value *InterfaceSizeVal =
4340 llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
4341
4342 llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
4343
4344 // We don't necessarily build correct LLVM struct types for ObjC
4345 // interfaces, so we can't rely on GEP to do this scaling
4346 // correctly, so we need to cast to i8*. FIXME: is this actually
4347 // true? A lot of other things in the fragile ABI would break...
4348 llvm::Type *OrigBaseElemTy = Addr.getElementType();
4349
4350 // Do the GEP.
4351 CharUnits EltAlign =
4352 getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
4353 llvm::Value *EltPtr =
4354 emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
4355 ScaledIdx, false, SignedIndices, E->getExprLoc());
4356 Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
4357 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4358 // If this is A[i] where A is an array, the frontend will have decayed the
4359 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4360 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4361 // "gep x, i" here. Emit one "gep A, 0, i".
4362 assert(Array->getType()->isArrayType() &&
4363 "Array to pointer decay must have array source type!");
4364 LValue ArrayLV;
4365 // For simple multidimensional array indexing, set the 'accessed' flag for
4366 // better bounds-checking of the base expression.
4367 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4368 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4369 else
4370 ArrayLV = EmitLValue(Array);
4371 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4372
4373 if (SanOpts.has(SanitizerKind::ArrayBounds)) {
4374 // If the array being accessed has a "counted_by" attribute, generate
4375 // bounds checking code. The "count" field is at the top level of the
4376 // struct or in an anonymous struct, that's also at the top level. Future
4377 // expansions may allow the "count" to reside at any place in the struct,
4378 // but the value of "counted_by" will be a "simple" path to the count,
4379 // i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
4380 // similar to emit the correct GEP.
4381 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
4382 getLangOpts().getStrictFlexArraysLevel();
4383
// NOTE(review): original line 4387 (part of this condition) is missing
// from this extract.
4384 if (const auto *ME = dyn_cast<MemberExpr>(Array);
4385 ME &&
4386 ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel) &&
4388 const FieldDecl *FAMDecl = cast<FieldDecl>(ME->getMemberDecl());
4389 if (const FieldDecl *CountFD = FAMDecl->findCountedByField()) {
4390 if (std::optional<int64_t> Diff =
4391 getOffsetDifferenceInBits(*this, CountFD, FAMDecl)) {
4392 CharUnits OffsetDiff = CGM.getContext().toCharUnitsFromBits(*Diff);
4393
4394 // Create a GEP with a byte offset between the FAM and count and
4395 // use that to load the count value.
// NOTE(review): original line 4396 (start of the statement completed on
// the next line) is missing from this extract.
4397 ArrayLV.getAddress(), Int8PtrTy, Int8Ty);
4398
4399 llvm::Type *CountTy = ConvertType(CountFD->getType());
4400 llvm::Value *Res = Builder.CreateInBoundsGEP(
4401 Int8Ty, Addr.emitRawPointer(*this),
4402 Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep");
4403 Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
4404 ".counted_by.load");
4405
4406 // Now emit the bounds checking.
4407 EmitBoundsCheckImpl(E, Res, Idx, E->getIdx()->getType(),
4408 Array->getType(), Accessed);
4409 }
4410 }
4411 }
4412 }
4413
4414 // Propagate the alignment from the array itself to the result.
4415 QualType arrayType = Array->getType();
4416 Addr = emitArraySubscriptGEP(
4417 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
4418 E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
4419 E->getExprLoc(), &arrayType, E->getBase());
4420 EltBaseInfo = ArrayLV.getBaseInfo();
4421 EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
4422 } else {
4423 // The base must be a pointer; emit it with an estimate of its alignment.
4424 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4425 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4426 QualType ptrType = E->getBase()->getType();
4427 Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
4428 !getLangOpts().isSignedOverflowDefined(),
4429 SignedIndices, E->getExprLoc(), &ptrType,
4430 E->getBase());
4431 }
4432
4433 LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
4434
4435 if (getLangOpts().ObjC &&
4436 getLangOpts().getGC() != LangOptions::NonGC) {
// NOTE(review): original lines 4437-4438 (the body of this ObjC-GC branch)
// are missing from this extract.
4439 }
4440 return LV;
4441}
4442
4443llvm::Value *CodeGenFunction::EmitMatrixIndexExpr(const Expr *E) {
4444 llvm::Value *Idx = EmitScalarExpr(E);
4445 if (Idx->getType() == IntPtrTy)
4446 return Idx;
4447 bool IsSigned = E->getType()->isSignedIntegerOrEnumerationType();
4448 return Builder.CreateIntCast(Idx, IntPtrTy, IsSigned);
4449}
4450
// NOTE(review): the declaration line (original 4451) is missing from this
// extract. Emits an lvalue for a matrix subscript: the element index is
// column-major, computed as col * numRows + row.
4452 assert(
4453 !E->isIncomplete() &&
4454 "incomplete matrix subscript expressions should be rejected during Sema");
4455 LValue Base = EmitLValue(E->getBase());
4456
4457 // Extend or truncate the index type to 32 or 64-bits if needed.
4458 llvm::Value *RowIdx = EmitMatrixIndexExpr(E->getRowIdx());
4459 llvm::Value *ColIdx = EmitMatrixIndexExpr(E->getColumnIdx());
4460
4461 llvm::Value *NumRows = Builder.getIntN(
4462 RowIdx->getType()->getScalarSizeInBits(),
4463 E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
4464 llvm::Value *FinalIdx =
4465 Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
4466 return LValue::MakeMatrixElt(
4467 MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
4468 E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
4469}
4470
// NOTE(review): the declaration line (original 4471) is missing from this
// extract. Computes the base address for an array-section expression:
// nested sections are decayed like arrays; pointer-typed bases are loaded
// with merged base/TBAA info.
4472 LValueBaseInfo &BaseInfo,
4473 TBAAAccessInfo &TBAAInfo,
4474 QualType BaseTy, QualType ElTy,
4475 bool IsLowerBound) {
4476 LValue BaseLVal;
4477 if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
4478 BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
4479 if (BaseTy->isArrayType()) {
4480 Address Addr = BaseLVal.getAddress();
4481 BaseInfo = BaseLVal.getBaseInfo();
4482
4483 // If the array type was an incomplete type, we need to make sure
4484 // the decay ends up being the right type.
4485 llvm::Type *NewTy = CGF.ConvertType(BaseTy);
4486 Addr = Addr.withElementType(NewTy);
4487
4488 // Note that VLA pointers are always decayed, so we don't need to do
4489 // anything here.
4490 if (!BaseTy->isVariableArrayType()) {
4491 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4492 "Expected pointer to array");
4493 Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4494 }
4495
4496 return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
4497 }
// Pointer base: load the pointer value and merge its natural alignment and
// TBAA info with the caller-provided info.
4498 LValueBaseInfo TypeBaseInfo;
4499 TBAAAccessInfo TypeTBAAInfo;
4500 CharUnits Align =
4501 CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
4502 BaseInfo.mergeForCast(TypeBaseInfo);
4503 TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
4504 return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
4505 CGF.ConvertTypeForMem(ElTy), Align);
4506 }
4507 return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
4508}
4509
// Emits an lvalue for an array-section expression (used by OpenMP; the
// OpenACC form is rejected by the assert below). Computes the section's
// starting index (lower bound, or upper bound LB+Len-1 when a length/colon is
// present), then GEPs into the base — with special handling for VLAs and for
// decayed arrays.
// NOTE(review): this chunk is a doxygen-HTML extraction and the embedded
// numbering jumps (4509 -> 4511, 4515 -> 4517): the function signature
// (presumably CodeGenFunction::EmitArraySectionExpr) and the statement that
// initializes BaseTy were dropped. Restore from upstream before compiling.
4511 bool IsLowerBound) {
4512
4513 assert(!E->isOpenACCArraySection() &&
4514 "OpenACC Array section codegen not implemented");
4515
4517 QualType ResultExprTy;
4518 if (auto *AT = getContext().getAsArrayType(BaseTy))
4519 ResultExprTy = AT->getElementType();
4520 else
4521 ResultExprTy = BaseTy->getPointeeType();
4522 llvm::Value *Idx = nullptr;
4523 if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
4524 // Requesting lower bound or upper bound, but without provided length and
4525 // without ':' symbol for the default length -> length = 1.
4526 // Idx = LowerBound ?: 0;
4527 if (auto *LowerBound = E->getLowerBound()) {
4528 Idx = Builder.CreateIntCast(
4529 EmitScalarExpr(LowerBound), IntPtrTy,
4530 LowerBound->getType()->hasSignedIntegerRepresentation());
4531 } else
4532 Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
4533 } else {
4534 // Try to emit length or lower bound as constant. If this is possible, 1
4535 // is subtracted from constant length or lower bound. Otherwise, emit LLVM
4536 // IR (LB + Len) - 1.
4537 auto &C = CGM.getContext();
4538 auto *Length = E->getLength();
4539 llvm::APSInt ConstLength;
4540 if (Length) {
4541 // Idx = LowerBound + Length - 1;
4542 if (std::optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
4543 ConstLength = CL->zextOrTrunc(PointerWidthInBits);
4544 Length = nullptr;
4545 }
4546 auto *LowerBound = E->getLowerBound();
4547 llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
4548 if (LowerBound) {
4549 if (std::optional<llvm::APSInt> LB =
4550 LowerBound->getIntegerConstantExpr(C)) {
4551 ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
4552 LowerBound = nullptr;
4553 }
4554 }
// After constant-folding, a null Length/LowerBound pointer means "use the
// folded constant"; the -1 is folded into whichever side became constant.
4555 if (!Length)
4556 --ConstLength;
4557 else if (!LowerBound)
4558 --ConstLowerBound;
4559
4560 if (Length || LowerBound) {
4561 auto *LowerBoundVal =
4562 LowerBound
4563 ? Builder.CreateIntCast(
4564 EmitScalarExpr(LowerBound), IntPtrTy,
4565 LowerBound->getType()->hasSignedIntegerRepresentation())
4566 : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
4567 auto *LengthVal =
4568 Length
4569 ? Builder.CreateIntCast(
4570 EmitScalarExpr(Length), IntPtrTy,
4571 Length->getType()->hasSignedIntegerRepresentation())
4572 : llvm::ConstantInt::get(IntPtrTy, ConstLength);
4573 Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
4574 /*HasNUW=*/false,
4575 !getLangOpts().isSignedOverflowDefined());
// Only subtract 1 here when neither side was folded (otherwise the
// decrement already happened above on the constant side).
4576 if (Length && LowerBound) {
4577 Idx = Builder.CreateSub(
4578 Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
4579 /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
4580 }
4581 } else
4582 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
4583 } else {
4584 // Idx = ArraySize - 1;
4585 QualType ArrayTy = BaseTy->isPointerType()
4586 ? E->getBase()->IgnoreParenImpCasts()->getType()
4587 : BaseTy;
4588 if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
4589 Length = VAT->getSizeExpr();
4590 if (std::optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
4591 ConstLength = *L;
4592 Length = nullptr;
4593 }
4594 } else {
4595 auto *CAT = C.getAsConstantArrayType(ArrayTy);
4596 assert(CAT && "unexpected type for array initializer");
4597 ConstLength = CAT->getSize();
4598 }
4599 if (Length) {
4600 auto *LengthVal = Builder.CreateIntCast(
4601 EmitScalarExpr(Length), IntPtrTy,
4602 Length->getType()->hasSignedIntegerRepresentation());
4603 Idx = Builder.CreateSub(
4604 LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
4605 /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
4606 } else {
4607 ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
4608 --ConstLength;
4609 Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
4610 }
4611 }
4612 }
4613 assert(Idx);
4614
4615 Address EltPtr = Address::invalid();
4616 LValueBaseInfo BaseInfo;
4617 TBAAAccessInfo TBAAInfo;
4618 if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
4619 // The base must be a pointer, which is not an aggregate. Emit
4620 // it. It needs to be emitted first in case it's what captures
4621 // the VLA bounds.
4622 Address Base =
4623 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
4624 BaseTy, VLA->getElementType(), IsLowerBound);
4625 // The element count here is the total number of non-VLA elements.
4626 llvm::Value *NumElements = getVLASize(VLA).NumElts;
4627
4628 // Effectively, the multiply by the VLA size is part of the GEP.
4629 // GEP indexes are signed, and scaling an index isn't permitted to
4630 // signed-overflow, so we use the same semantics for our explicit
4631 // multiply. We suppress this if overflow is not undefined behavior.
4632 if (getLangOpts().isSignedOverflowDefined())
4633 Idx = Builder.CreateMul(Idx, NumElements);
4634 else
4635 Idx = Builder.CreateNSWMul(Idx, NumElements);
4636 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
4637 !getLangOpts().isSignedOverflowDefined(),
4638 /*signedIndices=*/false, E->getExprLoc());
4639 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4640 // If this is A[i] where A is an array, the frontend will have decayed the
4641 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4642 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4643 // "gep x, i" here. Emit one "gep A, 0, i".
4644 assert(Array->getType()->isArrayType() &&
4645 "Array to pointer decay must have array source type!");
4646 LValue ArrayLV;
4647 // For simple multidimensional array indexing, set the 'accessed' flag for
4648 // better bounds-checking of the base expression.
4649 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4650 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4651 else
4652 ArrayLV = EmitLValue(Array);
4653
4654 // Propagate the alignment from the array itself to the result.
4655 EltPtr = emitArraySubscriptGEP(
4656 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
4657 ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
4658 /*signedIndices=*/false, E->getExprLoc());
4659 BaseInfo = ArrayLV.getBaseInfo();
4660 TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
4661 } else {
4662 Address Base =
4663 emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo, BaseTy,
4664 ResultExprTy, IsLowerBound);
4665 EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
4666 !getLangOpts().isSignedOverflowDefined(),
4667 /*signedIndices=*/false, E->getExprLoc());
4668 }
4669
4670 return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
4671}
4672
// Emits an lvalue for an ext-vector element access (e.g. v.xyz). The base may
// be a pointer-to-vector (->), a vector glvalue, or a vector rvalue that is
// spilled to a temporary so it has an address.
// NOTE(review): extraction gaps in the embedded numbering (4673-74 signature,
// 4703, 4710 — presumably the SmallVector `Indices` declaration — and 4722 —
// presumably the `CElts` declaration) mean some statements are missing here.
4675 // Emit the base vector as an l-value.
4676 LValue Base;
4677
4678 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
4679 if (E->isArrow()) {
4680 // If it is a pointer to a vector, emit the address and form an lvalue with
4681 // it.
4682 LValueBaseInfo BaseInfo;
4683 TBAAAccessInfo TBAAInfo;
4684 Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
4685 const auto *PT = E->getBase()->getType()->castAs<PointerType>();
4686 Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
4687 Base.getQuals().removeObjCGCAttr();
4688 } else if (E->getBase()->isGLValue()) {
4689 // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
4690 // emit the base as an lvalue.
4691 assert(E->getBase()->getType()->isVectorType());
4692 Base = EmitLValue(E->getBase());
4693 } else {
4694 // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
4695 assert(E->getBase()->getType()->isVectorType() &&
4696 "Result must be a vector");
4697 llvm::Value *Vec = EmitScalarExpr(E->getBase());
4698
4699 // Store the vector to memory (because LValue wants an address).
4700 Address VecMem = CreateMemTemp(E->getBase()->getType());
4701 Builder.CreateStore(Vec, VecMem);
4702 Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
4704 }
4705
4706 QualType type =
4707 E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
4708
4709 // Encode the element access list into a vector of unsigned indices.
4711 E->getEncodedElementAccess(Indices);
4712
4713 if (Base.isSimple()) {
4714 llvm::Constant *CV =
4715 llvm::ConstantDataVector::get(getLLVMContext(), Indices);
4716 return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
4717 Base.getBaseInfo(), TBAAAccessInfo());
4718 }
4719 assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
4720
// Composed access (e.g. foo.yzw.x): remap the new indices through the base's
// existing element selection.
4721 llvm::Constant *BaseElts = Base.getExtVectorElts();
4723
4724 for (unsigned i = 0, e = Indices.size(); i != e; ++i)
4725 CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
4726 llvm::Constant *CV = llvm::ConstantVector::get(CElts);
4727 return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
4728 Base.getBaseInfo(), TBAAAccessInfo());
4729}
4730
// Emits an lvalue for a member expression (s.x / s->x): emit the base as an
// lvalue or pointer, run the member-access sanitizer type check, then drill
// into the field. Function members yield a function lvalue instead.
// NOTE(review): extraction gaps (4731-32 — the signature and the opening of a
// DeclRefExpr fast-path conditional — plus 4760 and 4766) mean this body is
// incomplete as shown; the first three lines below are the tail of that
// missing fast-path branch.
4733 EmitIgnoredExpr(E->getBase());
4734 return EmitDeclRefLValue(DRE);
4735 }
4736
4737 Expr *BaseExpr = E->getBase();
4738 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
4739 LValue BaseLV;
4740 if (E->isArrow()) {
4741 LValueBaseInfo BaseInfo;
4742 TBAAAccessInfo TBAAInfo;
4743 Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
4744 QualType PtrTy = BaseExpr->getType()->getPointeeType();
4745 SanitizerSet SkippedChecks;
// 'this' is known aligned and non-null; plain DeclRefExprs are non-null, so
// skip the corresponding UBSan checks to avoid useless instrumentation.
4746 bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
4747 if (IsBaseCXXThis)
4748 SkippedChecks.set(SanitizerKind::Alignment, true);
4749 if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
4750 SkippedChecks.set(SanitizerKind::Null, true);
4751 EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr, PtrTy,
4752 /*Alignment=*/CharUnits::Zero(), SkippedChecks);
4753 BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
4754 } else
4755 BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
4756
4757 NamedDecl *ND = E->getMemberDecl();
4758 if (auto *Field = dyn_cast<FieldDecl>(ND)) {
4759 LValue LV = EmitLValueForField(BaseLV, Field);
4761 if (getLangOpts().OpenMP) {
4762 // If the member was explicitly marked as nontemporal, mark it as
4763 // nontemporal. If the base lvalue is marked as nontemporal, mark access
4764 // to children as nontemporal too.
4765 if ((IsWrappedCXXThis(BaseExpr) &&
4767 BaseLV.isNontemporal())
4768 LV.setNontemporal(/*Value=*/true);
4769 }
4770 return LV;
4771 }
4772
4773 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
4774 return EmitFunctionDeclLValue(*this, E, FD);
4775
4776 llvm_unreachable("Unhandled member declaration!");
4777}
4778
4779/// Given that we are currently emitting a lambda, emit an l-value for
4780/// one of its members.
4781///
// NOTE(review): extraction gaps (4782 — rest of the signature — 4799, 4809,
// and 4821 — the one-argument overload's signature) leave this pair of
// definitions incomplete as shown.
4783 llvm::Value *ThisValue) {
4784 bool HasExplicitObjectParameter = false;
4785 const auto *MD = dyn_cast_if_present<CXXMethodDecl>(CurCodeDecl);
4786 if (MD) {
4787 HasExplicitObjectParameter = MD->isExplicitObjectMemberFunction();
4788 assert(MD->getParent()->isLambda());
4789 assert(MD->getParent() == Field->getParent());
4790 }
4791 LValue LambdaLV;
4792 if (HasExplicitObjectParameter) {
// C++23 explicit-object ("deducing this") lambda: the lambda object is the
// first parameter rather than an implicit 'this'.
4793 const VarDecl *D = cast<CXXMethodDecl>(CurCodeDecl)->getParamDecl(0);
4794 auto It = LocalDeclMap.find(D);
4795 assert(It != LocalDeclMap.end() && "explicit parameter not loaded?");
4796 Address AddrOfExplicitObject = It->getSecond();
4797 if (D->getType()->isReferenceType())
4798 LambdaLV = EmitLoadOfReferenceLValue(AddrOfExplicitObject, D->getType(),
4800 else
4801 LambdaLV = MakeAddrLValue(AddrOfExplicitObject,
4802 D->getType().getNonReferenceType());
4803
4804 // Make sure we have an lvalue to the lambda itself and not a derived class.
4805 auto *ThisTy = D->getType().getNonReferenceType()->getAsCXXRecordDecl();
4806 auto *LambdaTy = cast<CXXRecordDecl>(Field->getParent());
4807 if (ThisTy != LambdaTy) {
4808 const CXXCastPath &BasePathArray = getContext().LambdaCastPaths.at(MD);
4810 LambdaLV.getAddress(), ThisTy, BasePathArray.begin(),
4811 BasePathArray.end(), /*NullCheckValue=*/false, SourceLocation());
4812 LambdaLV = MakeAddrLValue(Base, QualType{LambdaTy->getTypeForDecl(), 0});
4813 }
4814 } else {
// Classic lambda: form the closure lvalue from the implicit this pointer.
4815 QualType LambdaTagType = getContext().getTagDeclType(Field->getParent());
4816 LambdaLV = MakeNaturalAlignAddrLValue(ThisValue, LambdaTagType);
4817 }
4818 return EmitLValueForField(LambdaLV, Field);
4819}
4820
// One-argument convenience overload (signature dropped by the extraction):
// forwards the current CXXABIThisValue as the closure pointer.
4822 return EmitLValueForLambdaField(Field, CXXABIThisValue);
4823}
4824
4825/// Get the field index in the debug info. The debug info structure/union
4826/// will ignore the unnamed bitfields.
// NOTE(review): line 4827 (the signature — presumably
// CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec, ...)) was
// dropped by the extraction.
4828 unsigned FieldIndex) {
4829 unsigned I = 0, Skipped = 0;
4830
// Count unnamed bitfields preceding FieldIndex; debug info omits them, so
// the debug-info index is the AST index minus that count.
4831 for (auto *F : Rec->getDefinition()->fields()) {
4832 if (I == FieldIndex)
4833 break;
4834 if (F->isUnnamedBitField())
4835 Skipped++;
4836 I++;
4837 }
4838
4839 return FieldIndex - Skipped;
4840}
4841
4842/// Get the address of a zero-sized field within a record. The resulting
4843/// address doesn't necessarily have the right type.
// NOTE(review): lines 4844 (signature) and 4846 (the start of the Offset
// initialization, fed by getFieldOffset below) were dropped by the
// extraction.
4845 const FieldDecl *Field) {
4847 CGF.getContext().getFieldOffset(Field));
// Fast path: field at offset zero aliases the base address directly.
4848 if (Offset.isZero())
4849 return Base;
4850 Base = Base.withElementType(CGF.Int8Ty);
4851 return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
4852}
4853
4854/// Drill down to the storage of a field without walking into
4855/// reference types.
4856///
4857/// The resulting address doesn't necessarily have the right type.
// NOTE(review): line 4858 (the signature) was dropped by the extraction.
4859 const FieldDecl *field) {
// Zero-sized fields have no LLVM field slot; compute their address by byte
// offset instead of a struct GEP.
4860 if (isEmptyFieldForLayout(CGF.getContext(), field))
4861 return emitAddrOfZeroSizeField(CGF, base, field);
4862
4863 const RecordDecl *rec = field->getParent();
4864
4865 unsigned idx =
4866 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4867
4868 return CGF.Builder.CreateStructGEP(base, idx, field->getName());
4869}
4870
// Computes a field address while preserving the original AST field index for
// debug-info-driven relocation (BPF CO-RE style access; see the
// BPFPreserveAccessIndexAttr uses elsewhere in this file).
// NOTE(review): lines 4871 (signature) and 4880 (the call that produces the
// returned value, fed by the arguments on 4881) were dropped by the
// extraction.
4872 Address addr, const FieldDecl *field) {
4873 const RecordDecl *rec = field->getParent();
4874 llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
4875 base.getType(), rec->getLocation());
4876
4877 unsigned idx =
4878 CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4879
4881 addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
4882}
4883
4884static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
4885 const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
4886 if (!RD)
4887 return false;
4888
4889 if (RD->isDynamicClass())
4890 return true;
4891
4892 for (const auto &Base : RD->bases())
4893 if (hasAnyVptr(Base.getType(), Context))
4894 return true;
4895
4896 for (const FieldDecl *Field : RD->fields())
4897 if (hasAnyVptr(Field->getType(), Context))
4898 return true;
4899
4900 return false;
4901}
4902
// Produces an lvalue for a field access given the base lvalue. Handles
// bitfields (including AAPCS volatile-bitfield storage), TBAA struct-path
// metadata, unions, invariant.group stripping under -fstrict-vtable-pointers,
// BPF preserve-access-index paths, and reference-typed fields (loaded
// eagerly).
// NOTE(review): doxygen-HTML extraction; numerous embedded-numbering gaps
// (4903 signature, 4909, 4915-16, 4920, 4931, 4981, 4990, 4994, 5004, 5016,
// 5018, 5024, 5066-67) mean statements are missing throughout this body.
4904 const FieldDecl *field) {
4905 LValueBaseInfo BaseInfo = base.getBaseInfo();
4906
4907 if (field->isBitField()) {
4908 const CGRecordLayout &RL =
4910 const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
// AAPCS volatile bitfields use a dedicated (volatile-sized) storage unit.
4911 const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
4912 CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
4913 Info.VolatileStorageSize != 0 &&
4914 field->getType()
4917 Address Addr = base.getAddress();
4918 unsigned Idx = RL.getLLVMFieldNo(field);
4919 const RecordDecl *rec = field->getParent();
4921 Addr = wrapWithBPFPreserveStaticOffset(*this, Addr);
4922 if (!UseVolatile) {
4923 if (!IsInPreservedAIRegion &&
4924 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
4925 if (Idx != 0)
4926 // For structs, we GEP to the field that the record layout suggests.
4927 Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
4928 } else {
4929 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
4930 getContext().getRecordType(rec), rec->getLocation());
4932 Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
4933 DbgInfo);
4934 }
4935 }
4936 const unsigned SS =
4937 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
4938 // Get the access type.
4939 llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
4940 Addr = Addr.withElementType(FieldIntTy);
4941 if (UseVolatile) {
4942 const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
4943 if (VolatileOffset)
4944 Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
4945 }
4946
4947 QualType fieldType =
4948 field->getType().withCVRQualifiers(base.getVRQualifiers());
4949 // TODO: Support TBAA for bit fields.
4950 LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
4951 return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
4952 TBAAAccessInfo());
4953 }
4954
4955 // Fields of may-alias structures are may-alias themselves.
4956 // FIXME: this should get propagated down through anonymous structs
4957 // and unions.
4958 QualType FieldType = field->getType();
4959 const RecordDecl *rec = field->getParent();
4960 AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
4961 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
4962 TBAAAccessInfo FieldTBAAInfo;
4963 if (base.getTBAAInfo().isMayAlias() ||
4964 rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
4965 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4966 } else if (rec->isUnion()) {
4967 // TODO: Support TBAA for unions.
4968 FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4969 } else {
4970 // If no base type been assigned for the base access, then try to generate
4971 // one for this base lvalue.
4972 FieldTBAAInfo = base.getTBAAInfo();
4973 if (!FieldTBAAInfo.BaseType) {
4974 FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
4975 assert(!FieldTBAAInfo.Offset &&
4976 "Nonzero offset for an access with no base type!");
4977 }
4978
4979 // Adjust offset to be relative to the base type.
4980 const ASTRecordLayout &Layout =
4982 unsigned CharWidth = getContext().getCharWidth();
4983 if (FieldTBAAInfo.BaseType)
4984 FieldTBAAInfo.Offset +=
4985 Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
4986
4987 // Update the final access type and size.
4988 FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
4989 FieldTBAAInfo.Size =
4991 }
4992
4993 Address addr = base.getAddress();
4995 addr = wrapWithBPFPreserveStaticOffset(*this, addr);
4996 if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
4997 if (CGM.getCodeGenOpts().StrictVTablePointers &&
4998 ClassDef->isDynamicClass()) {
4999 // Getting to any field of dynamic object requires stripping dynamic
5000 // information provided by invariant.group. This is because accessing
5001 // fields may leak the real address of dynamic object, which could result
5002 // in miscompilation when leaked pointer would be compared.
5003 auto *stripped =
5005 addr = Address(stripped, addr.getElementType(), addr.getAlignment());
5006 }
5007 }
5008
5009 unsigned RecordCVR = base.getVRQualifiers();
5010 if (rec->isUnion()) {
5011 // For unions, there is no pointer adjustment.
5012 if (CGM.getCodeGenOpts().StrictVTablePointers &&
5013 hasAnyVptr(FieldType, getContext()))
5014 // Because unions can easily skip invariant.barriers, we need to add
5015 // a barrier every time CXXRecord field with vptr is referenced.
5017
5019 (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
5020 // Remember the original union field index
5021 llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
5022 rec->getLocation());
5023 addr =
5025 addr.emitRawPointer(*this),
5026 getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
5027 addr.getElementType(), addr.getAlignment());
5028 }
5029
5030 if (FieldType->isReferenceType())
5031 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5032 } else {
5033 if (!IsInPreservedAIRegion &&
5034 (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
5035 // For structs, we GEP to the field that the record layout suggests.
5036 addr = emitAddrOfFieldStorage(*this, addr, field);
5037 else
5038 // Remember the original struct field index
5039 addr = emitPreserveStructAccess(*this, base, addr, field);
5040 }
5041
5042 // If this is a reference field, load the reference right now.
5043 if (FieldType->isReferenceType()) {
5044 LValue RefLVal =
5045 MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5046 if (RecordCVR & Qualifiers::Volatile)
5047 RefLVal.getQuals().addVolatile();
5048 addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
5049
5050 // Qualifiers on the struct don't apply to the referencee.
5051 RecordCVR = 0;
5052 FieldType = FieldType->getPointeeType();
5053 }
5054
5055 // Make sure that the address is pointing to the right type. This is critical
5056 // for both unions and structs.
5057 addr = addr.withElementType(CGM.getTypes().ConvertTypeForMem(FieldType));
5058
5059 if (field->hasAttr<AnnotateAttr>())
5060 addr = EmitFieldAnnotations(field, addr);
5061
5062 LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
5063 LV.getQuals().addCVRQualifiers(RecordCVR);
5064
5065 // __weak attribute on a field is ignored.
5068
5069 return LV;
5070}
5071
// Like EmitLValueForField, but used when *initializing* the field: for a
// reference-typed field it returns the address of the reference slot itself
// (no load), so the caller can store the referent pointer into it.
// NOTE(review): line 5073 (rest of the signature) was dropped by the
// extraction.
5072LValue
5074 const FieldDecl *Field) {
5075 QualType FieldType = Field->getType();
5076
5077 if (!FieldType->isReferenceType())
5078 return EmitLValueForField(Base, Field);
5079
5080 Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);
5081
5082 // Make sure that the address is pointing to the right type.
5083 llvm::Type *llvmType = ConvertTypeForMem(FieldType);
5084 V = V.withElementType(llvmType);
5085
5086 // TODO: Generate TBAA information that describes this access as a structure
5087 // member access and not just an access to an object of the field's type. This
5088 // should be similar to what we do in EmitLValueForField().
5089 LValueBaseInfo BaseInfo = Base.getBaseInfo();
5090 AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
5091 LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
5092 return MakeAddrLValue(V, FieldType, FieldBaseInfo,
5093 CGM.getTBAAInfoForSubobject(Base, FieldType));
5094}
5095
// Emits an lvalue for a compound literal: file-scope literals become globals;
// block-scope literals are materialized in a stack temporary and, in C,
// registered for destruction at end of scope.
// NOTE(review): extraction gaps (5096 signature, 5098, 5101, 5103, 5107,
// 5115-16 — including whatever initializes `Result` and `DtorKind`) mean
// statements are missing from this body.
5097 if (E->isFileScope()) {
5099 return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
5100 }
5102 // make sure to emit the VLA size.
5104
5105 Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
5106 const Expr *InitExpr = E->getInitializer();
5108
5109 EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
5110 /*Init*/ true);
5111
5112 // Block-scope compound literals are destroyed at the end of the enclosing
5113 // scope in C.
5114 if (!getLangOpts().CPlusPlus)
5117 E->getType(), getDestroyer(DtorKind),
5118 DtorKind & EHCleanup);
5119
5120 return Result;
5121}
5122
// Emits an lvalue for an init-list expression. Non-glvalue lists are
// aggregate temporaries (C++11 T{...}); glvalue lists must be transparent
// (a single init binding a reference), so delegate to the lone initializer.
// NOTE(review): line 5123 (the signature) was dropped by the extraction.
5124 if (!E->isGLValue())
5125 // Initializing an aggregate temporary in C++11: T{...}.
5126 return EmitAggExprToLValue(E);
5127
5128 // An lvalue initializer list must be initializing a reference.
5129 assert(E->isTransparent() && "non-transparent glvalue init list");
5130 return EmitLValue(E->getInit(0));
5131}
5132
5133/// Emit the operand of a glvalue conditional operator. This is either a glvalue
5134/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
5135/// LValue is returned and the current block has been terminated.
5136static std::optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
5137 const Expr *Operand) {
5138 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
5139 CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
5140 return std::nullopt;
5141 }
5142
5143 return CGF.EmitLValue(Operand);
5144}
5145
5146namespace {
5147// Handle the case where the condition is a constant evaluatable simple integer,
5148// which means we don't have to separately handle the true/false blocks.
// Fast path for a glvalue conditional whose condition constant-folds: emit
// only the live arm (unless the dead arm contains a label). A live
// throw-expression is emitted and an undef lvalue of the dead arm's type is
// returned, since the result is unreachable.
// NOTE(review): extraction gaps — line 5150 (rest of the signature, including
// the CGF and conditional-operator parameters) and 5161 (the statement under
// `if (CondExprBool)`, presumably a profile-counter increment) were dropped.
5149std::optional<LValue> HandleConditionalOperatorLValueSimpleCase(
5151 const Expr *condExpr = E->getCond();
5152 bool CondExprBool;
5153 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5154 const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
5155 if (!CondExprBool)
5156 std::swap(Live, Dead);
5157
5158 if (!CGF.ContainsLabel(Dead)) {
5159 // If the true case is live, we need to track its region.
5160 if (CondExprBool)
5162 CGF.markStmtMaybeUsed(Dead);
5163 // If a throw expression we emit it and return an undefined lvalue
5164 // because it can't be used.
5165 if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
5166 CGF.EmitCXXThrowExpr(ThrowExpr);
5167 llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
5168 llvm::Type *Ty = CGF.UnqualPtrTy;
5169 return CGF.MakeAddrLValue(
5170 Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
5171 Dead->getType());
5172 }
5173 return CGF.EmitLValue(Live);
5174 }
5175 }
// Condition did not fold (or the dead arm has a label): caller must emit
// both arms with real control flow.
5176 return std::nullopt;
5177}
// Result of emitting both arms of a conditional: the final insert blocks of
// each arm and the arm lvalues (nullopt when an arm was a throw-expression
// and produced no value).
5178struct ConditionalInfo {
5179 llvm::BasicBlock *lhsBlock, *rhsBlock;
5180 std::optional<LValue> LHS, RHS;
5181};
5182
5183// Create and generate the 3 blocks for a conditional operator.
5184// Leaves the 'current block' in the continuation basic block.
// BranchGenFunc is invoked once per arm and returns that arm's result
// (std::optional<LValue> or LValue, depending on the caller).
// NOTE(review): extraction gaps — line 5187 (the conditional-operator
// parameter of the signature) and 5200 were dropped.
5185template<typename FuncTy>
5186ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
5188 const FuncTy &BranchGenFunc) {
5189 ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
5190 CGF.createBasicBlock("cond.false"), std::nullopt,
5191 std::nullopt};
5192 llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");
5193
5194 CodeGenFunction::ConditionalEvaluation eval(CGF);
5195 CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
5196 CGF.getProfileCount(E));
5197
5198 // Any temporaries created here are conditional.
5199 CGF.EmitBlock(Info.lhsBlock);
5201 eval.begin(CGF);
5202 Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
5203 eval.end(CGF);
// Re-read the insert block: emitting the arm may have changed it.
5204 Info.lhsBlock = CGF.Builder.GetInsertBlock();
5205
// Only branch to the continuation if the arm produced a value (a throw
// already terminated its block).
5206 if (Info.LHS)
5207 CGF.Builder.CreateBr(endBlock);
5208
5209 // Any temporaries created here are conditional.
5210 CGF.EmitBlock(Info.rhsBlock);
5211 eval.begin(CGF);
5212 Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
5213 eval.end(CGF);
5214 Info.rhsBlock = CGF.Builder.GetInsertBlock();
5215 CGF.EmitBlock(endBlock);
5216
5217 return Info;
5218}
5219} // namespace
5220
// Emits a conditional operator whose result is ignored: aggregate ?: is
// emitted as an aggregate, a constant-foldable condition takes the simple
// path, otherwise both arms are emitted purely for side effects.
// NOTE(review): extraction gaps — lines 5221-22 (the signature) and 5225
// (the assert whose message string appears on the next line) were dropped.
5223 if (!E->isGLValue()) {
5224 // ?: here should be an aggregate.
5226 "Unexpected conditional operator!");
5227 return (void)EmitAggExprToLValue(E);
5228 }
5229
5230 OpaqueValueMapping binding(*this, E);
5231 if (HandleConditionalOperatorLValueSimpleCase(*this, E))
5232 return;
5233
5234 EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
5235 CGF.EmitIgnoredExpr(E);
5236 return LValue{};
5237 });
5238}
// Emits an lvalue for a glvalue conditional operator: tries the
// constant-condition fast path, otherwise emits both arms and merges their
// addresses (presumably via a phi built from the two arm blocks — the
// merge statement itself fell in an extraction gap).
// NOTE(review): extraction gaps — lines 5239-40 (signature), 5265 (start of
// the statement producing `result`) and 5271 (start of the statement
// producing `TBAAInfo`) were dropped.
5241 if (!expr->isGLValue()) {
5242 // ?: here should be an aggregate.
5243 assert(hasAggregateEvaluationKind(expr->getType()) &&
5244 "Unexpected conditional operator!");
5245 return EmitAggExprToLValue(expr);
5246 }
5247
5248 OpaqueValueMapping binding(*this, expr);
5249 if (std::optional<LValue> Res =
5250 HandleConditionalOperatorLValueSimpleCase(*this, expr))
5251 return *Res;
5252
5253 ConditionalInfo Info = EmitConditionalBlocks(
5254 *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
5255 return EmitLValueOrThrowExpression(CGF, E);
5256 });
5257
// Only simple (address-bearing) lvalues can be merged across the branch.
5258 if ((Info.LHS && !Info.LHS->isSimple()) ||
5259 (Info.RHS && !Info.RHS->isSimple()))
5260 return EmitUnsupportedLValue(expr, "conditional operator");
5261
5262 if (Info.LHS && Info.RHS) {
5263 Address lhsAddr = Info.LHS->getAddress();
5264 Address rhsAddr = Info.RHS->getAddress();
5266 lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
5267 Builder.GetInsertBlock(), expr->getType());
5268 AlignmentSource alignSource =
5269 std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
5270 Info.RHS->getBaseInfo().getAlignmentSource());
5272 Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
5273 return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
5274 TBAAInfo);
5275 } else {
// Exactly one arm was a throw; the other arm's lvalue is the result.
5276 assert((Info.LHS || Info.RHS) &&
5277 "both operands of glvalue conditional are throw-expressions?");
5278 return Info.LHS ? *Info.LHS : *Info.RHS;
5279 }
5280}
5281
5282/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
5283/// type. If the cast is to a reference, we can have the usual lvalue result,
5284/// otherwise if a cast is needed by the code generator in an lvalue context,
5285/// then it must mean that we need the address of an aggregate in order to
5286/// access one of its members. This can happen for all the reasons that casts
5287/// are permitted with aggregate result, including noop aggregate casts, and
5288/// cast from scalar to union.
// NOTE(review): doxygen-HTML extraction; embedded-numbering gaps (5289
// signature, 5355, 5394, 5402, 5413, 5419-20, 5424, 5429, 5437, 5441, 5446,
// 5450, 5455, 5461, 5463) mean several statements — typically the calls that
// produce Base/Derived/V and the TBAA arguments — are missing below.
5290 switch (E->getCastKind()) {
5291 case CK_ToVoid:
5292 case CK_BitCast:
5293 case CK_LValueToRValueBitCast:
5294 case CK_ArrayToPointerDecay:
5295 case CK_FunctionToPointerDecay:
5296 case CK_NullToMemberPointer:
5297 case CK_NullToPointer:
5298 case CK_IntegralToPointer:
5299 case CK_PointerToIntegral:
5300 case CK_PointerToBoolean:
5301 case CK_IntegralCast:
5302 case CK_BooleanToSignedIntegral:
5303 case CK_IntegralToBoolean:
5304 case CK_IntegralToFloating:
5305 case CK_FloatingToIntegral:
5306 case CK_FloatingToBoolean:
5307 case CK_FloatingCast:
5308 case CK_FloatingRealToComplex:
5309 case CK_FloatingComplexToReal:
5310 case CK_FloatingComplexToBoolean:
5311 case CK_FloatingComplexCast:
5312 case CK_FloatingComplexToIntegralComplex:
5313 case CK_IntegralRealToComplex:
5314 case CK_IntegralComplexToReal:
5315 case CK_IntegralComplexToBoolean:
5316 case CK_IntegralComplexCast:
5317 case CK_IntegralComplexToFloatingComplex:
5318 case CK_DerivedToBaseMemberPointer:
5319 case CK_BaseToDerivedMemberPointer:
5320 case CK_MemberPointerToBoolean:
5321 case CK_ReinterpretMemberPointer:
5322 case CK_AnyPointerToBlockPointerCast:
5323 case CK_ARCProduceObject:
5324 case CK_ARCConsumeObject:
5325 case CK_ARCReclaimReturnedObject:
5326 case CK_ARCExtendBlockObject:
5327 case CK_CopyAndAutoreleaseBlockObject:
5328 case CK_IntToOCLSampler:
5329 case CK_FloatingToFixedPoint:
5330 case CK_FixedPointToFloating:
5331 case CK_FixedPointCast:
5332 case CK_FixedPointToBoolean:
5333 case CK_FixedPointToIntegral:
5334 case CK_IntegralToFixedPoint:
5335 case CK_MatrixCast:
5336 case CK_HLSLVectorTruncation:
5337 case CK_HLSLArrayRValue:
5338 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5339
5340 case CK_Dependent:
5341 llvm_unreachable("dependent cast kind in IR gen!");
5342
5343 case CK_BuiltinFnToFnPtr:
5344 llvm_unreachable("builtin functions are handled elsewhere");
5345
5346 // These are never l-values; just use the aggregate emission code.
5347 case CK_NonAtomicToAtomic:
5348 case CK_AtomicToNonAtomic:
5349 return EmitAggExprToLValue(E);
5350
5351 case CK_Dynamic: {
5352 LValue LV = EmitLValue(E->getSubExpr());
5353 Address V = LV.getAddress();
5354 const auto *DCE = cast<CXXDynamicCastExpr>(E);
5356 }
5357
5358 case CK_ConstructorConversion:
5359 case CK_UserDefinedConversion:
5360 case CK_CPointerToObjCPointerCast:
5361 case CK_BlockPointerToObjCPointerCast:
5362 case CK_LValueToRValue:
5363 return EmitLValue(E->getSubExpr());
5364
5365 case CK_NoOp: {
5366 // CK_NoOp can model a qualification conversion, which can remove an array
5367 // bound and change the IR type.
5368 // FIXME: Once pointee types are removed from IR, remove this.
5369 LValue LV = EmitLValue(E->getSubExpr());
5370 // Propagate the volatile qualifer to LValue, if exist in E.
5371 if (E->changesVolatileQualification())
5372 LV.getQuals() = E->getType().getQualifiers();
5373 if (LV.isSimple()) {
5374 Address V = LV.getAddress();
5375 if (V.isValid()) {
5376 llvm::Type *T = ConvertTypeForMem(E->getType());
5377 if (V.getElementType() != T)
5378 LV.setAddress(V.withElementType(T));
5379 }
5380 }
5381 return LV;
5382 }
5383
5384 case CK_UncheckedDerivedToBase:
5385 case CK_DerivedToBase: {
5386 const auto *DerivedClassTy =
5387 E->getSubExpr()->getType()->castAs<RecordType>();
5388 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
5389
5390 LValue LV = EmitLValue(E->getSubExpr());
5391 Address This = LV.getAddress();
5392
5393 // Perform the derived-to-base conversion
5395 This, DerivedClassDecl, E->path_begin(), E->path_end(),
5396 /*NullCheckValue=*/false, E->getExprLoc());
5397
5398 // TODO: Support accesses to members of base classes in TBAA. For now, we
5399 // conservatively pretend that the complete object is of the base class
5400 // type.
5401 return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
5403 }
5404 case CK_ToUnion:
5405 return EmitAggExprToLValue(E);
5406 case CK_BaseToDerived: {
5407 const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
5408 auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
5409
5410 LValue LV = EmitLValue(E->getSubExpr());
5411
5412 // Perform the base-to-derived conversion
5414 LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
5415 /*NullCheckValue=*/false);
5416
5417 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
5418 // performed and the object is not of the derived type.
5421 E->getType());
5422
5423 if (SanOpts.has(SanitizerKind::CFIDerivedCast))
5425 /*MayBeNull=*/false, CFITCK_DerivedCast,
5426 E->getBeginLoc());
5427
5428 return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
5430 }
5431 case CK_LValueBitCast: {
5432 // This must be a reinterpret_cast (or c-style equivalent).
5433 const auto *CE = cast<ExplicitCastExpr>(E);
5434
5435 CGM.EmitExplicitCastExprType(CE, this);
5436 LValue LV = EmitLValue(E->getSubExpr());
5438 ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
5439
5440 if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
5442 /*MayBeNull=*/false, CFITCK_UnrelatedCast,
5443 E->getBeginLoc());
5444
5445 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5447 }
5448 case CK_AddressSpaceConversion: {
5449 LValue LV = EmitLValue(E->getSubExpr());
5451 llvm::Value *V = getTargetHooks().performAddrSpaceCast(
5452 *this, LV.getPointer(*this),
5453 E->getSubExpr()->getType().getAddressSpace(),
5454 E->getType().getAddressSpace(), ConvertType(DestTy));
5456 LV.getAddress().getAlignment()),
5457 E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
5458 }
5459 case CK_ObjCObjectLValueCast: {
5460 LValue LV = EmitLValue(E->getSubExpr());
5462 return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
5464 }
5465 case CK_ZeroToOCLOpaqueType:
5466 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
5467
5468 case CK_VectorSplat: {
5469 // LValue results of vector splats are only supported in HLSL.
5470 if (!getLangOpts().HLSL)
5471 return EmitUnsupportedLValue(E, "unexpected cast lvalue");
5472 return EmitLValue(E->getSubExpr());
5473 }
5474 }
5475
5476 llvm_unreachable("Unhandled lvalue cast kind?");
5477}
5478
5482}
5483
5484std::pair<LValue, LValue>
5486 // Emitting the casted temporary through an opaque value.
5487 LValue BaseLV = EmitLValue(E->getArgLValue());
5488 OpaqueValueMappingData::bind(*this, E->getOpaqueArgLValue(), BaseLV);
5489
5490 QualType ExprTy = E->getType();
5491 Address OutTemp = CreateIRTemp(ExprTy);
5492 LValue TempLV = MakeAddrLValue(OutTemp, ExprTy);
5493
5494 if (E->isInOut())
5495 EmitInitializationToLValue(E->getCastedTemporary()->getSourceExpr(),
5496 TempLV);
5497
5498 OpaqueValueMappingData::bind(*this, E->getCastedTemporary(), TempLV);
5499 return std::make_pair(BaseLV, TempLV);
5500}
5501
5503 CallArgList &Args, QualType Ty) {
5504
5505 auto [BaseLV, TempLV] = EmitHLSLOutArgLValues(E, Ty);
5506
5507 llvm::Value *Addr = TempLV.getAddress().getBasePointer();
5508 llvm::Type *ElTy = ConvertTypeForMem(TempLV.getType());
5509
5510 llvm::TypeSize Sz = CGM.getDataLayout().getTypeAllocSize(ElTy);
5511
5512 llvm::Value *LifetimeSize = EmitLifetimeStart(Sz, Addr);
5513
5514 Address TmpAddr(Addr, ElTy, TempLV.getAlignment());
5515 Args.addWriteback(BaseLV, TmpAddr, nullptr, E->getWritebackCast(),
5516 LifetimeSize);
5517 Args.add(RValue::get(TmpAddr, *this), Ty);
5518 return TempLV;
5519}
5520
5521LValue
5524
5525 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
5526 it = OpaqueLValues.find(e);
5527
5528 if (it != OpaqueLValues.end())
5529 return it->second;
5530
5531 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
5532 return EmitLValue(e->getSourceExpr());
5533}
5534
5535RValue
5538
5539 llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
5540 it = OpaqueRValues.find(e);
5541
5542 if (it != OpaqueRValues.end())
5543 return it->second;
5544
5545 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
5546 return EmitAnyExpr(e->getSourceExpr());
5547}
5548
5550 const FieldDecl *FD,
5552 QualType FT = FD->getType();
5553 LValue FieldLV = EmitLValueForField(LV, FD);
5554 switch (getEvaluationKind(FT)) {
5555 case TEK_Complex:
5556 return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
5557 case TEK_Aggregate:
5558 return FieldLV.asAggregateRValue();
5559 case TEK_Scalar:
5560 // This routine is used to load fields one-by-one to perform a copy, so
5561 // don't load reference fields.
5562 if (FD->getType()->isReferenceType())
5563 return RValue::get(FieldLV.getPointer(*this));
5564 // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
5565 // primitive load.
5566 if (FieldLV.isBitField())
5567 return EmitLoadOfLValue(FieldLV, Loc);
5568 return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
5569 }
5570 llvm_unreachable("bad evaluation kind");
5571}
5572
5573//===--------------------------------------------------------------------===//
5574// Expression Emission
5575//===--------------------------------------------------------------------===//
5576
5578 ReturnValueSlot ReturnValue,
5579 llvm::CallBase **CallOrInvoke) {
5580 llvm::CallBase *CallOrInvokeStorage;
5581 if (!CallOrInvoke) {
5582 CallOrInvoke = &CallOrInvokeStorage;
5583 }
5584
5585 auto AddCoroElideSafeOnExit = llvm::make_scope_exit([&] {
5586 if (E->isCoroElideSafe()) {
5587 auto *I = *CallOrInvoke;
5588 if (I)
5589 I->addFnAttr(llvm::Attribute::CoroElideSafe);
5590 }
5591 });
5592
5593 // Builtins never have block type.
5594 if (E->getCallee()->getType()->isBlockPointerType())
5595 return EmitBlockCallExpr(E, ReturnValue, CallOrInvoke);
5596
5597 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
5598 return EmitCXXMemberCallExpr(CE, ReturnValue, CallOrInvoke);
5599
5600 if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
5601 return EmitCUDAKernelCallExpr(CE, ReturnValue, CallOrInvoke);
5602
5603 // A CXXOperatorCallExpr is created even for explicit object methods, but
5604 // these should be treated like static function call.
5605 if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
5606 if (const auto *MD =
5607 dyn_cast_if_present<CXXMethodDecl>(CE->getCalleeDecl());
5608 MD && MD->isImplicitObjectMemberFunction())
5609 return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue, CallOrInvoke);
5610
5611 CGCallee callee = EmitCallee(E->getCallee());
5612
5613 if (callee.isBuiltin()) {
5614 return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
5615 E, ReturnValue);
5616 }
5617
5618 if (callee.isPseudoDestructor()) {
5620 }
5621
5622 return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue,
5623 /*Chain=*/nullptr, CallOrInvoke);
5624}
5625
5626/// Emit a CallExpr without considering whether it might be a subclass.
5628 ReturnValueSlot ReturnValue,
5629 llvm::CallBase **CallOrInvoke) {
5630 CGCallee Callee = EmitCallee(E->getCallee());
5631 return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
5632 /*Chain=*/nullptr, CallOrInvoke);
5633}
5634
5635// Detect the unusual situation where an inline version is shadowed by a
5636// non-inline version. In that case we should pick the external one
5637// everywhere. That's GCC behavior too.
5639 for (const FunctionDecl *PD = FD; PD; PD = PD->getPreviousDecl())
5640 if (!PD->isInlineBuiltinDeclaration())
5641 return false;
5642 return true;
5643}
5644
5646 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
5647
5648 if (auto builtinID = FD->getBuiltinID()) {
5649 std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
5650 std::string NoBuiltins = "no-builtins";
5651
5652 StringRef Ident = CGF.CGM.getMangledName(GD);
5653 std::string FDInlineName = (Ident + ".inline").str();
5654
5655 bool IsPredefinedLibFunction =
5657 bool HasAttributeNoBuiltin =
5658 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
5659 CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
5660
 5661   // When directly calling an inline builtin, call it through its mangled
5662 // name to make it clear it's not the actual builtin.
5663 if (CGF.CurFn->getName() != FDInlineName &&
5665 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
5666 llvm::Function *Fn = llvm::cast<llvm::Function>(CalleePtr);
5667 llvm::Module *M = Fn->getParent();
5668 llvm::Function *Clone = M->getFunction(FDInlineName);
5669 if (!Clone) {
5670 Clone = llvm::Function::Create(Fn->getFunctionType(),
5671 llvm::GlobalValue::InternalLinkage,
5672 Fn->getAddressSpace(), FDInlineName, M);
5673 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
5674 }
5675 return CGCallee::forDirect(Clone, GD);
5676 }
5677
5678 // Replaceable builtins provide their own implementation of a builtin. If we
5679 // are in an inline builtin implementation, avoid trivial infinite
5680 // recursion. Honor __attribute__((no_builtin("foo"))) or
5681 // __attribute__((no_builtin)) on the current function unless foo is
5682 // not a predefined library function which means we must generate the
5683 // builtin no matter what.
5684 else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
5685 return CGCallee::forBuiltin(builtinID, FD);
5686 }
5687
5688 llvm::Constant *CalleePtr = CGF.CGM.getRawFunctionPointer(GD);
5689 if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
5690 FD->hasAttr<CUDAGlobalAttr>())
5691 CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
5692 cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));
5693
5694 return CGCallee::forDirect(CalleePtr, GD);
5695}
5696
5698 E = E->IgnoreParens();
5699
5700 // Look through function-to-pointer decay.
5701 if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
5702 if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
5703 ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
5704 return EmitCallee(ICE->getSubExpr());
5705 }
5706
5707 // Resolve direct calls.
5708 } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
5709 if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
5710 return EmitDirectCallee(*this, FD);
5711 }
5712 } else if (auto ME = dyn_cast<MemberExpr>(E)) {
5713 if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
5714 EmitIgnoredExpr(ME->getBase());
5715 return EmitDirectCallee(*this, FD);
5716 }
5717
5718 // Look through template substitutions.
5719 } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
5720 return EmitCallee(NTTP->getReplacement());
5721
5722 // Treat pseudo-destructor calls differently.
5723 } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
5725 }
5726
5727 // Otherwise, we have an indirect reference.
5728 llvm::Value *calleePtr;
5730 if (auto ptrType = E->getType()->getAs<PointerType>()) {
5731 calleePtr = EmitScalarExpr(E);
5732 functionType = ptrType->getPointeeType();
5733 } else {
5734 functionType = E->getType();
5735 calleePtr = EmitLValue(E, KnownNonNull).getPointer(*this);
5736 }
5737 assert(functionType->isFunctionType());
5738
5739 GlobalDecl GD;
5740 if (const auto *VD =
5741 dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
5742 GD = GlobalDecl(VD);
5743
5744 CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
5746 CGCallee callee(calleeInfo, calleePtr, pointerAuth);
5747 return callee;
5748}
5749
5751 // Comma expressions just emit their LHS then their RHS as an l-value.
5752 if (E->getOpcode() == BO_Comma) {
5753 EmitIgnoredExpr(E->getLHS());
5755 return EmitLValue(E->getRHS());
5756 }
5757
5758 if (E->getOpcode() == BO_PtrMemD ||
5759 E->getOpcode() == BO_PtrMemI)
5761
5762 assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
5763
5764 // Note that in all of these cases, __block variables need the RHS
5765 // evaluated first just in case the variable gets moved by the RHS.
5766
5767 switch (getEvaluationKind(E->getType())) {
5768 case TEK_Scalar: {
5769 switch (E->getLHS()->getType().getObjCLifetime()) {
5771 return EmitARCStoreStrong(E, /*ignored*/ false).first;
5772
5774 return EmitARCStoreAutoreleasing(E).first;
5775
5776 // No reason to do any of these differently.
5780 break;
5781 }
5782
5783 // TODO: Can we de-duplicate this code with the corresponding code in
5784 // CGExprScalar, similar to the way EmitCompoundAssignmentLValue works?
5785 RValue RV;
5786 llvm::Value *Previous = nullptr;
5787 QualType SrcType = E->getRHS()->getType();
5788 // Check if LHS is a bitfield, if RHS contains an implicit cast expression
5789 // we want to extract that value and potentially (if the bitfield sanitizer
5790 // is enabled) use it to check for an implicit conversion.
5791 if (E->getLHS()->refersToBitField()) {
5792 llvm::Value *RHS =
5794 RV = RValue::get(RHS);
5795 } else
5796 RV = EmitAnyExpr(E->getRHS());
5797
5798 LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
5799
5800 if (RV.isScalar())
5802
5803 if (LV.isBitField()) {
5804 llvm::Value *Result = nullptr;
5805 // If bitfield sanitizers are enabled we want to use the result
5806 // to check whether a truncation or sign change has occurred.
5807 if (SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
5809 else
5811
5812 // If the expression contained an implicit conversion, make sure
5813 // to use the value before the scalar conversion.
5814 llvm::Value *Src = Previous ? Previous : RV.getScalarVal();
5815 QualType DstType = E->getLHS()->getType();
5816 EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
5817 LV.getBitFieldInfo(), E->getExprLoc());
5818 } else
5819 EmitStoreThroughLValue(RV, LV);
5820
5821 if (getLangOpts().OpenMP)
5823 E->getLHS());
5824 return LV;
5825 }
5826
5827 case TEK_Complex:
5829
5830 case TEK_Aggregate:
5831 // If the lang opt is HLSL and the LHS is a constant array
5832 // then we are performing a copy assignment and call a special
5833 // function because EmitAggExprToLValue emits to a temporary LValue
5834 if (getLangOpts().HLSL && E->getLHS()->getType()->isConstantArrayType())
5836
5837 return EmitAggExprToLValue(E);
5838 }
5839 llvm_unreachable("bad evaluation kind");
5840}
5841
5842// This function implements trivial copy assignment for HLSL's
5843// assignable constant arrays.
5845 // Don't emit an LValue for the RHS because it might not be an LValue
5846 LValue LHS = EmitLValue(E->getLHS());
5847 // In C the RHS of an assignment operator is an RValue.
 5848  // EmitAggregateAssign takes an LValue for the RHS. Instead we can call
5849 // EmitInitializationToLValue to emit an RValue into an LValue.
5850 EmitInitializationToLValue(E->getRHS(), LHS);
5851 return LHS;
5852}
5853
5855 llvm::CallBase **CallOrInvoke) {
5856 RValue RV = EmitCallExpr(E, ReturnValueSlot(), CallOrInvoke);
5857
5858 if (!RV.isScalar())
5861
5862 assert(E->getCallReturnType(getContext())->isReferenceType() &&
5863 "Can't have a scalar return unless the return type is a "
5864 "reference type!");
5865
5867}
5868
5870 // FIXME: This shouldn't require another copy.
5871 return EmitAggExprToLValue(E);
5872}
5873
5876 && "binding l-value to type which needs a temporary");
5878 EmitCXXConstructExpr(E, Slot);
5880}
5881
5882LValue
5885}
5886
5888 return CGM.GetAddrOfMSGuidDecl(E->getGuidDecl())
5890}
5891
5895}
5896
5897LValue
5899 AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
5901 EmitAggExpr(E->getSubExpr(), Slot);
5902 EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
5904}
5905
5908
5909 if (!RV.isScalar())
5912
5913 assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
5914 "Can't have a scalar return unless the return type is a "
5915 "reference type!");
5916
5918}
5919
5921 Address V =
5922 CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
5924}
5925
5927 const ObjCIvarDecl *Ivar) {
5928 return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
5929}
5930
5931llvm::Value *
5933 const ObjCIvarDecl *Ivar) {
5934 llvm::Value *OffsetValue = EmitIvarOffset(Interface, Ivar);
5935 QualType PointerDiffType = getContext().getPointerDiffType();
5936 return Builder.CreateZExtOrTrunc(OffsetValue,
5937 getTypes().ConvertType(PointerDiffType));
5938}
5939
5941 llvm::Value *BaseValue,
5942 const ObjCIvarDecl *Ivar,
5943 unsigned CVRQualifiers) {
5944 return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
5945 Ivar, CVRQualifiers);
5946}
5947
5949 // FIXME: A lot of the code below could be shared with EmitMemberExpr.
5950 llvm::Value *BaseValue = nullptr;
5951 const Expr *BaseExpr = E->getBase();
5952 Qualifiers BaseQuals;
5953 QualType ObjectTy;
5954 if (E->isArrow()) {
5955 BaseValue = EmitScalarExpr(BaseExpr);
5956 ObjectTy = BaseExpr->getType()->getPointeeType();
5957 BaseQuals = ObjectTy.getQualifiers();
5958 } else {
5959 LValue BaseLV = EmitLValue(BaseExpr);
5960 BaseValue = BaseLV.getPointer(*this);
5961 ObjectTy = BaseExpr->getType();
5962 BaseQuals = ObjectTy.getQualifiers();
5963 }
5964
5965 LValue LV =
5966 EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
5967 BaseQuals.getCVRQualifiers());
5969 return LV;
5970}
5971
5973 // Can only get l-value for message expression returning aggregate type
5977}
5978
5980 const CGCallee &OrigCallee, const CallExpr *E,
5981 ReturnValueSlot ReturnValue,
5982 llvm::Value *Chain,
5983 llvm::CallBase **CallOrInvoke,
5984 CGFunctionInfo const **ResolvedFnInfo) {
5985 // Get the actual function type. The callee type will always be a pointer to
5986 // function type or a block pointer type.
5987 assert(CalleeType->isFunctionPointerType() &&
5988 "Call must have function pointer type!");
5989
5990 const Decl *TargetDecl =
5991 OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
5992
5993 assert((!isa_and_present<FunctionDecl>(TargetDecl) ||
5994 !cast<FunctionDecl>(TargetDecl)->isImmediateFunction()) &&
5995 "trying to emit a call to an immediate function");
5996
5997 CalleeType = getContext().getCanonicalType(CalleeType);
5998
5999 auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
6000
6001 CGCallee Callee = OrigCallee;
6002
6003 if (SanOpts.has(SanitizerKind::Function) &&
6004 (!TargetDecl || !isa<FunctionDecl>(TargetDecl)) &&
6005 !isa<FunctionNoProtoType>(PointeeType)) {
6006 if (llvm::Constant *PrefixSig =
6008 SanitizerScope SanScope(this);
6009 auto *TypeHash = getUBSanFunctionTypeHash(PointeeType);
6010
6011 llvm::Type *PrefixSigType = PrefixSig->getType();
6012 llvm::StructType *PrefixStructTy = llvm::StructType::get(
6013 CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
6014
6015 llvm::Value *CalleePtr = Callee.getFunctionPointer();
6017 // Use raw pointer since we are using the callee pointer as data here.
6018 Address Addr =
6019 Address(CalleePtr, CalleePtr->getType(),
6021 CalleePtr->getPointerAlignment(CGM.getDataLayout())),
6022 Callee.getPointerAuthInfo(), nullptr);
6023 CalleePtr = Addr.emitRawPointer(*this);
6024 }
6025
6026 // On 32-bit Arm, the low bit of a function pointer indicates whether
6027 // it's using the Arm or Thumb instruction set. The actual first
6028 // instruction lives at the same address either way, so we must clear
6029 // that low bit before using the function address to find the prefix
6030 // structure.
6031 //
6032 // This applies to both Arm and Thumb target triples, because
6033 // either one could be used in an interworking context where it
6034 // might be passed function pointers of both types.
6035 llvm::Value *AlignedCalleePtr;
6036 if (CGM.getTriple().isARM() || CGM.getTriple().isThumb()) {
6037 llvm::Value *CalleeAddress =
6038 Builder.CreatePtrToInt(CalleePtr, IntPtrTy);
6039 llvm::Value *Mask = llvm::ConstantInt::get(IntPtrTy, ~1);
6040 llvm::Value *AlignedCalleeAddress =
6041 Builder.CreateAnd(CalleeAddress, Mask);
6042 AlignedCalleePtr =
6043 Builder.CreateIntToPtr(AlignedCalleeAddress, CalleePtr->getType());
6044 } else {
6045 AlignedCalleePtr = CalleePtr;
6046 }
6047
6048 llvm::Value *CalleePrefixStruct = AlignedCalleePtr;
6049 llvm::Value *CalleeSigPtr =
6050 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 0);
6051 llvm::Value *CalleeSig =
6052 Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
6053 llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
6054
6055 llvm::BasicBlock *Cont = createBasicBlock("cont");
6056 llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
6057 Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
6058
6059 EmitBlock(TypeCheck);
6060 llvm::Value *CalleeTypeHash = Builder.CreateAlignedLoad(
6061 Int32Ty,
6062 Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, -1, 1),
6063 getPointerAlign());
6064 llvm::Value *CalleeTypeHashMatch =
6065 Builder.CreateICmpEQ(CalleeTypeHash, TypeHash);
6066 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
6067 EmitCheckTypeDescriptor(CalleeType)};
6068 EmitCheck(std::make_pair(CalleeTypeHashMatch, SanitizerKind::SO_Function),
6069 SanitizerHandler::FunctionTypeMismatch, StaticData,
6070 {CalleePtr});
6071
6072 Builder.CreateBr(Cont);
6073 EmitBlock(Cont);
6074 }
6075 }
6076
6077 const auto *FnType = cast<FunctionType>(PointeeType);
6078
6079 // If we are checking indirect calls and this call is indirect, check that the
6080 // function pointer is a member of the bit set for the function type.
6081 if (SanOpts.has(SanitizerKind::CFIICall) &&
6082 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
6083 SanitizerScope SanScope(this);
6084 EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
6085
6086 llvm::Metadata *MD;
6087 if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
6089 else
6091
6092 llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
6093
6094 llvm::Value *CalleePtr = Callee.getFunctionPointer();
6095 llvm::Value *TypeTest = Builder.CreateCall(
6096 CGM.getIntrinsic(llvm::Intrinsic::type_test), {CalleePtr, TypeId});
6097
6098 auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
6099 llvm::Constant *StaticData[] = {
6100 llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
6103 };
6104 if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
6105 EmitCfiSlowPathCheck(SanitizerKind::SO_CFIICall, TypeTest, CrossDsoTypeId,
6106 CalleePtr, StaticData);
6107 } else {
6108 EmitCheck(std::make_pair(TypeTest, SanitizerKind::SO_CFIICall),
6109 SanitizerHandler::CFICheckFail, StaticData,
6110 {CalleePtr, llvm::UndefValue::get(IntPtrTy)});
6111 }
6112 }
6113
6114 CallArgList Args;
6115 if (Chain)
6116 Args.add(RValue::get(Chain), CGM.getContext().VoidPtrTy);
6117
6118 // C++17 requires that we evaluate arguments to a call using assignment syntax
6119 // right-to-left, and that we evaluate arguments to certain other operators
6120 // left-to-right. Note that we allow this to override the order dictated by
6121 // the calling convention on the MS ABI, which means that parameter
6122 // destruction order is not necessarily reverse construction order.
6123 // FIXME: Revisit this based on C++ committee response to unimplementability.
6125 bool StaticOperator = false;
6126 if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
6127 if (OCE->isAssignmentOp())
6129 else {
6130 switch (OCE->getOperator()) {
6131 case OO_LessLess:
6132 case OO_GreaterGreater:
6133 case OO_AmpAmp:
6134 case OO_PipePipe:
6135 case OO_Comma:
6136 case OO_ArrowStar:
6138 break;
6139 default:
6140 break;
6141 }
6142 }
6143
6144 if (const auto *MD =
6145 dyn_cast_if_present<CXXMethodDecl>(OCE->getCalleeDecl());
6146 MD && MD->isStatic())
6147 StaticOperator = true;
6148 }
6149
6150 auto Arguments = E->arguments();
6151 if (StaticOperator) {
6152 // If we're calling a static operator, we need to emit the object argument
6153 // and ignore it.
6154 EmitIgnoredExpr(E->getArg(0));
6155 Arguments = drop_begin(Arguments, 1);
6156 }
6157 EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), Arguments,
6158 E->getDirectCallee(), /*ParamsToSkip=*/0, Order);
6159
6161 Args, FnType, /*ChainCall=*/Chain);
6162
6163 if (ResolvedFnInfo)
6164 *ResolvedFnInfo = &FnInfo;
6165
6166 // HIP function pointer contains kernel handle when it is used in triple
6167 // chevron. The kernel stub needs to be loaded from kernel handle and used
6168 // as callee.
6169 if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
6170 isa<CUDAKernelCallExpr>(E) &&
6171 (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
6172 llvm::Value *Handle = Callee.getFunctionPointer();
6173 auto *Stub = Builder.CreateLoad(
6174 Address(Handle, Handle->getType(), CGM.getPointerAlign()));
6175 Callee.setFunctionPointer(Stub);
6176 }
6177 llvm::CallBase *LocalCallOrInvoke = nullptr;
6178 RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &LocalCallOrInvoke,
6179 E == MustTailCall, E->getExprLoc());
6180
 6181  // Generate function declaration DISubprogram in order to be used
6182 // in debug info about call sites.
6183 if (CGDebugInfo *DI = getDebugInfo()) {
6184 if (auto *CalleeDecl = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
6185 FunctionArgList Args;
6186 QualType ResTy = BuildFunctionArgList(CalleeDecl, Args);
6187 DI->EmitFuncDeclForCallSite(LocalCallOrInvoke,
6188 DI->getFunctionType(CalleeDecl, ResTy, Args),
6189 CalleeDecl);
6190 }
6191 }
6192 if (CallOrInvoke)
6193 *CallOrInvoke = LocalCallOrInvoke;
6194
6195 return Call;
6196}
6197
6200 Address BaseAddr = Address::invalid();
6201 if (E->getOpcode() == BO_PtrMemI) {
6202 BaseAddr = EmitPointerWithAlignment(E->getLHS());
6203 } else {
6204 BaseAddr = EmitLValue(E->getLHS()).getAddress();
6205 }
6206
6207 llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
6208 const auto *MPT = E->getRHS()->getType()->castAs<MemberPointerType>();
6209
6210 LValueBaseInfo BaseInfo;
6211 TBAAAccessInfo TBAAInfo;
6212 Address MemberAddr =
6213 EmitCXXMemberDataPointerAddress(E, BaseAddr, OffsetV, MPT, &BaseInfo,
6214 &TBAAInfo);
6215
6216 return MakeAddrLValue(MemberAddr, MPT->getPointeeType(), BaseInfo, TBAAInfo);
6217}
6218
6219/// Given the address of a temporary variable, produce an r-value of
6220/// its type.
6222 QualType type,
6223 SourceLocation loc) {
6225 switch (getEvaluationKind(type)) {
6226 case TEK_Complex:
6227 return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
6228 case TEK_Aggregate:
6229 return lvalue.asAggregateRValue();
6230 case TEK_Scalar:
6231 return RValue::get(EmitLoadOfScalar(lvalue, loc));
6232 }
6233 llvm_unreachable("bad evaluation kind");
6234}
6235
6236void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
6237 assert(Val->getType()->isFPOrFPVectorTy());
6238 if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
6239 return;
6240
6241 llvm::MDBuilder MDHelper(getLLVMContext());
6242 llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
6243
6244 cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
6245}
6246
6247void CodeGenFunction::SetSqrtFPAccuracy(llvm::Value *Val) {
6248 llvm::Type *EltTy = Val->getType()->getScalarType();
6249 if (!EltTy->isFloatTy())
6250 return;
6251
6252 if ((getLangOpts().OpenCL &&
6253 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6254 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6255 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6256 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 3ulp
6257 //
6258 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6259 // build option allows an application to specify that single precision
6260 // floating-point divide (x/y and 1/x) and sqrt used in the program
6261 // source are correctly rounded.
6262 //
6263 // TODO: CUDA has a prec-sqrt flag
6264 SetFPAccuracy(Val, 3.0f);
6265 }
6266}
6267
6268void CodeGenFunction::SetDivFPAccuracy(llvm::Value *Val) {
6269 llvm::Type *EltTy = Val->getType()->getScalarType();
6270 if (!EltTy->isFloatTy())
6271 return;
6272
6273 if ((getLangOpts().OpenCL &&
6274 !CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
6275 (getLangOpts().HIP && getLangOpts().CUDAIsDevice &&
6276 !CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
6277 // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
6278 //
6279 // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
6280 // build option allows an application to specify that single precision
6281 // floating-point divide (x/y and 1/x) and sqrt used in the program
6282 // source are correctly rounded.
6283 //
6284 // TODO: CUDA has a prec-div flag
6285 SetFPAccuracy(Val, 2.5f);
6286 }
6287}
6288
namespace {
  // Holder for the result of emitting a pseudo-object expression: depending
  // on whether the caller asked for an l-value or an r-value, exactly one of
  // the two members is populated (see emitPseudoObjectExpr below, which sets
  // result.LV when forLValue is true and result.RV otherwise).
  struct LValueOrRValue {
    LValue LV;
    RValue RV;
  };
}
6295
6296static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
6297 const PseudoObjectExpr *E,
6298 bool forLValue,
6299 AggValueSlot slot) {
6301
6302 // Find the result expression, if any.
6303 const Expr *resultExpr = E->getResultExpr();
6304 LValueOrRValue result;
6305
6307 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
6308 const Expr *semantic = *i;
6309
6310 // If this semantic expression is an opaque value, bind it
6311 // to the result of its source expression.
6312 if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
6313 // Skip unique OVEs.
6314 if (ov->isUnique()) {
6315 assert(ov != resultExpr &&
6316 "A unique OVE cannot be used as the result expression");
6317 continue;
6318 }
6319
6320 // If this is the result expression, we may need to evaluate
6321 // directly into the slot.
6322 typedef CodeGenFunction::OpaqueValueMappingData OVMA;
6323 OVMA opaqueData;
6324 if (ov == resultExpr && ov->isPRValue() && !forLValue &&
6326 CGF.EmitAggExpr(ov->getSourceExpr(), slot);
6327 LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
6329 opaqueData = OVMA::bind(CGF, ov, LV);
6330 result.RV = slot.asRValue();
6331
6332 // Otherwise, emit as normal.
6333 } else {
6334 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
6335
6336 // If this is the result, also evaluate the result now.
6337 if (ov == resultExpr) {
6338 if (forLValue)
6339 result.LV = CGF.EmitLValue(ov);
6340 else
6341 result.RV = CGF.EmitAnyExpr(ov, slot);
6342 }
6343 }
6344
6345 opaques.push_back(opaqueData);
6346
6347 // Otherwise, if the expression is the result, evaluate it
6348 // and remember the result.
6349 } else if (semantic == resultExpr) {
6350 if (forLValue)
6351 result.LV = CGF.EmitLValue(semantic);
6352 else
6353 result.RV = CGF.EmitAnyExpr(semantic, slot);
6354
6355 // Otherwise, evaluate the expression in an ignored context.
6356 } else {
6357 CGF.EmitIgnoredExpr(semantic);
6358 }
6359 }
6360
6361 // Unbind all the opaques now.
6362 for (unsigned i = 0, e = opaques.size(); i != e; ++i)
6363 opaques[i].unbind(CGF);
6364
6365 return result;
6366}
6367
6369 AggValueSlot slot) {
6370 return emitPseudoObjectExpr(*this, E, false, slot).RV;
6371}
6372
6374 return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
6375}
Defines the clang::ASTContext interface.
#define V(N, I)
Definition: ASTContext.h:3460
This file provides some common utility functions for processing Lambda related AST Constructs.
DynTypedNode Node
Defines enum values for all the target-independent builtin functions.
CodeGenFunction::ComplexPairTy ComplexPairTy
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E, LValue &LV, bool IsMemberAccess=false)
Definition: CGExpr.cpp:2710
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM)
Named Registers are named metadata pointing to the register name which will be read from/written to a...
Definition: CGExpr.cpp:2953
static llvm::Value * emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc, llvm::Value *Ptr)
Definition: CGExpr.cpp:692
static const Expr * isSimpleArrayDecayOperand(const Expr *E)
isSimpleArrayDecayOperand - If the specified expr is a simple decay from an array to pointer,...
Definition: CGExpr.cpp:4009
static bool hasBooleanRepresentation(QualType Ty)
Definition: CGExpr.cpp:1882
static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *Field, int64_t &Offset)
The offset of a field from the beginning of the record.
Definition: CGExpr.cpp:4193
static bool hasBPFPreserveStaticOffset(const RecordDecl *D)
Definition: CGExpr.cpp:4079
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base, const FieldDecl *field)
Drill down to the storage of a field without walking into reference types.
Definition: CGExpr.cpp:4858
ConstantEmissionKind
Can we constant-emit a load of a reference to a variable of the given type? This is different from pr...
Definition: CGExpr.cpp:1737
@ CEK_AsReferenceOnly
Definition: CGExpr.cpp:1739
@ CEK_AsValueOnly
Definition: CGExpr.cpp:1741
@ CEK_None
Definition: CGExpr.cpp:1738
@ CEK_AsValueOrReference
Definition: CGExpr.cpp:1740
static bool isConstantEmittableObjectType(QualType type)
Given an object of the given canonical type, can we safely copy a value out of it based on its initia...
Definition: CGExpr.cpp:1712
static QualType getFixedSizeElementType(const ASTContext &ctx, const VariableArrayType *vla)
Definition: CGExpr.cpp:4070
static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD, llvm::Value *ThisValue)
Definition: CGExpr.cpp:2941
static std::optional< LValue > EmitLValueOrThrowExpression(CodeGenFunction &CGF, const Expr *Operand)
Emit the operand of a glvalue conditional operator.
Definition: CGExpr.cpp:5136
static CheckRecoverableKind getRecoverableKind(SanitizerKind::SanitizerOrdinal Ordinal)
Definition: CGExpr.cpp:3528
static llvm::Value * emitArraySubscriptGEP(CodeGenFunction &CGF, llvm::Type *elemType, llvm::Value *ptr, ArrayRef< llvm::Value * > indices, bool inbounds, bool signedIndices, SourceLocation loc, const llvm::Twine &name="arrayidx")
Definition: CGExpr.cpp:4023
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E, GlobalDecl GD)
Definition: CGExpr.cpp:2932
static RawAddress MaybeConvertMatrixAddress(RawAddress Addr, CodeGenFunction &CGF, bool IsVector=true)
Definition: CGExpr.cpp:2104
static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF, const PseudoObjectExpr *E, bool forLValue, AggValueSlot slot)
Definition: CGExpr.cpp:6296
static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF, Address &Addr)
Definition: CGExpr.cpp:4095
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF, const MemberExpr *ME)
Definition: CGExpr.cpp:1846
static llvm::Value * getArrayIndexingBound(CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel)
If Base is known to point to the start of an array, return the length of that array.
Definition: CGExpr.cpp:964
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc, CodeGenFunction &CGF)
Definition: CGExpr.cpp:2201
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type)
Definition: CGExpr.cpp:1743
static QualType getConstantExprReferredType(const FullExpr *E, const ASTContext &Ctx)
Definition: CGExpr.cpp:1544
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, llvm::APInt &Min, llvm::APInt &End, bool StrictEnums, bool IsBool)
Definition: CGExpr.cpp:1895
static std::optional< int64_t > getOffsetDifferenceInBits(CodeGenFunction &CGF, const FieldDecl *FD1, const FieldDecl *FD2)
Returns the relative offset difference between FD1 and FD2.
Definition: CGExpr.cpp:4224
static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD)
Definition: CGExpr.cpp:5645
static LValue EmitThreadPrivateVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr, llvm::Type *RealVarTy, SourceLocation Loc)
Definition: CGExpr.cpp:2807
static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD, const FieldDecl *Field, RecIndicesTy &Indices)
Definition: CGExpr.cpp:1114
static bool OnlyHasInlineBuiltinDeclaration(const FunctionDecl *FD)
Definition: CGExpr.cpp:5638
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF, const Expr *E, const VarDecl *VD)
Definition: CGExpr.cpp:2881
static bool hasAnyVptr(const QualType Type, const ASTContext &Context)
Definition: CGExpr.cpp:4884
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase)
Given an array base, check whether its member access belongs to a record with preserve_access_index a...
Definition: CGExpr.cpp:4108
static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType T)
Definition: CGExpr.cpp:2821
static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF, const DeclRefExpr *E, const VarDecl *VD)
Determine whether we can emit a reference to VD from the current context, despite not necessarily hav...
Definition: CGExpr.cpp:2978
VariableTypeDescriptorKind
Definition: CGExpr.cpp:70
@ TK_Float
A floating-point type.
Definition: CGExpr.cpp:74
@ TK_Unknown
Any other type. The value representation is unspecified.
Definition: CGExpr.cpp:78
@ TK_Integer
An integer type.
Definition: CGExpr.cpp:72
@ TK_BitInt
An _BitInt(N) type.
Definition: CGExpr.cpp:76
static CharUnits getArrayElementAlign(CharUnits arrayAlign, llvm::Value *idx, CharUnits eltSize)
Definition: CGExpr.cpp:4055
static RawAddress createReferenceTemporary(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, const Expr *Inner, RawAddress *Alloca=nullptr)
Definition: CGExpr.cpp:438
static bool isAAPCS(const TargetInfo &TargetInfo)
Helper method to check if the underlying ABI is AAPCS.
Definition: CGExpr.cpp:486
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue, bool isInit, CodeGenFunction &CGF)
Definition: CGExpr.cpp:2129
static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo, KnownNonNull_t IsKnownNonNull, CodeGenFunction &CGF)
Definition: CGExpr.cpp:1283
static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base, Address addr, const FieldDecl *field)
Definition: CGExpr.cpp:4871
const SanitizerHandlerInfo SanitizerHandlers[]
Definition: CGExpr.cpp:3545
static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base, const FieldDecl *Field)
Get the address of a zero-sized field within a record.
Definition: CGExpr.cpp:4844
static void emitCheckHandlerCall(CodeGenFunction &CGF, llvm::FunctionType *FnType, ArrayRef< llvm::Value * > FnArgs, SanitizerHandler CheckHandler, CheckRecoverableKind RecoverKind, bool IsFatal, llvm::BasicBlock *ContBB, bool NoMerge)
Definition: CGExpr.cpp:3551
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base, LValueBaseInfo &BaseInfo, TBAAAccessInfo &TBAAInfo, QualType BaseTy, QualType ElTy, bool IsLowerBound)
Definition: CGExpr.cpp:4471
static void pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, const Expr *E, Address ReferenceTemporary)
Definition: CGExpr.cpp:322
const Decl * D
Expr * E
StringRef Filename
Definition: Format.cpp:3056
static const SanitizerMask AlwaysRecoverable
static const SanitizerMask Unrecoverable
static const RecordType * getRecordType(QualType QT)
Checks that the passed in QualType either is of RecordType or points to RecordType.
SourceLocation Loc
Definition: SemaObjC.cpp:759
Defines the SourceManager interface.
static QualType getPointeeType(const MemRegion *R)
StateNode * Previous
const LValueBase getLValueBase() const
Definition: APValue.cpp:984
bool isLValue() const
Definition: APValue.h:472
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:188
SourceManager & getSourceManager()
Definition: ASTContext.h:741
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
uint64_t getFieldOffset(const ValueDecl *FD) const
Get the offset of a FieldDecl or IndirectFieldDecl, in bits.
QualType getTagDeclType(const TagDecl *Decl) const
Return the unique reference to the type for the specified TagDecl (struct/union/class/enum) decl.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
Definition: ASTContext.h:2723
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType VoidPtrTy
Definition: ASTContext.h:1187
Builtin::Context & BuiltinInfo
Definition: ASTContext.h:682
const LangOptions & getLangOpts() const
Definition: ASTContext.h:834
bool isTypeIgnoredBySanitizer(const SanitizerMask &Mask, const QualType &Ty) const
Check if a type can have its sanitizer instrumentation elided based on its presence within an ignorel...
Definition: ASTContext.cpp:854
QualType getPointerDiffType() const
Return the unique type for "ptrdiff_t" (C99 7.17) defined in <stddef.h>.
CanQualType BoolTy
Definition: ASTContext.h:1161
const NoSanitizeList & getNoSanitizeList() const
Definition: ASTContext.h:844
llvm::DenseMap< const CXXMethodDecl *, CXXCastPath > LambdaCastPaths
For capturing lambdas with an explicit object parameter whose type is derived from the lambda type,...
Definition: ASTContext.h:1256
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2489
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CanQualType VoidTy
Definition: ASTContext.h:1160
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2925
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
uint64_t getCharWidth() const
Return the size of the character type, in bits.
Definition: ASTContext.h:2493
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Definition: RecordLayout.h:200
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition: Expr.h:4224
This class represents BOTH the OpenMP Array Section and OpenACC 'subarray', with a boolean differenti...
Definition: Expr.h:6986
static QualType getBaseOriginalType(const Expr *Base)
Return original type of the base expression for array section.
Definition: Expr.cpp:5185
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition: Expr.h:2718
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3578
QualType getElementType() const
Definition: Type.h:3590
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3909
A fixed int type of a specified bitwidth.
Definition: Type.h:7820
bool isPredefinedLibFunction(unsigned ID) const
Determines whether this builtin is a predefined libc/libm function, such as "malloc",...
Definition: Builtins.h:161
Represents binding an expression to a temporary.
Definition: ExprCXX.h:1491
Represents a call to a C++ constructor.
Definition: ExprCXX.h:1546
Represents a C++ destructor within a class.
Definition: DeclCXX.h:2856
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition: DeclCXX.h:1378
bool isDynamicClass() const
Definition: DeclCXX.h:586
bool hasDefinition() const
Definition: DeclCXX.h:572
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition: ExprCXX.h:845
A Microsoft C++ __uuidof expression, which gets the _GUID that corresponds to the supplied type or ex...
Definition: ExprCXX.h:1066
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2874
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3547
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition: CharUnits.h:207
llvm::MaybeAlign getAsMaybeAlign() const
getAsMaybeAlign - Returns Quantity as a valid llvm::Align or std::nullopt, Beware llvm::MaybeAlign as...
Definition: CharUnits.h:194
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition: CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition: CharUnits.h:53
SanitizerSet SanitizeMergeHandlers
Set of sanitizer checks that can merge handlers (smaller code size at the expense of debuggability).
PointerAuthOptions PointerAuth
Configuration for pointer-signing.
SanitizerSet SanitizeTrap
Set of sanitizer checks that trap rather than diagnose.
SanitizerSet SanitizeRecover
Set of sanitizer checks that are non-fatal (i.e.
std::string TrapFuncName
If not an empty string, trap intrinsics are lowered to calls to this function instead of to trap inst...
virtual llvm::FixedVectorType * getOptimalVectorMemoryType(llvm::FixedVectorType *T, const LangOptions &Opt) const
Returns the optimal vector memory type based on the given vector type.
Definition: ABIInfo.cpp:240
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:128
llvm::Value * getBasePointer() const
Definition: Address.h:193
static Address invalid()
Definition: Address.h:176
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:251
CharUnits getAlignment() const
Definition: Address.h:189
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:207
Address withPointer(llvm::Value *NewPointer, KnownNonNull_t IsKnownNonNull) const
Return address with different pointer, but same element type and alignment.
Definition: Address.h:259
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:274
KnownNonNull_t isKnownNonNull() const
Whether the pointer is known not to be null.
Definition: Address.h:231
Address setKnownNonNull()
Definition: Address.h:236
void setAlignment(CharUnits Value)
Definition: Address.h:191
void replaceBasePointer(llvm::Value *P)
This function is used in situations where the caller is doing some sort of opaque "laundering" of the...
Definition: Address.h:181
bool isValid() const
Definition: Address.h:177
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:199
An aggregate value slot.
Definition: CGValue.h:504
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition: CGValue.h:572
Address getAddress() const
Definition: CGValue.h:644
void setExternallyDestructed(bool destructed=true)
Definition: CGValue.h:613
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
Definition: CGValue.h:602
static AggValueSlot forAddr(Address addr, Qualifiers quals, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
forAddr - Make a slot for an aggregate value.
Definition: CGValue.h:587
RValue asRValue() const
Definition: CGValue.h:666
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:858
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:136
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset, const llvm::Twine &Name="")
Given a pointer to i8, adjust it by a given constant offset.
Definition: CGBuilder.h:305
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:292
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition: CGBuilder.h:203
Address CreateConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1, const llvm::Twine &Name="")
Definition: CGBuilder.h:331
Address CreateConstArrayGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = [n x T]* ... produce name = getelementptr inbounds addr, i64 0, i64 index where i64 is a...
Definition: CGBuilder.h:241
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:219
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:108
Address CreatePreserveStructAccessIndex(Address Addr, unsigned Index, unsigned FieldIndex, llvm::MDNode *DbgInfo)
Definition: CGBuilder.h:413
Address CreateLaunderInvariantGroup(Address Addr)
Definition: CGBuilder.h:437
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition: CGBuilder.h:128
Address CreatePreserveUnionAccessIndex(Address Addr, unsigned FieldIndex, llvm::MDNode *DbgInfo)
Definition: CGBuilder.h:429
Address CreateStripInvariantGroup(Address Addr)
Definition: CGBuilder.h:443
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
Definition: CGBuilder.h:189
Address CreateConstInBoundsGEP(Address Addr, uint64_t Index, const llvm::Twine &Name="")
Given addr = T* ... produce name = getelementptr inbounds addr, i64 index where i64 is actually the t...
Definition: CGBuilder.h:261
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition: CGBuilder.h:346
virtual llvm::Function * getKernelStub(llvm::GlobalValue *Handle)=0
Get kernel stub by kernel handle.
virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr)=0
Emit code to force the execution of a destructor during global teardown.
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition: CGCXXABI.cpp:97
virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType LValType)=0
Emit a reference to a non-local thread_local variable (including triggering the initialization of all...
virtual bool usesThreadWrapperFunction(const VarDecl *VD) const =0
MangleContext & getMangleContext()
Gets the mangle context.
Definition: CGCXXABI.h:113
Abstract information about a function or function prototype.
Definition: CGCall.h:41
const GlobalDecl getCalleeDecl() const
Definition: CGCall.h:59
All available information about a concrete callee.
Definition: CGCall.h:63
CGCalleeInfo getAbstractInfo() const
Definition: CGCall.h:180
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition: CGCall.h:172
bool isPseudoDestructor() const
Definition: CGCall.h:169
static CGCallee forBuiltin(unsigned builtinID, const FunctionDecl *builtinDecl)
Definition: CGCall.h:123
unsigned getBuiltinID() const
Definition: CGCall.h:164
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition: CGCall.h:137
bool isBuiltin() const
Definition: CGCall.h:157
const FunctionDecl * getBuiltinDecl() const
Definition: CGCall.h:160
static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E)
Definition: CGCall.h:131
This class gathers all debug information during compilation and is responsible for emitting to llvm g...
Definition: CGDebugInfo.h:58
llvm::DIType * getOrCreateStandaloneType(QualType Ty, SourceLocation Loc)
Emit standalone debug info for a type.
llvm::DIType * getOrCreateRecordType(QualType Ty, SourceLocation L)
Emit record type's standalone debug info.
CGFunctionInfo - Class to encapsulate the information about a function definition.
virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest, llvm::Value *ivarOffset)=0
virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest)=0
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy, llvm::Value *BaseValue, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)=0
virtual llvm::Value * EmitIvarOffset(CodeGen::CodeGenFunction &CGF, const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)=0
virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, Address AddrWeakObj)=0
virtual Address GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel)=0
Get the address of a selector for the specified name and type values.
virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest)=0
virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, Address dest, bool threadlocal=false)=0
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
virtual ConstantAddress getAddrOfDeclareTargetVar(const VarDecl *VD)
Returns the address of the variable marked as declare target with link clause OR as declare target wi...
bool hasRequiresUnifiedSharedMemory() const
Return whether the unified_shared_memory has been specified.
bool isNontemporalDecl(const ValueDecl *VD) const
Checks if the VD variable is marked as nontemporal declaration in current context.
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
CGRecordLayout - This class handles struct and union layout info while lowering AST types to LLVM typ...
const CGBitFieldInfo & getBitFieldInfo(const FieldDecl *FD) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getLLVMFieldNo(const FieldDecl *FD) const
Return llvm::StructType element number that corresponds to the field FD.
bool containsFieldDecl(const FieldDecl *FD) const
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:274
void add(RValue rvalue, QualType type)
Definition: CGCall.h:305
void addWriteback(LValue srcLV, Address temporary, llvm::Value *toUse, const Expr *writebackExpr=nullptr, llvm::Value *lifetimeSz=nullptr)
Definition: CGCall.h:326
virtual const FieldDecl * lookup(const VarDecl *VD) const
Lookup the captured field decl for a variable.
static ConstantEmission forValue(llvm::Constant *C)
static ConstantEmission forReference(llvm::Constant *C)
static OpaqueValueMappingData bind(CodeGenFunction &CGF, const OpaqueValueExpr *ov, const Expr *e)
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e)
RValue EmitLoadOfGlobalRegLValue(LValue LV)
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E)
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E)
llvm::Value * EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Address EmitCXXMemberDataPointerAddress(const Expr *E, Address base, llvm::Value *memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
llvm::Value * EmitNonNullRValueCheck(RValue RV, QualType T)
Create a check that a scalar RValue is non-null.
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
LValue EmitCastLValue(const CastExpr *E)
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
void EmitSanitizerStatReport(llvm::SanitizerStatKind SSK)
SanitizerSet SanOpts
Sanitizers enabled for this function.
LValue EmitCoawaitLValue(const CoawaitExpr *E)
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
llvm::Value * EmitIvarOffsetAsPointerDiff(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
CGCapturedStmtInfo * CapturedStmtInfo
void EmitCallArgs(CallArgList &Args, PrototypeWrapper Prototype, llvm::iterator_range< CallExpr::const_arg_iterator > ArgRange, AbstractCallee AC=AbstractCallee(), unsigned ParamsToSkip=0, EvaluationOrder Order=EvaluationOrder::Default)
LValue EmitCallExprLValue(const CallExpr *E, llvm::CallBase **CallOrInvoke=nullptr)
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check for an [implicit] conversion of a bitfield.
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
LValue EmitHLSLArrayAssignLValue(const BinaryOperator *E)
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Load a pointer with type PtrTy stored at address Ptr.
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
LValue EmitHLSLOutArgExpr(const HLSLOutArgExpr *E, CallArgList &Args, QualType Ty)
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc)
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
llvm::Value * EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar)
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
LValue EmitBinaryOperatorLValue(const BinaryOperator *E)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
RawAddress CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits align, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E)
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E)
llvm::Value * EmitARCLoadWeakRetained(Address addr)
const LangOptions & getLangOpts() const
llvm::Value * LoadPassedObjectSize(const Expr *E, QualType EltTy)
If E references a parameter with pass_object_size info or a constant array size modifier,...
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
LValue EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field)
EmitLValueForFieldInitialization - Like EmitLValueForField, except that if the Field is a reference,...
LValue EmitInitListLValue(const InitListExpr *E)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
Address EmitLoadOfReference(LValue RefLVal, LValueBaseInfo *PointeeBaseInfo=nullptr, TBAAAccessInfo *PointeeTBAAInfo=nullptr)
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Address EmitExtVectorElementLValue(LValue V)
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_DowncastReference
Checking the operand of a static_cast to a derived reference type.
@ TCK_MemberAccess
Checking the object expression in a non-static data member access.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_UpcastToVirtualBase
Checking the operand of a cast to a virtual base object.
@ TCK_MemberCall
Checking the 'this' pointer for a call to a non-static member function.
@ TCK_DynamicOperation
Checking the operand of a dynamic_cast or a typeid expression.
@ TCK_ReferenceBinding
Checking the bound value in a reference binding.
@ TCK_Upcast
Checking the operand of a cast to a base object.
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given division operation based on CodeGenOpts.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
bool InNoMergeAttributedStmt
True if the current statement has nomerge attribute.
LValue EmitUnsupportedLValue(const Expr *E, const char *Name)
EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue an ErrorUnsupported style ...
llvm::Type * ConvertTypeForMem(QualType T)
const Decl * CurCodeDecl
CurCodeDecl - This is the inner-most code context, which includes blocks.
llvm::AssertingVH< llvm::Instruction > AllocaInsertPt
AllocaInsertPoint - This is an instruction in the entry block before which we prefer to insert alloca...
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue, bool capturedByInit)
unsigned getDebugInfoFIndex(const RecordDecl *Rec, unsigned FieldIndex)
Get the record field index as represented in debug info.
LValue EmitLValueForField(LValue Base, const FieldDecl *Field)
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
void markStmtMaybeUsed(const Stmt *S)
@ ForceLeftToRight
! Language semantics require left-to-right evaluation.
@ Default
! No language constraints on evaluation order.
@ ForceRightToLeft
! Language semantics require right-to-left evaluation.
Destroyer * getDestroyer(QualType::DestructionKind destructionKind)
RawAddress CreateMemTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment without...
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
void EmitCfiCheckStub()
Emit a stub for the cross-DSO CFI check function.
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr)
llvm::Value * EmitARCLoadWeak(Address addr)
const TargetInfo & getTarget() const
LValue EmitCXXConstructLValue(const CXXConstructExpr *E)
bool isInConditionalBranch() const
isInConditionalBranch - Return true if we're currently emitting one branch or the other of a conditio...
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
LValue EmitVAArgExprLValue(const VAArgExpr *E)
bool EmitScalarRangeCheck(llvm::Value *Value, QualType Ty, SourceLocation Loc)
Check if the scalar Value is within the valid range for the given type Ty.
llvm::Function * generateDestroyHelper(Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray, const VarDecl *VD)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
LValue EmitCoyieldLValue(const CoyieldExpr *E)
RValue EmitAnyExprToTemp(const Expr *E)
EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will always be accessible even if...
void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit)
EmitComplexExprIntoLValue - Emit the given expression of complex type and place its result into the s...
RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Address mergeAddressesInConditionalExpr(Address LHS, Address RHS, llvm::BasicBlock *LHSBlock, llvm::BasicBlock *RHSBlock, llvm::BasicBlock *MergeBlock, QualType MergedType)
LValue EmitUnaryOpLValue(const UnaryOperator *E)
ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitUnsupportedRValue(const Expr *E, const char *Name)
EmitUnsupportedRValue - Emit a dummy r-value using the type of E and issue an ErrorUnsupported style ...
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts)
getAccessedFieldNo - Given an encoded value and a result number, return the input field number being ...
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound, llvm::Value *Index, QualType IndexType, QualType IndexedType, bool Accessed)
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
void EmitCfiCheckFail()
Emit a cross-DSO CFI failure handling function.
AggValueSlot CreateAggTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateAggTemp - Create a temporary memory object for the given aggregate type.
RValue EmitLoadOfExtVectorElementLValue(LValue V)
ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc)
EmitLoadOfComplex - Load a complex number from the specified l-value.
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
LValue EmitComplexAssignmentLValue(const BinaryOperator *E)
Emit an l-value for an assignment (simple or compound) of complex type.
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E)
Address emitBlockByrefAddress(Address baseAddr, const VarDecl *V, bool followForward=true)
BuildBlockByrefAddress - Computes the location of the data in a variable which is declared as __block...
LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E)
Address emitAddrOfImagComponent(Address complex, QualType complexType)
LValue EmitDeclRefLValue(const DeclRefExpr *E)
const TargetCodeGenInfo & getTargetHooks() const
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
LValue MakeNaturalAlignRawAddrLValue(llvm::Value *V, QualType T)
LValue EmitPredefinedLValue(const PredefinedExpr *E)
RValue EmitReferenceBindingToExpr(const Expr *E)
Emits a reference binding to the passed in expression.
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst)
RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke=nullptr)
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
llvm::Value * EmitCheckValue(llvm::Value *V)
Convert a value into a format suitable for passing to a runtime sanitizer handler.
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, Address Ptr)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside preserved access index region.
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
void EmitCfiSlowPathCheck(SanitizerKind::SanitizerOrdinal Ordinal, llvm::Value *Cond, llvm::ConstantInt *TypeId, llvm::Value *Ptr, ArrayRef< llvm::Constant * > StaticArgs)
Emit a slow path cross-DSO CFI check which calls __cfi_slowpath if Cond is false.
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
CGCallee EmitCallee(const Expr *E)
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
void EmitAggFinalDestCopy(QualType Type, AggValueSlot Dest, const LValue &Src, ExprValueKind SrcKind)
EmitAggFinalDestCopy - Emit copy of the specified aggregate into destination address.
void Destroyer(CodeGenFunction &CGF, Address addr, QualType ty)
LValue EmitLoadOfPointerLValue(Address Ptr, const PointerType *PtrTy)
std::pair< LValue, LValue > EmitHLSLOutArgLValues(const HLSLOutArgExpr *E, QualType Ty)
void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
llvm::Instruction * getPostAllocaInsertPoint()
Return PostAllocaInsertPt.
Address GetAddressOfBaseClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc)
GetAddressOfBaseClass - This function will add the necessary delta to the load of 'this' and returns ...
LValue EmitMemberExpr(const MemberExpr *E)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed=false)
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
llvm::ConstantInt * getUBSanFunctionTypeHash(QualType T) const
Return a type hash constant for a function instrumented by -fsanitize=function.
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl)
llvm::DenseMap< const ValueDecl *, FieldDecl * > LambdaCaptureFields
CleanupKind getCleanupKind(QualType::DestructionKind kind)
void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest)
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, llvm::CallBase **CallOrInvoke)
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitCXXTypeidExpr(const CXXTypeidExpr *E)
Address GetAddrOfBlockDecl(const VarDecl *var)
CodeGenTypes & getTypes() const
void EmitARCInitWeak(Address addr, llvm::Value *value)
LValue EmitArraySectionExpr(const ArraySectionExpr *E, bool IsLowerBound=true)
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E)
void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst)
Address EmitCXXUuidofExpr(const CXXUuidofExpr *E)
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
llvm::SmallVector< const ParmVarDecl *, 4 > FnArgs
Save Parameter Decl for coroutine.
QualType BuildFunctionArgList(GlobalDecl GD, FunctionArgList &Args)
LValue EmitStringLiteralLValue(const StringLiteral *E)
static Destroyer destroyARCStrongPrecise
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr)
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
LValue EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E)
LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e)
static bool hasAggregateEvaluationKind(QualType T)
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
void SetFPAccuracy(llvm::Value *Val, float Accuracy)
SetFPAccuracy - Set the minimum required accuracy of the given floating point operation,...
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
LValue EmitLoadOfReferenceLValue(LValue RefLVal)
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
void EmitInitializationToLValue(const Expr *E, LValue LV, AggValueSlot::IsZeroed_t IsZeroed=AggValueSlot::IsNotZeroed)
EmitInitializationToLValue - Emit an initializer to an LValue.
Address EmitFieldAnnotations(const FieldDecl *D, Address V)
Emit field annotations for the given field & value.
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E)
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
static bool isNullPointerAllowed(TypeCheckKind TCK)
Determine whether the pointer type check TCK permits null pointers.
static Destroyer destroyARCStrongImprecise
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
LValue EmitLValueForIvar(QualType ObjectTy, llvm::Value *Base, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers)
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
LValue EmitStmtExprLValue(const StmtExpr *E)
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
LValue MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural ...
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
bool LValueIsSuitableForInlineAtomic(LValue Src)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
RValue EmitLoadOfAnyValue(LValue V, AggValueSlot Slot=AggValueSlot::ignored(), SourceLocation Loc={})
Like EmitLoadOfLValue but also handles complex and aggregate types.
Address emitAddrOfRealComponent(Address complex, QualType complexType)
static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty)
Determine whether the pointer type check TCK requires a vptr check.
LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E)
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E)
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E)
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E)
LValue EmitLValueForLambdaField(const FieldDecl *Field)
static bool IsWrappedCXXThis(const Expr *E)
Check if E is a C++ "this" pointer wrapped in value-preserving casts.
This class organizes the cross-function state that is used while generating LLVM code.
ConstantAddress GetAddrOfMSGuidDecl(const MSGuidDecl *GD)
Get the address of a GUID.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition: CGExpr.cpp:1269
void setDSOLocal(llvm::GlobalValue *GV) const
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
CGDebugInfo * getModuleDebugInfo()
ConstantAddress GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E)
Returns a pointer to a constant global variable for the given file-scope compound literal expression.
llvm::ConstantInt * CreateCrossDsoCfiTypeId(llvm::Metadata *MD)
Generate a cross-DSO type identifier for MD.
void setTypeDescriptorInMap(QualType Ty, llvm::Constant *C)
llvm::Constant * getRawFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return a function pointer for a reference to the given function.
Definition: CGExpr.cpp:2920
llvm::FunctionCallee getAddrAndTypeOfCXXStructor(GlobalDecl GD, const CGFunctionInfo *FnInfo=nullptr, llvm::FunctionType *FnType=nullptr, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Definition: CGCXX.cpp:218
llvm::Constant * GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH=false)
Get the address of the RTTI descriptor for the given type.
llvm::Constant * GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty=nullptr, bool ForVTable=false, bool DontDefer=false, ForDefinition_t IsForDefinition=NotForDefinition)
Return the address of the given function.
Address createUnnamedGlobalFrom(const VarDecl &D, llvm::Constant *Constant, CharUnits Align)
Definition: CGDecl.cpp:1108
llvm::Constant * getFunctionPointer(GlobalDecl GD, llvm::Type *Ty=nullptr)
Return the ABI-correct function pointer value for a reference to the given function.
DiagnosticsEngine & getDiags() const
void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref< void()> Fn)
Run some code with "sufficient" stack space.
const LangOptions & getLangOpts() const
CGCUDARuntime & getCUDARuntime()
Return a reference to the configured CUDA runtime.
CharUnits getNaturalTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, bool forPointeeType=false)
const TargetInfo & getTarget() const
llvm::Metadata * CreateMetadataIdentifierForType(QualType T)
Create a metadata identifier for the given type.
llvm::Constant * getTypeDescriptorFromMap(QualType Ty)
void addUsedGlobal(llvm::GlobalValue *GV)
Add a global to a list to be added to the llvm.used metadata.
llvm::MDNode * getTBAABaseTypeInfo(QualType QTy)
getTBAABaseTypeInfo - Get metadata that describes the given base access type.
llvm::GlobalValue::LinkageTypes getLLVMLinkageVarDefinition(const VarDecl *VD)
Returns LLVM linkage for a declarator.
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
ConstantAddress GetWeakRefReference(const ValueDecl *VD)
Get a reference to the target of VD.
CGPointerAuthInfo getFunctionPointerAuthInfo(QualType T)
Return the abstract pointer authentication schema for a pointer to the given function type.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
SanitizerMetadata * getSanitizerMetadata()
llvm::Metadata * CreateMetadataIdentifierGeneralized(QualType T)
Create a metadata identifier for the generalization of the given type.
const llvm::Triple & getTriple() const
llvm::Constant * getOrCreateStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage)
Definition: CGDecl.cpp:246
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
TBAAAccessInfo getTBAAInfoForSubobject(LValue Base, QualType AccessType)
getTBAAInfoForSubobject - Get TBAA information for an access with a given base lvalue.
llvm::Constant * CreateRuntimeVariable(llvm::Type *Ty, StringRef Name)
Create a new runtime global variable with the specified type and name.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ConstantAddress GetAddrOfConstantStringFromLiteral(const StringLiteral *S, StringRef Name=".str")
Return a pointer to a constant array for the given string literal.
ASTContext & getContext() const
ConstantAddress GetAddrOfTemplateParamObject(const TemplateParamObjectDecl *TPO)
Get the address of a template parameter object.
TBAAAccessInfo mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo, TBAAAccessInfo TargetInfo)
mergeTBAAInfoForCast - Get merged TBAA information for the purposes of type casts.
llvm::Constant * GetAddrOfGlobalVar(const VarDecl *D, llvm::Type *Ty=nullptr, ForDefinition_t IsForDefinition=NotForDefinition)
Return the llvm::Constant for the address of the given global variable.
llvm::MDNode * getTBAATypeInfo(QualType QTy)
getTBAATypeInfo - Get metadata used to describe accesses to objects of the given type.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
CharUnits getNaturalPointeeTypeAlignment(QualType T, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
TBAAAccessInfo mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA, TBAAAccessInfo InfoB)
mergeTBAAInfoForConditionalOperator - Get merged TBAA information for the purposes of conditional ope...
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
CGObjCRuntime & getObjCRuntime()
Return a reference to the configured Objective-C runtime.
ConstantAddress GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E, const Expr *Inner)
Returns a pointer to a global variable representing a temporary with static or thread storage duratio...
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
void SetLLVMFunctionAttributes(GlobalDecl GD, const CGFunctionInfo &Info, llvm::Function *F, bool IsThunk)
Set the LLVM function attributes (sext, zext, etc).
CharUnits getMinimumObjectSize(QualType Ty)
Returns the minimum object size for an object of the given type.
void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F)
Set the LLVM function attributes which only apply to a function definition.
ConstantAddress GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *)
Return a pointer to a constant array for the given ObjCEncodeExpr node.
ConstantAddress GetAddrOfConstantCString(const std::string &Str, const char *GlobalName=nullptr)
Returns a pointer to a character array containing the literal and a terminating '\0' character.
void setCurrentStmt(const Stmt *S)
If the execution count for the current statement is known, record that as the current count.
Definition: CodeGenPGO.h:76
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:679
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Type * ConvertTypeForMem(QualType T)
ConvertTypeForMem - Convert type T into a llvm::Type.
const llvm::DataLayout & getDataLayout() const
Definition: CodeGenTypes.h:99
const CGFunctionInfo & arrangeFreeFunctionCall(const CallArgList &Args, const FunctionType *Ty, bool ChainCall)
Figure out the rules for calling a function with the given formal type using the given arguments.
Definition: CGCall.cpp:638
A specialization of Address that requires the address to be an LLVM Constant.
Definition: Address.h:294
ConstantAddress withElementType(llvm::Type *ElemTy) const
Definition: Address.h:310
llvm::Constant * getPointer() const
Definition: Address.h:306
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
llvm::Constant * tryEmitConstantExpr(const ConstantExpr *CE)
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition: CGCall.h:382
void mergeForCast(const LValueBaseInfo &Info)
Definition: CGValue.h:174
AlignmentSource getAlignmentSource() const
Definition: CGValue.h:171
LValue - This represents an lvalue reference.
Definition: CGValue.h:182
bool isBitField() const
Definition: CGValue.h:280
bool isMatrixElt() const
Definition: CGValue.h:283
Expr * getBaseIvarExp() const
Definition: CGValue.h:332
llvm::Constant * getExtVectorElts() const
Definition: CGValue.h:409
static LValue MakeGlobalReg(llvm::Value *V, CharUnits alignment, QualType type)
Definition: CGValue.h:478
void setObjCIvar(bool Value)
Definition: CGValue.h:298
bool isObjCArray() const
Definition: CGValue.h:300
bool isObjCStrong() const
Definition: CGValue.h:324
bool isGlobalObjCRef() const
Definition: CGValue.h:306
bool isVectorElt() const
Definition: CGValue.h:279
void setObjCArray(bool Value)
Definition: CGValue.h:301
bool isSimple() const
Definition: CGValue.h:278
bool isVolatileQualified() const
Definition: CGValue.h:285
RValue asAggregateRValue() const
Definition: CGValue.h:498
CharUnits getAlignment() const
Definition: CGValue.h:343
llvm::Value * getPointer(CodeGenFunction &CGF) const
llvm::Value * getMatrixIdx() const
Definition: CGValue.h:395
llvm::Value * getGlobalReg() const
Definition: CGValue.h:430
static LValue MakeAddr(Address Addr, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:432
bool isVolatile() const
Definition: CGValue.h:328
const Qualifiers & getQuals() const
Definition: CGValue.h:338
bool isGlobalReg() const
Definition: CGValue.h:282
static LValue MakeExtVectorElt(Address Addr, llvm::Constant *Elts, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:452
bool isObjCWeak() const
Definition: CGValue.h:321
Address getAddress() const
Definition: CGValue.h:361
unsigned getVRQualifiers() const
Definition: CGValue.h:287
void setThreadLocalRef(bool Value)
Definition: CGValue.h:310
LValue setKnownNonNull()
Definition: CGValue.h:350
bool isNonGC() const
Definition: CGValue.h:303
void setGlobalObjCRef(bool Value)
Definition: CGValue.h:307
bool isExtVectorElt() const
Definition: CGValue.h:281
llvm::Value * getVectorIdx() const
Definition: CGValue.h:382
void setNontemporal(bool Value)
Definition: CGValue.h:319
LValueBaseInfo getBaseInfo() const
Definition: CGValue.h:346
void setARCPreciseLifetime(ARCPreciseLifetime_t value)
Definition: CGValue.h:315
QualType getType() const
Definition: CGValue.h:291
const CGBitFieldInfo & getBitFieldInfo() const
Definition: CGValue.h:424
bool isThreadLocalRef() const
Definition: CGValue.h:309
KnownNonNull_t isKnownNonNull() const
Definition: CGValue.h:349
TBAAAccessInfo getTBAAInfo() const
Definition: CGValue.h:335
void setNonGC(bool Value)
Definition: CGValue.h:304
Address getVectorAddress() const
Definition: CGValue.h:370
bool isNontemporal() const
Definition: CGValue.h:318
static LValue MakeBitfield(Address Addr, const CGBitFieldInfo &Info, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Create a new object to represent a bit-field access.
Definition: CGValue.h:468
bool isObjCIvar() const
Definition: CGValue.h:297
static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:442
void setAddress(Address address)
Definition: CGValue.h:363
void setBaseIvarExp(Expr *V)
Definition: CGValue.h:333
Address getExtVectorAddress() const
Definition: CGValue.h:401
static LValue MakeMatrixElt(Address matAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:488
Address getMatrixAddress() const
Definition: CGValue.h:387
Address getBitFieldAddress() const
Definition: CGValue.h:415
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:42
bool isScalar() const
Definition: CGValue.h:64
static RValue get(llvm::Value *V)
Definition: CGValue.h:98
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition: CGValue.h:125
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:108
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:83
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:71
An abstract representation of an aligned address.
Definition: Address.h:42
RawAddress withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:100
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:77
llvm::Value * getPointer() const
Definition: Address.h:66
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:386
void disableSanitizerForGlobal(llvm::GlobalVariable *GV)
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
virtual llvm::Constant * getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const
Return a constant used by UBSan as a signature to identify functions possessing type information,...
Definition: TargetInfo.h:237
Complex values, per C99 6.2.5p11.
Definition: Type.h:3146
QualType getElementType() const
Definition: Type.h:3156
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:3477
ConstStmtVisitor - This class implements a simple visitor for Stmt subclasses.
Definition: StmtVisitor.h:196
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1077
Represents a concrete matrix type with constant number of rows and columns.
Definition: Type.h:4233
unsigned getNumRows() const
Returns the number of rows in the matrix.
Definition: Type.h:4251
RecordDecl * getOuterLexicalRecordContext()
Retrieve the outermost lexically enclosing record context.
Definition: DeclBase.cpp:2036
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1265
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition: Expr.h:1463
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition: Expr.cpp:487
ValueDecl * getDecl()
Definition: Expr.h:1333
SourceLocation getLocation() const
Definition: Expr.h:1341
Decl - This represents one declaration (or definition), e.g.
Definition: DeclBase.h:86
T * getAttr() const
Definition: DeclBase.h:576
SourceLocation getLocation() const
Definition: DeclBase.h:442
bool isUsed(bool CheckUsedAttr=true) const
Whether any (re-)declaration of the entity was used, meaning that a definition is required.
Definition: DeclBase.cpp:557
DeclContext * getDeclContext()
Definition: DeclBase.h:451
bool hasAttr() const
Definition: DeclBase.h:580
void ConvertArgToString(ArgumentKind Kind, intptr_t Val, StringRef Modifier, StringRef Argument, ArrayRef< ArgumentValue > PrevArgs, SmallVectorImpl< char > &Output, ArrayRef< intptr_t > QualTypeVals) const
Converts a diagnostic argument (as an intptr_t) into the string that represents it.
Definition: Diagnostic.h:907
Represents an enum.
Definition: Decl.h:3861
bool isFixed() const
Returns true if this is an Objective-C, C++11, or Microsoft-style enumeration with a fixed underlying...
Definition: Decl.h:4075
void getValueRange(llvm::APInt &Max, llvm::APInt &Min) const
Calculates the [Min,Max) values the enum can store based on the NumPositiveBits and NumNegativeBits.
Definition: Decl.cpp:5017
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of enums.
Definition: Type.h:6104
EnumDecl * getDecl() const
Definition: Type.h:6111
ExplicitCastExpr - An explicit cast written in the source code.
Definition: Expr.h:3799
This represents one expression.
Definition: Expr.h:110
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition: Expr.cpp:82
bool isGLValue() const
Definition: Expr.h:280
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition: Expr.cpp:3123
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition: Expr.h:437
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition: Expr.cpp:3096
Expr * IgnoreImplicit() LLVM_READONLY
Skip past any implicit AST nodes which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3084
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3092
bool EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsLValue - Evaluate an expression to see if we can fold it to an lvalue with link time known ...
bool isPRValue() const
Definition: Expr.h:278
bool isLValue() const
isLValue - True if this expression is an "l-value" according to the rules of the current language.
Definition: Expr.h:277
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition: Expr.cpp:1549
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition: Expr.cpp:3593
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3076
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:276
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition: Expr.h:469
bool isFlexibleArrayMemberLike(ASTContext &Context, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution=false) const
Check whether this array fits the idiom of a flexible array member, depending on the value of -fstric...
Definition: Expr.cpp:205
QualType getType() const
Definition: Expr.h:142
bool isOBJCGCCandidate(ASTContext &Ctx) const
isOBJCGCCandidate - Return true if this expression may be used in a read/ write barrier.
Definition: Expr.cpp:3007
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition: Expr.h:6354
Represents a member of a struct/union/class.
Definition: Decl.h:3033
bool isBitField() const
Determines whether this field is a bitfield.
Definition: Decl.h:3136
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition: Decl.h:3118
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition: Decl.h:3264
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition: Decl.cpp:4726
FullExpr - Represents a "full-expression" node.
Definition: Expr.h:1044
const Expr * getSubExpr() const
Definition: Expr.h:1057
Represents a function declaration or definition.
Definition: Decl.h:1935
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition: Decl.cpp:3653
Represents a prototype with parameter type info, e.g.
Definition: Type.h:5108
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:56
const Decl * getDecl() const
Definition: GlobalDecl.h:103
This class represents temporary values used to represent inout and out arguments in HLSL.
Definition: Expr.h:7152
Describes an C or C++ initializer list.
Definition: Expr.h:5088
SanitizerSet Sanitize
Set of enabled sanitizers.
Definition: LangOptions.h:506
virtual void mangleCXXRTTI(QualType T, raw_ostream &)=0
unsigned getBlockId(const BlockDecl *BD, bool Local)
Definition: Mangle.h:84
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition: ExprCXX.h:4732
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition: ExprCXX.h:4757
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition: ExprCXX.h:4749
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition: ExprCXX.h:4782
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition: Expr.h:2796
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition: Expr.h:3236
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition: Expr.h:3319
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why? This is only meaningful if the named memb...
Definition: Expr.h:3460
Expr * getBase() const
Definition: Expr.h:3313
SourceLocation getExprLoc() const LLVM_READONLY
Definition: Expr.h:3431
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition: Type.h:3520
bool isObjCBOOLType(QualType T) const
Returns true if.
Definition: NSAPI.cpp:481
This represents a decl that may have a name.
Definition: Decl.h:253
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition: Decl.h:280
A C++ nested-name-specifier augmented with source location information.
bool containsType(SanitizerMask Mask, StringRef MangledTypeName, StringRef Category=StringRef()) const
ObjCEncodeExpr, used for @encode in Objective-C.
Definition: ExprObjC.h:410
Represents an ObjC class declaration.
Definition: DeclObjC.h:1153
ObjCIvarDecl - Represents an ObjC instance variable.
Definition: DeclObjC.h:1951
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition: ExprObjC.h:549
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:941
Represents a class type in Objective C.
Definition: Type.h:7332
ObjCSelectorExpr used for @selector in Objective-C.
Definition: ExprObjC.h:455
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition: Expr.h:1173
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition: Expr.h:1223
bool isUnique() const
Definition: Expr.h:1231
ParenExpr - This represents a parenthesized expression, e.g.
Definition: Expr.h:2170
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:3199
QualType getPointeeType() const
Definition: Type.h:3209
[C99 6.4.2.2] - A predefined identifier such as func.
Definition: Expr.h:1991
StringRef getIdentKindName() const
Definition: Expr.h:2048
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
bool isValid() const
unsigned getLine() const
Return the presumed line number of this location.
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition: Expr.h:6546
const Expr *const * const_semantics_iterator
Definition: Expr.h:6611
A (possibly-)qualified type.
Definition: Type.h:929
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition: Type.h:8021
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:996
LangAS getAddressSpace() const
Return the address space of this type.
Definition: Type.h:8063
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:7977
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition: Type.h:1433
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Definition: Type.h:8031
QualType withCVRQualifiers(unsigned CVR) const
Definition: Type.h:1174
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition: Type.h:1531
bool isConstantStorage(const ASTContext &Ctx, bool ExcludeCtor, bool ExcludeDtor)
Definition: Type.h:1028
The collection of all-type qualifiers we support.
Definition: Type.h:324
unsigned getCVRQualifiers() const
Definition: Type.h:481
GC getObjCGCAttr() const
Definition: Type.h:512
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition: Type.h:354
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition: Type.h:347
@ OCL_None
There is no lifetime qualification on this type.
Definition: Type.h:343
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition: Type.h:357
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition: Type.h:360
bool hasConst() const
Definition: Type.h:450
void addCVRQualifiers(unsigned mask)
Definition: Type.h:495
void removeObjCGCAttr()
Definition: Type.h:516
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition: Type.h:643
void setAddressSpace(LangAS space)
Definition: Type.h:584
bool hasVolatile() const
Definition: Type.h:460
ObjCLifetime getObjCLifetime() const
Definition: Type.h:538
void addVolatile()
Definition: Type.h:463
Represents a struct/union/class.
Definition: Decl.h:4162
field_range fields() const
Definition: Decl.h:4376
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
Definition: Decl.h:4361
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:6078
decl_type * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Definition: Redeclarable.h:203
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Encodes a location in the source.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location of a SourceLocation specifies.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4466
Stmt - This represents one statement.
Definition: Stmt.h:84
StmtClass getStmtClass() const
Definition: Stmt.h:1380
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.cpp:346
StringLiteral - This represents a string literal expression, e.g.
Definition: Expr.h:1778
bool isUnion() const
Definition: Decl.h:3784
Exposes information about the current target.
Definition: TargetInfo.h:220
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
Definition: TargetInfo.h:1262
virtual StringRef getABI() const
Get the ABI currently in use.
Definition: TargetInfo.h:1330
const Type * getTypeForDecl() const
Definition: Decl.h:3409
The type-property cache.
Definition: Type.cpp:4499
The base class of the type hierarchy.
Definition: Type.h:1828
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition: Type.cpp:1916
bool isBlockPointerType() const
Definition: Type.h:8206
bool isVoidType() const
Definition: Type.h:8516
bool isBooleanType() const
Definition: Type.h:8648
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition: Type.cpp:2201
bool hasAttr(attr::Kind AK) const
Determine whether this type had the specified attribute applied to it (looking through top-level type...
Definition: Type.cpp:1933
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition: Type.cpp:2180
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition: Type.h:8819
bool isConstantArrayType() const
Definition: Type.h:8268
bool isArrayType() const
Definition: Type.h:8264
bool isFunctionPointerType() const
Definition: Type.h:8232
bool isCountAttributedType() const
Definition: Type.cpp:727
bool isArithmeticType() const
Definition: Type.cpp:2315
bool isConstantMatrixType() const
Definition: Type.h:8326
bool isPointerType() const
Definition: Type.h:8192
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:8560
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:8810
bool isReferenceType() const
Definition: Type.h:8210
bool isVariableArrayType() const
Definition: Type.h:8276
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:738
bool isExtVectorBoolType() const
Definition: Type.h:8312
bool isBitIntType() const
Definition: Type.h:8430
bool isAnyComplexType() const
Definition: Type.h:8300
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition: Type.h:8691
bool isAtomicType() const
Definition: Type.h:8347
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2725
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition: Type.cpp:2396
bool isFunctionType() const
Definition: Type.h:8188
bool isObjCObjectPointerType() const
Definition: Type.h:8334
bool isVectorType() const
Definition: Type.h:8304
bool isFloatingType() const
Definition: Type.cpp:2283
bool isSubscriptableVectorType() const
Definition: Type.h:8318
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:8741
const Type * getUnqualifiedDesugaredType() const
Return the specified type with any "sugar" removed from the type, removing any typedefs,...
Definition: Type.cpp:638
bool isRecordType() const
Definition: Type.h:8292
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition: Type.cpp:1920
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2232
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4750
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition: Decl.h:671
QualType getType() const
Definition: Decl.h:682
QualType getType() const
Definition: Value.cpp:234
Represents a variable declaration or definition.
Definition: Decl.h:882
TLSKind getTLSKind() const
Definition: Decl.cpp:2157
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition: Decl.cpp:2355
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition: Decl.h:1135
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition: Decl.h:908
@ TLS_None
Not a TLS variable.
Definition: Decl.h:902
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:3809
Represents a GCC generic vector type.
Definition: Type.h:4035
unsigned getNumElements() const
Definition: Type.h:4050
#define INT_MIN
Definition: limits.h:55
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
Definition: CGValue.h:141
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
bool isEmptyFieldForLayout(const ASTContext &Context, const FieldDecl *FD)
isEmptyFieldForLayout - Return true iff the field is "empty", that is, either a zero-width bit-field ...
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
Definition: EHScopeStack.h:80
@ ARCImpreciseLifetime
Definition: CGValue.h:136
static AlignmentSource getFieldAlignmentSource(AlignmentSource Source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
Definition: CGValue.h:159
@ NotKnownNonNull
Definition: Address.h:33
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
Matches all kinds of arrays.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const AstTypeMatcher< FunctionType > functionType
Matches FunctionType nodes.
constexpr Variable var(Literal L)
Returns the variable of L.
Definition: CNFFormula.h:64
const void * Store
Store - This opaque type encapsulates an immutable mapping from locations to values.
Definition: StoreRef.h:27
bool This(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2387
bool Zero(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2350
bool IsNonNull(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2375
bool Load(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1693
bool Cast(InterpState &S, CodePtr OpPC)
Definition: Interp.h:2126
The JSON file list parser is used to communicate input to InstallAPI.
@ OpenCL
Definition: LangStandard.h:65
@ CPlusPlus
Definition: LangStandard.h:55
@ OK_BitField
A bitfield object is a bitfield on a C or C++ record.
Definition: Specifiers.h:154
@ SC_Register
Definition: Specifiers.h:257
@ Asm
Assembly: we accept this only so that we can preprocess it.
StorageDuration
The storage duration for an object (per C++ [basic.stc]).
Definition: Specifiers.h:327
@ SD_Thread
Thread storage duration.
Definition: Specifiers.h:330
@ SD_Static
Static storage duration.
Definition: Specifiers.h:331
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition: Specifiers.h:328
@ SD_Automatic
Automatic storage duration (most local variables).
Definition: Specifiers.h:329
@ SD_Dynamic
Dynamic storage duration.
Definition: Specifiers.h:332
@ Result
The result type of a method or function.
@ Dtor_Complete
Complete object dtor.
Definition: ABI.h:35
llvm::cl::opt< bool > ClSanitizeGuardChecks
const FunctionProtoType * T
LangAS getLangASFromTargetAS(unsigned TargetAS)
Definition: AddressSpaces.h:87
@ Interface
The "__interface" keyword introduces the elaborated-type-specifier.
bool isLambdaMethod(const DeclContext *DC)
Definition: ASTLambda.h:39
@ Other
Other implicit parameter.
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition: Specifiers.h:177
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition: Specifiers.h:180
unsigned long uint64_t
unsigned int uint32_t
__INTPTR_TYPE__ intptr_t
A signed integer type with the property that any valid pointer to void can be converted to this type,...
Structure with information about how a bitfield should be accessed.
CharUnits VolatileStorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned VolatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned VolatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned IsSigned
Whether the bit-field is signed.
static Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc)
Returns address of the threadprivate variable for the current thread.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
unsigned char PointerWidthInBits
The width of a pointer into the generic address space.
llvm::MDNode * AccessType
AccessType - The final access type.
Definition: CodeGenTBAA.h:105
uint64_t Offset
Offset - The byte offset of the final access within the base one.
Definition: CodeGenTBAA.h:109
static TBAAAccessInfo getMayAliasInfo()
Definition: CodeGenTBAA.h:63
uint64_t Size
Size - The size of access, in bytes.
Definition: CodeGenTBAA.h:112
llvm::MDNode * BaseType
BaseType - The base/leading access type.
Definition: CodeGenTBAA.h:101
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:642
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:644
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition: Expr.h:609
PointerAuthSchema FunctionPointers
The ABI for C function pointers.
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition: Sanitizers.h:182
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:169
An adjustment to be made to the temporary created when emitting a reference binding,...
Definition: Expr.h:66