CGExpr.cpp
1//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "ABIInfoImpl.h"
14#include "CGCUDARuntime.h"
15#include "CGCXXABI.h"
16#include "CGCall.h"
17#include "CGCleanup.h"
18#include "CGDebugInfo.h"
19#include "CGObjCRuntime.h"
20#include "CGOpenMPRuntime.h"
21#include "CGRecordLayout.h"
22#include "CodeGenFunction.h"
23#include "CodeGenModule.h"
24#include "ConstantEmitter.h"
25#include "TargetInfo.h"
27#include "clang/AST/Attr.h"
28#include "clang/AST/DeclObjC.h"
29#include "clang/AST/NSAPI.h"
34#include "llvm/ADT/Hashing.h"
35#include "llvm/ADT/STLExtras.h"
36#include "llvm/ADT/StringExtras.h"
37#include "llvm/IR/DataLayout.h"
38#include "llvm/IR/Intrinsics.h"
39#include "llvm/IR/IntrinsicsWebAssembly.h"
40#include "llvm/IR/LLVMContext.h"
41#include "llvm/IR/MDBuilder.h"
42#include "llvm/IR/MatrixBuilder.h"
43#include "llvm/Passes/OptimizationLevel.h"
44#include "llvm/Support/ConvertUTF.h"
45#include "llvm/Support/MathExtras.h"
46#include "llvm/Support/Path.h"
47#include "llvm/Support/SaveAndRestore.h"
48#include "llvm/Support/xxhash.h"
49#include "llvm/Transforms/Utils/SanitizerStats.h"
50
51#include <optional>
52#include <string>
53
54using namespace clang;
55using namespace CodeGen;
56
57// Experiment to make sanitizers easier to debug
58static llvm::cl::opt<bool> ClSanitizeDebugDeoptimization(
59 "ubsan-unique-traps", llvm::cl::Optional,
60 llvm::cl::desc("Deoptimize traps for UBSAN so there is 1 trap per check."));
61
62// TODO: Introduce frontend options to enable this per sanitizer, similar to
63// `-fsanitize-trap`.
64static llvm::cl::opt<bool> ClSanitizeGuardChecks(
65 "ubsan-guard-checks", llvm::cl::Optional,
66 llvm::cl::desc("Guard UBSAN checks with `llvm.allow.ubsan.check()`."));
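// Editorial note (not part of the original source): these are llvm::cl options
// rather than driver flags, so they are normally reached through -mllvm,
// e.g. (illustrative invocation):
//   clang -fsanitize=undefined -fsanitize-trap=undefined \
//     -mllvm -ubsan-unique-traps -c test.c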
67
68//===--------------------------------------------------------------------===//
69// Miscellaneous Helper Methods
70//===--------------------------------------------------------------------===//
71
72/// CreateTempAlloca - This creates an alloca and inserts it into the entry
73/// block.
75CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty, CharUnits Align,
76 const Twine &Name,
77 llvm::Value *ArraySize) {
78 auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
79 Alloca->setAlignment(Align.getAsAlign());
80 return RawAddress(Alloca, Ty, Align, KnownNonNull);
81}
82
83/// CreateTempAlloca - This creates an alloca and inserts it into the entry
84/// block. The alloca is cast to the default address space if necessary.
86 const Twine &Name,
87 llvm::Value *ArraySize,
88 RawAddress *AllocaAddr) {
89 auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
90 if (AllocaAddr)
91 *AllocaAddr = Alloca;
92 llvm::Value *V = Alloca.getPointer();
93 // Alloca always returns a pointer in the alloca address space, which may
94 // be different from the address space expected by the language. For example,
95 // in C++ the auto variables are in the default address space. Therefore
96 // cast the alloca to the default address space when necessary.
97 if (getASTAllocaAddressSpace() != LangAS::Default) {
98 auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
99 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
100 // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
101 // otherwise alloca is inserted at the current insertion point of the
102 // builder.
103 if (!ArraySize)
104 Builder.SetInsertPoint(getPostAllocaInsertPoint());
105 V = getTargetHooks().performAddrSpaceCast(
106 *this, V, getASTAllocaAddressSpace(), LangAS::Default,
107 Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
108 }
109
110 return RawAddress(V, Ty, Align, KnownNonNull);
111}
112
113/// CreateTempAlloca - This creates an alloca and inserts it into the entry
114/// block if \p ArraySize is nullptr, otherwise inserts it at the current
115/// insertion point of the builder.
116llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
117 const Twine &Name,
118 llvm::Value *ArraySize) {
119 llvm::AllocaInst *Alloca;
120 if (ArraySize)
121 Alloca = Builder.CreateAlloca(Ty, ArraySize, Name);
122 else
123 Alloca = new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
124 ArraySize, Name, &*AllocaInsertPt);
125 if (Allocas) {
126 Allocas->Add(Alloca);
127 }
128 return Alloca;
129}
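// Illustrative sketch (not part of the original source): with a null
// ArraySize the alloca is placed at AllocaInsertPt in the entry block, so a
// scalar temporary typically lowers to something like
//   entry:
//     %tmp = alloca i32, align 4
// whereas a non-null ArraySize yields a dynamic alloca at the builder's
// current insertion point.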
130
131/// CreateDefaultAlignTempAlloca - This creates an alloca with the
132/// default alignment of the corresponding LLVM type, which is *not*
133/// guaranteed to be related in any way to the expected alignment of
134/// an AST type that might have been lowered to Ty.
136 const Twine &Name) {
137 CharUnits Align =
138 CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlign(Ty));
139 return CreateTempAlloca(Ty, Align, Name);
140}
141
144 return CreateTempAlloca(ConvertType(Ty), Align, Name);
145}
146
148 RawAddress *Alloca) {
149 // FIXME: Should we prefer the preferred type alignment here?
150 return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
151}
152
154 const Twine &Name,
155 RawAddress *Alloca) {
157 /*ArraySize=*/nullptr, Alloca);
158
159 if (Ty->isConstantMatrixType()) {
160 auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
161 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
162 ArrayTy->getNumElements());
163
164 Result = Address(Result.getPointer(), VectorTy, Result.getAlignment(),
166 }
167 return Result;
168}
169
171 CharUnits Align,
172 const Twine &Name) {
173 return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
174}
175
177 const Twine &Name) {
178 return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
179 Name);
180}
181
182/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
183/// expression and compare the result against zero, returning an Int1Ty value.
184llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
185 PGO.setCurrentStmt(E);
186 if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
187 llvm::Value *MemPtr = EmitScalarExpr(E);
188 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
189 }
190
191 QualType BoolTy = getContext().BoolTy;
193 CGFPOptionsRAII FPOptsRAII(*this, E);
194 if (!E->getType()->isAnyComplexType())
195 return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
196
198 Loc);
199}
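// Illustrative example (not part of the original source): for a condition
// such as `if (p)` with `int *p`, this emits roughly
//   %tobool = icmp ne ptr %p, null
// while member-pointer conditions are instead lowered through the C++ ABI's
// EmitMemberPointerIsNotNull hook, as handled above.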
200
201/// EmitIgnoredExpr - Emit code to compute the specified expression,
202/// ignoring the result.
204 if (E->isPRValue())
205 return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);
206
207 // If this is a bitfield-resulting conditional operator, we can special-case
208 // emitting it. The normal 'EmitLValue' version of this is particularly
209 // difficult to codegen for, since creating a single "LValue" for two
210 // differently sized arguments here is not particularly doable.
211 if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
213 if (CondOp->getObjectKind() == OK_BitField)
214 return EmitIgnoredConditionalOperator(CondOp);
215 }
216
217 // Just emit it as an l-value and drop the result.
218 EmitLValue(E);
219}
220
221/// EmitAnyExpr - Emit code to compute the specified expression which
222/// can have any type. The result is returned as an RValue struct.
223/// If this is an aggregate expression, AggSlot indicates where the
224/// result should be returned.
226 AggValueSlot aggSlot,
227 bool ignoreResult) {
228 switch (getEvaluationKind(E->getType())) {
229 case TEK_Scalar:
230 return RValue::get(EmitScalarExpr(E, ignoreResult));
231 case TEK_Complex:
232 return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
233 case TEK_Aggregate:
234 if (!ignoreResult && aggSlot.isIgnored())
235 aggSlot = CreateAggTemp(E->getType(), "agg-temp");
236 EmitAggExpr(E, aggSlot);
237 return aggSlot.asRValue();
238 }
239 llvm_unreachable("bad evaluation kind");
240}
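// Illustrative examples (not part of the original source) of the three
// evaluation kinds handled above: an `int` expression is TEK_Scalar and
// yields a single llvm::Value; a `_Complex double` expression is TEK_Complex
// and yields a (real, imag) pair; a struct-typed expression is TEK_Aggregate
// and is evaluated into the provided (or freshly created) AggValueSlot.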
241
242/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
243/// always be accessible even if no aggregate location is provided.
246
248 AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
249 return EmitAnyExpr(E, AggSlot);
250}
251
252/// EmitAnyExprToMem - Evaluate an expression into a given memory
253/// location.
255 Address Location,
256 Qualifiers Quals,
257 bool IsInit) {
258 // FIXME: This function should take an LValue as an argument.
259 switch (getEvaluationKind(E->getType())) {
260 case TEK_Complex:
262 /*isInit*/ false);
263 return;
264
265 case TEK_Aggregate: {
266 EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
271 return;
272 }
273
274 case TEK_Scalar: {
275 RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
276 LValue LV = MakeAddrLValue(Location, E->getType());
278 return;
279 }
280 }
281 llvm_unreachable("bad evaluation kind");
282}
283
284static void
286 const Expr *E, Address ReferenceTemporary) {
287 // Objective-C++ ARC:
288 // If we are binding a reference to a temporary that has ownership, we
289 // need to perform retain/release operations on the temporary.
290 //
291 // FIXME: This should be looking at E, not M.
292 if (auto Lifetime = M->getType().getObjCLifetime()) {
293 switch (Lifetime) {
296 // Carry on to normal cleanup handling.
297 break;
298
300 // Nothing to do; cleaned up by an autorelease pool.
301 return;
302
305 switch (StorageDuration Duration = M->getStorageDuration()) {
306 case SD_Static:
307 // Note: we intentionally do not register a cleanup to release
308 // the object on program termination.
309 return;
310
311 case SD_Thread:
312 // FIXME: We should probably register a cleanup in this case.
313 return;
314
315 case SD_Automatic:
319 if (Lifetime == Qualifiers::OCL_Strong) {
320 const ValueDecl *VD = M->getExtendingDecl();
321 bool Precise = isa_and_nonnull<VarDecl>(VD) &&
322 VD->hasAttr<ObjCPreciseLifetimeAttr>();
326 } else {
327 // __weak objects always get EH cleanups; otherwise, exceptions
328 // could cause really nasty crashes instead of mere leaks.
331 }
332 if (Duration == SD_FullExpression)
333 CGF.pushDestroy(CleanupKind, ReferenceTemporary,
334 M->getType(), *Destroy,
336 else
337 CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
338 M->getType(),
339 *Destroy, CleanupKind & EHCleanup);
340 return;
341
342 case SD_Dynamic:
343 llvm_unreachable("temporary cannot have dynamic storage duration");
344 }
345 llvm_unreachable("unknown storage duration");
346 }
347 }
348
349 CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
350 if (const RecordType *RT =
352 // Get the destructor for the reference temporary.
353 auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
354 if (!ClassDecl->hasTrivialDestructor())
355 ReferenceTemporaryDtor = ClassDecl->getDestructor();
356 }
357
358 if (!ReferenceTemporaryDtor)
359 return;
360
361 // Call the destructor for the temporary.
362 switch (M->getStorageDuration()) {
363 case SD_Static:
364 case SD_Thread: {
365 llvm::FunctionCallee CleanupFn;
366 llvm::Constant *CleanupArg;
367 if (E->getType()->isArrayType()) {
369 ReferenceTemporary, E->getType(),
371 dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
372 CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
373 } else {
374 CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
375 GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
376 CleanupArg = cast<llvm::Constant>(ReferenceTemporary.emitRawPointer(CGF));
377 }
379 CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
380 break;
381 }
382
384 CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
386 CGF.getLangOpts().Exceptions);
387 break;
388
389 case SD_Automatic:
391 ReferenceTemporary, E->getType(),
393 CGF.getLangOpts().Exceptions);
394 break;
395
396 case SD_Dynamic:
397 llvm_unreachable("temporary cannot have dynamic storage duration");
398 }
399}
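// Illustrative example (not part of the original source): for
//   const std::string &r = std::string("tmp");
// the lifetime-extended temporary has automatic storage duration, so the
// code above pushes a destructor cleanup that runs ~basic_string() when the
// reference goes out of scope; an ARC __strong temporary instead gets a
// release cleanup selected by the ownership switch at the top.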
400
403 const Expr *Inner,
404 RawAddress *Alloca = nullptr) {
405 auto &TCG = CGF.getTargetHooks();
406 switch (M->getStorageDuration()) {
408 case SD_Automatic: {
409 // If we have a constant temporary array or record, try to promote it into a
410 // constant global under the same rules under which a normal constant would
411 // have been promoted. This is easier on the optimizer and generally emits
412 // fewer instructions.
413 QualType Ty = Inner->getType();
414 if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
415 (Ty->isArrayType() || Ty->isRecordType()) &&
416 Ty.isConstantStorage(CGF.getContext(), true, false))
417 if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
418 auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
419 auto *GV = new llvm::GlobalVariable(
420 CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
421 llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
422 llvm::GlobalValue::NotThreadLocal,
424 CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
425 GV->setAlignment(alignment.getAsAlign());
426 llvm::Constant *C = GV;
427 if (AS != LangAS::Default)
428 C = TCG.performAddrSpaceCast(
429 CGF.CGM, GV, AS, LangAS::Default,
430 GV->getValueType()->getPointerTo(
432 // FIXME: Should we put the new global into a COMDAT?
433 return RawAddress(C, GV->getValueType(), alignment);
434 }
435 return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
436 }
437 case SD_Thread:
438 case SD_Static:
439 return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
440
441 case SD_Dynamic:
442 llvm_unreachable("temporary can't have dynamic storage duration");
443 }
444 llvm_unreachable("unknown storage duration");
445}
446
447/// Helper method to check if the underlying ABI is AAPCS
448static bool isAAPCS(const TargetInfo &TargetInfo) {
449 return TargetInfo.getABI().starts_with("aapcs");
450}
451
454 const Expr *E = M->getSubExpr();
455
456 assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
457 !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
458 "Reference should never be pseudo-strong!");
459
460 // FIXME: Ideally this would use EmitAnyExprToMem; however, we cannot do so,
461 // as that would cause the lifetime adjustment to be lost for ARC.
462 auto ownership = M->getType().getObjCLifetime();
463 if (ownership != Qualifiers::OCL_None &&
464 ownership != Qualifiers::OCL_ExplicitNone) {
466 if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
467 llvm::Type *Ty = ConvertTypeForMem(E->getType());
468 Object = Object.withElementType(Ty);
469
470 // createReferenceTemporary will promote the temporary to a global with a
471 // constant initializer if it can. It can only do this to a value of
472 // ARC-manageable type if the value is global and therefore "immune" to
473 // ref-counting operations. Therefore we have no need to emit either a
474 // dynamic initialization or a cleanup and we can just return the address
475 // of the temporary.
476 if (Var->hasInitializer())
477 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
478
479 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
480 }
481 LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
483
484 switch (getEvaluationKind(E->getType())) {
485 default: llvm_unreachable("expected scalar or aggregate expression");
486 case TEK_Scalar:
487 EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
488 break;
489 case TEK_Aggregate: {
496 break;
497 }
498 }
499
500 pushTemporaryCleanup(*this, M, E, Object);
501 return RefTempDst;
502 }
503
506 E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
507
508 for (const auto &Ignored : CommaLHSs)
509 EmitIgnoredExpr(Ignored);
510
511 if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
512 if (opaque->getType()->isRecordType()) {
513 assert(Adjustments.empty());
514 return EmitOpaqueValueLValue(opaque);
515 }
516 }
517
518 // Create and initialize the reference temporary.
519 RawAddress Alloca = Address::invalid();
520 RawAddress Object = createReferenceTemporary(*this, M, E, &Alloca);
521 if (auto *Var = dyn_cast<llvm::GlobalVariable>(
522 Object.getPointer()->stripPointerCasts())) {
523 llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
524 Object = Object.withElementType(TemporaryType);
525 // If the temporary is a global and has a constant initializer or is a
526 // constant temporary that we promoted to a global, we may have already
527 // initialized it.
528 if (!Var->hasInitializer()) {
529 Var->setInitializer(CGM.EmitNullConstant(E->getType()));
530 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
531 }
532 } else {
533 switch (M->getStorageDuration()) {
534 case SD_Automatic:
535 if (auto *Size = EmitLifetimeStart(
536 CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
537 Alloca.getPointer())) {
538 pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
539 Alloca, Size);
540 }
541 break;
542
543 case SD_FullExpression: {
544 if (!ShouldEmitLifetimeMarkers)
545 break;
546
547 // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
548 // marker. Instead, start the lifetime of a conditional temporary earlier
549 // so that it's unconditional. Don't do this with sanitizers which need
550 // more precise lifetime marks. However, when inside an "await.suspend"
551 // block, we should always avoid the conditional cleanup because it creates
552 // a boolean marker that lives across await_suspend, which can destroy the
553 // coro frame.
554 ConditionalEvaluation *OldConditional = nullptr;
555 CGBuilderTy::InsertPoint OldIP;
557 ((!SanOpts.has(SanitizerKind::HWAddress) &&
558 !SanOpts.has(SanitizerKind::Memory) &&
559 !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) ||
560 inSuspendBlock())) {
561 OldConditional = OutermostConditional;
562 OutermostConditional = nullptr;
563
564 OldIP = Builder.saveIP();
565 llvm::BasicBlock *Block = OldConditional->getStartingBlock();
566 Builder.restoreIP(CGBuilderTy::InsertPoint(
567 Block, llvm::BasicBlock::iterator(Block->back())));
568 }
569
570 if (auto *Size = EmitLifetimeStart(
571 CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
572 Alloca.getPointer())) {
573 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
574 Size);
575 }
576
577 if (OldConditional) {
578 OutermostConditional = OldConditional;
579 Builder.restoreIP(OldIP);
580 }
581 break;
582 }
583
584 default:
585 break;
586 }
587 EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
588 }
589 pushTemporaryCleanup(*this, M, E, Object);
590
591 // Perform derived-to-base casts and/or field accesses, to get from the
592 // temporary object we created (and, potentially, for which we extended
593 // the lifetime) to the subobject we're binding the reference to.
594 for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
595 switch (Adjustment.Kind) {
597 Object =
598 GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
599 Adjustment.DerivedToBase.BasePath->path_begin(),
600 Adjustment.DerivedToBase.BasePath->path_end(),
601 /*NullCheckValue=*/ false, E->getExprLoc());
602 break;
603
606 LV = EmitLValueForField(LV, Adjustment.Field);
607 assert(LV.isSimple() &&
608 "materialized temporary field is not a simple lvalue");
609 Object = LV.getAddress();
610 break;
611 }
612
614 llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
616 Adjustment.Ptr.MPT);
617 break;
618 }
619 }
620 }
621
622 return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
623}
624
625RValue
627 // Emit the expression as an lvalue.
628 LValue LV = EmitLValue(E);
629 assert(LV.isSimple());
630 llvm::Value *Value = LV.getPointer(*this);
631
633 // C++11 [dcl.ref]p5 (as amended by core issue 453):
634 // If a glvalue to which a reference is directly bound designates neither
635 // an existing object or function of an appropriate type nor a region of
636 // storage of suitable size and alignment to contain an object of the
637 // reference's type, the behavior is undefined.
638 QualType Ty = E->getType();
640 }
641
642 return RValue::get(Value);
643}
644
645
646/// getAccessedFieldNo - Given an encoded value and a result number, return the
647/// input field number being accessed.
648unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
649 const llvm::Constant *Elts) {
650 return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
651 ->getZExtValue();
652}
653
654static llvm::Value *emitHashMix(CGBuilderTy &Builder, llvm::Value *Acc,
655 llvm::Value *Ptr) {
656 llvm::Value *A0 =
657 Builder.CreateMul(Ptr, Builder.getInt64(0xbf58476d1ce4e5b9u));
658 llvm::Value *A1 =
659 Builder.CreateXor(A0, Builder.CreateLShr(A0, Builder.getInt64(31)));
660 return Builder.CreateXor(Acc, A1);
661}
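// Editorial note (not part of the original source): this is a splitmix64-
// style mixing step; in scalar terms it computes
//   A0 = Ptr * 0xbf58476d1ce4e5b9
//   A1 = A0 ^ (A0 >> 31)
//   result = Acc ^ A1
// and is used below to fold the loaded vtable pointer into the type hash for
// the -fsanitize=vptr cache lookup.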
662
663bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
664 return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
666}
667
668bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
670 return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
671 (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
674}
675
677 return SanOpts.has(SanitizerKind::Null) ||
678 SanOpts.has(SanitizerKind::Alignment) ||
679 SanOpts.has(SanitizerKind::ObjectSize) ||
680 SanOpts.has(SanitizerKind::Vptr);
681}
682
684 llvm::Value *Ptr, QualType Ty,
685 CharUnits Alignment,
686 SanitizerSet SkippedChecks,
687 llvm::Value *ArraySize) {
689 return;
690
691 // Don't check pointers outside the default address space. The null check
692 // isn't correct, the object-size check isn't supported by LLVM, and we can't
693 // communicate the addresses to the runtime handler for the vptr check.
694 if (Ptr->getType()->getPointerAddressSpace())
695 return;
696
697 // Don't check pointers to volatile data. The behavior here is implementation-
698 // defined.
699 if (Ty.isVolatileQualified())
700 return;
701
702 SanitizerScope SanScope(this);
703
705 llvm::BasicBlock *Done = nullptr;
706
707 // Quickly determine whether we have a pointer to an alloca. It's possible
708 // to skip null checks, and some alignment checks, for these pointers. This
709 // can reduce compile-time significantly.
710 auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());
711
712 llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
713 llvm::Value *IsNonNull = nullptr;
714 bool IsGuaranteedNonNull =
715 SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
716 bool AllowNullPointers = isNullPointerAllowed(TCK);
717 if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
718 !IsGuaranteedNonNull) {
719 // The glvalue must not be an empty glvalue.
720 IsNonNull = Builder.CreateIsNotNull(Ptr);
721
722 // The IR builder can constant-fold the null check if the pointer points to
723 // a constant.
724 IsGuaranteedNonNull = IsNonNull == True;
725
726 // Skip the null check if the pointer is known to be non-null.
727 if (!IsGuaranteedNonNull) {
728 if (AllowNullPointers) {
729 // When performing pointer casts, it's OK if the value is null.
730 // Skip the remaining checks in that case.
731 Done = createBasicBlock("null");
732 llvm::BasicBlock *Rest = createBasicBlock("not.null");
733 Builder.CreateCondBr(IsNonNull, Rest, Done);
734 EmitBlock(Rest);
735 } else {
736 Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
737 }
738 }
739 }
740
741 if (SanOpts.has(SanitizerKind::ObjectSize) &&
742 !SkippedChecks.has(SanitizerKind::ObjectSize) &&
743 !Ty->isIncompleteType()) {
745 llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
746 if (ArraySize)
747 Size = Builder.CreateMul(Size, ArraySize);
748
749 // Degenerate case: new X[0] does not need an objectsize check.
750 llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
751 if (!ConstantSize || !ConstantSize->isNullValue()) {
752 // The glvalue must refer to a large enough storage region.
753 // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
754 // to check this.
755 // FIXME: Get object address space
756 llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
757 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
758 llvm::Value *Min = Builder.getFalse();
759 llvm::Value *NullIsUnknown = Builder.getFalse();
760 llvm::Value *Dynamic = Builder.getFalse();
761 llvm::Value *LargeEnough = Builder.CreateICmpUGE(
762 Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic}), Size);
763 Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
764 }
765 }
766
767 llvm::MaybeAlign AlignVal;
768 llvm::Value *PtrAsInt = nullptr;
769
770 if (SanOpts.has(SanitizerKind::Alignment) &&
771 !SkippedChecks.has(SanitizerKind::Alignment)) {
772 AlignVal = Alignment.getAsMaybeAlign();
773 if (!Ty->isIncompleteType() && !AlignVal)
774 AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
775 /*ForPointeeType=*/true)
777
778 // The glvalue must be suitably aligned.
779 if (AlignVal && *AlignVal > llvm::Align(1) &&
780 (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
781 PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
782 llvm::Value *Align = Builder.CreateAnd(
783 PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
784 llvm::Value *Aligned =
785 Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
786 if (Aligned != True)
787 Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
788 }
789 }
790
791 if (Checks.size() > 0) {
792 llvm::Constant *StaticData[] = {
794 llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
795 llvm::ConstantInt::get(Int8Ty, TCK)};
796 EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
797 PtrAsInt ? PtrAsInt : Ptr);
798 }
799
800 // If possible, check that the vptr indicates that there is a subobject of
801 // type Ty at offset zero within this object.
802 //
803 // C++11 [basic.life]p5,6:
804 // [For storage which does not refer to an object within its lifetime]
805 // The program has undefined behavior if:
806 // -- the [pointer or glvalue] is used to access a non-static data member
807 // or call a non-static member function
808 if (SanOpts.has(SanitizerKind::Vptr) &&
809 !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
810 // Ensure that the pointer is non-null before loading it. If there is no
811 // compile-time guarantee, reuse the run-time null check or emit a new one.
812 if (!IsGuaranteedNonNull) {
813 if (!IsNonNull)
814 IsNonNull = Builder.CreateIsNotNull(Ptr);
815 if (!Done)
816 Done = createBasicBlock("vptr.null");
817 llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
818 Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
819 EmitBlock(VptrNotNull);
820 }
821
822 // Compute a deterministic hash of the mangled name of the type.
823 SmallString<64> MangledName;
824 llvm::raw_svector_ostream Out(MangledName);
826 Out);
827
828 // Skip the check if the mangled type name is in the NoSanitizeList.
829 if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
830 Out.str())) {
831 // Load the vptr, and mix it with TypeHash.
832 llvm::Value *TypeHash =
833 llvm::ConstantInt::get(Int64Ty, xxh3_64bits(Out.str()));
834
835 llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
836 Address VPtrAddr(Ptr, IntPtrTy, getPointerAlign());
837 llvm::Value *VPtrVal = GetVTablePtr(VPtrAddr, VPtrTy,
838 Ty->getAsCXXRecordDecl(),
840 VPtrVal = Builder.CreateBitOrPointerCast(VPtrVal, IntPtrTy);
841
842 llvm::Value *Hash =
843 emitHashMix(Builder, TypeHash, Builder.CreateZExt(VPtrVal, Int64Ty));
844 Hash = Builder.CreateTrunc(Hash, IntPtrTy);
845
846 // Look the hash up in our cache.
847 const int CacheSize = 128;
848 llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
849 llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
850 "__ubsan_vptr_type_cache");
851 llvm::Value *Slot = Builder.CreateAnd(Hash,
852 llvm::ConstantInt::get(IntPtrTy,
853 CacheSize-1));
854 llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
855 llvm::Value *CacheVal = Builder.CreateAlignedLoad(
856 IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
858
859 // If the hash isn't in the cache, call a runtime handler to perform the
860 // hard work of checking whether the vptr is for an object of the right
861 // type. This will either fill in the cache and return, or produce a
862 // diagnostic.
863 llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
864 llvm::Constant *StaticData[] = {
868 llvm::ConstantInt::get(Int8Ty, TCK)
869 };
870 llvm::Value *DynamicData[] = { Ptr, Hash };
871 EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
872 SanitizerHandler::DynamicTypeCacheMiss, StaticData,
873 DynamicData);
874 }
875 }
876
877 if (Done) {
878 Builder.CreateBr(Done);
879 EmitBlock(Done);
880 }
881}
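// Illustrative example (not part of the original source): compiling
//   int f(int *p) { return *p; }
// with -fsanitize=null,alignment makes EmitTypeCheck emit roughly
//   %nonnull = icmp ne ptr %p, null
// plus an `and` of the pointer bits with (alignment - 1) compared against
// zero, with failures routed through EmitCheck to a
// __ubsan_handle_type_mismatch* runtime handler.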
882
884 QualType EltTy) {
886 uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
887 if (!EltSize)
888 return nullptr;
889
890 auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
891 if (!ArrayDeclRef)
892 return nullptr;
893
894 auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
895 if (!ParamDecl)
896 return nullptr;
897
898 auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
899 if (!POSAttr)
900 return nullptr;
901
902 // Don't load the size if it's a lower bound.
903 int POSType = POSAttr->getType();
904 if (POSType != 0 && POSType != 1)
905 return nullptr;
906
907 // Find the implicit size parameter.
908 auto PassedSizeIt = SizeArguments.find(ParamDecl);
909 if (PassedSizeIt == SizeArguments.end())
910 return nullptr;
911
912 const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
913 assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
914 Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
915 llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
916 C.getSizeType(), E->getExprLoc());
917 llvm::Value *SizeOfElement =
918 llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
919 return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
920}
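// Illustrative example (not part of the original source): for a parameter
// declared as
//   void fill(int *buf __attribute__((pass_object_size(0))), int n);
// the implicit companion argument carries the object size in bytes, and the
// helper above divides it by sizeof(int) to recover an element count that
// can serve as an array-indexing bound.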
921
922/// If Base is known to point to the start of an array, return the length of
923/// that array. Returns nullptr if the length cannot be determined.
924static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
925 const Expr *Base,
926 QualType &IndexedType,
928 StrictFlexArraysLevel) {
929 // For the vector indexing extension, the bound is the number of elements.
930 if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
931 IndexedType = Base->getType();
932 return CGF.Builder.getInt32(VT->getNumElements());
933 }
934
935 Base = Base->IgnoreParens();
936
937 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
938 if (CE->getCastKind() == CK_ArrayToPointerDecay &&
939 !CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
940 StrictFlexArraysLevel)) {
941 CodeGenFunction::SanitizerScope SanScope(&CGF);
942
943 IndexedType = CE->getSubExpr()->getType();
944 const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
945 if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
946 return CGF.Builder.getInt(CAT->getSize());
947
948 if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
949 return CGF.getVLASize(VAT).NumElts;
950 // Ignore pass_object_size here. It's not applicable on decayed pointers.
951 }
952 }
953
954 CodeGenFunction::SanitizerScope SanScope(&CGF);
955
956 QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
957 if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
958 IndexedType = Base->getType();
959 return POS;
960 }
961
962 return nullptr;
963}
964
965namespace {
966
967/// \p StructAccessBase returns the base \p Expr of a field access. It returns
968/// either a \p DeclRefExpr, representing the base pointer to the struct, i.e.:
969///
970/// p in p->a.b.c
971///
972/// or a \p MemberExpr, if the \p MemberExpr has the \p RecordDecl we're
973/// looking for:
974///
975/// struct s {
976/// struct s *ptr;
977/// int count;
978/// char array[] __attribute__((counted_by(count)));
979/// };
980///
981/// If we have an expression like \p p->ptr->array[index], we want the
982/// \p MemberExpr for \p p->ptr instead of \p p.
983class StructAccessBase
984 : public ConstStmtVisitor<StructAccessBase, const Expr *> {
985 const RecordDecl *ExpectedRD;
986
987 bool IsExpectedRecordDecl(const Expr *E) const {
988 QualType Ty = E->getType();
989 if (Ty->isPointerType())
990 Ty = Ty->getPointeeType();
991 return ExpectedRD == Ty->getAsRecordDecl();
992 }
993
994public:
995 StructAccessBase(const RecordDecl *ExpectedRD) : ExpectedRD(ExpectedRD) {}
996
997 //===--------------------------------------------------------------------===//
998 // Visitor Methods
999 //===--------------------------------------------------------------------===//
1000
1001 // NOTE: If we build C++ support for counted_by, then we'll have to handle
1002 // horrors like this:
1003 //
1004 // struct S {
1005 // int x, y;
1006 // int blah[] __attribute__((counted_by(x)));
1007 // } s;
1008 //
1009 // int foo(int index, int val) {
1010 // int (S::*IHatePMDs)[] = &S::blah;
1011 // (s.*IHatePMDs)[index] = val;
1012 // }
1013
1014 const Expr *Visit(const Expr *E) {
1016 }
1017
1018 const Expr *VisitStmt(const Stmt *S) { return nullptr; }
1019
1020 // These are the types we expect to return (in order of most to least
1021 // likely):
1022 //
1023 // 1. DeclRefExpr - This is the expression for the base of the structure.
1024 // It's exactly what we want to build an access to the \p counted_by
1025 // field.
1026 // 2. MemberExpr - This is the expression that has the same \p RecordDecl
1027 // as the flexible array member's lexical enclosing \p RecordDecl. This
1028 // allows us to catch things like: "p->p->array"
1029 // 3. CompoundLiteralExpr - This is for people who create something
1030 // heretical like (struct foo has a flexible array member):
1031 //
1032 // (struct foo){ 1, 2 }.blah[idx];
1033 const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
1034 return IsExpectedRecordDecl(E) ? E : nullptr;
1035 }
1036 const Expr *VisitMemberExpr(const MemberExpr *E) {
1037 if (IsExpectedRecordDecl(E) && E->isArrow())
1038 return E;
1039 const Expr *Res = Visit(E->getBase());
1040 return !Res && IsExpectedRecordDecl(E) ? E : Res;
1041 }
1042 const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
1043 return IsExpectedRecordDecl(E) ? E : nullptr;
1044 }
1045 const Expr *VisitCallExpr(const CallExpr *E) {
1046 return IsExpectedRecordDecl(E) ? E : nullptr;
1047 }
1048
1049 const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
1050 if (IsExpectedRecordDecl(E))
1051 return E;
1052 return Visit(E->getBase());
1053 }
1054 const Expr *VisitCastExpr(const CastExpr *E) {
1055 return Visit(E->getSubExpr());
1056 }
1057 const Expr *VisitParenExpr(const ParenExpr *E) {
1058 return Visit(E->getSubExpr());
1059 }
1060 const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
1061 return Visit(E->getSubExpr());
1062 }
1063 const Expr *VisitUnaryDeref(const UnaryOperator *E) {
1064 return Visit(E->getSubExpr());
1065 }
1066};
1067
1068} // end anonymous namespace
1069
1072
1074 const FieldDecl *Field,
1075 RecIndicesTy &Indices) {
1076 const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
1077 int64_t FieldNo = -1;
1078 for (const FieldDecl *FD : RD->fields()) {
1079 FieldNo = Layout.getLLVMFieldNo(FD);
1080 if (FD == Field) {
1081 Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
1082 return true;
1083 }
1084
1085 QualType Ty = FD->getType();
1086 if (Ty->isRecordType()) {
1087 if (getGEPIndicesToField(CGF, Ty->getAsRecordDecl(), Field, Indices)) {
1088 if (RD->isUnion())
1089 FieldNo = 0;
1090 Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
1091 return true;
1092 }
1093 }
1094 }
1095
1096 return false;
1097}
1098
1099/// This method is typically called in contexts where we can't generate
1100/// side-effects, like in __builtin_dynamic_object_size. When finding
1101/// expressions, only choose those that have either already been emitted or can
1102/// be loaded without side-effects.
1103///
1104/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
1105/// within the top-level struct.
1106/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
1108 const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
1109 const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();
1110
1111 // Find the base struct expr (i.e. p in p->a.b.c.d).
1112 const Expr *StructBase = StructAccessBase(RD).Visit(Base);
1113 if (!StructBase || StructBase->HasSideEffects(getContext()))
1114 return nullptr;
1115
1116 llvm::Value *Res = nullptr;
1117 if (const auto *DRE = dyn_cast<DeclRefExpr>(StructBase)) {
1118 Res = EmitDeclRefLValue(DRE).getPointer(*this);
1119 Res = Builder.CreateAlignedLoad(ConvertType(DRE->getType()), Res,
1120 getPointerAlign(), "dre.load");
1121 } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(StructBase)) {
1122 LValue LV = EmitMemberExpr(ME);
1123 Address Addr = LV.getAddress();
1124 Res = Addr.emitRawPointer(*this);
1125 } else if (StructBase->getType()->isPointerType()) {
1126 LValueBaseInfo BaseInfo;
1127 TBAAAccessInfo TBAAInfo;
1128 Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
1129 Res = Addr.emitRawPointer(*this);
1130 } else {
1131 return nullptr;
1132 }
1133
1134 llvm::Value *Zero = Builder.getInt32(0);
1135 RecIndicesTy Indices;
1136
1137 getGEPIndicesToField(*this, RD, CountDecl, Indices);
1138
1139 for (auto I = Indices.rbegin(), E = Indices.rend(); I != E; ++I)
1141 ConvertType(QualType(I->first->getTypeForDecl(), 0)), Res,
1142 {Zero, I->second}, "..counted_by.gep");
1143
1144 return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), Res,
1145 getIntAlign(), "..counted_by.load");
1146}
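// Illustrative example (not part of the original source): given
//   struct S { int count; int fam[] __attribute__((counted_by(count))); };
// a query like __builtin_dynamic_object_size(p->fam, 0) can be answered by
// the helper above, which walks GEP indices from the base expression `p`
// down to the `count` field and loads it without emitting side effects.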
1147
1149 if (!FD)
1150 return nullptr;
1151
1152 const auto *CAT = FD->getType()->getAs<CountAttributedType>();
1153 if (!CAT)
1154 return nullptr;
1155
1156 const auto *CountDRE = cast<DeclRefExpr>(CAT->getCountExpr());
1157 const auto *CountDecl = CountDRE->getDecl();
1158 if (const auto *IFD = dyn_cast<IndirectFieldDecl>(CountDecl))
1159 CountDecl = IFD->getAnonField();
1160
1161 return dyn_cast<FieldDecl>(CountDecl);
1162}
1163
1164void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
1165 llvm::Value *Index, QualType IndexType,
1166 bool Accessed) {
1167 assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
1168 "should not be called unless adding bounds checks");
1169 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
1170 getLangOpts().getStrictFlexArraysLevel();
1171 QualType IndexedType;
1172 llvm::Value *Bound =
1173 getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);
1174
1175 EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
1176}
1177
1178void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
1179 llvm::Value *Index,
1180 QualType IndexType,
1181 QualType IndexedType, bool Accessed) {
1182 if (!Bound)
1183 return;
1184
1185 SanitizerScope SanScope(this);
1186
1187 bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
1188 llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
1189 llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
1190
1191 llvm::Constant *StaticData[] = {
1193 EmitCheckTypeDescriptor(IndexedType),
1194 EmitCheckTypeDescriptor(IndexType)
1195 };
1196 llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
1197 : Builder.CreateICmpULE(IndexVal, BoundVal);
1198 EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
1199 SanitizerHandler::OutOfBounds, StaticData, Index);
1200}
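// Illustrative example (not part of the original source): for
//   int a[10]; ... a[i] ...
// under -fsanitize=array-bounds the bound is the constant 10, so the guard
// is roughly `icmp ult i64 %i, 10` (ULE rather than ULT when only the
// address, not the element, is formed), and failures reach the
// SanitizerHandler::OutOfBounds path.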
1201
1204 bool isInc, bool isPre) {
1206
1207 llvm::Value *NextVal;
1208 if (isa<llvm::IntegerType>(InVal.first->getType())) {
1209 uint64_t AmountVal = isInc ? 1 : -1;
1210 NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
1211
1212 // Add the inc/dec to the real part.
1213 NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1214 } else {
1215 QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
1216 llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
1217 if (!isInc)
1218 FVal.changeSign();
1219 NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
1220
1221 // Add the inc/dec to the real part.
1222 NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1223 }
1224
1225 ComplexPairTy IncVal(NextVal, InVal.second);
1226
1227 // Store the updated result through the lvalue.
1228 EmitStoreOfComplex(IncVal, LV, /*init*/ false);
1229 if (getLangOpts().OpenMP)
1231 E->getSubExpr());
1232
1233 // If this is a postinc, return the value read from memory, otherwise use the
1234 // updated value.
1235 return isPre ? IncVal : InVal;
1236}
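// Illustrative example (not part of the original source): for
//   _Complex double z; ++z;
// the code above loads the (real, imag) pair, adds 1.0 to the real part
// only, stores the pair back, and returns the updated value for a
// pre-increment or the originally loaded value for a post-increment.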
1237
1239 CodeGenFunction *CGF) {
1240 // Bind VLAs in the cast type.
1241 if (CGF && E->getType()->isVariablyModifiedType())
1243
1244 if (CGDebugInfo *DI = getModuleDebugInfo())
1245 DI->EmitExplicitCastType(E->getType());
1246}
1247
1248//===----------------------------------------------------------------------===//
1249// LValue Expression Emission
1250//===----------------------------------------------------------------------===//
1251
1253 TBAAAccessInfo *TBAAInfo,
1254 KnownNonNull_t IsKnownNonNull,
1255 CodeGenFunction &CGF) {
1256 // We allow this with ObjC object pointers because of fragile ABIs.
1257 assert(E->getType()->isPointerType() ||
1259 E = E->IgnoreParens();
1260
1261 // Casts:
1262 if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
1263 if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
1264 CGF.CGM.EmitExplicitCastExprType(ECE, &CGF);
1265
1266 switch (CE->getCastKind()) {
1267 // Non-converting casts (but not C's implicit conversion from void*).
1268 case CK_BitCast:
1269 case CK_NoOp:
1270 case CK_AddressSpaceConversion:
1271 if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
1272 if (PtrTy->getPointeeType()->isVoidType())
1273 break;
1274
1275 LValueBaseInfo InnerBaseInfo;
1276 TBAAAccessInfo InnerTBAAInfo;
1278 CE->getSubExpr(), &InnerBaseInfo, &InnerTBAAInfo, IsKnownNonNull);
1279 if (BaseInfo) *BaseInfo = InnerBaseInfo;
1280 if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
1281
1282 if (isa<ExplicitCastExpr>(CE)) {
1283 LValueBaseInfo TargetTypeBaseInfo;
1284 TBAAAccessInfo TargetTypeTBAAInfo;
1286 E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
1287 if (TBAAInfo)
1288 *TBAAInfo =
1289 CGF.CGM.mergeTBAAInfoForCast(*TBAAInfo, TargetTypeTBAAInfo);
1290 // If the source l-value is opaque, honor the alignment of the
1291 // casted-to type.
1292 if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
1293 if (BaseInfo)
1294 BaseInfo->mergeForCast(TargetTypeBaseInfo);
1295 Addr.setAlignment(Align);
1296 }
1297 }
1298
1299 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
1300 CE->getCastKind() == CK_BitCast) {
1301 if (auto PT = E->getType()->getAs<PointerType>())
1302 CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
1303 /*MayBeNull=*/true,
1305 CE->getBeginLoc());
1306 }
1307
1308 llvm::Type *ElemTy =
1310 Addr = Addr.withElementType(ElemTy);
1311 if (CE->getCastKind() == CK_AddressSpaceConversion)
1312 Addr = CGF.Builder.CreateAddrSpaceCast(
1313 Addr, CGF.ConvertType(E->getType()), ElemTy);
1314 return CGF.authPointerToPointerCast(Addr, CE->getSubExpr()->getType(),
1315 CE->getType());
1316 }
1317 break;
1318
1319 // Array-to-pointer decay.
1320 case CK_ArrayToPointerDecay:
1321 return CGF.EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
1322
1323 // Derived-to-base conversions.
1324 case CK_UncheckedDerivedToBase:
1325 case CK_DerivedToBase: {
1326 // TODO: Support accesses to members of base classes in TBAA. For now, we
1327 // conservatively pretend that the complete object is of the base class
1328 // type.
1329 if (TBAAInfo)
1330 *TBAAInfo = CGF.CGM.getTBAAAccessInfo(E->getType());
1332 CE->getSubExpr(), BaseInfo, nullptr,
1333 (KnownNonNull_t)(IsKnownNonNull ||
1334 CE->getCastKind() == CK_UncheckedDerivedToBase));
1335 auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
1336 return CGF.GetAddressOfBaseClass(
1337 Addr, Derived, CE->path_begin(), CE->path_end(),
1338 CGF.ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
1339 }
1340
1341 // TODO: Is there any reason to treat base-to-derived conversions
1342 // specially?
1343 default:
1344 break;
1345 }
1346 }
1347
1348 // Unary &.
1349 if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
1350 if (UO->getOpcode() == UO_AddrOf) {
1351 LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
1352 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1353 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1354 return LV.getAddress();
1355 }
1356 }
1357
1358 // std::addressof and variants.
1359 if (auto *Call = dyn_cast<CallExpr>(E)) {
1360 switch (Call->getBuiltinCallee()) {
1361 default:
1362 break;
1363 case Builtin::BIaddressof:
1364 case Builtin::BI__addressof:
1365 case Builtin::BI__builtin_addressof: {
1366 LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
1367 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1368 if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1369 return LV.getAddress();
1370 }
1371 }
1372 }
1373
1374 // TODO: conditional operators, comma.
1375
1376 // Otherwise, use the alignment of the type.
1379 /*ForPointeeType=*/true, BaseInfo, TBAAInfo, IsKnownNonNull);
1380}
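// Illustrative example (not part of the original source): for an expression
// like `&arr[0]` where `arr` is a local array, the unary & case above reuses
// the alignment recorded on the array element's lvalue instead of falling
// back to the pointee type's natural alignment, which is what the final
// case does for an otherwise opaque pointer-typed expression.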
1381
1382/// EmitPointerWithAlignment - Given an expression of pointer type, try to
1383/// derive a more accurate bound on the alignment of the pointer.
1385 const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo,
1386 KnownNonNull_t IsKnownNonNull) {
1387 Address Addr =
1388 ::EmitPointerWithAlignment(E, BaseInfo, TBAAInfo, IsKnownNonNull, *this);
1389 if (IsKnownNonNull && !Addr.isKnownNonNull())
1390 Addr.setKnownNonNull();
1391 return Addr;
1392}
1393
1395 llvm::Value *V = RV.getScalarVal();
1396 if (auto MPT = T->getAs<MemberPointerType>())
1397 return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
1398 return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
1399}
1400
1402 if (Ty->isVoidType())
1403 return RValue::get(nullptr);
1404
1405 switch (getEvaluationKind(Ty)) {
1406 case TEK_Complex: {
1407 llvm::Type *EltTy =
1409 llvm::Value *U = llvm::UndefValue::get(EltTy);
1410 return RValue::getComplex(std::make_pair(U, U));
1411 }
1412
1413 // If this is a use of an undefined aggregate type, the aggregate must have an
1414 // identifiable address. Just because the contents of the value are undefined
1415 // doesn't mean that the address can't be taken and compared.
1416 case TEK_Aggregate: {
1417 Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
1418 return RValue::getAggregate(DestPtr);
1419 }
1420
1421 case TEK_Scalar:
1422 return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
1423 }
1424 llvm_unreachable("bad evaluation kind");
1425}
1426
1428 const char *Name) {
1429 ErrorUnsupported(E, Name);
1430 return GetUndefRValue(E->getType());
1431}
1432
1434 const char *Name) {
1435 ErrorUnsupported(E, Name);
1436 llvm::Type *ElTy = ConvertType(E->getType());
1437 llvm::Type *Ty = UnqualPtrTy;
1438 return MakeAddrLValue(
1439 Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
1440}
1441
1442bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
1443 const Expr *Base = Obj;
1444 while (!isa<CXXThisExpr>(Base)) {
1445 // The result of a dynamic_cast can be null.
1446 if (isa<CXXDynamicCastExpr>(Base))
1447 return false;
1448
1449 if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1450 Base = CE->getSubExpr();
1451 } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1452 Base = PE->getSubExpr();
1453 } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1454 if (UO->getOpcode() == UO_Extension)
1455 Base = UO->getSubExpr();
1456 else
1457 return false;
1458 } else {
1459 return false;
1460 }
1461 }
1462 return true;
1463}
1464
1465LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
1466 LValue LV;
1467 if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
1468 LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
1469 else
1470 LV = EmitLValue(E);
1471 if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
1472 SanitizerSet SkippedChecks;
1473 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
1474 bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
1475 if (IsBaseCXXThis)
1476 SkippedChecks.set(SanitizerKind::Alignment, true);
1477 if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
1478 SkippedChecks.set(SanitizerKind::Null, true);
1479 }
1480 EmitTypeCheck(TCK, E->getExprLoc(), LV, E->getType(), SkippedChecks);
1481 }
1482 return LV;
1483}
1484
1485/// EmitLValue - Emit code to compute a designator that specifies the location
1486/// of the expression.
1487///
1488/// This can return one of two things: a simple address or a bitfield reference.
1489/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1490/// an LLVM pointer type.
1491///
1492/// If this returns a bitfield reference, nothing about the pointee type of the
1493/// LLVM value is known: For example, it may not be a pointer to an integer.
1494///
1495/// If this returns a normal address, and if the lvalue's C type is fixed size,
1496/// this method guarantees that the returned pointer type will point to an LLVM
1497/// type of the same size as the lvalue's type. If the lvalue has a variable
1498/// length type, this is not possible.
1499///
1501 KnownNonNull_t IsKnownNonNull) {
1502 LValue LV = EmitLValueHelper(E, IsKnownNonNull);
1503 if (IsKnownNonNull && !LV.isKnownNonNull())
1504 LV.setKnownNonNull();
1505 return LV;
1506}
1507
1509 const ASTContext &Ctx) {
1510 const Expr *SE = E->getSubExpr()->IgnoreImplicit();
1511 if (isa<OpaqueValueExpr>(SE))
1512 return SE->getType();
1513 return cast<CallExpr>(SE)->getCallReturnType(Ctx)->getPointeeType();
1514}
1515
1516LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
1517 KnownNonNull_t IsKnownNonNull) {
1518 ApplyDebugLocation DL(*this, E);
1519 switch (E->getStmtClass()) {
1520 default: return EmitUnsupportedLValue(E, "l-value expression");
1521
1522 case Expr::ObjCPropertyRefExprClass:
1523 llvm_unreachable("cannot emit a property reference directly");
1524
1525 case Expr::ObjCSelectorExprClass:
1526 return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
1527 case Expr::ObjCIsaExprClass:
1528 return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
1529 case Expr::BinaryOperatorClass:
1530 return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
1531 case Expr::CompoundAssignOperatorClass: {
1532 QualType Ty = E->getType();
1533 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1534 Ty = AT->getValueType();
1535 if (!Ty->isAnyComplexType())
1536 return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1537 return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1538 }
1539 case Expr::CallExprClass:
1540 case Expr::CXXMemberCallExprClass:
1541 case Expr::CXXOperatorCallExprClass:
1542 case Expr::UserDefinedLiteralClass:
1543 return EmitCallExprLValue(cast<CallExpr>(E));
1544 case Expr::CXXRewrittenBinaryOperatorClass:
1545 return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm(),
1546 IsKnownNonNull);
1547 case Expr::VAArgExprClass:
1548 return EmitVAArgExprLValue(cast<VAArgExpr>(E));
1549 case Expr::DeclRefExprClass:
1550 return EmitDeclRefLValue(cast<DeclRefExpr>(E));
1551 case Expr::ConstantExprClass: {
1552 const ConstantExpr *CE = cast<ConstantExpr>(E);
1553 if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
1555 return MakeNaturalAlignAddrLValue(Result, RetType);
1556 }
1557 return EmitLValue(cast<ConstantExpr>(E)->getSubExpr(), IsKnownNonNull);
1558 }
1559 case Expr::ParenExprClass:
1560 return EmitLValue(cast<ParenExpr>(E)->getSubExpr(), IsKnownNonNull);
1561 case Expr::GenericSelectionExprClass:
1562 return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr(),
1563 IsKnownNonNull);
1564 case Expr::PredefinedExprClass:
1565 return EmitPredefinedLValue(cast<PredefinedExpr>(E));
1566 case Expr::StringLiteralClass:
1567 return EmitStringLiteralLValue(cast<StringLiteral>(E));
1568 case Expr::ObjCEncodeExprClass:
1569 return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
1570 case Expr::PseudoObjectExprClass:
1571 return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
1572 case Expr::InitListExprClass:
1573 return EmitInitListLValue(cast<InitListExpr>(E));
1574 case Expr::CXXTemporaryObjectExprClass:
1575 case Expr::CXXConstructExprClass:
1576 return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
1577 case Expr::CXXBindTemporaryExprClass:
1578 return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
1579 case Expr::CXXUuidofExprClass:
1580 return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
1581 case Expr::LambdaExprClass:
1582 return EmitAggExprToLValue(E);
1583
1584 case Expr::ExprWithCleanupsClass: {
1585 const auto *cleanups = cast<ExprWithCleanups>(E);
1586 RunCleanupsScope Scope(*this);
1587 LValue LV = EmitLValue(cleanups->getSubExpr(), IsKnownNonNull);
1588 if (LV.isSimple()) {
1589 // Defend against branches out of GNU statement expressions surrounded by
1590 // cleanups.
1591 Address Addr = LV.getAddress();
1592 llvm::Value *V = Addr.getBasePointer();
1593 Scope.ForceCleanup({&V});
1594 Addr.replaceBasePointer(V);
1595 return LValue::MakeAddr(Addr, LV.getType(), getContext(),
1596 LV.getBaseInfo(), LV.getTBAAInfo());
1597 }
1598 // FIXME: Is it possible to create an ExprWithCleanups that produces a
1599 // bitfield lvalue or some other non-simple lvalue?
1600 return LV;
1601 }
1602
1603 case Expr::CXXDefaultArgExprClass: {
1604 auto *DAE = cast<CXXDefaultArgExpr>(E);
1605 CXXDefaultArgExprScope Scope(*this, DAE);
1606 return EmitLValue(DAE->getExpr(), IsKnownNonNull);
1607 }
1608 case Expr::CXXDefaultInitExprClass: {
1609 auto *DIE = cast<CXXDefaultInitExpr>(E);
1610 CXXDefaultInitExprScope Scope(*this, DIE);
1611 return EmitLValue(DIE->getExpr(), IsKnownNonNull);
1612 }
1613 case Expr::CXXTypeidExprClass:
1614 return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
1615
1616 case Expr::ObjCMessageExprClass:
1617 return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
1618 case Expr::ObjCIvarRefExprClass:
1619 return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
1620 case Expr::StmtExprClass:
1621 return EmitStmtExprLValue(cast<StmtExpr>(E));
1622 case Expr::UnaryOperatorClass:
1623 return EmitUnaryOpLValue(cast<UnaryOperator>(E));
1624 case Expr::ArraySubscriptExprClass:
1625 return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
1626 case Expr::MatrixSubscriptExprClass:
1627 return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
1628 case Expr::ArraySectionExprClass:
1629 return EmitArraySectionExpr(cast<ArraySectionExpr>(E));
1630 case Expr::ExtVectorElementExprClass:
1631 return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
1632 case Expr::CXXThisExprClass:
1634 case Expr::MemberExprClass:
1635 return EmitMemberExpr(cast<MemberExpr>(E));
1636 case Expr::CompoundLiteralExprClass:
1637 return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
1638 case Expr::ConditionalOperatorClass:
1639 return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
1640 case Expr::BinaryConditionalOperatorClass:
1641 return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
1642 case Expr::ChooseExprClass:
1643 return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(), IsKnownNonNull);
1644 case Expr::OpaqueValueExprClass:
1645 return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
1646 case Expr::SubstNonTypeTemplateParmExprClass:
1647 return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
1648 IsKnownNonNull);
1649 case Expr::ImplicitCastExprClass:
1650 case Expr::CStyleCastExprClass:
1651 case Expr::CXXFunctionalCastExprClass:
1652 case Expr::CXXStaticCastExprClass:
1653 case Expr::CXXDynamicCastExprClass:
1654 case Expr::CXXReinterpretCastExprClass:
1655 case Expr::CXXConstCastExprClass:
1656 case Expr::CXXAddrspaceCastExprClass:
1657 case Expr::ObjCBridgedCastExprClass:
1658 return EmitCastLValue(cast<CastExpr>(E));
1659
1660 case Expr::MaterializeTemporaryExprClass:
1661 return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
1662
1663 case Expr::CoawaitExprClass:
1664 return EmitCoawaitLValue(cast<CoawaitExpr>(E));
1665 case Expr::CoyieldExprClass:
1666 return EmitCoyieldLValue(cast<CoyieldExpr>(E));
1667 case Expr::PackIndexingExprClass:
1668 return EmitLValue(cast<PackIndexingExpr>(E)->getSelectedExpr());
1669 }
1670}
1671
1672/// Given an object of the given canonical type, can we safely copy a
1673/// value out of it based on its initializer?
1675 assert(type.isCanonical());
1676 assert(!type->isReferenceType());
1677
1678 // Must be const-qualified but non-volatile.
1679 Qualifiers qs = type.getLocalQualifiers();
1680 if (!qs.hasConst() || qs.hasVolatile()) return false;
1681
1682 // Otherwise, all object types satisfy this except C++ classes with
1683 // mutable subobjects or non-trivial copy/destroy behavior.
1684 if (const auto *RT = dyn_cast<RecordType>(type))
1685 if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1686 if (RD->hasMutableFields() || !RD->isTrivial())
1687 return false;
1688
1689 return true;
1690}
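// Illustrative sketch (editorial addition, not part of the source): example
// declarations that the predicate above accepts or rejects. Only const,
// non-volatile objects without mutable subobjects or non-trivial copy/destroy
// behavior may have their values copied straight out of the initializer.
//
//   const int a = 10;                  // accepted: const scalar
//   struct P { int x, y; };
//   const P p = {1, 2};                // accepted: trivial const class
//   volatile const int v = 3;          // rejected: volatile
//   struct M { mutable int m; };
//   const M m0{};                      // rejected: mutable subobject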
1691
1692/// Can we constant-emit a load of a reference to a variable of the
1693/// given type? This is different from predicates like
1694/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1695/// in situations that don't necessarily satisfy the language's rules
1696/// for this (e.g. C++'s ODR-use rules). For example, we want to able
1697/// to do this with const float variables even if those variables
1698/// aren't marked 'constexpr'.
1706 type = type.getCanonicalType();
1707 if (const auto *ref = dyn_cast<ReferenceType>(type)) {
1708 if (isConstantEmittableObjectType(ref->getPointeeType()))
1710 return CEK_AsReferenceOnly;
1711 }
1713 return CEK_AsValueOnly;
1714 return CEK_None;
1715}
1716
1717/// Try to emit a reference to the given value without producing it as
1718/// an l-value. This is just an optimization, but it avoids us needing
1719/// to emit global copies of variables if they're named without triggering
1720/// a formal use in a context where we can't emit a direct reference to them,
1721/// for instance if a block or lambda or a member of a local class uses a
1722/// const int variable or constexpr variable from an enclosing function.
1723CodeGenFunction::ConstantEmission
1725 ValueDecl *value = refExpr->getDecl();
1726
1727 // The value needs to be an enum constant or a constant variable.
1729 if (isa<ParmVarDecl>(value)) {
1730 CEK = CEK_None;
1731 } else if (auto *var = dyn_cast<VarDecl>(value)) {
1732 CEK = checkVarTypeForConstantEmission(var->getType());
1733 } else if (isa<EnumConstantDecl>(value)) {
1734 CEK = CEK_AsValueOnly;
1735 } else {
1736 CEK = CEK_None;
1737 }
1738 if (CEK == CEK_None) return ConstantEmission();
1739
1740 Expr::EvalResult result;
1741 bool resultIsReference;
1742 QualType resultType;
1743
1744 // It's best to evaluate all the way as an r-value if that's permitted.
1745 if (CEK != CEK_AsReferenceOnly &&
1746 refExpr->EvaluateAsRValue(result, getContext())) {
1747 resultIsReference = false;
1748 resultType = refExpr->getType();
1749
1750 // Otherwise, try to evaluate as an l-value.
1751 } else if (CEK != CEK_AsValueOnly &&
1752 refExpr->EvaluateAsLValue(result, getContext())) {
1753 resultIsReference = true;
1754 resultType = value->getType();
1755
1756 // Failure.
1757 } else {
1758 return ConstantEmission();
1759 }
1760
1761 // In any case, if the initializer has side-effects, abandon ship.
1762 if (result.HasSideEffects)
1763 return ConstantEmission();
1764
1765 // In CUDA/HIP device compilation, a lambda may capture a reference variable
1766 // referencing a global host variable by copy. In this case the lambda should
1767 // make a copy of the value of the global host variable. The DRE of the
1768 // captured reference variable cannot be emitted as a load from the host
1769 // global variable as a compile-time constant, since the host variable is not
1770 // accessible on device. The DRE of the captured reference variable has to be
1771 // loaded from captures.
1772 if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
1774 auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
1775 if (MD && MD->getParent()->isLambda() &&
1776 MD->getOverloadedOperator() == OO_Call) {
1777 const APValue::LValueBase &base = result.Val.getLValueBase();
1778 if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
1779 if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
1780 if (!VD->hasAttr<CUDADeviceAttr>()) {
1781 return ConstantEmission();
1782 }
1783 }
1784 }
1785 }
1786 }
1787
1788 // Emit as a constant.
1789 auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
1790 result.Val, resultType);
1791
1792 // Make sure we emit a debug reference to the global variable.
1793 // This should probably fire even for
1794 if (isa<VarDecl>(value)) {
1795 if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
1796 EmitDeclRefExprDbgValue(refExpr, result.Val);
1797 } else {
1798 assert(isa<EnumConstantDecl>(value));
1799 EmitDeclRefExprDbgValue(refExpr, result.Val);
1800 }
1801
1802 // If we emitted a reference constant, we need to dereference that.
1803 if (resultIsReference)
1805
1807}
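// Illustrative sketch (editorial addition): the pattern this optimization
// targets. `N` is named inside the lambda without being odr-used, so no
// capture is required and the DeclRefExpr for `N` can be emitted directly as
// the constant 32 rather than as a load from a materialized variable.
//
//   void f() {
//     const int N = 32;
//     auto g = [] { return N + 1; };   // the reference to N folds to 32
//   }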
1808
1810 const MemberExpr *ME) {
1811 if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
1812 // Try to emit static variable member expressions as DREs.
1813 return DeclRefExpr::Create(
1815 /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
1816 ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
1817 }
1818 return nullptr;
1819}
1820
1821CodeGenFunction::ConstantEmission
1824 return tryEmitAsConstant(DRE);
1825 return ConstantEmission();
1826}
1827
1829 const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
1830 assert(Constant && "not a constant");
1831 if (Constant.isReference())
1832 return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
1833 E->getExprLoc())
1834 .getScalarVal();
1835 return Constant.getValue();
1836}
1837
1838llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
1840 return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
1841 lvalue.getType(), Loc, lvalue.getBaseInfo(),
1842 lvalue.getTBAAInfo(), lvalue.isNontemporal());
1843}
1844
1846 if (Ty->isBooleanType())
1847 return true;
1848
1849 if (const EnumType *ET = Ty->getAs<EnumType>())
1850 return ET->getDecl()->getIntegerType()->isBooleanType();
1851
1852 if (const AtomicType *AT = Ty->getAs<AtomicType>())
1853 return hasBooleanRepresentation(AT->getValueType());
1854
1855 return false;
1856}
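// Illustrative sketch (editorial addition): types for which the predicate
// above returns true. The _Atomic example is C11 syntax.
//
//   bool b;                               // yes
//   enum class Flag : bool { No, Yes };   // yes: underlying type is bool
//   _Atomic(bool) ab;                     // yes: atomic of a boolean value type
//   int i;                                // no: plain integer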
1857
1859 llvm::APInt &Min, llvm::APInt &End,
1860 bool StrictEnums, bool IsBool) {
1861 const EnumType *ET = Ty->getAs<EnumType>();
1862 bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
1863 ET && !ET->getDecl()->isFixed();
1864 if (!IsBool && !IsRegularCPlusPlusEnum)
1865 return false;
1866
1867 if (IsBool) {
1868 Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
1869 End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
1870 } else {
1871 const EnumDecl *ED = ET->getDecl();
1872 ED->getValueRange(End, Min);
1873 }
1874 return true;
1875}
1876
1877llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
1878 llvm::APInt Min, End;
1879 if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
1881 return nullptr;
1882
1883 llvm::MDBuilder MDHelper(getLLVMContext());
1884 return MDHelper.createRange(Min, End);
1885}
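// Illustrative sketch (editorial addition), assuming bool is stored in a
// byte: when optimizing, a load of a bool can carry a [0, 2) range so the
// optimizer may assume the loaded byte is 0 or 1.
//
//   %v = load i8, ptr %b, align 1, !range !0
//   !0 = !{i8 0, i8 2}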
1886
1889 bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
1890 bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
1891 if (!HasBoolCheck && !HasEnumCheck)
1892 return false;
1893
1894 bool IsBool = hasBooleanRepresentation(Ty) ||
1896 bool NeedsBoolCheck = HasBoolCheck && IsBool;
1897 bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
1898 if (!NeedsBoolCheck && !NeedsEnumCheck)
1899 return false;
1900
1901 // Single-bit booleans don't need to be checked. Special-case this to avoid
1902 // a bit width mismatch when handling bitfield values. This is handled by
1903 // EmitFromMemory for the non-bitfield case.
1904 if (IsBool &&
1905 cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
1906 return false;
1907
1908 llvm::APInt Min, End;
1909 if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
1910 return true;
1911
1912 auto &Ctx = getLLVMContext();
1913 SanitizerScope SanScope(this);
1914 llvm::Value *Check;
1915 --End;
1916 if (!Min) {
1917 Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
1918 } else {
1919 llvm::Value *Upper =
1920 Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
1921 llvm::Value *Lower =
1922 Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
1923 Check = Builder.CreateAnd(Upper, Lower);
1924 }
1925 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
1928 NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
1929 EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
1930 StaticArgs, EmitCheckValue(Value));
1931 return true;
1932}
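// Illustrative sketch (editorial addition): with -fsanitize=enum, loading an
// unfixed C++ enum whose stored value lies outside its value range branches
// to the UBSan runtime, roughly like this (value range starting at 0 here):
//
//   %ok = icmp ule i32 %v, <End - 1>
//   br i1 %ok, label %cont, label %handler
//   ; handler: call void @__ubsan_handle_load_invalid_value(...)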
1933
1934llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
1935 QualType Ty,
1937 LValueBaseInfo BaseInfo,
1938 TBAAAccessInfo TBAAInfo,
1939 bool isNontemporal) {
1940 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
1941 if (GV->isThreadLocal())
1942 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
1944
1945 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
1946 // Boolean vectors use `iN` as storage type.
1947 if (ClangVecTy->isExtVectorBoolType()) {
1948 llvm::Type *ValTy = ConvertType(Ty);
1949 unsigned ValNumElems =
1950 cast<llvm::FixedVectorType>(ValTy)->getNumElements();
1951 // Load the `iP` storage object (P is the padded vector size).
1952 auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
1953 const auto *RawIntTy = RawIntV->getType();
1954 assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
1955 // Bitcast iP --> <P x i1>.
1956 auto *PaddedVecTy = llvm::FixedVectorType::get(
1957 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
1958 llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
1959 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
1960 V = emitBoolVecConversion(V, ValNumElems, "extractvec");
1961
1962 return EmitFromMemory(V, Ty);
1963 }
1964
1965 // Handle vectors of size 3 like size 4 for better performance.
1966 const llvm::Type *EltTy = Addr.getElementType();
1967 const auto *VTy = cast<llvm::FixedVectorType>(EltTy);
1968
1969 if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {
1970
1971 llvm::VectorType *vec4Ty =
1972 llvm::FixedVectorType::get(VTy->getElementType(), 4);
1973 Address Cast = Addr.withElementType(vec4Ty);
1974 // Now load value.
1975 llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
1976
1977 // Shuffle vector to get vec3.
1978 V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
1979 return EmitFromMemory(V, Ty);
1980 }
1981 }
1982
1983 // Atomic operations have to be done on integral types.
1984 LValue AtomicLValue =
1985 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
1986 if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
1987 return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
1988 }
1989
1990 Addr =
1992
1993 llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
1994 if (isNontemporal) {
1995 llvm::MDNode *Node = llvm::MDNode::get(
1996 Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
1997 Load->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
1998 }
1999
2000 CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
2001
2002 if (EmitScalarRangeCheck(Load, Ty, Loc)) {
2003 // In order to prevent the optimizer from throwing away the check, don't
2004 // attach range metadata to the load.
2005 } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
2006 if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) {
2007 Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
2008 Load->setMetadata(llvm::LLVMContext::MD_noundef,
2009 llvm::MDNode::get(getLLVMContext(), std::nullopt));
2010 }
2011
2012 return EmitFromMemory(Load, Ty);
2013}
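// Illustrative sketch (editorial addition) of the vec3 special case above,
// with PreserveVec3Type off: a 3-element vector is loaded through a
// 4-element type and then narrowed back.
//
//   typedef float float3 __attribute__((ext_vector_type(3)));
//   // float3 x = *p;  roughly becomes:
//   //   %loadVec4   = load <4 x float>, ptr %p
//   //   %extractVec = shufflevector <4 x float> %loadVec4, <4 x float> poison,
//   //                               <3 x i32> <i32 0, i32 1, i32 2>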
2014
2015/// Converts a scalar value from its primary IR type (as returned
2016/// by ConvertType) to its load/store type (as returned by
2017/// convertTypeForLoadStore).
2018llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
2019 if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
2020 llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2022 return Builder.CreateIntCast(Value, StoreTy, Signed, "storedv");
2023 }
2024
2025 if (Ty->isExtVectorBoolType()) {
2026 llvm::Type *StoreTy = convertTypeForLoadStore(Ty, Value->getType());
2027 // Expand to the memory bit width.
2028 unsigned MemNumElems = StoreTy->getPrimitiveSizeInBits();
2029 // <N x i1> --> <P x i1>.
2030 Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
2031 // <P x i1> --> iP.
2032 Value = Builder.CreateBitCast(Value, StoreTy);
2033 }
2034
2035 return Value;
2036}
2037
2038/// Converts a scalar value from its load/store type (as returned
2039/// by convertTypeForLoadStore) to its primary IR type (as returned
2040/// by ConvertType).
2041llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
2042 if (Ty->isExtVectorBoolType()) {
2043 const auto *RawIntTy = Value->getType();
2044 // Bitcast iP --> <P x i1>.
2045 auto *PaddedVecTy = llvm::FixedVectorType::get(
2046 Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
2047 auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
2048 // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
2049 llvm::Type *ValTy = ConvertType(Ty);
2050 unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
2051 return emitBoolVecConversion(V, ValNumElems, "extractvec");
2052 }
2053
2054 if (hasBooleanRepresentation(Ty) || Ty->isBitIntType()) {
2055 llvm::Type *ResTy = ConvertType(Ty);
2056 return Builder.CreateTrunc(Value, ResTy, "loadedv");
2057 }
2058
2059 return Value;
2060}
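// Illustrative sketch (editorial addition) of the bool round trip through
// EmitToMemory/EmitFromMemory, assuming bool uses i8 in memory:
//
//   store path: %storedv = zext i1 %val to i8     ; EmitToMemory
//   load path:  %loadedv = trunc i8 %mem to i1    ; EmitFromMemory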
2061
2062// Convert the pointer of \p Addr to a pointer to a vector (the value type of
2063 // MatrixType), if it points to an array (the memory type of MatrixType).
2065 CodeGenFunction &CGF,
2066 bool IsVector = true) {
2067 auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
2068 if (ArrayTy && IsVector) {
2069 auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
2070 ArrayTy->getNumElements());
2071
2072 return Addr.withElementType(VectorTy);
2073 }
2074 auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
2075 if (VectorTy && !IsVector) {
2076 auto *ArrayTy = llvm::ArrayType::get(
2077 VectorTy->getElementType(),
2078 cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
2079
2080 return Addr.withElementType(ArrayTy);
2081 }
2082
2083 return Addr;
2084}
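// Illustrative sketch (editorial addition): a constant matrix is laid out in
// memory as an array but manipulated as a flat vector, so the helper above
// retypes the address between the two forms.
//
//   typedef float m2x2 __attribute__((matrix_type(2, 2)));
//   // memory type: [4 x float]      value type: <4 x float>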
2085
2086// Emit a store of a matrix LValue. This may require casting the original
2087 // pointer to the memory type (ArrayType) to a pointer to the value type
2088// (VectorType).
2089static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
2090 bool isInit, CodeGenFunction &CGF) {
2091 Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
2092 value->getType()->isVectorTy());
2093 CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
2094 lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
2095 lvalue.isNontemporal());
2096}
2097
2098void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
2099 bool Volatile, QualType Ty,
2100 LValueBaseInfo BaseInfo,
2101 TBAAAccessInfo TBAAInfo,
2102 bool isInit, bool isNontemporal) {
2103 if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr.getBasePointer()))
2104 if (GV->isThreadLocal())
2105 Addr = Addr.withPointer(Builder.CreateThreadLocalAddress(GV),
2107
2108 llvm::Type *SrcTy = Value->getType();
2109 if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
2110 auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
2111 if (!CGM.getCodeGenOpts().PreserveVec3Type) {
2112 // Handle vec3 specially.
2113 if (VecTy && !ClangVecTy->isExtVectorBoolType() &&
2114 cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
2115 // Our source is a vec3, do a shuffle vector to make it a vec4.
2116 Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
2117 "extractVec");
2118 SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
2119 }
2120 if (Addr.getElementType() != SrcTy) {
2121 Addr = Addr.withElementType(SrcTy);
2122 }
2123 }
2124 }
2125
2126 Value = EmitToMemory(Value, Ty);
2127
2128 LValue AtomicLValue =
2129 LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
2130 if (Ty->isAtomicType() ||
2131 (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
2132 EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
2133 return;
2134 }
2135
2136 llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
2137 if (isNontemporal) {
2138 llvm::MDNode *Node =
2139 llvm::MDNode::get(Store->getContext(),
2140 llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
2141 Store->setMetadata(llvm::LLVMContext::MD_nontemporal, Node);
2142 }
2143
2144 CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
2145}
2146
2147void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
2148 bool isInit) {
2149 if (lvalue.getType()->isConstantMatrixType()) {
2150 EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
2151 return;
2152 }
2153
2154 EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
2155 lvalue.getType(), lvalue.getBaseInfo(),
2156 lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
2157}
2158
2159 // Emit a load of an LValue of matrix type. This may require casting the pointer
2160 // to the memory type (ArrayType) to a pointer to the value type (VectorType).
2162 CodeGenFunction &CGF) {
2163 assert(LV.getType()->isConstantMatrixType());
2165 LV.setAddress(Addr);
2166 return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
2167}
2168
2171 QualType Ty = LV.getType();
2172 switch (getEvaluationKind(Ty)) {
2173 case TEK_Scalar:
2174 return EmitLoadOfLValue(LV, Loc);
2175 case TEK_Complex:
2177 case TEK_Aggregate:
2178 EmitAggFinalDestCopy(Ty, Slot, LV, EVK_NonRValue);
2179 return Slot.asRValue();
2180 }
2181 llvm_unreachable("bad evaluation kind");
2182}
2183
2184/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
2185/// method emits the address of the lvalue, then loads the result as an rvalue,
2186/// returning the rvalue.
2188 if (LV.isObjCWeak()) {
2189 // load of a __weak object.
2190 Address AddrWeakObj = LV.getAddress();
2192 AddrWeakObj));
2193 }
2195 // In MRC mode, we do a load+autorelease.
2196 if (!getLangOpts().ObjCAutoRefCount) {
2198 }
2199
2200 // In ARC mode, we load retained and then consume the value.
2201 llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
2202 Object = EmitObjCConsumeObject(LV.getType(), Object);
2203 return RValue::get(Object);
2204 }
2205
2206 if (LV.isSimple()) {
2207 assert(!LV.getType()->isFunctionType());
2208
2209 if (LV.getType()->isConstantMatrixType())
2210 return EmitLoadOfMatrixLValue(LV, Loc, *this);
2211
2212 // Everything needs a load.
2213 return RValue::get(EmitLoadOfScalar(LV, Loc));
2214 }
2215
2216 if (LV.isVectorElt()) {
2217 llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
2218 LV.isVolatileQualified());
2219 return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
2220 "vecext"));
2221 }
2222
2223 // If this is a reference to a subset of the elements of a vector, either
2224 // shuffle the input or extract/insert them as appropriate.
2225 if (LV.isExtVectorElt()) {
2227 }
2228
2229 // Global register variables are always accessed through intrinsics.
2230 if (LV.isGlobalReg())
2231 return EmitLoadOfGlobalRegLValue(LV);
2232
2233 if (LV.isMatrixElt()) {
2234 llvm::Value *Idx = LV.getMatrixIdx();
2235 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2236 const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
2237 llvm::MatrixBuilder MB(Builder);
2238 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2239 }
2240 llvm::LoadInst *Load =
2242 return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
2243 }
2244
2245 assert(LV.isBitField() && "Unknown LValue type!");
2246 return EmitLoadOfBitfieldLValue(LV, Loc);
2247}
2248
2251 const CGBitFieldInfo &Info = LV.getBitFieldInfo();
2252
2253 // Get the output type.
2254 llvm::Type *ResLTy = ConvertType(LV.getType());
2255
2256 Address Ptr = LV.getBitFieldAddress();
2257 llvm::Value *Val =
2258 Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
2259
2260 bool UseVolatile = LV.isVolatileQualified() &&
2261 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2262 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2263 const unsigned StorageSize =
2264 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2265 if (Info.IsSigned) {
2266 assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
2267 unsigned HighBits = StorageSize - Offset - Info.Size;
2268 if (HighBits)
2269 Val = Builder.CreateShl(Val, HighBits, "bf.shl");
2270 if (Offset + HighBits)
2271 Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
2272 } else {
2273 if (Offset)
2274 Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
2275 if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
2276 Val = Builder.CreateAnd(
2277 Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
2278 }
2279 Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
2280 EmitScalarRangeCheck(Val, LV.getType(), Loc);
2281 return RValue::get(Val);
2282}
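// Illustrative sketch (editorial addition) of the signed extraction above,
// assuming the two fields of
//   struct S { int a : 3; int b : 5; };
// share one i8 storage unit. Loading s.a (Offset=0, Size=3, StorageSize=8):
//
//   %bf.load = load i8, ptr %s
//   %bf.shl  = shl i8 %bf.load, 5        ; HighBits = 8 - 0 - 3
//   %bf.ashr = ashr i8 %bf.shl, 5        ; Offset + HighBits
//   %bf.cast = sext i8 %bf.ashr to i32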
2283
2284// If this is a reference to a subset of the elements of a vector, create an
2285// appropriate shufflevector.
2287 llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
2288 LV.isVolatileQualified());
2289
2290 // HLSL allows treating scalars as one-element vectors. Converting the scalar
2291 // IR value to a vector here allows the rest of codegen to behave as normal.
2292 if (getLangOpts().HLSL && !Vec->getType()->isVectorTy()) {
2293 llvm::Type *DstTy = llvm::FixedVectorType::get(Vec->getType(), 1);
2294 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2295 Vec = Builder.CreateInsertElement(DstTy, Vec, Zero, "cast.splat");
2296 }
2297
2298 const llvm::Constant *Elts = LV.getExtVectorElts();
2299
2300 // If the result of the expression is a non-vector type, we must be extracting
2301 // a single element. Just codegen as an extractelement.
2302 const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2303 if (!ExprVT) {
2304 unsigned InIdx = getAccessedFieldNo(0, Elts);
2305 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2306 return RValue::get(Builder.CreateExtractElement(Vec, Elt));
2307 }
2308
2309 // Always use shuffle vector to try to retain the original program structure
2310 unsigned NumResultElts = ExprVT->getNumElements();
2311
2313 for (unsigned i = 0; i != NumResultElts; ++i)
2314 Mask.push_back(getAccessedFieldNo(i, Elts));
2315
2316 Vec = Builder.CreateShuffleVector(Vec, Mask);
2317 return RValue::get(Vec);
2318}
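// Illustrative sketch (editorial addition): an ext_vector swizzle load
// handled by the function above.
//
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   typedef float float2 __attribute__((ext_vector_type(2)));
//   // float2 xz = v.xz;  roughly becomes:
//   //   %vec = load <4 x float>, ptr %v
//   //   %res = shufflevector <4 x float> %vec, <4 x float> poison,
//   //                        <2 x i32> <i32 0, i32 2>
//   // while a scalar access such as v.y is a single extractelement.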
2319
2320 /// Generates an lvalue for a partial ext_vector access.
2322 Address VectorAddress = LV.getExtVectorAddress();
2323 QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2324 llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2325
2326 Address CastToPointerElement = VectorAddress.withElementType(VectorElementTy);
2327
2328 const llvm::Constant *Elts = LV.getExtVectorElts();
2329 unsigned ix = getAccessedFieldNo(0, Elts);
2330
2331 Address VectorBasePtrPlusIx =
2332 Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2333 "vector.elt");
2334
2335 return VectorBasePtrPlusIx;
2336}
2337
2338 /// Loads of global named registers are always calls to intrinsics.
2340 assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2341 "Bad type for register variable");
2342 llvm::MDNode *RegName = cast<llvm::MDNode>(
2343 cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2344
2345 // We accept integer and pointer types only
2346 llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2347 llvm::Type *Ty = OrigTy;
2348 if (OrigTy->isPointerTy())
2349 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2350 llvm::Type *Types[] = { Ty };
2351
2352 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2353 llvm::Value *Call = Builder.CreateCall(
2354 F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2355 if (OrigTy->isPointerTy())
2356 Call = Builder.CreateIntToPtr(Call, OrigTy);
2357 return RValue::get(Call);
2358}
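// Illustrative sketch (editorial addition), assuming a 64-bit target where
// "sp" names a valid register: reading a global named register variable.
//
//   register unsigned long current_sp __asm__("sp");
//   // unsigned long x = current_sp;  roughly becomes:
//   //   %x = call i64 @llvm.read_register.i64(metadata !"sp")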
2359
2360/// EmitStoreThroughLValue - Store the specified rvalue into the specified
2361 /// lvalue, where both are guaranteed to have the same type, and that type
2362/// is 'Ty'.
2364 bool isInit) {
2365 if (!Dst.isSimple()) {
2366 if (Dst.isVectorElt()) {
2367 // Read/modify/write the vector, inserting the new element.
2368 llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2369 Dst.isVolatileQualified());
2370 auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
2371 if (IRStoreTy) {
2372 auto *IRVecTy = llvm::FixedVectorType::get(
2373 Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
2374 Vec = Builder.CreateBitCast(Vec, IRVecTy);
2375 // iN --> <N x i1>.
2376 }
2377 Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
2378 Dst.getVectorIdx(), "vecins");
2379 if (IRStoreTy) {
2380 // <N x i1> --> <iN>.
2381 Vec = Builder.CreateBitCast(Vec, IRStoreTy);
2382 }
2384 Dst.isVolatileQualified());
2385 return;
2386 }
2387
2388 // If this is an update of extended vector elements, insert them as
2389 // appropriate.
2390 if (Dst.isExtVectorElt())
2392
2393 if (Dst.isGlobalReg())
2394 return EmitStoreThroughGlobalRegLValue(Src, Dst);
2395
2396 if (Dst.isMatrixElt()) {
2397 llvm::Value *Idx = Dst.getMatrixIdx();
2398 if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2399 const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2400 llvm::MatrixBuilder MB(Builder);
2401 MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2402 }
2403 llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2404 llvm::Value *Vec =
2405 Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
2407 Dst.isVolatileQualified());
2408 return;
2409 }
2410
2411 assert(Dst.isBitField() && "Unknown LValue type");
2412 return EmitStoreThroughBitfieldLValue(Src, Dst);
2413 }
2414
2415 // There's special magic for assigning into an ARC-qualified l-value.
2416 if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2417 switch (Lifetime) {
2419 llvm_unreachable("present but none");
2420
2422 // nothing special
2423 break;
2424
2426 if (isInit) {
2427 Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2428 break;
2429 }
2430 EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2431 return;
2432
2434 if (isInit)
2435 // Initialize and then skip the primitive store.
2437 else
2439 /*ignore*/ true);
2440 return;
2441
2444 Src.getScalarVal()));
2445 // fall into the normal path
2446 break;
2447 }
2448 }
2449
2450 if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2451 // Assignment to a __weak object.
2452 Address LvalueDst = Dst.getAddress();
2453 llvm::Value *src = Src.getScalarVal();
2454 CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
2455 return;
2456 }
2457
2458 if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2459 // Assignment to a __strong object.
2460 Address LvalueDst = Dst.getAddress();
2461 llvm::Value *src = Src.getScalarVal();
2462 if (Dst.isObjCIvar()) {
2463 assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2464 llvm::Type *ResultType = IntPtrTy;
2466 llvm::Value *RHS = dst.emitRawPointer(*this);
2467 RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2468 llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst.emitRawPointer(*this),
2469 ResultType, "sub.ptr.lhs.cast");
2470 llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
2471 CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
2472 } else if (Dst.isGlobalObjCRef()) {
2473 CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
2474 Dst.isThreadLocalRef());
2475 }
2476 else
2477 CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2478 return;
2479 }
2480
2481 assert(Src.isScalar() && "Can't emit an agg store with this method");
2482 EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2483}
2484
2486 llvm::Value **Result) {
2487 const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2488 llvm::Type *ResLTy = convertTypeForLoadStore(Dst.getType());
2489 Address Ptr = Dst.getBitFieldAddress();
2490
2491 // Get the source value, truncated to the width of the bit-field.
2492 llvm::Value *SrcVal = Src.getScalarVal();
2493
2494 // Cast the source to the storage type and shift it into place.
2495 SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2496 /*isSigned=*/false);
2497 llvm::Value *MaskedVal = SrcVal;
2498
2499 const bool UseVolatile =
2500 CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
2501 Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2502 const unsigned StorageSize =
2503 UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2504 const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2505 // See if there are other bits in the bitfield's storage we'll need to load
2506 // and mask together with source before storing.
2507 if (StorageSize != Info.Size) {
2508 assert(StorageSize > Info.Size && "Invalid bitfield size.");
2509 llvm::Value *Val =
2510 Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2511
2512 // Mask the source value as needed.
2514 SrcVal = Builder.CreateAnd(
2515 SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
2516 "bf.value");
2517 MaskedVal = SrcVal;
2518 if (Offset)
2519 SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
2520
2521 // Mask out the original value.
2522 Val = Builder.CreateAnd(
2523 Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
2524 "bf.clear");
2525
2526 // Or together the unchanged values and the source value.
2527 SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2528 } else {
2529 assert(Offset == 0);
2530 // According to the AAPCS:
2531 // When a volatile bit-field is written, and its container does not overlap
2532 // with any non-bit-field member, its container must be read exactly once
2533 // and written exactly once using the access width appropriate to the type
2534 // of the container. The two accesses are not atomic.
2535 if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
2536 CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2537 Builder.CreateLoad(Ptr, true, "bf.load");
2538 }
2539
2540 // Write the new value back out.
2541 Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2542
2543 // Return the new value of the bit-field, if requested.
2544 if (Result) {
2545 llvm::Value *ResultVal = MaskedVal;
2546
2547 // Sign extend the value if needed.
2548 if (Info.IsSigned) {
2549 assert(Info.Size <= StorageSize);
2550 unsigned HighBits = StorageSize - Info.Size;
2551 if (HighBits) {
2552 ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2553 ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2554 }
2555 }
2556
2557 ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2558 "bf.result.cast");
2559 *Result = EmitFromMemory(ResultVal, Dst.getType());
2560 }
2561}
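// Illustrative sketch (editorial addition) of the read-modify-write above,
// using the same single-byte storage assumption as before for
//   struct S { int a : 3; int b : 5; };
// Storing to s.b (Offset=3, Size=5, StorageSize=8):
//
//   %bf.load  = load i8, ptr %s
//   %bf.value = and i8 %src, 31          ; keep the low 5 bits of the source
//   %bf.shl   = shl i8 %bf.value, 3      ; move the field into place
//   %bf.clear = and i8 %bf.load, 7       ; clear bits [3, 8) of the old value
//   %bf.set   = or i8 %bf.clear, %bf.shl
//   store i8 %bf.set, ptr %s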
2562
2564 LValue Dst) {
2565 // HLSL allows storing to scalar values through ExtVector component LValues.
2566 // To support this we need to handle the case where the destination address is
2567 // a scalar.
2568 Address DstAddr = Dst.getExtVectorAddress();
2569 if (!DstAddr.getElementType()->isVectorTy()) {
2570 assert(!Dst.getType()->isVectorType() &&
2571 "this should only occur for non-vector l-values");
2572 Builder.CreateStore(Src.getScalarVal(), DstAddr, Dst.isVolatileQualified());
2573 return;
2574 }
2575
2576 // This access turns into a read/modify/write of the vector. Load the input
2577 // value now.
2578 llvm::Value *Vec = Builder.CreateLoad(DstAddr, Dst.isVolatileQualified());
2579 const llvm::Constant *Elts = Dst.getExtVectorElts();
2580
2581 llvm::Value *SrcVal = Src.getScalarVal();
2582
2583 if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2584 unsigned NumSrcElts = VTy->getNumElements();
2585 unsigned NumDstElts =
2586 cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
2587 if (NumDstElts == NumSrcElts) {
2588 // Use a shuffle vector when the src and destination have the same number of
2589 // elements, and restore the vector mask since it is on the side it will be
2590 // stored.
2591 SmallVector<int, 4> Mask(NumDstElts);
2592 for (unsigned i = 0; i != NumSrcElts; ++i)
2593 Mask[getAccessedFieldNo(i, Elts)] = i;
2594
2595 Vec = Builder.CreateShuffleVector(SrcVal, Mask);
2596 } else if (NumDstElts > NumSrcElts) {
2597 // Extend the source vector to the same length and then shuffle it
2598 // into the destination.
2599 // FIXME: since we're shuffling with undef, can we just use the indices
2600 // into that? This could be simpler.
2601 SmallVector<int, 4> ExtMask;
2602 for (unsigned i = 0; i != NumSrcElts; ++i)
2603 ExtMask.push_back(i);
2604 ExtMask.resize(NumDstElts, -1);
2605 llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
2606 // Build the identity mask.
2608 for (unsigned i = 0; i != NumDstElts; ++i)
2609 Mask.push_back(i);
2610
2611 // When the vector size is odd and .odd or .hi is used, the last element
2612 // of the Elts constant array will be one past the size of the vector.
2613 // Ignore the last element here, if it is greater than the mask size.
2614 if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2615 NumSrcElts--;
2616
2617 // Modify the mask entries for the elements that get shuffled in.
2618 for (unsigned i = 0; i != NumSrcElts; ++i)
2619 Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
2620 Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
2621 } else {
2622 // We should never shorten the vector
2623 llvm_unreachable("unexpected shorten vector length");
2624 }
2625 } else {
2626 // If the Src is a scalar (not a vector) and the target is a vector, it must
2627 // be updating one element.
2628 unsigned InIdx = getAccessedFieldNo(0, Elts);
2629 llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2630 Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2631 }
2632
2634 Dst.isVolatileQualified());
2635}
2636
2637 /// Stores of global named registers are always calls to intrinsics.
2639 assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2640 "Bad type for register variable");
2641 llvm::MDNode *RegName = cast<llvm::MDNode>(
2642 cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2643 assert(RegName && "Register LValue is not metadata");
2644
2645 // We accept integer and pointer types only
2646 llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2647 llvm::Type *Ty = OrigTy;
2648 if (OrigTy->isPointerTy())
2649 Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2650 llvm::Type *Types[] = { Ty };
2651
2652 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2653 llvm::Value *Value = Src.getScalarVal();
2654 if (OrigTy->isPointerTy())
2655 Value = Builder.CreatePtrToInt(Value, Ty);
2656 Builder.CreateCall(
2657 F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2658}
2659
2660 // setObjCGCLValueClass - sets the class of the lvalue for the purpose of
2661 // generating the write-barrier API. It is currently a global, ivar,
2662 // or neither.
2663static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2664 LValue &LV,
2665 bool IsMemberAccess=false) {
2666 if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2667 return;
2668
2669 if (isa<ObjCIvarRefExpr>(E)) {
2670 QualType ExpTy = E->getType();
2671 if (IsMemberAccess && ExpTy->isPointerType()) {
2672 // If the ivar is a structure pointer, assigning to a field of
2673 // this struct follows gcc's behavior and conservatively makes it a non-ivar
2674 // write-barrier.
2675 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2676 if (ExpTy->isRecordType()) {
2677 LV.setObjCIvar(false);
2678 return;
2679 }
2680 }
2681 LV.setObjCIvar(true);
2682 auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2683 LV.setBaseIvarExp(Exp->getBase());
2685 return;
2686 }
2687
2688 if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2689 if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2690 if (VD->hasGlobalStorage()) {
2691 LV.setGlobalObjCRef(true);
2692 LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2693 }
2694 }
2696 return;
2697 }
2698
2699 if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2700 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2701 return;
2702 }
2703
2704 if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2705 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2706 if (LV.isObjCIvar()) {
2707 // If cast is to a structure pointer, follow gcc's behavior and make it
2708 // a non-ivar write-barrier.
2709 QualType ExpTy = E->getType();
2710 if (ExpTy->isPointerType())
2711 ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2712 if (ExpTy->isRecordType())
2713 LV.setObjCIvar(false);
2714 }
2715 return;
2716 }
2717
2718 if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
2719 setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
2720 return;
2721 }
2722
2723 if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
2724 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2725 return;
2726 }
2727
2728 if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
2729 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2730 return;
2731 }
2732
2733 if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
2734 setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2735 return;
2736 }
2737
2738 if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
2739 setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
2740 if (LV.isObjCIvar() && !LV.isObjCArray())
2741 // Using array syntax to assign to what an ivar points to is not the
2742 // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2743 LV.setObjCIvar(false);
2744 else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
2745 // Using array syntax to assign to what a global points to is not the
2746 // same as assigning to the global itself. {id *G;} G[i] = 0;
2747 LV.setGlobalObjCRef(false);
2748 return;
2749 }
2750
2751 if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
2752 setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
2753 // We don't know if the member is an 'ivar', but this flag is looked at
2754 // only in the context of LV.isObjCIvar().
2756 return;
2757 }
2758}
2759
2761 CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
2762 llvm::Type *RealVarTy, SourceLocation Loc) {
2763 if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
2765 CGF, VD, Addr, Loc);
2766 else
2767 Addr =
2768 CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
2769
2770 Addr = Addr.withElementType(RealVarTy);
2771 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2772}
2773
2775 const VarDecl *VD, QualType T) {
2776 std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2777 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2778 // Return an invalid address if variable is MT_To (or MT_Enter starting with
2779 // OpenMP 5.2) and unified memory is not enabled. For all other cases: MT_Link
2780 // and MT_To (or MT_Enter) with unified memory, return a valid address.
2781 if (!Res || ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2782 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2784 return Address::invalid();
2785 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
2786 ((*Res == OMPDeclareTargetDeclAttr::MT_To ||
2787 *Res == OMPDeclareTargetDeclAttr::MT_Enter) &&
2789 "Expected link clause OR to clause with unified memory enabled.");
2790 QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
2792 return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
2793}
2794
2795Address
2797 LValueBaseInfo *PointeeBaseInfo,
2798 TBAAAccessInfo *PointeeTBAAInfo) {
2799 llvm::LoadInst *Load =
2800 Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
2802 return makeNaturalAddressForPointer(Load, RefLVal.getType()->getPointeeType(),
2803 CharUnits(), /*ForPointeeType=*/true,
2804 PointeeBaseInfo, PointeeTBAAInfo);
2805}
2806
2808 LValueBaseInfo PointeeBaseInfo;
2809 TBAAAccessInfo PointeeTBAAInfo;
2810 Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
2811 &PointeeTBAAInfo);
2812 return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
2813 PointeeBaseInfo, PointeeTBAAInfo);
2814}
2815
2817 const PointerType *PtrTy,
2818 LValueBaseInfo *BaseInfo,
2819 TBAAAccessInfo *TBAAInfo) {
2820 llvm::Value *Addr = Builder.CreateLoad(Ptr);
2821 return makeNaturalAddressForPointer(Addr, PtrTy->getPointeeType(),
2822 CharUnits(), /*ForPointeeType=*/true,
2823 BaseInfo, TBAAInfo);
2824}
2825
2827 const PointerType *PtrTy) {
2828 LValueBaseInfo BaseInfo;
2829 TBAAAccessInfo TBAAInfo;
2830 Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
2831 return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
2832}
2833
2835 const Expr *E, const VarDecl *VD) {
2836 QualType T = E->getType();
2837
2838 // If it's thread_local, emit a call to its wrapper function instead.
2839 if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2841 return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
2842 // Check if the variable is marked as declare target with link clause in
2843 // device codegen.
2844 if (CGF.getLangOpts().OpenMPIsTargetDevice) {
2845 Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
2846 if (Addr.isValid())
2847 return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2848 }
2849
2850 llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
2851
2852 if (VD->getTLSKind() != VarDecl::TLS_None)
2853 V = CGF.Builder.CreateThreadLocalAddress(V);
2854
2855 llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
2856 CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
2857 Address Addr(V, RealVarTy, Alignment);
2858 // Emit reference to the private copy of the variable if it is an OpenMP
2859 // threadprivate variable.
2860 if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
2861 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2862 return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
2863 E->getExprLoc());
2864 }
2865 LValue LV = VD->getType()->isReferenceType() ?
2866 CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
2869 setObjCGCLValueClass(CGF.getContext(), E, LV);
2870 return LV;
2871}
2872
2874 llvm::Type *Ty) {
2875 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2876 if (FD->hasAttr<WeakRefAttr>()) {
2878 return aliasee.getPointer();
2879 }
2880
2881 llvm::Constant *V = GetAddrOfFunction(GD, Ty);
2882 return V;
2883}
2884
2886 GlobalDecl GD) {
2887 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2888 llvm::Constant *V = CGF.CGM.getFunctionPointer(GD);
2889 CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
2890 return CGF.MakeAddrLValue(V, E->getType(), Alignment,
2892}
2893
2895 llvm::Value *ThisValue) {
2896
2897 return CGF.EmitLValueForLambdaField(FD, ThisValue);
2898}
2899
2900/// Named Registers are named metadata pointing to the register name
2901/// which will be read from/written to as an argument to the intrinsic
2902/// @llvm.read/write_register.
2903/// So far, only the name is being passed down, but other options such as
2904/// register type, allocation type or even optimization options could be
2905/// passed down via the metadata node.
2907 SmallString<64> Name("llvm.named.register.");
2908 AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
2909 assert(Asm->getLabel().size() < 64-Name.size() &&
2910 "Register name too big");
2911 Name.append(Asm->getLabel());
2912 llvm::NamedMDNode *M =
2913 CGM.getModule().getOrInsertNamedMetadata(Name);
2914 if (M->getNumOperands() == 0) {
2915 llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
2916 Asm->getLabel());
2917 llvm::Metadata *Ops[] = {Str};
2918 M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
2919 }
2920
2921 CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
2922
2923 llvm::Value *Ptr =
2924 llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
2925 return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
2926}
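// Illustrative sketch (editorial addition) of the metadata produced above for
//   register unsigned long current_sp __asm__("sp");
//
//   !llvm.named.register.sp = !{!0}
//   !0 = !{!"sp"}
//
// The lvalue's "pointer" is the metadata-as-value !0, which later feeds the
// read_register/write_register intrinsics.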
2927
2928/// Determine whether we can emit a reference to \p VD from the current
2929/// context, despite not necessarily having seen an odr-use of the variable in
2930/// this context.
2932 const DeclRefExpr *E,
2933 const VarDecl *VD) {
2934 // For a variable declared in an enclosing scope, do not emit a spurious
2935 // reference even if we have a capture, as that will emit an unwarranted
2936 // reference to our capture state, and will likely generate worse code than
2937 // emitting a local copy.
2938 if (E->refersToEnclosingVariableOrCapture())
2939 return false;
2940
2941 // For a local declaration declared in this function, we can always reference
2942 // it even if we don't have an odr-use.
2943 if (VD->hasLocalStorage()) {
2944 return VD->getDeclContext() ==
2945 dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
2946 }
2947
2948 // For a global declaration, we can emit a reference to it if we know
2949 // for sure that we are able to emit a definition of it.
2950 VD = VD->getDefinition(CGF.getContext());
2951 if (!VD)
2952 return false;
2953
2954 // Don't emit a spurious reference if it might be to a variable that only
2955 // exists on a different device / target.
2956 // FIXME: This is unnecessarily broad. Check whether this would actually be a
2957 // cross-target reference.
2958 if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
2959 CGF.getLangOpts().OpenCL) {
2960 return false;
2961 }
2962
2963 // We can emit a spurious reference only if the linkage implies that we'll
2964 // be emitting a non-interposable symbol that will be retained until link
2965 // time.
2966 switch (CGF.CGM.getLLVMLinkageVarDefinition(VD)) {
2967 case llvm::GlobalValue::ExternalLinkage:
2968 case llvm::GlobalValue::LinkOnceODRLinkage:
2969 case llvm::GlobalValue::WeakODRLinkage:
2970 case llvm::GlobalValue::InternalLinkage:
2971 case llvm::GlobalValue::PrivateLinkage:
2972 return true;
2973 default:
2974 return false;
2975 }
2976}
2977
2979 const NamedDecl *ND = E->getDecl();
2980 QualType T = E->getType();
2981
2982 assert(E->isNonOdrUse() != NOUR_Unevaluated &&
2983 "should not emit an unevaluated operand");
2984
2985 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
2986 // Global named registers are accessed via intrinsics only.
2987 if (VD->getStorageClass() == SC_Register &&
2988 VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
2989 return EmitGlobalNamedRegister(VD, CGM);
2990
2991 // If this DeclRefExpr does not constitute an odr-use of the variable,
2992 // we're not permitted to emit a reference to it in general, and it might
2993 // not be captured if capture would be necessary for a use. Emit the
2994 // constant value directly instead.
2995 if (E->isNonOdrUse() == NOUR_Constant &&
2996 (VD->getType()->isReferenceType() ||
2997 !canEmitSpuriousReferenceToVariable(*this, E, VD))) {
2998 VD->getAnyInitializer(VD);
2999 llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
3000 E->getLocation(), *VD->evaluateValue(), VD->getType());
3001 assert(Val && "failed to emit constant expression");
3002
3003 Address Addr = Address::invalid();
3004 if (!VD->getType()->isReferenceType()) {
3005 // Spill the constant value to a global.
3006 Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
3007 getContext().getDeclAlign(VD));
3008 llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
3009 auto *PTy = llvm::PointerType::get(
3010 VarTy, getTypes().getTargetAddressSpace(VD->getType()));
3011 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
3012 } else {
3013 // Should we be using the alignment of the constant pointer we emitted?
3014 CharUnits Alignment =
3016 /* BaseInfo= */ nullptr,
3017 /* TBAAInfo= */ nullptr,
3018 /* forPointeeType= */ true);
3019 Addr = makeNaturalAddressForPointer(Val, T, Alignment);
3020 }
3021 return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
3022 }
3023
3024 // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
3025
3026 // Check for captured variables.
3027 if (E->refersToEnclosingVariableOrCapture()) {
3028 VD = VD->getCanonicalDecl();
3029 if (auto *FD = LambdaCaptureFields.lookup(VD))
3030 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3031 if (CapturedStmtInfo) {
3032 auto I = LocalDeclMap.find(VD);
3033 if (I != LocalDeclMap.end()) {
3034 LValue CapLVal;
3035 if (VD->getType()->isReferenceType())
3036 CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
3038 else
3039 CapLVal = MakeAddrLValue(I->second, T);
3040 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3041 // in simd context.
3042 if (getLangOpts().OpenMP &&
3044 CapLVal.setNontemporal(/*Value=*/true);
3045 return CapLVal;
3046 }
3047 LValue CapLVal =
3050 Address LValueAddress = CapLVal.getAddress();
3051 CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
3052 LValueAddress.getElementType(),
3053 getContext().getDeclAlign(VD)),
3054 CapLVal.getType(),
3056 CapLVal.getTBAAInfo());
3057 // Mark lvalue as nontemporal if the variable is marked as nontemporal
3058 // in simd context.
3059 if (getLangOpts().OpenMP &&
3061 CapLVal.setNontemporal(/*Value=*/true);
3062 return CapLVal;
3063 }
3064
3065 assert(isa<BlockDecl>(CurCodeDecl));
3066 Address addr = GetAddrOfBlockDecl(VD);
3067 return MakeAddrLValue(addr, T, AlignmentSource::Decl);
3068 }
3069 }
3070
3071 // FIXME: We should be able to assert this for FunctionDecls as well!
3072 // FIXME: We should be able to assert this for all DeclRefExprs, not just
3073 // those with a valid source location.
3074 assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
3075 !E->getLocation().isValid()) &&
3076 "Should not use decl without marking it used!");
3077
3078 if (ND->hasAttr<WeakRefAttr>()) {
3079 const auto *VD = cast<ValueDecl>(ND);
3081 return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
3082 }
3083
3084 if (const auto *VD = dyn_cast<VarDecl>(ND)) {
3085 // Check if this is a global variable.
3086 if (VD->hasLinkage() || VD->isStaticDataMember())
3087 return EmitGlobalVarDeclLValue(*this, E, VD);
3088
3089 Address addr = Address::invalid();
3090
3091 // The variable should generally be present in the local decl map.
3092 auto iter = LocalDeclMap.find(VD);
3093 if (iter != LocalDeclMap.end()) {
3094 addr = iter->second;
3095
3096 // Otherwise, it might be a static local we haven't emitted yet for
3097 // some reason; most likely, because it's in an outer function.
3098 } else if (VD->isStaticLocal()) {
3099 llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
3101 addr = Address(
3102 var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
3103
3104 // No other cases for now.
3105 } else {
3106 llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
3107 }
3108
3109 // Handle threadlocal function locals.
3110 if (VD->getTLSKind() != VarDecl::TLS_None)
3111 addr = addr.withPointer(
3112 Builder.CreateThreadLocalAddress(addr.getBasePointer()),
3114
3115 // Check for OpenMP threadprivate variables.
3116 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
3117 VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
3119 *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
3120 E->getExprLoc());
3121 }
3122
3123 // Drill into block byref variables.
3124 bool isBlockByref = VD->isEscapingByref();
3125 if (isBlockByref) {
3126 addr = emitBlockByrefAddress(addr, VD);
3127 }
3128
3129 // Drill into reference types.
3130 LValue LV = VD->getType()->isReferenceType() ?
3131 EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
3133
3134 bool isLocalStorage = VD->hasLocalStorage();
3135
3136 bool NonGCable = isLocalStorage &&
3137 !VD->getType()->isReferenceType() &&
3138 !isBlockByref;
3139 if (NonGCable) {
3141 LV.setNonGC(true);
3142 }
3143
3144 bool isImpreciseLifetime =
3145 (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
3146 if (isImpreciseLifetime)
3149 return LV;
3150 }
3151
3152 if (const auto *FD = dyn_cast<FunctionDecl>(ND))
3153 return EmitFunctionDeclLValue(*this, E, FD);
3154
3155 // FIXME: While we're emitting a binding from an enclosing scope, all other
3156 // DeclRefExprs we see should be implicitly treated as if they also refer to
3157 // an enclosing scope.
3158 if (const auto *BD = dyn_cast<BindingDecl>(ND)) {
3159 if (E->refersToEnclosingVariableOrCapture()) {
3160 auto *FD = LambdaCaptureFields.lookup(BD);
3161 return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
3162 }
3163 return EmitLValue(BD->getBinding());
3164 }
3165
3166 // We can form DeclRefExprs naming GUID declarations when reconstituting
3167 // non-type template parameters into expressions.
3168 if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
3171
3172 if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
3173 auto ATPO = CGM.GetAddrOfTemplateParamObject(TPO);
3174 auto AS = getLangASFromTargetAS(ATPO.getAddressSpace());
3175
3176 if (AS != T.getAddressSpace()) {
3177 auto TargetAS = getContext().getTargetAddressSpace(T.getAddressSpace());
3178 auto PtrTy = ATPO.getElementType()->getPointerTo(TargetAS);
3180 CGM, ATPO.getPointer(), AS, T.getAddressSpace(), PtrTy);
3181 ATPO = ConstantAddress(ASC, ATPO.getElementType(), ATPO.getAlignment());
3182 }
3183
3184 return MakeAddrLValue(ATPO, T, AlignmentSource::Decl);
3185 }
3186
3187 llvm_unreachable("Unhandled DeclRefExpr");
3188}
3189
3191 // __extension__ doesn't affect lvalue-ness.
3192 if (E->getOpcode() == UO_Extension)
3193 return EmitLValue(E->getSubExpr());
3194
3195 QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
3196 switch (E->getOpcode()) {
3197 default: llvm_unreachable("Unknown unary operator lvalue!");
3198 case UO_Deref: {
3199 QualType T = E->getSubExpr()->getType()->getPointeeType();
3200 assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
3201
3202 LValueBaseInfo BaseInfo;
3203 TBAAAccessInfo TBAAInfo;
3204 Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
3205 &TBAAInfo);
3206 LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
3208
3209 // We should not generate a __weak write barrier on an indirect reference
3210 // to a pointer to object, as in: void foo (__weak id *param); *param = 0;
3211 // But we continue to generate a __strong write barrier on an indirect write
3212 // into a pointer to object.
3213 if (getLangOpts().ObjC &&
3214 getLangOpts().getGC() != LangOptions::NonGC &&
3215 LV.isObjCWeak())
3217 return LV;
3218 }
3219 case UO_Real:
3220 case UO_Imag: {
3221 LValue LV = EmitLValue(E->getSubExpr());
3222 assert(LV.isSimple() && "real/imag on non-ordinary l-value");
3223
3224 // __real is valid on scalars. This is a faster way of testing that.
3225 // __imag can only produce an rvalue on scalars.
3226 if (E->getOpcode() == UO_Real &&
3227 !LV.getAddress().getElementType()->isStructTy()) {
3228 assert(E->getSubExpr()->getType()->isArithmeticType());
3229 return LV;
3230 }
3231
3232 QualType T = ExprTy->castAs<ComplexType>()->getElementType();
3233
3234 Address Component =
3235 (E->getOpcode() == UO_Real
3237 : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
3238 LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
3240 ElemLV.getQuals().addQualifiers(LV.getQuals());
3241 return ElemLV;
3242 }
3243 case UO_PreInc:
3244 case UO_PreDec: {
3245 LValue LV = EmitLValue(E->getSubExpr());
3246 bool isInc = E->getOpcode() == UO_PreInc;
3247
3248 if (E->getType()->isAnyComplexType())
3249 EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
3250 else
3251 EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
3252 return LV;
3253 }
3254 }
3255}
3256
3260}
3261
3265}
3266
3268 auto SL = E->getFunctionName();
3269 assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
3270 StringRef FnName = CurFn->getName();
3271 if (FnName.starts_with("\01"))
3272 FnName = FnName.substr(1);
3273 StringRef NameItems[] = {
3274 PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
3275 std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
3276 if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
3277 std::string Name = std::string(SL->getString());
3278 if (!Name.empty()) {
3279 unsigned Discriminator =
3280 CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
3281 if (Discriminator)
3282 Name += "_" + Twine(Discriminator + 1).str();
3283 auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
3284 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3285 } else {
3286 auto C =
3287 CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
3288 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3289 }
3290 }
3291 auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
3292 return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3293}
3294
3295/// Emit a type description suitable for use by a runtime sanitizer library. The
3296/// format of a type descriptor is
3297///
3298/// \code
3299/// { i16 TypeKind, i16 TypeInfo }
3300/// \endcode
3301///
3302/// followed by an array of i8 containing the type name. TypeKind is 0 for an
3303/// integer, 1 for a floating point value, and -1 for anything else.
3304llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
3305 // Only emit each type's descriptor once.
3306 if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
3307 return C;
3308
3309 uint16_t TypeKind = -1;
3310 uint16_t TypeInfo = 0;
3311
3312 if (T->isIntegerType()) {
3313 TypeKind = 0;
3314 TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3315 (T->isSignedIntegerType() ? 1 : 0);
3316 } else if (T->isFloatingType()) {
3317 TypeKind = 1;
3318 TypeInfo = getContext().getTypeSize(T);
3319 }
3320
3321 // Format the type name as if for a diagnostic, including quotes and
3322 // optionally an 'aka'.
3323 SmallString<32> Buffer;
3324 CGM.getDiags().ConvertArgToString(
3325 DiagnosticsEngine::ak_qualtype, (intptr_t)T.getAsOpaquePtr(), StringRef(),
3326 StringRef(), std::nullopt, Buffer, std::nullopt);
3327
3328 llvm::Constant *Components[] = {
3329 Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3330 llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3331 };
3332 llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3333
3334 auto *GV = new llvm::GlobalVariable(
3335 CGM.getModule(), Descriptor->getType(),
3336 /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3337 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3338 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
3339
3340 // Remember the descriptor for this type.
3341 CGM.setTypeDescriptorInMap(T, GV);
3342
3343 return GV;
3344}
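// As an illustration of the format described above: for a plain C 'int' on a
// typical target where int is 32 bits, the descriptor built by this routine
// is roughly the following constant (the quoted name comes from the
// diagnostic formatting; TypeKind 0 marks an integer, and TypeInfo is
// (log2(32) << 1) | signed == 11):
//
// \code
// { i16 0, i16 11, [6 x i8] c"'int'\00" }
// \endcode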
3345
3346llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3347 llvm::Type *TargetTy = IntPtrTy;
3348
3349 if (V->getType() == TargetTy)
3350 return V;
3351
3352 // Floating-point types which fit into intptr_t are bitcast to integers
3353 // and then passed directly (after zero-extension, if necessary).
3354 if (V->getType()->isFloatingPointTy()) {
3355 unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedValue();
3356 if (Bits <= TargetTy->getIntegerBitWidth())
3357 V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
3358 Bits));
3359 }
3360
3361 // Integers which fit in intptr_t are zero-extended and passed directly.
3362 if (V->getType()->isIntegerTy() &&
3363 V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
3364 return Builder.CreateZExt(V, TargetTy);
3365
3366 // Pointers are passed directly, everything else is passed by address.
3367 if (!V->getType()->isPointerTy()) {
3368 RawAddress Ptr = CreateDefaultAlignTempAlloca(V->getType());
3369 Builder.CreateStore(V, Ptr);
3370 V = Ptr.getPointer();
3371 }
3372 return Builder.CreatePtrToInt(V, TargetTy);
3373}
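// In short, a handler argument is lowered as follows: a floating-point value
// that fits in intptr_t is bitcast to a same-width integer and zero-extended,
// an integer no wider than intptr_t is zero-extended, a pointer is passed via
// ptrtoint, and anything else is spilled to a temporary alloca whose address
// is passed instead. For example, on a typical 64-bit target:
//
// \code
// float f;      // bitcast to i32, then zext to i64
// __int128 big; // stored to a stack slot; the slot's address is passed
// \endcode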
3374
3375/// Emit a representation of a SourceLocation for passing to a handler
3376/// in a sanitizer runtime library. The format for this data is:
3377/// \code
3378/// struct SourceLocation {
3379/// const char *Filename;
3380/// int32_t Line, Column;
3381/// };
3382/// \endcode
3383/// For an invalid SourceLocation, the Filename pointer is null.
3384llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
3385 llvm::Constant *Filename;
3386 int Line, Column;
3387
3388 PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
3389 if (PLoc.isValid()) {
3390 StringRef FilenameString = PLoc.getFilename();
3391
3392 int PathComponentsToStrip =
3393 CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
3394 if (PathComponentsToStrip < 0) {
3395 assert(PathComponentsToStrip != INT_MIN);
3396 int PathComponentsToKeep = -PathComponentsToStrip;
3397 auto I = llvm::sys::path::rbegin(FilenameString);
3398 auto E = llvm::sys::path::rend(FilenameString);
3399 while (I != E && --PathComponentsToKeep)
3400 ++I;
3401
3402 FilenameString = FilenameString.substr(I - E);
3403 } else if (PathComponentsToStrip > 0) {
3404 auto I = llvm::sys::path::begin(FilenameString);
3405 auto E = llvm::sys::path::end(FilenameString);
3406 while (I != E && PathComponentsToStrip--)
3407 ++I;
3408
3409 if (I != E)
3410 FilenameString =
3411 FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
3412 else
3413 FilenameString = llvm::sys::path::filename(FilenameString);
3414 }
3415
3416 auto FilenameGV =
3417 CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
3418 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
3419 cast<llvm::GlobalVariable>(
3420 FilenameGV.getPointer()->stripPointerCasts()));
3421 Filename = FilenameGV.getPointer();
3422 Line = PLoc.getLine();
3423 Column = PLoc.getColumn();
3424 } else {
3425 Filename = llvm::Constant::getNullValue(Int8PtrTy);
3426 Line = Column = 0;
3427 }
3428
3429 llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
3430 Builder.getInt32(Column)};
3431
3432 return llvm::ConstantStruct::getAnon(Data);
3433}
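// For a valid location the emitted constant is, schematically,
// { ptr <private ".src" string holding the (possibly stripped) file name>,
//   i32 Line, i32 Column }; for an invalid location the file name pointer is
// null and both counters are zero, matching the struct documented above.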
3434
3435namespace {
3436/// Specify under what conditions this check can be recovered
3437enum class CheckRecoverableKind {
3438 /// Always terminate program execution if this check fails.
3439 Unrecoverable,
3440 /// Check supports recovering, runtime has both fatal (noreturn) and
3441 /// non-fatal handlers for this check.
3442 Recoverable,
3443 /// Runtime conditionally aborts, always need to support recovery.
3444 AlwaysRecoverable
3445};
3446}
3447
3448static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
3449 assert(Kind.countPopulation() == 1);
3450 if (Kind == SanitizerKind::Vptr)
3451 return CheckRecoverableKind::AlwaysRecoverable;
3452 else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
3453 return CheckRecoverableKind::Unrecoverable;
3454 else
3455 return CheckRecoverableKind::Recoverable;
3456}
3457
3458namespace {
3459struct SanitizerHandlerInfo {
3460 char const *const Name;
3461 unsigned Version;
3462};
3463}
3464
3465const SanitizerHandlerInfo SanitizerHandlers[] = {
3466#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
3467 LIST_SANITIZER_CHECKS
3468#undef SANITIZER_CHECK
3469};
3470
3471static void emitCheckHandlerCall(CodeGenFunction &CGF,
3472 llvm::FunctionType *FnType,
3473 ArrayRef<llvm::Value *> FnArgs,
3474 SanitizerHandler CheckHandler,
3475 CheckRecoverableKind RecoverKind, bool IsFatal,
3476 llvm::BasicBlock *ContBB) {
3477 assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
3478 std::optional<ApplyDebugLocation> DL;
3479 if (!CGF.Builder.getCurrentDebugLocation()) {
3480 // Ensure that the call has at least an artificial debug location.
3481 DL.emplace(CGF, SourceLocation());
3482 }
3483 bool NeedsAbortSuffix =
3484 IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
3485 bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
3486 const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
3487 const StringRef CheckName = CheckInfo.Name;
3488 std::string FnName = "__ubsan_handle_" + CheckName.str();
3489 if (CheckInfo.Version && !MinimalRuntime)
3490 FnName += "_v" + llvm::utostr(CheckInfo.Version);
3491 if (MinimalRuntime)
3492 FnName += "_minimal";
3493 if (NeedsAbortSuffix)
3494 FnName += "_abort";
3495 bool MayReturn =
3496 !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
3497
3498 llvm::AttrBuilder B(CGF.getLLVMContext());
3499 if (!MayReturn) {
3500 B.addAttribute(llvm::Attribute::NoReturn)
3501 .addAttribute(llvm::Attribute::NoUnwind);
3502 }
3503 B.addUWTableAttr(llvm::UWTableKind::Default);
3504
3505 llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
3506 FnType, FnName,
3507 llvm::AttributeList::get(CGF.getLLVMContext(),
3508 llvm::AttributeList::FunctionIndex, B),
3509 /*Local=*/true);
3510 llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
3511 if (!MayReturn) {
3512 HandlerCall->setDoesNotReturn();
3513 CGF.Builder.CreateUnreachable();
3514 } else {
3515 CGF.Builder.CreateBr(ContBB);
3516 }
3517}
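// As an illustration of the name assembly above: a recoverable check whose
// handler is named "type_mismatch" with Version 1, built with the full
// runtime and -fno-sanitize-recover, would call
// "__ubsan_handle_type_mismatch_v1_abort"; the same check under
// -fsanitize-minimal-runtime would instead call
// "__ubsan_handle_type_mismatch_minimal_abort" (the version suffix is
// dropped for the minimal runtime).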
3518
3519void CodeGenFunction::EmitCheck(
3520 ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
3521 SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
3522 ArrayRef<llvm::Value *> DynamicArgs) {
3523 assert(IsSanitizerScope);
3524 assert(Checked.size() > 0);
3525 assert(CheckHandler >= 0 &&
3526 size_t(CheckHandler) < std::size(SanitizerHandlers));
3527 const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
3528
3529 llvm::Value *FatalCond = nullptr;
3530 llvm::Value *RecoverableCond = nullptr;
3531 llvm::Value *TrapCond = nullptr;
3532 for (int i = 0, n = Checked.size(); i < n; ++i) {
3533 llvm::Value *Check = Checked[i].first;
3534 // -fsanitize-trap= overrides -fsanitize-recover=.
3535 llvm::Value *&Cond =
3536 CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)
3537 ? TrapCond
3538 : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
3539 ? RecoverableCond
3540 : FatalCond;
3541 Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
3542 }
3543
3544 if (ClSanitizeGuardChecks) {
3545 llvm::Value *Allow =
3546 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::allow_ubsan_check),
3547 llvm::ConstantInt::get(CGM.Int8Ty, CheckHandler));
3548
3549 for (llvm::Value **Cond : {&FatalCond, &RecoverableCond, &TrapCond}) {
3550 if (*Cond)
3551 *Cond = Builder.CreateOr(*Cond, Builder.CreateNot(Allow));
3552 }
3553 }
3554
3555 if (TrapCond)
3556 EmitTrapCheck(TrapCond, CheckHandler);
3557 if (!FatalCond && !RecoverableCond)
3558 return;
3559
3560 llvm::Value *JointCond;
3561 if (FatalCond && RecoverableCond)
3562 JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
3563 else
3564 JointCond = FatalCond ? FatalCond : RecoverableCond;
3565 assert(JointCond);
3566
3567 CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
3568 assert(SanOpts.has(Checked[0].second));
3569#ifndef NDEBUG
3570 for (int i = 1, n = Checked.size(); i < n; ++i) {
3571 assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
3572 "All recoverable kinds in a single check must be same!");
3573 assert(SanOpts.has(Checked[i].second));
3574 }
3575#endif
3576
3577 llvm::BasicBlock *Cont = createBasicBlock("cont");
3578 llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
3579 llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
3580 // Give hint that we very much don't expect to execute the handler
3581 llvm::MDBuilder MDHelper(getLLVMContext());
3582 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3583 Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
3584 EmitBlock(Handlers);
3585
3586 // Handler functions take an i8* pointing to the (handler-specific) static
3587 // information block, followed by a sequence of intptr_t arguments
3588 // representing operand values.
3589 SmallVector<llvm::Value *, 4> Args;
3590 SmallVector<llvm::Type *, 4> ArgTypes;
3591 if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
3592 Args.reserve(DynamicArgs.size() + 1);
3593 ArgTypes.reserve(DynamicArgs.size() + 1);
3594
3595 // Emit handler arguments and create handler function type.
3596 if (!StaticArgs.empty()) {
3597 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3598 auto *InfoPtr = new llvm::GlobalVariable(
3599 CGM.getModule(), Info->getType(), false,
3600 llvm::GlobalVariable::PrivateLinkage, Info, "", nullptr,
3601 llvm::GlobalVariable::NotThreadLocal,
3602 CGM.getDataLayout().getDefaultGlobalsAddressSpace());
3603 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3604 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3605 Args.push_back(InfoPtr);
3606 ArgTypes.push_back(Args.back()->getType());
3607 }
3608
3609 for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
3610 Args.push_back(EmitCheckValue(DynamicArgs[i]));
3611 ArgTypes.push_back(IntPtrTy);
3612 }
3613 }
3614
3615 llvm::FunctionType *FnType =
3616 llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
3617
3618 if (!FatalCond || !RecoverableCond) {
3619 // Simple case: we need to generate a single handler call, either
3620 // fatal, or non-fatal.
3621 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
3622 (FatalCond != nullptr), Cont);
3623 } else {
3624 // Emit two handler calls: first one for set of unrecoverable checks,
3625 // another one for recoverable.
3626 llvm::BasicBlock *NonFatalHandlerBB =
3627 createBasicBlock("non_fatal." + CheckName);
3628 llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
3629 Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
3630 EmitBlock(FatalHandlerBB);
3631 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
3632 NonFatalHandlerBB);
3633 EmitBlock(NonFatalHandlerBB);
3634 emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
3635 Cont);
3636 }
3637
3638 EmitBlock(Cont);
3639}
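// The net effect of EmitCheck with the full runtime is, schematically (single
// handler case shown; names and argument list vary per check):
//
// \code
//   br i1 %ok, label %cont, label %handler.<check>, !prof <likely cont>
// handler.<check>:
//   call void @__ubsan_handle_<check>[_abort](ptr @static_data, i64 %arg, ...)
//   br label %cont            ; or 'unreachable' when the handler cannot return
// cont:
// \endcode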
3640
3641void CodeGenFunction::EmitCfiSlowPathCheck(
3642 SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId,
3643 llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) {
3644 llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
3645
3646 llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
3647 llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
3648
3649 llvm::MDBuilder MDHelper(getLLVMContext());
3650 llvm::MDNode *Node = MDHelper.createLikelyBranchWeights();
3651 BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
3652
3653 EmitBlock(CheckBB);
3654
3655 bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);
3656
3657 llvm::CallInst *CheckCall;
3658 llvm::FunctionCallee SlowPathFn;
3659 if (WithDiag) {
3660 llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3661 auto *InfoPtr =
3662 new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3663 llvm::GlobalVariable::PrivateLinkage, Info);
3664 InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3665 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3666
3667 SlowPathFn = CGM.getModule().getOrInsertFunction(
3668 "__cfi_slowpath_diag",
3669 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
3670 false));
3671 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr, InfoPtr});
3672 } else {
3673 SlowPathFn = CGM.getModule().getOrInsertFunction(
3674 "__cfi_slowpath",
3675 llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
3676 CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
3677 }
3678
3679 CGM.setDSOLocal(
3680 cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
3681 CheckCall->setDoesNotThrow();
3682
3683 EmitBlock(Cont);
3684}
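// The two runtime entry points referenced above have these C signatures, with
// DiagData pointing at the static diagnostic block emitted when the check is
// not in trap mode:
//
// \code
// void __cfi_slowpath(uint64_t CallSiteTypeId, void *Ptr);
// void __cfi_slowpath_diag(uint64_t CallSiteTypeId, void *Ptr, void *DiagData);
// \endcode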
3685
3686// Emit a stub for __cfi_check function so that the linker knows about this
3687// symbol in LTO mode.
3688void CodeGenFunction::EmitCfiCheckStub() {
3689 llvm::Module *M = &CGM.getModule();
3690 ASTContext &C = getContext();
3691 QualType QInt64Ty = C.getIntTypeForBitwidth(64, false);
3692
3693 FunctionArgList FnArgs;
3694 ImplicitParamDecl ArgCallsiteTypeId(C, QInt64Ty, ImplicitParamKind::Other);
3695 ImplicitParamDecl ArgAddr(C, C.VoidPtrTy, ImplicitParamKind::Other);
3696 ImplicitParamDecl ArgCFICheckFailData(C, C.VoidPtrTy,
3697 ImplicitParamKind::Other);
3698 FnArgs.push_back(&ArgCallsiteTypeId);
3699 FnArgs.push_back(&ArgAddr);
3700 FnArgs.push_back(&ArgCFICheckFailData);
3701 const CGFunctionInfo &FI =
3702 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, FnArgs);
3703
3704 llvm::Function *F = llvm::Function::Create(
3705 llvm::FunctionType::get(VoidTy, {Int64Ty, VoidPtrTy, VoidPtrTy}, false),
3706 llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
3707 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3708 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
3709 F->setAlignment(llvm::Align(4096));
3710 CGM.setDSOLocal(F);
3711
3712 llvm::LLVMContext &Ctx = M->getContext();
3713 llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
3714 // CrossDSOCFI pass is not executed if there is no executable code.
3715 SmallVector<llvm::Value*> Args{F->getArg(2), F->getArg(1)};
3716 llvm::CallInst::Create(M->getFunction("__cfi_check_fail"), Args, "", BB);
3717 llvm::ReturnInst::Create(Ctx, nullptr, BB);
3718}
3719
3720// This function is basically a switch over the CFI failure kind, which is
3721// extracted from CFICheckFailData (1st function argument). Each case is either
3722// llvm.trap or a call to one of the two runtime handlers, based on
3723// -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
3724// failure kind) traps, but this should really never happen. CFICheckFailData
3725// can be nullptr if the calling module has -fsanitize-trap behavior for this
3726// check kind; in this case __cfi_check_fail traps as well.
3727void CodeGenFunction::EmitCfiCheckFail() {
3728 SanitizerScope SanScope(this);
3729 FunctionArgList Args;
3730 ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
3731 ImplicitParamKind::Other);
3732 ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
3733 ImplicitParamKind::Other);
3734 Args.push_back(&ArgData);
3735 Args.push_back(&ArgAddr);
3736
3737 const CGFunctionInfo &FI =
3738 CGM.getTypes().arrangeBuiltinFunctionDeclaration(CGM.getContext().VoidTy, Args);
3739
3740 llvm::Function *F = llvm::Function::Create(
3741 llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
3742 llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
3743
3744 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3745 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
3746 F->setVisibility(llvm::GlobalValue::HiddenVisibility);
3747
3748 StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
3749 SourceLocation());
3750
3751 // This function is not affected by NoSanitizeList. This function does
3752 // not have a source location, but "src:*" would still apply. Revert any
3753 // changes to SanOpts made in StartFunction.
3754 SanOpts = CGM.getLangOpts().Sanitize;
3755
3756 llvm::Value *Data =
3757 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
3758 CGM.getContext().VoidPtrTy, ArgData.getLocation());
3759 llvm::Value *Addr =
3760 EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
3761 CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
3762
3763 // Data == nullptr means the calling module has trap behaviour for this check.
3764 llvm::Value *DataIsNotNullPtr =
3765 Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
3766 EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);
3767
3768 llvm::StructType *SourceLocationTy =
3769 llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
3770 llvm::StructType *CfiCheckFailDataTy =
3771 llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
3772
3773 llvm::Value *V = Builder.CreateConstGEP2_32(
3774 CfiCheckFailDataTy,
3775 Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
3776 0);
3777
3778 Address CheckKindAddr(V, Int8Ty, getIntAlign());
3779 llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
3780
3781 llvm::Value *AllVtables = llvm::MetadataAsValue::get(
3782 CGM.getLLVMContext(),
3783 llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
3784 llvm::Value *ValidVtable = Builder.CreateZExt(
3785 Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
3786 {Addr, AllVtables}),
3787 IntPtrTy);
3788
3789 const std::pair<int, SanitizerMask> CheckKinds[] = {
3790 {CFITCK_VCall, SanitizerKind::CFIVCall},
3791 {CFITCK_NVCall, SanitizerKind::CFINVCall},
3792 {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast},
3793 {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast},
3794 {CFITCK_ICall, SanitizerKind::CFIICall}};
3795
3797 for (auto CheckKindMaskPair : CheckKinds) {
3798 int Kind = CheckKindMaskPair.first;
3799 SanitizerMask Mask = CheckKindMaskPair.second;
3800 llvm::Value *Cond =
3801 Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
3802 if (CGM.getLangOpts().Sanitize.has(Mask))
3803 EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
3804 {Data, Addr, ValidVtable});
3805 else
3806 EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
3807 }
3808
3809 FinishFunction();
3810 // The only reference to this function will be created during LTO link.
3811 // Make sure it survives until then.
3812 CGM.addUsedGlobal(F);
3813}
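// Viewed from this function, the data block passed by the caller has the
// layout built in CfiCheckFailDataTy above, roughly (field names here are
// illustrative, not part of the runtime ABI headers):
//
// \code
// struct CfiCheckFailData {
//   uint8_t CheckKind;                           // one of the CFITCK_* values
//   struct { const char *Filename; int32_t Line, Column; } Loc;
//   void *ExtraData;                             // check-specific static data
// };
// \endcode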
3814
3815void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
3816 if (SanOpts.has(SanitizerKind::Unreachable)) {
3817 SanitizerScope SanScope(this);
3818 EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
3819 SanitizerKind::Unreachable),
3820 SanitizerHandler::BuiltinUnreachable,
3821 EmitCheckSourceLocation(Loc), std::nullopt);
3822 }
3823 Builder.CreateUnreachable();
3824}
3825
3826void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
3827 SanitizerHandler CheckHandlerID) {
3828 llvm::BasicBlock *Cont = createBasicBlock("cont");
3829
3830 // If we're optimizing, collapse all calls to trap down to just one per
3831 // check-type per function to save on code size.
3832 if ((int)TrapBBs.size() <= CheckHandlerID)
3833 TrapBBs.resize(CheckHandlerID + 1);
3834
3835 llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
3836
3837 if (!ClSanitizeDebugDeoptimization &&
3838 CGM.getCodeGenOpts().OptimizationLevel && TrapBB &&
3839 (!CurCodeDecl || !CurCodeDecl->hasAttr<OptimizeNoneAttr>())) {
3840 auto Call = TrapBB->begin();
3841 assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
3842
3843 Call->applyMergedLocation(Call->getDebugLoc(),
3844 Builder.getCurrentDebugLocation());
3845 Builder.CreateCondBr(Checked, Cont, TrapBB);
3846 } else {
3847 TrapBB = createBasicBlock("trap");
3848 Builder.CreateCondBr(Checked, Cont, TrapBB);
3849 EmitBlock(TrapBB);
3850
3851 llvm::CallInst *TrapCall = Builder.CreateCall(
3852 CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
3853 llvm::ConstantInt::get(CGM.Int8Ty,
3854 ClSanitizeDebugDeoptimization
3855 ? TrapBB->getParent()->size()
3856 : static_cast<uint64_t>(CheckHandlerID)));
3857
3858 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3859 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3860 CGM.getCodeGenOpts().TrapFuncName);
3861 TrapCall->addFnAttr(A);
3862 }
3863 TrapCall->setDoesNotReturn();
3864 TrapCall->setDoesNotThrow();
3865 Builder.CreateUnreachable();
3866 }
3867
3868 EmitBlock(Cont);
3869}
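// A failed trap check therefore lowers to roughly the following, with the i8
// immediate identifying either the handler kind or, under -ubsan-unique-traps,
// a per-function counter:
//
// \code
//   br i1 %ok, label %cont, label %trap
// trap:
//   call void @llvm.ubsantrap(i8 <id>)   ; carries "trap-func-name" if requested
//   unreachable
// \endcode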
3870
3871llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
3872 llvm::CallInst *TrapCall =
3873 Builder.CreateCall(CGM.getIntrinsic(IntrID));
3874
3875 if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3876 auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3877 CGM.getCodeGenOpts().TrapFuncName);
3878 TrapCall->addFnAttr(A);
3879 }
3880
3881 return TrapCall;
3882}
3883
3884Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
3885 LValueBaseInfo *BaseInfo,
3886 TBAAAccessInfo *TBAAInfo) {
3887 assert(E->getType()->isArrayType() &&
3888 "Array to pointer decay must have array source type!");
3889
3890 // Expressions of array type can't be bitfields or vector elements.
3891 LValue LV = EmitLValue(E);
3892 Address Addr = LV.getAddress();
3893
3894 // If the array type was an incomplete type, we need to make sure
3895 // the decay ends up being the right type.
3896 llvm::Type *NewTy = ConvertType(E->getType());
3897 Addr = Addr.withElementType(NewTy);
3898
3899 // Note that VLA pointers are always decayed, so we don't need to do
3900 // anything here.
3901 if (!E->getType()->isVariableArrayType()) {
3902 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3903 "Expected pointer to array");
3904 Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
3905 }
3906
3907 // The result of this decay conversion points to an array element within the
3908 // base lvalue. However, since TBAA currently does not support representing
3909 // accesses to elements of member arrays, we conservatively represent accesses
3910 // to the pointee object as if it had no base lvalue specified.
3911 // TODO: Support TBAA for member arrays.
3912 QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
3913 if (BaseInfo) *BaseInfo = LV.getBaseInfo();
3914 if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
3915
3916 return Addr.withElementType(ConvertTypeForMem(EltType));
3917}
3918
3919/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
3920/// array to pointer, return the array subexpression.
3921static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
3922 // If this isn't just an array->pointer decay, bail out.
3923 const auto *CE = dyn_cast<CastExpr>(E);
3924 if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
3925 return nullptr;
3926
3927 // If this is a decay from variable width array, bail out.
3928 const Expr *SubExpr = CE->getSubExpr();
3929 if (SubExpr->getType()->isVariableArrayType())
3930 return nullptr;
3931
3932 return SubExpr;
3933}
3934
3935static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
3936 llvm::Type *elemType,
3937 llvm::Value *ptr,
3938 ArrayRef<llvm::Value*> indices,
3939 bool inbounds,
3940 bool signedIndices,
3941 SourceLocation loc,
3942 const llvm::Twine &name = "arrayidx") {
3943 if (inbounds) {
3944 return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
3945 CodeGenFunction::NotSubtraction, loc,
3946 name);
3947 } else {
3948 return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
3949 }
3950}
3951
3952static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
3953 ArrayRef<llvm::Value *> indices,
3954 llvm::Type *elementType, bool inbounds,
3955 bool signedIndices, SourceLocation loc,
3956 CharUnits align,
3957 const llvm::Twine &name = "arrayidx") {
3958 if (inbounds) {
3959 return CGF.EmitCheckedInBoundsGEP(addr, indices, elementType, signedIndices,
3960 CodeGenFunction::NotSubtraction, loc,
3961 align, name);
3962 } else {
3963 return CGF.Builder.CreateGEP(addr, indices, elementType, align, name);
3964 }
3965}
3966
3967static CharUnits getArrayElementAlign(CharUnits arrayAlign,
3968 llvm::Value *idx,
3969 CharUnits eltSize) {
3970 // If we have a constant index, we can use the exact offset of the
3971 // element we're accessing.
3972 if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
3973 CharUnits offset = constantIdx->getZExtValue() * eltSize;
3974 return arrayAlign.alignmentAtOffset(offset);
3975
3976 // Otherwise, use the worst-case alignment for any element.
3977 } else {
3978 return arrayAlign.alignmentOfArrayElement(eltSize);
3979 }
3980}
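// For example, with a 16-byte aligned array of 4-byte elements: a constant
// index of 2 yields an 8-byte aligned element (offset 8), a constant index of
// 3 yields 4 (offset 12), and an unknown index falls back to the 4-byte
// worst case shared by every element.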
3981
3982static QualType getFixedSizeElementType(const ASTContext &ctx,
3983 const VariableArrayType *vla) {
3984 QualType eltType;
3985 do {
3986 eltType = vla->getElementType();
3987 } while ((vla = ctx.getAsVariableArrayType(eltType)));
3988 return eltType;
3989}
3990
3991static bool hasBPFPreserveStaticOffset(const RecordDecl *D) {
3992 return D && D->hasAttr<BPFPreserveStaticOffsetAttr>();
3993}
3994
3995static bool hasBPFPreserveStaticOffset(const Expr *E) {
3996 if (!E)
3997 return false;
3998 QualType PointeeType = E->getType()->getPointeeType();
3999 if (PointeeType.isNull())
4000 return false;
4001 if (const auto *BaseDecl = PointeeType->getAsRecordDecl())
4002 return hasBPFPreserveStaticOffset(BaseDecl);
4003 return false;
4004}
4005
4006// Wraps Addr with a call to llvm.preserve.static.offset intrinsic.
4007static Address wrapWithBPFPreserveStaticOffset(CodeGenFunction &CGF,
4008 Address &Addr) {
4009 if (!CGF.getTarget().getTriple().isBPF())
4010 return Addr;
4011
4012 llvm::Function *Fn =
4013 CGF.CGM.getIntrinsic(llvm::Intrinsic::preserve_static_offset);
4014 llvm::CallInst *Call = CGF.Builder.CreateCall(Fn, {Addr.emitRawPointer(CGF)});
4015 return Address(Call, Addr.getElementType(), Addr.getAlignment());
4016}
4017
4018/// Given an array base, check whether its member access belongs to a record
4019/// with preserve_access_index attribute or not.
4020static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
4021 if (!ArrayBase || !CGF.getDebugInfo())
4022 return false;
4023
4024 // Only support base as either a MemberExpr or DeclRefExpr.
4025 // DeclRefExpr to cover cases like:
4026 // struct s { int a; int b[10]; };
4027 // struct s *p;
4028 // p[1].a
4029 // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
4030 // p->b[5] is a MemberExpr example.
4031 const Expr *E = ArrayBase->IgnoreImpCasts();
4032 if (const auto *ME = dyn_cast<MemberExpr>(E))
4033 return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4034
4035 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
4036 const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
4037 if (!VarDef)
4038 return false;
4039
4040 const auto *PtrT = VarDef->getType()->getAs<PointerType>();
4041 if (!PtrT)
4042 return false;
4043
4044 const auto *PointeeT = PtrT->getPointeeType()
4045 ->getUnqualifiedDesugaredType();
4046 if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
4047 return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
4048 return false;
4049 }
4050
4051 return false;
4052}
4053
4054static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
4055 ArrayRef<llvm::Value *> indices,
4056 QualType eltType, bool inbounds,
4057 bool signedIndices, SourceLocation loc,
4058 QualType *arrayType = nullptr,
4059 const Expr *Base = nullptr,
4060 const llvm::Twine &name = "arrayidx") {
4061 // All the indices except that last must be zero.
4062#ifndef NDEBUG
4063 for (auto *idx : indices.drop_back())
4064 assert(isa<llvm::ConstantInt>(idx) &&
4065 cast<llvm::ConstantInt>(idx)->isZero());
4066#endif
4067
4068 // Determine the element size of the statically-sized base. This is
4069 // the thing that the indices are expressed in terms of.
4070 if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
4071 eltType = getFixedSizeElementType(CGF.getContext(), vla);
4072 }
4073
4074 // We can use that to compute the best alignment of the element.
4075 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
4076 CharUnits eltAlign =
4077 getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
4078
4079 if (hasBPFPreserveStaticOffset(Base))
4080 addr = wrapWithBPFPreserveStaticOffset(CGF, addr);
4081
4082 llvm::Value *eltPtr;
4083 auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
4084 if (!LastIndex ||
4085 (!CGF.getDebugInfo() || !IsPreserveAIArrayBase(CGF, Base))) {
4086 addr = emitArraySubscriptGEP(CGF, addr, indices,
4087 CGF.ConvertTypeForMem(eltType), inbounds,
4088 signedIndices, loc, eltAlign, name);
4089 return addr;
4090 } else {
4091 // Remember the original array subscript for bpf target
4092 unsigned idx = LastIndex->getZExtValue();
4093 llvm::DIType *DbgInfo = nullptr;
4094 if (arrayType)
4095 DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
4096 eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(
4097 addr.getElementType(), addr.emitRawPointer(CGF), indices.size() - 1,
4098 idx, DbgInfo);
4099 }
4100
4101 return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
4102}
4103
4104/// The offset of a field from the beginning of the record.
4105static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD,
4106 const FieldDecl *FD, int64_t &Offset) {
4107 ASTContext &Ctx = CGF.getContext();
4108 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
4109 unsigned FieldNo = 0;
4110
4111 for (const Decl *D : RD->decls()) {
4112 if (const auto *Record = dyn_cast<RecordDecl>(D))
4113 if (getFieldOffsetInBits(CGF, Record, FD, Offset)) {
4114 Offset += Layout.getFieldOffset(FieldNo);
4115 return true;
4116 }
4117
4118 if (const auto *Field = dyn_cast<FieldDecl>(D))
4119 if (FD == Field) {
4120 Offset += Layout.getFieldOffset(FieldNo);
4121 return true;
4122 }
4123
4124 if (isa<FieldDecl>(D))
4125 ++FieldNo;
4126 }
4127
4128 return false;
4129}
4130
4131/// Returns the relative offset difference between \p FD1 and \p FD2.
4132/// \code
4133/// offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
4134/// \endcode
4135/// Both fields must be within the same struct.
4136static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
4137 const FieldDecl *FD1,
4138 const FieldDecl *FD2) {
4139 const RecordDecl *FD1OuterRec =
4140 FD1->getCanonicalDecl()->getParent()->getOuterLexicalRecordContext();
4141 const RecordDecl *FD2OuterRec =
4142 FD2->getCanonicalDecl()->getParent()->getOuterLexicalRecordContext();
4143
4144 if (FD1OuterRec != FD2OuterRec)
4145 // Fields must be within the same RecordDecl.
4146 return std::optional<int64_t>();
4147
4148 int64_t FD1Offset = 0;
4149 if (!getFieldOffsetInBits(CGF, FD1OuterRec, FD1, FD1Offset))
4150 return std::optional<int64_t>();
4151
4152 int64_t FD2Offset = 0;
4153 if (!getFieldOffsetInBits(CGF, FD2OuterRec, FD2, FD2Offset))
4154 return std::optional<int64_t>();
4155
4156 return std::make_optional<int64_t>(FD1Offset - FD2Offset);
4157}
4158
4159LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
4160 bool Accessed) {
4161 // The index must always be an integer, which is not an aggregate. Emit it
4162 // in lexical order (this complexity is, sadly, required by C++17).
4163 llvm::Value *IdxPre =
4164 (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
4165 bool SignedIndices = false;
4166 auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
4167 auto *Idx = IdxPre;
4168 if (E->getLHS() != E->getIdx()) {
4169 assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
4170 Idx = EmitScalarExpr(E->getIdx());
4171 }
4172
4173 QualType IdxTy = E->getIdx()->getType();
4174 bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
4175 SignedIndices |= IdxSigned;
4176
4177 if (SanOpts.has(SanitizerKind::ArrayBounds))
4178 EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
4179
4180 // Extend or truncate the index type to 32 or 64-bits.
4181 if (Promote && Idx->getType() != IntPtrTy)
4182 Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
4183
4184 return Idx;
4185 };
4186 IdxPre = nullptr;
4187
4188 // If the base is a vector type, then we are forming a vector element lvalue
4189 // with this subscript.
4190 if (E->getBase()->getType()->isSubscriptableVectorType() &&
4191 !isa<ExtVectorElementExpr>(E->getBase())) {
4192 // Emit the vector as an lvalue to get its address.
4193 LValue LHS = EmitLValue(E->getBase());
4194 auto *Idx = EmitIdxAfterBase(/*Promote*/false);
4195 assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
4196 return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
4197 LHS.getBaseInfo(), TBAAAccessInfo());
4198 }
4199
4200 // All the other cases basically behave like simple offsetting.
4201
4202 // Handle the extvector case we ignored above.
4203 if (isa<ExtVectorElementExpr>(E->getBase())) {
4204 LValue LV = EmitLValue(E->getBase());
4205 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4206 Address Addr = EmitExtVectorElementLValue(LV);
4207
4208 QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
4209 Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
4210 SignedIndices, E->getExprLoc());
4211 return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
4212 CGM.getTBAAInfoForSubobject(LV, EltType));
4213 }
4214
4215 LValueBaseInfo EltBaseInfo;
4216 TBAAAccessInfo EltTBAAInfo;
4217 Address Addr = Address::invalid();
4218 if (const VariableArrayType *vla =
4219 getContext().getAsVariableArrayType(E->getType())) {
4220 // The base must be a pointer, which is not an aggregate. Emit
4221 // it. It needs to be emitted first in case it's what captures
4222 // the VLA bounds.
4223 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4224 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4225
4226 // The element count here is the total number of non-VLA elements.
4227 llvm::Value *numElements = getVLASize(vla).NumElts;
4228
4229 // Effectively, the multiply by the VLA size is part of the GEP.
4230 // GEP indexes are signed, and scaling an index isn't permitted to
4231 // signed-overflow, so we use the same semantics for our explicit
4232 // multiply. We suppress this if overflow is not undefined behavior.
4233 if (getLangOpts().isSignedOverflowDefined()) {
4234 Idx = Builder.CreateMul(Idx, numElements);
4235 } else {
4236 Idx = Builder.CreateNSWMul(Idx, numElements);
4237 }
4238
4239 Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
4240 !getLangOpts().isSignedOverflowDefined(),
4241 SignedIndices, E->getExprLoc());
4242
4243 } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
4244 // Indexing over an interface, as in "NSString *P; P[4];"
4245
4246 // Emit the base pointer.
4247 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4248 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4249
4250 CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
4251 llvm::Value *InterfaceSizeVal =
4252 llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
4253
4254 llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
4255
4256 // We don't necessarily build correct LLVM struct types for ObjC
4257 // interfaces, so we can't rely on GEP to do this scaling
4258 // correctly, so we need to cast to i8*. FIXME: is this actually
4259 // true? A lot of other things in the fragile ABI would break...
4260 llvm::Type *OrigBaseElemTy = Addr.getElementType();
4261
4262 // Do the GEP.
4263 CharUnits EltAlign =
4264 getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
4265 llvm::Value *EltPtr =
4266 emitArraySubscriptGEP(*this, Int8Ty, Addr.emitRawPointer(*this),
4267 ScaledIdx, false, SignedIndices, E->getExprLoc());
4268 Addr = Address(EltPtr, OrigBaseElemTy, EltAlign);
4269 } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4270 // If this is A[i] where A is an array, the frontend will have decayed the
4271 // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4272 // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4273 // "gep x, i" here. Emit one "gep A, 0, i".
4274 assert(Array->getType()->isArrayType() &&
4275 "Array to pointer decay must have array source type!");
4276 LValue ArrayLV;
4277 // For simple multidimensional array indexing, set the 'accessed' flag for
4278 // better bounds-checking of the base expression.
4279 if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4280 ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4281 else
4282 ArrayLV = EmitLValue(Array);
4283 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4284
4285 if (SanOpts.has(SanitizerKind::ArrayBounds)) {
4286 // If the array being accessed has a "counted_by" attribute, generate
4287 // bounds checking code. The "count" field is at the top level of the
4288 // struct or in an anonymous struct, that's also at the top level. Future
4289 // expansions may allow the "count" to reside at any place in the struct,
4290 // but the value of "counted_by" will be a "simple" path to the count,
4291 // i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
4292 // similar to emit the correct GEP.
4293 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
4294 getLangOpts().getStrictFlexArraysLevel();
4295
4296 if (const auto *ME = dyn_cast<MemberExpr>(Array);
4297 ME &&
4298 ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel) &&
4300 const FieldDecl *FAMDecl = dyn_cast<FieldDecl>(ME->getMemberDecl());
4301 if (const FieldDecl *CountFD = FindCountedByField(FAMDecl)) {
4302 if (std::optional<int64_t> Diff =
4303 getOffsetDifferenceInBits(*this, CountFD, FAMDecl)) {
4304 CharUnits OffsetDiff = CGM.getContext().toCharUnitsFromBits(*Diff);
4305
4306 // Create a GEP with a byte offset between the FAM and count and
4307 // use that to load the count value.
4308 Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(
4309 ArrayLV.getAddress(), Int8PtrTy, Int8Ty);
4310
4311 llvm::Type *CountTy = ConvertType(CountFD->getType());
4312 llvm::Value *Res = Builder.CreateInBoundsGEP(
4313 Int8Ty, Addr.emitRawPointer(*this),
4314 Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep");
4315 Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
4316 ".counted_by.load");
4317
4318 // Now emit the bounds checking.
4319 EmitBoundsCheckImpl(E, Res, Idx, E->getIdx()->getType(),
4320 Array->getType(), Accessed);
4321 }
4322 }
4323 }
4324 }
4325
4326 // Propagate the alignment from the array itself to the result.
4327 QualType arrayType = Array->getType();
4328 Addr = emitArraySubscriptGEP(
4329 *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
4330 E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
4331 E->getExprLoc(), &arrayType, E->getBase());
4332 EltBaseInfo = ArrayLV.getBaseInfo();
4333 EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
4334 } else {
4335 // The base must be a pointer; emit it with an estimate of its alignment.
4336 Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
4337 auto *Idx = EmitIdxAfterBase(/*Promote*/true);
4338 QualType ptrType = E->getBase()->getType();
4339 Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
4340 !getLangOpts().isSignedOverflowDefined(),
4341 SignedIndices, E->getExprLoc(), &ptrType,
4342 E->getBase());
4343 }
4344
4345 LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
4346
4347 if (getLangOpts().ObjC &&
4348 getLangOpts().getGC() != LangOptions::NonGC) {
4349 LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
4350 setObjCGCLValueClass(getContext(), E, LV);
4351 }
4352 return LV;
4353}
4354
4355LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
4356 assert(
4357 !E->isIncomplete() &&
4358 "incomplete matrix subscript expressions should be rejected during Sema");
4359 LValue Base = EmitLValue(E->getBase());
4360 llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx());
4361 llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
4362 llvm::Value *NumRows = Builder.getIntN(
4363 RowIdx->getType()->getScalarSizeInBits(),
4364 E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
4365 llvm::Value *FinalIdx =
4366 Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
4367 return LValue::MakeMatrixElt(
4368 MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
4369 E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
4370}
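// Matrix values are stored column-major in a flat vector, so the linearized
// index computed above is Col * NumRows + Row; e.g. element (row 2, column 1)
// of a 4x3 matrix maps to flat index 1 * 4 + 2 = 6.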
4371
4372static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
4373 LValueBaseInfo &BaseInfo,
4374 TBAAAccessInfo &TBAAInfo,
4375 QualType BaseTy, QualType ElTy,
4376 bool IsLowerBound) {
4377 LValue BaseLVal;
4378 if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
4379 BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
4380 if (BaseTy->isArrayType()) {
4381 Address Addr = BaseLVal.getAddress();
4382 BaseInfo = BaseLVal.getBaseInfo();
4383
4384 // If the array type was an incomplete type, we need to make sure
4385 // the decay ends up being the right type.
4386 llvm::Type *NewTy = CGF.ConvertType(BaseTy);
4387 Addr = Addr.withElementType(NewTy);
4388
4389 // Note that VLA pointers are always decayed, so we don't need to do
4390 // anything here.
4391 if (!BaseTy->isVariableArrayType()) {
4392 assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
4393 "Expected pointer to array");
4394 Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
4395 }
4396
4397 return Addr.withElementType(CGF.ConvertTypeForMem(ElTy));
4398 }
4399 LValueBaseInfo TypeBaseInfo;
4400 TBAAAccessInfo TypeTBAAInfo;
4401 CharUnits Align =
4402 CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
4403 BaseInfo.mergeForCast(TypeBaseInfo);
4404 TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
4405 return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
4406 CGF.ConvertTypeForMem(ElTy), Align);
4407 }
4408 return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
4409}
4410
4411LValue CodeGenFunction::EmitArraySectionExpr(const ArraySectionExpr *E,
4412 bool IsLowerBound) {
4413
4414 assert(!E->isOpenACCArraySection() &&
4415 "OpenACC Array section codegen not implemented");
4416
4417 QualType BaseTy = ArraySectionExpr::getBaseOriginalType(E->getBase());
4418 QualType ResultExprTy;
4419 if (auto *AT = getContext().getAsArrayType(BaseTy))
4420 ResultExprTy = AT->getElementType();
4421 else
4422 ResultExprTy = BaseTy->getPointeeType();
4423 llvm::Value *Idx = nullptr;
4424 if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
4425 // Requesting lower bound or upper bound, but without provided length and
4426 // without ':' symbol for the default length -> length = 1.
4427 // Idx = LowerBound ?: 0;
4428 if (auto *LowerBound = E->getLowerBound()) {
4429 Idx = Builder.CreateIntCast(
4430 EmitScalarExpr(LowerBound), IntPtrTy,
4431 LowerBound->getType()->hasSignedIntegerRepresentation());
4432 }