1 //===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code to emit Expr nodes as LLVM code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGCUDARuntime.h"
14 #include "CGCXXABI.h"
15 #include "CGCall.h"
16 #include "CGCleanup.h"
17 #include "CGDebugInfo.h"
18 #include "CGObjCRuntime.h"
19 #include "CGOpenMPRuntime.h"
20 #include "CGRecordLayout.h"
21 #include "CodeGenFunction.h"
22 #include "CodeGenModule.h"
23 #include "ConstantEmitter.h"
24 #include "TargetInfo.h"
25 #include "clang/AST/ASTContext.h"
26 #include "clang/AST/Attr.h"
27 #include "clang/AST/DeclObjC.h"
28 #include "clang/AST/NSAPI.h"
29 #include "clang/Basic/Builtins.h"
30 #include "clang/Basic/CodeGenOptions.h"
31 #include "clang/Basic/SourceManager.h"
32 #include "llvm/ADT/Hashing.h"
33 #include "llvm/ADT/StringExtras.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/LLVMContext.h"
37 #include "llvm/IR/MDBuilder.h"
38 #include "llvm/IR/MatrixBuilder.h"
39 #include "llvm/Support/ConvertUTF.h"
40 #include "llvm/Support/MathExtras.h"
41 #include "llvm/Support/Path.h"
42 #include "llvm/Support/SaveAndRestore.h"
43 #include "llvm/Transforms/Utils/SanitizerStats.h"
44 
45 #include <string>
46 
47 using namespace clang;
48 using namespace CodeGen;
49 
50 //===--------------------------------------------------------------------===//
51 // Miscellaneous Helper Methods
52 //===--------------------------------------------------------------------===//
53 
54 llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
55  unsigned addressSpace =
56  cast<llvm::PointerType>(value->getType())->getAddressSpace();
57 
58  llvm::PointerType *destType = Int8PtrTy;
59  if (addressSpace)
60  destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
61 
62  if (value->getType() == destType) return value;
63  return Builder.CreateBitCast(value, destType);
64 }
65 
66 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
67 /// block.
68 Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
69  CharUnits Align,
70  const Twine &Name,
71  llvm::Value *ArraySize) {
72  auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
73  Alloca->setAlignment(Align.getAsAlign());
74  return Address(Alloca, Align);
75 }
76 
77 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
78 /// block. The alloca is cast to the default address space if necessary.
79 Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
80  const Twine &Name,
81  llvm::Value *ArraySize,
82  Address *AllocaAddr) {
83  auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
84  if (AllocaAddr)
85  *AllocaAddr = Alloca;
86  llvm::Value *V = Alloca.getPointer();
87  // Alloca always returns a pointer in alloca address space, which may
88  // be different from the type defined by the language. For example,
89  // in C++ the auto variables are in the default address space. Therefore
90  // cast alloca to the default address space when necessary.
91  if (getASTAllocaAddressSpace() != LangAS::Default) {
92  auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
93  llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
94  // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
95  // otherwise alloca is inserted at the current insertion point of the
96  // builder.
97  if (!ArraySize)
98  Builder.SetInsertPoint(AllocaInsertPt);
99  V = getTargetHooks().performAddrSpaceCast(
100  *this, V, getASTAllocaAddressSpace(), LangAS::Default,
101  Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
102  }
103 
104  return Address(V, Align);
105 }
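// For illustration (a sketch, not IR emitted verbatim): on a target whose
// alloca address space differs from the language default -- e.g. AMDGPU,
// where allocas live in addrspace(5) -- the cast above turns
//
//   %tmp = alloca i32, addrspace(5)
//
// into a pointer in the default address space for the rest of IRGen:
//
//   %tmp.ascast = addrspacecast i32 addrspace(5)* %tmp to i32*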
106 
107 /// CreateTempAlloca - This creates an alloca and inserts it into the entry
108 /// block if \p ArraySize is nullptr, otherwise inserts it at the current
109 /// insertion point of the builder.
110 llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
111  const Twine &Name,
112  llvm::Value *ArraySize) {
113  if (ArraySize)
114  return Builder.CreateAlloca(Ty, ArraySize, Name);
115  return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
116  ArraySize, Name, AllocaInsertPt);
117 }
118 
119 /// CreateDefaultAlignTempAlloca - This creates an alloca with the
120 /// default alignment of the corresponding LLVM type, which is *not*
121 /// guaranteed to be related in any way to the expected alignment of
122 /// an AST type that might have been lowered to Ty.
123 Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
124  const Twine &Name) {
125  CharUnits Align =
126  CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlignment(Ty));
127  return CreateTempAlloca(Ty, Align, Name);
128 }
129 
130 void CodeGenFunction::InitTempAlloca(Address Var, llvm::Value *Init) {
131  auto *Alloca = Var.getPointer();
132  assert(isa<llvm::AllocaInst>(Alloca) ||
133  (isa<llvm::AddrSpaceCastInst>(Alloca) &&
134  isa<llvm::AllocaInst>(
135  cast<llvm::AddrSpaceCastInst>(Alloca)->getPointerOperand())));
136 
137  auto *Store = new llvm::StoreInst(Init, Alloca, /*volatile*/ false,
138  Var.getAlignment().getAsAlign());
139  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
140  Block->getInstList().insertAfter(AllocaInsertPt->getIterator(), Store);
141 }
142 
143 Address CodeGenFunction::CreateIRTemp(QualType Ty, CharUnits Align,
144  const Twine &Name) {
145  return CreateTempAlloca(ConvertType(Ty), Align, Name);
146 }
147 
148 Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
149  Address *Alloca) {
150  // FIXME: Should we prefer the preferred type alignment here?
151  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
152 }
153 
154 Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
155  const Twine &Name, Address *Alloca) {
156  Address Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
157  /*ArraySize=*/nullptr, Alloca);
158 
159  if (Ty->isConstantMatrixType()) {
160  auto *ArrayTy = cast<llvm::ArrayType>(Result.getType()->getElementType());
161  auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
162  ArrayTy->getNumElements());
163 
164  Result = Address(
165  Builder.CreateBitCast(Result.getPointer(), VectorTy->getPointerTo()),
166  Result.getAlignment());
167  }
168  return Result;
169 }
170 
171 Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align,
172  const Twine &Name) {
173  return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
174 }
175 
176 Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
177  const Twine &Name) {
178  return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
179  Name);
180 }
181 
182 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
183 /// expression and compare the result against zero, returning an Int1Ty value.
184 llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
185  PGO.setCurrentStmt(E);
186  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
187  llvm::Value *MemPtr = EmitScalarExpr(E);
188  return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
189  }
190 
191  QualType BoolTy = getContext().BoolTy;
192  SourceLocation Loc = E->getExprLoc();
193  CGFPOptionsRAII FPOptsRAII(*this, E);
194  if (!E->getType()->isAnyComplexType())
195  return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
196 
197  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
198  Loc);
199 }
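// For example (a sketch of the paths above): `float f; if (f)` takes the
// scalar path and emits `fcmp une float %f, 0.0`; `_Complex double z; if (z)`
// takes the complex path, comparing both the real and imaginary parts
// against zero; and `int S::*mp; if (mp)` defers to the C++ ABI, since the
// Itanium ABI represents a null data member pointer as -1 rather than 0.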
200 
201 /// EmitIgnoredExpr - Emit code to compute the specified expression,
202 /// ignoring the result.
203 void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
204  if (E->isPRValue())
205  return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
206 
207  // Just emit it as an l-value and drop the result.
208  EmitLValue(E);
209 }
210 
211 /// EmitAnyExpr - Emit code to compute the specified expression which
212 /// can have any type. The result is returned as an RValue struct.
213 /// If this is an aggregate expression, AggSlot indicates where the
214 /// result should be returned.
215 RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
216  AggValueSlot aggSlot,
217  bool ignoreResult) {
218  switch (getEvaluationKind(E->getType())) {
219  case TEK_Scalar:
220  return RValue::get(EmitScalarExpr(E, ignoreResult));
221  case TEK_Complex:
222  return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
223  case TEK_Aggregate:
224  if (!ignoreResult && aggSlot.isIgnored())
225  aggSlot = CreateAggTemp(E->getType(), "agg-temp");
226  EmitAggExpr(E, aggSlot);
227  return aggSlot.asRValue();
228  }
229  llvm_unreachable("bad evaluation kind");
230 }
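// Roughly how expressions map onto the three evaluation kinds above
// (illustrative):
//
//   int i = a + b;          // TEK_Scalar:    result is a single llvm::Value*
//   _Complex double z = w;  // TEK_Complex:   result is a (real, imag) pair
//   struct Big s = make();  // TEK_Aggregate: result is constructed in memory
//                           //                via an AggValueSlot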
231 
232 /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
233 /// always be accessible even if no aggregate location is provided.
234 RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
235  AggValueSlot AggSlot = AggValueSlot::ignored();
236 
237  if (hasAggregateEvaluationKind(E->getType()))
238  AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
239  return EmitAnyExpr(E, AggSlot);
240 }
241 
242 /// EmitAnyExprToMem - Evaluate an expression into a given memory
243 /// location.
244 void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
245  Address Location,
246  Qualifiers Quals,
247  bool IsInit) {
248  // FIXME: This function should take an LValue as an argument.
249  switch (getEvaluationKind(E->getType())) {
250  case TEK_Complex:
251  EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
252  /*isInit*/ false);
253  return;
254 
255  case TEK_Aggregate: {
256  EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
257  AggValueSlot::IsDestructed_t(IsInit),
258  AggValueSlot::DoesNotNeedGCBarriers,
259  AggValueSlot::IsAliased_t(!IsInit),
260  AggValueSlot::MayOverlap));
261  return;
262  }
263 
264  case TEK_Scalar: {
265  RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
266  LValue LV = MakeAddrLValue(Location, E->getType());
267  EmitStoreThroughLValue(RV, LV);
268  return;
269  }
270  }
271  llvm_unreachable("bad evaluation kind");
272 }
273 
274 static void
275 pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
276  const Expr *E, Address ReferenceTemporary) {
277  // Objective-C++ ARC:
278  // If we are binding a reference to a temporary that has ownership, we
279  // need to perform retain/release operations on the temporary.
280  //
281  // FIXME: This should be looking at E, not M.
282  if (auto Lifetime = M->getType().getObjCLifetime()) {
283  switch (Lifetime) {
284  case Qualifiers::OCL_None:
285  case Qualifiers::OCL_ExplicitNone:
286  // Carry on to normal cleanup handling.
287  break;
288 
289  case Qualifiers::OCL_Autoreleasing:
290  // Nothing to do; cleaned up by an autorelease pool.
291  return;
292 
293  case Qualifiers::OCL_Strong:
294  case Qualifiers::OCL_Weak:
295  switch (StorageDuration Duration = M->getStorageDuration()) {
296  case SD_Static:
297  // Note: we intentionally do not register a cleanup to release
298  // the object on program termination.
299  return;
300 
301  case SD_Thread:
302  // FIXME: We should probably register a cleanup in this case.
303  return;
304 
305  case SD_Automatic:
306  case SD_FullExpression:
307  CodeGenFunction::Destroyer *Destroy;
308  CleanupKind CleanupKind;
309  if (Lifetime == Qualifiers::OCL_Strong) {
310  const ValueDecl *VD = M->getExtendingDecl();
311  bool Precise =
312  VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
313  CleanupKind = CGF.getARCCleanupKind();
314  Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
315  : &CodeGenFunction::destroyARCStrongImprecise;
316  } else {
317  // __weak objects always get EH cleanups; otherwise, exceptions
318  // could cause really nasty crashes instead of mere leaks.
319  CleanupKind = NormalAndEHCleanup;
320  Destroy = &CodeGenFunction::destroyARCWeak;
321  }
322  if (Duration == SD_FullExpression)
323  CGF.pushDestroy(CleanupKind, ReferenceTemporary,
324  M->getType(), *Destroy,
325  CleanupKind & EHCleanup);
326  else
327  CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
328  M->getType(),
329  *Destroy, CleanupKind & EHCleanup);
330  return;
331 
332  case SD_Dynamic:
333  llvm_unreachable("temporary cannot have dynamic storage duration");
334  }
335  llvm_unreachable("unknown storage duration");
336  }
337  }
338 
339  CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
340  if (const RecordType *RT =
341  E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
342  // Get the destructor for the reference temporary.
343  auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
344  if (!ClassDecl->hasTrivialDestructor())
345  ReferenceTemporaryDtor = ClassDecl->getDestructor();
346  }
347 
348  if (!ReferenceTemporaryDtor)
349  return;
350 
351  // Call the destructor for the temporary.
352  switch (M->getStorageDuration()) {
353  case SD_Static:
354  case SD_Thread: {
355  llvm::FunctionCallee CleanupFn;
356  llvm::Constant *CleanupArg;
357  if (E->getType()->isArrayType()) {
358  CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
359  ReferenceTemporary, E->getType(),
360  CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
361  dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
362  CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
363  } else {
364  CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
365  GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
366  CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer());
367  }
368  CGF.CGM.getCXXABI().registerGlobalDtor(
369  CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
370  break;
371  }
372 
373  case SD_FullExpression:
374  CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
375  CodeGenFunction::destroyCXXObject,
376  CGF.getLangOpts().Exceptions);
377  break;
378 
379  case SD_Automatic:
380  CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
381  ReferenceTemporary, E->getType(),
382  CodeGenFunction::destroyCXXObject,
383  CGF.getLangOpts().Exceptions);
384  break;
385 
386  case SD_Dynamic:
387  llvm_unreachable("temporary cannot have dynamic storage duration");
388  }
389 }
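// The storage durations handled above correspond to reference bindings such
// as the following (illustrative):
//
//   f(S());                    // SD_FullExpression: temporary bound to a
//                              // parameter dies with the full-expression
//   const S &r = S();          // SD_Automatic: lifetime extended to the
//                              // enclosing scope of `r`
//   static const S &sr = S();  // SD_Static: destructor registered through
//                              // the C++ ABI (e.g. __cxa_atexit)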
390 
391 static Address createReferenceTemporary(CodeGenFunction &CGF,
392  const MaterializeTemporaryExpr *M,
393  const Expr *Inner,
394  Address *Alloca = nullptr) {
395  auto &TCG = CGF.getTargetHooks();
396  switch (M->getStorageDuration()) {
397  case SD_FullExpression:
398  case SD_Automatic: {
399  // If we have a constant temporary array or record try to promote it into a
400  // constant global under the same rules a normal constant would've been
401  // promoted. This is easier on the optimizer and generally emits fewer
402  // instructions.
403  QualType Ty = Inner->getType();
404  if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
405  (Ty->isArrayType() || Ty->isRecordType()) &&
406  CGF.CGM.isTypeConstant(Ty, true))
407  if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
408  auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
409  auto *GV = new llvm::GlobalVariable(
410  CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
411  llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
412  llvm::GlobalValue::NotThreadLocal,
413  CGF.getContext().getTargetAddressSpace(AS));
414  CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
415  GV->setAlignment(alignment.getAsAlign());
416  llvm::Constant *C = GV;
417  if (AS != LangAS::Default)
418  C = TCG.performAddrSpaceCast(
419  CGF.CGM, GV, AS, LangAS::Default,
420  GV->getValueType()->getPointerTo(
421  CGF.getContext().getTargetAddressSpace(LangAS::Default)));
422  // FIXME: Should we put the new global into a COMDAT?
423  return Address(C, alignment);
424  }
425  return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
426  }
427  case SD_Thread:
428  case SD_Static:
429  return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
430 
431  case SD_Dynamic:
432  llvm_unreachable("temporary can't have dynamic storage duration");
433  }
434  llvm_unreachable("unknown storage duration");
435 }
436 
437 /// Helper method to check if the underlying ABI is AAPCS
438 static bool isAAPCS(const TargetInfo &TargetInfo) {
439  return TargetInfo.getABI().startswith("aapcs");
440 }
441 
442 LValue CodeGenFunction::
443 EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
444  const Expr *E = M->getSubExpr();
445 
446  assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
447  !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
448  "Reference should never be pseudo-strong!");
449 
450  // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
451  // as that will cause the lifetime adjustment to be lost for ARC
452  auto ownership = M->getType().getObjCLifetime();
453  if (ownership != Qualifiers::OCL_None &&
454  ownership != Qualifiers::OCL_ExplicitNone) {
455  Address Object = createReferenceTemporary(*this, M, E);
456  if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
457  Object = Address(llvm::ConstantExpr::getBitCast(Var,
458  ConvertTypeForMem(E->getType())
459  ->getPointerTo(Object.getAddressSpace())),
460  Object.getAlignment());
461 
462  // createReferenceTemporary will promote the temporary to a global with a
463  // constant initializer if it can. It can only do this to a value of
464  // ARC-manageable type if the value is global and therefore "immune" to
465  // ref-counting operations. Therefore we have no need to emit either a
466  // dynamic initialization or a cleanup and we can just return the address
467  // of the temporary.
468  if (Var->hasInitializer())
469  return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
470 
471  Var->setInitializer(CGM.EmitNullConstant(E->getType()));
472  }
473  LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
474  AlignmentSource::Decl);
475 
476  switch (getEvaluationKind(E->getType())) {
477  default: llvm_unreachable("expected scalar or aggregate expression");
478  case TEK_Scalar:
479  EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
480  break;
481  case TEK_Aggregate: {
482  EmitAggExpr(E, AggValueSlot::forAddr(Object,
483  E->getType().getQualifiers(),
484  AggValueSlot::IsDestructed,
485  AggValueSlot::DoesNotNeedGCBarriers,
486  AggValueSlot::IsNotAliased,
487  AggValueSlot::DoesNotOverlap));
488  break;
489  }
490  }
491 
492  pushTemporaryCleanup(*this, M, E, Object);
493  return RefTempDst;
494  }
495 
496  SmallVector<const Expr *, 2> CommaLHSs;
497  SmallVector<SubobjectAdjustment, 2> Adjustments;
498  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
499 
500  for (const auto &Ignored : CommaLHSs)
501  EmitIgnoredExpr(Ignored);
502 
503  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
504  if (opaque->getType()->isRecordType()) {
505  assert(Adjustments.empty());
506  return EmitOpaqueValueLValue(opaque);
507  }
508  }
509 
510  // Create and initialize the reference temporary.
511  Address Alloca = Address::invalid();
512  Address Object = createReferenceTemporary(*this, M, E, &Alloca);
513  if (auto *Var = dyn_cast<llvm::GlobalVariable>(
514  Object.getPointer()->stripPointerCasts())) {
515  Object = Address(llvm::ConstantExpr::getBitCast(
516  cast<llvm::Constant>(Object.getPointer()),
517  ConvertTypeForMem(E->getType())->getPointerTo()),
518  Object.getAlignment());
519  // If the temporary is a global and has a constant initializer or is a
520  // constant temporary that we promoted to a global, we may have already
521  // initialized it.
522  if (!Var->hasInitializer()) {
523  Var->setInitializer(CGM.EmitNullConstant(E->getType()));
524  EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
525  }
526  } else {
527  switch (M->getStorageDuration()) {
528  case SD_Automatic:
529  if (auto *Size = EmitLifetimeStart(
530  CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
531  Alloca.getPointer())) {
532  pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
533  Alloca, Size);
534  }
535  break;
536 
537  case SD_FullExpression: {
538  if (!ShouldEmitLifetimeMarkers)
539  break;
540 
541  // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
542  // marker. Instead, start the lifetime of a conditional temporary earlier
543  // so that it's unconditional. Don't do this with sanitizers which need
544  // more precise lifetime marks.
545  ConditionalEvaluation *OldConditional = nullptr;
546  CGBuilderTy::InsertPoint OldIP;
547  if (isInConditionalBranch() && !E->getType().isDestructedType() &&
548  !SanOpts.has(SanitizerKind::HWAddress) &&
549  !SanOpts.has(SanitizerKind::Memory) &&
550  !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) {
551  OldConditional = OutermostConditional;
552  OutermostConditional = nullptr;
553 
554  OldIP = Builder.saveIP();
555  llvm::BasicBlock *Block = OldConditional->getStartingBlock();
556  Builder.restoreIP(CGBuilderTy::InsertPoint(
557  Block, llvm::BasicBlock::iterator(Block->back())));
558  }
559 
560  if (auto *Size = EmitLifetimeStart(
561  CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
562  Alloca.getPointer())) {
563  pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
564  Size);
565  }
566 
567  if (OldConditional) {
568  OutermostConditional = OldConditional;
569  Builder.restoreIP(OldIP);
570  }
571  break;
572  }
573 
574  default:
575  break;
576  }
577  EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
578  }
579  pushTemporaryCleanup(*this, M, E, Object);
580 
581  // Perform derived-to-base casts and/or field accesses, to get from the
582  // temporary object we created (and, potentially, for which we extended
583  // the lifetime) to the subobject we're binding the reference to.
584  for (unsigned I = Adjustments.size(); I != 0; --I) {
585  SubobjectAdjustment &Adjustment = Adjustments[I-1];
586  switch (Adjustment.Kind) {
587  case SubobjectAdjustment::DerivedToBaseAdjustment:
588  Object =
589  GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
590  Adjustment.DerivedToBase.BasePath->path_begin(),
591  Adjustment.DerivedToBase.BasePath->path_end(),
592  /*NullCheckValue=*/ false, E->getExprLoc());
593  break;
594 
595  case SubobjectAdjustment::FieldAdjustment: {
596  LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
597  LV = EmitLValueForField(LV, Adjustment.Field);
598  assert(LV.isSimple() &&
599  "materialized temporary field is not a simple lvalue");
600  Object = LV.getAddress(*this);
601  break;
602  }
603 
604  case SubobjectAdjustment::MemberPointerAdjustment: {
605  llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
606  Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
607  Adjustment.Ptr.MPT);
608  break;
609  }
610  }
611  }
612 
613  return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
614 }
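// Illustrative examples of the subobject adjustments walked above:
//
//   struct A { int x; };
//   struct B : A {};
//   const A &a = B();      // DerivedToBaseAdjustment on the materialized B
//   const int &i = B().x;  // FieldAdjustment into the materialized B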
615 
616 RValue
617 CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
618  // Emit the expression as an lvalue.
619  LValue LV = EmitLValue(E);
620  assert(LV.isSimple());
621  llvm::Value *Value = LV.getPointer(*this);
622 
623  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
624  // C++11 [dcl.ref]p5 (as amended by core issue 453):
625  // If a glvalue to which a reference is directly bound designates neither
626  // an existing object or function of an appropriate type nor a region of
627  // storage of suitable size and alignment to contain an object of the
628  // reference's type, the behavior is undefined.
629  QualType Ty = E->getType();
630  EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
631  }
632 
633  return RValue::get(Value);
634 }
635 
636 
637 /// getAccessedFieldNo - Given an encoded value and a result number, return the
638 /// input field number being accessed.
639 unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
640  const llvm::Constant *Elts) {
641  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
642  ->getZExtValue();
643 }
644 
645 /// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
646 static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
647  llvm::Value *High) {
648  llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
649  llvm::Value *K47 = Builder.getInt64(47);
650  llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
651  llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
652  llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
653  llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
654  return Builder.CreateMul(B1, KMul);
655 }
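// In scalar form, the IR built above computes (a sketch of
// llvm::hashing::detail::hash_16_bytes from include/llvm/ADT/Hashing.h):
//
//   uint64_t hash_16_bytes(uint64_t low, uint64_t high) {
//     const uint64_t kMul = 0x9ddfea08eb382d69ULL;
//     uint64_t a = (low ^ high) * kMul;
//     a ^= (a >> 47);
//     uint64_t b = (high ^ a) * kMul;
//     b ^= (b >> 47);
//     return b * kMul;
//   }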
656 
657 static bool isNullPointerAllowed(TypeCheckKind TCK) {
658  return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
659  TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
660 }
661 
662 static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
663  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
664  return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
665  (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
666  TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
667  TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
668 }
669 
670 bool CodeGenFunction::sanitizePerformTypeCheck() const {
671  return SanOpts.has(SanitizerKind::Null) |
672  SanOpts.has(SanitizerKind::Alignment) |
673  SanOpts.has(SanitizerKind::ObjectSize) |
674  SanOpts.has(SanitizerKind::Vptr);
675 }
676 
677 void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
678  llvm::Value *Ptr, QualType Ty,
679  CharUnits Alignment,
680  SanitizerSet SkippedChecks,
681  llvm::Value *ArraySize) {
682  if (!sanitizePerformTypeCheck())
683  return;
684 
685  // Don't check pointers outside the default address space. The null check
686  // isn't correct, the object-size check isn't supported by LLVM, and we can't
687  // communicate the addresses to the runtime handler for the vptr check.
688  if (Ptr->getType()->getPointerAddressSpace())
689  return;
690 
691  // Don't check pointers to volatile data. The behavior here is implementation-
692  // defined.
693  if (Ty.isVolatileQualified())
694  return;
695 
696  SanitizerScope SanScope(this);
697 
698  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
699  llvm::BasicBlock *Done = nullptr;
700 
701  // Quickly determine whether we have a pointer to an alloca. It's possible
702  // to skip null checks, and some alignment checks, for these pointers. This
703  // can reduce compile-time significantly.
704  auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());
705 
706  llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
707  llvm::Value *IsNonNull = nullptr;
708  bool IsGuaranteedNonNull =
709  SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
710  bool AllowNullPointers = isNullPointerAllowed(TCK);
711  if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
712  !IsGuaranteedNonNull) {
713  // The glvalue must not be an empty glvalue.
714  IsNonNull = Builder.CreateIsNotNull(Ptr);
715 
716  // The IR builder can constant-fold the null check if the pointer points to
717  // a constant.
718  IsGuaranteedNonNull = IsNonNull == True;
719 
720  // Skip the null check if the pointer is known to be non-null.
721  if (!IsGuaranteedNonNull) {
722  if (AllowNullPointers) {
723  // When performing pointer casts, it's OK if the value is null.
724  // Skip the remaining checks in that case.
725  Done = createBasicBlock("null");
726  llvm::BasicBlock *Rest = createBasicBlock("not.null");
727  Builder.CreateCondBr(IsNonNull, Rest, Done);
728  EmitBlock(Rest);
729  } else {
730  Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
731  }
732  }
733  }
734 
735  if (SanOpts.has(SanitizerKind::ObjectSize) &&
736  !SkippedChecks.has(SanitizerKind::ObjectSize) &&
737  !Ty->isIncompleteType()) {
738  uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
739  llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
740  if (ArraySize)
741  Size = Builder.CreateMul(Size, ArraySize);
742 
743  // Degenerate case: new X[0] does not need an objectsize check.
744  llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
745  if (!ConstantSize || !ConstantSize->isNullValue()) {
746  // The glvalue must refer to a large enough storage region.
747  // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
748  // to check this.
749  // FIXME: Get object address space
750  llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
751  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
752  llvm::Value *Min = Builder.getFalse();
753  llvm::Value *NullIsUnknown = Builder.getFalse();
754  llvm::Value *Dynamic = Builder.getFalse();
755  llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy);
756  llvm::Value *LargeEnough = Builder.CreateICmpUGE(
757  Builder.CreateCall(F, {CastAddr, Min, NullIsUnknown, Dynamic}), Size);
758  Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
759  }
760  }
761 
762  uint64_t AlignVal = 0;
763  llvm::Value *PtrAsInt = nullptr;
764 
765  if (SanOpts.has(SanitizerKind::Alignment) &&
766  !SkippedChecks.has(SanitizerKind::Alignment)) {
767  AlignVal = Alignment.getQuantity();
768  if (!Ty->isIncompleteType() && !AlignVal)
769  AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
770  /*ForPointeeType=*/true)
771  .getQuantity();
772 
773  // The glvalue must be suitably aligned.
774  if (AlignVal > 1 &&
775  (!PtrToAlloca || PtrToAlloca->getAlignment() < AlignVal)) {
776  PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
777  llvm::Value *Align = Builder.CreateAnd(
778  PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
779  llvm::Value *Aligned =
780  Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
781  if (Aligned != True)
782  Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
783  }
784  }
785 
786  if (Checks.size() > 0) {
787  // Make sure we're not losing information. Alignment needs to be a power of
788  // 2
789  assert(!AlignVal || (uint64_t)1 << llvm::Log2_64(AlignVal) == AlignVal);
790  llvm::Constant *StaticData[] = {
791  EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
792  llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2_64(AlignVal) : 1),
793  llvm::ConstantInt::get(Int8Ty, TCK)};
794  EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
795  PtrAsInt ? PtrAsInt : Ptr);
796  }
797 
798  // If possible, check that the vptr indicates that there is a subobject of
799  // type Ty at offset zero within this object.
800  //
801  // C++11 [basic.life]p5,6:
802  // [For storage which does not refer to an object within its lifetime]
803  // The program has undefined behavior if:
804  // -- the [pointer or glvalue] is used to access a non-static data member
805  // or call a non-static member function
806  if (SanOpts.has(SanitizerKind::Vptr) &&
807  !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
808  // Ensure that the pointer is non-null before loading it. If there is no
809  // compile-time guarantee, reuse the run-time null check or emit a new one.
810  if (!IsGuaranteedNonNull) {
811  if (!IsNonNull)
812  IsNonNull = Builder.CreateIsNotNull(Ptr);
813  if (!Done)
814  Done = createBasicBlock("vptr.null");
815  llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
816  Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
817  EmitBlock(VptrNotNull);
818  }
819 
820  // Compute a hash of the mangled name of the type.
821  //
822  // FIXME: This is not guaranteed to be deterministic! Move to a
823  // fingerprinting mechanism once LLVM provides one. For the time
824  // being the implementation happens to be deterministic.
825  SmallString<64> MangledName;
826  llvm::raw_svector_ostream Out(MangledName);
827  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
828  Out);
829 
830  // Contained in NoSanitizeList based on the mangled type.
831  if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
832  Out.str())) {
833  llvm::hash_code TypeHash = hash_value(Out.str());
834 
835  // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
836  llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
837  llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
838  Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), getPointerAlign());
839  llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
840  llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);
841 
842  llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
843  Hash = Builder.CreateTrunc(Hash, IntPtrTy);
844 
845  // Look the hash up in our cache.
846  const int CacheSize = 128;
847  llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
848  llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
849  "__ubsan_vptr_type_cache");
850  llvm::Value *Slot = Builder.CreateAnd(Hash,
851  llvm::ConstantInt::get(IntPtrTy,
852  CacheSize-1));
853  llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
854  llvm::Value *CacheVal = Builder.CreateAlignedLoad(
855  IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
856  getPointerAlign());
857 
858  // If the hash isn't in the cache, call a runtime handler to perform the
859  // hard work of checking whether the vptr is for an object of the right
860  // type. This will either fill in the cache and return, or produce a
861  // diagnostic.
862  llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
863  llvm::Constant *StaticData[] = {
864  EmitCheckSourceLocation(Loc),
865  EmitCheckTypeDescriptor(Ty),
866  CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
867  llvm::ConstantInt::get(Int8Ty, TCK)
868  };
869  llvm::Value *DynamicData[] = { Ptr, Hash };
870  EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
871  SanitizerHandler::DynamicTypeCacheMiss, StaticData,
872  DynamicData);
873  }
874  }
875 
876  if (Done) {
877  Builder.CreateBr(Done);
878  EmitBlock(Done);
879  }
880 }
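// For example (sketch): compiling `q->f()` with
// `-fsanitize=null,alignment,vptr` runs the checks above in order: a null
// check on `q` (skipped when `q` is a known alloca or `this`), an alignment
// check that masks the low bits of the address, and, for dynamic classes, a
// vptr check that hashes (type hash, vptr) and probes the 128-entry
// __ubsan_vptr_type_cache before calling the DynamicTypeCacheMiss handler.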
881 
882 /// Determine whether this expression refers to a flexible array member in a
883 /// struct. We disable array bounds checks for such members.
884 static bool isFlexibleArrayMemberExpr(const Expr *E) {
885  // For compatibility with existing code, we treat arrays of length 0 or
886  // 1 as flexible array members.
887  // FIXME: This is inconsistent with the warning code in SemaChecking. Unify
888  // the two mechanisms.
889  const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe();
890  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
891  // FIXME: Sema doesn't treat [1] as a flexible array member if the bound
892  // was produced by macro expansion.
893  if (CAT->getSize().ugt(1))
894  return false;
895  } else if (!isa<IncompleteArrayType>(AT))
896  return false;
897 
898  E = E->IgnoreParens();
899 
900  // A flexible array member must be the last member in the class.
901  if (const auto *ME = dyn_cast<MemberExpr>(E)) {
902  // FIXME: If the base type of the member expr is not FD->getParent(),
903  // this should not be treated as a flexible array member access.
904  if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
905  // FIXME: Sema doesn't treat a T[1] union member as a flexible array
906  // member, only a T[0] or T[] member gets that treatment.
907  if (FD->getParent()->isUnion())
908  return true;
909  RecordDecl::field_iterator FI(
910  DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
911  return ++FI == FD->getParent()->field_end();
912  }
913  } else if (const auto *IRE = dyn_cast<ObjCIvarRefExpr>(E)) {
914  return IRE->getDecl()->getNextIvar() == nullptr;
915  }
916 
917  return false;
918 }
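// Examples of what the predicate above accepts and rejects (illustrative):
//
//   struct S1 { int n; char data[]; };   // C99 flexible array member: yes
//   struct S2 { int n; char data[1]; };  // [1] idiom, last field: yes
//   struct S3 { char data[2]; int n; };  // bound > 1 and not last: no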
919 
920 llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
921  QualType EltTy) {
922  ASTContext &C = getContext();
923  uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
924  if (!EltSize)
925  return nullptr;
926 
927  auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
928  if (!ArrayDeclRef)
929  return nullptr;
930 
931  auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
932  if (!ParamDecl)
933  return nullptr;
934 
935  auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
936  if (!POSAttr)
937  return nullptr;
938 
939  // Don't load the size if it's a lower bound.
940  int POSType = POSAttr->getType();
941  if (POSType != 0 && POSType != 1)
942  return nullptr;
943 
944  // Find the implicit size parameter.
945  auto PassedSizeIt = SizeArguments.find(ParamDecl);
946  if (PassedSizeIt == SizeArguments.end())
947  return nullptr;
948 
949  const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
950  assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
951  Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
952  llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
953  C.getSizeType(), E->getExprLoc());
954  llvm::Value *SizeOfElement =
955  llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
956  return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
957 }
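// Illustrative use of the machinery above: given
//
//   void fill(int *buf __attribute__((pass_object_size(0))));
//
// the caller passes __builtin_object_size(buf, 0) as an implicit parameter;
// here that byte count is divided by sizeof(int) to recover the element
// count used as an array bound.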
958 
959 /// If Base is known to point to the start of an array, return the length of
960 /// that array. Return 0 if the length cannot be determined.
961 static llvm::Value *getArrayIndexingBound(
962  CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType) {
963  // For the vector indexing extension, the bound is the number of elements.
964  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
965  IndexedType = Base->getType();
966  return CGF.Builder.getInt32(VT->getNumElements());
967  }
968 
969  Base = Base->IgnoreParens();
970 
971  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
972  if (CE->getCastKind() == CK_ArrayToPointerDecay &&
973  !isFlexibleArrayMemberExpr(CE->getSubExpr())) {
974  IndexedType = CE->getSubExpr()->getType();
975  const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
976  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
977  return CGF.Builder.getInt(CAT->getSize());
978  else if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
979  return CGF.getVLASize(VAT).NumElts;
980  // Ignore pass_object_size here. It's not applicable on decayed pointers.
981  }
982  }
983 
984  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
985  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
986  IndexedType = Base->getType();
987  return POS;
988  }
989 
990  return nullptr;
991 }
992 
993 void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
994  llvm::Value *Index, QualType IndexType,
995  bool Accessed) {
996  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
997  "should not be called unless adding bounds checks");
998  SanitizerScope SanScope(this);
999 
1000  QualType IndexedType;
1001  llvm::Value *Bound = getArrayIndexingBound(*this, Base, IndexedType);
1002  if (!Bound)
1003  return;
1004 
1005  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
1006  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
1007  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
1008 
1009  llvm::Constant *StaticData[] = {
1010  EmitCheckSourceLocation(E->getExprLoc()),
1011  EmitCheckTypeDescriptor(IndexedType),
1012  EmitCheckTypeDescriptor(IndexType)
1013  };
1014  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
1015  : Builder.CreateICmpULE(IndexVal, BoundVal);
1016  EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
1017  SanitizerHandler::OutOfBounds, StaticData, Index);
1018 }
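// For example (sketch), with -fsanitize=array-bounds and `int a[4];`:
//
//   a[i] = 0;       // Accessed: requires i < 4 (unsigned compare)
//   int *p = &a[i]; // not Accessed: requires i <= 4, since forming a
//                   // past-the-end address is still valid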
1019 
1020 
1021 ComplexPairTy CodeGenFunction::
1022 EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
1023  bool isInc, bool isPre) {
1024  ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());
1025 
1026  llvm::Value *NextVal;
1027  if (isa<llvm::IntegerType>(InVal.first->getType())) {
1028  uint64_t AmountVal = isInc ? 1 : -1;
1029  NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
1030 
1031  // Add the inc/dec to the real part.
1032  NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1033  } else {
1034  QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
1035  llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
1036  if (!isInc)
1037  FVal.changeSign();
1038  NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
1039 
1040  // Add the inc/dec to the real part.
1041  NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1042  }
1043 
1044  ComplexPairTy IncVal(NextVal, InVal.second);
1045 
1046  // Store the updated result through the lvalue.
1047  EmitStoreOfComplex(IncVal, LV, /*init*/ false);
1048  if (getLangOpts().OpenMP)
1049  CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
1050  E->getSubExpr());
1051 
1052  // If this is a postinc, return the value read from memory, otherwise use the
1053  // updated value.
1054  return isPre ? IncVal : InVal;
1055 }
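// Illustrative semantics of the code above: for `_Complex double z`, `++z`
// loads (re, im), stores (re + 1.0, im), and yields the updated pair, while
// `z++` yields the original pair; only the real part is ever incremented.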
1056 
1057 void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
1058  CodeGenFunction *CGF) {
1059  // Bind VLAs in the cast type.
1060  if (CGF && E->getType()->isVariablyModifiedType())
1061  CGF->EmitVariablyModifiedType(E->getType());
1062 
1063  if (CGDebugInfo *DI = getModuleDebugInfo())
1064  DI->EmitExplicitCastType(E->getType());
1065 }
1066 
1067 //===----------------------------------------------------------------------===//
1068 // LValue Expression Emission
1069 //===----------------------------------------------------------------------===//
1070 
1071 /// EmitPointerWithAlignment - Given an expression of pointer type, try to
1072 /// derive a more accurate bound on the alignment of the pointer.
1073 Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
1074  LValueBaseInfo *BaseInfo,
1075  TBAAAccessInfo *TBAAInfo) {
1076  // We allow this with ObjC object pointers because of fragile ABIs.
1077  assert(E->getType()->isPointerType() ||
1078  E->getType()->isObjCObjectPointerType());
1079  E = E->IgnoreParens();
1080 
1081  // Casts:
1082  if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
1083  if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
1084  CGM.EmitExplicitCastExprType(ECE, this);
1085 
1086  switch (CE->getCastKind()) {
1087  // Non-converting casts (but not C's implicit conversion from void*).
1088  case CK_BitCast:
1089  case CK_NoOp:
1090  case CK_AddressSpaceConversion:
1091  if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
1092  if (PtrTy->getPointeeType()->isVoidType())
1093  break;
1094 
1095  LValueBaseInfo InnerBaseInfo;
1096  TBAAAccessInfo InnerTBAAInfo;
1097  Address Addr = EmitPointerWithAlignment(CE->getSubExpr(),
1098  &InnerBaseInfo,
1099  &InnerTBAAInfo);
1100  if (BaseInfo) *BaseInfo = InnerBaseInfo;
1101  if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
1102 
1103  if (isa<ExplicitCastExpr>(CE)) {
1104  LValueBaseInfo TargetTypeBaseInfo;
1105  TBAAAccessInfo TargetTypeTBAAInfo;
1106  CharUnits Align = CGM.getNaturalPointeeTypeAlignment(
1107  E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
1108  if (TBAAInfo)
1109  *TBAAInfo = CGM.mergeTBAAInfoForCast(*TBAAInfo,
1110  TargetTypeTBAAInfo);
1111  // If the source l-value is opaque, honor the alignment of the
1112  // casted-to type.
1113  if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
1114  if (BaseInfo)
1115  BaseInfo->mergeForCast(TargetTypeBaseInfo);
1116  Addr = Address(Addr.getPointer(), Align);
1117  }
1118  }
1119 
1120  if (SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
1121  CE->getCastKind() == CK_BitCast) {
1122  if (auto PT = E->getType()->getAs<PointerType>())
1123  EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr.getPointer(),
1124  /*MayBeNull=*/true,
1125  CodeGenFunction::CFITCK_UnrelatedCast,
1126  CE->getBeginLoc());
1127  }
1128  return CE->getCastKind() != CK_AddressSpaceConversion
1129  ? Builder.CreateBitCast(Addr, ConvertType(E->getType()))
1130  : Builder.CreateAddrSpaceCast(Addr,
1131  ConvertType(E->getType()));
1132  }
1133  break;
1134 
1135  // Array-to-pointer decay.
1136  case CK_ArrayToPointerDecay:
1137  return EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
1138 
1139  // Derived-to-base conversions.
1140  case CK_UncheckedDerivedToBase:
1141  case CK_DerivedToBase: {
1142  // TODO: Support accesses to members of base classes in TBAA. For now, we
1143  // conservatively pretend that the complete object is of the base class
1144  // type.
1145  if (TBAAInfo)
1146  *TBAAInfo = CGM.getTBAAAccessInfo(E->getType());
1147  Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), BaseInfo);
1148  auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
1149  return GetAddressOfBaseClass(Addr, Derived,
1150  CE->path_begin(), CE->path_end(),
1151  ShouldNullCheckClassCastValue(CE),
1152  CE->getExprLoc());
1153  }
1154 
1155  // TODO: Is there any reason to treat base-to-derived conversions
1156  // specially?
1157  default:
1158  break;
1159  }
1160  }
1161 
1162  // Unary &.
1163  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
1164  if (UO->getOpcode() == UO_AddrOf) {
1165  LValue LV = EmitLValue(UO->getSubExpr());
1166  if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1167  if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1168  return LV.getAddress(*this);
1169  }
1170  }
1171 
1172  // TODO: conditional operators, comma.
1173 
1174  // Otherwise, use the alignment of the type.
1175  CharUnits Align =
1176  CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
1177  return Address(EmitScalarExpr(E), Align);
1178 }
1179 
1180 llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
1181  llvm::Value *V = RV.getScalarVal();
1182  if (auto MPT = T->getAs<MemberPointerType>())
1183  return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
1184  return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
1185 }
1186 
1187 RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
1188  if (Ty->isVoidType())
1189  return RValue::get(nullptr);
1190 
1191  switch (getEvaluationKind(Ty)) {
1192  case TEK_Complex: {
1193  llvm::Type *EltTy =
1194  ConvertType(Ty->castAs<ComplexType>()->getElementType());
1195  llvm::Value *U = llvm::UndefValue::get(EltTy);
1196  return RValue::getComplex(std::make_pair(U, U));
1197  }
1198 
1199  // If this is a use of an undefined aggregate type, the aggregate must have an
1200  // identifiable address. Just because the contents of the value are undefined
1201  // doesn't mean that the address can't be taken and compared.
1202  case TEK_Aggregate: {
1203  Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
1204  return RValue::getAggregate(DestPtr);
1205  }
1206 
1207  case TEK_Scalar:
1208  return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
1209  }
1210  llvm_unreachable("bad evaluation kind");
1211 }
1212 
1213 RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
1214  const char *Name) {
1215  ErrorUnsupported(E, Name);
1216  return GetUndefRValue(E->getType());
1217 }
1218 
1219 LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
1220  const char *Name) {
1221  ErrorUnsupported(E, Name);
1222  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
1223  return MakeAddrLValue(Address(llvm::UndefValue::get(Ty), CharUnits::One()),
1224  E->getType());
1225 }
1226 
1227 bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
1228  const Expr *Base = Obj;
1229  while (!isa<CXXThisExpr>(Base)) {
1230  // The result of a dynamic_cast can be null.
1231  if (isa<CXXDynamicCastExpr>(Base))
1232  return false;
1233 
1234  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1235  Base = CE->getSubExpr();
1236  } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1237  Base = PE->getSubExpr();
1238  } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1239  if (UO->getOpcode() == UO_Extension)
1240  Base = UO->getSubExpr();
1241  else
1242  return false;
1243  } else {
1244  return false;
1245  }
1246  }
1247  return true;
1248 }
1249 
1250 LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
1251  LValue LV;
1252  if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
1253  LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
1254  else
1255  LV = EmitLValue(E);
1256  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
1257  SanitizerSet SkippedChecks;
1258  if (const auto *ME = dyn_cast<MemberExpr>(E)) {
1259  bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
1260  if (IsBaseCXXThis)
1261  SkippedChecks.set(SanitizerKind::Alignment, true);
1262  if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
1263  SkippedChecks.set(SanitizerKind::Null, true);
1264  }
1265  EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(*this), E->getType(),
1266  LV.getAlignment(), SkippedChecks);
1267  }
1268  return LV;
1269 }
1270 
1271 /// EmitLValue - Emit code to compute a designator that specifies the location
1272 /// of the expression.
1273 ///
1274 /// This can return one of two things: a simple address or a bitfield reference.
1275 /// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1276 /// an LLVM pointer type.
1277 ///
1278 /// If this returns a bitfield reference, nothing about the pointee type of the
1279 /// LLVM value is known: For example, it may not be a pointer to an integer.
1280 ///
1281 /// If this returns a normal address, and if the lvalue's C type is fixed size,
1282 /// this method guarantees that the returned pointer type will point to an LLVM
1283 /// type of the same size of the lvalue's type. If the lvalue has a variable
1284 /// length type, this is not possible.
1285 ///
1286 LValue CodeGenFunction::EmitLValue(const Expr *E) {
1287  ApplyDebugLocation DL(*this, E);
1288  switch (E->getStmtClass()) {
1289  default: return EmitUnsupportedLValue(E, "l-value expression");
1290 
1291  case Expr::ObjCPropertyRefExprClass:
1292  llvm_unreachable("cannot emit a property reference directly");
1293 
1294  case Expr::ObjCSelectorExprClass:
1295  return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
1296  case Expr::ObjCIsaExprClass:
1297  return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
1298  case Expr::BinaryOperatorClass:
1299  return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
1300  case Expr::CompoundAssignOperatorClass: {
1301  QualType Ty = E->getType();
1302  if (const AtomicType *AT = Ty->getAs<AtomicType>())
1303  Ty = AT->getValueType();
1304  if (!Ty->isAnyComplexType())
1305  return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1306  return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1307  }
1308  case Expr::CallExprClass:
1309  case Expr::CXXMemberCallExprClass:
1310  case Expr::CXXOperatorCallExprClass:
1311  case Expr::UserDefinedLiteralClass:
1312  return EmitCallExprLValue(cast<CallExpr>(E));
1313  case Expr::CXXRewrittenBinaryOperatorClass:
1314  return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm());
1315  case Expr::VAArgExprClass:
1316  return EmitVAArgExprLValue(cast<VAArgExpr>(E));
1317  case Expr::DeclRefExprClass:
1318  return EmitDeclRefLValue(cast<DeclRefExpr>(E));
1319  case Expr::ConstantExprClass: {
1320  const ConstantExpr *CE = cast<ConstantExpr>(E);
1321  if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
1322  QualType RetType = cast<CallExpr>(CE->getSubExpr()->IgnoreImplicit())
1323  ->getCallReturnType(getContext());
1324  return MakeNaturalAlignAddrLValue(Result, RetType);
1325  }
1326  return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
1327  }
1328  case Expr::ParenExprClass:
1329  return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
1330  case Expr::GenericSelectionExprClass:
1331  return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
1332  case Expr::PredefinedExprClass:
1333  return EmitPredefinedLValue(cast<PredefinedExpr>(E));
1334  case Expr::StringLiteralClass:
1335  return EmitStringLiteralLValue(cast<StringLiteral>(E));
1336  case Expr::ObjCEncodeExprClass:
1337  return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
1338  case Expr::PseudoObjectExprClass:
1339  return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
1340  case Expr::InitListExprClass:
1341  return EmitInitListLValue(cast<InitListExpr>(E));
1342  case Expr::CXXTemporaryObjectExprClass:
1343  case Expr::CXXConstructExprClass:
1344  return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
1345  case Expr::CXXBindTemporaryExprClass:
1346  return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
1347  case Expr::CXXUuidofExprClass:
1348  return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
1349  case Expr::LambdaExprClass:
1350  return EmitAggExprToLValue(E);
1351 
1352  case Expr::ExprWithCleanupsClass: {
1353  const auto *cleanups = cast<ExprWithCleanups>(E);
1354  RunCleanupsScope Scope(*this);
1355  LValue LV = EmitLValue(cleanups->getSubExpr());
1356  if (LV.isSimple()) {
1357  // Defend against branches out of gnu statement expressions surrounded by
1358  // cleanups.
1359  llvm::Value *V = LV.getPointer(*this);
1360  Scope.ForceCleanup({&V});
1361  return LValue::MakeAddr(Address(V, LV.getAlignment()), LV.getType(),
1362  getContext(), LV.getBaseInfo(), LV.getTBAAInfo());
1363  }
1364  // FIXME: Is it possible to create an ExprWithCleanups that produces a
1365  // bitfield lvalue or some other non-simple lvalue?
1366  return LV;
1367  }
1368 
1369  case Expr::CXXDefaultArgExprClass: {
1370  auto *DAE = cast<CXXDefaultArgExpr>(E);
1371  CXXDefaultArgExprScope Scope(*this, DAE);
1372  return EmitLValue(DAE->getExpr());
1373  }
1374  case Expr::CXXDefaultInitExprClass: {
1375  auto *DIE = cast<CXXDefaultInitExpr>(E);
1376  CXXDefaultInitExprScope Scope(*this, DIE);
1377  return EmitLValue(DIE->getExpr());
1378  }
1379  case Expr::CXXTypeidExprClass:
1380  return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
1381 
1382  case Expr::ObjCMessageExprClass:
1383  return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
1384  case Expr::ObjCIvarRefExprClass:
1385  return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
1386  case Expr::StmtExprClass:
1387  return EmitStmtExprLValue(cast<StmtExpr>(E));
1388  case Expr::UnaryOperatorClass:
1389  return EmitUnaryOpLValue(cast<UnaryOperator>(E));
1390  case Expr::ArraySubscriptExprClass:
1391  return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
1392  case Expr::MatrixSubscriptExprClass:
1393  return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
1394  case Expr::OMPArraySectionExprClass:
1395  return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
1396  case Expr::ExtVectorElementExprClass:
1397  return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
1398  case Expr::MemberExprClass:
1399  return EmitMemberExpr(cast<MemberExpr>(E));
1400  case Expr::CompoundLiteralExprClass:
1401  return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
1402  case Expr::ConditionalOperatorClass:
1403  return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
1404  case Expr::BinaryConditionalOperatorClass:
1405  return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
1406  case Expr::ChooseExprClass:
1407  return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr());
1408  case Expr::OpaqueValueExprClass:
1409  return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
1410  case Expr::SubstNonTypeTemplateParmExprClass:
1411  return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
1412  case Expr::ImplicitCastExprClass:
1413  case Expr::CStyleCastExprClass:
1414  case Expr::CXXFunctionalCastExprClass:
1415  case Expr::CXXStaticCastExprClass:
1416  case Expr::CXXDynamicCastExprClass:
1417  case Expr::CXXReinterpretCastExprClass:
1418  case Expr::CXXConstCastExprClass:
1419  case Expr::CXXAddrspaceCastExprClass:
1420  case Expr::ObjCBridgedCastExprClass:
1421  return EmitCastLValue(cast<CastExpr>(E));
1422 
1423  case Expr::MaterializeTemporaryExprClass:
1424  return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
1425 
1426  case Expr::CoawaitExprClass:
1427  return EmitCoawaitLValue(cast<CoawaitExpr>(E));
1428  case Expr::CoyieldExprClass:
1429  return EmitCoyieldLValue(cast<CoyieldExpr>(E));
1430  }
1431 }
1432 
1433 /// Given an object of the given canonical type, can we safely copy a
1434 /// value out of it based on its initializer?
1435 static bool isConstantEmittableObjectType(QualType type) {
1436  assert(type.isCanonical());
1437  assert(!type->isReferenceType());
1438 
1439  // Must be const-qualified but non-volatile.
1440  Qualifiers qs = type.getLocalQualifiers();
1441  if (!qs.hasConst() || qs.hasVolatile()) return false;
1442 
1443  // Otherwise, all object types satisfy this except C++ classes with
1444  // mutable subobjects or non-trivial copy/destroy behavior.
1445  if (const auto *RT = dyn_cast<RecordType>(type))
1446  if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1447  if (RD->hasMutableFields() || !RD->isTrivial())
1448  return false;
1449 
1450  return true;
1451 }
1452 
1453 /// Can we constant-emit a load of a reference to a variable of the
1454 /// given type? This is different from predicates like
1455 /// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1456 /// in situations that don't necessarily satisfy the language's rules
1457 /// for this (e.g. C++'s ODR-use rules). For example, we want to be able
1458 /// to do this with const float variables even if those variables
1459 /// aren't marked 'constexpr'.
1460 enum ConstantEmissionKind {
1461  CEK_None,
1462  CEK_AsReferenceOnly,
1463  CEK_AsValueOrReference,
1464  CEK_AsValueOnly
1465 };
1466 static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
1467  type = type.getCanonicalType();
1468  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
1469  if (isConstantEmittableObjectType(ref->getPointeeType()))
1470  return CEK_AsValueOrReference;
1471  return CEK_AsReferenceOnly;
1472  }
1473  if (isConstantEmittableObjectType(type))
1474  return CEK_AsValueOnly;
1475  return CEK_None;
1476 }
1477 
1478 /// Try to emit a reference to the given value without producing it as
1479 /// an l-value. This is just an optimization, but it avoids us needing
1480 /// to emit global copies of variables if they're named without triggering
1481 /// a formal use in a context where we can't emit a direct reference to them,
1482 /// for instance if a block or lambda or a member of a local class uses a
1483 /// const int variable or constexpr variable from an enclosing function.
1484 CodeGenFunction::ConstantEmission
1485 CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
1486  ValueDecl *value = refExpr->getDecl();
1487 
1488  // The value needs to be an enum constant or a constant variable.
1489  ConstantEmissionKind CEK;
1490  if (isa<ParmVarDecl>(value)) {
1491  CEK = CEK_None;
1492  } else if (auto *var = dyn_cast<VarDecl>(value)) {
1493  CEK = checkVarTypeForConstantEmission(var->getType());
1494  } else if (isa<EnumConstantDecl>(value)) {
1495  CEK = CEK_AsValueOnly;
1496  } else {
1497  CEK = CEK_None;
1498  }
1499  if (CEK == CEK_None) return ConstantEmission();
1500 
1501  Expr::EvalResult result;
1502  bool resultIsReference;
1503  QualType resultType;
1504 
1505  // It's best to evaluate all the way as an r-value if that's permitted.
1506  if (CEK != CEK_AsReferenceOnly &&
1507  refExpr->EvaluateAsRValue(result, getContext())) {
1508  resultIsReference = false;
1509  resultType = refExpr->getType();
1510 
1511  // Otherwise, try to evaluate as an l-value.
1512  } else if (CEK != CEK_AsValueOnly &&
1513  refExpr->EvaluateAsLValue(result, getContext())) {
1514  resultIsReference = true;
1515  resultType = value->getType();
1516 
1517  // Failure.
1518  } else {
1519  return ConstantEmission();
1520  }
1521 
1522  // In any case, if the initializer has side-effects, abandon ship.
1523  if (result.HasSideEffects)
1524  return ConstantEmission();
1525 
1526  // In CUDA/HIP device compilation, a lambda may capture a reference variable
1527  // referencing a global host variable by copy. In this case the lambda should
1528  // make a copy of the value of the global host variable. The DRE of the
1529  // captured reference variable cannot be emitted as load from the host
1530  // global variable as compile time constant, since the host variable is not
1531  // accessible on device. The DRE of the captured reference variable has to be
1532  // loaded from captures.
1533  if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
1534  refExpr->refersToEnclosingVariableOrCapture()) {
1535  auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
1536  if (MD && MD->getParent()->isLambda() &&
1537  MD->getOverloadedOperator() == OO_Call) {
1538  const APValue::LValueBase &base = result.Val.getLValueBase();
1539  if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
1540  if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
1541  if (!VD->hasAttr<CUDADeviceAttr>()) {
1542  return ConstantEmission();
1543  }
1544  }
1545  }
1546  }
1547  }
1548 
1549  // Emit as a constant.
1550  auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
1551  result.Val, resultType);
1552 
1553  // Make sure we emit a debug reference to the global variable.
1554  // This should probably fire even for
1555  if (isa<VarDecl>(value)) {
1556  if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
1557  EmitDeclRefExprDbgValue(refExpr, result.Val);
1558  } else {
1559  assert(isa<EnumConstantDecl>(value));
1560  EmitDeclRefExprDbgValue(refExpr, result.Val);
1561  }
1562 
1563  // If we emitted a reference constant, we need to dereference that.
1564  if (resultIsReference)
1565  return ConstantEmission::forReference(C);
1566 
1567  return ConstantEmission::forValue(C);
1568 }
1569 
1570 static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
1571  const MemberExpr *ME) {
1572  if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
1573  // Try to emit static variable member expressions as DREs.
1574  return DeclRefExpr::Create(
1575  CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
1576  /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
1577  ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
1578  }
1579  return nullptr;
1580 }
1581 
1582 CodeGenFunction::ConstantEmission
1583 CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
1584  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
1585  return tryEmitAsConstant(DRE);
1586  return ConstantEmission();
1587 }
1588 
1589 llvm::Value *CodeGenFunction::emitScalarConstant(
1590  const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
1591  assert(Constant && "not a constant");
1592  if (Constant.isReference())
1593  return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
1594  E->getExprLoc())
1595  .getScalarVal();
1596  return Constant.getValue();
1597 }
1598 
1599 llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
1600  SourceLocation Loc) {
1601  return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(),
1602  lvalue.getType(), Loc, lvalue.getBaseInfo(),
1603  lvalue.getTBAAInfo(), lvalue.isNontemporal());
1604 }
1605 
1606 static bool hasBooleanRepresentation(QualType Ty) {
1607  if (Ty->isBooleanType())
1608  return true;
1609 
1610  if (const EnumType *ET = Ty->getAs<EnumType>())
1611  return ET->getDecl()->getIntegerType()->isBooleanType();
1612 
1613  if (const AtomicType *AT = Ty->getAs<AtomicType>())
1614  return hasBooleanRepresentation(AT->getValueType());
1615 
1616  return false;
1617 }
1618 
1619 static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
1620  llvm::APInt &Min, llvm::APInt &End,
1621  bool StrictEnums, bool IsBool) {
1622  const EnumType *ET = Ty->getAs<EnumType>();
1623  bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
1624  ET && !ET->getDecl()->isFixed();
1625  if (!IsBool && !IsRegularCPlusPlusEnum)
1626  return false;
1627 
1628  if (IsBool) {
1629  Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
1630  End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
1631  } else {
1632  const EnumDecl *ED = ET->getDecl();
1633  llvm::Type *LTy = CGF.ConvertTypeForMem(ED->getIntegerType());
1634  unsigned Bitwidth = LTy->getScalarSizeInBits();
1635  unsigned NumNegativeBits = ED->getNumNegativeBits();
1636  unsigned NumPositiveBits = ED->getNumPositiveBits();
1637 
1638  if (NumNegativeBits) {
1639  unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
1640  assert(NumBits <= Bitwidth);
1641  End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
1642  Min = -End;
1643  } else {
1644  assert(NumPositiveBits <= Bitwidth);
1645  End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
1646  Min = llvm::APInt::getZero(Bitwidth);
1647  }
1648  }
1649  return true;
1650 }
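// Worked example (illustrative): for 'enum E { A = -4, B = 3 }' under
// -fstrict-enums, NumNegativeBits = 3 and NumPositiveBits = 2, so
// NumBits = max(3, 2 + 1) = 3, End = 1 << 2 = 4, and Min = -4; the valid
// range is [-4, 4) in the enum's memory type.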
1651 
1652 llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
1653  llvm::APInt Min, End;
1654  if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
1655  hasBooleanRepresentation(Ty)))
1656  return nullptr;
1657 
1658  llvm::MDBuilder MDHelper(getLLVMContext());
1659  return MDHelper.createRange(Min, End);
1660 }
1661 
1662 bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
1663  SourceLocation Loc) {
1664  bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
1665  bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
1666  if (!HasBoolCheck && !HasEnumCheck)
1667  return false;
1668 
1669  bool IsBool = hasBooleanRepresentation(Ty) ||
1670  NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
1671  bool NeedsBoolCheck = HasBoolCheck && IsBool;
1672  bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
1673  if (!NeedsBoolCheck && !NeedsEnumCheck)
1674  return false;
1675 
1676  // Single-bit booleans don't need to be checked. Special-case this to avoid
1677  // a bit width mismatch when handling bitfield values. This is handled by
1678  // EmitFromMemory for the non-bitfield case.
1679  if (IsBool &&
1680  cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
1681  return false;
1682 
1683  llvm::APInt Min, End;
1684  if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
1685  return true;
1686 
1687  auto &Ctx = getLLVMContext();
1688  SanitizerScope SanScope(this);
1689  llvm::Value *Check;
1690  --End;
1691  if (!Min) {
1692  Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
1693  } else {
1694  llvm::Value *Upper =
1695  Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
1696  llvm::Value *Lower =
1697  Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
1698  Check = Builder.CreateAnd(Upper, Lower);
1699  }
1700  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
1701  EmitCheckTypeDescriptor(Ty)};
1702  SanitizerMask Kind =
1703  NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
1704  EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
1705  StaticArgs, EmitCheckValue(Value));
1706  return true;
1707 }
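// Sketch of the emitted guard (illustrative; value names invented): for a
// 'bool' loaded as i8, Min = 0 and End = 2, so after '--End' the check is
//   %inbounds = icmp ule i8 %val, 1
// and the false edge branches to the ubsan LoadInvalidValue handler.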
1708 
1709 llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
1710  QualType Ty,
1711  SourceLocation Loc,
1712  LValueBaseInfo BaseInfo,
1713  TBAAAccessInfo TBAAInfo,
1714  bool isNontemporal) {
1715  if (!CGM.getCodeGenOpts().PreserveVec3Type) {
1716  // For better performance, handle vector loads differently.
1717  if (Ty->isVectorType()) {
1718  const llvm::Type *EltTy = Addr.getElementType();
1719 
1720  const auto *VTy = cast<llvm::FixedVectorType>(EltTy);
1721 
1722  // Handle vectors of size 3 like size 4 for better performance.
1723  if (VTy->getNumElements() == 3) {
1724 
1725  // Bitcast to vec4 type.
1726  auto *vec4Ty = llvm::FixedVectorType::get(VTy->getElementType(), 4);
1727  Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
1728  // Now load value.
1729  llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
1730 
1731  // Shuffle vector to get vec3.
1732  V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2},
1733  "extractVec");
1734  return EmitFromMemory(V, Ty);
1735  }
1736  }
1737  }
1738 
1739  // Atomic operations have to be done on integral types.
1740  LValue AtomicLValue =
1741  LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
1742  if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
1743  return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
1744  }
1745 
1746  llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
1747  if (isNontemporal) {
1748  llvm::MDNode *Node = llvm::MDNode::get(
1749  Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
1750  Load->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
1751  }
1752 
1753  CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
1754 
1755  if (EmitScalarRangeCheck(Load, Ty, Loc)) {
1756  // In order to prevent the optimizer from throwing away the check, don't
1757  // attach range metadata to the load.
1758  } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
1759  if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
1760  Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
1761 
1762  return EmitFromMemory(Load, Ty);
1763 }
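// Illustrative IR for the vec3 fast path above (names invented): without
// -fpreserve-vec3-type, loading a 'float3' becomes roughly
//   %cast = bitcast <3 x float>* %p to <4 x float>*
//   %v4 = load <4 x float>, <4 x float>* %cast
//   %v3 = shufflevector <4 x float> %v4, <4 x float> poison,
//                       <3 x i32> <i32 0, i32 1, i32 2>
// i.e. the access is widened to a naturally sized vec4 load.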
1764 
1765 llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
1766  // Bool has a different representation in memory than in registers.
1767  if (hasBooleanRepresentation(Ty)) {
1768  // This should really always be an i1, but sometimes it's already
1769  // an i8, and it's awkward to track those cases down.
1770  if (Value->getType()->isIntegerTy(1))
1771  return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
1772  assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
1773  "wrong value rep of bool");
1774  }
1775 
1776  return Value;
1777 }
1778 
1779 llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
1780  // Bool has a different representation in memory than in registers.
1781  if (hasBooleanRepresentation(Ty)) {
1782  assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
1783  "wrong value rep of bool");
1784  return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
1785  }
1786 
1787  return Value;
1788 }
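// Informal summary (not from this file): a C++ 'bool' is i1 in registers but
// i8 in memory, so the two helpers above bridge the representations:
//   %frombool = zext i1 %b to i8    ; EmitToMemory, before a store
//   %tobool = trunc i8 %m to i1     ; EmitFromMemory, after a load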
1789 
1790 // Convert the pointer of \p Addr to a pointer to a vector (the value type of
1791 // MatrixType), if it points to an array (the memory type of MatrixType).
1792 static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
1793  bool IsVector = true) {
1794  auto *ArrayTy = dyn_cast<llvm::ArrayType>(
1795  cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
1796  if (ArrayTy && IsVector) {
1797  auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
1798  ArrayTy->getNumElements());
1799 
1800  return Address(CGF.Builder.CreateElementBitCast(Addr, VectorTy));
1801  }
1802  auto *VectorTy = dyn_cast<llvm::VectorType>(
1803  cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
1804  if (VectorTy && !IsVector) {
1805  auto *ArrayTy = llvm::ArrayType::get(
1806  VectorTy->getElementType(),
1807  cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
1808 
1809  return Address(CGF.Builder.CreateElementBitCast(Addr, ArrayTy));
1810  }
1811 
1812  return Addr;
1813 }
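// Illustration (not from this file): a 'float __attribute__((matrix_type(2,2)))'
// is [4 x float] in memory but <4 x float> as an LLVM value, so this helper
// rebitcasts the pointer between the two shapes in whichever direction the
// caller needs.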
1814 
1815 // Emit a store of a matrix LValue. This may require casting the original
1816 // pointer to memory address (ArrayType) to a pointer to the value type
1817 // (VectorType).
1818 static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
1819  bool isInit, CodeGenFunction &CGF) {
1820  Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF,
1821  value->getType()->isVectorTy());
1822  CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
1823  lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
1824  lvalue.isNontemporal());
1825 }
1826 
1827 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
1828  bool Volatile, QualType Ty,
1829  LValueBaseInfo BaseInfo,
1830  TBAAAccessInfo TBAAInfo,
1831  bool isInit, bool isNontemporal) {
1832  if (!CGM.getCodeGenOpts().PreserveVec3Type) {
1833  // Handle vectors differently to get better performance.
1834  if (Ty->isVectorType()) {
1835  llvm::Type *SrcTy = Value->getType();
1836  auto *VecTy = dyn_cast<llvm::VectorType>(SrcTy);
1837  // Handle vec3 special.
1838  if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
1839  // Our source is a vec3, do a shuffle vector to make it a vec4.
1840  Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
1841  "extractVec");
1842  SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
1843  }
1844  if (Addr.getElementType() != SrcTy) {
1845  Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp");
1846  }
1847  }
1848  }
1849 
1850  Value = EmitToMemory(Value, Ty);
1851 
1852  LValue AtomicLValue =
1853  LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
1854  if (Ty->isAtomicType() ||
1855  (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
1856  EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
1857  return;
1858  }
1859 
1860  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
1861  if (isNontemporal) {
1862  llvm::MDNode *Node =
1863  llvm::MDNode::get(Store->getContext(),
1864  llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
1865  Store->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
1866  }
1867 
1868  CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
1869 }
1870 
1871 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
1872  bool isInit) {
1873  if (lvalue.getType()->isConstantMatrixType()) {
1874  EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
1875  return;
1876  }
1877 
1878  EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(),
1879  lvalue.getType(), lvalue.getBaseInfo(),
1880  lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
1881 }
1882 
1883 // Emit a load of a LValue of matrix type. This may require casting the pointer
1884 // to memory address (ArrayType) to a pointer to the value type (VectorType).
1885 static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
1886  CodeGenFunction &CGF) {
1887  assert(LV.getType()->isConstantMatrixType());
1888  Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF);
1889  LV.setAddress(Addr);
1890  return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
1891 }
1892 
1893 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
1894 /// method emits the address of the lvalue, then loads the result as an rvalue,
1895 /// returning the rvalue.
1896 RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
1897  if (LV.isObjCWeak()) {
1898  // load of a __weak object.
1899  Address AddrWeakObj = LV.getAddress(*this);
1900  return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
1901  AddrWeakObj));
1902  }
1903  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
1904  // In MRC mode, we do a load+autorelease.
1905  if (!getLangOpts().ObjCAutoRefCount) {
1906  return RValue::get(EmitARCLoadWeak(LV.getAddress(*this)));
1907  }
1908 
1909  // In ARC mode, we load retained and then consume the value.
1910  llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this));
1911  Object = EmitObjCConsumeObject(LV.getType(), Object);
1912  return RValue::get(Object);
1913  }
1914 
1915  if (LV.isSimple()) {
1916  assert(!LV.getType()->isFunctionType());
1917 
1918  if (LV.getType()->isConstantMatrixType())
1919  return EmitLoadOfMatrixLValue(LV, Loc, *this);
1920 
1921  // Everything needs a load.
1922  return RValue::get(EmitLoadOfScalar(LV, Loc));
1923  }
1924 
1925  if (LV.isVectorElt()) {
1926  llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
1927  LV.isVolatileQualified());
1928  return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
1929  "vecext"));
1930  }
1931 
1932  // If this is a reference to a subset of the elements of a vector, either
1933  // shuffle the input or extract/insert them as appropriate.
1934  if (LV.isExtVectorElt()) {
1935  return EmitLoadOfExtVectorElementLValue(LV);
1936  }
1937 
1938  // Global Register variables always invoke intrinsics
1939  if (LV.isGlobalReg())
1940  return EmitLoadOfGlobalRegLValue(LV);
1941 
1942  if (LV.isMatrixElt()) {
1943  llvm::Value *Idx = LV.getMatrixIdx();
1944  if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
1945  const auto *const MatTy = LV.getType()->getAs<ConstantMatrixType>();
1946  llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
1947  MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
1948  }
1949  llvm::LoadInst *Load =
1950  Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
1951  return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
1952  }
1953 
1954  assert(LV.isBitField() && "Unknown LValue type!");
1955  return EmitLoadOfBitfieldLValue(LV, Loc);
1956 }
1957 
1958 RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
1959  SourceLocation Loc) {
1960  const CGBitFieldInfo &Info = LV.getBitFieldInfo();
1961 
1962  // Get the output type.
1963  llvm::Type *ResLTy = ConvertType(LV.getType());
1964 
1965  Address Ptr = LV.getBitFieldAddress();
1966  llvm::Value *Val =
1967  Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
1968 
1969  bool UseVolatile = LV.isVolatileQualified() &&
1970  Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
1971  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
1972  const unsigned StorageSize =
1973  UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
1974  if (Info.IsSigned) {
1975  assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
1976  unsigned HighBits = StorageSize - Offset - Info.Size;
1977  if (HighBits)
1978  Val = Builder.CreateShl(Val, HighBits, "bf.shl");
1979  if (Offset + HighBits)
1980  Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
1981  } else {
1982  if (Offset)
1983  Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
1984  if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
1985  Val = Builder.CreateAnd(
1986  Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
1987  }
1988  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
1989  EmitScalarRangeCheck(Val, LV.getType(), Loc);
1990  return RValue::get(Val);
1991 }
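// Worked example (illustrative; assumes both fields share one 32-bit storage
// unit): loading 'b' from 'struct S { int a : 3; int b : 20; }' has
// Offset = 3 and Size = 20, so HighBits = 32 - 3 - 20 = 9 and the signed
// path emits
//   %bf.shl = shl i32 %bf.load, 9
//   %bf.ashr = ashr i32 %bf.shl, 12   ; 3 + 9; sign-extends 'b'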
1992 
1993 // If this is a reference to a subset of the elements of a vector, create an
1994 // appropriate shufflevector.
1995 RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
1996  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
1997  LV.isVolatileQualified());
1998 
1999  const llvm::Constant *Elts = LV.getExtVectorElts();
2000 
2001  // If the result of the expression is a non-vector type, we must be extracting
2002  // a single element. Just codegen as an extractelement.
2003  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2004  if (!ExprVT) {
2005  unsigned InIdx = getAccessedFieldNo(0, Elts);
2006  llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2007  return RValue::get(Builder.CreateExtractElement(Vec, Elt));
2008  }
2009 
2010  // Always use shuffle vector to try to retain the original program structure
2011  unsigned NumResultElts = ExprVT->getNumElements();
2012 
2013  SmallVector<int, 4> Mask;
2014  for (unsigned i = 0; i != NumResultElts; ++i)
2015  Mask.push_back(getAccessedFieldNo(i, Elts));
2016 
2017  Vec = Builder.CreateShuffleVector(Vec, Mask);
2018  return RValue::get(Vec);
2019 }
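// Illustration (OpenCL-style, not from this file): for 'float4 v', the
// access 'v.zy' has Elts = {2, 1} and lowers to
//   shufflevector <4 x float> %v, <4 x float> poison, <2 x i32> <i32 2, i32 1>
// while the scalar access 'v.w' is a single extractelement of lane 3.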
2020 
2021 /// Generates lvalue for partial ext_vector access.
2022 Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
2023  Address VectorAddress = LV.getExtVectorAddress();
2024  QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2025  llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2026 
2027  Address CastToPointerElement =
2028  Builder.CreateElementBitCast(VectorAddress, VectorElementTy,
2029  "conv.ptr.element");
2030 
2031  const llvm::Constant *Elts = LV.getExtVectorElts();
2032  unsigned ix = getAccessedFieldNo(0, Elts);
2033 
2034  Address VectorBasePtrPlusIx =
2035  Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2036  "vector.elt");
2037 
2038  return VectorBasePtrPlusIx;
2039 }
2040 
2041 /// Loads of global named registers are always calls to intrinsics.
2042 RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
2043  assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2044  "Bad type for register variable");
2045  llvm::MDNode *RegName = cast<llvm::MDNode>(
2046  cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2047 
2048  // We accept integer and pointer types only
2049  llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2050  llvm::Type *Ty = OrigTy;
2051  if (OrigTy->isPointerTy())
2052  Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2053  llvm::Type *Types[] = { Ty };
2054 
2055  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2056  llvm::Value *Call = Builder.CreateCall(
2057  F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2058  if (OrigTy->isPointerTy())
2059  Call = Builder.CreateIntToPtr(Call, OrigTy);
2060  return RValue::get(Call);
2061 }
2062 
2063 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
2064 /// lvalue, where both are guaranteed to have the same type, and that type
2065 /// is 'Ty'.
2066 void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
2067  bool isInit) {
2068  if (!Dst.isSimple()) {
2069  if (Dst.isVectorElt()) {
2070  // Read/modify/write the vector, inserting the new element.
2071  llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2072  Dst.isVolatileQualified());
2073  Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
2074  Dst.getVectorIdx(), "vecins");
2075  Builder.CreateStore(Vec, Dst.getVectorAddress(),
2076  Dst.isVolatileQualified());
2077  return;
2078  }
2079 
2080  // If this is an update of extended vector elements, insert them as
2081  // appropriate.
2082  if (Dst.isExtVectorElt())
2083  return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
2084 
2085  if (Dst.isGlobalReg())
2086  return EmitStoreThroughGlobalRegLValue(Src, Dst);
2087 
2088  if (Dst.isMatrixElt()) {
2089  llvm::Value *Idx = Dst.getMatrixIdx();
2090  if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2091  const auto *const MatTy = Dst.getType()->getAs<ConstantMatrixType>();
2092  llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
2093  MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2094  }
2095  llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2096  llvm::Value *Vec =
2097  Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
2098  Builder.CreateStore(Vec, Dst.getMatrixAddress(),
2099  Dst.isVolatileQualified());
2100  return;
2101  }
2102 
2103  assert(Dst.isBitField() && "Unknown LValue type");
2104  return EmitStoreThroughBitfieldLValue(Src, Dst);
2105  }
2106 
2107  // There's special magic for assigning into an ARC-qualified l-value.
2108  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2109  switch (Lifetime) {
2110  case Qualifiers::OCL_None:
2111  llvm_unreachable("present but none");
2112 
2113  case Qualifiers::OCL_ExplicitNone:
2114  // nothing special
2115  break;
2116 
2117  case Qualifiers::OCL_Strong:
2118  if (isInit) {
2119  Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2120  break;
2121  }
2122  EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2123  return;
2124 
2125  case Qualifiers::OCL_Weak:
2126  if (isInit)
2127  // Initialize and then skip the primitive store.
2128  EmitARCInitWeak(Dst.getAddress(*this), Src.getScalarVal());
2129  else
2130  EmitARCStoreWeak(Dst.getAddress(*this), Src.getScalarVal(),
2131  /*ignore*/ true);
2132  return;
2133 
2134  case Qualifiers::OCL_Autoreleasing:
2135  Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
2136  Src.getScalarVal()));
2137  // fall into the normal path
2138  break;
2139  }
2140  }
2141 
2142  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2143  // load of a __weak object.
2144  Address LvalueDst = Dst.getAddress(*this);
2145  llvm::Value *src = Src.getScalarVal();
2146  CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
2147  return;
2148  }
2149 
2150  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2151  // load of a __strong object.
2152  Address LvalueDst = Dst.getAddress(*this);
2153  llvm::Value *src = Src.getScalarVal();
2154  if (Dst.isObjCIvar()) {
2155  assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2156  llvm::Type *ResultType = IntPtrTy;
2157  Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
2158  llvm::Value *RHS = dst.getPointer();
2159  RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2160  llvm::Value *LHS =
2161  Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType,
2162  "sub.ptr.lhs.cast");
2163  llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
2164  CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
2165  BytesBetween);
2166  } else if (Dst.isGlobalObjCRef()) {
2167  CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
2168  Dst.isThreadLocalRef());
2169  }
2170  else
2171  CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2172  return;
2173  }
2174 
2175  assert(Src.isScalar() && "Can't emit an agg store with this method");
2176  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2177 }
2178 
2179 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
2180  llvm::Value **Result) {
2181  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2182  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
2183  Address Ptr = Dst.getBitFieldAddress();
2184 
2185  // Get the source value, truncated to the width of the bit-field.
2186  llvm::Value *SrcVal = Src.getScalarVal();
2187 
2188  // Cast the source to the storage type and shift it into place.
2189  SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2190  /*isSigned=*/false);
2191  llvm::Value *MaskedVal = SrcVal;
2192 
2193  const bool UseVolatile =
2194  CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
2195  Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2196  const unsigned StorageSize =
2197  UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2198  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2199  // See if there are other bits in the bitfield's storage we'll need to load
2200  // and mask together with source before storing.
2201  if (StorageSize != Info.Size) {
2202  assert(StorageSize > Info.Size && "Invalid bitfield size.");
2203  llvm::Value *Val =
2204  Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2205 
2206  // Mask the source value as needed.
2207  if (!hasBooleanRepresentation(Dst.getType()))
2208  SrcVal = Builder.CreateAnd(
2209  SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
2210  "bf.value");
2211  MaskedVal = SrcVal;
2212  if (Offset)
2213  SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
2214 
2215  // Mask out the original value.
2216  Val = Builder.CreateAnd(
2217  Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
2218  "bf.clear");
2219 
2220  // Or together the unchanged values and the source value.
2221  SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2222  } else {
2223  assert(Offset == 0);
2224  // According to the AAPCS:
2225  // When a volatile bit-field is written, and its container does not overlap
2226  // with any non-bit-field member, its container must be read exactly once
2227  // and written exactly once using the access width appropriate to the type
2228  // of the container. The two accesses are not atomic.
2229  if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
2230  CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2231  Builder.CreateLoad(Ptr, true, "bf.load");
2232  }
2233 
2234  // Write the new value back out.
2235  Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2236 
2237  // Return the new value of the bit-field, if requested.
2238  if (Result) {
2239  llvm::Value *ResultVal = MaskedVal;
2240 
2241  // Sign extend the value if needed.
2242  if (Info.IsSigned) {
2243  assert(Info.Size <= StorageSize);
2244  unsigned HighBits = StorageSize - Info.Size;
2245  if (HighBits) {
2246  ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2247  ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2248  }
2249  }
2250 
2251  ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2252  "bf.result.cast");
2253  *Result = EmitFromMemory(ResultVal, Dst.getType());
2254  }
2255 }
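// Worked masking example (illustrative): storing to 'int b : 20' at
// Offset = 3 in a 32-bit storage unit, the sequence above amounts to
//   SrcVal = (SrcVal & 0xFFFFF) << 3;   // bf.value, bf.shl
//   Val = Val & ~(0xFFFFF << 3);        // bf.clear
//   store(Val | SrcVal);                // bf.set
// so the container's neighboring bits survive the write.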
2256 
2257 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
2258  LValue Dst) {
2259  // This access turns into a read/modify/write of the vector. Load the input
2260  // value now.
2261  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddress(),
2262  Dst.isVolatileQualified());
2263  const llvm::Constant *Elts = Dst.getExtVectorElts();
2264 
2265  llvm::Value *SrcVal = Src.getScalarVal();
2266 
2267  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2268  unsigned NumSrcElts = VTy->getNumElements();
2269  unsigned NumDstElts =
2270  cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
2271  if (NumDstElts == NumSrcElts) {
2272  // Use a shuffle vector if the source and destination have the same
2273  // number of elements, and restore the vector mask since it is on the
2274  // side it will be stored.
2275  SmallVector<int, 4> Mask(NumDstElts);
2276  for (unsigned i = 0; i != NumSrcElts; ++i)
2277  Mask[getAccessedFieldNo(i, Elts)] = i;
2278 
2279  Vec = Builder.CreateShuffleVector(SrcVal, Mask);
2280  } else if (NumDstElts > NumSrcElts) {
2281  // Extend the source vector to the same length and then shuffle it
2282  // into the destination.
2283  // FIXME: since we're shuffling with undef, can we just use the indices
2284  // into that? This could be simpler.
2285  SmallVector<int, 4> ExtMask;
2286  for (unsigned i = 0; i != NumSrcElts; ++i)
2287  ExtMask.push_back(i);
2288  ExtMask.resize(NumDstElts, -1);
2289  llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
2290  // build identity
2291  SmallVector<int, 4> Mask;
2292  for (unsigned i = 0; i != NumDstElts; ++i)
2293  Mask.push_back(i);
2294 
2295  // When the vector size is odd and .odd or .hi is used, the last element
2296  // of the Elts constant array will be one past the size of the vector.
2297  // Ignore the last element here, if it is greater than the mask size.
2298  if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2299  NumSrcElts--;
2300 
2301  // Modify the mask with what gets shuffled in.
2302  for (unsigned i = 0; i != NumSrcElts; ++i)
2303  Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
2304  Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
2305  } else {
2306  // We should never shorten the vector
2307  llvm_unreachable("unexpected shorten vector length");
2308  }
2309  } else {
2310  // If the Src is a scalar (not a vector) it must be updating one element.
2311  unsigned InIdx = getAccessedFieldNo(0, Elts);
2312  llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2313  Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2314  }
2315 
2316  Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
2317  Dst.isVolatileQualified());
2318 }
2319 
2320 /// Stores to global named registers are always calls to intrinsics.
2321 void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
2322  assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2323  "Bad type for register variable");
2324  llvm::MDNode *RegName = cast<llvm::MDNode>(
2325  cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2326  assert(RegName && "Register LValue is not metadata");
2327 
2328  // We accept integer and pointer types only
2329  llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2330  llvm::Type *Ty = OrigTy;
2331  if (OrigTy->isPointerTy())
2332  Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2333  llvm::Type *Types[] = { Ty };
2334 
2335  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2336  llvm::Value *Value = Src.getScalarVal();
2337  if (OrigTy->isPointerTy())
2338  Value = Builder.CreatePtrToInt(Value, Ty);
2339  Builder.CreateCall(
2340  F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2341 }
2342 
2343 // setObjCGCLValueClass - sets class of the lvalue for the purpose of
2344 // generating write-barrier APIs. It is currently a global, ivar,
2345 // or neither.
2346 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2347  LValue &LV,
2348  bool IsMemberAccess=false) {
2349  if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2350  return;
2351 
2352  if (isa<ObjCIvarRefExpr>(E)) {
2353  QualType ExpTy = E->getType();
2354  if (IsMemberAccess && ExpTy->isPointerType()) {
2355  // If ivar is a structure pointer, assigning to field of
2356  // this struct follows gcc's behavior and makes it a non-ivar
2357  // write-barrier conservatively.
2358  ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2359  if (ExpTy->isRecordType()) {
2360  LV.setObjCIvar(false);
2361  return;
2362  }
2363  }
2364  LV.setObjCIvar(true);
2365  auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2366  LV.setBaseIvarExp(Exp->getBase());
2367  LV.setObjCArray(E->getType()->isArrayType());
2368  return;
2369  }
2370 
2371  if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2372  if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2373  if (VD->hasGlobalStorage()) {
2374  LV.setGlobalObjCRef(true);
2375  LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2376  }
2377  }
2378  LV.setObjCArray(E->getType()->isArrayType());
2379  return;
2380  }
2381 
2382  if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2383  setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2384  return;
2385  }
2386 
2387  if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2388  setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2389  if (LV.isObjCIvar()) {
2390  // If cast is to a structure pointer, follow gcc's behavior and make it
2391  // a non-ivar write-barrier.
2392  QualType ExpTy = E->getType();
2393  if (ExpTy->isPointerType())
2394  ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2395  if (ExpTy->isRecordType())
2396  LV.setObjCIvar(false);
2397  }
2398  return;
2399  }
2400 
2401  if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
2402  setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
2403  return;
2404  }
2405 
2406  if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
2407  setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2408  return;
2409  }
2410 
2411  if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
2412  setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2413  return;
2414  }
2415 
2416  if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
2417  setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2418  return;
2419  }
2420 
2421  if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
2422  setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
2423  if (LV.isObjCIvar() && !LV.isObjCArray())
2424  // Using array syntax to assign to what an ivar points to is not the
2425  // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2426  LV.setObjCIvar(false);
2427  else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
2428  // Using array syntax to assign to what a global points to is not the
2429  // same as assigning to the global itself. {id *G;} G[i] = 0;
2430  LV.setGlobalObjCRef(false);
2431  return;
2432  }
2433 
2434  if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
2435  setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
2436  // We don't know if member is an 'ivar', but this flag is looked at
2437  // only in the context of LV.isObjCIvar().
2438  LV.setObjCArray(E->getType()->isArrayType());
2439  return;
2440  }
2441 }
2442 
2443 static llvm::Value *
2444 EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
2445  llvm::Value *V, llvm::Type *IRType,
2446  StringRef Name = StringRef()) {
2447  unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
2448  return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
2449 }
2450 
2451 static LValue EmitThreadPrivateVarDeclLValue(
2452  CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
2453  llvm::Type *RealVarTy, SourceLocation Loc) {
2454  if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
2455  Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
2456  CGF, VD, Addr, Loc);
2457  else
2458  Addr =
2459  CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
2460 
2461  Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy);
2462  return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2463 }
2464 
2465 static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
2466  const VarDecl *VD, QualType T) {
2467  llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2468  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2469  // Return an invalid address if variable is MT_To and unified
2470  // memory is not enabled. For all other cases: MT_Link and
2471  // MT_To with unified memory, return a valid address.
2472  if (!Res || (*Res == OMPDeclareTargetDeclAttr::MT_To &&
2473  !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()))
2474  return Address::invalid();
2475  assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
2476  (*Res == OMPDeclareTargetDeclAttr::MT_To &&
2477  CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) &&
2478  "Expected link clause OR to clause with unified memory enabled.");
2479  QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
2480  Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
2481  return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
2482 }
2483 
2484 Address
2485 CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
2486  LValueBaseInfo *PointeeBaseInfo,
2487  TBAAAccessInfo *PointeeTBAAInfo) {
2488  llvm::LoadInst *Load =
2489  Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
2490  CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
2491 
2492  CharUnits Align = CGM.getNaturalTypeAlignment(
2493  RefLVal.getType()->getPointeeType(), PointeeBaseInfo, PointeeTBAAInfo,
2494  /* forPointeeType= */ true);
2495  return Address(Load, Align);
2496 }
2497 
2498 LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
2499  LValueBaseInfo PointeeBaseInfo;
2500  TBAAAccessInfo PointeeTBAAInfo;
2501  Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
2502  &PointeeTBAAInfo);
2503  return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
2504  PointeeBaseInfo, PointeeTBAAInfo);
2505 }
2506 
2507 Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
2508  const PointerType *PtrTy,
2509  LValueBaseInfo *BaseInfo,
2510  TBAAAccessInfo *TBAAInfo) {
2511  llvm::Value *Addr = Builder.CreateLoad(Ptr);
2512  return Address(Addr, CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(),
2513  BaseInfo, TBAAInfo,
2514  /*forPointeeType=*/true));
2515 }
2516 
2517 LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
2518  const PointerType *PtrTy) {
2519  LValueBaseInfo BaseInfo;
2520  TBAAAccessInfo TBAAInfo;
2521  Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
2522  return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
2523 }
2524 
2525 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
2526  const Expr *E, const VarDecl *VD) {
2527  QualType T = E->getType();
2528 
2529  // If it's thread_local, emit a call to its wrapper function instead.
2530  if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2531  CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD))
2532  return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
2533  // Check if the variable is marked as declare target with link clause in
2534  // device codegen.
2535  if (CGF.getLangOpts().OpenMPIsDevice) {
2536  Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
2537  if (Addr.isValid())
2538  return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2539  }
2540 
2541  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
2542  llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
2543  V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
2544  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
2545  Address Addr(V, Alignment);
2546  // Emit reference to the private copy of the variable if it is an OpenMP
2547  // threadprivate variable.
2548  if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
2549  VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2550  return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
2551  E->getExprLoc());
2552  }
2553  LValue LV = VD->getType()->isReferenceType() ?
2554  CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
2555  AlignmentSource::Decl) :
2556  CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2557  setObjCGCLValueClass(CGF.getContext(), E, LV);
2558  return LV;
2559 }
2560 
2561 static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
2562  GlobalDecl GD) {
2563  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2564  if (FD->hasAttr<WeakRefAttr>()) {
2565  ConstantAddress aliasee = CGM.GetWeakRefReference(FD);
2566  return aliasee.getPointer();
2567  }
2568 
2569  llvm::Constant *V = CGM.GetAddrOfFunction(GD);
2570  if (!FD->hasPrototype()) {
2571  if (const FunctionProtoType *Proto =
2572  FD->getType()->getAs<FunctionProtoType>()) {
2573  // Ugly case: for a K&R-style definition, the type of the definition
2574  // isn't the same as the type of a use. Correct for this with a
2575  // bitcast.
2576  QualType NoProtoType =
2577  CGM.getContext().getFunctionNoProtoType(Proto->getReturnType());
2578  NoProtoType = CGM.getContext().getPointerType(NoProtoType);
2579  V = llvm::ConstantExpr::getBitCast(V,
2580  CGM.getTypes().ConvertType(NoProtoType));
2581  }
2582  }
2583  return V;
2584 }
2585 
2586 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
2587  GlobalDecl GD) {
2588  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2589  llvm::Constant *V = EmitFunctionDeclPointer(CGF.CGM, GD);
2590  CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
2591  return CGF.MakeAddrLValue(V, E->getType(), Alignment,
2592  AlignmentSource::Decl);
2593 }
2594 
2595 static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
2596  llvm::Value *ThisValue) {
2597  QualType TagType = CGF.getContext().getTagDeclType(FD->getParent());
2598  LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType);
2599  return CGF.EmitLValueForField(LV, FD);
2600 }
2601 
2602 /// Named Registers are named metadata pointing to the register name
2603 /// which will be read from/written to as an argument to the intrinsic
2604 /// @llvm.read/write_register.
2605 /// So far, only the name is being passed down, but other options such as
2606 /// register type, allocation type or even optimization options could be
2607 /// passed down via the metadata node.
2608 static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
2609  SmallString<64> Name("llvm.named.register.");
2610  AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
2611  assert(Asm->getLabel().size() < 64-Name.size() &&
2612  "Register name too big");
2613  Name.append(Asm->getLabel());
2614  llvm::NamedMDNode *M =
2615  CGM.getModule().getOrInsertNamedMetadata(Name);
2616  if (M->getNumOperands() == 0) {
2617  llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
2618  Asm->getLabel());
2619  llvm::Metadata *Ops[] = {Str};
2620  M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
2621  }
2622 
2623  CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
2624 
2625  llvm::Value *Ptr =
2626  llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
2627  return LValue::MakeGlobalReg(Address(Ptr, Alignment), VD->getType());
2628 }
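// Usage sketch (illustrative): a declaration such as
//   register unsigned long current_sp asm("sp");
// produces named metadata !llvm.named.register.sp = !{!0} with !0 = !{!"sp"};
// reads of the variable then lower to
//   call i64 @llvm.read_register.i64(metadata !0)
// and writes use @llvm.write_register via the store path earlier in this file.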
2629 
2630 /// Determine whether we can emit a reference to \p VD from the current
2631 /// context, despite not necessarily having seen an odr-use of the variable in
2632 /// this context.
2633 static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
2634  const DeclRefExpr *E,
2635  const VarDecl *VD,
2636  bool IsConstant) {
2637  // For a variable declared in an enclosing scope, do not emit a spurious
2638  // reference even if we have a capture, as that will emit an unwarranted
2639  // reference to our capture state, and will likely generate worse code than
2640  // emitting a local copy.
2641  if (E->refersToEnclosingVariableOrCapture())
2642  return false;
2643 
2644  // For a local declaration declared in this function, we can always reference
2645  // it even if we don't have an odr-use.
2646  if (VD->hasLocalStorage()) {
2647  return VD->getDeclContext() ==
2648  dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
2649  }
2650 
2651  // For a global declaration, we can emit a reference to it if we know
2652  // for sure that we are able to emit a definition of it.
2653  VD = VD->getDefinition(CGF.getContext());
2654  if (!VD)
2655  return false;
2656 
2657  // Don't emit a spurious reference if it might be to a variable that only
2658  // exists on a different device / target.
2659  // FIXME: This is unnecessarily broad. Check whether this would actually be a
2660  // cross-target reference.
2661  if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
2662  CGF.getLangOpts().OpenCL) {
2663  return false;
2664  }
2665 
2666  // We can emit a spurious reference only if the linkage implies that we'll
2667  // be emitting a non-interposable symbol that will be retained until link
2668  // time.
2669  switch (CGF.CGM.getLLVMLinkageVarDefinition(VD, IsConstant)) {
2670  case llvm::GlobalValue::ExternalLinkage:
2671  case llvm::GlobalValue::LinkOnceODRLinkage:
2672  case llvm::GlobalValue::WeakODRLinkage:
2673  case llvm::GlobalValue::InternalLinkage:
2674  case llvm::GlobalValue::PrivateLinkage:
2675  return true;
2676  default:
2677  return false;
2678  }
2679 }
2680 
2681 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
2682  const NamedDecl *ND = E->getDecl();
2683  QualType T = E->getType();
2684 
2685  assert(E->isNonOdrUse() != NOUR_Unevaluated &&
2686  "should not emit an unevaluated operand");
2687 
2688  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
2689  // Global Named registers access via intrinsics only
2690  if (VD->getStorageClass() == SC_Register &&
2691  VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
2692  return EmitGlobalNamedRegister(VD, CGM);
2693 
2694  // If this DeclRefExpr does not constitute an odr-use of the variable,
2695  // we're not permitted to emit a reference to it in general, and it might
2696  // not be captured if capture would be necessary for a use. Emit the
2697  // constant value directly instead.
2698  if (E->isNonOdrUse() == NOUR_Constant &&
2699  (VD->getType()->isReferenceType() ||
2700  !canEmitSpuriousReferenceToVariable(*this, E, VD, true))) {
2701  VD->getAnyInitializer(VD);
2702  llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
2703  E->getLocation(), *VD->evaluateValue(), VD->getType());
2704  assert(Val && "failed to emit constant expression");
2705 
2706  Address Addr = Address::invalid();
2707  if (!VD->getType()->isReferenceType()) {
2708  // Spill the constant value to a global.
2709  Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
2710  getContext().getDeclAlign(VD));
2711  llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
2712  auto *PTy = llvm::PointerType::get(
2713  VarTy, getContext().getTargetAddressSpace(VD->getType()));
2714  if (PTy != Addr.getType())
2715  Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy);
2716  } else {
2717  // Should we be using the alignment of the constant pointer we emitted?
2718  CharUnits Alignment =
2719  CGM.getNaturalTypeAlignment(E->getType(),
2720  /* BaseInfo= */ nullptr,
2721  /* TBAAInfo= */ nullptr,
2722  /* forPointeeType= */ true);
2723  Addr = Address(Val, Alignment);
2724  }
2725  return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2726  }
2727 
2728  // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
2729 
2730  // Check for captured variables.
2731  if (E->refersToEnclosingVariableOrCapture()) {
2732  VD = VD->getCanonicalDecl();
2733  if (auto *FD = LambdaCaptureFields.lookup(VD))
2734  return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
2735  if (CapturedStmtInfo) {
2736  auto I = LocalDeclMap.find(VD);
2737  if (I != LocalDeclMap.end()) {
2738  LValue CapLVal;
2739  if (VD->getType()->isReferenceType())
2740  CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
2741  AlignmentSource::Decl);
2742  else
2743  CapLVal = MakeAddrLValue(I->second, T);
2744  // Mark lvalue as nontemporal if the variable is marked as nontemporal
2745  // in simd context.
2746  if (getLangOpts().OpenMP &&
2747  CGM.getOpenMPRuntime().isNontemporalDecl(VD))
2748  CapLVal.setNontemporal(/*Value=*/true);
2749  return CapLVal;
2750  }
2751  LValue CapLVal =
2752  EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
2753  CapturedStmtInfo->getContextValue());
2754  CapLVal = MakeAddrLValue(
2755  Address(CapLVal.getPointer(*this), getContext().getDeclAlign(VD)),
2756  CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl),
2757  CapLVal.getTBAAInfo());
2758  // Mark lvalue as nontemporal if the variable is marked as nontemporal
2759  // in simd context.
2760  if (getLangOpts().OpenMP &&
2761  CGM.getOpenMPRuntime().isNontemporalDecl(VD))
2762  CapLVal.setNontemporal(/*Value=*/true);
2763  return CapLVal;
2764  }
2765 
2766  assert(isa<BlockDecl>(CurCodeDecl));
2767  Address addr = GetAddrOfBlockDecl(VD);
2768  return MakeAddrLValue(addr, T, AlignmentSource::Decl);
2769  }
2770  }
2771 
2772  // FIXME: We should be able to assert this for FunctionDecls as well!
2773  // FIXME: We should be able to assert this for all DeclRefExprs, not just
2774  // those with a valid source location.
2775  assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
2776  !E->getLocation().isValid()) &&
2777  "Should not use decl without marking it used!");
2778 
2779  if (ND->hasAttr<WeakRefAttr>()) {
2780  const auto *VD = cast<ValueDecl>(ND);
2781  ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
2782  return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
2783  }
2784 
2785  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
2786  // Check if this is a global variable.
2787  if (VD->hasLinkage() || VD->isStaticDataMember())
2788  return EmitGlobalVarDeclLValue(*this, E, VD);
2789 
2790  Address addr = Address::invalid();
2791 
2792  // The variable should generally be present in the local decl map.
2793  auto iter = LocalDeclMap.find(VD);
2794  if (iter != LocalDeclMap.end()) {
2795  addr = iter->second;
2796 
2797  // Otherwise, it might be static local we haven't emitted yet for
2798  // some reason; most likely, because it's in an outer function.
2799  } else if (VD->isStaticLocal()) {
2800  addr = Address(CGM.getOrCreateStaticVarDecl(
2801  *VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false)),
2802  getContext().getDeclAlign(VD));
2803 
2804  // No other cases for now.
2805  } else {
2806  llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
2807  }
2808 
2809 
2810  // Check for OpenMP threadprivate variables.
2811  if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
2812  VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2813  return EmitThreadPrivateVarDeclLValue(
2814  *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
2815  E->getExprLoc());
2816  }
2817 
2818  // Drill into block byref variables.
2819  bool isBlockByref = VD->isEscapingByref();
2820  if (isBlockByref) {
2821  addr = emitBlockByrefAddress(addr, VD);
2822  }
2823 
2824  // Drill into reference types.
2825  LValue LV = VD->getType()->isReferenceType() ?
2826  EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
2827  MakeAddrLValue(addr, T, AlignmentSource::Decl);
2828 
2829  bool isLocalStorage = VD->hasLocalStorage();
2830 
2831  bool NonGCable = isLocalStorage &&
2832  !VD->getType()->isReferenceType() &&
2833  !isBlockByref;
2834  if (NonGCable) {
2835  LV.getQuals().removeObjCGCAttr();
2836  LV.setNonGC(true);
2837  }
2838 
2839  bool isImpreciseLifetime =
2840  (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
2841  if (isImpreciseLifetime)
2842  LV.setARCPreciseLifetime(ARCImpreciseLifetime);
2843  setObjCGCLValueClass(getContext(), E, LV);
2844  return LV;
2845  }
2846 
2847  if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
2848  LValue LV = EmitFunctionDeclLValue(*this, E, FD);
2849 
2850  // Emit debuginfo for the function declaration if the target wants to.
2851  if (getContext().getTargetInfo().allowDebugInfoForExternalRef()) {
2852  if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) {
2853  auto *Fn =
2854  cast<llvm::Function>(LV.getPointer(*this)->stripPointerCasts());
2855  if (!Fn->getSubprogram())
2856  DI->EmitFunctionDecl(FD, FD->getLocation(), T, Fn);
2857  }
2858  }
2859 
2860  return LV;
2861  }
2862 
2863  // FIXME: While we're emitting a binding from an enclosing scope, all other
2864  // DeclRefExprs we see should be implicitly treated as if they also refer to
2865  // an enclosing scope.
2866  if (const auto *BD = dyn_cast<BindingDecl>(ND))
2867  return EmitLValue(BD->getBinding());
2868 
2869  // We can form DeclRefExprs naming GUID declarations when reconstituting
2870  // non-type template parameters into expressions.
2871  if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
2872  return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
2873  AlignmentSource::Decl);
2874 
2875  if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND))
2876  return MakeAddrLValue(CGM.GetAddrOfTemplateParamObject(TPO), T,
2877  AlignmentSource::Decl);
2878 
2879  llvm_unreachable("Unhandled DeclRefExpr");
2880 }
2881 
2882 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
2883  // __extension__ doesn't affect lvalue-ness.
2884  if (E->getOpcode() == UO_Extension)
2885  return EmitLValue(E->getSubExpr());
2886 
2887  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
2888  switch (E->getOpcode()) {
2889  default: llvm_unreachable("Unknown unary operator lvalue!");
2890  case UO_Deref: {
2891  QualType T = E->getSubExpr()->getType()->getPointeeType();
2892  assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
2893 
2894  LValueBaseInfo BaseInfo;
2895  TBAAAccessInfo TBAAInfo;
2896  Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
2897  &TBAAInfo);
2898  LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
2899  LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
2900 
2901  // We should not generate __weak write barrier on indirect reference
2902  // of a pointer to object; as in void foo (__weak id *param); *param = 0;
2903  // But, we continue to generate __strong write barrier on indirect write
2904  // into a pointer to object.
2905  if (getLangOpts().ObjC &&
2906  getLangOpts().getGC() != LangOptions::NonGC &&
2907  LV.isObjCWeak())
2908  LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
2909  return LV;
2910  }
2911  case UO_Real:
2912  case UO_Imag: {
2913  LValue LV = EmitLValue(E->getSubExpr());
2914  assert(LV.isSimple() && "real/imag on non-ordinary l-value");
2915 
2916  // __real is valid on scalars. This is a faster way of testing that.
2917  // __imag can only produce an rvalue on scalars.
2918  if (E->getOpcode() == UO_Real &&
2919  !LV.getAddress(*this).getElementType()->isStructTy()) {
2920  assert(E->getSubExpr()->getType()->isArithmeticType());
2921  return LV;
2922  }
2923 
2924  QualType T = ExprTy->castAs<ComplexType>()->getElementType();
2925 
2926  Address Component =
2927  (E->getOpcode() == UO_Real
2928  ? emitAddrOfRealComponent(LV.getAddress(*this), LV.getType())
2929  : emitAddrOfImagComponent(LV.getAddress(*this), LV.getType()));
2930  LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
2931  CGM.getTBAAInfoForSubobject(LV, T));
2932  ElemLV.getQuals().addQualifiers(LV.getQuals());
2933  return ElemLV;
2934  }
2935  case UO_PreInc:
2936  case UO_PreDec: {
2937  LValue LV = EmitLValue(E->getSubExpr());
2938  bool isInc = E->getOpcode() == UO_PreInc;
2939 
2940  if (E->getType()->isAnyComplexType())
2941  EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
2942  else
2943  EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
2944  return LV;
2945  }
2946  }
2947 }
2948 
2949 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
2950  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
2951  E->getType(), AlignmentSource::Decl);
2952 }
2953 
2954 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
2955  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
2956  E->getType(), AlignmentSource::Decl);
2957 }
2958 
2959 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
2960  auto SL = E->getFunctionName();
2961  assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
2962  StringRef FnName = CurFn->getName();
2963  if (FnName.startswith("\01"))
2964  FnName = FnName.substr(1);
2965  StringRef NameItems[] = {
2966  PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
2967  std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
2968  if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
2969  std::string Name = std::string(SL->getString());
2970  if (!Name.empty()) {
2971  unsigned Discriminator =
2972  CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
2973  if (Discriminator)
2974  Name += "_" + Twine(Discriminator + 1).str();
2975  auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
2976  return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
2977  } else {
2978  auto C =
2979  CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
2980  return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
2981  }
2982  }
2983  auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
2984  return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
2985 }
2986 
2987 /// Emit a type description suitable for use by a runtime sanitizer library. The
2988 /// format of a type descriptor is
2989 ///
2990 /// \code
2991 /// { i16 TypeKind, i16 TypeInfo }
2992 /// \endcode
2993 ///
2994 /// followed by an array of i8 containing the type name. TypeKind is 0 for an
2995 /// integer, 1 for a floating point value, and -1 for anything else.
2996 llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
2997  // Only emit each type's descriptor once.
2998  if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
2999  return C;
3000 
3001  uint16_t TypeKind = -1;
3002  uint16_t TypeInfo = 0;
3003 
3004  if (T->isIntegerType()) {
3005  TypeKind = 0;
3006  TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3007  (T->isSignedIntegerType() ? 1 : 0);
3008  } else if (T->isFloatingType()) {
3009  TypeKind = 1;
3010  TypeInfo = getContext().getTypeSize(T);
3011  }
3012 
3013  // Format the type name as if for a diagnostic, including quotes and
3014  // optionally an 'aka'.
3015  SmallString<32> Buffer;
3016  CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
3017  (intptr_t)T.getAsOpaquePtr(),
3018  StringRef(), StringRef(), None, Buffer,
3019  None);
3020 
3021  llvm::Constant *Components[] = {
3022  Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3023  llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3024  };
3025  llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3026 
3027  auto *GV = new llvm::GlobalVariable(
3028  CGM.getModule(), Descriptor->getType(),
3029  /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3030  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3031  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
3032 
3033  // Remember the descriptor for this type.
3034  CGM.setTypeDescriptorInMap(T, GV);
3035 
3036  return GV;
3037 }
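// Worked example (illustrative): for a 32-bit signed 'int',
//   TypeKind = 0 and TypeInfo = (Log2_32(32) << 1) | 1 = 11,
// so the emitted descriptor is { i16 0, i16 11 } followed by the
// diagnostic-formatted name "'int'".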
3038 
3039 llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3040  llvm::Type *TargetTy = IntPtrTy;
3041 
3042  if (V->getType() == TargetTy)
3043  return V;
3044 
3045  // Floating-point types which fit into intptr_t are bitcast to integers
3046  // and then passed directly (after zero-extension, if necessary).
3047  if (V->getType()->isFloatingPointTy()) {
3048  unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedSize();
3049  if (Bits <= TargetTy->getIntegerBitWidth())
3050  V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
3051  Bits));
3052  }
3053 
3054  // Integers which fit in intptr_t are zero-extended and passed directly.
3055  if (V->getType()->isIntegerTy() &&
3056  V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
3057  return Builder.CreateZExt(V, TargetTy);
3058 
3059  // Pointers are passed directly, everything else is passed by address.
3060  if (!V->getType()->isPointerTy()) {
3061  Address Ptr = CreateDefaultAlignTempAlloca(V->getType());
3062  Builder.CreateStore(V, Ptr);
3063  V = Ptr.getPointer();
3064  }
3065  return Builder.CreatePtrToInt(V, TargetTy);
3066 }
3067 
3068 /// Emit a representation of a SourceLocation for passing to a handler
3069 /// in a sanitizer runtime library. The format for this data is:
3070 /// \code
3071 /// struct SourceLocation {
3072 /// const char *Filename;
3073 /// int32_t Line, Column;
3074 /// };
3075 /// \endcode
3076 /// For an invalid SourceLocation, the Filename pointer is null.
3077 llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
3078  llvm::Constant *Filename;
3079  int Line, Column;
3080 
3081  PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
3082  if (PLoc.isValid()) {
3083  StringRef FilenameString = PLoc.getFilename();
3084 
3085  int PathComponentsToStrip =
3086  CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
3087  if (PathComponentsToStrip < 0) {
3088  assert(PathComponentsToStrip != INT_MIN);
3089  int PathComponentsToKeep = -PathComponentsToStrip;
3090  auto I = llvm::sys::path::rbegin(FilenameString);
3091  auto E = llvm::sys::path::rend(FilenameString);
3092  while (I != E && --PathComponentsToKeep)
3093  ++I;
3094 
3095  FilenameString = FilenameString.substr(I - E);
3096  } else if (PathComponentsToStrip > 0) {
3097  auto I = llvm::sys::path::begin(FilenameString);
3098  auto E = llvm::sys::path::end(FilenameString);
3099  while (I != E && PathComponentsToStrip--)
3100  ++I;
3101 
3102  if (I != E)
3103  FilenameString =
3104  FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
3105  else
3106  FilenameString = llvm::sys::path::filename(FilenameString);
3107  }
3108 
3109  auto FilenameGV =
3110  CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
3111  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
3112  cast<llvm::GlobalVariable>(FilenameGV.getPointer()));
3113  Filename = FilenameGV.getPointer();
3114  Line = PLoc.getLine();
3115  Column = PLoc.getColumn();
3116  } else {
3117  Filename = llvm::Constant::getNullValue(Int8PtrTy);
3118  Line = Column = 0;
3119  }
3120 
3121  llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
3122  Builder.getInt32(Column)};
3123 
3124  return llvm::ConstantStruct::getAnon(Data);
3125 }
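// Path-stripping example (illustrative): with
// -fsanitize-undefined-strip-path-components=-2, the file
// "/usr/local/src/proj/lib/a.c" keeps only its last two components and is
// recorded as "lib/a.c"; a positive value instead strips that many leading
// components.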
3126 
3127 namespace {
3128 /// Specify under what conditions this check can be recovered
3129 enum class CheckRecoverableKind {
3130  /// Always terminate program execution if this check fails.
3131  Unrecoverable,
3132  /// Check supports recovering, runtime has both fatal (noreturn) and
3133  /// non-fatal handlers for this check.
3134  Recoverable,
3135  /// Runtime conditionally aborts, always need to support recovery.
3136  AlwaysRecoverable
3137 };
3138 }
3139 
3140 static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
3141  assert(Kind.countPopulation() == 1);
3142  if (Kind == SanitizerKind::Function || Kind == SanitizerKind::Vptr)
3143  return CheckRecoverableKind::AlwaysRecoverable;
3144  else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
3145  return CheckRecoverableKind::Unrecoverable;
3146  else
3147  return CheckRecoverableKind::Recoverable;
3148 }
3149 
3150 namespace {
3151 struct SanitizerHandlerInfo {
3152  char const *const Name;
3153  unsigned Version;
3154 };
3155 }
3156 
3157 const SanitizerHandlerInfo SanitizerHandlers[] = {
3158 #define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
3159  LIST_SANITIZER_CHECKS
3160 #undef SANITIZER_CHECK
3161 };
3162 
3163 static void emitCheckHandlerCall(CodeGenFunction &CGF,
3164  llvm::FunctionType *FnType,
3165  ArrayRef<llvm::Value *> FnArgs,
3166  SanitizerHandler CheckHandler,
3167  CheckRecoverableKind RecoverKind, bool IsFatal,
3168  llvm::BasicBlock *ContBB) {
3169  assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
3170  Optional<ApplyDebugLocation> DL;
3171  if (!CGF.Builder.getCurrentDebugLocation()) {
3172  // Ensure that the call has at least an artificial debug location.
3173  DL.emplace(CGF, SourceLocation());
3174  }
3175  bool NeedsAbortSuffix =
3176  IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
3177  bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
3178  const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
3179  const StringRef CheckName = CheckInfo.Name;
3180  std::string FnName = "__ubsan_handle_" + CheckName.str();
3181  if (CheckInfo.Version && !MinimalRuntime)
3182  FnName += "_v" + llvm::utostr(CheckInfo.Version);
3183  if (MinimalRuntime)
3184  FnName += "_minimal";
3185  if (NeedsAbortSuffix)
3186  FnName += "_abort";
3187  bool MayReturn =
3188  !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
3189 
3190  llvm::AttrBuilder B;
3191  if (!MayReturn) {
3192  B.addAttribute(llvm::Attribute::NoReturn)
3193  .addAttribute(llvm::Attribute::NoUnwind);
3194  }
3195  B.addAttribute(llvm::Attribute::UWTable);
3196 
3197  llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
3198  FnType, FnName,
3199  llvm::AttributeList::get(CGF.getLLVMContext(),
3200  llvm::AttributeList::FunctionIndex, B),
3201  /*Local=*/true);
3202  llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
3203  if (!MayReturn) {
3204  HandlerCall->setDoesNotReturn();
3205  CGF.Builder.CreateUnreachable();
3206  } else {
3207  CGF.Builder.CreateBr(ContBB);
3208  }
3209 }
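// How the name assembly above plays out in practice (examples assume the
// handler table in SanitizerHandler.h): the TypeMismatch handler, which has
// Version == 1, yields "__ubsan_handle_type_mismatch_v1"; its fatal variant
// is "__ubsan_handle_type_mismatch_v1_abort"; and under
// -fsanitize-minimal-runtime it becomes "__ubsan_handle_type_mismatch_minimal".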
3210 
3211 void CodeGenFunction::EmitCheck(
3212  ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
3213  SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
3214  ArrayRef<llvm::Value *> DynamicArgs) {
3215  assert(IsSanitizerScope);
3216  assert(Checked.size() > 0);
3217  assert(CheckHandler >= 0 &&
3218  size_t(CheckHandler) < llvm::array_lengthof(SanitizerHandlers));
3219  const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
3220 
3221  llvm::Value *FatalCond = nullptr;
3222  llvm::Value *RecoverableCond = nullptr;
3223  llvm::Value *TrapCond = nullptr;
3224  for (int i = 0, n = Checked.size(); i < n; ++i) {
3225  llvm::Value *Check = Checked[i].first;
3226  // -fsanitize-trap= overrides -fsanitize-recover=.
3227  llvm::Value *&Cond =
3228  CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)
3229  ? TrapCond
3230  : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
3231  ? RecoverableCond
3232  : FatalCond;
3233  Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
3234  }
3235 
3236  if (TrapCond)
3237  EmitTrapCheck(TrapCond, CheckHandler);
3238  if (!FatalCond && !RecoverableCond)
3239  return;
3240 
3241  llvm::Value *JointCond;
3242  if (FatalCond && RecoverableCond)
3243  JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
3244  else
3245  JointCond = FatalCond ? FatalCond : RecoverableCond;
3246  assert(JointCond);
3247 
3248  CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
3249  assert(SanOpts.has(Checked[0].second));
3250 #ifndef NDEBUG
3251  for (int i = 1, n = Checked.size(); i < n; ++i) {
3252  assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
3253  "All recoverable kinds in a single check must be same!");
3254  assert(SanOpts.has(Checked[i].second));
3255  }
3256 #endif
3257 
3258  llvm::BasicBlock *Cont = createBasicBlock("cont");
3259  llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
3260  llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
3261  // Give a hint that we very much don't expect to execute the handler.
3262  // The value is chosen to match UR_NONTAKEN_WEIGHT; see BranchProbabilityInfo.cpp.
3263  llvm::MDBuilder MDHelper(getLLVMContext());
3264  llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
3265  Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
3266  EmitBlock(Handlers);
3267 
3268  // Handler functions take an i8* pointing to the (handler-specific) static
3269  // information block, followed by a sequence of intptr_t arguments
3270  // representing operand values.
3271  SmallVector<llvm::Value *, 4> Args;
3272  SmallVector<llvm::Type *, 4> ArgTypes;
3273  if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
3274  Args.reserve(DynamicArgs.size() + 1);
3275  ArgTypes.reserve(DynamicArgs.size() + 1);
3276 
3277  // Emit handler arguments and create handler function type.
3278  if (!StaticArgs.empty()) {
3279  llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3280  auto *InfoPtr =
3281  new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3282  llvm::GlobalVariable::PrivateLinkage, Info);
3283  InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3284  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3285  Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy));
3286  ArgTypes.push_back(Int8PtrTy);
3287  }
3288 
3289  for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
3290  Args.push_back(EmitCheckValue(DynamicArgs[i]));
3291  ArgTypes.push_back(IntPtrTy);
3292  }
3293  }
3294 
3295  llvm::FunctionType *FnType =
3296  llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
3297 
3298  if (!FatalCond || !RecoverableCond) {
3299  // Simple case: we need to generate a single handler call, either
3300  // fatal or non-fatal.
3301  emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
3302  (FatalCond != nullptr), Cont);
3303  } else {
3304  // Emit two handler calls: the first for the set of unrecoverable checks,
3305  // the second for the recoverable ones.
3306  llvm::BasicBlock *NonFatalHandlerBB =
3307  createBasicBlock("non_fatal." + CheckName);
3308  llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
3309  Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
3310  EmitBlock(FatalHandlerBB);
3311  emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
3312  NonFatalHandlerBB);
3313  EmitBlock(NonFatalHandlerBB);
3314  emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
3315  Cont);
3316  }
3317 
3318  EmitBlock(Cont);
3319 }
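// Sketch of a typical call site (the operand names here are illustrative):
// inside a SanitizerScope, a caller that has computed a "check passes"
// condition might emit
//   EmitCheck(std::make_pair(Cond, SanitizerKind::IntegerDivideByZero),
//             SanitizerHandler::DivremOverflow, StaticData, {LHS, RHS});
// so the handler block is reached only when Cond evaluates to false.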
3320 
3321 void CodeGenFunction::EmitCfiSlowPathCheck(
3322  SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId,
3323  llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) {
3324  llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
3325 
3326  llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
3327  llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
3328 
3329  llvm::MDBuilder MDHelper(getLLVMContext());
3330  llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
3331  BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
3332 
3333  EmitBlock(CheckBB);
3334 
3335  bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);
3336 
3337  llvm::CallInst *CheckCall;
3338  llvm::FunctionCallee SlowPathFn;
3339  if (WithDiag) {
3340  llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3341  auto *InfoPtr =
3342  new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3343  llvm::GlobalVariable::PrivateLinkage, Info);
3344  InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3345  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3346 
3347  SlowPathFn = CGM.getModule().getOrInsertFunction(
3348  "__cfi_slowpath_diag",
3349  llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
3350  false));
3351  CheckCall = Builder.CreateCall(
3352  SlowPathFn, {TypeId, Ptr, Builder.CreateBitCast(InfoPtr, Int8PtrTy)});
3353  } else {
3354  SlowPathFn = CGM.getModule().getOrInsertFunction(
3355  "__cfi_slowpath",
3356  llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
3357  CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
3358  }
3359 
3360  CGM.setDSOLocal(
3361  cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
3362  CheckCall->setDoesNotThrow();
3363 
3364  EmitBlock(Cont);
3365 }
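// For reference, the two runtime entry points used above have the following
// C prototypes in the cross-DSO CFI design documentation:
//   void __cfi_slowpath(uint64 CallSiteTypeId, void *Ptr);
//   void __cfi_slowpath_diag(uint64 CallSiteTypeId, void *Ptr, void *DiagData);
// where DiagData points at the anonymous constant built from StaticArgs.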
3366 
3367 // Emit a stub for __cfi_check function so that the linker knows about this
3368 // symbol in LTO mode.
3369 void CodeGenFunction::EmitCfiCheckStub() {
3370  llvm::Module *M = &CGM.getModule();
3371  auto &Ctx = M->getContext();
3372  llvm::Function *F = llvm::Function::Create(
3373  llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false),
3374  llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
3375  CGM.setDSOLocal(F);
3376  llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
3377  // FIXME: consider emitting an intrinsic call like
3378  // call void @llvm.cfi_check(i64 %0, i8* %1, i8* %2)
3379  // which can be lowered in CrossDSOCFI pass to the actual contents of
3380  // __cfi_check. This would allow inlining of __cfi_check calls.
3381  llvm::CallInst::Create(
3382  llvm::Intrinsic::getDeclaration(M, llvm::Intrinsic::trap), "", BB);
3383  llvm::ReturnInst::Create(Ctx, nullptr, BB);
3384 }
3385 
3386 // This function is basically a switch over the CFI failure kind, which is
3387 // extracted from CFICheckFailData (1st function argument). Each case is either
3388 // llvm.trap or a call to one of the two runtime handlers, based on
3389 // -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
3390 // failure kind) traps, but this should really never happen. CFICheckFailData
3391 // can be nullptr if the calling module has -fsanitize-trap behavior for this
3392 // check kind; in this case __cfi_check_fail traps as well.
3393 void CodeGenFunction::EmitCfiCheckFail() {
3394  SanitizerScope SanScope(this);
3395  FunctionArgList Args;
3396  ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
3397  ImplicitParamDecl::Other);
3398  ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
3399  ImplicitParamDecl::Other);
3400  Args.push_back(&ArgData);
3401  Args.push_back(&ArgAddr);
3402 
3403  const CGFunctionInfo &FI =
3404  CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args);
3405 
3406  llvm::Function *F = llvm::Function::Create(
3407  llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
3408  llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
3409 
3410  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3411  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
3412  F->setVisibility(llvm::GlobalValue::HiddenVisibility);
3413 
3414  StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
3415  SourceLocation());
3416 
3417  // This function is not affected by NoSanitizeList. This function does
3418  // not have a source location, but "src:*" would still apply. Revert any
3419  // changes to SanOpts made in StartFunction.
3420  SanOpts = CGM.getLangOpts().Sanitize;
3421 
3422  llvm::Value *Data =
3423  EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
3424  CGM.getContext().VoidPtrTy, ArgData.getLocation());
3425  llvm::Value *Addr =
3426  EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
3427  CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
3428 
3429  // Data == nullptr means the calling module has trap behavior for this check.
3430  llvm::Value *DataIsNotNullPtr =
3431  Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
3432  EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);
3433 
3434  llvm::StructType *SourceLocationTy =
3435  llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
3436  llvm::StructType *CfiCheckFailDataTy =
3437  llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
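  // In C terms, the layout being decoded corresponds to (a sketch matching
  // the UBSan runtime's CFICheckFailData; field names are illustrative):
  //   struct SourceLocation { char *Filename; u32 Line; u32 Column; };
  //   struct CfiCheckFailData { u8 CheckKind; SourceLocation Loc; void *Type; };
  // Only CheckKind is inspected here; Data and Addr are forwarded whole.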
3438 
3439  llvm::Value *V = Builder.CreateConstGEP2_32(
3440  CfiCheckFailDataTy,
3441  Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
3442  0);
3443  Address CheckKindAddr(V, getIntAlign());
3444  llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
3445 
3446  llvm::Value *AllVtables = llvm::MetadataAsValue::get(
3447  CGM.getLLVMContext(),
3448  llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
3449  llvm::Value *ValidVtable = Builder.CreateZExt(
3450  Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
3451  {Addr, AllVtables}),
3452  IntPtrTy);
3453 
3454  const std::pair<int, SanitizerMask> CheckKinds[] = {
3455  {CFITCK_VCall, SanitizerKind::CFIVCall},
3456  {CFITCK_NVCall, SanitizerKind::CFINVCall},
3457  {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast},
3458  {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast},
3459  {CFITCK_ICall, SanitizerKind::CFIICall}};
3460 
3461  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks;
3462  for (auto CheckKindMaskPair : CheckKinds) {
3463  int Kind = CheckKindMaskPair.first;
3464  SanitizerMask Mask = CheckKindMaskPair.second;
3465  llvm::Value *Cond =
3466  Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
3467  if (CGM.getLangOpts().Sanitize.has(Mask))
3468  EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
3469  {Data, Addr, ValidVtable});
3470  else
3471  EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
3472  }
3473 
3474  FinishFunction();
3475  // The only reference to this function will be created during LTO link.
3476  // Make sure it survives until then.
3477  CGM.addUsedGlobal(F);
3478 }
3479 
3480 void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
3481  if (SanOpts.has(SanitizerKind::Unreachable)) {
3482  SanitizerScope SanScope(this);
3483  EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
3484  SanitizerKind::Unreachable),
3485  SanitizerHandler::BuiltinUnreachable,
3486  EmitCheckSourceLocation(Loc), None);
3487  }
3488  Builder.CreateUnreachable();
3489 }
3490 
3491 void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
3492  SanitizerHandler CheckHandlerID) {
3493  llvm::BasicBlock *Cont = createBasicBlock("cont");
3494 
3495  // If we're optimizing, collapse all calls to trap down to just one per
3496  // check-type per function to save on code size.
3497  if (TrapBBs.size() <= CheckHandlerID)
3498  TrapBBs.resize(CheckHandlerID + 1);
3499  llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
3500 
3501  if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) {
3502  TrapBB = createBasicBlock("trap");
3503  Builder.CreateCondBr(Checked, Cont, TrapBB);
3504  EmitBlock(TrapBB);
3505 
3506  llvm::CallInst *TrapCall =
3507  Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
3508  llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));
3509 
3510  if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3511  auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3512  CGM.getCodeGenOpts().TrapFuncName);
3513  TrapCall->addFnAttr(A);
3514  }
3515  TrapCall->setDoesNotReturn();
3516  TrapCall->setDoesNotThrow();
3517  Builder.CreateUnreachable();
3518  } else {
3519  auto Call = TrapBB->begin();
3520  assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
3521 
3522  Call->applyMergedLocation(Call->getDebugLoc(),
3523  Builder.getCurrentDebugLocation());
3524  Builder.CreateCondBr(Checked, Cont, TrapBB);
3525  }
3526 
3527  EmitBlock(Cont);
3528 }
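// A note on the intrinsic used above: llvm.ubsantrap carries CheckHandlerID
// as an immediate operand, so each check kind traps with a distinguishable
// code, and the optimized path can still merge all traps that share an ID
// into the single reusable TrapBB.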
3529 
3530 llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
3531  llvm::CallInst *TrapCall =
3532  Builder.CreateCall(CGM.getIntrinsic(IntrID));
3533 
3534  if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3535  auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3536  CGM.getCodeGenOpts().TrapFuncName);
3537  TrapCall->addFnAttr(A);
3538  }
3539 
3540  return TrapCall;
3541 }
3542 
3543 Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
3544  LValueBaseInfo *BaseInfo,
3545  TBAAAccessInfo *TBAAInfo) {
3546  assert(E->getType()->isArrayType() &&
3547  "Array to pointer decay must have array source type!");
3548 
3549  // Expressions of array type can't be bitfields or vector elements.
3550  LValue LV = EmitLValue(E);
3551  Address Addr = LV.getAddress(*this);
3552 
3553  // If the array type was an incomplete type, we need to make sure
3554  // the decay ends up being the right type.
3555  llvm::Type *NewTy = ConvertType(E->getType());
3556  Addr = Builder.CreateElementBitCast(Addr, NewTy);
3557 
3558  // Note that VLA pointers are always decayed, so we don't need to do
3559  // anything here.
3560  if (!E->getType()->isVariableArrayType()) {
3561  assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3562  "Expected pointer to array");
3563  Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
3564  }
3565 
3566  // The result of this decay conversion points to an array element within the
3567  // base lvalue. However, since TBAA currently does not support representing
3568  // accesses to elements of member arrays, we conservatively represent accesses
3569  // to the pointee object as if no base lvalue were specified.
3570  // TODO: Support TBAA for member arrays.
3571  QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
3572  if (BaseInfo) *BaseInfo = LV.getBaseInfo();
3573  if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
3574 
3575  return Builder.CreateElementBitCast(Addr, ConvertTypeForMem(EltType));
3576 }
3577 
3578 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
3579 /// array to pointer, return the array subexpression.
3580 static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
3581  // If this isn't just an array->pointer decay, bail out.
3582  const auto *CE = dyn_cast<CastExpr>(E);
3583  if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
3584  return nullptr;
3585 
3586  // If this is a decay from a variable-width array, bail out.
3587  const Expr *SubExpr = CE->getSubExpr();
3588  if (SubExpr->getType()->isVariableArrayType())
3589  return nullptr;
3590 
3591  return SubExpr;
3592 }
3593 
3594 static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
3595  llvm::Type *elemType,
3596  llvm::Value *ptr,
3597  ArrayRef<llvm::Value*> indices,
3598  bool inbounds,
3599  bool signedIndices,
3600  SourceLocation loc,
3601  const llvm::Twine &name = "arrayidx") {
3602  if (inbounds) {
3603  return CGF.EmitCheckedInBoundsGEP(ptr, indices, signedIndices,
3604  CodeGenFunction::NotSubtraction, loc,
3605  name);
3606  } else {
3607  return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
3608  }
3609 }
3610 
3611 static CharUnits getArrayElementAlign(CharUnits arrayAlign,
3612  llvm::Value *idx,
3613  CharUnits eltSize) {
3614  // If we have a constant index, we can use the exact offset of the
3615  // element we're accessing.
3616  if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
3617  CharUnits offset = constantIdx->getZExtValue() * eltSize;
3618  return arrayAlign.alignmentAtOffset(offset);
3619 
3620  // Otherwise, use the worst-case alignment for any element.
3621  } else {
3622  return arrayAlign.alignmentOfArrayElement(eltSize);
3623  }
3624 }
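// Worked example (illustrative): for an array aligned to 16 bytes with
// 4-byte elements, a constant index of 4 gives offset 16 and hence alignment
// 16, a constant index of 1 gives offset 4 and hence alignment 4, and an
// unknown index falls back to the worst case over all elements, here 4 bytes.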
3625 
3626 static QualType getFixedSizeElementType(const ASTContext &ctx,
3627  const VariableArrayType *vla) {
3628  QualType eltType;
3629  do {
3630  eltType = vla->getElementType();
3631  } while ((vla = ctx.getAsVariableArrayType(eltType)));
3632  return eltType;
3633 }
3634 
3635 /// Given an array base, check whether its member access belongs to a record
3636 /// with preserve_access_index attribute or not.
3637 static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
3638  if (!ArrayBase || !CGF.getDebugInfo())
3639  return false;
3640 
3641  // Only support base as either a MemberExpr or DeclRefExpr.
3642  // DeclRefExpr to cover cases like:
3643  // struct s { int a; int b[10]; };
3644  // struct s *p;
3645  // p[1].a
3646  // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
3647  // p->b[5] is a MemberExpr example.
3648  const Expr *E = ArrayBase->IgnoreImpCasts();
3649  if (const auto *ME = dyn_cast<MemberExpr>(E))
3650  return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
3651 
3652  if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
3653  const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
3654  if (!VarDef)
3655  return false;
3656 
3657  const auto *PtrT = VarDef->getType()->getAs<PointerType>();
3658  if (!PtrT)
3659  return false;
3660 
3661  const auto *PointeeT = PtrT->getPointeeType()
3662  ->getUnqualifiedDesugaredType();
3663  if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
3664  return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
3665  return false;
3666  }
3667 
3668  return false;
3669 }
3670 
3671 static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
3672  ArrayRef<llvm::Value *> indices,
3673  QualType eltType, bool inbounds,
3674  bool signedIndices, SourceLocation loc,
3675  QualType *arrayType = nullptr,
3676  const Expr *Base = nullptr,
3677  const llvm::Twine &name = "arrayidx") {
3678  // All the indices except the last must be zero.
3679 #ifndef NDEBUG
3680  for (auto idx : indices.drop_back())
3681  assert(isa<llvm::ConstantInt>(idx) &&
3682  cast<llvm::ConstantInt>(idx)->isZero());
3683 #endif
3684 
3685  // Determine the element size of the statically-sized base. This is
3686  // the thing that the indices are expressed in terms of.
3687  if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
3688  eltType = getFixedSizeElementType(CGF.getContext(), vla);
3689  }
3690 
3691  // We can use that to compute the best alignment of the element.
3692  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
3693  CharUnits eltAlign =
3694  getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
3695 
3696  llvm::Value *eltPtr;
3697  auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
3698  if (!LastIndex ||
3699  (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {
3700  eltPtr = emitArraySubscriptGEP(
3701  CGF, addr.getElementType(), addr.getPointer(), indices, inbounds,
3702  signedIndices, loc, name);
3703  } else {
3704  // Remember the original array subscript for bpf target
3705  unsigned idx = LastIndex->getZExtValue();
3706  llvm::DIType *DbgInfo = nullptr;
3707  if (arrayType)
3708  DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
3709  eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(addr.getElementType(),
3710  addr.getPointer(),
3711  indices.size() - 1,
3712  idx, DbgInfo);
3713  }
3714 
3715  return Address(eltPtr, eltAlign);
3716 }
3717 
3718 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
3719  bool Accessed) {
3720  // The index must always be an integer, which is not an aggregate. Emit it
3721  // in lexical order (this complexity is, sadly, required by C++17).
3722  llvm::Value *IdxPre =
3723  (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
3724  bool SignedIndices = false;
3725  auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
3726  auto *Idx = IdxPre;
3727  if (E->getLHS() != E->getIdx()) {
3728  assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
3729  Idx = EmitScalarExpr(E->getIdx());
3730  }
3731 
3732  QualType IdxTy = E->getIdx()->getType();
3733  bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
3734  SignedIndices |= IdxSigned;
3735 
3736  if (SanOpts.has(SanitizerKind::ArrayBounds))
3737  EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
3738 
3739  // Extend or truncate the index type to 32 or 64 bits.
3740  if (Promote && Idx->getType() != IntPtrTy)
3741  Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
3742 
3743  return Idx;
3744  };
3745  IdxPre = nullptr;
3746 
3747  // If the base is a vector type, then we are forming a vector element lvalue
3748  // with this subscript.
3749  if (E->getBase()->getType()->isVectorType() &&
3750  !isa<ExtVectorElementExpr>(E->getBase())) {
3751  // Emit the vector as an lvalue to get its address.
3752  LValue LHS = EmitLValue(E->getBase());
3753  auto *Idx = EmitIdxAfterBase(/*Promote*/false);
3754  assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
3755  return LValue::MakeVectorElt(LHS.getAddress(*this), Idx,
3756  E->getBase()->getType(), LHS.getBaseInfo(),
3757  TBAAAccessInfo());
3758  }
3759 
3760  // All the other cases basically behave like simple offsetting.
3761 
3762  // Handle the extvector case we ignored above.
3763  if (isa<ExtVectorElementExpr>(E->getBase())) {
3764  LValue LV = EmitLValue(E->getBase());
3765  auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3766  Address Addr = EmitExtVectorElementLValue(LV);
3767 
3768  QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
3769  Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
3770  SignedIndices, E->getExprLoc());
3771  return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
3772  CGM.getTBAAInfoForSubobject(LV, EltType));
3773  }
3774 
3775  LValueBaseInfo EltBaseInfo;
3776  TBAAAccessInfo EltTBAAInfo;
3777  Address Addr = Address::invalid();
3778  if (const VariableArrayType *vla =
3779  getContext().getAsVariableArrayType(E->getType())) {
3780  // The base must be a pointer, which is not an aggregate. Emit
3781  // it. It needs to be emitted first in case it's what captures
3782  // the VLA bounds.
3783  Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
3784  auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3785 
3786  // The element count here is the total number of non-VLA elements.
3787  llvm::Value *numElements = getVLASize(vla).NumElts;
3788 
3789  // Effectively, the multiply by the VLA size is part of the GEP.
3790  // GEP indexes are signed, and scaling an index isn't permitted to
3791  // signed-overflow, so we use the same semantics for our explicit
3792  // multiply. We suppress this if overflow is not undefined behavior.
3793  if (getLangOpts().isSignedOverflowDefined()) {
3794  Idx = Builder.CreateMul(Idx, numElements);
3795  } else {
3796  Idx = Builder.CreateNSWMul(Idx, numElements);
3797  }
3798 
3799  Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
3800  !getLangOpts().isSignedOverflowDefined(),
3801  SignedIndices, E->getExprLoc());
3802 
3803  } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
3804  // Indexing over an interface, as in "NSString *P; P[4];"
3805 
3806  // Emit the base pointer.
3807  Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
3808  auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3809 
3810  CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
3811  llvm::Value *InterfaceSizeVal =
3812  llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
3813 
3814  llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
3815 
3816  // We don't necessarily build correct LLVM struct types for ObjC
3817  // interfaces, so we can't rely on GEP to do this scaling
3818  // correctly; instead we need to cast to i8*. FIXME: is this actually
3819  // true? A lot of other things in the fragile ABI would break...
3820  llvm::Type *OrigBaseTy = Addr.getType();
3821  Addr = Builder.CreateElementBitCast(Addr, Int8Ty);
3822 
3823  // Do the GEP.
3824  CharUnits EltAlign =
3825  getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
3826  llvm::Value *EltPtr =
3827  emitArraySubscriptGEP(*this, Addr.getElementType(), Addr.getPointer(),
3828  ScaledIdx, false, SignedIndices, E->getExprLoc());
3829  Addr = Address(EltPtr, EltAlign);
3830 
3831  // Cast back.
3832  Addr = Builder.CreateBitCast(Addr, OrigBaseTy);
3833  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
3834  // If this is A[i] where A is an array, the frontend will have decayed the
3835  // base to be a ArrayToPointerDecay implicit cast. While correct, it is
3836  // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
3837  // "gep x, i" here. Emit one "gep A, 0, i".
3838  assert(Array->getType()->isArrayType() &&
3839  "Array to pointer decay must have array source type!");
3840  LValue ArrayLV;
3841  // For simple multidimensional array indexing, set the 'accessed' flag for
3842  // better bounds-checking of the base expression.
3843  if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
3844  ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
3845  else
3846  ArrayLV = EmitLValue(Array);
3847  auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3848 
3849  // Propagate the alignment from the array itself to the result.
3850  QualType arrayType = Array->getType();
3851  Addr = emitArraySubscriptGEP(
3852  *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
3853  E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
3854  E->getExprLoc(), &arrayType, E->getBase());
3855  EltBaseInfo = ArrayLV.getBaseInfo();
3856  EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
3857  } else {
3858  // The base must be a pointer; emit it with an estimate of its alignment.
3859  Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
3860  auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3861  QualType ptrType = E->getBase()->getType();
3862  Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
3863  !getLangOpts().isSignedOverflowDefined(),
3864  SignedIndices, E->getExprLoc(), &ptrType,
3865  E->getBase());
3866  }
3867 
3868  LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
3869 
3870  if (getLangOpts().ObjC &&
3871  getLangOpts().getGC() != LangOptions::NonGC) {
3872  LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
3873  setObjCGCLValueClass(getContext(), E, LV);
3874  }
3875  return LV;
3876 }
3877 
3878 LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
3879  assert(
3880  !E->isIncomplete() &&
3881  "incomplete matrix subscript expressions should be rejected during Sema");
3882  LValue Base = EmitLValue(E->getBase());
3883  llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx());
3884  llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
3885  llvm::Value *NumRows = Builder.getIntN(
3886  RowIdx->getType()->getScalarSizeInBits(),
3887  E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
3888  llvm::Value *FinalIdx =
3889  Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
3890  return LValue::MakeMatrixElt(
3891  MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx,
3892  E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
3893 }
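// Matrix values are laid out in column-major order, which is why the
// flattened index above is ColIdx * NumRows + RowIdx; e.g. (illustrative)
// in a 4x3 matrix, the element at row 2, column 1 maps to 1 * 4 + 2 == 6.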
3894 
3895 static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
3896  LValueBaseInfo &BaseInfo,
3897  TBAAAccessInfo &TBAAInfo,
3898  QualType BaseTy, QualType ElTy,
3899  bool IsLowerBound) {
3900  LValue BaseLVal;
3901  if (auto *ASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParenImpCasts())) {
3902  BaseLVal = CGF.EmitOMPArraySectionExpr(ASE, IsLowerBound);
3903  if (BaseTy->isArrayType()) {
3904  Address Addr = BaseLVal.getAddress(CGF);
3905  BaseInfo = BaseLVal.getBaseInfo();
3906 
3907  // If the array type was an incomplete type, we need to make sure
3908  // the decay ends up being the right type.
3909  llvm::Type *NewTy = CGF.ConvertType(BaseTy);
3910  Addr = CGF.Builder.CreateElementBitCast(Addr, NewTy);
3911 
3912  // Note that VLA pointers are always decayed, so we don't need to do
3913  // anything here.
3914  if (!BaseTy->isVariableArrayType()) {
3915  assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3916  "Expected pointer to array");
3917  Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
3918  }
3919 
3920  return CGF.Builder.CreateElementBitCast(Addr,
3921  CGF.ConvertTypeForMem(ElTy));
3922  }
3923  LValueBaseInfo TypeBaseInfo;
3924  TBAAAccessInfo TypeTBAAInfo;
3925  CharUnits Align =
3926  CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
3927  BaseInfo.mergeForCast(TypeBaseInfo);
3928  TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
3929  return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)), Align);
3930  }
3931  return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
3932 }
3933 
3934 LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
3935  bool IsLowerBound) {
3936  QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(E->getBase());
3937  QualType ResultExprTy;
3938  if (auto *AT = getContext().getAsArrayType(BaseTy))
3939  ResultExprTy = AT->getElementType();
3940  else
3941  ResultExprTy = BaseTy->getPointeeType();
3942  llvm::Value *Idx = nullptr;
3943  if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
3944  // Requesting the lower or upper bound, but with no length provided and
3945  // no ':' symbol for the default length -> length = 1.
3946  // Idx = LowerBound ?: 0;
3947  if (auto *LowerBound = E->getLowerBound()) {
3948  Idx = Builder.CreateIntCast(
3949  EmitScalarExpr(LowerBound), IntPtrTy,
3950  LowerBound->getType()->hasSignedIntegerRepresentation());
3951  } else
3952  Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
3953  } else {
3954  // Try to emit the length or lower bound as a constant. If this is
3955  // possible, 1 is subtracted from the constant length or lower bound.
3956  // Otherwise, emit LLVM IR computing (LB + Len) - 1.
3957  auto &C = CGM.getContext();
3958  auto *Length = E->getLength();
3959  llvm::APSInt ConstLength;
3960  if (Length) {
3961  // Idx = LowerBound + Length - 1;
3962  if (Optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
3963  ConstLength = CL->zextOrTrunc(PointerWidthInBits);
3964  Length = nullptr;
3965  }
3966  auto *LowerBound = E->getLowerBound();
3967  llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
3968  if (LowerBound) {
3969  if (Optional<llvm::APSInt> LB = LowerBound->getIntegerConstantExpr(C)) {
3970  ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
3971  LowerBound = nullptr;
3972  }
3973  }
3974  if (!Length)
3975  --ConstLength;
3976  else if (!LowerBound)
3977  --ConstLowerBound;
3978 
3979  if (Length || LowerBound) {
3980  auto *LowerBoundVal =
3981  LowerBound
3982  ? Builder.CreateIntCast(
3983  EmitScalarExpr(LowerBound), IntPtrTy,
3984  LowerBound->getType()->hasSignedIntegerRepresentation())
3985  : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
3986  auto *LengthVal =
3987  Length
3988  ? Builder.CreateIntCast(
3989  EmitScalarExpr(Length), IntPtrTy,
3990  Length->getType()->hasSignedIntegerRepresentation())
3991  : llvm::ConstantInt::get(IntPtrTy, ConstLength);
3992  Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
3993  /*HasNUW=*/false,
3994  !getLangOpts().isSignedOverflowDefined());
3995  if (Length && LowerBound) {
3996  Idx = Builder.CreateSub(
3997  Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
3998  /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
3999  }
4000  } else
4001  Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
4002  } else {
4003  // Idx = ArraySize - 1;
4004  QualType ArrayTy = BaseTy->isPointerType()
4005  ? E->getBase()->IgnoreParenImpCasts()->getType()
4006  : BaseTy;
4007  if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
4008  Length = VAT->getSizeExpr();
4009  if (Optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
4010  ConstLength = *L;
4011  Length = nullptr;
4012  }
4013  } else {
4014  auto *CAT = C.getAsConstantArrayType(ArrayTy);
4015  ConstLength = CAT->getSize();
4016  }
4017  if (Length) {
4018  auto *LengthVal = Builder.CreateIntCast(
4019  EmitScalarExpr(Length), IntPtrTy,
4020  Length->getType()->hasSignedIntegerRepresentation());
4021  Idx = Builder.CreateSub(
4022  LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
4023  /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
4024  } else {
4025  ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
4026  --ConstLength;
4027  Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
4028  }
4029  }
4030  }
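  // Worked example (illustrative): for a section like a[2:5], requesting the
  // upper bound yields Idx = 2 + 5 - 1 == 6; when both operands are integer
  // constants, the arithmetic above is folded and Idx is a plain constant.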
4031  assert(Idx);
4032 
4033  Address EltPtr = Address::invalid();
4034  LValueBaseInfo BaseInfo;
4035  TBAAAccessInfo TBAAInfo;
4036  if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
4037  // The base must be a pointer, which is not an aggregate. Emit
4038  // it. It needs to be emitted first in case it's what captures
4039  // the VLA bounds.
4040  Address Base =
4041  emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
4042  BaseTy, VLA->getElementType(), IsLowerBound);
4043  // The element count here is the total number of non-VLA elements.
4044  llvm::Value *NumElements = getVLASize(VLA).NumElts;
4045 
4046  // Effectively, the multiply by the VLA size is part of the GEP.
4047  // GEP indexes are signed, and scaling an index isn't permitted to
4048  // signed-overflow, so we use the same semantics for our explicit
4049  // multiply. We suppress this if overflow is not undefined behavior.
4050  if (getLangOpts().isSignedOverflowDefined())
4051  Idx = Builder.CreateMul(Idx, NumElements);
4052  else
4053  Idx = Builder.CreateNSWMul(Idx, NumElements);
4054  EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
4055  !getLangOpts().isSignedOverflowDefined(),
4056  /*signedIndices=*/false, E->getExprLoc());
4057  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4058  // If this is A[i] where A is an array, the frontend will have decayed the
4059  // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4060  // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4061  // "gep x, i" here. Emit one "gep A, 0, i".
4062  assert(Array->getType()->isArrayType() &&
4063  "Array to pointer decay must have array source type!");
4064  LValue ArrayLV;
4065  // For simple multidimensional array indexing, set the 'accessed' flag for
4066  // better bounds-checking of the base expression.
4067  if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4068  ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4069  else
4070  ArrayLV = EmitLValue(Array);
4071 
4072  // Propagate the alignment from the array itself to the result.
4073  EltPtr = emitArraySubscriptGEP(
4074  *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
4075  ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
4076  /*signedIndices=*/false, E->getExprLoc());
4077  BaseInfo = ArrayLV.getBaseInfo();
4078  TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
4079  } else {
4080  Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo,
4081  TBAAInfo, BaseTy, ResultExprTy,
4082  IsLowerBound);
4083  EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
4084  !getLangOpts().isSignedOverflowDefined(),
4085  /*signedIndices=*/false, E->getExprLoc());
4086  }
4087 
4088  return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
4089 }
4090 
4091 LValue CodeGenFunction::
4092 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
4093  // Emit the base vector as an l-value.
4094  LValue Base;
4095 
4096  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
4097  if (E->isArrow()) {
4098  // If it is a pointer to a vector, emit the address and form an lvalue with
4099  // it.
4100  LValueBaseInfo BaseInfo;
4101  TBAAAccessInfo TBAAInfo;
4102  Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
4103  const auto *PT = E->getBase()->getType()->castAs<PointerType>();
4104  Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
4105  Base.getQuals().removeObjCGCAttr();
4106  } else if (E->getBase()->isGLValue()) {
4107  // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
4108  // emit the base as an lvalue.
4109  assert(E->getBase()->getType()->isVectorType());
4110  Base = EmitLValue(E->getBase());
4111  } else {
4112  // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
4113  assert(E->getBase()->getType()->isVectorType() &&
4114  "Result must be a vector");
4115  llvm::Value *Vec = EmitScalarExpr(E->getBase());
4116 
4117  // Store the vector to memory (because LValue wants an address).
4118  Address VecMem = CreateMemTemp(E->getBase()->getType());
4119  Builder.CreateStore(Vec, VecMem);
4120  Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
4121  AlignmentSource::Decl);
4122  }
4123 
4124  QualType type =
4125  E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
4126 
4127  // Encode the element access list into a vector of unsigned indices.
4128  SmallVector<uint32_t, 4> Indices;
4129  E->getEncodedElementAccess(Indices);
4130 
4131  if (Base.isSimple()) {
4132  llvm::Constant *CV =
4133  llvm::ConstantDataVector::get(getLLVMContext(), Indices);
4134  return LValue::MakeExtVectorElt(Base.getAddress(*this), CV, type,
4135  Base.getBaseInfo(), TBAAAccessInfo());
4136  }
4137  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
4138 
4139  llvm::Constant *BaseElts = Base.getExtVectorElts();
4140  SmallVector<llvm::Constant *, 4> CElts;
4141 
4142  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
4143  CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
4144  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
4145  return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
4146  Base.getBaseInfo(), TBAAAccessInfo());
4147 }
4148 
4149 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
4150  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
4151  EmitIgnoredExpr(E->getBase());
4152  return EmitDeclRefLValue(DRE);
4153  }
4154 
4155  Expr *BaseExpr = E->getBase();
4156  // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
4157  LValue BaseLV;
4158  if (E->isArrow()) {
4159  LValueBaseInfo BaseInfo;
4160  TBAAAccessInfo TBAAInfo;
4161  Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
4162  QualType PtrTy = BaseExpr->getType()->getPointeeType();
4163  SanitizerSet SkippedChecks;
4164  bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
4165  if (IsBaseCXXThis)
4166  SkippedChecks.set(SanitizerKind::Alignment, true);
4167  if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
4168  SkippedChecks.set(SanitizerKind::Null, true);
4169  EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy,
4170  /*Alignment=*/CharUnits::Zero(), SkippedChecks);
4171  BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
4172  } else
4173  BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
4174 
4175  NamedDecl *ND = E->getMemberDecl();
4176  if (auto *Field = dyn_cast<FieldDecl>(ND)) {
4177  LValue LV = EmitLValueForField(BaseLV, Field);
4178  setObjCGCLValueClass(getContext(), E, LV);
4179  if (getLangOpts().OpenMP) {
4180  // If the member was explicitly marked as nontemporal, mark it as
4181  // nontemporal. If the base lvalue is marked as nontemporal, mark access
4182  // to children as nontemporal too.
4183  if ((IsWrappedCXXThis(BaseExpr) &&
4184  CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
4185  BaseLV.isNontemporal())
4186  LV.setNontemporal(/*Value=*/true);
4187  }
4188  return LV;
4189  }
4190 
4191  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
4192  return EmitFunctionDeclLValue(*this, E, FD);
4193 
4194  llvm_unreachable("Unhandled member declaration!");
4195 }
4196 
4197 /// Given that we are currently emitting a lambda, emit an l-value for
4198 /// one of its members.
4199 LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
4200  if (CurCodeDecl) {
4201  assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda());
4202  assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent());
4203  }
4204  QualType LambdaTagType =
4205  getContext().getTagDeclType(Field->getParent());
4206  LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType);
4207  return EmitLValueForField(LambdaLV, Field);
4208 }
4209 
4210 /// Get the field index in the debug info. The debug info structure/union
4211 /// ignores unnamed bit-fields.
4212 unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
4213  unsigned FieldIndex) {
4214  unsigned I = 0, Skipped = 0;
4215 
4216  for (auto F : Rec->getDefinition()->fields()) {
4217  if (I == FieldIndex)
4218  break;
4219  if (F->isUnnamedBitfield())
4220  Skipped++;
4221  I++;
4222  }
4223 
4224  return FieldIndex - Skipped;
4225 }
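// Example (illustrative): for "struct S { int a; int : 4; int b; };", field
// "b" has FieldIndex 2 in the AST but index 1 in the debug info, because the
// unnamed bit-field between "a" and "b" is skipped.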
4226 
4227 /// Get the address of a zero-sized field within a record. The resulting
4228 /// address doesn't necessarily have the right type.
4229 static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
4230  const FieldDecl *Field) {
4231  CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
4232  CGF.getContext().getFieldOffset(Field));
4233  if (Offset.isZero())
4234  return Base;
4235  Base = CGF.Builder.CreateElementBitCast(Base, CGF.Int8Ty);
4236  return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
4237 }
4238 
4239 /// Drill down to the storage of a field without walking into
4240 /// reference types.
4241 ///
4242 /// The resulting address doesn't necessarily have the right type.
4243 static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
4244  const FieldDecl *field) {
4245  if (field->isZeroSize(CGF.getContext()))
4246  return emitAddrOfZeroSizeField(CGF, base, field);
4247 
4248  const RecordDecl *rec = field->getParent();
4249 
4250  unsigned idx =
4251  CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4252 
4253  return CGF.Builder.CreateStructGEP(base, idx, field->getName());
4254 }
4255 
4256 static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
4257  Address addr, const FieldDecl *field) {
4258  const RecordDecl *rec = field->getParent();
4259  llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
4260  base.getType(), rec->getLocation());
4261 
4262  unsigned idx =
4263  CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4264 
4265  return CGF.Builder.CreatePreserveStructAccessIndex(
4266  addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
4267 }
4268 
4269 static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
4270  const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
4271  if (!RD)
4272  return false;
4273 
4274  if (RD->isDynamicClass())
4275  return true;
4276 
4277  for (const auto &Base : RD->bases())
4278  if (hasAnyVptr(Base.getType(), Context))
4279  return true;
4280 
4281  for (const FieldDecl *Field : RD->fields())
4282  if (hasAnyVptr(Field->getType(), Context))
4283  return true;
4284 
4285  return false;
4286 }
4287 
4288 LValue CodeGenFunction::EmitLValueForField(LValue base,
4289  const FieldDecl *field) {
4290  LValueBaseInfo BaseInfo = base.getBaseInfo();
4291 
4292  if (field->isBitField()) {
4293  const CGRecordLayout &RL =
4294  CGM.getTypes().getCGRecordLayout(field->getParent());
4295  const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
4296  const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
4297  CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
4298  Info.VolatileStorageSize != 0 &&
4299  field->getType()
4300  .withCVRQualifiers(base.getVRQualifiers())
4301  .isVolatileQualified();
4302  Address Addr = base.getAddress(*this);
4303  unsigned Idx = RL.getLLVMFieldNo(field);
4304  const RecordDecl *rec = field->getParent();
4305  if (!UseVolatile) {
4306  if (!IsInPreservedAIRegion &&
4307  (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
4308  if (Idx != 0)
4309  // For structs, we GEP to the field that the record layout suggests.
4310  Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
4311  } else {
4312  llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
4313  getContext().getRecordType(rec), rec->getLocation());
4314  Addr = Builder.CreatePreserveStructAccessIndex(
4315  Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
4316  DbgInfo);
4317  }
4318  }
4319  const unsigned SS =
4320  UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
4321  // Get the access type.
4322  llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
4323  if (Addr.getElementType() != FieldIntTy)
4324  Addr = Builder.CreateElementBitCast(Addr, FieldIntTy);
4325  if (UseVolatile) {
4326  const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
4327  if (VolatileOffset)
4328  Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
4329  }
4330 
4331  QualType fieldType =
4332  field->getType().withCVRQualifiers(base.getVRQualifiers());
4333  // TODO: Support TBAA for bit fields.
4334  LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
4335  return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
4336  TBAAAccessInfo());
4337  }
4338 
4339  // Fields of may-alias structures are may-alias themselves.
4340  // FIXME: this should get propagated down through anonymous structs
4341  // and unions.
4342  QualType FieldType = field->getType();
4343  const RecordDecl *rec = field->getParent();
4344  AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
4345  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
4346  TBAAAccessInfo FieldTBAAInfo;
4347  if (base.getTBAAInfo().isMayAlias() ||
4348  rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
4349  FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4350  } else if (rec->isUnion()) {
4351  // TODO: Support TBAA for unions.
4352  FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4353  } else {
4354  // If no base type has been assigned for the base access, then try to generate
4355  // one for this base lvalue.
4356  FieldTBAAInfo = base.getTBAAInfo();
4357  if (!FieldTBAAInfo.BaseType) {
4358  FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
4359  assert(!FieldTBAAInfo.Offset &&
4360  "Nonzero offset for an access with no base type!");
4361  }
4362 
4363  // Adjust offset to be relative to the base type.
4364  const ASTRecordLayout &Layout =
4365  getContext().getASTRecordLayout(field->getParent());
4366  unsigned CharWidth = getContext().getCharWidth();
4367  if (FieldTBAAInfo.BaseType)
4368  FieldTBAAInfo.Offset +=
4369  Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
4370 
4371  // Update the final access type and size.
4372  FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
4373  FieldTBAAInfo.Size =
4374  getContext().getTypeSizeInChars(FieldType).getQuantity();
4375  }
4376 
4377  Address addr = base.getAddress(*this);
4378  if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
4379  if (CGM.getCodeGenOpts().StrictVTablePointers &&
4380  ClassDef->isDynamicClass()) {
4381  // Getting to any field of a dynamic object requires stripping the dynamic
4382  // information provided by invariant.group. This is because accessing
4383  // fields may leak the real address of the dynamic object, which could
4384  // result in miscompilation when the leaked pointer is compared.
4385  auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer());
4386  addr = Address(stripped, addr.getAlignment());
4387  }
4388  }
4389 
4390  unsigned RecordCVR = base.getVRQualifiers();
4391  if (rec->isUnion()) {
4392  // For unions, there is no pointer adjustment.
4393  if (CGM.getCodeGenOpts().StrictVTablePointers &&
4394  hasAnyVptr(FieldType, getContext()))
4395  // Because unions can easily skip invariant.barriers, we need to add
4396  // a barrier every time a CXXRecord field with a vptr is referenced.
4397  addr = Address(Builder.CreateLaunderInvariantGroup(addr.getPointer()),
4398  addr.getAlignment());
4399 
4400  if (IsInPreservedAIRegion ||
4401  (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
4402  // Remember the original union field index
4403  llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
4404  rec->getLocation());
4405  addr = Address(
4406  Builder.CreatePreserveUnionAccessIndex(
4407  addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
4408  addr.getAlignment());
4409  }
4410 
4411  if (FieldType->isReferenceType())
4412  addr = Builder.CreateElementBitCast(
4413  addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName());
4414  } else {
4415  if (!IsInPreservedAIRegion &&
4416  (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
4417  // For structs, we GEP to the field that the record layout suggests.
4418  addr = emitAddrOfFieldStorage(*this, addr, field);
4419  else
4420  // Remember the original struct field index
4421  addr = emitPreserveStructAccess(*this, base, addr, field);
4422  }
4423 
4424  // If this is a reference field, load the reference right now.
4425  if (FieldType->isReferenceType()) {
4426  LValue RefLVal =
4427  MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
4428  if (RecordCVR & Qualifiers::Volatile)
4429  RefLVal.getQuals().addVolatile();
4430  addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
4431 
4432  // Qualifiers on the struct don't apply to the referencee.
4433  RecordCVR = 0;
4434  FieldType = FieldType->getPointeeType();
4435  }
4436 
4437  // Make sure that the address is pointing to the right type. This is critical
4438  // for both unions and structs. A union needs a bitcast, a struct element
4439  // will need a bitcast if the LLVM type laid out doesn't match the desired
4440  // type.
4441  addr = Builder.CreateElementBitCast(
4442  addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName());
4443 
4444  if (field->hasAttr<AnnotateAttr>())
4445  addr = EmitFieldAnnotations(field, addr);
4446 
4447  LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
4448  LV.getQuals().addCVRQualifiers(RecordCVR);
4449 
4450  // __weak attribute on a field is ignored.
4451  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
4452  LV.getQuals().removeObjCGCAttr();
4453 
4454  return LV;
4455 }
4456 
4457 LValue
4458 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
4459  const FieldDecl *Field) {
4460  QualType FieldType = Field->getType();
4461 
4462  if (!FieldType->isReferenceType())
4463  return EmitLValueForField(Base, Field);
4464 
4465  Address V = emitAddrOfFieldStorage(*this, Base.getAddress(*this), Field);
4466 
4467  // Make sure that the address is pointing to the right type.
4468  llvm::Type *llvmType = ConvertTypeForMem(FieldType);
4469  V = Builder.CreateElementBitCast(V, llvmType, Field->getName());
4470 
4471  // TODO: Generate TBAA information that describes this access as a structure
4472  // member access and not just an access to an object of the field's type. This
4473  // should be similar to what we do in EmitLValueForField().
4474  LValueBaseInfo BaseInfo = Base.getBaseInfo();
4475  AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
4476  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
4477  return MakeAddrLValue(V, FieldType, FieldBaseInfo,
4478  CGM.getTBAAInfoForSubobject(Base, FieldType));
4479 }
4480 
4481 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E) {
4482  if (E->isFileScope()) {
4483  ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
4484  return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
4485  }
4486  if (E->getType()->isVariablyModifiedType())
4487  // make sure to emit the VLA size.
4488  EmitVariablyModifiedType(E->getType());
4489 
4490  Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
4491  const Expr *InitExpr = E->getInitializer();
4492  LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);
4493 
4494  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
4495  /*Init*/ true);
4496 
4497  // Block-scope compound literals are destroyed at the end of the enclosing
4498  // scope in C.
4499  if (!getLangOpts().CPlusPlus)
4500  if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
4501  pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr,
4502  E->getType(), getDestroyer(DtorKind),
4503  DtorKind & EHCleanup);
4504 
4505  return Result;
4506 }
4507 
4508 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
4509  if (!E->isGLValue())
4510  // Initializing an aggregate temporary in C++11: T{...}.
4511  return EmitAggExprToLValue(E);
4512 
4513  // An lvalue initializer list must be initializing a reference.
4514  assert(E->isTransparent() && "non-transparent glvalue init list");
4515  return EmitLValue(E->getInit(0));
4516 }
4517 
4518 /// Emit the operand of a glvalue conditional operator. This is either a glvalue
4519 /// or a (possibly-parenthesized) throw-expression. If this is a throw, no
4520 /// LValue is returned and the current block has been terminated.
4521 static Optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
4522  const Expr *Operand) {
4523  if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
4524  CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
4525  return None;
4526  }
4527 
4528  return CGF.EmitLValue(Operand);
4529 }
4530 
4531 LValue CodeGenFunction::
4532 EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
4533  if (!expr->isGLValue()) {
4534  // ?: here should be an aggregate.
4535  assert(hasAggregateEvaluationKind(expr->getType()) &&
4536  "Unexpected conditional operator!");
4537  return EmitAggExprToLValue(expr);
4538  }
4539 
4540  OpaqueValueMapping binding(*this, expr);
4541 
4542  const Expr *condExpr = expr->getCond();
4543  bool CondExprBool;
4544  if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
4545  const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
4546  if (!CondExprBool) std::swap(live, dead);
4547 
4548  if (!ContainsLabel(dead)) {
4549  // If the true case is live, we need to track its region.
4550  if (CondExprBool)
4551  incrementProfileCounter(expr);
4552  // If the live case is a throw expression, emit it and return an undefined
4553  // lvalue because the result can't be used.
4554  if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
4555  EmitCXXThrowExpr(ThrowExpr);
4556  llvm::Type *Ty =
4557  llvm::PointerType::getUnqual(ConvertType(dead->getType()));
4558  return MakeAddrLValue(
4559  Address(llvm::UndefValue::get(Ty), CharUnits::One()),
4560  dead->getType());
4561  }
4562  return EmitLValue(live);
4563  }
4564  }
4565 
4566  llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
4567  llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
4568  llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
4569 
4570  ConditionalEvaluation eval(*this);
4571  EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock, getProfileCount(expr));
4572 
4573  // Any temporaries created here are conditional.
4574  EmitBlock(lhsBlock);
4575  incrementProfileCounter(expr);
4576  eval.begin(*this);
4577  Optional<LValue> lhs =
4578  EmitLValueOrThrowExpression(*this, expr->getTrueExpr());
4579  eval.end(*this);
4580 
4581  if (lhs && !lhs->isSimple())
4582  return EmitUnsupportedLValue(expr, "conditional operator");
4583 
4584  lhsBlock = Builder.GetInsertBlock();
4585  if (lhs)
4586  Builder.CreateBr(contBlock);
4587 
4588  // Any temporaries created here are conditional.
4589  EmitBlock(rhsBlock);
4590  eval.begin(*this);
4591  Optional<LValue> rhs =
4592  EmitLValueOrThrowExpression(*this, expr->getFalseExpr());
4593  eval.end(*this);
4594  if (rhs && !rhs->isSimple())
4595  return EmitUnsupportedLValue(expr, "conditional operator");
4596  rhsBlock = Builder.GetInsertBlock();
4597 
4598  EmitBlock(contBlock);
4599 
4600  if (lhs && rhs) {
4601  llvm::PHINode *phi =
4602  Builder.CreatePHI(lhs->getPointer(*this)->getType(), 2, "cond-lvalue");
4603  phi->addIncoming(lhs->getPointer(*this), lhsBlock);
4604  phi->addIncoming(rhs->getPointer(*this), rhsBlock);
4605  Address result(phi, std::min(lhs->getAlignment(), rhs->getAlignment()));
4606  AlignmentSource alignSource =
4607  std::max(lhs->getBaseInfo().getAlignmentSource(),
4608  rhs->getBaseInfo().getAlignmentSource());
4609  TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
4610  lhs->getTBAAInfo(), rhs->getTBAAInfo());
4611  return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
4612  TBAAInfo);
4613  } else {
4614  assert((lhs || rhs) &&
4615  "both operands of glvalue conditional are throw-expressions?");
4616  return lhs ? *lhs : *rhs;
4617  }
4618 }
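// This path supports C++ glvalue conditionals such as (illustrative)
//   int a, b; (flag ? a : b) = 3;
// where both arms yield addresses and the PHI above selects between them.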
4619 
4620 /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
4621 /// type. If the cast is to a reference, we can have the usual lvalue result,
4622 /// otherwise if a cast is needed by the code generator in an lvalue context,
4623 /// then it must mean that we need the address of an aggregate in order to
4624 /// access one of its members. This can happen for all the reasons that casts
4625 /// are permitted with aggregate result, including noop aggregate casts, and
4626 /// cast from scalar to union.
4627 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
4628  switch (E->getCastKind()) {
4629  case CK_ToVoid:
4630  case CK_BitCast:
4631  case CK_LValueToRValueBitCast:
4632  case CK_ArrayToPointerDecay:
4633  case CK_FunctionToPointerDecay:
4634  case CK_NullToMemberPointer:
4635  case CK_NullToPointer:
4636  case CK_IntegralToPointer:
4637  case CK_PointerToIntegral:
4638  case CK_PointerToBoolean:
4639  case CK_VectorSplat:
4640  case CK_IntegralCast:
4641  case CK_BooleanToSignedIntegral:
4642  case CK_IntegralToBoolean:
4643  case CK_IntegralToFloating:
4644  case CK_FloatingToIntegral:
4645  case CK_FloatingToBoolean:
4646  case CK_FloatingCast:
4647  case CK_FloatingRealToComplex:
4648  case CK_FloatingComplexToReal:
4649  case CK_FloatingComplexToBoolean:
4650  case CK_FloatingComplexCast:
4651  case CK_FloatingComplexToIntegralComplex:
4652  case CK_IntegralRealToComplex:
4653  case CK_IntegralComplexToReal:
4654  case CK_IntegralComplexToBoolean:
4655  case CK_IntegralComplexCast:
4656  case CK_IntegralComplexToFloatingComplex:
4657  case CK_DerivedToBaseMemberPointer:
4658  case CK_BaseToDerivedMemberPointer:
4659  case CK_MemberPointerToBoolean:
4660  case CK_ReinterpretMemberPointer:
4661  case CK_AnyPointerToBlockPointerCast:
4662  case CK_ARCProduceObject:
4663  case CK_ARCConsumeObject:
4664  case CK_ARCReclaimReturnedObject:
4665  case CK_ARCExtendBlockObject:
4666  case CK_CopyAndAutoreleaseBlockObject:
4667  case CK_IntToOCLSampler:
4668  case CK_FloatingToFixedPoint:
4669  case CK_FixedPointToFloating:
4670  case CK_FixedPointCast:
4671  case CK_FixedPointToBoolean:
4672  case CK_FixedPointToIntegral:
4673  case CK_IntegralToFixedPoint:
4674  case CK_MatrixCast:
4675  return EmitUnsupportedLValue(E, "unexpected cast lvalue");
4676 
4677  case CK_Dependent:
4678  llvm_unreachable("dependent cast kind in IR gen!");
4679 
4680  case CK_BuiltinFnToFnPtr:
4681  llvm_unreachable("builtin functions are handled elsewhere");
4682 
4683  // These are never l-values; just use the aggregate emission code.
4684  case CK_NonAtomicToAtomic:
4685  case CK_AtomicToNonAtomic:
4686  return EmitAggExprToLValue(E);
4687 
4688  case CK_Dynamic: {
4689  LValue LV = EmitLValue(E->getSubExpr());
4690  Address V = LV.getAddress(*this);
4691  const auto *DCE = cast<CXXDynamicCastExpr>(E);
4692  return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType());
4693  }
4694 
4695  case CK_ConstructorConversion:
4696  case CK_UserDefinedConversion:
4697  case CK_CPointerToObjCPointerCast:
4698  case CK_BlockPointerToObjCPointerCast:
4699  case CK_NoOp:
4700  case CK_LValueToRValue:
4701  return EmitLValue(E->getSubExpr());
4702 
4703  case CK_UncheckedDerivedToBase:
4704  case CK_DerivedToBase: {
4705  const auto *DerivedClassTy =
4706  E->getSubExpr()->getType()->castAs<RecordType>();
4707  auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
4708 
4709  LValue LV = EmitLValue(E->getSubExpr());
4710  Address This = LV.getAddress(*this);
4711 
4712  // Perform the derived-to-base conversion
4713  Address Base = GetAddressOfBaseClass(
4714  This, DerivedClassDecl, E->path_begin(), E->path_end(),
4715  /*NullCheckValue=*/false, E->getExprLoc());
4716 
4717  // TODO: Support accesses to members of base classes in TBAA. For now, we
4718  // conservatively pretend that the complete object is of the base class
4719  // type.
4720  return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
4721  CGM.getTBAAInfoForSubobject(LV, E->getType()));
4722  }
4723  case CK_ToUnion:
4724  return EmitAggExprToLValue(E);
4725  case CK_BaseToDerived: {
4726  const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
4727  auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
4728 
4729  LValue LV = EmitLValue(E->getSubExpr());
4730 
4731  // Perform the base-to-derived conversion
4732  Address Derived = GetAddressOfDerivedClass(
4733  LV.getAddress(*this), DerivedClassDecl, E->