//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
#include "clang/Basic/CodeGenOptions.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"

#include <string>

using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
      cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
                                                     CharUnits Align,
                                                     const Twine &Name,
                                                     llvm::Value *ArraySize) {
  auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
  Alloca->setAlignment(Align.getQuantity());
  return Address(Alloca, Align);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The alloca is cast to the default address space if necessary.
Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
                                          const Twine &Name,
                                          llvm::Value *ArraySize,
                                          Address *AllocaAddr) {
  auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
  if (AllocaAddr)
    *AllocaAddr = Alloca;
  llvm::Value *V = Alloca.getPointer();
  // Alloca always returns a pointer in alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.
  if (getASTAllocaAddressSpace() != LangAS::Default) {
    auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
    llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
    // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
    // otherwise alloca is inserted at the current insertion point of the
    // builder.
    if (!ArraySize)
      Builder.SetInsertPoint(AllocaInsertPt);
    V = getTargetHooks().performAddrSpaceCast(
        *this, V, getASTAllocaAddressSpace(), LangAS::Default,
        Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
  }

  return Address(V, Align);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name,
                                                    llvm::Value *ArraySize) {
  if (ArraySize)
    return Builder.CreateAlloca(Ty, ArraySize, Name);
  return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
                              ArraySize, Name, AllocaInsertPt);
}

/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
                                                      const Twine &Name) {
  CharUnits Align =
      CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlignment(Ty));
  return CreateTempAlloca(Ty, Align, Name);
}

void CodeGenFunction::InitTempAlloca(Address Var, llvm::Value *Init) {
  assert(isa<llvm::AllocaInst>(Var.getPointer()));
  auto *Store = new llvm::StoreInst(Init, Var.getPointer());
  Store->setAlignment(Var.getAlignment().getQuantity());
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
  Block->getInstList().insertAfter(AllocaInsertPt->getIterator(), Store);
}

Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  return CreateTempAlloca(ConvertType(Ty), Align, Name);
}

Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
                                       Address *Alloca) {
  // FIXME: Should we prefer the preferred type alignment here?
  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}

Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
                                       const Twine &Name, Address *Alloca) {
  return CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
                          /*ArraySize=*/nullptr, Alloca);
}

Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align,
                                                  const Twine &Name) {
  return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
}

Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                  const Twine &Name) {
  return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
                                  Name);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  PGO.setCurrentStmt(E);
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  SourceLocation Loc = E->getExprLoc();
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
                                       Loc);
}

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isRValue())
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar:
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  case TEK_Complex:
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
  case TEK_Aggregate:
    if (!ignoreResult && aggSlot.isIgnored())
      aggSlot = CreateAggTemp(E->getType(), "agg-temp");
    EmitAggExpr(E, aggSlot);
    return aggSlot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateEvaluationKind(E->getType()))
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       Address Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  switch (getEvaluationKind(E->getType())) {
  case TEK_Complex:
    EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
                              /*isInit*/ false);
    return;

  case TEK_Aggregate: {
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit),
                                         AggValueSlot::MayOverlap));
    return;
  }

  case TEK_Scalar: {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
                     const Expr *E, Address ReferenceTemporary) {
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  //
  // FIXME: This should be looking at E, not M.
  if (auto Lifetime = M->getType().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
      // Carry on to normal cleanup handling.
      break;

    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do; cleaned up by an autorelease pool.
      return;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
      switch (StorageDuration Duration = M->getStorageDuration()) {
      case SD_Static:
        // Note: we intentionally do not register a cleanup to release
        // the object on program termination.
        return;

      case SD_Thread:
        // FIXME: We should probably register a cleanup in this case.
        return;

      case SD_Automatic:
      case SD_FullExpression:
        CodeGenFunction::Destroyer *Destroy;
        CleanupKind CleanupKind;
        if (Lifetime == Qualifiers::OCL_Strong) {
          const ValueDecl *VD = M->getExtendingDecl();
          bool Precise =
              VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
          CleanupKind = CGF.getARCCleanupKind();
          Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
                            : &CodeGenFunction::destroyARCStrongImprecise;
        } else {
          // __weak objects always get EH cleanups; otherwise, exceptions
          // could cause really nasty crashes instead of mere leaks.
          CleanupKind = NormalAndEHCleanup;
          Destroy = &CodeGenFunction::destroyARCWeak;
        }
        if (Duration == SD_FullExpression)
          CGF.pushDestroy(CleanupKind, ReferenceTemporary,
                          M->getType(), *Destroy,
                          CleanupKind & EHCleanup);
        else
          CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
                                          M->getType(),
                                          *Destroy, CleanupKind & EHCleanup);
        return;

      case SD_Dynamic:
        llvm_unreachable("temporary cannot have dynamic storage duration");
      }
      llvm_unreachable("unknown storage duration");
    }
  }

  CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
  if (const RecordType *RT =
          E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
    // Get the destructor for the reference temporary.
    auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
    if (!ClassDecl->hasTrivialDestructor())
      ReferenceTemporaryDtor = ClassDecl->getDestructor();
  }

  if (!ReferenceTemporaryDtor)
    return;

  // Call the destructor for the temporary.
  switch (M->getStorageDuration()) {
  case SD_Static:
  case SD_Thread: {
    llvm::Constant *CleanupFn;
    llvm::Constant *CleanupArg;
    if (E->getType()->isArrayType()) {
      CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
          ReferenceTemporary, E->getType(),
          CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
          dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
      CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
    } else {
      CleanupFn = CGF.CGM.getAddrOfCXXStructor(ReferenceTemporaryDtor,
                                               StructorType::Complete);
      CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer());
    }
    CGF.CGM.getCXXABI().registerGlobalDtor(
        CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
    break;
  }

  case SD_FullExpression:
    CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
                    CodeGenFunction::destroyCXXObject,
                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Automatic:
    CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
                                    ReferenceTemporary, E->getType(),
                                    CodeGenFunction::destroyCXXObject,
                                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Dynamic:
    llvm_unreachable("temporary cannot have dynamic storage duration");
  }
}

static Address createReferenceTemporary(CodeGenFunction &CGF,
                                        const MaterializeTemporaryExpr *M,
                                        const Expr *Inner,
                                        Address *Alloca = nullptr) {
  auto &TCG = CGF.getTargetHooks();
  switch (M->getStorageDuration()) {
  case SD_FullExpression:
  case SD_Automatic: {
    // If we have a constant temporary array or record try to promote it into a
    // constant global under the same rules a normal constant would've been
    // promoted. This is easier on the optimizer and generally emits fewer
    // instructions.
    QualType Ty = Inner->getType();
    if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
        (Ty->isArrayType() || Ty->isRecordType()) &&
        CGF.CGM.isTypeConstant(Ty, true))
      if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
        if (auto AddrSpace = CGF.getTarget().getConstantAddressSpace()) {
          auto AS = AddrSpace.getValue();
          auto *GV = new llvm::GlobalVariable(
              CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
              llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
              llvm::GlobalValue::NotThreadLocal,
              CGF.getContext().getTargetAddressSpace(AS));
          CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
          GV->setAlignment(alignment.getQuantity());
          llvm::Constant *C = GV;
          if (AS != LangAS::Default)
            C = TCG.performAddrSpaceCast(
                CGF.CGM, GV, AS, LangAS::Default,
                GV->getValueType()->getPointerTo(
                    CGF.getContext().getTargetAddressSpace(LangAS::Default)));
          // FIXME: Should we put the new global into a COMDAT?
          return Address(C, alignment);
        }
      }
    return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
  }
  case SD_Thread:
  case SD_Static:
    return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);

  case SD_Dynamic:
    llvm_unreachable("temporary can't have dynamic storage duration");
  }
  llvm_unreachable("unknown storage duration");
}

LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
  const Expr *E = M->GetTemporaryExpr();

  assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
          !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
         "Reference should never be pseudo-strong!");

  // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
  // as that will cause the lifetime adjustment to be lost for ARC
  auto ownership = M->getType().getObjCLifetime();
  if (ownership != Qualifiers::OCL_None &&
      ownership != Qualifiers::OCL_ExplicitNone) {
    Address Object = createReferenceTemporary(*this, M, E);
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
      Object = Address(llvm::ConstantExpr::getBitCast(Var,
                           ConvertTypeForMem(E->getType())
                             ->getPointerTo(Object.getAddressSpace())),
                       Object.getAlignment());

      // createReferenceTemporary will promote the temporary to a global with a
      // constant initializer if it can. It can only do this to a value of
      // ARC-manageable type if the value is global and therefore "immune" to
      // ref-counting operations. Therefore we have no need to emit either a
      // dynamic initialization or a cleanup and we can just return the address
      // of the temporary.
      if (Var->hasInitializer())
        return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);

      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
    }
    LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
                                       AlignmentSource::Decl);

    switch (getEvaluationKind(E->getType())) {
    default: llvm_unreachable("expected scalar or aggregate expression");
    case TEK_Scalar:
      EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
      break;
    case TEK_Aggregate: {
      EmitAggExpr(E, AggValueSlot::forAddr(Object,
                                           E->getType().getQualifiers(),
                                           AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased,
                                           AggValueSlot::DoesNotOverlap));
      break;
    }
    }

    pushTemporaryCleanup(*this, M, E, Object);
    return RefTempDst;
  }

  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);

  for (const auto &Ignored : CommaLHSs)
    EmitIgnoredExpr(Ignored);

  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
    if (opaque->getType()->isRecordType()) {
      assert(Adjustments.empty());
      return EmitOpaqueValueLValue(opaque);
    }
  }

  // Create and initialize the reference temporary.
  Address Alloca = Address::invalid();
  Address Object = createReferenceTemporary(*this, M, E, &Alloca);
  if (auto *Var = dyn_cast<llvm::GlobalVariable>(
          Object.getPointer()->stripPointerCasts())) {
    Object = Address(llvm::ConstantExpr::getBitCast(
                         cast<llvm::Constant>(Object.getPointer()),
                         ConvertTypeForMem(E->getType())->getPointerTo()),
                     Object.getAlignment());
    // If the temporary is a global and has a constant initializer or is a
    // constant temporary that we promoted to a global, we may have already
    // initialized it.
    if (!Var->hasInitializer()) {
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
      EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
    }
  } else {
    switch (M->getStorageDuration()) {
    case SD_Automatic:
      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                                  Alloca, Size);
      }
      break;

    case SD_FullExpression: {
      if (!ShouldEmitLifetimeMarkers)
        break;

      // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
      // marker. Instead, start the lifetime of a conditional temporary earlier
      // so that it's unconditional. Don't do this in ASan's use-after-scope
      // mode so that it gets the more precise lifetime marks. If the type has
      // a non-trivial destructor, we'll have a cleanup block for it anyway,
      // so this typically doesn't help; skip it in that case.
      ConditionalEvaluation *OldConditional = nullptr;
      CGBuilderTy::InsertPoint OldIP;
      if (isInConditionalBranch() && !E->getType().isDestructedType() &&
          !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) {
        OldConditional = OutermostConditional;
        OutermostConditional = nullptr;

        OldIP = Builder.saveIP();
        llvm::BasicBlock *Block = OldConditional->getStartingBlock();
        Builder.restoreIP(CGBuilderTy::InsertPoint(
            Block, llvm::BasicBlock::iterator(Block->back())));
      }

      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
                                             Size);
      }

      if (OldConditional) {
        OutermostConditional = OldConditional;
        Builder.restoreIP(OldIP);
      }
      break;
    }

    default:
      break;
    }
    EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
  }
  pushTemporaryCleanup(*this, M, E, Object);

  // Perform derived-to-base casts and/or field accesses, to get from the
  // temporary object we created (and, potentially, for which we extended
  // the lifetime) to the subobject we're binding the reference to.
  for (unsigned I = Adjustments.size(); I != 0; --I) {
    SubobjectAdjustment &Adjustment = Adjustments[I-1];
    switch (Adjustment.Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      Object =
          GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
                                Adjustment.DerivedToBase.BasePath->path_begin(),
                                Adjustment.DerivedToBase.BasePath->path_end(),
                                /*NullCheckValue=*/ false, E->getExprLoc());
      break;

    case SubobjectAdjustment::FieldAdjustment: {
      LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
      LV = EmitLValueForField(LV, Adjustment.Field);
      assert(LV.isSimple() &&
             "materialized temporary field is not a simple lvalue");
      Object = LV.getAddress();
      break;
    }

    case SubobjectAdjustment::MemberPointerAdjustment: {
      llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
      Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
                                               Adjustment.Ptr.MPT);
      break;
    }
    }
  }

  return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
}

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
  // Emit the expression as an lvalue.
  LValue LV = EmitLValue(E);
  assert(LV.isSimple());
  llvm::Value *Value = LV.getPointer();

  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }

  return RValue::get(Value);
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
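/// The sequence mirrors CityHash's Hash128to64 finalizer: two rounds of
/// multiply-xor-shift with the 64-bit constant kMul = 0x9ddfea08eb382d69.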
static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
                                    llvm::Value *High) {
  llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
  llvm::Value *K47 = Builder.getInt64(47);
  llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
  llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
  llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
  llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
  return Builder.CreateMul(B1, KMul);
}

bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
  return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
         TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
}

bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
         (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
          TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
          TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
}

bool CodeGenFunction::sanitizePerformTypeCheck() const {
  return SanOpts.has(SanitizerKind::Null) |
         SanOpts.has(SanitizerKind::Alignment) |
         SanOpts.has(SanitizerKind::ObjectSize) |
         SanOpts.has(SanitizerKind::Vptr);
}

void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Ptr, QualType Ty,
                                    CharUnits Alignment,
                                    SanitizerSet SkippedChecks) {
  if (!sanitizePerformTypeCheck())
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
  if (Ptr->getType()->getPointerAddressSpace())
    return;

  // Don't check pointers to volatile data. The behavior here is implementation-
  // defined.
  if (Ty.isVolatileQualified())
    return;

  SanitizerScope SanScope(this);

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
  llvm::BasicBlock *Done = nullptr;

  // Quickly determine whether we have a pointer to an alloca. It's possible
  // to skip null checks, and some alignment checks, for these pointers. This
  // can reduce compile-time significantly.
  auto PtrToAlloca =
      dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCastsNoFollowAliases());

  llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
  llvm::Value *IsNonNull = nullptr;
  bool IsGuaranteedNonNull =
      SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
  bool AllowNullPointers = isNullPointerAllowed(TCK);
  if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
      !IsGuaranteedNonNull) {
    // The glvalue must not be an empty glvalue.
    IsNonNull = Builder.CreateIsNotNull(Ptr);

    // The IR builder can constant-fold the null check if the pointer points to
    // a constant.
    IsGuaranteedNonNull = IsNonNull == True;

    // Skip the null check if the pointer is known to be non-null.
    if (!IsGuaranteedNonNull) {
      if (AllowNullPointers) {
        // When performing pointer casts, it's OK if the value is null.
        // Skip the remaining checks in that case.
        Done = createBasicBlock("null");
        llvm::BasicBlock *Rest = createBasicBlock("not.null");
        Builder.CreateCondBr(IsNonNull, Rest, Done);
        EmitBlock(Rest);
      } else {
        Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
      }
    }
  }

  if (SanOpts.has(SanitizerKind::ObjectSize) &&
      !SkippedChecks.has(SanitizerKind::ObjectSize) &&
      !Ty->isIncompleteType()) {
    uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity();

    // The glvalue must refer to a large enough storage region.
    // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
    //        to check this.
    // FIXME: Get object address space
    llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
    llvm::Value *Min = Builder.getFalse();
    llvm::Value *NullIsUnknown = Builder.getFalse();
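    // With Min = false, llvm.objectsize evaluates to -1 when the size is
    // unknown, so the "large enough" comparison below passes conservatively
    // instead of producing a false positive.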
    llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy);
    llvm::Value *LargeEnough = Builder.CreateICmpUGE(
        Builder.CreateCall(F, {CastAddr, Min, NullIsUnknown}),
        llvm::ConstantInt::get(IntPtrTy, Size));
    Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
  }

  uint64_t AlignVal = 0;
  llvm::Value *PtrAsInt = nullptr;

  if (SanOpts.has(SanitizerKind::Alignment) &&
      !SkippedChecks.has(SanitizerKind::Alignment)) {
    AlignVal = Alignment.getQuantity();
    if (!Ty->isIncompleteType() && !AlignVal)
      AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();

    // The glvalue must be suitably aligned.
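    // Alignment is always a power of two, so a pointer P is aligned to
    // AlignVal iff (P & (AlignVal - 1)) == 0; that is exactly the test below.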
    if (AlignVal > 1 &&
        (!PtrToAlloca || PtrToAlloca->getAlignment() < AlignVal)) {
      PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
      llvm::Value *Align = Builder.CreateAnd(
          PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
      llvm::Value *Aligned =
          Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
      if (Aligned != True)
        Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
    }
  }

  if (Checks.size() > 0) {
    // Make sure we're not losing information. Alignment needs to be a power of
    // 2.
    assert(!AlignVal || (uint64_t)1 << llvm::Log2_64(AlignVal) == AlignVal);
    llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
        llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2_64(AlignVal) : 1),
        llvm::ConstantInt::get(Int8Ty, TCK)};
    EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
              PtrAsInt ? PtrAsInt : Ptr);
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  //
  // C++11 [basic.life]p5,6:
  //   [For storage which does not refer to an object within its lifetime]
  //   The program has undefined behavior if:
  //    -- the [pointer or glvalue] is used to access a non-static data member
  //       or call a non-static member function
  if (SanOpts.has(SanitizerKind::Vptr) &&
      !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
    // Ensure that the pointer is non-null before loading it. If there is no
    // compile-time guarantee, reuse the run-time null check or emit a new one.
    if (!IsGuaranteedNonNull) {
      if (!IsNonNull)
        IsNonNull = Builder.CreateIsNotNull(Ptr);
      if (!Done)
        Done = createBasicBlock("vptr.null");
      llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
      Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
      EmitBlock(VptrNotNull);
    }

    // Compute a hash of the mangled name of the type.
    //
    // FIXME: This is not guaranteed to be deterministic! Move to a
    //        fingerprinting mechanism once LLVM provides one. For the time
    //        being the implementation happens to be deterministic.
    SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);

    // Blacklist based on the mangled type.
    if (!CGM.getContext().getSanitizerBlacklist().isBlacklistedType(
            SanitizerKind::Vptr, Out.str())) {
      llvm::hash_code TypeHash = hash_value(Out.str());

      // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
      llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
      llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
      Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), getPointerAlign());
      llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
      llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);

      llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
      Hash = Builder.CreateTrunc(Hash, IntPtrTy);

      // Look the hash up in our cache.
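      // __ubsan_vptr_type_cache is a direct-mapped table in the UBSan runtime:
      // the slot index is the low 7 bits of the hash, and a matching entry
      // means this (vptr, type) pair was already validated on a previous miss.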
      const int CacheSize = 128;
      llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
      llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                     "__ubsan_vptr_type_cache");
      llvm::Value *Slot = Builder.CreateAnd(Hash,
                                            llvm::ConstantInt::get(IntPtrTy,
                                                                   CacheSize-1));
      llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
      llvm::Value *CacheVal =
        Builder.CreateAlignedLoad(Builder.CreateInBoundsGEP(Cache, Indices),
                                  getPointerAlign());

      // If the hash isn't in the cache, call a runtime handler to perform the
      // hard work of checking whether the vptr is for an object of the right
      // type. This will either fill in the cache and return, or produce a
      // diagnostic.
      llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
      llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc),
        EmitCheckTypeDescriptor(Ty),
        CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
        llvm::ConstantInt::get(Int8Ty, TCK)
      };
      llvm::Value *DynamicData[] = { Ptr, Hash };
      EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
                SanitizerHandler::DynamicTypeCacheMiss, StaticData,
                DynamicData);
    }
  }

  if (Done) {
    Builder.CreateBr(Done);
    EmitBlock(Done);
  }
}

/// Determine whether this expression refers to a flexible array member in a
/// struct. We disable array bounds checks for such members.
static bool isFlexibleArrayMemberExpr(const Expr *E) {
  // For compatibility with existing code, we treat arrays of length 0 or
  // 1 as flexible array members.
  const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe();
  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
    if (CAT->getSize().ugt(1))
      return false;
  } else if (!isa<IncompleteArrayType>(AT))
    return false;

  E = E->IgnoreParens();

  // A flexible array member must be the last member in the class.
  if (const auto *ME = dyn_cast<MemberExpr>(E)) {
    // FIXME: If the base type of the member expr is not FD->getParent(),
    // this should not be treated as a flexible array member access.
    if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
      RecordDecl::field_iterator FI(
          DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
      return ++FI == FD->getParent()->field_end();
    }
  } else if (const auto *IRE = dyn_cast<ObjCIvarRefExpr>(E)) {
    return IRE->getDecl()->getNextIvar() == nullptr;
  }

  return false;
}

llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
                                                   QualType EltTy) {
  ASTContext &C = getContext();
  uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
  if (!EltSize)
    return nullptr;

  auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
  if (!ArrayDeclRef)
    return nullptr;

  auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
  if (!ParamDecl)
    return nullptr;

  auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
  if (!POSAttr)
    return nullptr;

  // Don't load the size if it's a lower bound.
  int POSType = POSAttr->getType();
  if (POSType != 0 && POSType != 1)
    return nullptr;

  // Find the implicit size parameter.
  auto PassedSizeIt = SizeArguments.find(ParamDecl);
  if (PassedSizeIt == SizeArguments.end())
    return nullptr;

  const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
  assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
  Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
  llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
                                              C.getSizeType(), E->getExprLoc());
  llvm::Value *SizeOfElement =
      llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
  return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
}

/// If Base is known to point to the start of an array, return the length of
/// that array. Return 0 if the length cannot be determined.
static llvm::Value *getArrayIndexingBound(
    CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !isFlexibleArrayMemberExpr(CE->getSubExpr())) {
      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());
      else if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).NumElts;
      // Ignore pass_object_size here. It's not applicable on decayed pointers.
    }
  }

  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
    IndexedType = Base->getType();
    return POS;
  }

  return nullptr;
}

void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
                                      llvm::Value *Index, QualType IndexType,
                                      bool Accessed) {
  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
         "should not be called unless adding bounds checks");
  SanitizerScope SanScope(this);

  QualType IndexedType;
  llvm::Value *Bound = getArrayIndexingBound(*this, Base, IndexedType);
  if (!Bound)
    return;

  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);

  llvm::Constant *StaticData[] = {
    EmitCheckSourceLocation(E->getExprLoc()),
    EmitCheckTypeDescriptor(IndexedType),
    EmitCheckTypeDescriptor(IndexType)
  };
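  // An actual access requires Index < Bound, whereas merely forming the
  // address (e.g. &arr[n]) may legitimately point one past the end of the
  // array, hence Index <= Bound in that case.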
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
  EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
            SanitizerHandler::OutOfBounds, StaticData, Index);
}


CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  EmitStoreOfComplex(IncVal, LV, /*init*/ false);

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}

void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
                                             CodeGenFunction *CGF) {
  // Bind VLAs in the cast type.
  if (CGF && E->getType()->isVariablyModifiedType())
    CGF->EmitVariablyModifiedType(E->getType());

  if (CGDebugInfo *DI = getModuleDebugInfo())
    DI->EmitExplicitCastType(E->getType());
}

//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

/// EmitPointerWithAlignment - Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
                                                  LValueBaseInfo *BaseInfo,
                                                  TBAAAccessInfo *TBAAInfo) {
  // We allow this with ObjC object pointers because of fragile ABIs.
  assert(E->getType()->isPointerType() ||
         E->getType()->isObjCObjectPointerType());
  E = E->IgnoreParens();

  // Casts:
  if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
      CGM.EmitExplicitCastExprType(ECE, this);

    switch (CE->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
    case CK_AddressSpaceConversion:
      if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
        if (PtrTy->getPointeeType()->isVoidType())
          break;

        LValueBaseInfo InnerBaseInfo;
        TBAAAccessInfo InnerTBAAInfo;
        Address Addr = EmitPointerWithAlignment(CE->getSubExpr(),
                                                &InnerBaseInfo,
                                                &InnerTBAAInfo);
        if (BaseInfo) *BaseInfo = InnerBaseInfo;
        if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;

        if (isa<ExplicitCastExpr>(CE)) {
          LValueBaseInfo TargetTypeBaseInfo;
          TBAAAccessInfo TargetTypeTBAAInfo;
          CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(),
                                                           &TargetTypeBaseInfo,
                                                           &TargetTypeTBAAInfo);
          if (TBAAInfo)
            *TBAAInfo = CGM.mergeTBAAInfoForCast(*TBAAInfo,
                                                 TargetTypeTBAAInfo);
          // If the source l-value is opaque, honor the alignment of the
          // casted-to type.
          if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
            if (BaseInfo)
              BaseInfo->mergeForCast(TargetTypeBaseInfo);
            Addr = Address(Addr.getPointer(), Align);
          }
        }

        if (SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
            CE->getCastKind() == CK_BitCast) {
          if (auto PT = E->getType()->getAs<PointerType>())
            EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr.getPointer(),
                                      /*MayBeNull=*/true,
                                      CodeGenFunction::CFITCK_UnrelatedCast,
                                      CE->getBeginLoc());
        }
        return CE->getCastKind() != CK_AddressSpaceConversion
                   ? Builder.CreateBitCast(Addr, ConvertType(E->getType()))
                   : Builder.CreateAddrSpaceCast(Addr,
                                                 ConvertType(E->getType()));
      }
      break;

    // Array-to-pointer decay.
    case CK_ArrayToPointerDecay:
      return EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);

    // Derived-to-base conversions.
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      // TODO: Support accesses to members of base classes in TBAA. For now, we
      // conservatively pretend that the complete object is of the base class
      // type.
      if (TBAAInfo)
        *TBAAInfo = CGM.getTBAAAccessInfo(E->getType());
      Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), BaseInfo);
      auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      return GetAddressOfBaseClass(Addr, Derived,
                                   CE->path_begin(), CE->path_end(),
                                   ShouldNullCheckClassCastValue(CE),
                                   CE->getExprLoc());
    }

    // TODO: Is there any reason to treat base-to-derived conversions
    // specially?
    default:
      break;
    }
  }

  // Unary &.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = EmitLValue(UO->getSubExpr());
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress();
    }
  }

  // TODO: conditional operators, comma.

  // Otherwise, use the alignment of the type.
  CharUnits Align = getNaturalPointeeTypeAlignment(E->getType(), BaseInfo,
                                                   TBAAInfo);
  return Address(EmitScalarExpr(E), Align);
}

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(nullptr);

  switch (getEvaluationKind(Ty)) {
  case TEK_Complex: {
    llvm::Type *EltTy =
        ConvertType(Ty->castAs<ComplexType>()->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have an
  // identifiable address. Just because the contents of the value are undefined
  // doesn't mean that the address can't be taken and compared.
  case TEK_Aggregate: {
    Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  case TEK_Scalar:
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
  llvm_unreachable("bad evaluation kind");
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
  return MakeAddrLValue(Address(llvm::UndefValue::get(Ty), CharUnits::One()),
                        E->getType());
}

bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
  const Expr *Base = Obj;
  while (!isa<CXXThisExpr>(Base)) {
    // The result of a dynamic_cast can be null.
    if (isa<CXXDynamicCastExpr>(Base))
      return false;

    if (const auto *CE = dyn_cast<CastExpr>(Base)) {
      Base = CE->getSubExpr();
    } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
      Base = PE->getSubExpr();
    } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
      if (UO->getOpcode() == UO_Extension)
        Base = UO->getSubExpr();
      else
        return false;
    } else {
      return false;
    }
  }
  return true;
}

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
  LValue LV;
  if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
    LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
  else
    LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
    SanitizerSet SkippedChecks;
    if (const auto *ME = dyn_cast<MemberExpr>(E)) {
      bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
      if (IsBaseCXXThis)
        SkippedChecks.set(SanitizerKind::Alignment, true);
      if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
        SkippedChecks.set(SanitizerKind::Null, true);
    }
    EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(),
                  E->getType(), LV.getAlignment(), SkippedChecks);
  }
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size as the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  ApplyDebugLocation DL(*this, E);
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass: {
    QualType Ty = E->getType();
    if (const AtomicType *AT = Ty->getAs<AtomicType>())
      Ty = AT->getValueType();
    if (!Ty->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ConstantExprClass:
    return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));

  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(E);
    enterFullExpression(cleanups);
    RunCleanupsScope Scope(*this);
    LValue LV = EmitLValue(cleanups->getSubExpr());
    if (LV.isSimple()) {
      // Defend against branches out of gnu statement expressions surrounded by
      // cleanups.
      llvm::Value *V = LV.getPointer();
      Scope.ForceCleanup({&V});
      return LValue::MakeAddr(Address(V, LV.getAlignment()), LV.getType(),
                              getContext(), LV.getBaseInfo(), LV.getTBAAInfo());
    }
    // FIXME: Is it possible to create an ExprWithCleanups that produces a
    // bitfield lvalue or some other non-simple lvalue?
    return LV;
  }

  case Expr::CXXDefaultArgExprClass:
    return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
  case Expr::CXXDefaultInitExprClass: {
    CXXDefaultInitExprScope Scope(*this);
    return EmitLValue(cast<CXXDefaultInitExpr>(E)->getExpr());
  }
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::OMPArraySectionExprClass:
    return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr());
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));

  case Expr::CoawaitExprClass:
    return EmitCoawaitLValue(cast<CoawaitExpr>(E));
  case Expr::CoyieldExprClass:
    return EmitCoyieldLValue(cast<CoyieldExpr>(E));
  }
}

/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const auto *RT = dyn_cast<RecordType>(type))
    if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}

/// Can we constant-emit a load of a reference to a variable of the
/// given type? This is different from predicates like
/// Decl::isUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}

/// Try to emit a reference to the given value without producing it as
/// an l-value. This is actually more than an optimization: we can't
/// produce an l-value for variables that we never actually captured
/// in a block or lambda, which means const int variables or constexpr
/// literals or similar.
CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
  ValueDecl *value = refExpr->getDecl();

  // The value needs to be an enum constant or a constant variable.
  ConstantEmissionKind CEK;
  if (isa<ParmVarDecl>(value)) {
    CEK = CEK_None;
  } else if (auto *var = dyn_cast<VarDecl>(value)) {
    CEK = checkVarTypeForConstantEmission(var->getType());
  } else if (isa<EnumConstantDecl>(value)) {
    CEK = CEK_AsValueOnly;
  } else {
    CEK = CEK_None;
  }
  if (CEK == CEK_None) return ConstantEmission();

  Expr::EvalResult result;
  bool resultIsReference;
  QualType resultType;

  // It's best to evaluate all the way as an r-value if that's permitted.
  if (CEK != CEK_AsReferenceOnly &&
      refExpr->EvaluateAsRValue(result, getContext())) {
    resultIsReference = false;
    resultType = refExpr->getType();

  // Otherwise, try to evaluate as an l-value.
  } else if (CEK != CEK_AsValueOnly &&
             refExpr->EvaluateAsLValue(result, getContext())) {
    resultIsReference = true;
    resultType = value->getType();

  // Failure.
  } else {
    return ConstantEmission();
  }

  // In any case, if the initializer has side-effects, abandon ship.
  if (result.HasSideEffects)
    return ConstantEmission();

  // Emit as a constant.
  auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
                                               result.Val, resultType);

  // Make sure we emit a debug reference to the global variable.
  // This should probably fire even for
  if (isa<VarDecl>(value)) {
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
      EmitDeclRefExprDbgValue(refExpr, result.Val);
  } else {
    assert(isa<EnumConstantDecl>(value));
    EmitDeclRefExprDbgValue(refExpr, result.Val);
  }

  // If we emitted a reference constant, we need to dereference that.
  if (resultIsReference)
    return ConstantEmission::forReference(C);

  return ConstantEmission::forValue(C);
}

static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
                                                        const MemberExpr *ME) {
  if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
    // Try to emit static variable member expressions as DREs.
    return DeclRefExpr::Create(
        CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
        /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
        ME->getType(), ME->getValueKind());
  }
  return nullptr;
}

CodeGenFunction::ConstantEmission
CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
    return tryEmitAsConstant(DRE);
  return ConstantEmission();
}

llvm::Value *CodeGenFunction::emitScalarConstant(
    const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
  assert(Constant && "not a constant");
  if (Constant.isReference())
    return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
                            E->getExprLoc())
        .getScalarVal();
  return Constant.getValue();
}

llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
                                               SourceLocation Loc) {
  return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
                          lvalue.getType(), Loc, lvalue.getBaseInfo(),
                          lvalue.getTBAAInfo(), lvalue.isNontemporal());
}

static bool hasBooleanRepresentation(QualType Ty) {
  if (Ty->isBooleanType())
    return true;

  if (const EnumType *ET = Ty->getAs<EnumType>())
    return ET->getDecl()->getIntegerType()->isBooleanType();

  if (const AtomicType *AT = Ty->getAs<AtomicType>())
    return hasBooleanRepresentation(AT->getValueType());

  return false;
}

static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
                            llvm::APInt &Min, llvm::APInt &End,
                            bool StrictEnums, bool IsBool) {
  const EnumType *ET = Ty->getAs<EnumType>();
  bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
                                ET && !ET->getDecl()->isFixed();
  if (!IsBool && !IsRegularCPlusPlusEnum)
    return false;

  if (IsBool) {
    Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
    End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
  } else {
    const EnumDecl *ED = ET->getDecl();
    llvm::Type *LTy = CGF.ConvertTypeForMem(ED->getIntegerType());
    unsigned Bitwidth = LTy->getScalarSizeInBits();
    unsigned NumNegativeBits = ED->getNumNegativeBits();
    unsigned NumPositiveBits = ED->getNumPositiveBits();

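    // For example, enum { A = -4, B = 3 } needs 3 bits for its most negative
    // enumerator and 2 bits for its most positive one, so the valid range
    // computed below is [Min, End) = [-4, 4).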
    if (NumNegativeBits) {
      unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
      assert(NumBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
      Min = -End;
    } else {
      assert(NumPositiveBits <= Bitwidth);
      End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
      Min = llvm::APInt(Bitwidth, 0);
    }
  }
  return true;
}

llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
                       hasBooleanRepresentation(Ty)))
    return nullptr;

  llvm::MDBuilder MDHelper(getLLVMContext());
  return MDHelper.createRange(Min, End);
}

bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
                                           SourceLocation Loc) {
  bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
  bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
  if (!HasBoolCheck && !HasEnumCheck)
    return false;

  bool IsBool = hasBooleanRepresentation(Ty) ||
                NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
  bool NeedsBoolCheck = HasBoolCheck && IsBool;
  bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
  if (!NeedsBoolCheck && !NeedsEnumCheck)
    return false;

  // Single-bit booleans don't need to be checked. Special-case this to avoid
  // a bit width mismatch when handling bitfield values. This is handled by
  // EmitFromMemory for the non-bitfield case.
  if (IsBool &&
      cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
    return false;

  llvm::APInt Min, End;
  if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
    return true;

  auto &Ctx = getLLVMContext();
  SanitizerScope SanScope(this);
  llvm::Value *Check;
  --End;
  if (!Min) {
    Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
  } else {
    llvm::Value *Upper =
        Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
    llvm::Value *Lower =
        Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
    Check = Builder.CreateAnd(Upper, Lower);
  }
  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
                                  EmitCheckTypeDescriptor(Ty)};
  SanitizerMask Kind =
      NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
  EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
            StaticArgs, EmitCheckValue(Value));
  return true;
}
1617 
1618 llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
1619  QualType Ty,
1620  SourceLocation Loc,
1621  LValueBaseInfo BaseInfo,
1622  TBAAAccessInfo TBAAInfo,
1623  bool isNontemporal) {
1624  if (!CGM.getCodeGenOpts().PreserveVec3Type) {
1625  // For better performance, handle vector loads differently.
1626  if (Ty->isVectorType()) {
1627  const llvm::Type *EltTy = Addr.getElementType();
1628 
1629  const auto *VTy = cast<llvm::VectorType>(EltTy);
1630 
1631  // Handle vectors of size 3 like size 4 for better performance.
1632  if (VTy->getNumElements() == 3) {
1633 
1634  // Bitcast to vec4 type.
1635  llvm::VectorType *vec4Ty =
1636  llvm::VectorType::get(VTy->getElementType(), 4);
1637  Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
1638  // Now load value.
1639  llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
1640 
1641  // Shuffle vector to get vec3.
1642  V = Builder.CreateShuffleVector(V, llvm::UndefValue::get(vec4Ty),
1643  {0, 1, 2}, "extractVec");
1644  return EmitFromMemory(V, Ty);
1645  }
1646  }
1647  }
1648 
1649  // Atomic operations have to be done on integral types.
1650  LValue AtomicLValue =
1651  LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
1652  if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
1653  return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
1654  }
1655 
1656  llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
1657  if (isNontemporal) {
1658  llvm::MDNode *Node = llvm::MDNode::get(
1659  Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
1660  Load->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
1661  }
1662 
1663  CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
1664 
1665  if (EmitScalarRangeCheck(Load, Ty, Loc)) {
1666  // In order to prevent the optimizer from throwing away the check, don't
1667  // attach range metadata to the load.
1668  } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
1669  if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
1670  Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
1671 
1672  return EmitFromMemory(Load, Ty);
1673 }
1674 
1675 llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
1676  // Bool has a different representation in memory than in registers.
1677  if (hasBooleanRepresentation(Ty)) {
1678  // This should really always be an i1, but sometimes it's already
1679  // an i8, and it's awkward to track those cases down.
1680  if (Value->getType()->isIntegerTy(1))
1681  return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
1682  assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
1683  "wrong value rep of bool");
1684  }
1685 
1686  return Value;
1687 }
1688 
1689 llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
1690  // Bool has a different representation in memory than in registers.
1691  if (hasBooleanRepresentation(Ty)) {
1692  assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
1693  "wrong value rep of bool");
1694  return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
1695  }
1696 
1697  return Value;
1698 }
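// Example (illustrative): a C '_Bool' is an i1 in registers but an i8 in
// memory, so EmitToMemory produces "zext i1 %v to i8" and EmitFromMemory
// produces "trunc i8 %v to i1".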
1699 
1700 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
1701  bool Volatile, QualType Ty,
1702  LValueBaseInfo BaseInfo,
1703  TBAAAccessInfo TBAAInfo,
1704  bool isInit, bool isNontemporal) {
1705  if (!CGM.getCodeGenOpts().PreserveVec3Type) {
1706  // Handle vectors differently to get better performance.
1707  if (Ty->isVectorType()) {
1708  llvm::Type *SrcTy = Value->getType();
1709  auto *VecTy = dyn_cast<llvm::VectorType>(SrcTy);
1710  // Handle vec3 special.
1711  if (VecTy && VecTy->getNumElements() == 3) {
1712  // Our source is a vec3, do a shuffle vector to make it a vec4.
1713  llvm::Constant *Mask[] = {Builder.getInt32(0), Builder.getInt32(1),
1714  Builder.getInt32(2),
1715  llvm::UndefValue::get(Builder.getInt32Ty())};
1716  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1717  Value = Builder.CreateShuffleVector(Value, llvm::UndefValue::get(VecTy),
1718  MaskV, "extractVec");
1719  SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
1720  }
1721  if (Addr.getElementType() != SrcTy) {
1722  Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp");
1723  }
1724  }
1725  }
1726 
1727  Value = EmitToMemory(Value, Ty);
1728 
1729  LValue AtomicLValue =
1730  LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
1731  if (Ty->isAtomicType() ||
1732  (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
1733  EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
1734  return;
1735  }
1736 
1737  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
1738  if (isNontemporal) {
1739  llvm::MDNode *Node =
1740  llvm::MDNode::get(Store->getContext(),
1741  llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
1742  Store->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
1743  }
1744 
1745  CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
1746 }
1747 
1748 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
1749  bool isInit) {
1750  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
1751  lvalue.getType(), lvalue.getBaseInfo(),
1752  lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
1753 }
1754 
1755 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
1756 /// method emits the address of the lvalue, then loads the result as an rvalue,
1757 /// returning the rvalue.
1758 RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
1759  if (LV.isObjCWeak()) {
1760  // load of a __weak object.
1761  Address AddrWeakObj = LV.getAddress();
1762  return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
1763  AddrWeakObj));
1764  }
1765  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
1766  // In MRC mode, we do a load+autorelease.
1767  if (!getLangOpts().ObjCAutoRefCount) {
1768  return RValue::get(EmitARCLoadWeak(LV.getAddress()));
1769  }
1770 
1771  // In ARC mode, we load retained and then consume the value.
1772  llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
1773  Object = EmitObjCConsumeObject(LV.getType(), Object);
1774  return RValue::get(Object);
1775  }
1776 
1777  if (LV.isSimple()) {
1778  assert(!LV.getType()->isFunctionType());
1779 
1780  // Everything needs a load.
1781  return RValue::get(EmitLoadOfScalar(LV, Loc));
1782  }
1783 
1784  if (LV.isVectorElt()) {
1785  llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
1786  LV.isVolatileQualified());
1787  return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
1788  "vecext"));
1789  }
1790 
1791  // If this is a reference to a subset of the elements of a vector, either
1792  // shuffle the input or extract/insert them as appropriate.
1793  if (LV.isExtVectorElt())
1794  return EmitLoadOfExtVectorElementLValue(LV);
1795 
1796  // Global Register variables always invoke intrinsics
1797  if (LV.isGlobalReg())
1798  return EmitLoadOfGlobalRegLValue(LV);
1799 
1800  assert(LV.isBitField() && "Unknown LValue type!");
1801  return EmitLoadOfBitfieldLValue(LV, Loc);
1802 }
1803 
1804 RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
1805  SourceLocation Loc) {
1806  const CGBitFieldInfo &Info = LV.getBitFieldInfo();
1807 
1808  // Get the output type.
1809  llvm::Type *ResLTy = ConvertType(LV.getType());
1810 
1811  Address Ptr = LV.getBitFieldAddress();
1812  llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
1813 
1814  if (Info.IsSigned) {
1815  assert(static_cast<unsigned>(Info.Offset + Info.Size) <= Info.StorageSize);
1816  unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
1817  if (HighBits)
1818  Val = Builder.CreateShl(Val, HighBits, "bf.shl");
1819  if (Info.Offset + HighBits)
1820  Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr");
1821  } else {
1822  if (Info.Offset)
1823  Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr");
1824  if (static_cast<unsigned>(Info.Offset) + Info.Size < Info.StorageSize)
1825  Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(Info.StorageSize,
1826  Info.Size),
1827  "bf.clear");
1828  }
1829  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
1830  EmitScalarRangeCheck(Val, LV.getType(), Loc);
1831  return RValue::get(Val);
1832 }
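// Worked example (illustrative): for a signed bit-field with Size == 5 and
// Offset == 3 in 32-bit storage, HighBits == 32 - 3 - 5 == 24, so the value
// is extracted as ashr(shl(load, 24), 27), which also sign-extends it.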
1833 
1834 // If this is a reference to a subset of the elements of a vector, create an
1835 // appropriate shufflevector.
1836 RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
1837  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
1838  LV.isVolatileQualified());
1839 
1840  const llvm::Constant *Elts = LV.getExtVectorElts();
1841 
1842  // If the result of the expression is a non-vector type, we must be extracting
1843  // a single element. Just codegen as an extractelement.
1844  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
1845  if (!ExprVT) {
1846  unsigned InIdx = getAccessedFieldNo(0, Elts);
1847  llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
1848  return RValue::get(Builder.CreateExtractElement(Vec, Elt));
1849  }
1850 
1851  // Always use shuffle vector to try to retain the original program structure
1852  unsigned NumResultElts = ExprVT->getNumElements();
1853 
1854  SmallVector<llvm::Constant*, 4> Mask;
1855  for (unsigned i = 0; i != NumResultElts; ++i)
1856  Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));
1857 
1858  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
1859  Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
1860  MaskV);
1861  return RValue::get(Vec);
1862 }
1863 
1864 /// Generates lvalue for partial ext_vector access.
1865 Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
1866  Address VectorAddress = LV.getExtVectorAddress();
1867  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
1868  QualType EQT = ExprVT->getElementType();
1869  llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
1870 
1871  Address CastToPointerElement =
1872  Builder.CreateElementBitCast(VectorAddress, VectorElementTy,
1873  "conv.ptr.element");
1874 
1875  const llvm::Constant *Elts = LV.getExtVectorElts();
1876  unsigned ix = getAccessedFieldNo(0, Elts);
1877 
1878  Address VectorBasePtrPlusIx =
1879  Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
1880  getContext().getTypeSizeInChars(EQT),
1881  "vector.elt");
1882 
1883  return VectorBasePtrPlusIx;
1884 }
1885 
1886 /// Loads of global named registers are always calls to intrinsics.
1887 RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
1888  assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
1889  "Bad type for register variable");
1890  llvm::MDNode *RegName = cast<llvm::MDNode>(
1891  cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
1892 
1893  // We accept integer and pointer types only
1894  llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
1895  llvm::Type *Ty = OrigTy;
1896  if (OrigTy->isPointerTy())
1897  Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
1898  llvm::Type *Types[] = { Ty };
1899 
1900  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
1901  llvm::Value *Call = Builder.CreateCall(
1902  F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
1903  if (OrigTy->isPointerTy())
1904  Call = Builder.CreateIntToPtr(Call, OrigTy);
1905  return RValue::get(Call);
1906 }
1907 
1908 
1909 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
1910 /// lvalue, where both are guaranteed to have the same type, and that type
1911 /// is 'Ty'.
1912 void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
1913  bool isInit) {
1914  if (!Dst.isSimple()) {
1915  if (Dst.isVectorElt()) {
1916  // Read/modify/write the vector, inserting the new element.
1917  llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
1918  Dst.isVolatileQualified());
1919  Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
1920  Dst.getVectorIdx(), "vecins");
1921  Builder.CreateStore(Vec, Dst.getVectorAddress(),
1922  Dst.isVolatileQualified());
1923  return;
1924  }
1925 
1926  // If this is an update of extended vector elements, insert them as
1927  // appropriate.
1928  if (Dst.isExtVectorElt())
1929  return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
1930 
1931  if (Dst.isGlobalReg())
1932  return EmitStoreThroughGlobalRegLValue(Src, Dst);
1933 
1934  assert(Dst.isBitField() && "Unknown LValue type");
1935  return EmitStoreThroughBitfieldLValue(Src, Dst);
1936  }
1937 
1938  // There's special magic for assigning into an ARC-qualified l-value.
1939  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
1940  switch (Lifetime) {
1941  case Qualifiers::OCL_None:
1942  llvm_unreachable("present but none");
1943 
1944  case Qualifiers::OCL_ExplicitNone:
1945  // nothing special
1946  break;
1947 
1948  case Qualifiers::OCL_Strong:
1949  if (isInit) {
1950  Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
1951  break;
1952  }
1953  EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
1954  return;
1955 
1956  case Qualifiers::OCL_Weak:
1957  if (isInit)
1958  // Initialize and then skip the primitive store.
1959  EmitARCInitWeak(Dst.getAddress(), Src.getScalarVal());
1960  else
1961  EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
1962  return;
1963 
1964  case Qualifiers::OCL_Autoreleasing:
1965  Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
1966  Src.getScalarVal()));
1967  // fall into the normal path
1968  break;
1969  }
1970  }
1971 
1972  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
1973  // load of a __weak object.
1974  Address LvalueDst = Dst.getAddress();
1975  llvm::Value *src = Src.getScalarVal();
1976  CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
1977  return;
1978  }
1979 
1980  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
1981  // load of a __strong object.
1982  Address LvalueDst = Dst.getAddress();
1983  llvm::Value *src = Src.getScalarVal();
1984  if (Dst.isObjCIvar()) {
1985  assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
1986  llvm::Type *ResultType = IntPtrTy;
1987  Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
1988  llvm::Value *RHS = dst.getPointer();
1989  RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
1990  llvm::Value *LHS =
1991  Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType,
1992  "sub.ptr.lhs.cast");
1993  llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
1994  CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
1995  BytesBetween);
1996  } else if (Dst.isGlobalObjCRef()) {
1997  CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
1998  Dst.isThreadLocalRef());
1999  }
2000  else
2001  CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2002  return;
2003  }
2004 
2005  assert(Src.isScalar() && "Can't emit an agg store with this method");
2006  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2007 }
2008 
2009 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
2010  llvm::Value **Result) {
2011  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2012  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
2013  Address Ptr = Dst.getBitFieldAddress();
2014 
2015  // Get the source value, truncated to the width of the bit-field.
2016  llvm::Value *SrcVal = Src.getScalarVal();
2017 
2018  // Cast the source to the storage type and shift it into place.
2019  SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2020  /*IsSigned=*/false);
2021  llvm::Value *MaskedVal = SrcVal;
2022 
2023  // See if there are other bits in the bitfield's storage we'll need to load
2024  // and mask together with source before storing.
2025  if (Info.StorageSize != Info.Size) {
2026  assert(Info.StorageSize > Info.Size && "Invalid bitfield size.");
2027  llvm::Value *Val =
2028  Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2029 
2030  // Mask the source value as needed.
2031  if (!hasBooleanRepresentation(Dst.getType()))
2032  SrcVal = Builder.CreateAnd(SrcVal,
2033  llvm::APInt::getLowBitsSet(Info.StorageSize,
2034  Info.Size),
2035  "bf.value");
2036  MaskedVal = SrcVal;
2037  if (Info.Offset)
2038  SrcVal = Builder.CreateShl(SrcVal, Info.Offset, "bf.shl");
2039 
2040  // Mask out the original value.
2041  Val = Builder.CreateAnd(Val,
2042  ~llvm::APInt::getBitsSet(Info.StorageSize,
2043  Info.Offset,
2044  Info.Offset + Info.Size),
2045  "bf.clear");
2046 
2047  // Or together the unchanged values and the source value.
2048  SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2049  } else {
2050  assert(Info.Offset == 0);
2051  }
2052 
2053  // Write the new value back out.
2054  Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2055 
2056  // Return the new value of the bit-field, if requested.
2057  if (Result) {
2058  llvm::Value *ResultVal = MaskedVal;
2059 
2060  // Sign extend the value if needed.
2061  if (Info.IsSigned) {
2062  assert(Info.Size <= Info.StorageSize);
2063  unsigned HighBits = Info.StorageSize - Info.Size;
2064  if (HighBits) {
2065  ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2066  ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2067  }
2068  }
2069 
2070  ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2071  "bf.result.cast");
2072  *Result = EmitFromMemory(ResultVal, Dst.getType());
2073  }
2074 }
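// Illustrative IR for the read-modify-write above, with Size == 5 and
// Offset == 3 in i32 storage (value names invented):
//   %src = and i32 %v, 31        ; bf.value: keep the low Size bits
//   %shl = shl i32 %src, 3       ; bf.shl: shift into place
//   %old = and i32 %load, -249   ; bf.clear: mask out bits 3..7
//   %new = or i32 %old, %shl     ; bf.set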
2075 
2076 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
2077  LValue Dst) {
2078  // This access turns into a read/modify/write of the vector. Load the input
2079  // value now.
2080  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddress(),
2081  Dst.isVolatileQualified());
2082  const llvm::Constant *Elts = Dst.getExtVectorElts();
2083 
2084  llvm::Value *SrcVal = Src.getScalarVal();
2085 
2086  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2087  unsigned NumSrcElts = VTy->getNumElements();
2088  unsigned NumDstElts = Vec->getType()->getVectorNumElements();
2089  if (NumDstElts == NumSrcElts) {
2090  // Use a shufflevector if the source and destination have the same
2091  // number of elements, and invert the vector mask, since on this path
2092  // it is applied on the store side.
2093  SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
2094  for (unsigned i = 0; i != NumSrcElts; ++i)
2095  Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);
2096 
2097  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
2098  Vec = Builder.CreateShuffleVector(SrcVal,
2099  llvm::UndefValue::get(Vec->getType()),
2100  MaskV);
2101  } else if (NumDstElts > NumSrcElts) {
2102  // Extend the source vector to the same length and then shuffle it
2103  // into the destination.
2104  // FIXME: since we're shuffling with undef, can we just use the indices
2105  // into that? This could be simpler.
2106  SmallVector<llvm::Constant*, 4> ExtMask;
2107  for (unsigned i = 0; i != NumSrcElts; ++i)
2108  ExtMask.push_back(Builder.getInt32(i));
2109  ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
2110  llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
2111  llvm::Value *ExtSrcVal =
2112  Builder.CreateShuffleVector(SrcVal,
2113  llvm::UndefValue::get(SrcVal->getType()),
2114  ExtMaskV);
2115  // build identity
2116  SmallVector<llvm::Constant*, 4> Mask;
2117  for (unsigned i = 0; i != NumDstElts; ++i)
2118  Mask.push_back(Builder.getInt32(i));
2119 
2120  // When the vector size is odd and .odd or .hi is used, the last element
2121  // of the Elts constant array will be one past the size of the vector.
2122  // Ignore the last element here, if it is greater than the mask size.
2123  if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2124  NumSrcElts--;
2125 
2126  // modify which source elements get shuffled in
2127  for (unsigned i = 0; i != NumSrcElts; ++i)
2128  Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
2129  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
2130  Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
2131  } else {
2132  // We should never shorten the vector
2133  llvm_unreachable("unexpected shorten vector length");
2134  }
2135  } else {
2136  // If the Src is a scalar (not a vector) it must be updating one element.
2137  unsigned InIdx = getAccessedFieldNo(0, Elts);
2138  llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2139  Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2140  }
2141 
2142  Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
2143  Dst.isVolatileQualified());
2144 }
2145 
2146 /// Stores to global named registers are always calls to intrinsics.
2147 void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
2148  assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2149  "Bad type for register variable");
2150  llvm::MDNode *RegName = cast<llvm::MDNode>(
2151  cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2152  assert(RegName && "Register LValue is not metadata");
2153 
2154  // We accept integer and pointer types only
2155  llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2156  llvm::Type *Ty = OrigTy;
2157  if (OrigTy->isPointerTy())
2158  Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2159  llvm::Type *Types[] = { Ty };
2160 
2161  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2162  llvm::Value *Value = Src.getScalarVal();
2163  if (OrigTy->isPointerTy())
2164  Value = Builder.CreatePtrToInt(Value, Ty);
2165  Builder.CreateCall(
2166  F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2167 }
2168 
2169 // setObjCGCLValueClass - sets class of the lvalue for the purpose of
2170 // generating the write-barrier API. It is currently a global, ivar,
2171 // or neither.
2172 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2173  LValue &LV,
2174  bool IsMemberAccess=false) {
2175  if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2176  return;
2177 
2178  if (isa<ObjCIvarRefExpr>(E)) {
2179  QualType ExpTy = E->getType();
2180  if (IsMemberAccess && ExpTy->isPointerType()) {
2181  // If ivar is a structure pointer, assigning to field of
2182  // this struct follows gcc's behavior and makes it a non-ivar
2183  // write-barrier conservatively.
2184  ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
2185  if (ExpTy->isRecordType()) {
2186  LV.setObjCIvar(false);
2187  return;
2188  }
2189  }
2190  LV.setObjCIvar(true);
2191  auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2192  LV.setBaseIvarExp(Exp->getBase());
2193  LV.setObjCArray(E->getType()->isArrayType());
2194  return;
2195  }
2196 
2197  if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2198  if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2199  if (VD->hasGlobalStorage()) {
2200  LV.setGlobalObjCRef(true);
2201  LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2202  }
2203  }
2204  LV.setObjCArray(E->getType()->isArrayType());
2205  return;
2206  }
2207 
2208  if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2209  setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2210  return;
2211  }
2212 
2213  if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2214  setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2215  if (LV.isObjCIvar()) {
2216  // If cast is to a structure pointer, follow gcc's behavior and make it
2217  // a non-ivar write-barrier.
2218  QualType ExpTy = E->getType();
2219  if (ExpTy->isPointerType())
2220  ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
2221  if (ExpTy->isRecordType())
2222  LV.setObjCIvar(false);
2223  }
2224  return;
2225  }
2226 
2227  if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
2228  setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
2229  return;
2230  }
2231 
2232  if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
2233  setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2234  return;
2235  }
2236 
2237  if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
2238  setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2239  return;
2240  }
2241 
2242  if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
2243  setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2244  return;
2245  }
2246 
2247  if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
2248  setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
2249  if (LV.isObjCIvar() && !LV.isObjCArray())
2250  // Using array syntax to assign to what an ivar points to is not the
2251  // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2252  LV.setObjCIvar(false);
2253  else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
2254  // Using array syntax to assign to what a global points to is not the
2255  // same as assigning to the global itself. {id *G;} G[i] = 0;
2256  LV.setGlobalObjCRef(false);
2257  return;
2258  }
2259 
2260  if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
2261  setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
2262  // We don't know if the member is an 'ivar', but this flag is looked at
2263  // only in the context of LV.isObjCIvar().
2264  LV.setObjCArray(E->getType()->isArrayType());
2265  return;
2266  }
2267 }
2268 
2269 static llvm::Value *
2270 EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
2271  llvm::Value *V, llvm::Type *IRType,
2272  StringRef Name = StringRef()) {
2273  unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
2274  return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
2275 }
2276 
2277 static LValue EmitThreadPrivateVarDeclLValue(
2278  CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
2279  llvm::Type *RealVarTy, SourceLocation Loc) {
2280  Addr = CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
2281  Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy);
2282  return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2283 }
2284 
2285 static Address emitDeclTargetLinkVarDeclLValue(CodeGenFunction &CGF,
2286  const VarDecl *VD, QualType T) {
2287  llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2288  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2289  if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_To)
2290  return Address::invalid();
2291  assert(*Res == OMPDeclareTargetDeclAttr::MT_Link && "Expected link clause");
2292  QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
2293  Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);
2294  return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
2295 }
2296 
2297 Address
2298 CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
2299  LValueBaseInfo *PointeeBaseInfo,
2300  TBAAAccessInfo *PointeeTBAAInfo) {
2301  llvm::LoadInst *Load = Builder.CreateLoad(RefLVal.getAddress(),
2302  RefLVal.isVolatile());
2303  CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
2304 
2305  CharUnits Align = getNaturalTypeAlignment(RefLVal.getType()->getPointeeType(),
2306  PointeeBaseInfo, PointeeTBAAInfo,
2307  /* forPointeeType= */ true);
2308  return Address(Load, Align);
2309 }
2310 
2311 LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
2312  LValueBaseInfo PointeeBaseInfo;
2313  TBAAAccessInfo PointeeTBAAInfo;
2314  Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
2315  &PointeeTBAAInfo);
2316  return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
2317  PointeeBaseInfo, PointeeTBAAInfo);
2318 }
2319 
2320 Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
2321  const PointerType *PtrTy,
2322  LValueBaseInfo *BaseInfo,
2323  TBAAAccessInfo *TBAAInfo) {
2324  llvm::Value *Addr = Builder.CreateLoad(Ptr);
2325  return Address(Addr, getNaturalTypeAlignment(PtrTy->getPointeeType(),
2326  BaseInfo, TBAAInfo,
2327  /*forPointeeType=*/true));
2328 }
2329 
2330 LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
2331  const PointerType *PtrTy) {
2332  LValueBaseInfo BaseInfo;
2333  TBAAAccessInfo TBAAInfo;
2334  Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
2335  return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
2336 }
2337 
2338 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
2339  const Expr *E, const VarDecl *VD) {
2340  QualType T = E->getType();
2341 
2342  // If it's thread_local, emit a call to its wrapper function instead.
2343  if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2344  CGF.CGM.getCXXABI().usesThreadWrapperFunction())
2345  return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
2346  // Check if the variable is marked as declare target with link clause in
2347  // device codegen.
2348  if (CGF.getLangOpts().OpenMPIsDevice) {
2349  Address Addr = emitDeclTargetLinkVarDeclLValue(CGF, VD, T);
2350  if (Addr.isValid())
2351  return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2352  }
2353 
2354  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
2355  llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
2356  V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
2357  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
2358  Address Addr(V, Alignment);
2359  // Emit reference to the private copy of the variable if it is an OpenMP
2360  // threadprivate variable.
2361  if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
2362  VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2363  return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
2364  E->getExprLoc());
2365  }
2366  LValue LV = VD->getType()->isReferenceType() ?
2367  CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
2368  AlignmentSource::Decl) :
2369  CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2370  setObjCGCLValueClass(CGF.getContext(), E, LV);
2371  return LV;
2372 }
2373 
2374 static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
2375  const FunctionDecl *FD) {
2376  if (FD->hasAttr<WeakRefAttr>()) {
2377  ConstantAddress aliasee = CGM.GetWeakRefReference(FD);
2378  return aliasee.getPointer();
2379  }
2380 
2381  llvm::Constant *V = CGM.GetAddrOfFunction(FD);
2382  if (!FD->hasPrototype()) {
2383  if (const FunctionProtoType *Proto =
2384  FD->getType()->getAs<FunctionProtoType>()) {
2385  // Ugly case: for a K&R-style definition, the type of the definition
2386  // isn't the same as the type of a use. Correct for this with a
2387  // bitcast.
2388  QualType NoProtoType =
2389  CGM.getContext().getFunctionNoProtoType(Proto->getReturnType());
2390  NoProtoType = CGM.getContext().getPointerType(NoProtoType);
2391  V = llvm::ConstantExpr::getBitCast(V,
2392  CGM.getTypes().ConvertType(NoProtoType));
2393  }
2394  }
2395  return V;
2396 }
2397 
2398 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
2399  const Expr *E, const FunctionDecl *FD) {
2400  llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, FD);
2401  CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
2402  return CGF.MakeAddrLValue(V, E->getType(), Alignment,
2403  AlignmentSource::Decl);
2404 }
2405 
2406 static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
2407  llvm::Value *ThisValue) {
2408  QualType TagType = CGF.getContext().getTagDeclType(FD->getParent());
2409  LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType);
2410  return CGF.EmitLValueForField(LV, FD);
2411 }
2412 
2413 /// Named Registers are named metadata pointing to the register name
2414 /// which will be read from/written to as an argument to the intrinsic
2415 /// @llvm.read/write_register.
2416 /// So far, only the name is being passed down, but other options such as
2417 /// register type, allocation type or even optimization options could be
2418 /// passed down via the metadata node.
2419 static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
2420  SmallString<64> Name("llvm.named.register.");
2421  AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
2422  assert(Asm->getLabel().size() < 64-Name.size() &&
2423  "Register name too big");
2424  Name.append(Asm->getLabel());
2425  llvm::NamedMDNode *M =
2426  CGM.getModule().getOrInsertNamedMetadata(Name);
2427  if (M->getNumOperands() == 0) {
2428  llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
2429  Asm->getLabel());
2430  llvm::Metadata *Ops[] = {Str};
2431  M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
2432  }
2433 
2434  CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
2435 
2436  llvm::Value *Ptr =
2437  llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
2438  return LValue::MakeGlobalReg(Address(Ptr, Alignment), VD->getType());
2439 }
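// Example (illustrative): a file-scope declaration such as
//   register unsigned long stack asm("sp");
// yields named metadata "llvm.named.register.sp" holding !{!"sp"}, and each
// read of the variable becomes a call to the @llvm.read_register intrinsic
// with that metadata node as its argument.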
2440 
2441 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
2442  const NamedDecl *ND = E->getDecl();
2443  QualType T = E->getType();
2444 
2445  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
2446  // Global named registers are accessed via intrinsics only.
2447  if (VD->getStorageClass() == SC_Register &&
2448  VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
2449  return EmitGlobalNamedRegister(VD, CGM);
2450 
2451  // A DeclRefExpr for a reference initialized by a constant expression can
2452  // appear without being odr-used. Directly emit the constant initializer.
2453  const Expr *Init = VD->getAnyInitializer(VD);
2454  const auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl);
2455  if (Init && !isa<ParmVarDecl>(VD) && VD->getType()->isReferenceType() &&
2456  VD->isUsableInConstantExpressions(getContext()) &&
2457  VD->checkInitIsICE() &&
2458  // Do not emit if it is private OpenMP variable.
2459  !(E->refersToEnclosingVariableOrCapture() &&
2460  ((CapturedStmtInfo &&
2461  (LocalDeclMap.count(VD->getCanonicalDecl()) ||
2462  CapturedStmtInfo->lookup(VD->getCanonicalDecl()))) ||
2463  LambdaCaptureFields.lookup(VD->getCanonicalDecl()) ||
2464  (BD && BD->capturesVariable(VD))))) {
2465  llvm::Constant *Val =
2466  ConstantEmitter(*this).emitAbstract(E->getLocation(),
2467  *VD->evaluateValue(),
2468  VD->getType());
2469  assert(Val && "failed to emit reference constant expression");
2470  // FIXME: Eventually we will want to emit vector element references.
2471 
2472  // Should we be using the alignment of the constant pointer we emitted?
2473  CharUnits Alignment = getNaturalTypeAlignment(E->getType(),
2474  /* BaseInfo= */ nullptr,
2475  /* TBAAInfo= */ nullptr,
2476  /* forPointeeType= */ true);
2477  return MakeAddrLValue(Address(Val, Alignment), T, AlignmentSource::Decl);
2478  }
2479 
2480  // Check for captured variables.
2481  if (E->refersToEnclosingVariableOrCapture()) {
2482  VD = VD->getCanonicalDecl();
2483  if (auto *FD = LambdaCaptureFields.lookup(VD))
2484  return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
2485  else if (CapturedStmtInfo) {
2486  auto I = LocalDeclMap.find(VD);
2487  if (I != LocalDeclMap.end()) {
2488  if (VD->getType()->isReferenceType())
2489  return EmitLoadOfReferenceLValue(I->second, VD->getType(),
2490  AlignmentSource::Decl);
2491  return MakeAddrLValue(I->second, T);
2492  }
2493  LValue CapLVal =
2494  EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
2495  CapturedStmtInfo->getContextValue());
2496  return MakeAddrLValue(
2497  Address(CapLVal.getPointer(), getContext().getDeclAlign(VD)),
2498  CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl),
2499  CapLVal.getTBAAInfo());
2500  }
2501 
2502  assert(isa<BlockDecl>(CurCodeDecl));
2503  Address addr = GetAddrOfBlockDecl(VD);
2504  return MakeAddrLValue(addr, T, AlignmentSource::Decl);
2505  }
2506  }
2507 
2508  // FIXME: We should be able to assert this for FunctionDecls as well!
2509  // FIXME: We should be able to assert this for all DeclRefExprs, not just
2510  // those with a valid source location.
2511  assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
2512  !E->getLocation().isValid()) &&
2513  "Should not use decl without marking it used!");
2514 
2515  if (ND->hasAttr<WeakRefAttr>()) {
2516  const auto *VD = cast<ValueDecl>(ND);
2517  ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
2518  return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
2519  }
2520 
2521  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
2522  // Check if this is a global variable.
2523  if (VD->hasLinkage() || VD->isStaticDataMember())
2524  return EmitGlobalVarDeclLValue(*this, E, VD);
2525 
2526  Address addr = Address::invalid();
2527 
2528  // The variable should generally be present in the local decl map.
2529  auto iter = LocalDeclMap.find(VD);
2530  if (iter != LocalDeclMap.end()) {
2531  addr = iter->second;
2532 
2533  // Otherwise, it might be static local we haven't emitted yet for
2534  // some reason; most likely, because it's in an outer function.
2535  } else if (VD->isStaticLocal()) {
2536  addr = Address(CGM.getOrCreateStaticVarDecl(
2537  *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false)),
2538  getContext().getDeclAlign(VD));
2539 
2540  // No other cases for now.
2541  } else {
2542  llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
2543  }
2544 
2545 
2546  // Check for OpenMP threadprivate variables.
2547  if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
2548  VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2549  return EmitThreadPrivateVarDeclLValue(
2550  *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
2551  E->getExprLoc());
2552  }
2553 
2554  // Drill into block byref variables.
2555  bool isBlockByref = VD->isEscapingByref();
2556  if (isBlockByref) {
2557  addr = emitBlockByrefAddress(addr, VD);
2558  }
2559 
2560  // Drill into reference types.
2561  LValue LV = VD->getType()->isReferenceType() ?
2562  EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
2563  MakeAddrLValue(addr, T, AlignmentSource::Decl);
2564 
2565  bool isLocalStorage = VD->hasLocalStorage();
2566 
2567  bool NonGCable = isLocalStorage &&
2568  !VD->getType()->isReferenceType() &&
2569  !isBlockByref;
2570  if (NonGCable) {
2571  LV.getQuals().removeObjCGCAttr();
2572  LV.setNonGC(true);
2573  }
2574 
2575  bool isImpreciseLifetime =
2576  (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
2577  if (isImpreciseLifetime)
2578  LV.setARCPreciseLifetime(ARCImpreciseLifetime);
2579  setObjCGCLValueClass(getContext(), E, LV);
2580  return LV;
2581  }
2582 
2583  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
2584  return EmitFunctionDeclLValue(*this, E, FD);
2585 
2586  // FIXME: While we're emitting a binding from an enclosing scope, all other
2587  // DeclRefExprs we see should be implicitly treated as if they also refer to
2588  // an enclosing scope.
2589  if (const auto *BD = dyn_cast<BindingDecl>(ND))
2590  return EmitLValue(BD->getBinding());
2591 
2592  llvm_unreachable("Unhandled DeclRefExpr");
2593 }
2594 
2595 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
2596  // __extension__ doesn't affect lvalue-ness.
2597  if (E->getOpcode() == UO_Extension)
2598  return EmitLValue(E->getSubExpr());
2599 
2600  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
2601  switch (E->getOpcode()) {
2602  default: llvm_unreachable("Unknown unary operator lvalue!");
2603  case UO_Deref: {
2604  QualType T = E->getSubExpr()->getType()->getPointeeType();
2605  assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
2606 
2607  LValueBaseInfo BaseInfo;
2608  TBAAAccessInfo TBAAInfo;
2609  Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
2610  &TBAAInfo);
2611  LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
2612  LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
2613 
2614  // We should not generate __weak write barrier on indirect reference
2615  // of a pointer to object; as in void foo (__weak id *param); *param = 0;
2616  // But, we continue to generate __strong write barrier on indirect write
2617  // into a pointer to object.
2618  if (getLangOpts().ObjC &&
2619  getLangOpts().getGC() != LangOptions::NonGC &&
2620  LV.isObjCWeak())
2621  LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
2622  return LV;
2623  }
2624  case UO_Real:
2625  case UO_Imag: {
2626  LValue LV = EmitLValue(E->getSubExpr());
2627  assert(LV.isSimple() && "real/imag on non-ordinary l-value");
2628 
2629  // __real is valid on scalars. This is a faster way of testing that.
2630  // __imag can only produce an rvalue on scalars.
2631  if (E->getOpcode() == UO_Real &&
2632  !LV.getAddress().getElementType()->isStructTy()) {
2633  assert(E->getSubExpr()->getType()->isArithmeticType());
2634  return LV;
2635  }
2636 
2637  QualType T = ExprTy->castAs<ComplexType>()->getElementType();
2638 
2639  Address Component =
2640  (E->getOpcode() == UO_Real
2641  ? emitAddrOfRealComponent(LV.getAddress(), LV.getType())
2642  : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
2643  LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
2644  CGM.getTBAAInfoForSubobject(LV, T));
2645  ElemLV.getQuals().addQualifiers(LV.getQuals());
2646  return ElemLV;
2647  }
2648  case UO_PreInc:
2649  case UO_PreDec: {
2650  LValue LV = EmitLValue(E->getSubExpr());
2651  bool isInc = E->getOpcode() == UO_PreInc;
2652 
2653  if (E->getType()->isAnyComplexType())
2654  EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
2655  else
2656  EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
2657  return LV;
2658  }
2659  }
2660 }
2661 
2662 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
2663  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
2664  E->getType(), AlignmentSource::Decl);
2665 }
2666 
2667 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
2668  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
2669  E->getType(), AlignmentSource::Decl);
2670 }
2671 
2672 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
2673  auto SL = E->getFunctionName();
2674  assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
2675  StringRef FnName = CurFn->getName();
2676  if (FnName.startswith("\01"))
2677  FnName = FnName.substr(1);
2678  StringRef NameItems[] = {
2679  PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
2680  std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
2681  if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
2682  std::string Name = SL->getString();
2683  if (!Name.empty()) {
2684  unsigned Discriminator =
2685  CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
2686  if (Discriminator)
2687  Name += "_" + Twine(Discriminator + 1).str();
2688  auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
2689  return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
2690  } else {
2691  auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str());
2692  return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
2693  }
2694  }
2695  auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
2696  return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
2697 }
2698 
2699 /// Emit a type description suitable for use by a runtime sanitizer library. The
2700 /// format of a type descriptor is
2701 ///
2702 /// \code
2703 /// { i16 TypeKind, i16 TypeInfo }
2704 /// \endcode
2705 ///
2706 /// followed by an array of i8 containing the type name. TypeKind is 0 for an
2707 /// integer, 1 for a floating point value, and -1 for anything else.
2708 llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
2709  // Only emit each type's descriptor once.
2710  if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
2711  return C;
2712 
2713  uint16_t TypeKind = -1;
2714  uint16_t TypeInfo = 0;
2715 
2716  if (T->isIntegerType()) {
2717  TypeKind = 0;
2718  TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
2719  (T->isSignedIntegerType() ? 1 : 0);
2720  } else if (T->isFloatingType()) {
2721  TypeKind = 1;
2722  TypeInfo = getContext().getTypeSize(T);
2723  }
2724 
2725  // Format the type name as if for a diagnostic, including quotes and
2726  // optionally an 'aka'.
2727  SmallString<32> Buffer;
2728  CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
2729  (intptr_t)T.getAsOpaquePtr(),
2730  StringRef(), StringRef(), None, Buffer,
2731  None);
2732 
2733  llvm::Constant *Components[] = {
2734  Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
2735  llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
2736  };
2737  llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
2738 
2739  auto *GV = new llvm::GlobalVariable(
2740  CGM.getModule(), Descriptor->getType(),
2741  /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
2742  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2743  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
2744 
2745  // Remember the descriptor for this type.
2746  CGM.setTypeDescriptorInMap(T, GV);
2747 
2748  return GV;
2749 }
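// Example (illustrative): for a 32-bit signed 'int', the descriptor above is
// { i16 0, i16 11 } followed by the string "'int'", since
// TypeInfo = (log2(32) << 1) | 1 = 11.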
2750 
2751 llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
2752  llvm::Type *TargetTy = IntPtrTy;
2753 
2754  if (V->getType() == TargetTy)
2755  return V;
2756 
2757  // Floating-point types which fit into intptr_t are bitcast to integers
2758  // and then passed directly (after zero-extension, if necessary).
2759  if (V->getType()->isFloatingPointTy()) {
2760  unsigned Bits = V->getType()->getPrimitiveSizeInBits();
2761  if (Bits <= TargetTy->getIntegerBitWidth())
2762  V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
2763  Bits));
2764  }
2765 
2766  // Integers which fit in intptr_t are zero-extended and passed directly.
2767  if (V->getType()->isIntegerTy() &&
2768  V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
2769  return Builder.CreateZExt(V, TargetTy);
2770 
2771  // Pointers are passed directly, everything else is passed by address.
2772  if (!V->getType()->isPointerTy()) {
2773  Address Ptr = CreateDefaultAlignTempAlloca(V->getType());
2774  Builder.CreateStore(V, Ptr);
2775  V = Ptr.getPointer();
2776  }
2777  return Builder.CreatePtrToInt(V, TargetTy);
2778 }
2779 
2780 /// Emit a representation of a SourceLocation for passing to a handler
2781 /// in a sanitizer runtime library. The format for this data is:
2782 /// \code
2783 /// struct SourceLocation {
2784 /// const char *Filename;
2785 /// int32_t Line, Column;
2786 /// };
2787 /// \endcode
2788 /// For an invalid SourceLocation, the Filename pointer is null.
2789 llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
2790  llvm::Constant *Filename;
2791  int Line, Column;
2792 
2793  PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
2794  if (PLoc.isValid()) {
2795  StringRef FilenameString = PLoc.getFilename();
2796 
2797  int PathComponentsToStrip =
2798  CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
2799  if (PathComponentsToStrip < 0) {
2800  assert(PathComponentsToStrip != INT_MIN);
2801  int PathComponentsToKeep = -PathComponentsToStrip;
2802  auto I = llvm::sys::path::rbegin(FilenameString);
2803  auto E = llvm::sys::path::rend(FilenameString);
2804  while (I != E && --PathComponentsToKeep)
2805  ++I;
2806 
2807  FilenameString = FilenameString.substr(I - E);
2808  } else if (PathComponentsToStrip > 0) {
2809  auto I = llvm::sys::path::begin(FilenameString);
2810  auto E = llvm::sys::path::end(FilenameString);
2811  while (I != E && PathComponentsToStrip--)
2812  ++I;
2813 
2814  if (I != E)
2815  FilenameString =
2816  FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
2817  else
2818  FilenameString = llvm::sys::path::filename(FilenameString);
2819  }
2820 
2821  auto FilenameGV = CGM.GetAddrOfConstantCString(FilenameString, ".src");
2822  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
2823  cast<llvm::GlobalVariable>(FilenameGV.getPointer()));
2824  Filename = FilenameGV.getPointer();
2825  Line = PLoc.getLine();
2826  Column = PLoc.getColumn();
2827  } else {
2828  Filename = llvm::Constant::getNullValue(Int8PtrTy);
2829  Line = Column = 0;
2830  }
2831 
2832  llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
2833  Builder.getInt32(Column)};
2834 
2835  return llvm::ConstantStruct::getAnon(Data);
2836 }
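// Example (illustrative): with -fsanitize-undefined-strip-path-components=2,
// a location at line 3, column 9 of "a/b/c/d.c" is emitted roughly as
// { i8* <"c/d.c">, i32 3, i32 9 }; an invalid location becomes { null, 0, 0 }.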
2837 
2838 namespace {
2839 /// Specify under what conditions this check can be recovered.
2840 enum class CheckRecoverableKind {
2841  /// Always terminate program execution if this check fails.
2842  Unrecoverable,
2843  /// Check supports recovering, runtime has both fatal (noreturn) and
2844  /// non-fatal handlers for this check.
2845  Recoverable,
2846  /// Runtime conditionally aborts; recovery must always be supported.
2847  AlwaysRecoverable
2848 };
2849 }
2850 
2851 static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
2852  assert(llvm::countPopulation(Kind) == 1);
2853  switch (Kind) {
2854  case SanitizerKind::Vptr:
2855  return CheckRecoverableKind::AlwaysRecoverable;
2856  case SanitizerKind::Return:
2857  case SanitizerKind::Unreachable:
2858  return CheckRecoverableKind::Unrecoverable;
2859  default:
2860  return CheckRecoverableKind::Recoverable;
2861  }
2862 }
2863 
2864 namespace {
2865 struct SanitizerHandlerInfo {
2866  char const *const Name;
2867  unsigned Version;
2868 };
2869 }
2870 
2871 const SanitizerHandlerInfo SanitizerHandlers[] = {
2872 #define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
2873  LIST_SANITIZER_CHECKS
2874 #undef SANITIZER_CHECK
2875 };
2876 
2877 static void emitCheckHandlerCall(CodeGenFunction &CGF,
2878  llvm::FunctionType *FnType,
2879  ArrayRef<llvm::Value *> FnArgs,
2880  SanitizerHandler CheckHandler,
2881  CheckRecoverableKind RecoverKind, bool IsFatal,
2882  llvm::BasicBlock *ContBB) {
2883  assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
2884  Optional<ApplyDebugLocation> DL;
2885  if (!CGF.Builder.getCurrentDebugLocation()) {
2886  // Ensure that the call has at least an artificial debug location.
2887  DL.emplace(CGF, SourceLocation());
2888  }
2889  bool NeedsAbortSuffix =
2890  IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
2891  bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
2892  const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
2893  const StringRef CheckName = CheckInfo.Name;
2894  std::string FnName = "__ubsan_handle_" + CheckName.str();
2895  if (CheckInfo.Version && !MinimalRuntime)
2896  FnName += "_v" + llvm::utostr(CheckInfo.Version);
2897  if (MinimalRuntime)
2898  FnName += "_minimal";
2899  if (NeedsAbortSuffix)
2900  FnName += "_abort";
2901  bool MayReturn =
2902  !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
2903 
2904  llvm::AttrBuilder B;
2905  if (!MayReturn) {
2906  B.addAttribute(llvm::Attribute::NoReturn)
2907  .addAttribute(llvm::Attribute::NoUnwind);
2908  }
2909  B.addAttribute(llvm::Attribute::UWTable);
2910 
2911  llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(
2912  FnType, FnName,
2913  llvm::AttributeList::get(CGF.getLLVMContext(),
2914  llvm::AttributeList::FunctionIndex, B),
2915  /*Local=*/true);
2916  llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
2917  if (!MayReturn) {
2918  HandlerCall->setDoesNotReturn();
2919  CGF.Builder.CreateUnreachable();
2920  } else {
2921  CGF.Builder.CreateBr(ContBB);
2922  }
2923 }
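// Handler naming example (illustrative): the code above produces runtime
// entry points of the form "__ubsan_handle_<check>", with an optional
// "_v<N>" version suffix, "_minimal" when targeting the minimal runtime,
// and "_abort" for the fatal variant.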
2924 
2925 void CodeGenFunction::EmitCheck(
2926  ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
2927  SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
2928  ArrayRef<llvm::Value *> DynamicArgs) {
2929  assert(IsSanitizerScope);
2930  assert(Checked.size() > 0);
2931  assert(CheckHandler >= 0 &&
2932  size_t(CheckHandler) < llvm::array_lengthof(SanitizerHandlers));
2933  const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
2934 
2935  llvm::Value *FatalCond = nullptr;
2936  llvm::Value *RecoverableCond = nullptr;
2937  llvm::Value *TrapCond = nullptr;
2938  for (int i = 0, n = Checked.size(); i < n; ++i) {
2939  llvm::Value *Check = Checked[i].first;
2940  // -fsanitize-trap= overrides -fsanitize-recover=.
2941  llvm::Value *&Cond =
2942  CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)
2943  ? TrapCond
2944  : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
2945  ? RecoverableCond
2946  : FatalCond;
2947  Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
2948  }
2949 
2950  if (TrapCond)
2951  EmitTrapCheck(TrapCond);
2952  if (!FatalCond && !RecoverableCond)
2953  return;
2954 
2955  llvm::Value *JointCond;
2956  if (FatalCond && RecoverableCond)
2957  JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
2958  else
2959  JointCond = FatalCond ? FatalCond : RecoverableCond;
2960  assert(JointCond);
2961 
2962  CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
2963  assert(SanOpts.has(Checked[0].second));
2964 #ifndef NDEBUG
2965  for (int i = 1, n = Checked.size(); i < n; ++i) {
2966  assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
2967  "All recoverable kinds in a single check must be same!");
2968  assert(SanOpts.has(Checked[i].second));
2969  }
2970 #endif
2971 
2972  llvm::BasicBlock *Cont = createBasicBlock("cont");
2973  llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
2974  llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
2976  // Give a hint that we very much don't expect to execute the handler.
2976  // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
2977  llvm::MDBuilder MDHelper(getLLVMContext());
2978  llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
2979  Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
2980  EmitBlock(Handlers);
2981 
2982  // Handler functions take an i8* pointing to the (handler-specific) static
2983  // information block, followed by a sequence of intptr_t arguments
2984  // representing operand values.
2985  SmallVector<llvm::Value *, 4> Args;
2986  SmallVector<llvm::Type *, 4> ArgTypes;
2987  if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
2988  Args.reserve(DynamicArgs.size() + 1);
2989  ArgTypes.reserve(DynamicArgs.size() + 1);
2990 
2991  // Emit handler arguments and create handler function type.
2992  if (!StaticArgs.empty()) {
2993  llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
2994  auto *InfoPtr =
2995  new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
2996  llvm::GlobalVariable::PrivateLinkage, Info);
2997  InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2998  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
2999  Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy));
3000  ArgTypes.push_back(Int8PtrTy);
3001  }
3002 
3003  for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
3004  Args.push_back(EmitCheckValue(DynamicArgs[i]));
3005  ArgTypes.push_back(IntPtrTy);
3006  }
3007  }
3008 
3009  llvm::FunctionType *FnType =
3010  llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
3011 
3012  if (!FatalCond || !RecoverableCond) {
3013  // Simple case: we need to generate a single handler call, either
3014  // fatal, or non-fatal.
3015  emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
3016  (FatalCond != nullptr), Cont);
3017  } else {
3018  // Emit two handler calls: first one for set of unrecoverable checks,
3019  // another one for recoverable.
3020  llvm::BasicBlock *NonFatalHandlerBB =
3021  createBasicBlock("non_fatal." + CheckName);
3022  llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
3023  Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
3024  EmitBlock(FatalHandlerBB);
3025  emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
3026  NonFatalHandlerBB);
3027  EmitBlock(NonFatalHandlerBB);
3028  emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
3029  Cont);
3030  }
3031 
3032  EmitBlock(Cont);
3033 }
3034 
3035 void CodeGenFunction::EmitCfiSlowPathCheck(
3036  SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId,
3037  llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) {
3038  llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
3039 
3040  llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
3041  llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
3042 
3043  llvm::MDBuilder MDHelper(getLLVMContext());
3044  llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
3045  BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
3046 
3047  EmitBlock(CheckBB);
3048 
3049  bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);
3050 
3051  llvm::CallInst *CheckCall;
3052  llvm::Constant *SlowPathFn;
3053  if (WithDiag) {
3054  llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3055  auto *InfoPtr =
3056  new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3057  llvm::GlobalVariable::PrivateLinkage, Info);
3058  InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3059  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3060 
3061  SlowPathFn = CGM.getModule().getOrInsertFunction(
3062  "__cfi_slowpath_diag",
3063  llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
3064  false));
3065  CheckCall = Builder.CreateCall(
3066  SlowPathFn, {TypeId, Ptr, Builder.CreateBitCast(InfoPtr, Int8PtrTy)});
3067  } else {
3068  SlowPathFn = CGM.getModule().getOrInsertFunction(
3069  "__cfi_slowpath",
3070  llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
3071  CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
3072  }
3073 
3074  CGM.setDSOLocal(cast<llvm::GlobalValue>(SlowPathFn->stripPointerCasts()));
3075  CheckCall->setDoesNotThrow();
3076 
3077  EmitBlock(Cont);
3078 }
3079 
3080 // Emit a stub for __cfi_check function so that the linker knows about this
3081 // symbol in LTO mode.
3082 void CodeGenFunction::EmitCfiCheckStub() {
3083  llvm::Module *M = &CGM.getModule();
3084  auto &Ctx = M->getContext();
3085  llvm::Function *F = llvm::Function::Create(
3086  llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false),
3087  llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
3088  CGM.setDSOLocal(F);
3089  llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
3090  // FIXME: consider emitting an intrinsic call like
3091  // call void @llvm.cfi_check(i64 %0, i8* %1, i8* %2)
3092  // which can be lowered in CrossDSOCFI pass to the actual contents of
3093  // __cfi_check. This would allow inlining of __cfi_check calls.
3094  llvm::CallInst::Create(
3095  llvm::Intrinsic::getDeclaration(M, llvm::Intrinsic::trap), "", BB);
3096  llvm::ReturnInst::Create(Ctx, nullptr, BB);
3097 }
3098 
3099 // This function is basically a switch over the CFI failure kind, which is
3100 // extracted from CFICheckFailData (1st function argument). Each case is either
3101 // llvm.trap or a call to one of the two runtime handlers, based on
3102 // -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
3103 // failure kind) traps, but this should really never happen. CFICheckFailData
3104 // can be nullptr if the calling module has -fsanitize-trap behavior for this
3105 // check kind; in this case __cfi_check_fail traps as well.
3106 void CodeGenFunction::EmitCfiCheckFail() {
3107  SanitizerScope SanScope(this);
3108  FunctionArgList Args;
3109  ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
3110  ImplicitParamDecl::Other);
3111  ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
3112  ImplicitParamDecl::Other);
3113  Args.push_back(&ArgData);
3114  Args.push_back(&ArgAddr);
3115 
3116  const CGFunctionInfo &FI =
3117  CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args);
3118 
3119  llvm::Function *F = llvm::Function::Create(
3120  llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
3121  llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
3122  F->setVisibility(llvm::GlobalValue::HiddenVisibility);
3123 
3124  StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
3125  SourceLocation());
3126 
3127  // This function should not be affected by the blacklist. This function does
3128  // not have a source location, but "src:*" would still apply. Revert any
3129  // changes to SanOpts made in StartFunction.
3130  SanOpts = CGM.getLangOpts().Sanitize;
3131 
3132  llvm::Value *Data =
3133  EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
3134  CGM.getContext().VoidPtrTy, ArgData.getLocation());
3135  llvm::Value *Addr =
3136  EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
3137  CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
3138 
3139  // Data == nullptr means the calling module has trap behaviour for this check.
3140  llvm::Value *DataIsNotNullPtr =
3141  Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
3142  EmitTrapCheck(DataIsNotNullPtr);
3143 
3144  llvm::StructType *SourceLocationTy =
3145  llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
3146  llvm::StructType *CfiCheckFailDataTy =
3147  llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
3148 
3149  llvm::Value *V = Builder.CreateConstGEP2_32(
3150  CfiCheckFailDataTy,
3151  Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
3152  0);
3153  Address CheckKindAddr(V, getIntAlign());
3154  llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
3155 
3156  llvm::Value *AllVtables = llvm::MetadataAsValue::get(
3157  CGM.getLLVMContext(),
3158  llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
3159  llvm::Value *ValidVtable = Builder.CreateZExt(
3160  Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
3161  {Addr, AllVtables}),
3162  IntPtrTy);
3163 
3164  const std::pair<int, SanitizerMask> CheckKinds[] = {
3165  {CFITCK_VCall, SanitizerKind::CFIVCall},
3166  {CFITCK_NVCall, SanitizerKind::CFINVCall},
3167  {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast},
3168  {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast},
3169  {CFITCK_ICall, SanitizerKind::CFIICall}};
3170 
3172  for (auto CheckKindMaskPair : CheckKinds) {
3173  int Kind = CheckKindMaskPair.first;
3174  SanitizerMask Mask = CheckKindMaskPair.second;
3175  llvm::Value *Cond =
3176  Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
3177  if (CGM.getLangOpts().Sanitize.has(Mask))
3178  EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
3179  {Data, Addr, ValidVtable});
3180  else
3181  EmitTrapCheck(Cond);
3182  }
3183 
3184  FinishFunction();
3185  // The only reference to this function will be created during LTO link.
3186  // Make sure it survives until then.
3187  CGM.addUsedGlobal(F);
3188 }
3189 
3190 void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
3191  if (SanOpts.has(SanitizerKind::Unreachable)) {
3192  SanitizerScope SanScope(this);
3193  EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
3194  SanitizerKind::Unreachable),
3195  SanitizerHandler::BuiltinUnreachable,
3196  EmitCheckSourceLocation(Loc), None);
3197  }
3198  Builder.CreateUnreachable();
3199 }
3200 
3201 void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked) {
3202  llvm::BasicBlock *Cont = createBasicBlock("cont");
3203 
3204  // If we're optimizing, collapse all calls to trap down to just one per
3205  // function to save on code size.
3206  if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) {
3207  TrapBB = createBasicBlock("trap");
3208  Builder.CreateCondBr(Checked, Cont, TrapBB);
3209  EmitBlock(TrapBB);
3210  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
3211  TrapCall->setDoesNotReturn();
3212  TrapCall->setDoesNotThrow();
3213  Builder.CreateUnreachable();
3214  } else {
3215  Builder.CreateCondBr(Checked, Cont, TrapBB);
3216  }
3217 
3218  EmitBlock(Cont);
3219 }
3220 
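// For illustration: when optimizing, every EmitTrapCheck in a function reuses
// the single TrapBB, so the emitted IR looks roughly like:
//
//   br i1 %checked.a, label %cont, label %trap
//   ...
//   br i1 %checked.b, label %cont1, label %trap
//   trap:                                  ; shared by both checks
//     call void @llvm.trap()
//     unreachable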
3221 llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
3222  llvm::CallInst *TrapCall = Builder.CreateCall(CGM.getIntrinsic(IntrID));
3223 
3224  if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3225  auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3226  CGM.getCodeGenOpts().TrapFuncName);
3227  TrapCall->addAttribute(llvm::AttributeList::FunctionIndex, A);
3228  }
3229 
3230  return TrapCall;
3231 }
3232 
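// For illustration: with -ftrap-function=mytrap (CodeGenOpts.TrapFuncName),
// the call above carries a function attribute,
//
//   call void @llvm.trap() #2
//   ...
//   attributes #2 = { "trap-func-name"="mytrap" }
//
// which the backend lowers to a call to mytrap() instead of a trap
// instruction.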
3233 Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
3234  LValueBaseInfo *BaseInfo,
3235  TBAAAccessInfo *TBAAInfo) {
3236  assert(E->getType()->isArrayType() &&
3237  "Array to pointer decay must have array source type!");
3238 
3239  // Expressions of array type can't be bitfields or vector elements.
3240  LValue LV = EmitLValue(E);
3241  Address Addr = LV.getAddress();
3242 
3243  // If the array type was an incomplete type, we need to make sure
3244  // the decay ends up being the right type.
3245  llvm::Type *NewTy = ConvertType(E->getType());
3246  Addr = Builder.CreateElementBitCast(Addr, NewTy);
3247 
3248  // Note that VLA pointers are always decayed, so we don't need to do
3249  // anything here.
3250  if (!E->getType()->isVariableArrayType()) {
3251  assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3252  "Expected pointer to array");
3253  Addr = Builder.CreateStructGEP(Addr, 0, CharUnits::Zero(), "arraydecay");
3254  }
3255 
3256  // The result of this decay conversion points to an array element within the
3257  // base lvalue. However, since TBAA currently does not support representing
3258  // accesses to elements of member arrays, we conservatively represent accesses
3259  // to the pointee object as if it had no base lvalue specified at all.
3260  // TODO: Support TBAA for member arrays.
3261  QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
3262  if (BaseInfo) *BaseInfo = LV.getBaseInfo();
3263  if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
3264 
3265  return Builder.CreateElementBitCast(Addr, ConvertTypeForMem(EltType));
3266 }
3267 
3268 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
3269 /// array to pointer, return the array subexpression.
3270 static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
3271  // If this isn't just an array->pointer decay, bail out.
3272  const auto *CE = dyn_cast<CastExpr>(E);
3273  if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
3274  return nullptr;
3275 
3276  // If this is a decay from a variable-width array, bail out.
3277  const Expr *SubExpr = CE->getSubExpr();
3278  if (SubExpr->getType()->isVariableArrayType())
3279  return nullptr;
3280 
3281  return SubExpr;
3282 }
3283 
3284 static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
3285  llvm::Value *ptr,
3286  ArrayRef<llvm::Value*> indices,
3287  bool inbounds,
3288  bool signedIndices,
3289  SourceLocation loc,
3290  const llvm::Twine &name = "arrayidx") {
3291  if (inbounds) {
3292  return CGF.EmitCheckedInBoundsGEP(ptr, indices, signedIndices,
3293  CodeGenFunction::NotSubtraction, loc,
3294  name);
3295  } else {
3296  return CGF.Builder.CreateGEP(ptr, indices, name);
3297  }
3298 }
3299 
3300 static CharUnits getArrayElementAlign(CharUnits arrayAlign,
3301  llvm::Value *idx,
3302  CharUnits eltSize) {
3303  // If we have a constant index, we can use the exact offset of the
3304  // element we're accessing.
3305  if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
3306  CharUnits offset = constantIdx->getZExtValue() * eltSize;
3307  return arrayAlign.alignmentAtOffset(offset);
3308 
3309  // Otherwise, use the worst-case alignment for any element.
3310  } else {
3311  return arrayAlign.alignmentOfArrayElement(eltSize);
3312  }
3313 }
3314 
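// Worked example for getArrayElementAlign above: for an array whose base
// alignment is 16 and whose elements are 8 bytes wide, a constant index of 2
// yields alignmentAtOffset(16) == 16, while an unknown index must settle for
// alignmentOfArrayElement(8) == 8, the best alignment that holds for every
// element.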
3315 static QualType getFixedSizeElementType(const ASTContext &ctx,
3316  const VariableArrayType *vla) {
3317  QualType eltType;
3318  do {
3319  eltType = vla->getElementType();
3320  } while ((vla = ctx.getAsVariableArrayType(eltType)));
3321  return eltType;
3322 }
3323 
3324 static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
3325  ArrayRef<llvm::Value *> indices,
3326  QualType eltType, bool inbounds,
3327  bool signedIndices, SourceLocation loc,
3328  const llvm::Twine &name = "arrayidx") {
3329  // All the indices except the last must be zero.
3330 #ifndef NDEBUG
3331  for (auto idx : indices.drop_back())
3332  assert(isa<llvm::ConstantInt>(idx) &&
3333  cast<llvm::ConstantInt>(idx)->isZero());
3334 #endif
3335 
3336  // Determine the element size of the statically-sized base. This is
3337  // the thing that the indices are expressed in terms of.
3338  if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
3339  eltType = getFixedSizeElementType(CGF.getContext(), vla);
3340  }
3341 
3342  // We can use that to compute the best alignment of the element.
3343  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
3344  CharUnits eltAlign =
3345  getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
3346 
3347  llvm::Value *eltPtr = emitArraySubscriptGEP(
3348  CGF, addr.getPointer(), indices, inbounds, signedIndices, loc, name);
3349  return Address(eltPtr, eltAlign);
3350 }
3351 
3352 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
3353  bool Accessed) {
3354  // The index must always be an integer, which is not an aggregate. Emit it
3355  // in lexical order (this complexity is, sadly, required by C++17).
3356  llvm::Value *IdxPre =
3357  (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
3358  bool SignedIndices = false;
3359  auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
3360  auto *Idx = IdxPre;
3361  if (E->getLHS() != E->getIdx()) {
3362  assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
3363  Idx = EmitScalarExpr(E->getIdx());
3364  }
3365 
3366  QualType IdxTy = E->getIdx()->getType();
3367  bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
3368  SignedIndices |= IdxSigned;
3369 
3370  if (SanOpts.has(SanitizerKind::ArrayBounds))
3371  EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
3372 
3373  // Extend or truncate the index type to 32 or 64 bits.
3374  if (Promote && Idx->getType() != IntPtrTy)
3375  Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
3376 
3377  return Idx;
3378  };
3379  IdxPre = nullptr;
3380 
3381  // If the base is a vector type, then we are forming a vector element lvalue
3382  // with this subscript.
3383  if (E->getBase()->getType()->isVectorType() &&
3384  !isa<ExtVectorElementExpr>(E->getBase())) {
3385  // Emit the vector as an lvalue to get its address.
3386  LValue LHS = EmitLValue(E->getBase());
3387  auto *Idx = EmitIdxAfterBase(/*Promote*/false);
3388  assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
3389  return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
3390  LHS.getBaseInfo(), TBAAAccessInfo());
3391  }
3392 
3393  // All the other cases basically behave like simple offsetting.
3394 
3395  // Handle the extvector case we ignored above.
3396  if (isa<ExtVectorElementExpr>(E->getBase())) {
3397  LValue LV = EmitLValue(E->getBase());
3398  auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3399  Address Addr = EmitExtVectorElementLValue(LV);
3400 
3401  QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
3402  Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
3403  SignedIndices, E->getExprLoc());
3404  return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
3405  CGM.getTBAAInfoForSubobject(LV, EltType));
3406  }
3407 
3408  LValueBaseInfo EltBaseInfo;
3409  TBAAAccessInfo EltTBAAInfo;
3410  Address Addr = Address::invalid();
3411  if (const VariableArrayType *vla =
3412  getContext().getAsVariableArrayType(E->getType())) {
3413  // The base must be a pointer, which is not an aggregate. Emit
3414  // it. It needs to be emitted first in case it's what captures
3415  // the VLA bounds.
3416  Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
3417  auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3418 
3419  // The element count here is the total number of non-VLA elements.
3420  llvm::Value *numElements = getVLASize(vla).NumElts;
3421 
3422  // Effectively, the multiply by the VLA size is part of the GEP.
3423  // GEP indexes are signed, and scaling an index isn't permitted to
3424  // signed-overflow, so we use the same semantics for our explicit
3425  // multiply. We suppress this if overflow is not undefined behavior.
3426  if (getLangOpts().isSignedOverflowDefined()) {
3427  Idx = Builder.CreateMul(Idx, numElements);
3428  } else {
3429  Idx = Builder.CreateNSWMul(Idx, numElements);
3430  }
3431 
3432  Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
3433  !getLangOpts().isSignedOverflowDefined(),
3434  SignedIndices, E->getExprLoc());
3435 
3436  } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
3437  // Indexing over an interface, as in "NSString *P; P[4];"
3438 
3439  // Emit the base pointer.
3440  Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
3441  auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3442 
3443  CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
3444  llvm::Value *InterfaceSizeVal =
3445  llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
3446 
3447  llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
3448 
3449  // We don't necessarily build correct LLVM struct types for ObjC
3450  // interfaces, so we can't rely on GEP to do this scaling
3451  // correctly; instead we cast to i8*. FIXME: is this actually
3452  // true? A lot of other things in the fragile ABI would break...
3453  llvm::Type *OrigBaseTy = Addr.getType();
3454  Addr = Builder.CreateElementBitCast(Addr, Int8Ty);
3455 
3456  // Do the GEP.
3457  CharUnits EltAlign =
3458  getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
3459  llvm::Value *EltPtr =
3460  emitArraySubscriptGEP(*this, Addr.getPointer(), ScaledIdx, false,
3461  SignedIndices, E->getExprLoc());
3462  Addr = Address(EltPtr, EltAlign);
3463 
3464  // Cast back.
3465  Addr = Builder.CreateBitCast(Addr, OrigBaseTy);
3466  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
3467  // If this is A[i] where A is an array, the frontend will have decayed the
3468  // base to be an ArrayToPointerDecay implicit cast. While correct, it is
3469  // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
3470  // "gep x, i" here. Emit one "gep A, 0, i".
3471  assert(Array->getType()->isArrayType() &&
3472  "Array to pointer decay must have array source type!");
3473  LValue ArrayLV;
3474  // For simple multidimensional array indexing, set the 'accessed' flag for
3475  // better bounds-checking of the base expression.
3476  if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
3477  ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
3478  else
3479  ArrayLV = EmitLValue(Array);
3480  auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3481 
3482  // Propagate the alignment from the array itself to the result.
3483  Addr = emitArraySubscriptGEP(
3484  *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
3485  E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
3486  E->getExprLoc());
3487  EltBaseInfo = ArrayLV.getBaseInfo();
3488  EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
3489  } else {
3490  // The base must be a pointer; emit it with an estimate of its alignment.
3491  Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
3492  auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3493  Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
3494  !getLangOpts().isSignedOverflowDefined(),
3495  SignedIndices, E->getExprLoc());
3496  }
3497 
3498  LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
3499 
3500  if (getLangOpts().ObjC &&
3501  getLangOpts().getGC() != LangOptions::NonGC) {
3502  LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
3503  setObjCGCLValueClass(getContext(), E, LV);
3504  }
3505  return LV;
3506 }
3507 
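// For illustration: for "int A[10][20];" the expression A[i][j] goes through
// the isSimpleArrayDecayOperand path twice, emitting one in-bounds GEP per
// subscript (typed-pointer IR syntax of this release):
//
//   %arrayidx  = getelementptr inbounds [10 x [20 x i32]],
//                [10 x [20 x i32]]* %A, i64 0, i64 %i
//   %arrayidx1 = getelementptr inbounds [20 x i32],
//                [20 x i32]* %arrayidx, i64 0, i64 %j
//
// For a VLA "int B[n][m];", B[i] takes the VLA branch above instead and first
// scales the index by the captured bound: %idx = mul nsw i64 %i, %m.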
3508 static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
3509  LValueBaseInfo &BaseInfo,
3510  TBAAAccessInfo &TBAAInfo,
3511  QualType BaseTy, QualType ElTy,
3512  bool IsLowerBound) {
3513  LValue BaseLVal;
3514  if (auto *ASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParenImpCasts())) {
3515  BaseLVal = CGF.EmitOMPArraySectionExpr(ASE, IsLowerBound);
3516  if (BaseTy->isArrayType()) {
3517  Address Addr = BaseLVal.getAddress();
3518  BaseInfo = BaseLVal.getBaseInfo();
3519 
3520  // If the array type was an incomplete type, we need to make sure
3521  // the decay ends up being the right type.
3522  llvm::Type *NewTy = CGF.ConvertType(BaseTy);
3523  Addr = CGF.Builder.CreateElementBitCast(Addr, NewTy);
3524 
3525  // Note that VLA pointers are always decayed, so we don't need to do
3526  // anything here.
3527  if (!BaseTy->isVariableArrayType()) {
3528  assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3529  "Expected pointer to array");
3530  Addr = CGF.Builder.CreateStructGEP(Addr, 0, CharUnits::Zero(),
3531  "arraydecay");
3532  }
3533 
3534  return CGF.Builder.CreateElementBitCast(Addr,
3535  CGF.ConvertTypeForMem(ElTy));
3536  }
3537  LValueBaseInfo TypeBaseInfo;
3538  TBAAAccessInfo TypeTBAAInfo;
3539  CharUnits Align = CGF.getNaturalTypeAlignment(ElTy, &TypeBaseInfo,
3540  &TypeTBAAInfo);
3541  BaseInfo.mergeForCast(TypeBaseInfo);
3542  TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
3543  return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()), Align);
3544  }
3545  return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
3546 }
3547 
3548 LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
3549  bool IsLowerBound) {
3550  QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(E->getBase());
3551  QualType ResultExprTy;
3552  if (auto *AT = getContext().getAsArrayType(BaseTy))
3553  ResultExprTy = AT->getElementType();
3554  else
3555  ResultExprTy = BaseTy->getPointeeType();
3556  llvm::Value *Idx = nullptr;
3557  if (IsLowerBound || E->getColonLoc().isInvalid()) {
3558  // The lower or upper bound was requested but no length was provided, and
3559  // there is no ':' to imply the default length, so the length is 1.
3560  // Idx = LowerBound ?: 0;
3561  if (auto *LowerBound = E->getLowerBound()) {
3562  Idx = Builder.CreateIntCast(
3563  EmitScalarExpr(LowerBound), IntPtrTy,
3564  LowerBound->getType()->hasSignedIntegerRepresentation());
3565  } else
3566  Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
3567  } else {
3568  // Try to emit the length or the lower bound as a constant. If that
3569  // succeeds, 1 is subtracted from the constant. Otherwise, emit the LLVM
3570  // IR (LB + Len) - 1.
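// For illustration: for the section "a[2:4]" (lower bound 2, length 4) the
// upper-bound index folds to the constant 2 + 4 - 1 = 5; with non-constant
// operands "a[lb:len]" this emits roughly:
//   %lb_add_len = add nsw i64 %lb, %len
//   %idx_sub_1  = sub nsw i64 %lb_add_len, 1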
3571  auto &C = CGM.getContext();
3572  auto *Length = E->getLength();
3573  llvm::APSInt ConstLength;
3574  if (Length) {
3575  // Idx = LowerBound + Length - 1;
3576  if (Length->isIntegerConstantExpr(ConstLength, C)) {
3577  ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
3578  Length = nullptr;
3579  }
3580  auto *LowerBound = E->getLowerBound();
3581  llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
3582  if (LowerBound && LowerBound->isIntegerConstantExpr(ConstLowerBound, C)) {
3583  ConstLowerBound = ConstLowerBound.zextOrTrunc(PointerWidthInBits);
3584  LowerBound = nullptr;
3585  }
3586  if (!Length)
3587  --ConstLength;
3588  else if (!LowerBound)
3589  --ConstLowerBound;
3590 
3591  if (Length || LowerBound) {
3592  auto *LowerBoundVal =
3593  LowerBound
3594  ? Builder.CreateIntCast(
3595  EmitScalarExpr(LowerBound), IntPtrTy,
3596  LowerBound->getType()->hasSignedIntegerRepresentation())
3597  : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
3598  auto *LengthVal =
3599  Length
3600  ? Builder.CreateIntCast(
3601  EmitScalarExpr(Length), IntPtrTy,
3602  Length->getType()->hasSignedIntegerRepresentation())
3603  : llvm::ConstantInt::get(IntPtrTy, ConstLength);
3604  Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
3605  /*HasNUW=*/false,
3606  !getLangOpts().isSignedOverflowDefined());
3607  if (Length && LowerBound) {
3608  Idx = Builder.CreateSub(
3609  Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
3610  /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
3611  }
3612  } else
3613  Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
3614  } else {
3615  // Idx = ArraySize - 1;
3616  QualType ArrayTy = BaseTy->isPointerType()
3617  ? E->getBase()->IgnoreParenImpCasts()->getType()
3618  : BaseTy;
3619  if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
3620  Length = VAT->getSizeExpr();
3621  if (Length->isIntegerConstantExpr(ConstLength, C))
3622  Length = nullptr;
3623  } else {
3624  auto *CAT = C.getAsConstantArrayType(ArrayTy);
3625  ConstLength = CAT->getSize();
3626  }
3627  if (Length) {
3628  auto *LengthVal = Builder.CreateIntCast(
3629  EmitScalarExpr(Length), IntPtrTy,
3630  Length->getType()->hasSignedIntegerRepresentation());
3631  Idx = Builder.CreateSub(
3632  LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
3633  /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
3634  } else {
3635  ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
3636  --ConstLength;
3637  Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
3638  }
3639  }
3640  }
3641  assert(Idx);
3642 
3643  Address EltPtr = Address::invalid();
3644  LValueBaseInfo BaseInfo;
3645  TBAAAccessInfo TBAAInfo;
3646  if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
3647  // The base must be a pointer, which is not an aggregate. Emit
3648  // it. It needs to be emitted first in case it's what captures
3649  // the VLA bounds.
3650  Address Base =
3651  emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
3652  BaseTy, VLA->getElementType(), IsLowerBound);
3653  // The element count here is the total number of non-VLA elements.
3654  llvm::Value *NumElements = getVLASize(VLA).NumElts;
3655 
3656  // Effectively, the multiply by the VLA size is part of the GEP.
3657  // GEP indexes are signed, and scaling an index isn't permitted to
3658  // signed-overflow, so we use the same semantics for our explicit
3659  // multiply. We suppress this if overflow is not undefined behavior.
3660  if (getLangOpts().isSignedOverflowDefined())
3661  Idx = Builder.CreateMul(Idx, NumElements);
3662  else
3663  Idx = Builder.CreateNSWMul(Idx, NumElements);
3664  EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
3665  !getLangOpts().isSignedOverflowDefined(),
3666  /*SignedIndices=*/false, E->getExprLoc());
3667  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
3668  // If this is A[i] where A is an array, the frontend will have decayed the
3669  // base to be an ArrayToPointerDecay implicit cast. While correct, it is
3670  // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
3671  // "gep x, i" here. Emit one "gep A, 0, i".
3672  assert(Array->getType()->isArrayType() &&
3673  "Array to pointer decay must have array source type!");
3674  LValue ArrayLV;
3675  // For simple multidimensional array indexing, set the 'accessed' flag for
3676  // better bounds-checking of the base expression.
3677  if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
3678  ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
3679  else
3680  ArrayLV = EmitLValue(Array);
3681 
3682  // Propagate the alignment from the array itself to the result.
3683  EltPtr = emitArraySubscriptGEP(
3684  *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
3685  ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
3686  /*SignedIndices=*/false, E->getExprLoc());
3687  BaseInfo = ArrayLV.getBaseInfo();
3688  TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
3689  } else {
3690  Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo,
3691  TBAAInfo, BaseTy, ResultExprTy,
3692  IsLowerBound);
3693  EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
3694  !getLangOpts().isSignedOverflowDefined(),
3695  /*SignedIndices=*/false, E->getExprLoc());
3696  }
3697 
3698  return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
3699 }
3700 
3701 LValue CodeGenFunction::
3702 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
3703  // Emit the base vector as an l-value.
3704  LValue Base;
3705 
3706  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
3707  if (E->isArrow()) {
3708  // If it is a pointer to a vector, emit the address and form an lvalue with
3709  // it.
3710  LValueBaseInfo BaseInfo;
3711  TBAAAccessInfo TBAAInfo;
3712  Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
3713  const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
3714  Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
3715  Base.getQuals().removeObjCGCAttr();
3716  } else if (E->getBase()->isGLValue()) {
3717  // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
3718  // emit the base as an lvalue.
3719  assert(E->getBase()->getType()->isVectorType());
3720  Base = EmitLValue(E->getBase());
3721  } else {
3722  // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
3723  assert(E->getBase()->getType()->isVectorType() &&
3724  "Result must be a vector");
3725  llvm::Value *Vec = EmitScalarExpr(E->getBase());
3726 
3727  // Store the vector to memory (because LValue wants an address).
3728  Address VecMem = CreateMemTemp(E->getBase()->getType());
3729  Builder.CreateStore(Vec, VecMem);
3730  Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
3731  AlignmentSource::Decl);
3732  }
3733 
3734  QualType type =
3735  E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
3736 
3737  // Encode the element access list into a vector of unsigned indices.
3738  SmallVector<uint32_t, 4> Indices;
3739  E->getEncodedElementAccess(Indices);
3740 
3741  if (Base.isSimple()) {
3742  llvm::Constant *CV =
3743  llvm::ConstantDataVector::get(getLLVMContext(), Indices);
3744  return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
3745  Base.getBaseInfo(), TBAAAccessInfo());
3746  }
3747  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
3748 
3749  llvm::Constant *BaseElts = Base.getExtVectorElts();
3750  SmallVector<llvm::Constant *, 4> CElts;
3751 
3752  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
3753  CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
3754  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
3755  return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
3756  Base.getBaseInfo(), TBAAAccessInfo());
3757 }
3758 
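// For illustration: given "typedef float float4
// __attribute__((ext_vector_type(4))); float4 v;", the access v.yzx encodes
// to the indices {1, 2, 0} via getEncodedElementAccess; for a simple base
// those become the constant mask handed to LValue::MakeExtVectorElt, and a
// later load shuffles the base vector with that mask.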
3759 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
3760  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
3761  EmitIgnoredExpr(E->getBase());
3762  return EmitDeclRefLValue(DRE);
3763  }
3764 
3765  Expr *BaseExpr = E->getBase();
3766  // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
3767  LValue BaseLV;
3768  if (E->isArrow()) {
3769  LValueBaseInfo BaseInfo;
3770  TBAAAccessInfo TBAAInfo;
3771  Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
3772  QualType PtrTy = BaseExpr->getType()->getPointeeType();
3773  SanitizerSet SkippedChecks;
3774  bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
3775  if (IsBaseCXXThis)
3776  SkippedChecks.set(SanitizerKind::Alignment, true);
3777  if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
3778  SkippedChecks.set(SanitizerKind::Null, true);
3779  EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy,
3780  /*Alignment=*/CharUnits::Zero(), SkippedChecks);
3781  BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
3782  } else
3783  BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
3784 
3785  NamedDecl *ND = E->getMemberDecl();
3786  if (auto *Field = dyn_cast<FieldDecl>(ND)) {
3787  LValue LV = EmitLValueForField(BaseLV, Field);
3788  setObjCGCLValueClass(getContext(), E, LV);
3789  return LV;
3790  }
3791 
3792  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
3793  return EmitFunctionDeclLValue(*this, E, FD);
3794 
3795  llvm_unreachable("Unhandled member declaration!");
3796 }
3797 
3798 /// Given that we are currently emitting a lambda, emit an l-value for
3799 /// one of its members.
3800 LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
3801  assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda());
3802  assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent());
3803  QualType LambdaTagType =
3804  getContext().getTagDeclType(Field->getParent());
3805  LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType);
3806  return EmitLValueForField(LambdaLV, Field);
3807 }
3808 
3809 /// Drill down to the storage of a field without walking into
3810 /// reference types.
3811 ///
3812 /// The resulting address doesn't necessarily have the right type.
3813 static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
3814  const FieldDecl *field) {
3815  const RecordDecl *rec = field->getParent();
3816 
3817  unsigned idx =
3818  CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
3819 
3820  CharUnits offset;
3821  // Adjust the alignment down to the given offset.
3822  // As a special case, if the LLVM field index is 0, we know that the
3823  // offset is zero.
3824  assert((idx != 0 || CGF.getContext().getASTRecordLayout(rec)
3825  .getFieldOffset(field->getFieldIndex()) == 0) &&
3826  "LLVM field at index zero had non-zero offset?");
3827  if (idx != 0) {
3828  auto &recLayout = CGF.getContext().getASTRecordLayout(rec);
3829  auto offsetInBits = recLayout.getFieldOffset(field->getFieldIndex());
3830  offset = CGF.getContext().toCharUnitsFromBits(offsetInBits);
3831  }
3832 
3833  return CGF.Builder.CreateStructGEP(base, idx, offset, field->getName());
3834 }
3835 
3836 static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
3837  const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
3838  if (!RD)
3839  return false;
3840 
3841  if (RD->isDynamicClass())
3842  return true;
3843 
3844  for (const auto &Base : RD->bases())
3845  if (hasAnyVptr(Base.getType(), Context))
3846  return true;
3847 
3848  for (const FieldDecl *Field : RD->fields())
3849  if (hasAnyVptr(Field->getType(), Context))
3850  return true;
3851 
3852  return false;
3853 }
3854 
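// For illustration: hasAnyVptr is what makes the union case below launder the
// pointer under -fstrict-vtable-pointers. For example, given
//
//   struct Dyn { virtual ~Dyn(); };
//   union U { Dyn d; int i; };
//
// an access to U::d is wrapped in llvm.launder.invariant.group.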
3855 LValue CodeGenFunction::EmitLValueForField(LValue base,
3856  const FieldDecl *field) {
3857  LValueBaseInfo BaseInfo = base.getBaseInfo();
3858 
3859  if (field->isBitField()) {
3860  const CGRecordLayout &RL =
3861  CGM.getTypes().getCGRecordLayout(field->getParent());
3862  const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
3863  Address Addr = base.getAddress();
3864  unsigned Idx = RL.getLLVMFieldNo(field);
3865  if (Idx != 0)
3866  // For structs, we GEP to the field that the record layout suggests.
3867  Addr = Builder.CreateStructGEP(Addr, Idx, Info.StorageOffset,
3868  field->getName());
3869  // Get the access type.
3870  llvm::Type *FieldIntTy =
3871  llvm::Type::getIntNTy(getLLVMContext(), Info.StorageSize);
3872  if (Addr.getElementType() != FieldIntTy)
3873  Addr = Builder.CreateElementBitCast(Addr, FieldIntTy);
3874 
3875  QualType fieldType =
3876  field->getType().withCVRQualifiers(base.getVRQualifiers());
3877  // TODO: Support TBAA for bit fields.
3878  LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
3879  return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
3880  TBAAAccessInfo());
3881  }
3882 
3883  // Fields of may-alias structures are may-alias themselves.
3884  // FIXME: this should get propagated down through anonymous structs
3885  // and unions.
3886  QualType FieldType = field->getType();
3887  const RecordDecl *rec = field->getParent();
3888  AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
3889  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
3890  TBAAAccessInfo FieldTBAAInfo;
3891  if (base.getTBAAInfo().isMayAlias() ||
3892  rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
3893  FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
3894  } else if (rec->isUnion()) {
3895  // TODO: Support TBAA for unions.
3896  FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
3897  } else {
3898  // If no base type has been assigned for the base access, try to generate
3899  // one for this base lvalue.
3900  FieldTBAAInfo = base.getTBAAInfo();
3901  if (!FieldTBAAInfo.BaseType) {
3902  FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
3903  assert(!FieldTBAAInfo.Offset &&
3904  "Nonzero offset for an access with no base type!");
3905  }
3906 
3907  // Adjust offset to be relative to the base type.
3908  const ASTRecordLayout &Layout =
3909  getContext().getASTRecordLayout(field->getParent());
3910  unsigned CharWidth = getContext().getCharWidth();
3911  if (FieldTBAAInfo.BaseType)
3912  FieldTBAAInfo.Offset +=
3913  Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
3914 
3915  // Update the final access type and size.
3916  FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
3917  FieldTBAAInfo.Size =
3918  getContext().getTypeSizeInChars(FieldType).getQuantity();
3919  }
3920 
3921  Address addr = base.getAddress();
3922  if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
3923  if (CGM.getCodeGenOpts().StrictVTablePointers &&
3924  ClassDef->isDynamicClass()) {
3925  // Getting to any field of a dynamic object requires stripping the
3926  // dynamic information provided by invariant.group, because accessing
3927  // fields may leak the real address of the dynamic object, which could
3928  // result in miscompilation when the leaked pointer is compared.
3929  auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer());
3930  addr = Address(stripped, addr.getAlignment());
3931  }
3932  }
3933 
3934  unsigned RecordCVR = base.getVRQualifiers();
3935  if (rec->isUnion()) {
3936  // For unions, there is no pointer adjustment.
3937  assert(!FieldType->isReferenceType() && "union has reference member");
3938  if (CGM.getCodeGenOpts().StrictVTablePointers &&
3939  hasAnyVptr(FieldType, getContext()))
3940  // Because unions can easily skip invariant.barriers, we need to add
3941  // a barrier every time a CXXRecord field with a vptr is referenced.
3942  addr = Address(Builder.CreateLaunderInvariantGroup(addr.getPointer()),
3943  addr.getAlignment());
3944  } else {
3945  // For structs, we GEP to the field that the record layout suggests.
3946  addr = emitAddrOfFieldStorage(*this, addr, field);
3947 
3948  // If this is a reference field, load the reference right now.
3949  if (FieldType->isReferenceType()) {
3950  LValue RefLVal = MakeAddrLValue(addr, FieldType, FieldBaseInfo,
3951  FieldTBAAInfo);
3952  if (RecordCVR & Qualifiers::Volatile)
3953  RefLVal.getQuals().addVolatile();
3954  addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
3955 
3956  // Qualifiers on the struct don't apply to the referencee.
3957  RecordCVR = 0;
3958  FieldType = FieldType->getPointeeType();
3959  }
3960  }
3961 
3962  // Make sure that the address is pointing to the right type. This is critical
3963  // for both unions and structs. A union always needs a bitcast; a struct
3964  // element needs one if the laid-out LLVM type doesn't match the desired
3965  // type.
3966  addr = Builder.CreateElementBitCast(
3967  addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName());
3968 
3969  if (field->hasAttr<AnnotateAttr>())
3970  addr = EmitFieldAnnotations(field, addr);
3971 
3972  LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
3973  LV.getQuals().addCVRQualifiers(RecordCVR);
3974 
3975  // __weak attribute on a field is ignored.
3976  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
3977  LV.getQuals().removeObjCGCAttr();
3978 
3979  return LV;
3980 }
3981 
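// Worked example for the struct-path TBAA logic above: for
// "struct S { int a; int b; };", an access to s.b is described with
// BaseType = tbaa(S), Offset = 4 (the field's byte offset), and
// AccessType = tbaa(int), which lets the optimizer distinguish it from s.a
// even though both accesses are of type int.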
3982 LValue
3983 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
3984  const FieldDecl *Field) {
3985  QualType FieldType = Field->getType();
3986 
3987  if (!FieldType->isReferenceType())
3988  return EmitLValueForField(Base, Field);
3989 
3990  Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);
3991 
3992  // Make sure that the address is pointing to the right type.
3993  llvm::Type *llvmType = ConvertTypeForMem(FieldType);
3994  V = Builder.CreateElementBitCast(V, llvmType, Field->getName());
3995 
3996  // TODO: Generate TBAA information that describes this access as a structure
3997  // member access and not just an access to an object of the field's type. This
3998  // should be similar to what we do in EmitLValueForField().
3999  LValueBaseInfo BaseInfo = Base.getBaseInfo();
4000  AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
4001  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
4002  return MakeAddrLValue(V, FieldType, FieldBaseInfo,
4003  CGM.getTBAAInfoForSubobject(Base, FieldType));
4004 }
4005 
4006 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E) {
4007  if (E->isFileScope()) {
4008  ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
4009  return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
4010  }
4011  if (E->getType()->isVariablyModifiedType())
4012  // make sure to emit the VLA size.
4013  EmitVariablyModifiedType(E->getType());
4014 
4015  Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
4016  const Expr *InitExpr = E->getInitializer();
4017  LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);
4018 
4019  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
4020  /*Init*/ true);
4021 
4022  return Result;
4023 }
4024 
4025 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
4026  if (!E->isGLValue())
4027  // Initializing an aggregate temporary in C++11: T{...}.
4028  return EmitAggExprToLValue(E);
4029 
4030  // An lvalue initializer list must be initializing a reference.
4031  assert(E->isTransparent() && "non-transparent glvalue init list");
4032  return EmitLValue(E->getInit(0));
4033 }
4034 
4035 /// Emit the operand of a glvalue conditional operator. This is either a glvalue
4036 /// or a (possibly-parenthesized) throw-expression. If this is a throw, no
4037 /// LValue is returned and the current block has been terminated.
4038 static Optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
4039  const Expr *Operand) {
4040  if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
4041  CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
4042  return None;
4043  }
4044 
4045  return CGF.EmitLValue(Operand);
4046 }
4047 
4048 LValue CodeGenFunction::
4049 EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
4050  if (!expr->isGLValue()) {
4051  // ?: here should be an aggregate.
4052  assert(hasAggregateEvaluationKind(expr->getType()) &&
4053  "Unexpected conditional operator!");
4054  return EmitAggExprToLValue(expr);
4055  }
4056 
4057  OpaqueValueMapping binding(*this, expr);
4058 
4059  const Expr *condExpr = expr->getCond();
4060  bool CondExprBool;
4061  if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
4062  const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
4063  if (!CondExprBool) std::swap(live, dead);
4064 
4065  if (!ContainsLabel(dead)) {
4066  // If the true case is live, we need to track its region.
4067  if (CondExprBool)
4068  incrementProfileCounter(expr);
4069  return EmitLValue(live);
4070  }
4071  }
4072 
4073  llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
4074  llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
4075  llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
4076 
4077  ConditionalEvaluation eval(*this);
4078  EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock, getProfileCount(expr));
4079 
4080  // Any temporaries created here are conditional.
4081  EmitBlock(lhsBlock);
4082  incrementProfileCounter(expr);
4083  eval.begin(*this);
4084  Optional<LValue> lhs =
4085  EmitLValueOrThrowExpression(*this, expr->getTrueExpr());
4086  eval.end(*this);
4087 
4088  if (lhs && !lhs->isSimple())
4089  return EmitUnsupportedLValue(expr, "conditional operator");
4090 
4091  lhsBlock = Builder.GetInsertBlock();
4092  if (lhs)
4093  Builder.CreateBr(contBlock);
4094 
4095  // Any temporaries created here are conditional.
4096  EmitBlock(rhsBlock);
4097  eval.begin(*this);
4098  Optional<LValue> rhs =
4099  EmitLValueOrThrowExpression(*this, expr->getFalseExpr());
4100  eval.end(*this);
4101  if (rhs && !rhs->isSimple())
4102  return EmitUnsupportedLValue(expr, "conditional operator");
4103  rhsBlock = Builder.GetInsertBlock();
4104 
4105  EmitBlock(contBlock);
4106 
4107  if (lhs && rhs) {
4108  llvm::PHINode *phi = Builder.CreatePHI(lhs->getPointer()->getType(),
4109  2, "cond-lvalue");
4110  phi->addIncoming(lhs->getPointer(), lhsBlock);
4111  phi->addIncoming(rhs->getPointer(), rhsBlock);
4112  Address result(phi, std::min(lhs->getAlignment(), rhs->getAlignment()));
4113  AlignmentSource alignSource =
4114  std::max(lhs->getBaseInfo().getAlignmentSource(),
4115  rhs->getBaseInfo().getAlignmentSource());
4116  TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
4117  lhs->getTBAAInfo(), rhs->getTBAAInfo());
4118  return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
4119  TBAAInfo);
4120  } else {
4121  assert((lhs || rhs) &&
4122  "both operands of glvalue conditional are throw-expressions?");
4123  return lhs ? *lhs : *rhs;
4124  }
4125 }
4126 
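// For illustration: in "(cond ? x : throw 0) = 1;" the false arm terminates
// its block, so EmitLValueOrThrowExpression returns None for it; the code
// above then skips the PHI and the result is just the true arm's l-value.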
4127 /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
4128 /// type. If the cast is to a reference, we can have the usual lvalue result,
4129 /// otherwise if a cast is needed by the code generator in an lvalue context,
4130 /// then it must mean that we need the address of an aggregate in order to
4131 /// access one of its members. This can happen for all the reasons that casts
4132 /// are permitted with aggregate result, including noop aggregate casts, and
4133 /// cast from scalar to union.
4134 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
4135  switch (E->getCastKind()) {
4136  case CK_ToVoid:
4137  case CK_BitCast:
4138  case CK_ArrayToPointerDecay:
4139  case CK_FunctionToPointerDecay:
4140  case CK_NullToMemberPointer:
4141  case CK_NullToPointer:
4142  case CK_IntegralToPointer:
4143  case CK_PointerToIntegral:
4144  case CK_PointerToBoolean:
4145  case CK_VectorSplat:
4146  case CK_IntegralCast:
4147  case CK_BooleanToSignedIntegral:
4148  case CK_IntegralToBoolean:
4149  case CK_IntegralToFloating:
4150  case CK_FloatingToIntegral:
4151  case CK_FloatingToBoolean:
4152  case CK_FloatingCast:
4153  case CK_FloatingRealToComplex:
4154  case CK_FloatingComplexToReal:
4155  case CK_FloatingComplexToBoolean:
4156  case CK_FloatingComplexCast:
4157  case CK_FloatingComplexToIntegralComplex:
4158  case CK_IntegralRealToComplex:
4159  case CK_IntegralComplexToReal:
4160  case CK_IntegralComplexToBoolean:
4161  case CK_IntegralComplexCast:
4162  case CK_IntegralComplexToFloatingComplex:
4163  case CK_DerivedToBaseMemberPointer:
4164  case CK_BaseToDerivedMemberPointer:
4165  case CK_MemberPointerToBoolean:
4166  case CK_ReinterpretMemberPointer:
4167  case CK_AnyPointerToBlockPointerCast:
4168  case CK_ARCProduceObject:
4169  case CK_ARCConsumeObject:
4170  case CK_ARCReclaimReturnedObject:
4171  case CK_ARCExtendBlockObject:
4172  case CK_CopyAndAutoreleaseBlockObject:
4173  case CK_IntToOCLSampler:
4174  case CK_FixedPointCast:
4175  case CK_FixedPointToBoolean:
4176  return EmitUnsupportedLValue(E, "unexpected cast lvalue");
4177 
4178  case CK_Dependent:
4179  llvm_unreachable("dependent cast kind in IR gen!");
4180 
4181  case CK_BuiltinFnToFnPtr:
4182  llvm_unreachable("builtin functions are handled elsewhere");
4183 
4184  // These are never l-values; just use the aggregate emission code.
4185  case CK_NonAtomicToAtomic:
4186  case CK_AtomicToNonAtomic:
4187  return EmitAggExprToLValue(E);
4188 
4189  case CK_Dynamic: {
4190  LValue LV = EmitLValue(E->getSubExpr());
4191  Address V = LV.getAddress();
4192  const auto *DCE = cast<CXXDynamicCastExpr>(E);
4193  return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType());
4194  }
4195 
4196  case CK_ConstructorConversion:
4197  case CK_UserDefinedConversion:
4198  case CK_CPointerToObjCPointerCast:
4199  case CK_BlockPointerToObjCPointerCast:
4200  case CK_NoOp:
4201  case CK_LValueToRValue:
4202  return EmitLValue(E->getSubExpr());
4203 
4204  case CK_UncheckedDerivedToBase:
4205  case CK_DerivedToBase: {
4206  const RecordType *DerivedClassTy =
4207  E->getSubExpr()->getType()->getAs<RecordType>();
4208  auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
4209 
4210  LValue LV = EmitLValue(E->getSubExpr());
4211  Address This = LV.getAddress();
4212 
4213  // Perform the derived-to-base conversion
4215  This, DerivedClassDecl, E->path_begin(), E->path_end(),
4216  /*NullCheckValue=*/false, E->getExprLoc());
4217 
4218  // TODO: Support accesses to members of base classes in TBAA. For now, we
4219  // conservatively pretend that the complete object is of the base class
4220  // type.
4221  return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
4222  CGM.getTBAAInfoForSubobject(LV, E->getType()));
4223  }
4224  case CK_ToUnion:
4225  return EmitAggExprToLValue(E);
4226  case CK_BaseToDerived: {
4227  const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
4228  auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
4229 
4230  LValue LV = EmitLValue(E->getSubExpr());
4231 
4232  // Perform the base-to-derived conversion
4233  Address Derived =
4234  GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
4235  E->path_begin(), E->path_end(),
4236  /*NullCheckValue=*/false);
4237 
4238  // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
4239  // performed and the object is not of the derived type.
4242  Derived.getPointer(), E->getType());
4243 
4244  if (SanOpts.has(SanitizerKind::CFIDerivedCast))
4245  EmitVTablePtrCheckForCast(E->getType(), Derived.getPointer(),
4246  /*MayBeNull=*/false, CFITCK_DerivedCast,
4247  E->getBeginLoc());
4248 
4249  return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
4250  CGM.getTBAAInfoForSubobject(LV, E->getType()));
4251  }
4252  case CK_LValueBitCast: {
4253  // This must be a reinterpret_cast (or c-style equivalent).
4254  const auto *CE = cast<ExplicitCastExpr>(E);
4255 
4256  CGM.EmitExplicitCastExprType(CE, this);
4257  LValue LV = EmitLValue(E->getSubExpr());
4258  Address V = Builder.CreateBitCast(LV.getAddress(),
4259  ConvertType(CE->getTypeAsWritten()));
4260 
4261  if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
4262  EmitVTablePtrCheckForCast(E->getType(), V.getPointer(),
4263  /*MayBeNull=*/false, CFITCK_UnrelatedCast,
4264  E->getBeginLoc());
4265 
4266  return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
4267  CGM.getTBAAInfoForSubobject(LV, E->getType()));
4268  }
4269  case CK_AddressSpaceConversion: {
4270  LValue LV = EmitLValue(E->getSubExpr());
4271  QualType DestTy = getContext().getPointerType(E->getType());
4272  llvm::Value *V = getTargetHooks().performAddrSpaceCast(
4273  *this, LV.getPointer(), E->getSubExpr()->getType().getAddressSpace(),
4274  E->getType().getAddressSpace(), ConvertType(DestTy));
4275  return MakeAddrLValue(Address(V, LV.getAddress().getAlignment()),
4276  E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
4277  }
4278  case CK_ObjCObjectLValueCast: {
4279  LValue LV = EmitLValue(E->getSubExpr());
4280  Address V = Builder.CreateElementBitCast(LV.getAddress(),
4281  ConvertType(E->getType()));
4282  return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
4283  CGM.getTBAAInfoForSubobject(LV, E->getType()));
4284  }
4285  case CK_ZeroToOCLOpaqueType:
4286  llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
4287  }
4288 
4289  llvm_unreachable("Unhandled lvalue cast kind?");
4290 }
4291 
4292 LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
4293  assert(OpaqueValueMappingData::shouldBindAsLValue(e));
4294  return getOrCreateOpaqueLValueMapping(e);
4295 }
4296 
4297 LValue
4298 CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
4299  assert(OpaqueValueMapping::shouldBindAsLValue(e));
4300 
4301  llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
4302  it = OpaqueLValues.find(e);
4303 
4304  if (it != OpaqueLValues.end())
4305  return it->second;
4306 
4307  assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
4308  return EmitLValue(e->getSourceExpr());
4309 }
4310 
4311 RValue
4312 CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
4313  assert(!OpaqueValueMapping::shouldBindAsLValue(e));
4314 
4315  llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
4316  it = OpaqueRValues.find(e);
4317 
4318  if (it != OpaqueRValues.end())
4319  return it->second;
4320 
4321  assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
4322  return EmitAnyExpr(e->getSourceExpr());
4323 }
4324 
4325 RValue CodeGenFunction::EmitRValueForField(LValue LV,
4326  const FieldDecl *FD,
4327  SourceLocation Loc) {
4328  QualType FT = FD->getType();
4329  LValue FieldLV = EmitLValueForField(LV, FD);
4330  switch (getEvaluationKind(FT)) {
4331  case TEK_Complex:
4332  return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
4333  case TEK_Aggregate:
4334  return FieldLV.asAggregateRValue();
4335  case TEK_Scalar:
4336  // This routine is used to load fields one-by-one to perform a copy, so
4337  // don't load reference fields.
4338  if (FD->getType()->isReferenceType())
4339  return RValue::get(FieldLV.getPointer());
4340  return EmitLoadOfLValue(FieldLV, Loc);
4341  }
4342  llvm_unreachable("bad evaluation kind");
4343 }
4344 
4345 //===--------------------------------------------------------------------===//
4346 // Expression Emission
4347 //===--------------------------------------------------------------------===//
4348 
4349 RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
4350  ReturnValueSlot ReturnValue) {
4351  // Builtins never have block type.
4352  if (E->getCallee()->getType()->isBlockPointerType())
4353  return EmitBlockCallExpr(E, ReturnValue);
4354 
4355  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
4356  return EmitCXXMemberCallExpr(CE, ReturnValue);
4357 
4358  if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
4359  return EmitCUDAKernelCallExpr(CE, ReturnValue);
4360 
4361  if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
4362  if (const CXXMethodDecl *MD =
4363  dyn_cast_or_null<CXXMethodDecl>(CE->getCalleeDecl()))
4364  return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
4365 
4366  CGCallee callee = EmitCallee(E->getCallee());
4367 
4368  if (callee.isBuiltin()) {
4369  return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
4370  E, ReturnValue);
4371  }
4372 
4373  if (callee.isPseudoDestructor()) {
4374  return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
4375  }
4376 
4377  return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue);
4378 }
4379 
4380 /// Emit a CallExpr without considering whether it might be a subclass.
4381 RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
4382  ReturnValueSlot ReturnValue) {
4383  CGCallee Callee = EmitCallee(E->getCallee());
4384  return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
4385 }
4386 
4387 static CGCallee EmitDirectCallee(CodeGenFunction &CGF, const FunctionDecl *FD) {
4388  if (auto builtinID = FD->getBuiltinID()) {
4389  return CGCallee::forBuiltin(builtinID, FD);
4390  }
4391 
4392  llvm::Constant *calleePtr = EmitFunctionDeclPointer(CGF.CGM, FD);
4393  return CGCallee::forDirect(calleePtr, GlobalDecl(FD));
4394 }
4395 
4396 CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
4397  E = E->IgnoreParens();
4398 
4399  // Look through function-to-pointer decay.
4400  if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
4401  if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
4402  ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
4403  return EmitCallee(ICE->getSubExpr());
4404  }
4405 
4406  // Resolve direct calls.
4407  } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
4408  if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
4409  return EmitDirectCallee(*this, FD);
4410  }
4411  } else if (auto ME = dyn_cast<MemberExpr>(E)) {
4412  if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
4413  EmitIgnoredExpr(ME->getBase());
4414  return EmitDirectCallee(*this, FD);
4415  }
4416 
4417  // Look through template substitutions.
4418  } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
4419  return EmitCallee(NTTP->getReplacement());
4420 
4421  // Treat pseudo-destructor calls differently.
4422  } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
4423  return CGCallee::forPseudoDestructor(PDE);
4424  }
4425 
4426  // Otherwise, we have an indirect reference.
4427  llvm::Value *calleePtr;
4428  QualType functionType;
4429  if (auto ptrType = E->getType()->getAs<PointerType>()) {
4430  calleePtr = EmitScalarExpr(E);
4431  functionType = ptrType->getPointeeType();
4432  } else {
4433  functionType = E->getType();
4434  calleePtr = EmitLValue(E).getPointer();
4435  }
4436  assert(functionType->isFunctionType());
4437 
4438  GlobalDecl GD;
4439  if (const auto *VD =
4440  dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
4441  GD = GlobalDecl(VD);
4442 
4443  CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
4444  CGCallee callee(calleeInfo, calleePtr);
4445  return callee;
4446 }
4447 
4448 LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
4449  // Comma expressions just emit their LHS then their RHS as an l-value.
4450  if (E->getOpcode() == BO_Comma) {
4451  EmitIgnoredExpr(E->getLHS());
4452  EnsureInsertPoint();
4453  return EmitLValue(E->getRHS());
4454  }
4455 
4456  if (E->getOpcode() == BO_PtrMemD ||
4457  E->getOpcode() == BO_PtrMemI)
4458  return EmitPointerToDataMemberBinaryExpr(E);
4459 
4460  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
4461 
4462  // Note that in all of these cases, __block variables need the RHS
4463  // evaluated first just in case the variable gets moved by the RHS.
4464 
4465  switch (getEvaluationKind(E->getType())) {
4466  case TEK_Scalar: {
4467  switch (E->getLHS()->getType().getObjCLifetime()) {
4468  case Qualifiers::OCL_Strong:
4469  return EmitARCStoreStrong(E, /*ignored*/ false).first;
4470 
4471  case Qualifiers::OCL_Autoreleasing:
4472  return EmitARCStoreAutoreleasing(E).first;
4473 
4474  // No reason to do any of these differently.
4475  case Qualifiers::OCL_None:
4476  case Qualifiers::OCL_ExplicitNone:
4477  case Qualifiers::OCL_Weak:
4478  break;
4479  }
4480 
4481  RValue RV = EmitAnyExpr(E->getRHS());
4482  LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
4483  if (RV.isScalar())
4484  EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc());
4485  EmitStoreThroughLValue(RV, LV);
4486  return LV;
4487  }
4488 
4489  case TEK_Complex:
4490  return EmitComplexAssignmentLValue(E);
4491 
4492  case TEK_Aggregate:
4493  return EmitAggExprToLValue(E);
4494  }
4495  llvm_unreachable("bad evaluation kind");
4496 }
4497 
4498 LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
4499  RValue RV = EmitCallExpr(E);
4500 
4501  if (!RV.isScalar())
4502  return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
4503  AlignmentSource::Decl);
4504 
4505  assert(E->getCallReturnType(getContext())->isReferenceType() &&
4506  "Can't have a scalar return unless the return type is a "
4507  "reference type!");
4508 
4509  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
4510 }
4511 
4512 LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
4513  // FIXME: This shouldn't require another copy.
4514  return EmitAggExprToLValue(E);
4515 }
4516 
4517 LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
4518  assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
4519  && "binding l-value to type which needs a temporary");
4520  AggValueSlot Slot = CreateAggTemp(E->getType());
4521  EmitCXXConstructExpr(E, Slot);
4522  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
4523 }
4524 
4525 LValue
4526 CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
4527  return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType());
4528 }
4529 
4530 Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
4531  return Builder.CreateElementBitCast(CGM.GetAddrOfUuidDescriptor(E),
4532  ConvertType(E->getType()));
4533 }
4534 
4535 LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
4536  return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
4537  AlignmentSource::Decl);
4538 }
4539 
4540 LValue
4541 CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
4542  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
4543  Slot.setExternallyDestructed();
4544  EmitAggExpr(E->getSubExpr(), Slot);
4545  EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
4546  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
4547 }
4548 
4549 LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
4550  RValue RV = EmitObjCMessageExpr(E);
4551 
4552  if (!RV.isScalar())
4553  return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
4554  AlignmentSource::Decl);
4555 
4556  assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
4557  "Can't have a scalar return unless the return type is a "
4558  "reference type!");
4559 
4560  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
4561 }
4562 
4563 LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
4564  Address V =
4565  CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
4566  return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
4567 }
4568 
4569 llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
4570  const ObjCIvarDecl *Ivar) {
4571  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
4572 }
4573 
4574 LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
4575  llvm::Value *BaseValue,
4576  const ObjCIvarDecl *Ivar,
4577  unsigned CVRQualifiers) {
4578  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
4579  Ivar, CVRQualifiers);
4580 }
4581 
4582 LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
4583  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
4584  llvm::Value *BaseValue = nullptr;
4585  const Expr *BaseExpr = E->getBase();
4586  Qualifiers BaseQuals;
4587  QualType ObjectTy;
4588  if (E->isArrow()) {
4589  BaseValue = EmitScalarExpr(BaseExpr);
4590  ObjectTy = BaseExpr->getType()->getPointeeType();
4591  BaseQuals = ObjectTy.getQualifiers();
4592  } else {
4593  LValue BaseLV = EmitLValue(BaseExpr);
4594  BaseValue = BaseLV.getPointer();
4595  ObjectTy = BaseExpr->getType();
4596  BaseQuals = ObjectTy.getQualifiers();
4597  }
4598 
4599  LValue LV =
4600  EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
4601  BaseQuals.getCVRQualifiers());
4602  setObjCGCLValueClass(getContext(), E, LV);
4603  return LV;
4604 }
4605 
4606 LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
4607  // Can only get an l-value for a statement expression returning an aggregate.
4608  RValue RV = EmitAnyExprToTemp(E);
4609  return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
4610  AlignmentSource::Decl);
4611 }
4612 
4613 RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee,
4614  const CallExpr *E, ReturnValueSlot ReturnValue,
4615  llvm::Value *Chain) {
4616  // Get the actual function type. The callee type will always be a pointer to
4617  // function type or a block pointer type.
4618  assert(CalleeType->isFunctionPointerType() &&
4619  "Call must have function pointer type!");
4620 
4621  const Decl *TargetDecl =
4622  OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
4623 
4624  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
4625  // We can only guarantee that a function is called from the correct
4626  // context/function based on the appropriate target attributes,
4627  // so only check in the case where we have both always_inline and target
4628  // since otherwise we could be making a conditional call after a check for
4629  // the proper CPU features (and it won't cause code generation issues due
4630  // to function-based code generation).
4631  if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
4632  TargetDecl->hasAttr<TargetAttr>())
4633  checkTargetFeatures(E, FD);
4634 
4635  CalleeType = getContext().getCanonicalType(CalleeType);
4636 
4637  auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
4638 
4639  CGCallee Callee = OrigCallee;
4640 
4641  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) &&
4642  (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
4643  if (llvm::Constant *PrefixSig =
4644  CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
4645  SanitizerScope SanScope(this);
4646  // Remove any (C++17) exception specifications, to allow calling e.g. a
4647  // noexcept function through a non-noexcept pointer.
4648  auto ProtoTy =
4649  getContext().getFunctionTypeWithExceptionSpec(PointeeType, EST_None);
4650  llvm::Constant *FTRTTIConst =
4651  CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
4652  llvm::Type *PrefixStructTyElems[] = {PrefixSig->getType(), Int32Ty};
4653  llvm::StructType *PrefixStructTy = llvm::StructType::get(
4654  CGM.getLLVMContext(), PrefixStructTyElems, /*isPacked=*/true);
4655 
4656  llvm::Value *CalleePtr = Callee.getFunctionPointer();
4657 
4658  llvm::Value *CalleePrefixStruct = Builder.CreateBitCast(
4659  CalleePtr, llvm::PointerType::getUnqual(PrefixStructTy));
4660  llvm::Value *CalleeSigPtr =
4661  Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0);
4662  llvm::Value *CalleeSig =
4663  Builder.CreateAlignedLoad(CalleeSigPtr, getIntAlign());
4664  llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
4665 
4666  llvm::BasicBlock *Cont = createBasicBlock("cont");
4667  llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
4668  Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
4669 
4670  EmitBlock(TypeCheck);
4671  llvm::Value *CalleeRTTIPtr =
4672  Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 1);
4673  llvm::Value *CalleeRTTIEncoded =
4674  Builder.CreateAlignedLoad(CalleeRTTIPtr, getPointerAlign());
4675  llvm::Value *CalleeRTTI =
4676  DecodeAddrUsedInPrologue(CalleePtr, CalleeRTTIEncoded);
4677  llvm::Value *CalleeRTTIMatch =
4678  Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst);
4679  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
4680  EmitCheckTypeDescriptor(CalleeType)};
4681  EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function),
4682  SanitizerHandler::FunctionTypeMismatch, StaticData, CalleePtr);
4683 
4684  Builder.CreateBr(Cont);
4685  EmitBlock(Cont);
4686  }
4687  }
4688 
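// For illustration: -fsanitize=function assumes checked functions were
// emitted with prologue data shaped like the packed struct used above,
//
//   <{ i32 <expected signature>, i32 <encoded RTTI pointer> }>
//
// so the check first compares the signature slot and only dereferences the
// RTTI slot (via DecodeAddrUsedInPrologue) when the signature matches.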
4689  const auto *FnType = cast<FunctionType>(PointeeType);
4690 
4691  // If we are checking indirect calls and this call is indirect, check that the
4692  // function pointer is a member of the bit set for the function type.
4693  if (SanOpts.has(SanitizerKind::CFIICall) &&
4694  (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
4695  SanitizerScope SanScope(this);
4696  EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
4697 
4698  llvm::Metadata *MD;
4699  if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
4700  MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0));
4701  else
4702  MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));
4703 
4704  llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
4705 
4706  llvm::Value *CalleePtr = Callee.getFunctionPointer();
4707  llvm::Value *CastedCallee = Builder.CreateBitCast(CalleePtr, Int8PtrTy);
4708  llvm::Value *TypeTest = Builder.CreateCall(
4709  CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedCallee, TypeId});
4710 
4711  auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
4712  llvm::Constant *StaticData[] = {
4713  llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
4714  EmitCheckSourceLocation(E->getBeginLoc()),
4715  EmitCheckTypeDescriptor(QualType(FnType, 0)),
4716  };
4717  if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {
4718  EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId,
4719  CastedCallee, StaticData);
4720  } else {
4721  EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
4722  SanitizerHandler::CFICheckFail, StaticData,
4723  {CastedCallee, llvm::UndefValue::get(IntPtrTy)});
4724  }
4725  }
4726 
4727  CallArgList Args;
4728  if (Chain)
4729  Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)),
4730  CGM.getContext().VoidPtrTy);
4731 
4732  // C++17 requires that we evaluate arguments to a call using assignment syntax
4733  // right-to-left, and that we evaluate arguments to certain other operators
4734  // left-to-right. Note that we allow this to override the order dictated by
4735  // the calling convention on the MS ABI, which means that parameter
4736  // destruction order is not necessarily reverse construction order.
4737  // FIXME: Revisit this based on C++ committee response to unimplementability.
4738  EvaluationOrder Order = EvaluationOrder::Default;
4739  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
4740  if (OCE->isAssignmentOp())
4741  Order = EvaluationOrder::ForceRightToLeft;
4742  else {
4743  switch (OCE->getOperator()) {
4744  case OO_LessLess:
4745  case OO_GreaterGreater:
4746  case OO_AmpAmp:
4747  case OO_PipePipe:
4748  case OO_Comma:
4749  case OO_ArrowStar:
4750  Order = EvaluationOrder::ForceLeftToRight;
4751  break;
4752  default:
4753  break;
4754  }
4755  }
4756  }
4757 
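// For illustration: for an overloaded "lhs = rhs()", C++17 requires rhs() to
// be evaluated before lhs, hence ForceRightToLeft above; conversely, in
// "out << a() << b()" the calls a() and b() must be evaluated left-to-right.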
4758  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arguments(),
4759  E->getDirectCallee(), /*ParamsToSkip*/ 0, Order);
4760 
4761  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
4762  Args, FnType, /*isChainCall=*/Chain);
4763 
4764  // C99 6.5.2.2p6:
4765  // If the expression that denotes the called function has a type
4766  // that does not include a prototype, [the default argument
4767  // promotions are performed]. If the number of arguments does not
4768  // equal the number of parameters, the behavior is undefined. If
4769  // the function is defined with a type that includes a prototype,
4770  // and either the prototype ends with an ellipsis (, ...) or the
4771  // types of the arguments after promotion are not compatible with
4772  // the types of the parameters, the behavior is undefined. If the
4773  // function is defined with a type that does not include a
4774  // prototype, and the types of the arguments after promotion are
4775  // not compatible with those of the parameters after promotion,
4776  // the behavior is undefined [except in some trivial cases].
4777  // That is, in the general case, we should assume that a call
4778  // through an unprototyped function type works like a *non-variadic*
4779  // call. The way we make this work is to cast to the exact type
4780  // of the promoted arguments.
4781  //
4782  // Chain calls use this same code path to add the invisible chain parameter
4783  // to the function type.
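// For illustration: in C, a call through the unprototyped "void f(); f(1, 2.0);"
// is emitted by casting the callee to the promoted-argument type
// (typed-pointer syntax):
//
//   call void bitcast (void (...)* @f to void (i32, double)*)
//        (i32 1, double 2.000000e+00)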
4784  if (isa<FunctionNoProtoType>(FnType) || Chain) {
4785  llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
4786  CalleeTy = CalleeTy->getPointerTo();
4787 
4788  llvm::Value *CalleePtr = Callee.getFunctionPointer();
4789  CalleePtr =