//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"

#include <string>

using namespace clang;
using namespace CodeGen;

//===--------------------------------------------------------------------===//
//                        Miscellaneous Helper Methods
//===--------------------------------------------------------------------===//

llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
  unsigned addressSpace =
      cast<llvm::PointerType>(value->getType())->getAddressSpace();

  llvm::PointerType *destType = Int8PtrTy;
  if (addressSpace)
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);

  if (value->getType() == destType) return value;
  return Builder.CreateBitCast(value, destType);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block.
Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
                                                     CharUnits Align,
                                                     const Twine &Name,
                                                     llvm::Value *ArraySize) {
  auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
  Alloca->setAlignment(Align.getAsAlign());
  return Address(Alloca, Ty, Align);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block. The alloca is cast to the default address space if necessary.
Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
                                          const Twine &Name,
                                          llvm::Value *ArraySize,
                                          Address *AllocaAddr) {
  auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
  if (AllocaAddr)
    *AllocaAddr = Alloca;
  llvm::Value *V = Alloca.getPointer();
  // Alloca always returns a pointer in alloca address space, which may
  // be different from the type defined by the language. For example,
  // in C++ the auto variables are in the default address space. Therefore
  // cast alloca to the default address space when necessary.
  if (getASTAllocaAddressSpace() != LangAS::Default) {
    auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
    llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
    // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
    // otherwise alloca is inserted at the current insertion point of the
    // builder.
    if (!ArraySize)
      Builder.SetInsertPoint(getPostAllocaInsertPoint());
    V = getTargetHooks().performAddrSpaceCast(
        *this, V, getASTAllocaAddressSpace(), LangAS::Default,
        Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
  }

  return Address(V, Ty, Align);
}

/// CreateTempAlloca - This creates an alloca and inserts it into the entry
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
/// insertion point of the builder.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
                                                    const Twine &Name,
                                                    llvm::Value *ArraySize) {
  if (ArraySize)
    return Builder.CreateAlloca(Ty, ArraySize, Name);
  return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
                              ArraySize, Name, AllocaInsertPt);
}

/// CreateDefaultAlignTempAlloca - This creates an alloca with the
/// default alignment of the corresponding LLVM type, which is *not*
/// guaranteed to be related in any way to the expected alignment of
/// an AST type that might have been lowered to Ty.
Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
                                                      const Twine &Name) {
  CharUnits Align =
      CharUnits::fromQuantity(CGM.getDataLayout().getPrefTypeAlignment(Ty));
  return CreateTempAlloca(Ty, Align, Name);
}

Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
  return CreateTempAlloca(ConvertType(Ty), Align, Name);
}

Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
                                       Address *Alloca) {
  // FIXME: Should we prefer the preferred type alignment here?
  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
}

Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
                                       const Twine &Name, Address *Alloca) {
  Address Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
                                    /*ArraySize=*/nullptr, Alloca);

  if (Ty->isConstantMatrixType()) {
    auto *ArrayTy = cast<llvm::ArrayType>(Result.getElementType());
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
                                                ArrayTy->getNumElements());

    Result = Address(
        Builder.CreateBitCast(Result.getPointer(), VectorTy->getPointerTo()),
        VectorTy, Result.getAlignment());
  }
  return Result;
}

Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align,
                                                  const Twine &Name) {
  return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
}

Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
                                                  const Twine &Name) {
  return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
                                  Name);
}

/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
/// expression and compare the result against zero, returning an Int1Ty value.
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
  PGO.setCurrentStmt(E);
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
    llvm::Value *MemPtr = EmitScalarExpr(E);
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
  }

  QualType BoolTy = getContext().BoolTy;
  SourceLocation Loc = E->getExprLoc();
  CGFPOptionsRAII FPOptsRAII(*this, E);
  if (!E->getType()->isAnyComplexType())
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);

  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
                                       Loc);
}
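
// Illustrative note (not part of the original source): a plain scalar
// condition such as
//
//   int *p = ...;
//   if (p) { ... }
//
// lowers to an `icmp ne` of `p` against null, while a member-pointer
// condition is delegated to the C++ ABI, since a null member pointer need
// not be an all-zero bit pattern (the Itanium ABI uses -1 for null member
// data pointers, for example).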

/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
  if (E->isPRValue())
    return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);

  // If this is a bitfield-resulting conditional operator, we can special case
  // emit this. The normal 'EmitLValue' version of this is particularly
  // difficult to codegen for, since creating a single "LValue" for two
  // different sized arguments here is not particularly doable.
  if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
          E->IgnoreParenNoopCasts(getContext()))) {
    if (CondOp->getObjectKind() == OK_BitField)
      return EmitIgnoredConditionalOperator(CondOp);
  }

  // Just emit it as an l-value and drop the result.
  EmitLValue(E);
}

/// EmitAnyExpr - Emit code to compute the specified expression which
/// can have any type. The result is returned as an RValue struct.
/// If this is an aggregate expression, AggSlot indicates where the
/// result should be returned.
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
                                    AggValueSlot aggSlot,
                                    bool ignoreResult) {
  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar:
    return RValue::get(EmitScalarExpr(E, ignoreResult));
  case TEK_Complex:
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
  case TEK_Aggregate:
    if (!ignoreResult && aggSlot.isIgnored())
      aggSlot = CreateAggTemp(E->getType(), "agg-temp");
    EmitAggExpr(E, aggSlot);
    return aggSlot.asRValue();
  }
  llvm_unreachable("bad evaluation kind");
}
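
// Illustrative note (not part of the original source): the evaluation kinds
// map onto source types roughly as follows:
//
//   int, float, T*    -> TEK_Scalar    (a single llvm::Value)
//   _Complex double   -> TEK_Complex   (a pair of llvm::Values)
//   struct S {...}    -> TEK_Aggregate (evaluated in memory via AggValueSlot)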

/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
/// always be accessible even if no aggregate location is provided.
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
  AggValueSlot AggSlot = AggValueSlot::ignored();

  if (hasAggregateEvaluationKind(E->getType()))
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
  return EmitAnyExpr(E, AggSlot);
}

/// EmitAnyExprToMem - Evaluate an expression into a given memory
/// location.
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
                                       Address Location,
                                       Qualifiers Quals,
                                       bool IsInit) {
  // FIXME: This function should take an LValue as an argument.
  switch (getEvaluationKind(E->getType())) {
  case TEK_Complex:
    EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
                              /*isInit*/ false);
    return;

  case TEK_Aggregate: {
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
                                         AggValueSlot::IsDestructed_t(IsInit),
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsAliased_t(!IsInit),
                                         AggValueSlot::MayOverlap));
    return;
  }

  case TEK_Scalar: {
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
    LValue LV = MakeAddrLValue(Location, E->getType());
    EmitStoreThroughLValue(RV, LV);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}

static void
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
                     const Expr *E, Address ReferenceTemporary) {
  // Objective-C++ ARC:
  //   If we are binding a reference to a temporary that has ownership, we
  //   need to perform retain/release operations on the temporary.
  //
  // FIXME: This should be looking at E, not M.
  if (auto Lifetime = M->getType().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
      // Carry on to normal cleanup handling.
      break;

    case Qualifiers::OCL_Autoreleasing:
      // Nothing to do; cleaned up by an autorelease pool.
      return;

    case Qualifiers::OCL_Strong:
    case Qualifiers::OCL_Weak:
      switch (StorageDuration Duration = M->getStorageDuration()) {
      case SD_Static:
        // Note: we intentionally do not register a cleanup to release
        // the object on program termination.
        return;

      case SD_Thread:
        // FIXME: We should probably register a cleanup in this case.
        return;

      case SD_Automatic:
      case SD_FullExpression:
        CodeGenFunction::Destroyer *Destroy;
        CleanupKind CleanupKind;
        if (Lifetime == Qualifiers::OCL_Strong) {
          const ValueDecl *VD = M->getExtendingDecl();
          bool Precise =
              VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
          CleanupKind = CGF.getARCCleanupKind();
          Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
                            : &CodeGenFunction::destroyARCStrongImprecise;
        } else {
          // __weak objects always get EH cleanups; otherwise, exceptions
          // could cause really nasty crashes instead of mere leaks.
          CleanupKind = NormalAndEHCleanup;
          Destroy = &CodeGenFunction::destroyARCWeak;
        }
        if (Duration == SD_FullExpression)
          CGF.pushDestroy(CleanupKind, ReferenceTemporary,
                          M->getType(), *Destroy,
                          CleanupKind & EHCleanup);
        else
          CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
                                          M->getType(),
                                          *Destroy, CleanupKind & EHCleanup);
        return;

      case SD_Dynamic:
        llvm_unreachable("temporary cannot have dynamic storage duration");
      }
      llvm_unreachable("unknown storage duration");
    }
  }

  CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
  if (const RecordType *RT =
          E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
    // Get the destructor for the reference temporary.
    auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
    if (!ClassDecl->hasTrivialDestructor())
      ReferenceTemporaryDtor = ClassDecl->getDestructor();
  }

  if (!ReferenceTemporaryDtor)
    return;

  // Call the destructor for the temporary.
  switch (M->getStorageDuration()) {
  case SD_Static:
  case SD_Thread: {
    llvm::FunctionCallee CleanupFn;
    llvm::Constant *CleanupArg;
    if (E->getType()->isArrayType()) {
      CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
          ReferenceTemporary, E->getType(),
          CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
          dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
      CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
    } else {
      CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
          GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
      CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer());
    }
    CGF.CGM.getCXXABI().registerGlobalDtor(
        CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
    break;
  }

  case SD_FullExpression:
    CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
                    CodeGenFunction::destroyCXXObject,
                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Automatic:
    CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
                                    ReferenceTemporary, E->getType(),
                                    CodeGenFunction::destroyCXXObject,
                                    CGF.getLangOpts().Exceptions);
    break;

  case SD_Dynamic:
    llvm_unreachable("temporary cannot have dynamic storage duration");
  }
}
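
// Illustrative example (not part of the original source): for
//
//   struct S { ~S(); };
//   void f() { const S &r = S(); /* ... */ }
//
// the temporary bound to `r` has SD_Automatic storage duration, so its
// destructor is registered above as a lifetime-extended cleanup that runs at
// the end of the enclosing scope rather than at the end of the
// full-expression.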

static Address createReferenceTemporary(CodeGenFunction &CGF,
                                        const MaterializeTemporaryExpr *M,
                                        const Expr *Inner,
                                        Address *Alloca = nullptr) {
  auto &TCG = CGF.getTargetHooks();
  switch (M->getStorageDuration()) {
  case SD_FullExpression:
  case SD_Automatic: {
    // If we have a constant temporary array or record try to promote it into a
    // constant global under the same rules a normal constant would've been
    // promoted. This is easier on the optimizer and generally emits fewer
    // instructions.
    QualType Ty = Inner->getType();
    if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
        (Ty->isArrayType() || Ty->isRecordType()) &&
        CGF.CGM.isTypeConstant(Ty, true))
      if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
        auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
        auto *GV = new llvm::GlobalVariable(
            CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
            llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
            llvm::GlobalValue::NotThreadLocal,
            CGF.getContext().getTargetAddressSpace(AS));
        CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
        GV->setAlignment(alignment.getAsAlign());
        llvm::Constant *C = GV;
        if (AS != LangAS::Default)
          C = TCG.performAddrSpaceCast(
              CGF.CGM, GV, AS, LangAS::Default,
              GV->getValueType()->getPointerTo(
                  CGF.getContext().getTargetAddressSpace(LangAS::Default)));
        // FIXME: Should we put the new global into a COMDAT?
        return Address(C, GV->getValueType(), alignment);
      }
    return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
  }
  case SD_Thread:
  case SD_Static:
    return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);

  case SD_Dynamic:
    llvm_unreachable("temporary can't have dynamic storage duration");
  }
  llvm_unreachable("unknown storage duration");
}

/// Helper method to check if the underlying ABI is AAPCS
static bool isAAPCS(const TargetInfo &TargetInfo) {
  return TargetInfo.getABI().startswith("aapcs");
}

LValue CodeGenFunction::
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
  const Expr *E = M->getSubExpr();

  assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
          !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
         "Reference should never be pseudo-strong!");

  // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
  // as that will cause the lifetime adjustment to be lost for ARC
  auto ownership = M->getType().getObjCLifetime();
  if (ownership != Qualifiers::OCL_None &&
      ownership != Qualifiers::OCL_ExplicitNone) {
    Address Object = createReferenceTemporary(*this, M, E);
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
      llvm::Type *Ty = ConvertTypeForMem(E->getType());
      Object = Address(llvm::ConstantExpr::getBitCast(
                           Var, Ty->getPointerTo(Object.getAddressSpace())),
                       Ty, Object.getAlignment());

      // createReferenceTemporary will promote the temporary to a global with a
      // constant initializer if it can. It can only do this to a value of
      // ARC-manageable type if the value is global and therefore "immune" to
      // ref-counting operations. Therefore we have no need to emit either a
      // dynamic initialization or a cleanup and we can just return the address
      // of the temporary.
      if (Var->hasInitializer())
        return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);

      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
    }
    LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
                                       AlignmentSource::Decl);

    switch (getEvaluationKind(E->getType())) {
    default: llvm_unreachable("expected scalar or aggregate expression");
    case TEK_Scalar:
      EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
      break;
    case TEK_Aggregate: {
      EmitAggExpr(E, AggValueSlot::forAddr(Object,
                                           E->getType().getQualifiers(),
                                           AggValueSlot::IsDestructed,
                                           AggValueSlot::DoesNotNeedGCBarriers,
                                           AggValueSlot::IsNotAliased,
                                           AggValueSlot::DoesNotOverlap));
      break;
    }
    }

    pushTemporaryCleanup(*this, M, E, Object);
    return RefTempDst;
  }

  SmallVector<const Expr *, 2> CommaLHSs;
  SmallVector<SubobjectAdjustment, 2> Adjustments;
  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);

  for (const auto &Ignored : CommaLHSs)
    EmitIgnoredExpr(Ignored);

  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
    if (opaque->getType()->isRecordType()) {
      assert(Adjustments.empty());
      return EmitOpaqueValueLValue(opaque);
    }
  }

  // Create and initialize the reference temporary.
  Address Alloca = Address::invalid();
  Address Object = createReferenceTemporary(*this, M, E, &Alloca);
  if (auto *Var = dyn_cast<llvm::GlobalVariable>(
          Object.getPointer()->stripPointerCasts())) {
    llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
    Object = Address(llvm::ConstantExpr::getBitCast(
                         cast<llvm::Constant>(Object.getPointer()),
                         TemporaryType->getPointerTo()),
                     TemporaryType,
                     Object.getAlignment());
    // If the temporary is a global and has a constant initializer or is a
    // constant temporary that we promoted to a global, we may have already
    // initialized it.
    if (!Var->hasInitializer()) {
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
      EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
    }
  } else {
    switch (M->getStorageDuration()) {
    case SD_Automatic:
      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
                                                  Alloca, Size);
      }
      break;

    case SD_FullExpression: {
      if (!ShouldEmitLifetimeMarkers)
        break;

      // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
      // marker. Instead, start the lifetime of a conditional temporary earlier
      // so that it's unconditional. Don't do this with sanitizers which need
      // more precise lifetime marks.
      ConditionalEvaluation *OldConditional = nullptr;
      CGBuilderTy::InsertPoint OldIP;
      if (isInConditionalBranch() && !E->getType().isDestructedType() &&
          !SanOpts.has(SanitizerKind::HWAddress) &&
          !SanOpts.has(SanitizerKind::Memory) &&
          !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) {
        OldConditional = OutermostConditional;
        OutermostConditional = nullptr;

        OldIP = Builder.saveIP();
        llvm::BasicBlock *Block = OldConditional->getStartingBlock();
        Builder.restoreIP(CGBuilderTy::InsertPoint(
            Block, llvm::BasicBlock::iterator(Block->back())));
      }

      if (auto *Size = EmitLifetimeStart(
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
              Alloca.getPointer())) {
        pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
                                             Size);
      }

      if (OldConditional) {
        OutermostConditional = OldConditional;
        Builder.restoreIP(OldIP);
      }
      break;
    }

    default:
      break;
    }
    EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
  }
  pushTemporaryCleanup(*this, M, E, Object);

  // Perform derived-to-base casts and/or field accesses, to get from the
  // temporary object we created (and, potentially, for which we extended
  // the lifetime) to the subobject we're binding the reference to.
  for (SubobjectAdjustment &Adjustment : llvm::reverse(Adjustments)) {
    switch (Adjustment.Kind) {
    case SubobjectAdjustment::DerivedToBaseAdjustment:
      Object =
          GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
                                Adjustment.DerivedToBase.BasePath->path_begin(),
                                Adjustment.DerivedToBase.BasePath->path_end(),
                                /*NullCheckValue=*/ false, E->getExprLoc());
      break;

    case SubobjectAdjustment::FieldAdjustment: {
      LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
      LV = EmitLValueForField(LV, Adjustment.Field);
      assert(LV.isSimple() &&
             "materialized temporary field is not a simple lvalue");
      Object = LV.getAddress(*this);
      break;
    }

    case SubobjectAdjustment::MemberPointerAdjustment: {
      llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
      Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
                                               Adjustment.Ptr.MPT);
      break;
    }
    }
  }

  return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
}
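
// Illustrative example (not part of the original source): binding a reference
// to a subobject of a temporary, as in
//
//   struct A { int x; };
//   A make();
//   const int &r = make().x;
//
// materializes the whole A temporary and then applies a field adjustment, so
// `r` designates the `x` member while the complete temporary stays alive.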

RValue
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
  // Emit the expression as an lvalue.
  LValue LV = EmitLValue(E);
  assert(LV.isSimple());
  llvm::Value *Value = LV.getPointer(*this);

  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
    //   If a glvalue to which a reference is directly bound designates neither
    //   an existing object or function of an appropriate type nor a region of
    //   storage of suitable size and alignment to contain an object of the
    //   reference's type, the behavior is undefined.
    QualType Ty = E->getType();
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
  }

  return RValue::get(Value);
}


/// getAccessedFieldNo - Given an encoded value and a result number, return the
/// input field number being accessed.
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
                                             const llvm::Constant *Elts) {
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
      ->getZExtValue();
}

/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
                                    llvm::Value *High) {
  llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
  llvm::Value *K47 = Builder.getInt64(47);
  llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
  llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
  llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
  llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
  return Builder.CreateMul(B1, KMul);
}
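
// For reference (not in the original file): the emitted IR mirrors
// hash_16_bytes from llvm/ADT/Hashing.h, i.e. with k = 0x9ddfea08eb382d69:
//
//   a = (lo ^ hi) * k;  a ^= (a >> 47);
//   b = (hi ^ a) * k;   b ^= (b >> 47);
//   return b * k;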

static bool isNullPointerAllowed(TypeCheckKind TCK) {
  return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
         TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
}

static bool isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
  return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
         (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
          TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
          TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
}

bool CodeGenFunction::sanitizePerformTypeCheck() const {
  return SanOpts.has(SanitizerKind::Null) ||
         SanOpts.has(SanitizerKind::Alignment) ||
         SanOpts.has(SanitizerKind::ObjectSize) ||
         SanOpts.has(SanitizerKind::Vptr);
}

void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
                                    llvm::Value *Ptr, QualType Ty,
                                    CharUnits Alignment,
                                    SanitizerSet SkippedChecks,
                                    llvm::Value *ArraySize) {
  if (!sanitizePerformTypeCheck())
    return;

  // Don't check pointers outside the default address space. The null check
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
  // communicate the addresses to the runtime handler for the vptr check.
  if (Ptr->getType()->getPointerAddressSpace())
    return;

  // Don't check pointers to volatile data. The behavior here is
  // implementation-defined.
  if (Ty.isVolatileQualified())
    return;

  SanitizerScope SanScope(this);

  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
  llvm::BasicBlock *Done = nullptr;

  // Quickly determine whether we have a pointer to an alloca. It's possible
  // to skip null checks, and some alignment checks, for these pointers. This
  // can reduce compile-time significantly.
  auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());

  llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
  llvm::Value *IsNonNull = nullptr;
  bool IsGuaranteedNonNull =
      SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
  bool AllowNullPointers = isNullPointerAllowed(TCK);
  if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
      !IsGuaranteedNonNull) {
    // The glvalue must not be an empty glvalue.
    IsNonNull = Builder.CreateIsNotNull(Ptr);

    // The IR builder can constant-fold the null check if the pointer points to
    // a constant.
    IsGuaranteedNonNull = IsNonNull == True;

    // Skip the null check if the pointer is known to be non-null.
    if (!IsGuaranteedNonNull) {
      if (AllowNullPointers) {
        // When performing pointer casts, it's OK if the value is null.
        // Skip the remaining checks in that case.
        Done = createBasicBlock("null");
        llvm::BasicBlock *Rest = createBasicBlock("not.null");
        Builder.CreateCondBr(IsNonNull, Rest, Done);
        EmitBlock(Rest);
      } else {
        Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
      }
    }
  }

  if (SanOpts.has(SanitizerKind::ObjectSize) &&
      !SkippedChecks.has(SanitizerKind::ObjectSize) &&
      !Ty->isIncompleteType()) {
    uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
    llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
    if (ArraySize)
      Size = Builder.CreateMul(Size, ArraySize);

    // Degenerate case: new X[0] does not need an objectsize check.
    llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
    if (!ConstantSize || !ConstantSize->isNullValue()) {
      // The glvalue must refer to a large enough storage region.
      // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
      //        to check this.
      // FIXME: Get object address space
      llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
      llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
      llvm::Value *Min = Builder.getFalse();
      llvm::Value *NullIsUnknown = Builder.getFalse();
      llvm::Value *Dynamic = Builder.getFalse();
      llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy);
      llvm::Value *LargeEnough = Builder.CreateICmpUGE(
          Builder.CreateCall(F, {CastAddr, Min, NullIsUnknown, Dynamic}), Size);
      Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
    }
  }

  llvm::MaybeAlign AlignVal;
  llvm::Value *PtrAsInt = nullptr;

  if (SanOpts.has(SanitizerKind::Alignment) &&
      !SkippedChecks.has(SanitizerKind::Alignment)) {
    AlignVal = Alignment.getAsMaybeAlign();
    if (!Ty->isIncompleteType() && !AlignVal)
      AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
                                             /*ForPointeeType=*/true)
                     .getAsMaybeAlign();

    // The glvalue must be suitably aligned.
    if (AlignVal && *AlignVal > llvm::Align(1) &&
        (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
      PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
      llvm::Value *Align = Builder.CreateAnd(
          PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
      llvm::Value *Aligned =
          Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
      if (Aligned != True)
        Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
    }
  }

  if (Checks.size() > 0) {
    llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
        llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
        llvm::ConstantInt::get(Int8Ty, TCK)};
    EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
              PtrAsInt ? PtrAsInt : Ptr);
  }

  // If possible, check that the vptr indicates that there is a subobject of
  // type Ty at offset zero within this object.
  //
  // C++11 [basic.life]p5,6:
  //   [For storage which does not refer to an object within its lifetime]
  //   The program has undefined behavior if:
  //    -- the [pointer or glvalue] is used to access a non-static data member
  //       or call a non-static member function
  if (SanOpts.has(SanitizerKind::Vptr) &&
      !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
    // Ensure that the pointer is non-null before loading it. If there is no
    // compile-time guarantee, reuse the run-time null check or emit a new one.
    if (!IsGuaranteedNonNull) {
      if (!IsNonNull)
        IsNonNull = Builder.CreateIsNotNull(Ptr);
      if (!Done)
        Done = createBasicBlock("vptr.null");
      llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
      Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
      EmitBlock(VptrNotNull);
    }

    // Compute a hash of the mangled name of the type.
    //
    // FIXME: This is not guaranteed to be deterministic! Move to a
    //        fingerprinting mechanism once LLVM provides one. For the time
    //        being the implementation happens to be deterministic.
    SmallString<64> MangledName;
    llvm::raw_svector_ostream Out(MangledName);
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
                                                     Out);

    // Contained in NoSanitizeList based on the mangled type.
    if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
                                                           Out.str())) {
      llvm::hash_code TypeHash = hash_value(Out.str());

      // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
      llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
      llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
      Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), IntPtrTy,
                       getPointerAlign());
      llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
      llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);

      llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
      Hash = Builder.CreateTrunc(Hash, IntPtrTy);

      // Look the hash up in our cache.
      const int CacheSize = 128;
      llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
      llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
                                                     "__ubsan_vptr_type_cache");
      llvm::Value *Slot = Builder.CreateAnd(
          Hash, llvm::ConstantInt::get(IntPtrTy, CacheSize - 1));
      llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
      llvm::Value *CacheVal = Builder.CreateAlignedLoad(
          IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
          getPointerAlign());

      // If the hash isn't in the cache, call a runtime handler to perform the
      // hard work of checking whether the vptr is for an object of the right
      // type. This will either fill in the cache and return, or produce a
      // diagnostic.
      llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
      llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(Loc),
        EmitCheckTypeDescriptor(Ty),
        CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
        llvm::ConstantInt::get(Int8Ty, TCK)
      };
      llvm::Value *DynamicData[] = { Ptr, Hash };
      EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
                SanitizerHandler::DynamicTypeCacheMiss, StaticData,
                DynamicData);
    }
  }

  if (Done) {
    Builder.CreateBr(Done);
    EmitBlock(Done);
  }
}
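
// Illustrative usage (not part of the original source): compiling with
//
//   clang++ -fsanitize=null,alignment,object-size,vptr t.cpp
//
// routes loads, stores, member accesses, and member calls through this
// function, which can emit up to four guarded checks per pointer use:
// non-null, sufficient storage size (via llvm.objectsize), suitable
// alignment, and a dynamic-type (vptr hash) check for polymorphic accesses.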

/// Determine whether this expression refers to a flexible array member in a
/// struct. We disable array bounds checks for such members.
static bool isFlexibleArrayMemberExpr(const Expr *E) {
  // For compatibility with existing code, we treat arrays of length 0 or
  // 1 as flexible array members.
  // FIXME: This is inconsistent with the warning code in SemaChecking. Unify
  // the two mechanisms.
  const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe();
  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
    // FIXME: Sema doesn't treat [1] as a flexible array member if the bound
    // was produced by macro expansion.
    if (CAT->getSize().ugt(1))
      return false;
  } else if (!isa<IncompleteArrayType>(AT))
    return false;

  E = E->IgnoreParens();

  // A flexible array member must be the last member in the class.
  if (const auto *ME = dyn_cast<MemberExpr>(E)) {
    // FIXME: If the base type of the member expr is not FD->getParent(),
    // this should not be treated as a flexible array member access.
    if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
      // FIXME: Sema doesn't treat a T[1] union member as a flexible array
      // member, only a T[0] or T[] member gets that treatment.
      if (FD->getParent()->isUnion())
        return true;
      RecordDecl::field_iterator FI(
          DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
      return ++FI == FD->getParent()->field_end();
    }
  } else if (const auto *IRE = dyn_cast<ObjCIvarRefExpr>(E)) {
    return IRE->getDecl()->getNextIvar() == nullptr;
  }

  return false;
}
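
// Illustrative example (not part of the original source): both trailing
// members below are treated as flexible arrays here, so indexing them is
// exempt from -fsanitize=array-bounds:
//
//   struct S1 { int n; char data[];  };  // C99 flexible array member
//   struct S2 { int n; char data[1]; };  // pre-C99 idiom, also accepted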

llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
                                                   QualType EltTy) {
  ASTContext &C = getContext();
  uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
  if (!EltSize)
    return nullptr;

  auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
  if (!ArrayDeclRef)
    return nullptr;

  auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
  if (!ParamDecl)
    return nullptr;

  auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
  if (!POSAttr)
    return nullptr;

  // Don't load the size if it's a lower bound.
  int POSType = POSAttr->getType();
  if (POSType != 0 && POSType != 1)
    return nullptr;

  // Find the implicit size parameter.
  auto PassedSizeIt = SizeArguments.find(ParamDecl);
  if (PassedSizeIt == SizeArguments.end())
    return nullptr;

  const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
  assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
  Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
  llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
                                              C.getSizeType(), E->getExprLoc());
  llvm::Value *SizeOfElement =
      llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
  return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
}
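
// Illustrative example (not part of the original source): given
//
//   void fill(int *buf __attribute__((pass_object_size(0))));
//
// every call site evaluates __builtin_object_size(buf, 0) and passes it as a
// hidden argument; the helper above reloads that implicit parameter and
// divides by sizeof(int) to recover an element count usable as an array
// bound.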

/// If Base is known to point to the start of an array, return the length of
/// that array. Return 0 if the length cannot be determined.
static llvm::Value *getArrayIndexingBound(
    CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType) {
  // For the vector indexing extension, the bound is the number of elements.
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
    IndexedType = Base->getType();
    return CGF.Builder.getInt32(VT->getNumElements());
  }

  Base = Base->IgnoreParens();

  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
        !isFlexibleArrayMemberExpr(CE->getSubExpr())) {
      IndexedType = CE->getSubExpr()->getType();
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
        return CGF.Builder.getInt(CAT->getSize());
      else if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
        return CGF.getVLASize(VAT).NumElts;
      // Ignore pass_object_size here. It's not applicable on decayed pointers.
    }
  }

  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
    IndexedType = Base->getType();
    return POS;
  }

  return nullptr;
}

void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
                                      llvm::Value *Index, QualType IndexType,
                                      bool Accessed) {
  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
         "should not be called unless adding bounds checks");
  SanitizerScope SanScope(this);

  QualType IndexedType;
  llvm::Value *Bound = getArrayIndexingBound(*this, Base, IndexedType);
  if (!Bound)
    return;

  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);

  llvm::Constant *StaticData[] = {
    EmitCheckSourceLocation(E->getExprLoc()),
    EmitCheckTypeDescriptor(IndexedType),
    EmitCheckTypeDescriptor(IndexType)
  };
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
  EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
            SanitizerHandler::OutOfBounds, StaticData, Index);
}
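
// Illustrative example (not part of the original source): with
// -fsanitize=array-bounds,
//
//   int a[10];
//   a[i] = 0;
//
// emits `icmp ult i, 10` (ULT for an actual access; ULE when only forming the
// one-past-the-end address) and branches to the __ubsan_handle_out_of_bounds
// diagnostic handler on failure.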


ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
                         bool isInc, bool isPre) {
  ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());

  llvm::Value *NextVal;
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
    uint64_t AmountVal = isInc ? 1 : -1;
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  } else {
    QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
    if (!isInc)
      FVal.changeSign();
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);

    // Add the inc/dec to the real part.
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
  }

  ComplexPairTy IncVal(NextVal, InVal.second);

  // Store the updated result through the lvalue.
  EmitStoreOfComplex(IncVal, LV, /*init*/ false);
  if (getLangOpts().OpenMP)
    CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
                                                              E->getSubExpr());

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? IncVal : InVal;
}
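
// Illustrative example (not part of the original source):
//
//   _Complex double z = ...;
//   z++;   // adds 1.0 to the real part; the imaginary part is unchanged
//
// A pre-increment yields the updated pair; a post-increment yields the pair
// originally loaded from memory.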

void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
                                             CodeGenFunction *CGF) {
  // Bind VLAs in the cast type.
  if (CGF && E->getType()->isVariablyModifiedType())
    CGF->EmitVariablyModifiedType(E->getType());

  if (CGDebugInfo *DI = getModuleDebugInfo())
    DI->EmitExplicitCastType(E->getType());
}

//===----------------------------------------------------------------------===//
//                         LValue Expression Emission
//===----------------------------------------------------------------------===//

/// EmitPointerWithAlignment - Given an expression of pointer type, try to
/// derive a more accurate bound on the alignment of the pointer.
Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
                                                  LValueBaseInfo *BaseInfo,
                                                  TBAAAccessInfo *TBAAInfo) {
  // We allow this with ObjC object pointers because of fragile ABIs.
  assert(E->getType()->isPointerType() ||
         E->getType()->isObjCObjectPointerType());
  E = E->IgnoreParens();

  // Casts:
  if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
      CGM.EmitExplicitCastExprType(ECE, this);

    switch (CE->getCastKind()) {
    // Non-converting casts (but not C's implicit conversion from void*).
    case CK_BitCast:
    case CK_NoOp:
    case CK_AddressSpaceConversion:
      if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
        if (PtrTy->getPointeeType()->isVoidType())
          break;

        LValueBaseInfo InnerBaseInfo;
        TBAAAccessInfo InnerTBAAInfo;
        Address Addr = EmitPointerWithAlignment(CE->getSubExpr(),
                                                &InnerBaseInfo,
                                                &InnerTBAAInfo);
        if (BaseInfo) *BaseInfo = InnerBaseInfo;
        if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;

        if (isa<ExplicitCastExpr>(CE)) {
          LValueBaseInfo TargetTypeBaseInfo;
          TBAAAccessInfo TargetTypeTBAAInfo;
          CharUnits Align = CGM.getNaturalPointeeTypeAlignment(
              E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
          if (TBAAInfo)
            *TBAAInfo = CGM.mergeTBAAInfoForCast(*TBAAInfo,
                                                 TargetTypeTBAAInfo);
          // If the source l-value is opaque, honor the alignment of the
          // casted-to type.
          if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
            if (BaseInfo)
              BaseInfo->mergeForCast(TargetTypeBaseInfo);
            Addr = Address(Addr.getPointer(), Addr.getElementType(), Align);
          }
        }

        if (SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
            CE->getCastKind() == CK_BitCast) {
          if (auto PT = E->getType()->getAs<PointerType>())
            EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
                                      /*MayBeNull=*/true,
                                      CodeGenFunction::CFITCK_UnrelatedCast,
                                      CE->getBeginLoc());
        }

        llvm::Type *ElemTy = ConvertTypeForMem(E->getType()->getPointeeType());
        Addr = Builder.CreateElementBitCast(Addr, ElemTy);
        if (CE->getCastKind() == CK_AddressSpaceConversion)
          Addr = Builder.CreateAddrSpaceCast(Addr, ConvertType(E->getType()));
        return Addr;
      }
      break;

    // Array-to-pointer decay.
    case CK_ArrayToPointerDecay:
      return EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);

    // Derived-to-base conversions.
    case CK_UncheckedDerivedToBase:
    case CK_DerivedToBase: {
      // TODO: Support accesses to members of base classes in TBAA. For now, we
      // conservatively pretend that the complete object is of the base class
      // type.
      if (TBAAInfo)
        *TBAAInfo = CGM.getTBAAAccessInfo(E->getType());
      Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), BaseInfo);
      auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
      return GetAddressOfBaseClass(Addr, Derived,
                                   CE->path_begin(), CE->path_end(),
                                   ShouldNullCheckClassCastValue(CE),
                                   CE->getExprLoc());
    }

    // TODO: Is there any reason to treat base-to-derived conversions
    // specially?
    default:
      break;
    }
  }

  // Unary &.
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = EmitLValue(UO->getSubExpr());
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress(*this);
    }
  }

  // std::addressof and variants.
  if (auto *Call = dyn_cast<CallExpr>(E)) {
    switch (Call->getBuiltinCallee()) {
    default:
      break;
    case Builtin::BIaddressof:
    case Builtin::BI__addressof:
    case Builtin::BI__builtin_addressof: {
      LValue LV = EmitLValue(Call->getArg(0));
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
      return LV.getAddress(*this);
    }
    }
  }

  // TODO: conditional operators, comma.

  // Otherwise, use the alignment of the type.
  CharUnits Align =
      CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
  llvm::Type *ElemTy = ConvertTypeForMem(E->getType()->getPointeeType());
  return Address(EmitScalarExpr(E), ElemTy, Align);
}
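
// Illustrative example (not part of the original source): for
//
//   char buf[8] __attribute__((aligned(8)));
//   *(int *)buf = 0;
//
// walking through the cast and the array-to-pointer decay lets the store keep
// buf's declared 8-byte alignment instead of falling back to int's natural
// 4-byte alignment.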

llvm::Value *CodeGenFunction::EmitNonNullRValueCheck(RValue RV, QualType T) {
  llvm::Value *V = RV.getScalarVal();
  if (auto MPT = T->getAs<MemberPointerType>())
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, V, MPT);
  return Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
}

RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
  if (Ty->isVoidType())
    return RValue::get(nullptr);

  switch (getEvaluationKind(Ty)) {
  case TEK_Complex: {
    llvm::Type *EltTy =
        ConvertType(Ty->castAs<ComplexType>()->getElementType());
    llvm::Value *U = llvm::UndefValue::get(EltTy);
    return RValue::getComplex(std::make_pair(U, U));
  }

  // If this is a use of an undefined aggregate type, the aggregate must have an
  // identifiable address. Just because the contents of the value are undefined
  // doesn't mean that the address can't be taken and compared.
  case TEK_Aggregate: {
    Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
    return RValue::getAggregate(DestPtr);
  }

  case TEK_Scalar:
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
  }
  llvm_unreachable("bad evaluation kind");
}

RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  return GetUndefRValue(E->getType());
}

LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
                                              const char *Name) {
  ErrorUnsupported(E, Name);
  llvm::Type *ElTy = ConvertType(E->getType());
  llvm::Type *Ty = llvm::PointerType::getUnqual(ElTy);
  return MakeAddrLValue(
      Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
}

bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
  const Expr *Base = Obj;
  while (!isa<CXXThisExpr>(Base)) {
    // The result of a dynamic_cast can be null.
    if (isa<CXXDynamicCastExpr>(Base))
      return false;

    if (const auto *CE = dyn_cast<CastExpr>(Base)) {
      Base = CE->getSubExpr();
    } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
      Base = PE->getSubExpr();
    } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
      if (UO->getOpcode() == UO_Extension)
        Base = UO->getSubExpr();
      else
        return false;
    } else {
      return false;
    }
  }
  return true;
}
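
// Illustrative note (not part of the original source): this returns true for
// expressions such as `this`, `(this)`, and `__extension__ (this)`. Callers
// use it to skip null and alignment sanitizer checks on member accesses
// through `this`, which the language already requires to reference a valid
// object.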

LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
  LValue LV;
  if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
    LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
  else
    LV = EmitLValue(E);
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
    SanitizerSet SkippedChecks;
    if (const auto *ME = dyn_cast<MemberExpr>(E)) {
      bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
      if (IsBaseCXXThis)
        SkippedChecks.set(SanitizerKind::Alignment, true);
      if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
        SkippedChecks.set(SanitizerKind::Null, true);
    }
    EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(*this), E->getType(),
                  LV.getAlignment(), SkippedChecks);
  }
  return LV;
}

/// EmitLValue - Emit code to compute a designator that specifies the location
/// of the expression.
///
/// This can return one of two things: a simple address or a bitfield reference.
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
/// an LLVM pointer type.
///
/// If this returns a bitfield reference, nothing about the pointee type of the
/// LLVM value is known: For example, it may not be a pointer to an integer.
///
/// If this returns a normal address, and if the lvalue's C type is fixed size,
/// this method guarantees that the returned pointer type will point to an LLVM
/// type of the same size of the lvalue's type. If the lvalue has a variable
/// length type, this is not possible.
///
LValue CodeGenFunction::EmitLValue(const Expr *E) {
  ApplyDebugLocation DL(*this, E);
  switch (E->getStmtClass()) {
  default: return EmitUnsupportedLValue(E, "l-value expression");

  case Expr::ObjCPropertyRefExprClass:
    llvm_unreachable("cannot emit a property reference directly");

  case Expr::ObjCSelectorExprClass:
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
  case Expr::ObjCIsaExprClass:
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
  case Expr::BinaryOperatorClass:
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
  case Expr::CompoundAssignOperatorClass: {
    QualType Ty = E->getType();
    if (const AtomicType *AT = Ty->getAs<AtomicType>())
      Ty = AT->getValueType();
    if (!Ty->isAnyComplexType())
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return EmitCallExprLValue(cast<CallExpr>(E));
  case Expr::CXXRewrittenBinaryOperatorClass:
    return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm());
  case Expr::VAArgExprClass:
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
  case Expr::DeclRefExprClass:
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
  case Expr::ConstantExprClass: {
    const ConstantExpr *CE = cast<ConstantExpr>(E);
    if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
      QualType RetType = cast<CallExpr>(CE->getSubExpr()->IgnoreImplicit())
                             ->getCallReturnType(getContext())
                             ->getPointeeType();
      return MakeNaturalAlignAddrLValue(Result, RetType);
    }
    return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
  }
  case Expr::ParenExprClass:
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
  case Expr::PredefinedExprClass:
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
  case Expr::StringLiteralClass:
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
  case Expr::ObjCEncodeExprClass:
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
  case Expr::PseudoObjectExprClass:
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
  case Expr::InitListExprClass:
    return EmitInitListLValue(cast<InitListExpr>(E));
  case Expr::CXXTemporaryObjectExprClass:
  case Expr::CXXConstructExprClass:
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
  case Expr::CXXBindTemporaryExprClass:
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
  case Expr::CXXUuidofExprClass:
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
  case Expr::LambdaExprClass:
    return EmitAggExprToLValue(E);

  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(E);
    RunCleanupsScope Scope(*this);
    LValue LV = EmitLValue(cleanups->getSubExpr());
    if (LV.isSimple()) {
      // Defend against branches out of gnu statement expressions surrounded by
      // cleanups.
      Address Addr = LV.getAddress(*this);
      llvm::Value *V = Addr.getPointer();
      Scope.ForceCleanup({&V});
      return LValue::MakeAddr(Addr.withPointer(V), LV.getType(), getContext(),
                              LV.getBaseInfo(), LV.getTBAAInfo());
    }
    // FIXME: Is it possible to create an ExprWithCleanups that produces a
    // bitfield lvalue or some other non-simple lvalue?
    return LV;
  }

  case Expr::CXXDefaultArgExprClass: {
    auto *DAE = cast<CXXDefaultArgExpr>(E);
    CXXDefaultArgExprScope Scope(*this, DAE);
    return EmitLValue(DAE->getExpr());
  }
  case Expr::CXXDefaultInitExprClass: {
    auto *DIE = cast<CXXDefaultInitExpr>(E);
    CXXDefaultInitExprScope Scope(*this, DIE);
    return EmitLValue(DIE->getExpr());
  }
  case Expr::CXXTypeidExprClass:
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));

  case Expr::ObjCMessageExprClass:
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
  case Expr::ObjCIvarRefExprClass:
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
  case Expr::StmtExprClass:
    return EmitStmtExprLValue(cast<StmtExpr>(E));
  case Expr::UnaryOperatorClass:
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
  case Expr::ArraySubscriptExprClass:
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
  case Expr::MatrixSubscriptExprClass:
    return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
  case Expr::OMPArraySectionExprClass:
    return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
  case Expr::ExtVectorElementExprClass:
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
  case Expr::MemberExprClass:
    return EmitMemberExpr(cast<MemberExpr>(E));
  case Expr::CompoundLiteralExprClass:
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
  case Expr::ConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
  case Expr::BinaryConditionalOperatorClass:
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
  case Expr::ChooseExprClass:
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr());
  case Expr::OpaqueValueExprClass:
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
  case Expr::SubstNonTypeTemplateParmExprClass:
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::CXXAddrspaceCastExprClass:
  case Expr::ObjCBridgedCastExprClass:
    return EmitCastLValue(cast<CastExpr>(E));

  case Expr::MaterializeTemporaryExprClass:
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));

  case Expr::CoawaitExprClass:
    return EmitCoawaitLValue(cast<CoawaitExpr>(E));
  case Expr::CoyieldExprClass:
    return EmitCoyieldLValue(cast<CoyieldExpr>(E));
  }
}

/// Given an object of the given canonical type, can we safely copy a
/// value out of it based on its initializer?
static bool isConstantEmittableObjectType(QualType type) {
  assert(type.isCanonical());
  assert(!type->isReferenceType());

  // Must be const-qualified but non-volatile.
  Qualifiers qs = type.getLocalQualifiers();
  if (!qs.hasConst() || qs.hasVolatile()) return false;

  // Otherwise, all object types satisfy this except C++ classes with
  // mutable subobjects or non-trivial copy/destroy behavior.
  if (const auto *RT = dyn_cast<RecordType>(type))
    if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      if (RD->hasMutableFields() || !RD->isTrivial())
        return false;

  return true;
}

/// Can we constant-emit a load of a reference to a variable of the
/// given type? This is different from predicates like
/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
/// in situations that don't necessarily satisfy the language's rules
/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
/// to do this with const float variables even if those variables
/// aren't marked 'constexpr'.
enum ConstantEmissionKind {
  CEK_None,
  CEK_AsReferenceOnly,
  CEK_AsValueOrReference,
  CEK_AsValueOnly
};
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
  type = type.getCanonicalType();
  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
    if (isConstantEmittableObjectType(ref->getPointeeType()))
      return CEK_AsValueOrReference;
    return CEK_AsReferenceOnly;
  }
  if (isConstantEmittableObjectType(type))
    return CEK_AsValueOnly;
  return CEK_None;
}
1495 
1496 /// Try to emit a reference to the given value without producing it as
1497 /// an l-value. This is just an optimization, but it avoids us needing
1498 /// to emit global copies of variables if they're named without triggering
1499 /// a formal use in a context where we can't emit a direct reference to them,
1500 /// for instance if a block or lambda or a member of a local class uses a
1501 /// const int variable or constexpr variable from an enclosing function.
1504  ValueDecl *value = refExpr->getDecl();
1505 
1506  // The value needs to be an enum constant or a constant variable.
1508  if (isa<ParmVarDecl>(value)) {
1509  CEK = CEK_None;
1510  } else if (auto *var = dyn_cast<VarDecl>(value)) {
1511  CEK = checkVarTypeForConstantEmission(var->getType());
1512  } else if (isa<EnumConstantDecl>(value)) {
1513  CEK = CEK_AsValueOnly;
1514  } else {
1515  CEK = CEK_None;
1516  }
1517  if (CEK == CEK_None) return ConstantEmission();
1518 
1519  Expr::EvalResult result;
1520  bool resultIsReference;
1521  QualType resultType;
1522 
1523  // It's best to evaluate all the way as an r-value if that's permitted.
1524  if (CEK != CEK_AsReferenceOnly &&
1525  refExpr->EvaluateAsRValue(result, getContext())) {
1526  resultIsReference = false;
1527  resultType = refExpr->getType();
1528 
1529  // Otherwise, try to evaluate as an l-value.
1530  } else if (CEK != CEK_AsValueOnly &&
1531  refExpr->EvaluateAsLValue(result, getContext())) {
1532  resultIsReference = true;
1533  resultType = value->getType();
1534 
1535  // Failure.
1536  } else {
1537  return ConstantEmission();
1538  }
1539 
1540  // In any case, if the initializer has side-effects, abandon ship.
1541  if (result.HasSideEffects)
1542  return ConstantEmission();
1543 
1544  // In CUDA/HIP device compilation, a lambda may capture a reference variable
1545  // referencing a global host variable by copy. In this case the lambda should
1546  // make a copy of the value of the global host variable. The DRE of the
1547  // captured reference variable cannot be emitted as load from the host
1548  // global variable as compile time constant, since the host variable is not
1549  // accessible on device. The DRE of the captured reference variable has to be
1550  // loaded from captures.
1551  if (CGM.getLangOpts().CUDAIsDevice && result.Val.isLValue() &&
1553  auto *MD = dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl);
1554  if (MD && MD->getParent()->isLambda() &&
1555  MD->getOverloadedOperator() == OO_Call) {
1556  const APValue::LValueBase &base = result.Val.getLValueBase();
1557  if (const ValueDecl *D = base.dyn_cast<const ValueDecl *>()) {
1558  if (const VarDecl *VD = dyn_cast<const VarDecl>(D)) {
1559  if (!VD->hasAttr<CUDADeviceAttr>()) {
1560  return ConstantEmission();
1561  }
1562  }
1563  }
1564  }
1565  }
1566 
1567  // Emit as a constant.
1568  auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
1569  result.Val, resultType);
1570 
1571  // Make sure we emit a debug reference to the global variable.
1572  // This should probably fire even for variables that must be emitted.
1573  if (isa<VarDecl>(value)) {
1574  if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
1575  EmitDeclRefExprDbgValue(refExpr, result.Val);
1576  } else {
1577  assert(isa<EnumConstantDecl>(value));
1578  EmitDeclRefExprDbgValue(refExpr, result.Val);
1579  }
1580 
1581  // If we emitted a reference constant, we need to dereference that.
1582  if (resultIsReference)
1583  return ConstantEmission::forReference(C);
1584 
1585  return ConstantEmission::forValue(C);
1586 }
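
 // For illustration only (hypothetical user code, not part of this file):
 // tryEmitAsConstant is what lets a lambda in the following snippet fold the
 // enclosing function's 'n' directly into the closure body, with no capture
 // and no global copy emitted:
 //
 //   void f() {
 //     const int n = 42;                 // const, not constexpr
 //     auto g = [] { return n + 1; };    // no odr-use; body becomes 'ret 43'
 //   }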
1587 
1588 static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
1589  const MemberExpr *ME) {
1590  if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
1591  // Try to emit static variable member expressions as DREs.
1592  return DeclRefExpr::Create(
1593  CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
1594  /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
1595  ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
1596  }
1597  return nullptr;
1598 }
1599 
1600 CodeGenFunction::ConstantEmission
1601 CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
1602  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
1603  return tryEmitAsConstant(DRE);
1604  return ConstantEmission();
1605 }
1606 
1607 llvm::Value *CodeGenFunction::emitScalarConstant(
1608  const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
1609  assert(Constant && "not a constant");
1610  if (Constant.isReference())
1611  return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
1612  E->getExprLoc())
1613  .getScalarVal();
1614  return Constant.getValue();
1615 }
1616 
1617 llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
1618  SourceLocation Loc) {
1619  return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(),
1620  lvalue.getType(), Loc, lvalue.getBaseInfo(),
1621  lvalue.getTBAAInfo(), lvalue.isNontemporal());
1622 }
1623 
1624 static bool hasBooleanRepresentation(QualType Ty) {
1625  if (Ty->isBooleanType())
1626  return true;
1627 
1628  if (const EnumType *ET = Ty->getAs<EnumType>())
1629  return ET->getDecl()->getIntegerType()->isBooleanType();
1630 
1631  if (const AtomicType *AT = Ty->getAs<AtomicType>())
1632  return hasBooleanRepresentation(AT->getValueType());
1633 
1634  return false;
1635 }
1636 
1637 static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
1638  llvm::APInt &Min, llvm::APInt &End,
1639  bool StrictEnums, bool IsBool) {
1640  const EnumType *ET = Ty->getAs<EnumType>();
1641  bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
1642  ET && !ET->getDecl()->isFixed();
1643  if (!IsBool && !IsRegularCPlusPlusEnum)
1644  return false;
1645 
1646  if (IsBool) {
1647  Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
1648  End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
1649  } else {
1650  const EnumDecl *ED = ET->getDecl();
1651  llvm::Type *LTy = CGF.ConvertTypeForMem(ED->getIntegerType());
1652  unsigned Bitwidth = LTy->getScalarSizeInBits();
1653  unsigned NumNegativeBits = ED->getNumNegativeBits();
1654  unsigned NumPositiveBits = ED->getNumPositiveBits();
1655 
1656  if (NumNegativeBits) {
1657  unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
1658  assert(NumBits <= Bitwidth);
1659  End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
1660  Min = -End;
1661  } else {
1662  assert(NumPositiveBits <= Bitwidth);
1663  End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
1664  Min = llvm::APInt::getZero(Bitwidth);
1665  }
1666  }
1667  return true;
1668 }
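
 // Worked example (editorial, not part of the original file): for
 //   enum E { A = -3, B = 5 };   // C++, no fixed underlying type
 // NumNegativeBits is 3 and NumPositiveBits is 3, so NumBits = max(3, 3+1) = 4,
 // giving End = 1 << 3 = 8 and Min = -8: loads are assumed to lie in [-8, 8).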
1669 
1670 llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
1671  llvm::APInt Min, End;
1672  if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
1673  hasBooleanRepresentation(Ty)))
1674  return nullptr;
1675 
1676  llvm::MDBuilder MDHelper(getLLVMContext());
1677  return MDHelper.createRange(Min, End);
1678 }
1679 
1680 bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
1681  SourceLocation Loc) {
1682  bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
1683  bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
1684  if (!HasBoolCheck && !HasEnumCheck)
1685  return false;
1686 
1687  bool IsBool = hasBooleanRepresentation(Ty) ||
1688  NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
1689  bool NeedsBoolCheck = HasBoolCheck && IsBool;
1690  bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
1691  if (!NeedsBoolCheck && !NeedsEnumCheck)
1692  return false;
1693 
1694  // Single-bit booleans don't need to be checked. Special-case this to avoid
1695  // a bit width mismatch when handling bitfield values. This is handled by
1696  // EmitFromMemory for the non-bitfield case.
1697  if (IsBool &&
1698  cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
1699  return false;
1700 
1701  llvm::APInt Min, End;
1702  if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
1703  return true;
1704 
1705  auto &Ctx = getLLVMContext();
1706  SanitizerScope SanScope(this);
1707  llvm::Value *Check;
1708  --End;
1709  if (!Min) {
1710  Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
1711  } else {
1712  llvm::Value *Upper =
1713  Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
1714  llvm::Value *Lower =
1715  Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
1716  Check = Builder.CreateAnd(Upper, Lower);
1717  }
1718  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
1719  EmitCheckTypeDescriptor(Ty)};
1720  SanitizerMask Kind =
1721  NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
1722  EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
1723  StaticArgs, EmitCheckValue(Value));
1724  return true;
1725 }
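
 // For illustration (editorial sketch): with -fsanitize=bool, a load such as
 //
 //   bool b;
 //   memset(&b, 2, 1);   // underlying byte is now 2
 //   if (b) ...          // load of 2 fails the ULE-against-1 check above
 //
 // branches to the ubsan "load_invalid_value" handler instead of letting the
 // optimizer assume the loaded byte is 0 or 1.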
1726 
1727 llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
1728  QualType Ty,
1729  SourceLocation Loc,
1730  LValueBaseInfo BaseInfo,
1731  TBAAAccessInfo TBAAInfo,
1732  bool isNontemporal) {
1733  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
1734  // Boolean vectors use `iN` as storage type.
1735  if (ClangVecTy->isExtVectorBoolType()) {
1736  llvm::Type *ValTy = ConvertType(Ty);
1737  unsigned ValNumElems =
1738  cast<llvm::FixedVectorType>(ValTy)->getNumElements();
1739  // Load the `iP` storage object (P is the padded vector size).
1740  auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
1741  const auto *RawIntTy = RawIntV->getType();
1742  assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
1743  // Bitcast iP --> <P x i1>.
1744  auto *PaddedVecTy = llvm::FixedVectorType::get(
1745  Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
1746  llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
1747  // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
1748  V = emitBoolVecConversion(V, ValNumElems, "extractvec");
1749 
1750  return EmitFromMemory(V, Ty);
1751  }
1752 
1753  // Handle vectors of size 3 like size 4 for better performance.
1754  const llvm::Type *EltTy = Addr.getElementType();
1755  const auto *VTy = cast<llvm::FixedVectorType>(EltTy);
1756 
1757  if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {
1758 
1759  // Bitcast to vec4 type.
1760  llvm::VectorType *vec4Ty =
1761  llvm::FixedVectorType::get(VTy->getElementType(), 4);
1762  Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
1763  // Now load value.
1764  llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
1765 
1766  // Shuffle vector to get vec3.
1767  V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
1768  return EmitFromMemory(V, Ty);
1769  }
1770  }
1771 
1772  // Atomic operations have to be done on integral types.
1773  LValue AtomicLValue =
1774  LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
1775  if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
1776  return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
1777  }
1778 
1779  llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
1780  if (isNontemporal) {
1781  llvm::MDNode *Node = llvm::MDNode::get(
1782  Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
1783  Load->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
1784  }
1785 
1786  CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
1787 
1788  if (EmitScalarRangeCheck(Load, Ty, Loc)) {
1789  // In order to prevent the optimizer from throwing away the check, don't
1790  // attach range metadata to the load.
1791  } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
1792  if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
1793  Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
1794 
1795  return EmitFromMemory(Load, Ty);
1796 }
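
 // Illustrative IR (editorial sketch) for the vec3 path above: a load of a
 // 'float3' without -fpreserve-vec3-type becomes, in essence,
 //   %castToVec4 = bitcast to a <4 x float> view of the storage
 //   %loadVec4   = load <4 x float>, ...
 //   %extractVec = shufflevector <4 x float> %loadVec4, <4 x float> poison,
 //                               <3 x i32> <i32 0, i32 1, i32 2>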
1797 
1798 llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
1799  // Bool has a different representation in memory than in registers.
1800  if (hasBooleanRepresentation(Ty)) {
1801  // This should really always be an i1, but sometimes it's already
1802  // an i8, and it's awkward to track those cases down.
1803  if (Value->getType()->isIntegerTy(1))
1804  return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
1805  assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
1806  "wrong value rep of bool");
1807  }
1808 
1809  return Value;
1810 }
1811 
1812 llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
1813  // Bool has a different representation in memory than in registers.
1814  if (hasBooleanRepresentation(Ty)) {
1815  assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
1816  "wrong value rep of bool");
1817  return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
1818  }
1819  if (Ty->isExtVectorBoolType()) {
1820  const auto *RawIntTy = Value->getType();
1821  // Bitcast iP --> <P x i1>.
1822  auto *PaddedVecTy = llvm::FixedVectorType::get(
1823  Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
1824  auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
1825  // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
1826  llvm::Type *ValTy = ConvertType(Ty);
1827  unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
1828  return emitBoolVecConversion(V, ValNumElems, "extractvec");
1829  }
1830 
1831  return Value;
1832 }
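
 // Illustrative IR (editorial sketch): for a C 'bool', EmitToMemory widens the
 // register value and EmitFromMemory narrows it back, e.g.
 //   %frombool = zext i1 %v to i8      ; EmitToMemory, before the store
 //   %tobool   = trunc i8 %ld to i1    ; EmitFromMemory, after the load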
1833 
1834 // Convert the pointer of \p Addr to a pointer to a vector (the value type of
1835 // MatrixType), if it points to a array (the memory type of MatrixType).
1836 static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
1837  bool IsVector = true) {
1838  auto *ArrayTy = dyn_cast<llvm::ArrayType>(Addr.getElementType());
1839  if (ArrayTy && IsVector) {
1840  auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
1841  ArrayTy->getNumElements());
1842 
1843  return Address(CGF.Builder.CreateElementBitCast(Addr, VectorTy));
1844  }
1845  auto *VectorTy = dyn_cast<llvm::VectorType>(Addr.getElementType());
1846  if (VectorTy && !IsVector) {
1847  auto *ArrayTy = llvm::ArrayType::get(
1848  VectorTy->getElementType(),
1849  cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
1850 
1851  return Address(CGF.Builder.CreateElementBitCast(Addr, ArrayTy));
1852  }
1853 
1854  return Addr;
1855 }
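
 // Example (editorial): a 'double __attribute__((matrix_type(2, 2)))' value is
 // held in memory as [4 x double] but manipulated as <4 x double>; this helper
 // rewrites the pointee type between the two views before a vector load or
 // store of the whole matrix.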
1856 
1857 // Emit a store of a matrix LValue. This may require casting the original
1858 // pointer to memory address (ArrayType) to a pointer to the value type
1859 // (VectorType).
1860 static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
1861  bool isInit, CodeGenFunction &CGF) {
1862  Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF,
1863  value->getType()->isVectorTy());
1864  CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
1865  lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
1866  lvalue.isNontemporal());
1867 }
1868 
1869 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
1870  bool Volatile, QualType Ty,
1871  LValueBaseInfo BaseInfo,
1872  TBAAAccessInfo TBAAInfo,
1873  bool isInit, bool isNontemporal) {
1874  llvm::Type *SrcTy = Value->getType();
1875  if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
1876  auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
1877  if (VecTy && ClangVecTy->isExtVectorBoolType()) {
1878  auto *MemIntTy = cast<llvm::IntegerType>(Addr.getElementType());
1879  // Expand to the memory bit width.
1880  unsigned MemNumElems = MemIntTy->getPrimitiveSizeInBits();
1881  // <N x i1> --> <P x i1>.
1882  Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
1883  // <P x i1> --> iP.
1884  Value = Builder.CreateBitCast(Value, MemIntTy);
1885  } else if (!CGM.getCodeGenOpts().PreserveVec3Type) {
1886  // Handle vec3 special.
1887  if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
1888  // Our source is a vec3, do a shuffle vector to make it a vec4.
1889  Value = Builder.CreateShuffleVector(Value, ArrayRef<int>{0, 1, 2, -1},
1890  "extractVec");
1891  SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
1892  }
1893  if (Addr.getElementType() != SrcTy) {
1894  Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp");
1895  }
1896  }
1897  }
1898 
1899  Value = EmitToMemory(Value, Ty);
1900 
1901  LValue AtomicLValue =
1902  LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
1903  if (Ty->isAtomicType() ||
1904  (!isInit && LValueIsSuitableForInlineAtomic(AtomicLValue))) {
1905  EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
1906  return;
1907  }
1908 
1909  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
1910  if (isNontemporal) {
1911  llvm::MDNode *Node =
1912  llvm::MDNode::get(Store->getContext(),
1913  llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
1914  Store->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
1915  }
1916 
1917  CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
1918 }
1919 
1920 void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
1921  bool isInit) {
1922  if (lvalue.getType()->isConstantMatrixType()) {
1923  EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
1924  return;
1925  }
1926 
1927  EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(),
1928  lvalue.getType(), lvalue.getBaseInfo(),
1929  lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
1930 }
1931 
1932 // Emit a load of a LValue of matrix type. This may require casting the pointer
1933 // to memory address (ArrayType) to a pointer to the value type (VectorType).
1934 static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
1935  CodeGenFunction &CGF) {
1936  assert(LV.getType()->isConstantMatrixType());
1937  Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF);
1938  LV.setAddress(Addr);
1939  return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
1940 }
1941 
1942 /// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
1943 /// method emits the address of the lvalue, then loads the result as an rvalue,
1944 /// returning the rvalue.
1945 RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
1946  if (LV.isObjCWeak()) {
1947  // load of a __weak object.
1948  Address AddrWeakObj = LV.getAddress(*this);
1949  return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
1950  AddrWeakObj));
1951  }
1952  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
1953  // In MRC mode, we do a load+autorelease.
1954  if (!getLangOpts().ObjCAutoRefCount) {
1955  return RValue::get(EmitARCLoadWeak(LV.getAddress(*this)));
1956  }
1957 
1958  // In ARC mode, we load retained and then consume the value.
1959  llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this));
1960  Object = EmitObjCConsumeObject(LV.getType(), Object);
1961  return RValue::get(Object);
1962  }
1963 
1964  if (LV.isSimple()) {
1965  assert(!LV.getType()->isFunctionType());
1966 
1967  if (LV.getType()->isConstantMatrixType())
1968  return EmitLoadOfMatrixLValue(LV, Loc, *this);
1969 
1970  // Everything needs a load.
1971  return RValue::get(EmitLoadOfScalar(LV, Loc));
1972  }
1973 
1974  if (LV.isVectorElt()) {
1975  llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
1976  LV.isVolatileQualified());
1977  return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
1978  "vecext"));
1979  }
1980 
1981  // If this is a reference to a subset of the elements of a vector, either
1982  // shuffle the input or extract/insert them as appropriate.
1983  if (LV.isExtVectorElt()) {
1984  return EmitLoadOfExtVectorElementLValue(LV);
1985  }
1986 
1987  // Global Register variables always invoke intrinsics
1988  if (LV.isGlobalReg())
1989  return EmitLoadOfGlobalRegLValue(LV);
1990 
1991  if (LV.isMatrixElt()) {
1992  llvm::Value *Idx = LV.getMatrixIdx();
1993  if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
1994  const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
1995  llvm::MatrixBuilder MB(Builder);
1996  MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
1997  }
1998  llvm::LoadInst *Load =
1999  Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
2000  return RValue::get(Builder.CreateExtractElement(Load, Idx, "matrixext"));
2001  }
2002 
2003  assert(LV.isBitField() && "Unknown LValue type!");
2004  return EmitLoadOfBitfieldLValue(LV, Loc);
2005 }
2006 
2007 RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
2008  SourceLocation Loc) {
2009  const CGBitFieldInfo &Info = LV.getBitFieldInfo();
2010 
2011  // Get the output type.
2012  llvm::Type *ResLTy = ConvertType(LV.getType());
2013 
2014  Address Ptr = LV.getBitFieldAddress();
2015  llvm::Value *Val =
2016  Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
2017 
2018  bool UseVolatile = LV.isVolatileQualified() &&
2019  Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2020  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2021  const unsigned StorageSize =
2022  UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2023  if (Info.IsSigned) {
2024  assert(static_cast<unsigned>(Offset + Info.Size) <= StorageSize);
2025  unsigned HighBits = StorageSize - Offset - Info.Size;
2026  if (HighBits)
2027  Val = Builder.CreateShl(Val, HighBits, "bf.shl");
2028  if (Offset + HighBits)
2029  Val = Builder.CreateAShr(Val, Offset + HighBits, "bf.ashr");
2030  } else {
2031  if (Offset)
2032  Val = Builder.CreateLShr(Val, Offset, "bf.lshr");
2033  if (static_cast<unsigned>(Offset) + Info.Size < StorageSize)
2034  Val = Builder.CreateAnd(
2035  Val, llvm::APInt::getLowBitsSet(StorageSize, Info.Size), "bf.clear");
2036  }
2037  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
2038  EmitScalarRangeCheck(Val, LV.getType(), Loc);
2039  return RValue::get(Val);
2040 }
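
 // Worked example (editorial): for 'struct S { int a : 3; int b : 5; };',
 // loading 'b' from its i8 storage unit uses Offset = 3, Size = 5, and
 // StorageSize = 8, so HighBits = 0 and the sequence is simply
 //   %bf.ashr = ashr i8 %bf.load, 3
 //   %bf.cast = sext i8 %bf.ashr to i32
 // whereas loading 'a' (Offset = 0, HighBits = 5) shifts left by 5 first so
 // the arithmetic shift right sign-extends from bit 2.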
2041 
2042 // If this is a reference to a subset of the elements of a vector, create an
2043 // appropriate shufflevector.
2044 RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
2045  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
2046  LV.isVolatileQualified());
2047 
2048  const llvm::Constant *Elts = LV.getExtVectorElts();
2049 
2050  // If the result of the expression is a non-vector type, we must be extracting
2051  // a single element. Just codegen as an extractelement.
2052  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
2053  if (!ExprVT) {
2054  unsigned InIdx = getAccessedFieldNo(0, Elts);
2055  llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2056  return RValue::get(Builder.CreateExtractElement(Vec, Elt));
2057  }
2058 
2059  // Always use shuffle vector to try to retain the original program structure
2060  unsigned NumResultElts = ExprVT->getNumElements();
2061 
2062  SmallVector<int, 4> Mask;
2063  for (unsigned i = 0; i != NumResultElts; ++i)
2064  Mask.push_back(getAccessedFieldNo(i, Elts));
2065 
2066  Vec = Builder.CreateShuffleVector(Vec, Mask);
2067  return RValue::get(Vec);
2068 }
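
 // Example (editorial): for OpenCL/ext_vector code such as
 //   float4 v; float2 w = v.xz;
 // Elts is {0, 2}, so the load above becomes
 //   %vecext = shufflevector <4 x float> %v, <4 x float> poison,
 //                           <2 x i32> <i32 0, i32 2>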
2069 
2070 /// Generates lvalue for partial ext_vector access.
2071 Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
2072  Address VectorAddress = LV.getExtVectorAddress();
2073  QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
2074  llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
2075 
2076  Address CastToPointerElement =
2077  Builder.CreateElementBitCast(VectorAddress, VectorElementTy,
2078  "conv.ptr.element");
2079 
2080  const llvm::Constant *Elts = LV.getExtVectorElts();
2081  unsigned ix = getAccessedFieldNo(0, Elts);
2082 
2083  Address VectorBasePtrPlusIx =
2084  Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
2085  "vector.elt");
2086 
2087  return VectorBasePtrPlusIx;
2088 }
2089 
2090 /// Loads of global named registers are always calls to intrinsics.
2091 RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
2092  assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2093  "Bad type for register variable");
2094  llvm::MDNode *RegName = cast<llvm::MDNode>(
2095  cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2096 
2097  // We accept integer and pointer types only
2098  llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2099  llvm::Type *Ty = OrigTy;
2100  if (OrigTy->isPointerTy())
2101  Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2102  llvm::Type *Types[] = { Ty };
2103 
2104  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2105  llvm::Value *Call = Builder.CreateCall(
2106  F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2107  if (OrigTy->isPointerTy())
2108  Call = Builder.CreateIntToPtr(Call, OrigTy);
2109  return RValue::get(Call);
2110 }
2111 
2112 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
2113 /// lvalue, where both are guaranteed to the have the same type, and that type
2114 /// is 'Ty'.
2115 void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
2116  bool isInit) {
2117  if (!Dst.isSimple()) {
2118  if (Dst.isVectorElt()) {
2119  // Read/modify/write the vector, inserting the new element.
2120  llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2121  Dst.isVolatileQualified());
2122  auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
2123  if (IRStoreTy) {
2124  auto *IRVecTy = llvm::FixedVectorType::get(
2125  Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
2126  Vec = Builder.CreateBitCast(Vec, IRVecTy);
2127  // iN --> <N x i1>.
2128  }
2129  Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
2130  Dst.getVectorIdx(), "vecins");
2131  if (IRStoreTy) {
2132  // <N x i1> --> <iN>.
2133  Vec = Builder.CreateBitCast(Vec, IRStoreTy);
2134  }
2135  Builder.CreateStore(Vec, Dst.getVectorAddress(),
2136  Dst.isVolatileQualified());
2137  return;
2138  }
2139 
2140  // If this is an update of extended vector elements, insert them as
2141  // appropriate.
2142  if (Dst.isExtVectorElt())
2143  return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
2144 
2145  if (Dst.isGlobalReg())
2146  return EmitStoreThroughGlobalRegLValue(Src, Dst);
2147 
2148  if (Dst.isMatrixElt()) {
2149  llvm::Value *Idx = Dst.getMatrixIdx();
2150  if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
2151  const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
2152  llvm::MatrixBuilder MB(Builder);
2153  MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
2154  }
2155  llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
2156  llvm::Value *Vec =
2157  Builder.CreateInsertElement(Load, Src.getScalarVal(), Idx, "matins");
2158  Builder.CreateStore(Vec, Dst.getMatrixAddress(),
2159  Dst.isVolatileQualified());
2160  return;
2161  }
2162 
2163  assert(Dst.isBitField() && "Unknown LValue type");
2164  return EmitStoreThroughBitfieldLValue(Src, Dst);
2165  }
2166 
2167  // There's special magic for assigning into an ARC-qualified l-value.
2168  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2169  switch (Lifetime) {
2170  case Qualifiers::OCL_None:
2171  llvm_unreachable("present but none");
2172 
2173  case Qualifiers::OCL_ExplicitNone:
2174  // nothing special
2175  break;
2176 
2177  case Qualifiers::OCL_Strong:
2178  if (isInit) {
2179  Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2180  break;
2181  }
2182  EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2183  return;
2184 
2185  case Qualifiers::OCL_Weak:
2186  if (isInit)
2187  // Initialize and then skip the primitive store.
2188  EmitARCInitWeak(Dst.getAddress(*this), Src.getScalarVal());
2189  else
2190  EmitARCStoreWeak(Dst.getAddress(*this), Src.getScalarVal(),
2191  /*ignore*/ true);
2192  return;
2193 
2194  case Qualifiers::OCL_Autoreleasing:
2195  Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
2196  Src.getScalarVal()));
2197  // fall into the normal path
2198  break;
2199  }
2200  }
2201 
2202  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2203  // load of a __weak object.
2204  Address LvalueDst = Dst.getAddress(*this);
2205  llvm::Value *src = Src.getScalarVal();
2206  CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
2207  return;
2208  }
2209 
2210  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2211  // load of a __strong object.
2212  Address LvalueDst = Dst.getAddress(*this);
2213  llvm::Value *src = Src.getScalarVal();
2214  if (Dst.isObjCIvar()) {
2215  assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2216  llvm::Type *ResultType = IntPtrTy;
2217  Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
2218  llvm::Value *RHS = dst.getPointer();
2219  RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2220  llvm::Value *LHS =
2221  Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType,
2222  "sub.ptr.lhs.cast");
2223  llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
2224  CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
2225  BytesBetween);
2226  } else if (Dst.isGlobalObjCRef()) {
2227  CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
2228  Dst.isThreadLocalRef());
2229  }
2230  else
2231  CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2232  return;
2233  }
2234 
2235  assert(Src.isScalar() && "Can't emit an agg store with this method");
2236  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2237 }
2238 
2239 void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
2240  llvm::Value **Result) {
2241  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2242  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
2243  Address Ptr = Dst.getBitFieldAddress();
2244 
2245  // Get the source value, truncated to the width of the bit-field.
2246  llvm::Value *SrcVal = Src.getScalarVal();
2247 
2248  // Cast the source to the storage type and shift it into place.
2249  SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2250  /*isSigned=*/false);
2251  llvm::Value *MaskedVal = SrcVal;
2252 
2253  const bool UseVolatile =
2254  CGM.getCodeGenOpts().AAPCSBitfieldWidth && Dst.isVolatileQualified() &&
2255  Info.VolatileStorageSize != 0 && isAAPCS(CGM.getTarget());
2256  const unsigned StorageSize =
2257  UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
2258  const unsigned Offset = UseVolatile ? Info.VolatileOffset : Info.Offset;
2259  // See if there are other bits in the bitfield's storage we'll need to load
2260  // and mask together with source before storing.
2261  if (StorageSize != Info.Size) {
2262  assert(StorageSize > Info.Size && "Invalid bitfield size.");
2263  llvm::Value *Val =
2264  Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2265 
2266  // Mask the source value as needed.
2267  if (!hasBooleanRepresentation(Dst.getType()))
2268  SrcVal = Builder.CreateAnd(
2269  SrcVal, llvm::APInt::getLowBitsSet(StorageSize, Info.Size),
2270  "bf.value");
2271  MaskedVal = SrcVal;
2272  if (Offset)
2273  SrcVal = Builder.CreateShl(SrcVal, Offset, "bf.shl");
2274 
2275  // Mask out the original value.
2276  Val = Builder.CreateAnd(
2277  Val, ~llvm::APInt::getBitsSet(StorageSize, Offset, Offset + Info.Size),
2278  "bf.clear");
2279 
2280  // Or together the unchanged values and the source value.
2281  SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2282  } else {
2283  assert(Offset == 0);
2284  // According to the AAPCS:
2285  // When a volatile bit-field is written, and its container does not overlap
2286  // with any non-bit-field member, its container must be read exactly once
2287  // and written exactly once using the access width appropriate to the type
2288  // of the container. The two accesses are not atomic.
2289  if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
2290  CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2291  Builder.CreateLoad(Ptr, true, "bf.load");
2292  }
2293 
2294  // Write the new value back out.
2295  Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2296 
2297  // Return the new value of the bit-field, if requested.
2298  if (Result) {
2299  llvm::Value *ResultVal = MaskedVal;
2300 
2301  // Sign extend the value if needed.
2302  if (Info.IsSigned) {
2303  assert(Info.Size <= StorageSize);
2304  unsigned HighBits = StorageSize - Info.Size;
2305  if (HighBits) {
2306  ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2307  ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2308  }
2309  }
2310 
2311  ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2312  "bf.result.cast");
2313  *Result = EmitFromMemory(ResultVal, Dst.getType());
2314  }
2315 }
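
 // Worked example (editorial): storing into 'b' of
 // 'struct S { int a : 3; int b : 5; };' (Offset = 3, Size = 5,
 // StorageSize = 8) emits, in essence:
 //   %bf.value = and i8 %src, 31        ; truncate the source to 5 bits
 //   %bf.shl   = shl i8 %bf.value, 3    ; move it into place
 //   %bf.clear = and i8 %bf.load, 7     ; keep the unchanged low 3 bits ('a')
 //   %bf.set   = or i8 %bf.clear, %bf.shl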
2316 
2317 void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
2318  LValue Dst) {
2319  // This access turns into a read/modify/write of the vector. Load the input
2320  // value now.
2321  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddress(),
2322  Dst.isVolatileQualified());
2323  const llvm::Constant *Elts = Dst.getExtVectorElts();
2324 
2325  llvm::Value *SrcVal = Src.getScalarVal();
2326 
2327  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2328  unsigned NumSrcElts = VTy->getNumElements();
2329  unsigned NumDstElts =
2330  cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
2331  if (NumDstElts == NumSrcElts) {
2332  // Use a shuffle vector if the src and destination have the same number of
2333  // elements; restore the vector mask since it is on the side it will be
2334  // stored.
2335  SmallVector<int, 4> Mask(NumDstElts);
2336  for (unsigned i = 0; i != NumSrcElts; ++i)
2337  Mask[getAccessedFieldNo(i, Elts)] = i;
2338 
2339  Vec = Builder.CreateShuffleVector(SrcVal, Mask);
2340  } else if (NumDstElts > NumSrcElts) {
2341  // Extend the source vector to the same length and then shuffle it
2342  // into the destination.
2343  // FIXME: since we're shuffling with undef, can we just use the indices
2344  // into that? This could be simpler.
2345  SmallVector<int, 4> ExtMask;
2346  for (unsigned i = 0; i != NumSrcElts; ++i)
2347  ExtMask.push_back(i);
2348  ExtMask.resize(NumDstElts, -1);
2349  llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, ExtMask);
2350  // build identity
2351  SmallVector<int, 4> Mask;
2352  for (unsigned i = 0; i != NumDstElts; ++i)
2353  Mask.push_back(i);
2354 
2355  // When the vector size is odd and .odd or .hi is used, the last element
2356  // of the Elts constant array will be one past the size of the vector.
2357  // Ignore the last element here, if it is greater than the mask size.
2358  if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2359  NumSrcElts--;
2360 
2361  // modify what gets shuffled into the destination
2362  for (unsigned i = 0; i != NumSrcElts; ++i)
2363  Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
2364  Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
2365  } else {
2366  // We should never shorten the vector
2367  llvm_unreachable("unexpected shorten vector length");
2368  }
2369  } else {
2370  // If the Src is a scalar (not a vector) it must be updating one element.
2371  unsigned InIdx = getAccessedFieldNo(0, Elts);
2372  llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2373  Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2374  }
2375 
2376  Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
2377  Dst.isVolatileQualified());
2378 }
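
 // Example (editorial): for 'float4 v; v.yx = w;' (w a float2), NumSrcElts = 2
 // and NumDstElts = 4, so w is first widened with an {0, 1, -1, -1} shuffle
 // and then merged into v with the mask {5, 4, 2, 3}: elements 4 and 5 come
 // from the widened source, elements 2 and 3 are kept from the original.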
2379 
2380 /// Stores to global named registers are always calls to intrinsics.
2381 void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
2382  assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2383  "Bad type for register variable");
2384  llvm::MDNode *RegName = cast<llvm::MDNode>(
2385  cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2386  assert(RegName && "Register LValue is not metadata");
2387 
2388  // We accept integer and pointer types only
2389  llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2390  llvm::Type *Ty = OrigTy;
2391  if (OrigTy->isPointerTy())
2392  Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2393  llvm::Type *Types[] = { Ty };
2394 
2395  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2396  llvm::Value *Value = Src.getScalarVal();
2397  if (OrigTy->isPointerTy())
2398  Value = Builder.CreatePtrToInt(Value, Ty);
2399  Builder.CreateCall(
2400  F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2401 }
2402 
2403 // setObjCGCLValueClass - sets class of the lvalue for the purpose of
2404 // generating the write-barrier API. It is currently a global, ivar,
2405 // or neither.
2406 static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2407  LValue &LV,
2408  bool IsMemberAccess=false) {
2409  if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2410  return;
2411 
2412  if (isa<ObjCIvarRefExpr>(E)) {
2413  QualType ExpTy = E->getType();
2414  if (IsMemberAccess && ExpTy->isPointerType()) {
2415  // If the ivar is a structure pointer, assigning to a field of
2416  // this struct follows gcc's behavior and conservatively makes it
2417  // a non-ivar write-barrier.
2418  ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2419  if (ExpTy->isRecordType()) {
2420  LV.setObjCIvar(false);
2421  return;
2422  }
2423  }
2424  LV.setObjCIvar(true);
2425  auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2426  LV.setBaseIvarExp(Exp->getBase());
2427  LV.setObjCArray(E->getType()->isArrayType());
2428  return;
2429  }
2430 
2431  if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2432  if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2433  if (VD->hasGlobalStorage()) {
2434  LV.setGlobalObjCRef(true);
2435  LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2436  }
2437  }
2438  LV.setObjCArray(E->getType()->isArrayType());
2439  return;
2440  }
2441 
2442  if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2443  setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2444  return;
2445  }
2446 
2447  if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2448  setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2449  if (LV.isObjCIvar()) {
2450  // If cast is to a structure pointer, follow gcc's behavior and make it
2451  // a non-ivar write-barrier.
2452  QualType ExpTy = E->getType();
2453  if (ExpTy->isPointerType())
2454  ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2455  if (ExpTy->isRecordType())
2456  LV.setObjCIvar(false);
2457  }
2458  return;
2459  }
2460 
2461  if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
2462  setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
2463  return;
2464  }
2465 
2466  if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
2467  setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2468  return;
2469  }
2470 
2471  if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
2472  setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2473  return;
2474  }
2475 
2476  if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
2477  setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2478  return;
2479  }
2480 
2481  if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
2482  setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
2483  if (LV.isObjCIvar() && !LV.isObjCArray())
2484  // Using array syntax to assign to what an ivar points to is not the
2485  // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2486  LV.setObjCIvar(false);
2487  else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
2488  // Using array syntax to assign to what a global points to is not the
2489  // same as assigning to the global itself. {id *G;} G[i] = 0;
2490  LV.setGlobalObjCRef(false);
2491  return;
2492  }
2493 
2494  if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
2495  setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
2496  // We don't know if member is an 'ivar', but this flag is looked at
2497  // only in the context of LV.isObjCIvar().
2498  LV.setObjCArray(E->getType()->isArrayType());
2499  return;
2500  }
2501 }
2502 
2503 static llvm::Value *
2504 EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
2505  llvm::Value *V, llvm::Type *IRType,
2506  StringRef Name = StringRef()) {
2507  unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
2508  return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
2509 }
2510 
2511 static LValue EmitThreadPrivateVarDeclLValue(
2512  CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
2513  llvm::Type *RealVarTy, SourceLocation Loc) {
2514  if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
2515  Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
2516  CGF, VD, Addr, Loc);
2517  else
2518  Addr =
2519  CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
2520 
2521  Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy);
2522  return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2523 }
2524 
2525 static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
2526  const VarDecl *VD, QualType T) {
2527  llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2528  OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2529  // Return an invalid address if variable is MT_To and unified
2530  // memory is not enabled. For all other cases: MT_Link and
2531  // MT_To with unified memory, return a valid address.
2532  if (!Res || (*Res == OMPDeclareTargetDeclAttr::MT_To &&
2533  !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()))
2534  return Address::invalid();
2535  assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
2536  (*Res == OMPDeclareTargetDeclAttr::MT_To &&
2537  CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) &&
2538  "Expected link clause OR to clause with unified memory enabled.");
2539  QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
2540  Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
2541  return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
2542 }
2543 
2544 Address
2545 CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
2546  LValueBaseInfo *PointeeBaseInfo,
2547  TBAAAccessInfo *PointeeTBAAInfo) {
2548  llvm::LoadInst *Load =
2549  Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
2550  CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
2551 
2552  QualType PointeeType = RefLVal.getType()->getPointeeType();
2553  CharUnits Align = CGM.getNaturalTypeAlignment(
2554  PointeeType, PointeeBaseInfo, PointeeTBAAInfo,
2555  /* forPointeeType= */ true);
2556  return Address(Load, ConvertTypeForMem(PointeeType), Align);
2557 }
2558 
2559 LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
2560  LValueBaseInfo PointeeBaseInfo;
2561  TBAAAccessInfo PointeeTBAAInfo;
2562  Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
2563  &PointeeTBAAInfo);
2564  return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
2565  PointeeBaseInfo, PointeeTBAAInfo);
2566 }
2567 
2568 Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
2569  const PointerType *PtrTy,
2570  LValueBaseInfo *BaseInfo,
2571  TBAAAccessInfo *TBAAInfo) {
2572  llvm::Value *Addr = Builder.CreateLoad(Ptr);
2573  return Address(Addr, ConvertTypeForMem(PtrTy->getPointeeType()),
2574  CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(), BaseInfo,
2575  TBAAInfo,
2576  /*forPointeeType=*/true));
2577 }
2578 
2579 LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
2580  const PointerType *PtrTy) {
2581  LValueBaseInfo BaseInfo;
2582  TBAAAccessInfo TBAAInfo;
2583  Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
2584  return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
2585 }
2586 
2587 static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
2588  const Expr *E, const VarDecl *VD) {
2589  QualType T = E->getType();
2590 
2591  // If it's thread_local, emit a call to its wrapper function instead.
2592  if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2593  CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD))
2594  return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
2595  // Check if the variable is marked as declare target with link clause in
2596  // device codegen.
2597  if (CGF.getLangOpts().OpenMPIsDevice) {
2598  Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
2599  if (Addr.isValid())
2600  return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2601  }
2602 
2603  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
2604  llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
2605  V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
2606  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
2607  Address Addr(V, RealVarTy, Alignment);
2608  // Emit reference to the private copy of the variable if it is an OpenMP
2609  // threadprivate variable.
2610  if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
2611  VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2612  return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
2613  E->getExprLoc());
2614  }
2615  LValue LV = VD->getType()->isReferenceType() ?
2616  CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
2617  AlignmentSource::Decl) :
2618  CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2619  setObjCGCLValueClass(CGF.getContext(), E, LV);
2620  return LV;
2621 }
2622 
2623 static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
2624  GlobalDecl GD) {
2625  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2626  if (FD->hasAttr<WeakRefAttr>()) {
2627  ConstantAddress aliasee = CGM.GetWeakRefReference(FD);
2628  return aliasee.getPointer();
2629  }
2630 
2631  llvm::Constant *V = CGM.GetAddrOfFunction(GD);
2632  if (!FD->hasPrototype()) {
2633  if (const FunctionProtoType *Proto =
2634  FD->getType()->getAs<FunctionProtoType>()) {
2635  // Ugly case: for a K&R-style definition, the type of the definition
2636  // isn't the same as the type of a use. Correct for this with a
2637  // bitcast.
2638  QualType NoProtoType =
2639  CGM.getContext().getFunctionNoProtoType(Proto->getReturnType());
2640  NoProtoType = CGM.getContext().getPointerType(NoProtoType);
2641  V = llvm::ConstantExpr::getBitCast(V,
2642  CGM.getTypes().ConvertType(NoProtoType));
2643  }
2644  }
2645  return V;
2646 }
2647 
2648 static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
2649  GlobalDecl GD) {
2650  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2651  llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, GD);
2652  CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
2653  return CGF.MakeAddrLValue(V, E->getType(), Alignment,
2654  AlignmentSource::Decl);
2655 }
2656 
2657 static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
2658  llvm::Value *ThisValue) {
2659  QualType TagType = CGF.getContext().getTagDeclType(FD->getParent());
2660  LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType);
2661  return CGF.EmitLValueForField(LV, FD);
2662 }
2663 
2664 /// Named Registers are named metadata pointing to the register name
2665 /// which will be read from/written to as an argument to the intrinsic
2666 /// @llvm.read/write_register.
2667 /// So far, only the name is being passed down, but other options such as
2668 /// register type, allocation type or even optimization options could be
2669 /// passed down via the metadata node.
2670 static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
2671  SmallString<64> Name("llvm.named.register.");
2672  AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
2673  assert(Asm->getLabel().size() < 64-Name.size() &&
2674  "Register name too big");
2675  Name.append(Asm->getLabel());
2676  llvm::NamedMDNode *M =
2677  CGM.getModule().getOrInsertNamedMetadata(Name);
2678  if (M->getNumOperands() == 0) {
2679  llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
2680  Asm->getLabel());
2681  llvm::Metadata *Ops[] = {Str};
2682  M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
2683  }
2684 
2685  CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
2686 
2687  llvm::Value *Ptr =
2688  llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
2689  return LValue::MakeGlobalReg(Ptr, Alignment, VD->getType());
2690 }
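
 // Example (editorial): the GNU "explicit register variable" extension
 //   register unsigned long current_sp asm("sp");
 // produces an LValue whose "address" is metadata naming the register; reads
 // and writes of current_sp then become @llvm.read_register /
 // @llvm.write_register calls on the metadata node !{!"sp"}.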
2691 
2692 /// Determine whether we can emit a reference to \p VD from the current
2693 /// context, despite not necessarily having seen an odr-use of the variable in
2694 /// this context.
2695 static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
2696  const DeclRefExpr *E,
2697  const VarDecl *VD,
2698  bool IsConstant) {
2699  // For a variable declared in an enclosing scope, do not emit a spurious
2700  // reference even if we have a capture, as that will emit an unwarranted
2701  // reference to our capture state, and will likely generate worse code than
2702  // emitting a local copy.
2703  if (E->refersToEnclosingVariableOrCapture())
2704  return false;
2705 
2706  // For a local declaration declared in this function, we can always reference
2707  // it even if we don't have an odr-use.
2708  if (VD->hasLocalStorage()) {
2709  return VD->getDeclContext() ==
2710  dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
2711  }
2712 
2713  // For a global declaration, we can emit a reference to it if we know
2714  // for sure that we are able to emit a definition of it.
2715  VD = VD->getDefinition(CGF.getContext());
2716  if (!VD)
2717  return false;
2718 
2719  // Don't emit a spurious reference if it might be to a variable that only
2720  // exists on a different device / target.
2721  // FIXME: This is unnecessarily broad. Check whether this would actually be a
2722  // cross-target reference.
2723  if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
2724  CGF.getLangOpts().OpenCL) {
2725  return false;
2726  }
2727 
2728  // We can emit a spurious reference only if the linkage implies that we'll
2729  // be emitting a non-interposable symbol that will be retained until link
2730  // time.
2731  switch (CGF.CGM.getLLVMLinkageVarDefinition(VD, IsConstant)) {
2732  case llvm::GlobalValue::ExternalLinkage:
2733  case llvm::GlobalValue::LinkOnceODRLinkage:
2734  case llvm::GlobalValue::WeakODRLinkage:
2735  case llvm::GlobalValue::InternalLinkage:
2736  case llvm::GlobalValue::PrivateLinkage:
2737  return true;
2738  default:
2739  return false;
2740  }
2741 }
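
 // For illustration (hypothetical code, editorial): given
 //   static const int Limit = 100;
 //   auto f = [] { return Limit; };   // non-odr-use of Limit
 // a reference to the internal-linkage global may be emitted even without an
 // odr-use, because InternalLinkage guarantees a non-interposable definition
 // is retained until link time.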
2742 
2743 LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
2744  const NamedDecl *ND = E->getDecl();
2745  QualType T = E->getType();
2746 
2747  assert(E->isNonOdrUse() != NOUR_Unevaluated &&
2748  "should not emit an unevaluated operand");
2749 
2750  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
2751  // Global named registers are accessed via intrinsics only
2752  if (VD->getStorageClass() == SC_Register &&
2753  VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
2754  return EmitGlobalNamedRegister(VD, CGM);
2755 
2756  // If this DeclRefExpr does not constitute an odr-use of the variable,
2757  // we're not permitted to emit a reference to it in general, and it might
2758  // not be captured if capture would be necessary for a use. Emit the
2759  // constant value directly instead.
2760  if (E->isNonOdrUse() == NOUR_Constant &&
2761  (VD->getType()->isReferenceType() ||
2762  !canEmitSpuriousReferenceToVariable(*this, E, VD, true))) {
2763  VD->getAnyInitializer(VD);
2764  llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
2765  E->getLocation(), *VD->evaluateValue(), VD->getType());
2766  assert(Val && "failed to emit constant expression");
2767 
2768  Address Addr = Address::invalid();
2769  if (!VD->getType()->isReferenceType()) {
2770  // Spill the constant value to a global.
2771  Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
2772  getContext().getDeclAlign(VD));
2773  llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
2774  auto *PTy = llvm::PointerType::get(
2775  VarTy, getContext().getTargetAddressSpace(VD->getType()));
2776  Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
2777  } else {
2778  // Should we be using the alignment of the constant pointer we emitted?
2779  CharUnits Alignment =
2780  CGM.getNaturalTypeAlignment(E->getType(),
2781  /* BaseInfo= */ nullptr,
2782  /* TBAAInfo= */ nullptr,
2783  /* forPointeeType= */ true);
2784  Addr = Address(Val, ConvertTypeForMem(E->getType()), Alignment);
2785  }
2786  return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2787  }
2788 
2789  // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
2790 
2791  // Check for captured variables.
2792  if (E->refersToEnclosingVariableOrCapture()) {
2793  VD = VD->getCanonicalDecl();
2794  if (auto *FD = LambdaCaptureFields.lookup(VD))
2795  return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
2796  if (CapturedStmtInfo) {
2797  auto I = LocalDeclMap.find(VD);
2798  if (I != LocalDeclMap.end()) {
2799  LValue CapLVal;
2800  if (VD->getType()->isReferenceType())
2801  CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
2802  AlignmentSource::Decl);
2803  else
2804  CapLVal = MakeAddrLValue(I->second, T);
2805  // Mark lvalue as nontemporal if the variable is marked as nontemporal
2806  // in simd context.
2807  if (getLangOpts().OpenMP &&
2808  CGM.getOpenMPRuntime().isNontemporalDecl(VD))
2809  CapLVal.setNontemporal(/*Value=*/true);
2810  return CapLVal;
2811  }
2812  LValue CapLVal =
2813  EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
2814  CapturedStmtInfo->getContextValue());
2815  Address LValueAddress = CapLVal.getAddress(*this);
2816  CapLVal = MakeAddrLValue(
2817  Address(LValueAddress.getPointer(), LValueAddress.getElementType(),
2818  getContext().getDeclAlign(VD)),
2819  CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl),
2820  CapLVal.getTBAAInfo());
2821  // Mark lvalue as nontemporal if the variable is marked as nontemporal
2822  // in simd context.
2823  if (getLangOpts().OpenMP &&
2824  CGM.getOpenMPRuntime().isNontemporalDecl(VD))
2825  CapLVal.setNontemporal(/*Value=*/true);
2826  return CapLVal;
2827  }
2828 
2829  assert(isa<BlockDecl>(CurCodeDecl));
2830  Address addr = GetAddrOfBlockDecl(VD);
2831  return MakeAddrLValue(addr, T, AlignmentSource::Decl);
2832  }
2833  }
2834 
2835  // FIXME: We should be able to assert this for FunctionDecls as well!
2836  // FIXME: We should be able to assert this for all DeclRefExprs, not just
2837  // those with a valid source location.
2838  assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
2839  !E->getLocation().isValid()) &&
2840  "Should not use decl without marking it used!");
2841 
2842  if (ND->hasAttr<WeakRefAttr>()) {
2843  const auto *VD = cast<ValueDecl>(ND);
2844  ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
2845  return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
2846  }
2847 
2848  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
2849  // Check if this is a global variable.
2850  if (VD->hasLinkage() || VD->isStaticDataMember())
2851  return EmitGlobalVarDeclLValue(*this, E, VD);
2852 
2853  Address addr = Address::invalid();
2854 
2855  // The variable should generally be present in the local decl map.
2856  auto iter = LocalDeclMap.find(VD);
2857  if (iter != LocalDeclMap.end()) {
2858  addr = iter->second;
2859 
2860  // Otherwise, it might be static local we haven't emitted yet for
2861  // some reason; most likely, because it's in an outer function.
2862  } else if (VD->isStaticLocal()) {
2863  llvm::Constant *var = CGM.getOrCreateStaticVarDecl(
2864  *VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false));
2865  addr = Address(
2866  var, ConvertTypeForMem(VD->getType()), getContext().getDeclAlign(VD));
2867 
2868  // No other cases for now.
2869  } else {
2870  llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
2871  }
2872 
2873 
2874  // Check for OpenMP threadprivate variables.
2875  if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
2876  VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2877  return EmitThreadPrivateVarDeclLValue(
2878  *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
2879  E->getExprLoc());
2880  }
2881 
2882  // Drill into block byref variables.
2883  bool isBlockByref = VD->isEscapingByref();
2884  if (isBlockByref) {
2885  addr = emitBlockByrefAddress(addr, VD);
2886  }
2887 
2888  // Drill into reference types.
2889  LValue LV = VD->getType()->isReferenceType() ?
2890  EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
2891  MakeAddrLValue(addr, T, AlignmentSource::Decl);
2892 
2893  bool isLocalStorage = VD->hasLocalStorage();
2894 
2895  bool NonGCable = isLocalStorage &&
2896  !VD->getType()->isReferenceType() &&
2897  !isBlockByref;
2898  if (NonGCable) {
2899  LV.getQuals().removeObjCGCAttr();
2900  LV.setNonGC(true);
2901  }
2902 
2903  bool isImpreciseLifetime =
2904  (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
2905  if (isImpreciseLifetime)
2906  LV.setARCPreciseLifetime(ARCImpreciseLifetime);
2907  setObjCGCLValueClass(getContext(), E, LV);
2908  return LV;
2909  }
2910 
2911  if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
2912  LValue LV = EmitFunctionDeclLValue(*this, E, FD);
2913 
2914  // Emit debuginfo for the function declaration if the target wants to.
2915  if (getContext().getTargetInfo().allowDebugInfoForExternalRef()) {
2916  if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) {
2917  auto *Fn =
2918  cast<llvm::Function>(LV.getPointer(*this)->stripPointerCasts());
2919  if (!Fn->getSubprogram())
2920  DI->EmitFunctionDecl(FD, FD->getLocation(), T, Fn);
2921  }
2922  }
2923 
2924  return LV;
2925  }
2926 
2927  // FIXME: While we're emitting a binding from an enclosing scope, all other
2928  // DeclRefExprs we see should be implicitly treated as if they also refer to
2929  // an enclosing scope.
2930  if (const auto *BD = dyn_cast<BindingDecl>(ND))
2931  return EmitLValue(BD->getBinding());
2932 
2933  // We can form DeclRefExprs naming GUID declarations when reconstituting
2934  // non-type template parameters into expressions.
2935  if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
2936  return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
2937  AlignmentSource::Decl);
2938 
2939  if (const auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND))
2940  return MakeAddrLValue(CGM.GetAddrOfTemplateParamObject(TPO), T,
2941  AlignmentSource::Decl);
2942 
2943  llvm_unreachable("Unhandled DeclRefExpr");
2944 }
2945 
2946 LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
2947  // __extension__ doesn't affect lvalue-ness.
2948  if (E->getOpcode() == UO_Extension)
2949  return EmitLValue(E->getSubExpr());
2950 
2951  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
2952  switch (E->getOpcode()) {
2953  default: llvm_unreachable("Unknown unary operator lvalue!");
2954  case UO_Deref: {
2955  QualType T = E->getSubExpr()->getType()->getPointeeType();
2956  assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
2957 
2958  LValueBaseInfo BaseInfo;
2959  TBAAAccessInfo TBAAInfo;
2960  Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
2961  &TBAAInfo);
2962  LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
2963  LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
2964 
2965  // We should not generate __weak write barrier on indirect reference
2966  // of a pointer to object; as in void foo (__weak id *param); *param = 0;
2967  // But, we continue to generate __strong write barrier on indirect write
2968  // into a pointer to object.
2969  if (getLangOpts().ObjC &&
2970  getLangOpts().getGC() != LangOptions::NonGC &&
2971  LV.isObjCWeak())
2972  LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
2973  return LV;
2974  }
2975  case UO_Real:
2976  case UO_Imag: {
2977  LValue LV = EmitLValue(E->getSubExpr());
2978  assert(LV.isSimple() && "real/imag on non-ordinary l-value");
2979 
2980  // __real is valid on scalars. This is a faster way of testing that.
2981  // __imag can only produce an rvalue on scalars.
2982  if (E->getOpcode() == UO_Real &&
2983  !LV.getAddress(*this).getElementType()->isStructTy()) {
2984  assert(E->getSubExpr()->getType()->isArithmeticType());
2985  return LV;
2986  }
2987 
2988  QualType T = ExprTy->castAs<ComplexType>()->getElementType();
2989 
2990  Address Component =
2991  (E->getOpcode() == UO_Real
2992  ? emitAddrOfRealComponent(LV.getAddress(*this), LV.getType())
2993  : emitAddrOfImagComponent(LV.getAddress(*this), LV.getType()));
2994  LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
2995  CGM.getTBAAInfoForSubobject(LV, T));
2996  ElemLV.getQuals().addQualifiers(LV.getQuals());
2997  return ElemLV;
2998  }
2999  case UO_PreInc:
3000  case UO_PreDec: {
3001  LValue LV = EmitLValue(E->getSubExpr());
3002  bool isInc = E->getOpcode() == UO_PreInc;
3003 
3004  if (E->getType()->isAnyComplexType())
3005  EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
3006  else
3007  EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
3008  return LV;
3009  }
3010  }
3011 }
3012 
3013 LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
3014  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
3015  E->getType(), AlignmentSource::Decl);
3016 }
3017 
3018 LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
3019  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
3020  E->getType(), AlignmentSource::Decl);
3021 }
3022 
3023 LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
3024  auto SL = E->getFunctionName();
3025  assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
3026  StringRef FnName = CurFn->getName();
3027  if (FnName.startswith("\01"))
3028  FnName = FnName.substr(1);
3029  StringRef NameItems[] = {
3030  PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
3031  std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
3032  if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
3033  std::string Name = std::string(SL->getString());
3034  if (!Name.empty()) {
3035  unsigned Discriminator =
3036  CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
3037  if (Discriminator)
3038  Name += "_" + Twine(Discriminator + 1).str();
3039  auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
3040  return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3041  } else {
3042  auto C =
3043  CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
3044  return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3045  }
3046  }
3047  auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
3048  return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
3049 }
3050 
3051 /// Emit a type description suitable for use by a runtime sanitizer library. The
3052 /// format of a type descriptor is
3053 ///
3054 /// \code
3055 /// { i16 TypeKind, i16 TypeInfo }
3056 /// \endcode
3057 ///
3058 /// followed by an array of i8 containing the type name. TypeKind is 0 for an
3059 /// integer, 1 for a floating point value, and -1 for anything else.
3060 llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
3061  // Only emit each type's descriptor once.
3062  if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
3063  return C;
3064 
3065  uint16_t TypeKind = -1;
3066  uint16_t TypeInfo = 0;
3067 
3068  if (T->isIntegerType()) {
3069  TypeKind = 0;
3070  TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
3071  (T->isSignedIntegerType() ? 1 : 0);
3072  } else if (T->isFloatingType()) {
3073  TypeKind = 1;
3074  TypeInfo = getContext().getTypeSize(T);
3075  }
3076 
3077  // Format the type name as if for a diagnostic, including quotes and
3078  // optionally an 'aka'.
3079  SmallString<32> Buffer;
3080  CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
3081  (intptr_t)T.getAsOpaquePtr(),
3082  StringRef(), StringRef(), None, Buffer,
3083  None);
3084 
3085  llvm::Constant *Components[] = {
3086  Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
3087  llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
3088  };
3089  llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
3090 
3091  auto *GV = new llvm::GlobalVariable(
3092  CGM.getModule(), Descriptor->getType(),
3093  /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
3094  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3095  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
3096 
3097  // Remember the descriptor for this type.
3098  CGM.setTypeDescriptorInMap(T, GV);
3099 
3100  return GV;
3101 }
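
 // Example (editorial): on a typical target with 32-bit 'int', the descriptor
 // for 'int' is { i16 0, i16 11, "'int'" }: TypeKind 0 (integer) and
 // TypeInfo (log2(32) << 1) | 1 = 11 for a signed 32-bit type.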
3102 
3103 llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
3104  llvm::Type *TargetTy = IntPtrTy;
3105 
3106  if (V->getType() == TargetTy)
3107  return V;
3108 
3109  // Floating-point types which fit into intptr_t are bitcast to integers
3110  // and then passed directly (after zero-extension, if necessary).
3111  if (V->getType()->isFloatingPointTy()) {
3112  unsigned Bits = V->getType()->getPrimitiveSizeInBits().getFixedSize();
3113  if (Bits <= TargetTy->getIntegerBitWidth())
3114  V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
3115  Bits));
3116  }
3117 
3118  // Integers which fit in intptr_t are zero-extended and passed directly.
3119  if (V->getType()->isIntegerTy() &&
3120  V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
3121  return Builder.CreateZExt(V, TargetTy);
3122 
3123  // Pointers are passed directly, everything else is passed by address.
3124  if (!V->getType()->isPointerTy()) {
3125  Address Ptr = CreateDefaultAlignTempAlloca(V->getType());
3126  Builder.CreateStore(V, Ptr);
3127  V = Ptr.getPointer();
3128  }
3129  return Builder.CreatePtrToInt(V, TargetTy);
3130 }
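// Encoding examples for the above (assuming a 64-bit intptr_t):
//   - i32 %x      -> zext i32 %x to i64, passed directly;
//   - float %f    -> bitcast to i32, then zext to i64;
//   - x86_fp80 %l -> 80 bits do not fit, so the value is stored to a
//                    temporary alloca and the alloca's address is passed
//                    after a ptrtoint to i64.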
3131 
3132 /// Emit a representation of a SourceLocation for passing to a handler
3133 /// in a sanitizer runtime library. The format for this data is:
3134 /// \code
3135 /// struct SourceLocation {
3136 /// const char *Filename;
3137 /// int32_t Line, Column;
3138 /// };
3139 /// \endcode
3140 /// For an invalid SourceLocation, the Filename pointer is null.
3141 llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
3142  llvm::Constant *Filename;
3143  int Line, Column;
3144 
3145  PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
3146  if (PLoc.isValid()) {
3147  StringRef FilenameString = PLoc.getFilename();
3148 
3149  int PathComponentsToStrip =
3150  CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
3151  if (PathComponentsToStrip < 0) {
3152  assert(PathComponentsToStrip != INT_MIN);
3153  int PathComponentsToKeep = -PathComponentsToStrip;
3154  auto I = llvm::sys::path::rbegin(FilenameString);
3155  auto E = llvm::sys::path::rend(FilenameString);
3156  while (I != E && --PathComponentsToKeep)
3157  ++I;
3158 
3159  FilenameString = FilenameString.substr(I - E);
3160  } else if (PathComponentsToStrip > 0) {
3161  auto I = llvm::sys::path::begin(FilenameString);
3162  auto E = llvm::sys::path::end(FilenameString);
3163  while (I != E && PathComponentsToStrip--)
3164  ++I;
3165 
3166  if (I != E)
3167  FilenameString =
3168  FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
3169  else
3170  FilenameString = llvm::sys::path::filename(FilenameString);
3171  }
3172 
3173  auto FilenameGV =
3174  CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
3175  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
3176  cast<llvm::GlobalVariable>(FilenameGV.getPointer()));
3177  Filename = FilenameGV.getPointer();
3178  Line = PLoc.getLine();
3179  Column = PLoc.getColumn();
3180  } else {
3181  Filename = llvm::Constant::getNullValue(Int8PtrTy);
3182  Line = Column = 0;
3183  }
3184 
3185  llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
3186  Builder.getInt32(Column)};
3187 
3188  return llvm::ConstantStruct::getAnon(Data);
3189 }
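// For a valid location such as foo.c:12:3 the constant built above is,
// schematically:
//
//   { i8* @.src, i32 12, i32 3 }   ; @.src holds c"foo.c\00", with leading
//                                  ; components possibly removed per
//                                  ; -fsanitize-undefined-strip-path-components
//
// and for an invalid location it is { i8* null, i32 0, i32 0 }.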
3190 
3191 namespace {
3192 /// Specify under what conditions this check can be recovered
3193 enum class CheckRecoverableKind {
3194  /// Always terminate program execution if this check fails.
3195  Unrecoverable,
3196  /// Check supports recovering, runtime has both fatal (noreturn) and
3197  /// non-fatal handlers for this check.
3198  Recoverable,
3199  /// Runtime conditionally aborts; we always need to support recovery.
3200  AlwaysRecoverable
3201 };
3202 }
3203 
3204 static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
3205  assert(Kind.countPopulation() == 1);
3206  if (Kind == SanitizerKind::Function || Kind == SanitizerKind::Vptr)
3207  return CheckRecoverableKind::AlwaysRecoverable;
3208  else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
3209  return CheckRecoverableKind::Unrecoverable;
3210  else
3211  return CheckRecoverableKind::Recoverable;
3212 }
3213 
3214 namespace {
3215 struct SanitizerHandlerInfo {
3216  char const *const Name;
3217  unsigned Version;
3218 };
3219 }
3220 
3221 const SanitizerHandlerInfo SanitizerHandlers[] = {
3222 #define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
3223  LIST_SANITIZER_CHECKS
3224 #undef SANITIZER_CHECK
3225 };
3226 
3227 static void emitCheckHandlerCall(CodeGenFunction &CGF,
3228  llvm::FunctionType *FnType,
3229  ArrayRef<llvm::Value *> FnArgs,
3230  SanitizerHandler CheckHandler,
3231  CheckRecoverableKind RecoverKind, bool IsFatal,
3232  llvm::BasicBlock *ContBB) {
3233  assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
3234  Optional<ApplyDebugLocation> DL;
3235  if (!CGF.Builder.getCurrentDebugLocation()) {
3236  // Ensure that the call has at least an artificial debug location.
3237  DL.emplace(CGF, SourceLocation());
3238  }
3239  bool NeedsAbortSuffix =
3240  IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
3241  bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
3242  const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
3243  const StringRef CheckName = CheckInfo.Name;
3244  std::string FnName = "__ubsan_handle_" + CheckName.str();
3245  if (CheckInfo.Version && !MinimalRuntime)
3246  FnName += "_v" + llvm::utostr(CheckInfo.Version);
3247  if (MinimalRuntime)
3248  FnName += "_minimal";
3249  if (NeedsAbortSuffix)
3250  FnName += "_abort";
3251  bool MayReturn =
3252  !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
3253 
3254  llvm::AttrBuilder B(CGF.getLLVMContext());
3255  if (!MayReturn) {
3256  B.addAttribute(llvm::Attribute::NoReturn)
3257  .addAttribute(llvm::Attribute::NoUnwind);
3258  }
3259  B.addUWTableAttr(llvm::UWTableKind::Default);
3260 
3261  llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
3262  FnType, FnName,
3263  llvm::AttributeList::get(CGF.getLLVMContext(),
3264  llvm::AttributeList::FunctionIndex, B),
3265  /*Local=*/true);
3266  llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
3267  if (!MayReturn) {
3268  HandlerCall->setDoesNotReturn();
3269  CGF.Builder.CreateUnreachable();
3270  } else {
3271  CGF.Builder.CreateBr(ContBB);
3272  }
3273 }
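// Handler-name composition example for the above: the type_mismatch check
// (Version 1) with a fatal, full runtime calls
// __ubsan_handle_type_mismatch_v1_abort; with -fsanitize-minimal-runtime the
// version suffix is dropped and the name becomes
// __ubsan_handle_type_mismatch_minimal (plus "_abort" when fatal).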
3274 
3275 void CodeGenFunction::EmitCheck(
3276  ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
3277  SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
3278  ArrayRef<llvm::Value *> DynamicArgs) {
3279  assert(IsSanitizerScope);
3280  assert(Checked.size() > 0);
3281  assert(CheckHandler >= 0 &&
3282  size_t(CheckHandler) < llvm::array_lengthof(SanitizerHandlers));
3283  const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
3284 
3285  llvm::Value *FatalCond = nullptr;
3286  llvm::Value *RecoverableCond = nullptr;
3287  llvm::Value *TrapCond = nullptr;
3288  for (int i = 0, n = Checked.size(); i < n; ++i) {
3289  llvm::Value *Check = Checked[i].first;
3290  // -fsanitize-trap= overrides -fsanitize-recover=.
3291  llvm::Value *&Cond =
3292  CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)
3293  ? TrapCond
3294  : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
3295  ? RecoverableCond
3296  : FatalCond;
3297  Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
3298  }
3299 
3300  if (TrapCond)
3301  EmitTrapCheck(TrapCond, CheckHandler);
3302  if (!FatalCond && !RecoverableCond)
3303  return;
3304 
3305  llvm::Value *JointCond;
3306  if (FatalCond && RecoverableCond)
3307  JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
3308  else
3309  JointCond = FatalCond ? FatalCond : RecoverableCond;
3310  assert(JointCond);
3311 
3312  CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
3313  assert(SanOpts.has(Checked[0].second));
3314 #ifndef NDEBUG
3315  for (int i = 1, n = Checked.size(); i < n; ++i) {
3316  assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
3317  "All recoverable kinds in a single check must be same!");
3318  assert(SanOpts.has(Checked[i].second));
3319  }
3320 #endif
3321 
3322  llvm::BasicBlock *Cont = createBasicBlock("cont");
3323  llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
3324  llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
3325  // Give a hint that we very much don't expect to execute the handler.
3326  // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp.
3327  llvm::MDBuilder MDHelper(getLLVMContext());
3328  llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
3329  Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
3330  EmitBlock(Handlers);
3331 
3332  // Handler functions take an i8* pointing to the (handler-specific) static
3333  // information block, followed by a sequence of intptr_t arguments
3334  // representing operand values.
3335  SmallVector<llvm::Value *, 4> Args;
3336  SmallVector<llvm::Type *, 4> ArgTypes;
3337  if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
3338  Args.reserve(DynamicArgs.size() + 1);
3339  ArgTypes.reserve(DynamicArgs.size() + 1);
3340 
3341  // Emit handler arguments and create handler function type.
3342  if (!StaticArgs.empty()) {
3343  llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3344  auto *InfoPtr =
3345  new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3346  llvm::GlobalVariable::PrivateLinkage, Info);
3347  InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3348  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3349  Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy));
3350  ArgTypes.push_back(Int8PtrTy);
3351  }
3352 
3353  for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
3354  Args.push_back(EmitCheckValue(DynamicArgs[i]));
3355  ArgTypes.push_back(IntPtrTy);
3356  }
3357  }
3358 
3359  llvm::FunctionType *FnType =
3360  llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
3361 
3362  if (!FatalCond || !RecoverableCond) {
3363  // Simple case: we need to generate a single handler call, either
3364  // fatal, or non-fatal.
3365  emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
3366  (FatalCond != nullptr), Cont);
3367  } else {
3368  // Emit two handler calls: the first for the set of unrecoverable checks,
3369  // the second for the recoverable ones.
3370  llvm::BasicBlock *NonFatalHandlerBB =
3371  createBasicBlock("non_fatal." + CheckName);
3372  llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
3373  Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
3374  EmitBlock(FatalHandlerBB);
3375  emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
3376  NonFatalHandlerBB);
3377  EmitBlock(NonFatalHandlerBB);
3378  emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
3379  Cont);
3380  }
3381 
3382  EmitBlock(Cont);
3383 }
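// Sketch of the control flow emitted for a single recoverable check (names
// abbreviated; the !prof weights are the (1<<20)-1 : 1 hint from above):
//
//   %ok = ...                       ; the "check passed" condition
//   br i1 %ok, label %cont, label %handler.type_mismatch, !prof !N
// handler.type_mismatch:
//   call void @__ubsan_handle_type_mismatch_v1(i8* %info, i64 %val)
//   br label %cont
// cont: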
3384 
3385 void CodeGenFunction::EmitCfiSlowPathCheck(
3386  SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId,
3387  llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) {
3388  llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
3389 
3390  llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
3391  llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
3392 
3393  llvm::MDBuilder MDHelper(getLLVMContext());
3394  llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
3395  BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
3396 
3397  EmitBlock(CheckBB);
3398 
3399  bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);
3400 
3401  llvm::CallInst *CheckCall;
3402  llvm::FunctionCallee SlowPathFn;
3403  if (WithDiag) {
3404  llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3405  auto *InfoPtr =
3406  new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3407  llvm::GlobalVariable::PrivateLinkage, Info);
3408  InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3409  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3410 
3411  SlowPathFn = CGM.getModule().getOrInsertFunction(
3412  "__cfi_slowpath_diag",
3413  llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
3414  false));
3415  CheckCall = Builder.CreateCall(
3416  SlowPathFn, {TypeId, Ptr, Builder.CreateBitCast(InfoPtr, Int8PtrTy)});
3417  } else {
3418  SlowPathFn = CGM.getModule().getOrInsertFunction(
3419  "__cfi_slowpath",
3420  llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
3421  CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
3422  }
3423 
3424  CGM.setDSOLocal(
3425  cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
3426  CheckCall->setDoesNotThrow();
3427 
3428  EmitBlock(Cont);
3429 }
3430 
3431 // Emit a stub for __cfi_check function so that the linker knows about this
3432 // symbol in LTO mode.
3433 void CodeGenFunction::EmitCfiCheckStub() {
3434  llvm::Module *M = &CGM.getModule();
3435  auto &Ctx = M->getContext();
3436  llvm::Function *F = llvm::Function::Create(
3437  llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false),
3438  llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
3439  CGM.setDSOLocal(F);
3440  llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
3441  // FIXME: consider emitting an intrinsic call like
3442  // call void @llvm.cfi_check(i64 %0, i8* %1, i8* %2)
3443  // which can be lowered in CrossDSOCFI pass to the actual contents of
3444  // __cfi_check. This would allow inlining of __cfi_check calls.
3445  llvm::CallInst::Create(
3446  llvm::Intrinsic::getDeclaration(M, llvm::Intrinsic::trap), "", BB);
3447  llvm::ReturnInst::Create(Ctx, nullptr, BB);
3448 }
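// The stub above is morally equivalent to the following IR (a sketch); the
// CrossDSOCFI pass is expected to replace the body at LTO time:
//
//   define weak void @__cfi_check(i64 %typeid, i8* %addr, i8* %data) {
//   entry:
//     call void @llvm.trap()
//     ret void
//   }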
3449 
3450 // This function is basically a switch over the CFI failure kind, which is
3451 // extracted from CFICheckFailData (1st function argument). Each case is either
3452 // llvm.trap or a call to one of the two runtime handlers, based on
3453 // -fsanitize-trap and -fsanitize-recover settings. Default case (invalid
3454 // failure kind) traps, but this should really never happen. CFICheckFailData
3455 // can be nullptr if the calling module has -fsanitize-trap behavior for this
3456 // check kind; in this case __cfi_check_fail traps as well.
3457 void CodeGenFunction::EmitCfiCheckFail() {
3458  SanitizerScope SanScope(this);
3459  FunctionArgList Args;
3460  ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
3461  ImplicitParamDecl::Other);
3462  ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
3463  ImplicitParamDecl::Other);
3464  Args.push_back(&ArgData);
3465  Args.push_back(&ArgAddr);
3466 
3467  const CGFunctionInfo &FI =
3468  CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args);
3469 
3470  llvm::Function *F = llvm::Function::Create(
3471  llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
3472  llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
3473 
3474  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
3475  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
3476  F->setVisibility(llvm::GlobalValue::HiddenVisibility);
3477 
3478  StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
3479  SourceLocation());
3480 
3481  // This function is not affected by NoSanitizeList. This function does
3482  // not have a source location, but "src:*" would still apply. Revert any
3483  // changes to SanOpts made in StartFunction.
3484  SanOpts = CGM.getLangOpts().Sanitize;
3485 
3486  llvm::Value *Data =
3487  EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
3488  CGM.getContext().VoidPtrTy, ArgData.getLocation());
3489  llvm::Value *Addr =
3490  EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
3491  CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
3492 
3493  // Data == nullptr means the calling module has trap behavior for this check.
3494  llvm::Value *DataIsNotNullPtr =
3495  Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
3496  EmitTrapCheck(DataIsNotNullPtr, SanitizerHandler::CFICheckFail);
3497 
3498  llvm::StructType *SourceLocationTy =
3499  llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
3500  llvm::StructType *CfiCheckFailDataTy =
3501  llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
3502 
3503  llvm::Value *V = Builder.CreateConstGEP2_32(
3504  CfiCheckFailDataTy,
3505  Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
3506  0);
3507 
3508  Address CheckKindAddr(V, Int8Ty, getIntAlign());
3509  llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
3510 
3511  llvm::Value *AllVtables = llvm::MetadataAsValue::get(
3512  CGM.getLLVMContext(),
3513  llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
3514  llvm::Value *ValidVtable = Builder.CreateZExt(
3515  Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
3516  {Addr, AllVtables}),
3517  IntPtrTy);
3518 
3519  const std::pair<int, SanitizerMask> CheckKinds[] = {
3520  {CFITCK_VCall, SanitizerKind::CFIVCall},
3521  {CFITCK_NVCall, SanitizerKind::CFINVCall},
3522  {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast},
3523  {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast},
3524  {CFITCK_ICall, SanitizerKind::CFIICall}};
3525 
3526  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks;
3527  for (auto CheckKindMaskPair : CheckKinds) {
3528  int Kind = CheckKindMaskPair.first;
3529  SanitizerMask Mask = CheckKindMaskPair.second;
3530  llvm::Value *Cond =
3531  Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
3532  if (CGM.getLangOpts().Sanitize.has(Mask))
3533  EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
3534  {Data, Addr, ValidVtable});
3535  else
3536  EmitTrapCheck(Cond, SanitizerHandler::CFICheckFail);
3537  }
3538 
3539  FinishFunction();
3540  // The only reference to this function will be created during LTO link.
3541  // Make sure it survives until then.
3542  CGM.addUsedGlobal(F);
3543 }
3544 
3545 void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
3546  if (SanOpts.has(SanitizerKind::Unreachable)) {
3547  SanitizerScope SanScope(this);
3548  EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
3549  SanitizerKind::Unreachable),
3550  SanitizerHandler::BuiltinUnreachable,
3551  EmitCheckSourceLocation(Loc), None);
3552  }
3553  Builder.CreateUnreachable();
3554 }
3555 
3556 void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked,
3557  SanitizerHandler CheckHandlerID) {
3558  llvm::BasicBlock *Cont = createBasicBlock("cont");
3559 
3560  // If we're optimizing, collapse all calls to trap down to just one per
3561  // check-type per function to save on code size.
3562  if (TrapBBs.size() <= CheckHandlerID)
3563  TrapBBs.resize(CheckHandlerID + 1);
3564  llvm::BasicBlock *&TrapBB = TrapBBs[CheckHandlerID];
3565 
3566  if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) {
3567  TrapBB = createBasicBlock("trap");
3568  Builder.CreateCondBr(Checked, Cont, TrapBB);
3569  EmitBlock(TrapBB);
3570 
3571  llvm::CallInst *TrapCall =
3572  Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::ubsantrap),
3573  llvm::ConstantInt::get(CGM.Int8Ty, CheckHandlerID));
3574 
3575  if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3576  auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3577  CGM.getCodeGenOpts().TrapFuncName);
3578  TrapCall->addFnAttr(A);
3579  }
3580  TrapCall->setDoesNotReturn();
3581  TrapCall->setDoesNotThrow();
3582  Builder.CreateUnreachable();
3583  } else {
3584  auto Call = TrapBB->begin();
3585  assert(isa<llvm::CallInst>(Call) && "Expected call in trap BB");
3586 
3587  Call->applyMergedLocation(Call->getDebugLoc(),
3588  Builder.getCurrentDebugLocation());
3589  Builder.CreateCondBr(Checked, Cont, TrapBB);
3590  }
3591 
3592  EmitBlock(Cont);
3593 }
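// With optimization enabled, repeated failures of the same kind funnel into
// one shared block per function, e.g. (sketch; N is the CheckHandlerID):
//
// trap:                              ; preds = %entry, %for.body
//   call void @llvm.ubsantrap(i8 N)  ; immediate distinguishes the check kind
//   unreachable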
3594 
3595 llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
3596  llvm::CallInst *TrapCall =
3597  Builder.CreateCall(CGM.getIntrinsic(IntrID));
3598 
3599  if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3600  auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3601  CGM.getCodeGenOpts().TrapFuncName);
3602  TrapCall->addFnAttr(A);
3603  }
3604 
3605  return TrapCall;
3606 }
3607 
3608 Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
3609  LValueBaseInfo *BaseInfo,
3610  TBAAAccessInfo *TBAAInfo) {
3611  assert(E->getType()->isArrayType() &&
3612  "Array to pointer decay must have array source type!");
3613 
3614  // Expressions of array type can't be bitfields or vector elements.
3615  LValue LV = EmitLValue(E);
3616  Address Addr = LV.getAddress(*this);
3617 
3618  // If the array type was an incomplete type, we need to make sure
3619  // the decay ends up being the right type.
3620  llvm::Type *NewTy = ConvertType(E->getType());
3621  Addr = Builder.CreateElementBitCast(Addr, NewTy);
3622 
3623  // Note that VLA pointers are always decayed, so we don't need to do
3624  // anything here.
3625  if (!E->getType()->isVariableArrayType()) {
3626  assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3627  "Expected pointer to array");
3628  Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
3629  }
3630 
3631  // The result of this decay conversion points to an array element within the
3632  // base lvalue. However, since TBAA currently does not support representing
3633  // accesses to elements of member arrays, we conservatively represent accesses
3634  // to the pointee object as if it had no base lvalue specified.
3635  // TODO: Support TBAA for member arrays.
3636  QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
3637  if (BaseInfo) *BaseInfo = LV.getBaseInfo();
3638  if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
3639 
3640  return Builder.CreateElementBitCast(Addr, ConvertTypeForMem(EltType));
3641 }
3642 
3643 /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
3644 /// array to pointer, return the array subexpression.
3645 static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
3646  // If this isn't just an array->pointer decay, bail out.
3647  const auto *CE = dyn_cast<CastExpr>(E);
3648  if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
3649  return nullptr;
3650 
3651  // If this is a decay from variable width array, bail out.
3652  const Expr *SubExpr = CE->getSubExpr();
3653  if (SubExpr->getType()->isVariableArrayType())
3654  return nullptr;
3655 
3656  return SubExpr;
3657 }
3658 
3659 static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
3660  llvm::Type *elemType,
3661  llvm::Value *ptr,
3662  ArrayRef<llvm::Value*> indices,
3663  bool inbounds,
3664  bool signedIndices,
3665  SourceLocation loc,
3666  const llvm::Twine &name = "arrayidx") {
3667  if (inbounds) {
3668  return CGF.EmitCheckedInBoundsGEP(elemType, ptr, indices, signedIndices,
3669  CodeGenFunction::NotSubtraction, loc,
3670  name);
3671  } else {
3672  return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
3673  }
3674 }
3675 
3676 static CharUnits getArrayElementAlign(CharUnits arrayAlign,
3677  llvm::Value *idx,
3678  CharUnits eltSize) {
3679  // If we have a constant index, we can use the exact offset of the
3680  // element we're accessing.
3681  if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
3682  CharUnits offset = constantIdx->getZExtValue() * eltSize;
3683  return arrayAlign.alignmentAtOffset(offset);
3684 
3685  // Otherwise, use the worst-case alignment for any element.
3686  } else {
3687  return arrayAlign.alignmentOfArrayElement(eltSize);
3688  }
3689 }
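// Worked example: for a 16-byte-aligned array of 4-byte elements, a constant
// index of 3 yields alignmentAtOffset(12 bytes) == 4, an index of 4 recovers
// the full alignmentAtOffset(16 bytes) == 16, and a dynamic index falls back
// to the worst case alignmentOfArrayElement(4 bytes) == 4.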
3690 
3691 static QualType getFixedSizeElementType(const ASTContext &ctx,
3692  const VariableArrayType *vla) {
3693  QualType eltType;
3694  do {
3695  eltType = vla->getElementType();
3696  } while ((vla = ctx.getAsVariableArrayType(eltType)));
3697  return eltType;
3698 }
3699 
3700 /// Given an array base, check whether its member access belongs to a record
3701 /// with preserve_access_index attribute or not.
3702 static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
3703  if (!ArrayBase || !CGF.getDebugInfo())
3704  return false;
3705 
3706  // Only support base as either a MemberExpr or DeclRefExpr.
3707  // DeclRefExpr to cover cases like:
3708  // struct s { int a; int b[10]; };
3709  // struct s *p;
3710  // p[1].a
3711  // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
3712  // p->b[5] is a MemberExpr example.
3713  const Expr *E = ArrayBase->IgnoreImpCasts();
3714  if (const auto *ME = dyn_cast<MemberExpr>(E))
3715  return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
3716 
3717  if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
3718  const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
3719  if (!VarDef)
3720  return false;
3721 
3722  const auto *PtrT = VarDef->getType()->getAs<PointerType>();
3723  if (!PtrT)
3724  return false;
3725 
3726  const auto *PointeeT = PtrT->getPointeeType()
3727  ->getUnqualifiedDesugaredType();
3728  if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
3729  return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
3730  return false;
3731  }
3732 
3733  return false;
3734 }
3735 
3736 static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
3737  ArrayRef<llvm::Value *> indices,
3738  QualType eltType, bool inbounds,
3739  bool signedIndices, SourceLocation loc,
3740  QualType *arrayType = nullptr,
3741  const Expr *Base = nullptr,
3742  const llvm::Twine &name = "arrayidx") {
3743  // All the indices except the last must be zero.
3744 #ifndef NDEBUG
3745  for (auto idx : indices.drop_back())
3746  assert(isa<llvm::ConstantInt>(idx) &&
3747  cast<llvm::ConstantInt>(idx)->isZero());
3748 #endif
3749 
3750  // Determine the element size of the statically-sized base. This is
3751  // the thing that the indices are expressed in terms of.
3752  if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
3753  eltType = getFixedSizeElementType(CGF.getContext(), vla);
3754  }
3755 
3756  // We can use that to compute the best alignment of the element.
3757  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
3758  CharUnits eltAlign =
3759  getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
3760 
3761  llvm::Value *eltPtr;
3762  auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
3763  if (!LastIndex ||
3764  (!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {
3765  eltPtr = emitArraySubscriptGEP(
3766  CGF, addr.getElementType(), addr.getPointer(), indices, inbounds,
3767  signedIndices, loc, name);
3768  } else {
3769  // Remember the original array subscript for bpf target
3770  unsigned idx = LastIndex->getZExtValue();
3771  llvm::DIType *DbgInfo = nullptr;
3772  if (arrayType)
3773  DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
3774  eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(addr.getElementType(),
3775  addr.getPointer(),
3776  indices.size() - 1,
3777  idx, DbgInfo);
3778  }
3779 
3780  return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
3781 }
3782 
3783 LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
3784  bool Accessed) {
3785  // The index must always be an integer, which is not an aggregate. Emit it
3786  // in lexical order (this complexity is, sadly, required by C++17).
3787  llvm::Value *IdxPre =
3788  (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
3789  bool SignedIndices = false;
3790  auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
3791  auto *Idx = IdxPre;
3792  if (E->getLHS() != E->getIdx()) {
3793  assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
3794  Idx = EmitScalarExpr(E->getIdx());
3795  }
3796 
3797  QualType IdxTy = E->getIdx()->getType();
3798  bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
3799  SignedIndices |= IdxSigned;
3800 
3801  if (SanOpts.has(SanitizerKind::ArrayBounds))
3802  EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
3803 
3804  // Extend or truncate the index type to 32 or 64 bits.
3805  if (Promote && Idx->getType() != IntPtrTy)
3806  Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
3807 
3808  return Idx;
3809  };
3810  IdxPre = nullptr;
3811 
3812  // If the base is a vector type, then we are forming a vector element lvalue
3813  // with this subscript.
3814  if (E->getBase()->getType()->isVectorType() &&
3815  !isa<ExtVectorElementExpr>(E->getBase())) {
3816  // Emit the vector as an lvalue to get its address.
3817  LValue LHS = EmitLValue(E->getBase());
3818  auto *Idx = EmitIdxAfterBase(/*Promote*/false);
3819  assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
3820  return LValue::MakeVectorElt(LHS.getAddress(*this), Idx,
3821  E->getBase()->getType(), LHS.getBaseInfo(),
3822  TBAAAccessInfo());
3823  }
3824 
3825  // All the other cases basically behave like simple offsetting.
3826 
3827  // Handle the extvector case we ignored above.
3828  if (isa<ExtVectorElementExpr>(E->getBase())) {
3829  LValue LV = EmitLValue(E->getBase());
3830  auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3831  Address Addr = EmitExtVectorElementLValue(LV);
3832 
3833  QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
3834  Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
3835  SignedIndices, E->getExprLoc());
3836  return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
3837  CGM.getTBAAInfoForSubobject(LV, EltType));
3838  }
3839 
3840  LValueBaseInfo EltBaseInfo;
3841  TBAAAccessInfo EltTBAAInfo;
3842  Address Addr = Address::invalid();
3843  if (const VariableArrayType *vla =
3844  getContext().getAsVariableArrayType(E->getType())) {
3845  // The base must be a pointer, which is not an aggregate. Emit
3846  // it. It needs to be emitted first in case it's what captures
3847  // the VLA bounds.
3848  Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
3849  auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3850 
3851  // The element count here is the total number of non-VLA elements.
3852  llvm::Value *numElements = getVLASize(vla).NumElts;
3853 
3854  // Effectively, the multiply by the VLA size is part of the GEP.
3855  // GEP indexes are signed, and scaling an index isn't permitted to
3856  // signed-overflow, so we use the same semantics for our explicit
3857  // multiply. We suppress this if overflow is not undefined behavior.
3858  if (getLangOpts().isSignedOverflowDefined()) {
3859  Idx = Builder.CreateMul(Idx, numElements);
3860  } else {
3861  Idx = Builder.CreateNSWMul(Idx, numElements);
3862  }
3863 
3864  Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
3865  !getLangOpts().isSignedOverflowDefined(),
3866  SignedIndices, E->getExprLoc());
3867 
3868  } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
3869  // Indexing over an interface, as in "NSString *P; P[4];"
3870 
3871  // Emit the base pointer.
3872  Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
3873  auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3874 
3875  CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
3876  llvm::Value *InterfaceSizeVal =
3877  llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
3878 
3879  llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
3880 
3881  // We don't necessarily build correct LLVM struct types for ObjC
3882  // interfaces, so we can't rely on GEP to do this scaling
3883  // correctly, so we need to cast to i8*. FIXME: is this actually
3884  // true? A lot of other things in the fragile ABI would break...
3885  llvm::Type *OrigBaseElemTy = Addr.getElementType();
3886  Addr = Builder.CreateElementBitCast(Addr, Int8Ty);
3887 
3888  // Do the GEP.
3889  CharUnits EltAlign =
3890  getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
3891  llvm::Value *EltPtr =
3892  emitArraySubscriptGEP(*this, Addr.getElementType(), Addr.getPointer(),
3893  ScaledIdx, false, SignedIndices, E->getExprLoc());
3894  Addr = Address(EltPtr, Addr.getElementType(), EltAlign);
3895 
3896  // Cast back.
3897  Addr = Builder.CreateElementBitCast(Addr, OrigBaseElemTy);
3898  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
3899  // If this is A[i] where A is an array, the frontend will have decayed the
3900  // base to be a ArrayToPointerDecay implicit cast. While correct, it is
3901  // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
3902  // "gep x, i" here. Emit one "gep A, 0, i".
3903  assert(Array->getType()->isArrayType() &&
3904  "Array to pointer decay must have array source type!");
3905  LValue ArrayLV;
3906  // For simple multidimensional array indexing, set the 'accessed' flag for
3907  // better bounds-checking of the base expression.
3908  if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
3909  ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
3910  else
3911  ArrayLV = EmitLValue(Array);
3912  auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3913 
3914  // Propagate the alignment from the array itself to the result.
3915  QualType arrayType = Array->getType();
3916  Addr = emitArraySubscriptGEP(
3917  *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
3918  E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
3919  E->getExprLoc(), &arrayType, E->getBase());
3920  EltBaseInfo = ArrayLV.getBaseInfo();
3921  EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
3922  } else {
3923  // The base must be a pointer; emit it with an estimate of its alignment.
3924  Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
3925  auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3926  QualType ptrType = E->getBase()->getType();
3927  Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
3928  !getLangOpts().isSignedOverflowDefined(),
3929  SignedIndices, E->getExprLoc(), &ptrType,
3930  E->getBase());
3931  }
3932 
3933  LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
3934 
3935  if (getLangOpts().ObjC &&
3936  getLangOpts().getGC() != LangOptions::NonGC) {
3937  LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
3938  setObjCGCLValueClass(getContext(), E, LV);
3939  }
3940  return LV;
3941 }
3942 
3943 LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
3944  assert(
3945  !E->isIncomplete() &&
3946  "incomplete matrix subscript expressions should be rejected during Sema");
3947  LValue Base = EmitLValue(E->getBase());
3948  llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx());
3949  llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
3950  llvm::Value *NumRows = Builder.getIntN(
3951  RowIdx->getType()->getScalarSizeInBits(),
3952  E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
3953  llvm::Value *FinalIdx =
3954  Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
3955  return LValue::MakeMatrixElt(
3956  MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx,
3957  E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
3958 }
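// Index arithmetic example: matrix values are laid out column-major, so for
// a ConstantMatrixType with 4 rows, m[2][1] selects flat element
// FinalIdx = ColIdx * NumRows + RowIdx = 1 * 4 + 2 = 6 of the underlying
// vector.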
3959 
3960 static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
3961  LValueBaseInfo &BaseInfo,
3962  TBAAAccessInfo &TBAAInfo,
3963  QualType BaseTy, QualType ElTy,
3964  bool IsLowerBound) {
3965  LValue BaseLVal;
3966  if (auto *ASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParenImpCasts())) {
3967  BaseLVal = CGF.EmitOMPArraySectionExpr(ASE, IsLowerBound);
3968  if (BaseTy->isArrayType()) {
3969  Address Addr = BaseLVal.getAddress(CGF);
3970  BaseInfo = BaseLVal.getBaseInfo();
3971 
3972  // If the array type was an incomplete type, we need to make sure
3973  // the decay ends up being the right type.
3974  llvm::Type *NewTy = CGF.ConvertType(BaseTy);
3975  Addr = CGF.Builder.CreateElementBitCast(Addr, NewTy);
3976 
3977  // Note that VLA pointers are always decayed, so we don't need to do
3978  // anything here.
3979  if (!BaseTy->isVariableArrayType()) {
3980  assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3981  "Expected pointer to array");
3982  Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
3983  }
3984 
3985  return CGF.Builder.CreateElementBitCast(Addr,
3986  CGF.ConvertTypeForMem(ElTy));
3987  }
3988  LValueBaseInfo TypeBaseInfo;
3989  TBAAAccessInfo TypeTBAAInfo;
3990  CharUnits Align =
3991  CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
3992  BaseInfo.mergeForCast(TypeBaseInfo);
3993  TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
3994  return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)),
3995  CGF.ConvertTypeForMem(ElTy), Align);
3996  }
3997  return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
3998 }
3999 
4000 LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
4001  bool IsLowerBound) {
4002  QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(E->getBase());
4003  QualType ResultExprTy;
4004  if (auto *AT = getContext().getAsArrayType(BaseTy))
4005  ResultExprTy = AT->getElementType();
4006  else
4007  ResultExprTy = BaseTy->getPointeeType();
4008  llvm::Value *Idx = nullptr;
4009  if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
4010  // Requesting lower bound or upper bound, but without provided length and
4011  // without ':' symbol for the default length -> length = 1.
4012  // Idx = LowerBound ?: 0;
4013  if (auto *LowerBound = E->getLowerBound()) {
4014  Idx = Builder.CreateIntCast(
4015  EmitScalarExpr(LowerBound), IntPtrTy,
4016  LowerBound->getType()->hasSignedIntegerRepresentation());
4017  } else
4018  Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
4019  } else {
4020  // Try to emit length or lower bound as constant. If this is possible, 1
4021  // is subtracted from constant length or lower bound. Otherwise, emit LLVM
4022  // IR (LB + Len) - 1.
4023  auto &C = CGM.getContext();
4024  auto *Length = E->getLength();
4025  llvm::APSInt ConstLength;
4026  if (Length) {
4027  // Idx = LowerBound + Length - 1;
4028  if (Optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
4029  ConstLength = CL->zextOrTrunc(PointerWidthInBits);
4030  Length = nullptr;
4031  }
4032  auto *LowerBound = E->getLowerBound();
4033  llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
4034  if (LowerBound) {
4035  if (Optional<llvm::APSInt> LB = LowerBound->getIntegerConstantExpr(C)) {
4036  ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
4037  LowerBound = nullptr;
4038  }
4039  }
4040  if (!Length)
4041  --ConstLength;
4042  else if (!LowerBound)
4043  --ConstLowerBound;
4044 
4045  if (Length || LowerBound) {
4046  auto *LowerBoundVal =
4047  LowerBound
4048  ? Builder.CreateIntCast(
4049  EmitScalarExpr(LowerBound), IntPtrTy,
4050  LowerBound->getType()->hasSignedIntegerRepresentation())
4051  : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
4052  auto *LengthVal =
4053  Length
4054  ? Builder.CreateIntCast(
4055  EmitScalarExpr(Length), IntPtrTy,
4056  Length->getType()->hasSignedIntegerRepresentation())
4057  : llvm::ConstantInt::get(IntPtrTy, ConstLength);
4058  Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
4059  /*HasNUW=*/false,
4060  !getLangOpts().isSignedOverflowDefined());
4061  if (Length && LowerBound) {
4062  Idx = Builder.CreateSub(
4063  Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
4064  /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
4065  }
4066  } else
4067  Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
4068  } else {
4069  // Idx = ArraySize - 1;
4070  QualType ArrayTy = BaseTy->isPointerType()
4071  ? E->getBase()->IgnoreParenImpCasts()->getType()
4072  : BaseTy;
4073  if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
4074  Length = VAT->getSizeExpr();
4075  if (Optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
4076  ConstLength = *L;
4077  Length = nullptr;
4078  }
4079  } else {
4080  auto *CAT = C.getAsConstantArrayType(ArrayTy);
4081  ConstLength = CAT->getSize();
4082  }
4083  if (Length) {
4084  auto *LengthVal = Builder.CreateIntCast(
4085  EmitScalarExpr(Length), IntPtrTy,
4086  Length->getType()->hasSignedIntegerRepresentation());
4087  Idx = Builder.CreateSub(
4088  LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
4089  /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
4090  } else {
4091  ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
4092  --ConstLength;
4093  Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
4094  }
4095  }
4096  }
4097  assert(Idx);
4098 
4099  Address EltPtr = Address::invalid();
4100  LValueBaseInfo BaseInfo;
4101  TBAAAccessInfo TBAAInfo;
4102  if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
4103  // The base must be a pointer, which is not an aggregate. Emit
4104  // it. It needs to be emitted first in case it's what captures
4105  // the VLA bounds.
4106  Address Base =
4107  emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
4108  BaseTy, VLA->getElementType(), IsLowerBound);
4109  // The element count here is the total number of non-VLA elements.
4110  llvm::Value *NumElements = getVLASize(VLA).NumElts;
4111 
4112  // Effectively, the multiply by the VLA size is part of the GEP.
4113  // GEP indexes are signed, and scaling an index isn't permitted to
4114  // signed-overflow, so we use the same semantics for our explicit
4115  // multiply. We suppress this if overflow is not undefined behavior.
4116  if (getLangOpts().isSignedOverflowDefined())
4117  Idx = Builder.CreateMul(Idx, NumElements);
4118  else
4119  Idx = Builder.CreateNSWMul(Idx, NumElements);
4120  EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
4121  !getLangOpts().isSignedOverflowDefined(),
4122  /*signedIndices=*/false, E->getExprLoc());
4123  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
4124  // If this is A[i] where A is an array, the frontend will have decayed the
4125  // base to be a ArrayToPointerDecay implicit cast. While correct, it is
4126  // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
4127  // "gep x, i" here. Emit one "gep A, 0, i".
4128  assert(Array->getType()->isArrayType() &&
4129  "Array to pointer decay must have array source type!");
4130  LValue ArrayLV;
4131  // For simple multidimensional array indexing, set the 'accessed' flag for
4132  // better bounds-checking of the base expression.
4133  if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
4134  ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
4135  else
4136  ArrayLV = EmitLValue(Array);
4137 
4138  // Propagate the alignment from the array itself to the result.
4139  EltPtr = emitArraySubscriptGEP(
4140  *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
4141  ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
4142  /*signedIndices=*/false, E->getExprLoc());
4143  BaseInfo = ArrayLV.getBaseInfo();
4144  TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
4145  } else {
4146  Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo,
4147  TBAAInfo, BaseTy, ResultExprTy,
4148  IsLowerBound);
4149  EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
4150  !getLangOpts().isSignedOverflowDefined(),
4151  /*signedIndices=*/false, E->getExprLoc());
4152  }
4153 
4154  return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
4155 }
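// Bounds arithmetic example for the constant folding above: the OpenMP
// section a[2:5] needs the upper index LB + Len - 1 = 2 + 5 - 1 = 6, emitted
// as a plain constant. For a[2:n] the constant part is pre-decremented, so
// the emitted IR is a single "add nsw i64 1, %n" (NSW unless -fwrapv makes
// signed overflow defined).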
4156 
4157 LValue CodeGenFunction::
4158 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
4159  // Emit the base vector as an l-value.
4160  LValue Base;
4161 
4162  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
4163  if (E->isArrow()) {
4164  // If it is a pointer to a vector, emit the address and form an lvalue with
4165  // it.
4166  LValueBaseInfo BaseInfo;
4167  TBAAAccessInfo TBAAInfo;
4168  Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
4169  const auto *PT = E->getBase()->getType()->castAs<PointerType>();
4170  Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
4171  Base.getQuals().removeObjCGCAttr();
4172  } else if (E->getBase()->isGLValue()) {
4173  // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
4174  // emit the base as an lvalue.
4175  assert(E->getBase()->getType()->isVectorType());
4176  Base = EmitLValue(E->getBase());
4177  } else {
4178  // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
4179  assert(E->getBase()->getType()->isVectorType() &&
4180  "Result must be a vector");
4181  llvm::Value *Vec = EmitScalarExpr(E->getBase());
4182 
4183  // Store the vector to memory (because LValue wants an address).
4184  Address VecMem = CreateMemTemp(E->getBase()->getType());
4185  Builder.CreateStore(Vec, VecMem);
4186  Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
4187  AlignmentSource::Decl);
4188  }
4189 
4190  QualType type =
4191  E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
4192 
4193  // Encode the element access list into a vector of unsigned indices.
4194  SmallVector<uint32_t, 4> Indices;
4195  E->getEncodedElementAccess(Indices);
4196 
4197  if (Base.isSimple()) {
4198  llvm::Constant *CV =
4199  llvm::ConstantDataVector::get(getLLVMContext(), Indices);
4200  return LValue::MakeExtVectorElt(Base.getAddress(*this), CV, type,
4201  Base.getBaseInfo(), TBAAAccessInfo());
4202  }
4203  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
4204 
4205  llvm::Constant *BaseElts = Base.getExtVectorElts();
4206  SmallVector<llvm::Constant *, 4> CElts;
4207 
4208  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
4209  CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
4210  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
4211  return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
4212  Base.getBaseInfo(), TBAAAccessInfo());
4213 }
4214 
4215 LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
4216  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
4217  EmitIgnoredExpr(E->getBase());
4218  return EmitDeclRefLValue(DRE);
4219  }
4220 
4221  Expr *BaseExpr = E->getBase();
4222  // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
4223  LValue BaseLV;
4224  if (E->isArrow()) {
4225  LValueBaseInfo BaseInfo;
4226  TBAAAccessInfo TBAAInfo;
4227  Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
4228  QualType PtrTy = BaseExpr->getType()->getPointeeType();
4229  SanitizerSet SkippedChecks;
4230  bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
4231  if (IsBaseCXXThis)
4232  SkippedChecks.set(SanitizerKind::Alignment, true);
4233  if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
4234  SkippedChecks.set(SanitizerKind::Null, true);
4235  EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy,
4236  /*Alignment=*/CharUnits::Zero(), SkippedChecks);
4237  BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
4238  } else
4239  BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
4240 
4241  NamedDecl *ND = E->getMemberDecl();
4242  if (auto *Field = dyn_cast<FieldDecl>(ND)) {
4243  LValue LV = EmitLValueForField(BaseLV, Field);
4244  setObjCGCLValueClass(getContext(), E, LV);
4245  if (getLangOpts().OpenMP) {
4246  // If the member was explicitly marked as nontemporal, mark it as
4247  // nontemporal. If the base lvalue is marked as nontemporal, mark access
4248  // to children as nontemporal too.
4249  if ((IsWrappedCXXThis(BaseExpr) &&
4250  CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
4251  BaseLV.isNontemporal())
4252  LV.setNontemporal(/*Value=*/true);
4253  }
4254  return LV;
4255  }
4256 
4257  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
4258  return EmitFunctionDeclLValue(*this, E, FD);
4259 
4260  llvm_unreachable("Unhandled member declaration!");
4261 }
4262 
4263 /// Given that we are currently emitting a lambda, emit an l-value for
4264 /// one of its members.
4265 LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
4266  if (CurCodeDecl) {
4267  assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda());
4268  assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent());
4269  }
4270  QualType LambdaTagType =
4271  getContext().getTagDeclType(Field->getParent());
4272  LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType);
4273  return EmitLValueForField(LambdaLV, Field);
4274 }
4275 
4276 /// Get the field index in the debug info. The debug info structure/union
4277 /// will ignore the unnamed bitfields.
4278 unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
4279  unsigned FieldIndex) {
4280  unsigned I = 0, Skipped = 0;
4281 
4282  for (auto F : Rec->getDefinition()->fields()) {
4283  if (I == FieldIndex)
4284  break;
4285  if (F->isUnnamedBitfield())
4286  Skipped++;
4287  I++;
4288  }
4289 
4290  return FieldIndex - Skipped;
4291 }
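// Example: given
//   struct S { int a; int : 3; int b; };
// the unnamed bit-field occupies FieldIndex 1 but is invisible to the debug
// info, so getDebugInfoFIndex(S, /*FieldIndex=*/2) returns 2 - 1 == 1 for b.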
4292 
4293 /// Get the address of a zero-sized field within a record. The resulting
4294 /// address doesn't necessarily have the right type.
4295 static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
4296  const FieldDecl *Field) {
4297  CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
4298  CGF.getContext().getFieldOffset(Field));
4299  if (Offset.isZero())
4300  return Base;
4301  Base = CGF.Builder.CreateElementBitCast(Base, CGF.Int8Ty);
4302  return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
4303 }
4304 
4305 /// Drill down to the storage of a field without walking into
4306 /// reference types.
4307 ///
4308 /// The resulting address doesn't necessarily have the right type.
4309 static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
4310  const FieldDecl *field) {
4311  if (field->isZeroSize(CGF.getContext()))
4312  return emitAddrOfZeroSizeField(CGF, base, field);
4313 
4314  const RecordDecl *rec = field->getParent();
4315 
4316  unsigned idx =
4317  CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4318 
4319  return CGF.Builder.CreateStructGEP(base, idx, field->getName());
4320 }
4321 
4322 static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
4323  Address addr, const FieldDecl *field) {
4324  const RecordDecl *rec = field->getParent();
4325  llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
4326  base.getType(), rec->getLocation());
4327 
4328  unsigned idx =
4329  CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4330 
4331  return CGF.Builder.CreatePreserveStructAccessIndex(
4332  addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
4333 }
4334 
4335 static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
4336  const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
4337  if (!RD)
4338  return false;
4339 
4340  if (RD->isDynamicClass())
4341  return true;
4342 
4343  for (const auto &Base : RD->bases())
4344  if (hasAnyVptr(Base.getType(), Context))
4345  return true;
4346 
4347  for (const FieldDecl *Field : RD->fields())
4348  if (hasAnyVptr(Field->getType(), Context))
4349  return true;
4350 
4351  return false;
4352 }
4353 
4354 LValue CodeGenFunction::EmitLValueForField(LValue base,
4355  const FieldDecl *field) {
4356  LValueBaseInfo BaseInfo = base.getBaseInfo();
4357 
4358  if (field->isBitField()) {
4359  const CGRecordLayout &RL =
4360  CGM.getTypes().getCGRecordLayout(field->getParent());
4361  const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
4362  const bool UseVolatile = isAAPCS(CGM.getTarget()) &&
4363  CGM.getCodeGenOpts().AAPCSBitfieldWidth &&
4364  Info.VolatileStorageSize != 0 &&
4365  field->getType()
4366  .withCVRQualifiers(base.getVRQualifiers())
4367  .isVolatileQualified();
4368  Address Addr = base.getAddress(*this);
4369  unsigned Idx = RL.getLLVMFieldNo(field);
4370  const RecordDecl *rec = field->getParent();
4371  if (!UseVolatile) {
4372  if (!IsInPreservedAIRegion &&
4373  (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
4374  if (Idx != 0)
4375  // For structs, we GEP to the field that the record layout suggests.
4376  Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
4377  } else {
4378  llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
4379  getContext().getRecordType(rec), rec->getLocation());
4380  Addr = Builder.CreatePreserveStructAccessIndex(
4381  Addr, Idx, getDebugInfoFIndex(rec, field->getFieldIndex()),
4382  DbgInfo);
4383  }
4384  }
4385  const unsigned SS =
4386  UseVolatile ? Info.VolatileStorageSize : Info.StorageSize;
4387  // Get the access type.
4388  llvm::Type *FieldIntTy = llvm::Type::getIntNTy(getLLVMContext(), SS);
4389  if (Addr.getElementType() != FieldIntTy)
4390  Addr = Builder.CreateElementBitCast(Addr, FieldIntTy);
4391  if (UseVolatile) {
4392  const unsigned VolatileOffset = Info.VolatileStorageOffset.getQuantity();
4393  if (VolatileOffset)
4394  Addr = Builder.CreateConstInBoundsGEP(Addr, VolatileOffset);
4395  }
4396 
4397  QualType fieldType =
4398  field->getType().withCVRQualifiers(base.getVRQualifiers());
4399  // TODO: Support TBAA for bit fields.
4400  LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
4401  return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
4402  TBAAAccessInfo());
4403  }
4404 
4405  // Fields of may-alias structures are may-alias themselves.
4406  // FIXME: this should get propagated down through anonymous structs
4407  // and unions.
4408  QualType FieldType = field->getType();
4409  const RecordDecl *rec = field->getParent();
4410  AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
4411  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
4412  TBAAAccessInfo FieldTBAAInfo;
4413  if (base.getTBAAInfo().isMayAlias() ||
4414  rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
4415  FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4416  } else if (rec->isUnion()) {
4417  // TODO: Support TBAA for unions.
4418  FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4419  } else {
4420  // If no base type has been assigned for the base access, then try to generate
4421  // one for this base lvalue.
4422  FieldTBAAInfo = base.getTBAAInfo();
4423  if (!FieldTBAAInfo.BaseType) {
4424  FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
4425  assert(!FieldTBAAInfo.Offset &&
4426  "Nonzero offset for an access with no base type!");
4427  }
4428 
4429  // Adjust offset to be relative to the base type.
4430  const ASTRecordLayout &Layout =
4431  getContext().getASTRecordLayout(field->getParent());
4432  unsigned CharWidth = getContext().getCharWidth();
4433  if (FieldTBAAInfo.BaseType)
4434  FieldTBAAInfo.Offset +=
4435  Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
4436 
4437  // Update the final access type and size.
4438  FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
4439  FieldTBAAInfo.Size =
4440  getContext().getTypeSizeInChars(FieldType).getQuantity();
4441  }
4442 
4443  Address addr = base.getAddress(*this);
4444  if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
4445  if (CGM.getCodeGenOpts().StrictVTablePointers &&
4446  ClassDef->isDynamicClass()) {
4447  // Getting to any field of a dynamic object requires stripping dynamic
4448  // information provided by invariant.group. This is because accessing
4449  // fields may leak the real address of the dynamic object, which could
4450  // result in miscompilation when the leaked pointer is compared.
4451  auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer());
4452  addr = Address(stripped, addr.getElementType(), addr.getAlignment());
4453  }
4454  }
4455 
4456  unsigned RecordCVR = base.getVRQualifiers();
4457  if (rec->isUnion()) {
4458  // For unions, there is no pointer adjustment.
4459  if (CGM.getCodeGenOpts().StrictVTablePointers &&
4460  hasAnyVptr(FieldType, getContext()))
4461  // Because unions can easily skip invariant.barriers, we need to add
4462  // a barrier every time a CXXRecord field with a vptr is referenced.
4463  addr = Builder.CreateLaunderInvariantGroup(addr);
4464 
4465  if (IsInPreservedAIRegion ||
4466  (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
4467  // Remember the original union field index
4468  llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
4469  rec->getLocation());
4470  addr = Address(
4471  Builder.CreatePreserveUnionAccessIndex(
4472  addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
4473  addr.getElementType(), addr.getAlignment());
4474  }
4475 
4476  if (FieldType->isReferenceType())
4477  addr = Builder.CreateElementBitCast(
4478  addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName());
4479  } else {
4480  if (!IsInPreservedAIRegion &&
4481  (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
4482  // For structs, we GEP to the field that the record layout suggests.
4483  addr = emitAddrOfFieldStorage(*this, addr, field);
4484  else
4485  // Remember the original struct field index
4486  addr = emitPreserveStructAccess(*this, base, addr, field);
4487  }
4488 
4489  // If this is a reference field, load the reference right now.
4490  if (FieldType->isReferenceType()) {
4491  LValue RefLVal =
4492  MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
4493  if (RecordCVR & Qualifiers::Volatile)
4494  RefLVal.getQuals().addVolatile();
4495  addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
4496 
4497  // Qualifiers on the struct don't apply to the referencee.
4498  RecordCVR = 0;
4499  FieldType = FieldType->getPointeeType();
4500  }
4501 
4502  // Make sure that the address is pointing to the right type. This is critical
4503  // for both unions and structs. A union needs a bitcast, a struct element
4504  // will need a bitcast if the LLVM type laid out doesn't match the desired
4505  // type.
4506  addr = Builder.CreateElementBitCast(
4507  addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName());
4508 
4509  if (field->hasAttr<AnnotateAttr>())
4510  addr = EmitFieldAnnotations(field, addr);
4511 
4512  LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
4513  LV.getQuals().addCVRQualifiers(RecordCVR);
4514 
4515  // __weak attribute on a field is ignored.
4516  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
4517  LV.getQuals().removeObjCGCAttr();
4518 
4519  return LV;
4520 }
4521 
4522 LValue
4523 CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
4524  const FieldDecl *Field) {
4525  QualType FieldType = Field->getType();
4526 
4527  if (!FieldType->isReferenceType())
4528  return EmitLValueForField(Base, Field);
4529 
4530  Address V = emitAddrOfFieldStorage(*this, Base.getAddress(*this), Field);
4531 
4532  // Make sure that the address is pointing to the right type.
4533  llvm::Type *llvmType = ConvertTypeForMem(FieldType);
4534  V = Builder.CreateElementBitCast(V, llvmType, Field->getName());
4535 
4536  // TODO: Generate TBAA information that describes this access as a structure
4537  // member access and not just an access to an object of the field's type. This
4538  // should be similar to what we do in EmitLValueForField().
4539  LValueBaseInfo BaseInfo = Base.getBaseInfo();
4540  AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
4541  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
4542  return MakeAddrLValue(V, FieldType, FieldBaseInfo,
4543  CGM.getTBAAInfoForSubobject(Base, FieldType));
4544 }
4545 
4546 LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E) {
4547  if (E->isFileScope()) {
4548  ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
4549  return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
4550  }
4551  if (E->getType()->isVariablyModifiedType())
4552  // make sure to emit the VLA size.
4553  EmitVariablyModifiedType(E->getType());
4554 
4555  Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
4556  const Expr *InitExpr = E->getInitializer();
4557  LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);
4558 
4559  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
4560  /*Init*/ true);
4561 
4562  // Block-scope compound literals are destroyed at the end of the enclosing
4563  // scope in C.
4564  if (!getLangOpts().CPlusPlus)
4565  if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
4566  pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr,
4567  E->getType(), getDestroyer(DtorKind),
4568  DtorKind & EHCleanup);
4569 
4570  return Result;
4571 }
4572 
4573 LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
4574  if (!E->isGLValue())
4575  // Initializing an aggregate temporary in C++11: T{...}.
4576  return EmitAggExprToLValue(E);
4577 
4578  // An lvalue initializer list must be initializing a reference.
4579  assert(E->isTransparent() && "non-transparent glvalue init list");
4580  return EmitLValue(E->getInit(0));
4581 }
4582 
4583 /// Emit the operand of a glvalue conditional operator. This is either a glvalue
4584 /// or a (possibly-parenthesized) throw-expression. If this is a throw, no
4585 /// LValue is returned and the current block has been terminated.
4586 static Optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
4587                                                     const Expr *Operand) {
4588  if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
4589  CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
4590  return None;
4591  }
4592 
4593  return CGF.EmitLValue(Operand);
4594 }
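// Illustrative sketch (not part of the file): the throw case arises in a
// glvalue conditional where one arm never produces a value, e.g. (with
// hypothetical 'ok' and 'value')
//
//   int &ref = ok ? value : throw std::runtime_error("no value");
//
// The throw arm terminates its block and contributes None instead of an
// LValue.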
4595 
4596 namespace {
4597 // Handle the case where the condition constant-folds to a simple integer,
4598 // which means we don't have to separately handle the true/false blocks.
4599 llvm::Optional<LValue> HandleConditionalOperatorLValueSimpleCase(
4600     CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
4601   const Expr *condExpr = E->getCond();
4602  bool CondExprBool;
4603  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
4604  const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
4605  if (!CondExprBool)
4606  std::swap(Live, Dead);
4607 
4608  if (!CGF.ContainsLabel(Dead)) {
4609  // If the true case is live, we need to track its region.
4610  if (CondExprBool)
4611  CGF.incrementProfileCounter(E);
4612   // If the live case is a throw expression, emit it and return an undefined
4613   // lvalue, because its result cannot be used.
4614  if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
4615  CGF.EmitCXXThrowExpr(ThrowExpr);
4616  llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
4617  llvm::Type *Ty = llvm::PointerType::getUnqual(ElemTy);
4618  return CGF.MakeAddrLValue(
4619  Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
4620  Dead->getType());
4621  }
4622  return CGF.EmitLValue(Live);
4623  }
4624  }
4625  return llvm::None;
4626 }
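// Illustrative sketch (not part of the file): given "int a, b;", a condition
// that constant-folds lets us skip the dead arm entirely (provided it
// contains no labels):
//
//   enum { kUseA = 1 };
//   int &r = kUseA ? a : b;   // emitted exactly as "int &r = a;"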
4627 struct ConditionalInfo {
4628  llvm::BasicBlock *lhsBlock, *rhsBlock;
4629  Optional<LValue> LHS, RHS;
4630 };
4631 
4632 // Create and generate the 3 blocks for a conditional operator.
4633 // Leaves the 'current block' in the continuation basic block.
4634 template<typename FuncTy>
4635 ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
4636  const AbstractConditionalOperator *E,
4637  const FuncTy &BranchGenFunc) {
4638  ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
4639  CGF.createBasicBlock("cond.false"), llvm::None,
4640  llvm::None};
4641  llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");
4642 
4643   CodeGenFunction::ConditionalEvaluation eval(CGF);
4644   CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
4645  CGF.getProfileCount(E));
4646 
4647  // Any temporaries created here are conditional.
4648  CGF.EmitBlock(Info.lhsBlock);
4649  CGF.incrementProfileCounter(E);
4650  eval.begin(CGF);
4651  Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
4652  eval.end(CGF);
4653  Info.lhsBlock = CGF.Builder.GetInsertBlock();
4654 
4655  if (Info.LHS)
4656  CGF.Builder.CreateBr(endBlock);
4657 
4658  // Any temporaries created here are conditional.
4659  CGF.EmitBlock(Info.rhsBlock);
4660  eval.begin(CGF);
4661  Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
4662  eval.end(CGF);
4663  Info.rhsBlock = CGF.Builder.GetInsertBlock();
4664  CGF.EmitBlock(endBlock);
4665 
4666  return Info;
4667 }
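// Illustrative sketch (not part of the file): for "c ? x : y" the blocks
// created above form the usual diamond; roughly:
//
//   br i1 %c, label %cond.true, label %cond.false
// cond.true:                     ; ... then br label %cond.end
// cond.false:                    ; ... then fall through to %cond.end
// cond.end:                      ; insertion point left here for the caller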
4668 } // namespace
4669 
4670 void CodeGenFunction::EmitIgnoredConditionalOperator(
4671     const AbstractConditionalOperator *E) {
4672  if (!E->isGLValue()) {
4673  // ?: here should be an aggregate.
4674  assert(hasAggregateEvaluationKind(E->getType()) &&
4675  "Unexpected conditional operator!");
4676  return (void)EmitAggExprToLValue(E);
4677  }
4678 
4679  OpaqueValueMapping binding(*this, E);
4680  if (HandleConditionalOperatorLValueSimpleCase(*this, E))
4681  return;
4682 
4683  EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
4684  CGF.EmitIgnoredExpr(E);
4685  return LValue{};
4686  });
4687 }
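// Illustrative sketch (not part of the file): this path handles a glvalue
// conditional whose result is discarded, e.g.
//
//   c ? a : b;   // both arms are still emitted for their side effects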
4688 LValue CodeGenFunction::EmitConditionalOperatorLValue(
4689     const AbstractConditionalOperator *expr) {
4690   if (!expr->isGLValue()) {
4691  // ?: here should be an aggregate.
4692  assert(hasAggregateEvaluationKind(expr->getType()) &&
4693  "Unexpected conditional operator!");
4694  return EmitAggExprToLValue(expr);
4695  }
4696 
4697  OpaqueValueMapping binding(*this, expr);
4698  if (llvm::Optional<LValue> Res =
4699  HandleConditionalOperatorLValueSimpleCase(*this, expr))
4700  return *Res;
4701 
4702  ConditionalInfo Info = EmitConditionalBlocks(
4703  *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
4704  return EmitLValueOrThrowExpression(CGF, E);
4705  });
4706 
4707  if ((Info.LHS && !Info.LHS->isSimple()) ||
4708  (Info.RHS && !Info.RHS->isSimple()))
4709  return EmitUnsupportedLValue(expr, "conditional operator");
4710 
4711  if (Info.LHS && Info.RHS) {
4712  Address lhsAddr = Info.LHS->getAddress(*this);
4713  Address rhsAddr = Info.RHS->getAddress(*this);
4714  llvm::PHINode *phi = Builder.CreatePHI(lhsAddr.getType(), 2, "cond-lvalue");
4715  phi->addIncoming(lhsAddr.getPointer(), Info.lhsBlock);
4716  phi->addIncoming(rhsAddr.getPointer(), Info.rhsBlock);
4717  Address result(phi, lhsAddr.getElementType(),
4718  std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
4719  AlignmentSource alignSource =
4720  std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
4721  Info.RHS->getBaseInfo().getAlignmentSource());
4722     TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
4723         Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
4724  return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
4725  TBAAInfo);
4726  } else {
4727  assert((Info.LHS || Info.RHS) &&
4728  "both operands of glvalue conditional are throw-expressions?");
4729  return Info.LHS ? *Info.LHS : *Info.RHS;
4730  }
4731 }
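// Illustrative sketch (not part of the file): when both arms yield an
// address, the PHI above lets an assignment through the conditional work,
// e.g. for hypothetical "int x, y; bool c;":
//
//   (c ? x : y) = 0;
//
// which lowers to roughly
//   %cond-lvalue = phi i32* [ %x.addr, %cond.true ], [ %y.addr, %cond.false ]
// followed by a store through the merged pointer.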
4732 
4733 /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
4734 /// type. If the cast is to a reference, we can have the usual lvalue result;
4735 /// otherwise, if a cast is needed by the code generator in an lvalue context,
4736 /// it must mean that we need the address of an aggregate in order to
4737 /// access one of its members. This can happen for all the reasons that casts
4738 /// are permitted with aggregate result, including no-op aggregate casts and
4739 /// casts from scalar to union.
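/// For instance (an illustrative example, not from the original comment),
/// with hypothetical classes Base and Derived:
///   static_cast<Base &>(derived).member = 0;   // cast to reference: lvalue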
4740 LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
4741   switch (E->getCastKind()) {
4742  case CK_ToVoid:
4743  case CK_BitCast:
4744  case CK_LValueToRValueBitCast:
4745  case CK_ArrayToPointerDecay:
4746  case CK_FunctionToPointerDecay:
4747  case CK_NullToMemberPointer:
4748  case CK_NullToPointer:
4749  case CK_IntegralToPointer:
4750  case CK_PointerToIntegral:
4751  case CK_PointerToBoolean:
4752  case CK_VectorSplat:
4753  case CK_IntegralCast:
4754  case CK_BooleanToSignedIntegral:
4755  case CK_IntegralToBoolean:
4756  case CK_IntegralToFloating:
4757  case CK_FloatingToIntegral:
4758  case CK_FloatingToBoolean:
4759  case CK_FloatingCast:
4760  case CK_FloatingRealToComplex:
4761  case CK_FloatingComplexToReal:
4762  case CK_FloatingComplexToBoolean:
4763  case CK_FloatingComplexCast:
4764  case CK_FloatingComplexToIntegralComplex:
4765  case CK_IntegralRealToComplex:
4766  case CK_IntegralComplexToReal:
4767  case CK_IntegralComplexToBoolean:
4768  case CK_IntegralComplexCast:
4769  case CK_IntegralComplexToFloatingComplex:
4770  case CK_DerivedToBaseMemberPointer:
4771  case CK_BaseToDerivedMemberPointer:
4772  case CK_MemberPointerToBoolean:
4773  case CK_ReinterpretMemberPointer:
4774  case CK_AnyPointerToBlockPointerCast:
4775  case CK_ARCProduceObject:
4776  case CK_ARCConsumeObject:
4777  case CK_ARCReclaimReturnedObject:
4778  case CK_ARCExtendBlockObject:
4779  case CK_CopyAndAutoreleaseBlockObject:
4780  case CK_IntToOCLSampler:
4781  case CK_FloatingToFixedPoint:
4782  case CK_FixedPointToFloating:
4783  case CK_FixedPointCast:
4784  case CK_FixedPointToBoolean:
4785  case CK_FixedPointToIntegral:
4786  case CK_IntegralToFixedPoint:
4787  case CK_MatrixCast:
4788  return EmitUnsupportedLValue(E, "unexpected cast lvalue");
4789 
4790  case CK_Dependent:
4791  llvm_unreachable("dependent cast kind in IR gen!");
4792 
4793  case CK_BuiltinFnToFnPtr:
4794  llvm_unreachable("builtin functions are handled elsewhere");
4795 
4796  // These are never l-values; just use the aggregate emission code.
4797  case CK_NonAtomicToAtomic:
4798  case CK_AtomicToNonAtomic:
4799  return EmitAggExprToLValue(E);
4800 
4801  case CK_Dynamic: {
4802  LValue LV = EmitLValue(E->getSubExpr());
4803  Address V = LV.getAddress(*this);
4804  const auto *DCE = cast<CXXDynamicCastExpr>(E);
4805     return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType());
4806   }
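  // Illustrative sketch (not part of the file): CK_Dynamic covers a
  // dynamic_cast used in an lvalue context, e.g. with hypothetical
  // polymorphic classes Base/Derived:
  //
  //   dynamic_cast<Derived &>(baseRef).field = 1;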
4807