clang  9.0.0svn
CGExprScalar.cpp
1 //===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGCXXABI.h"
14 #include "CGCleanup.h"
15 #include "CGDebugInfo.h"
16 #include "CGObjCRuntime.h"
17 #include "CodeGenFunction.h"
18 #include "CodeGenModule.h"
19 #include "TargetInfo.h"
20 #include "clang/AST/ASTContext.h"
21 #include "clang/AST/DeclObjC.h"
22 #include "clang/AST/Expr.h"
23 #include "clang/AST/RecordLayout.h"
24 #include "clang/AST/StmtVisitor.h"
25 #include "clang/Basic/CodeGenOptions.h"
26 #include "clang/Basic/FixedPoint.h"
27 #include "clang/Basic/TargetInfo.h"
28 #include "llvm/ADT/Optional.h"
29 #include "llvm/IR/CFG.h"
30 #include "llvm/IR/Constants.h"
31 #include "llvm/IR/DataLayout.h"
32 #include "llvm/IR/Function.h"
33 #include "llvm/IR/GetElementPtrTypeIterator.h"
34 #include "llvm/IR/GlobalVariable.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/Module.h"
37 #include <cstdarg>
38 
39 using namespace clang;
40 using namespace CodeGen;
41 using llvm::Value;
42 
43 //===----------------------------------------------------------------------===//
44 // Scalar Expression Emitter
45 //===----------------------------------------------------------------------===//
46 
47 namespace {
48 
49 /// Determine whether the given binary operation may overflow.
50 /// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
51 /// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
52 /// the returned overflow check is precise. The returned value is 'true' for
53 /// all other opcodes, to be conservative.
54 bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
55  BinaryOperator::Opcode Opcode, bool Signed,
56  llvm::APInt &Result) {
57  // Assume overflow is possible, unless we can prove otherwise.
58  bool Overflow = true;
59  const auto &LHSAP = LHS->getValue();
60  const auto &RHSAP = RHS->getValue();
61  if (Opcode == BO_Add) {
62  if (Signed)
63  Result = LHSAP.sadd_ov(RHSAP, Overflow);
64  else
65  Result = LHSAP.uadd_ov(RHSAP, Overflow);
66  } else if (Opcode == BO_Sub) {
67  if (Signed)
68  Result = LHSAP.ssub_ov(RHSAP, Overflow);
69  else
70  Result = LHSAP.usub_ov(RHSAP, Overflow);
71  } else if (Opcode == BO_Mul) {
72  if (Signed)
73  Result = LHSAP.smul_ov(RHSAP, Overflow);
74  else
75  Result = LHSAP.umul_ov(RHSAP, Overflow);
76  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
77  if (Signed && !RHS->isZero())
78  Result = LHSAP.sdiv_ov(RHSAP, Overflow);
79  else
80  return false;
81  }
82  return Overflow;
83 }
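// For example, with 8-bit operands and BO_Add: signed 100 + 100 does not fit in
// [-128, 127], so sadd_ov reports overflow and this returns true, while unsigned
// 100 + 100 == 200 still fits in 8 bits, so uadd_ov leaves Overflow false.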
84 
85 struct BinOpInfo {
86  Value *LHS;
87  Value *RHS;
88  QualType Ty; // Computation Type.
89  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
90  FPOptions FPFeatures;
91  const Expr *E; // Entire expr, for error unsupported. May not be binop.
92 
93  /// Check if the binop can result in integer overflow.
94  bool mayHaveIntegerOverflow() const {
95  // Without constant input, we can't rule out overflow.
96  auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
97  auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
98  if (!LHSCI || !RHSCI)
99  return true;
100 
101  llvm::APInt Result;
102  return ::mayHaveIntegerOverflow(
103  LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
104  }
105 
106  /// Check if the binop computes a division or a remainder.
107  bool isDivremOp() const {
108  return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
109  Opcode == BO_RemAssign;
110  }
111 
112  /// Check if the binop can result in an integer division by zero.
113  bool mayHaveIntegerDivisionByZero() const {
114  if (isDivremOp())
115  if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
116  return CI->isZero();
117  return true;
118  }
119 
120  /// Check if the binop can result in a float division by zero.
121  bool mayHaveFloatDivisionByZero() const {
122  if (isDivremOp())
123  if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
124  return CFP->isZero();
125  return true;
126  }
127 
128  /// Check if either operand is a fixed point type or integer type, with at
129  /// least one being a fixed point type. In any case, this
130  /// operation did not follow usual arithmetic conversion and both operands may
131  /// not be the same.
132  bool isFixedPointBinOp() const {
133  // We cannot simply check the result type since comparison operations return
134  // an int.
135  if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
136  QualType LHSType = BinOp->getLHS()->getType();
137  QualType RHSType = BinOp->getRHS()->getType();
138  return LHSType->isFixedPointType() || RHSType->isFixedPointType();
139  }
140  return false;
141  }
142 };
143 
144 static bool MustVisitNullValue(const Expr *E) {
145  // If a null pointer expression's type is the C++0x nullptr_t, then
146  // it's not necessarily a simple constant and it must be evaluated
147  // for its potential side effects.
148  return E->getType()->isNullPtrType();
149 }
150 
151 /// If \p E is a widened promoted integer, get its base (unpromoted) type.
152 static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
153  const Expr *E) {
154  const Expr *Base = E->IgnoreImpCasts();
155  if (E == Base)
156  return llvm::None;
157 
158  QualType BaseTy = Base->getType();
159  if (!BaseTy->isPromotableIntegerType() ||
160  Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
161  return llvm::None;
162 
163  return BaseTy;
164 }
165 
166 /// Check if \p E is a widened promoted integer.
167 static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
168  return getUnwidenedIntegerType(Ctx, E).hasValue();
169 }
170 
171 /// Check if we can skip the overflow check for \p Op.
172 static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
173  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
174  "Expected a unary or binary operator");
175 
176  // If the binop has constant inputs and we can prove there is no overflow,
177  // we can elide the overflow check.
178  if (!Op.mayHaveIntegerOverflow())
179  return true;
180 
181  // If a unary op has a widened operand, the op cannot overflow.
182  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
183  return !UO->canOverflow();
184 
185  // We usually don't need overflow checks for binops with widened operands.
186  // Multiplication with promoted unsigned operands is a special case.
187  const auto *BO = cast<BinaryOperator>(Op.E);
188  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
189  if (!OptionalLHSTy)
190  return false;
191 
192  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
193  if (!OptionalRHSTy)
194  return false;
195 
196  QualType LHSTy = *OptionalLHSTy;
197  QualType RHSTy = *OptionalRHSTy;
198 
199  // This is the simple case: binops without unsigned multiplication, and with
200  // widened operands. No overflow check is needed here.
201  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
202  !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
203  return true;
204 
205  // For unsigned multiplication the overflow check can be elided if either one
206  // of the unpromoted types are less than half the size of the promoted type.
207  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
208  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
209  (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
210 }
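// Worked example of the unsigned-multiply rule above: two 'unsigned char'
// operands (8 bits) promoted to a 32-bit 'int' satisfy 2*8 < 32, so their
// product always fits and the check is elided; for 'unsigned short' (16 bits),
// 2*16 is not < 32 (e.g. 65535 * 65535 exceeds INT_MAX), so the check remains.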
211 
212 /// Update the FastMathFlags of LLVM IR from the FPOptions in LangOptions.
213 static void updateFastMathFlags(llvm::FastMathFlags &FMF,
214  FPOptions FPFeatures) {
215  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
216 }
217 
218 /// Propagate fast-math flags from \p Op to the instruction in \p V.
219 static Value *propagateFMFlags(Value *V, const BinOpInfo &Op) {
220  if (auto *I = dyn_cast<llvm::Instruction>(V)) {
221  llvm::FastMathFlags FMF = I->getFastMathFlags();
222  updateFastMathFlags(FMF, Op.FPFeatures);
223  I->setFastMathFlags(FMF);
224  }
225  return V;
226 }
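// Sketch: under -ffp-contract=fast, allowFPContractAcrossStatement() is true, so
// a freshly created fmul picks up the 'contract' fast-math flag here, letting the
// backend fuse it with a following fadd into an fma.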
227 
228 class ScalarExprEmitter
229  : public StmtVisitor<ScalarExprEmitter, Value*> {
230  CodeGenFunction &CGF;
231  CGBuilderTy &Builder;
232  bool IgnoreResultAssign;
233  llvm::LLVMContext &VMContext;
234 public:
235 
236  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
237  : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
238  VMContext(cgf.getLLVMContext()) {
239  }
240 
241  //===--------------------------------------------------------------------===//
242  // Utilities
243  //===--------------------------------------------------------------------===//
244 
245  bool TestAndClearIgnoreResultAssign() {
246  bool I = IgnoreResultAssign;
247  IgnoreResultAssign = false;
248  return I;
249  }
250 
251  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
252  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
253  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
254  return CGF.EmitCheckedLValue(E, TCK);
255  }
256 
257  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
258  const BinOpInfo &Info);
259 
260  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
261  return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
262  }
263 
264  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
265  const AlignValueAttr *AVAttr = nullptr;
266  if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
267  const ValueDecl *VD = DRE->getDecl();
268 
269  if (VD->getType()->isReferenceType()) {
270  if (const auto *TTy =
271  dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
272  AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
273  } else {
274  // Assumptions for function parameters are emitted at the start of the
275  // function, so there is no need to repeat that here,
276  // unless the alignment-assumption sanitizer is enabled,
277  // then we prefer the assumption over alignment attribute
278  // on IR function param.
279  if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
280  return;
281 
282  AVAttr = VD->getAttr<AlignValueAttr>();
283  }
284  }
285 
286  if (!AVAttr)
287  if (const auto *TTy =
288  dyn_cast<TypedefType>(E->getType()))
289  AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
290 
291  if (!AVAttr)
292  return;
293 
294  Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
295  llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
296  CGF.EmitAlignmentAssumption(V, E, AVAttr->getLocation(),
297  AlignmentCI->getZExtValue());
298  }
299 
300  /// EmitLoadOfLValue - Given an expression with complex type that represents a
301  /// value l-value, this method emits the address of the l-value, then loads
302  /// and returns the result.
303  Value *EmitLoadOfLValue(const Expr *E) {
304  Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
305  E->getExprLoc());
306 
307  EmitLValueAlignmentAssumption(E, V);
308  return V;
309  }
310 
311  /// EmitConversionToBool - Convert the specified expression value to a
312  /// boolean (i1) truth value. This is equivalent to "Val != 0".
313  Value *EmitConversionToBool(Value *Src, QualType DstTy);
314 
315  /// Emit a check that a conversion to or from a floating-point type does not
316  /// overflow.
317  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
318  Value *Src, QualType SrcType, QualType DstType,
319  llvm::Type *DstTy, SourceLocation Loc);
320 
321  /// Known implicit conversion check kinds.
322  /// Keep in sync with the enum of the same name in ubsan_handlers.h
323  enum ImplicitConversionCheckKind : unsigned char {
324  ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
325  ICCK_UnsignedIntegerTruncation = 1,
326  ICCK_SignedIntegerTruncation = 2,
327  ICCK_IntegerSignChange = 3,
328  ICCK_SignedIntegerTruncationOrSignChange = 4,
329  };
330 
331  /// Emit a check that an [implicit] truncation of an integer does not
332  /// discard any bits. It is not UB, so we use the value after truncation.
333  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
334  QualType DstType, SourceLocation Loc);
335 
336  /// Emit a check that an [implicit] conversion of an integer does not change
337  /// the sign of the value. It is not UB, so we use the value after conversion.
338  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
339  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
340  QualType DstType, SourceLocation Loc);
341 
342  /// Emit a conversion from the specified type to the specified destination
343  /// type, both of which are LLVM scalar types.
344  struct ScalarConversionOpts {
345  bool TreatBooleanAsSigned;
346  bool EmitImplicitIntegerTruncationChecks;
347  bool EmitImplicitIntegerSignChangeChecks;
348 
349  ScalarConversionOpts()
350  : TreatBooleanAsSigned(false),
351  EmitImplicitIntegerTruncationChecks(false),
352  EmitImplicitIntegerSignChangeChecks(false) {}
353 
354  ScalarConversionOpts(clang::SanitizerSet SanOpts)
355  : TreatBooleanAsSigned(false),
356  EmitImplicitIntegerTruncationChecks(
357  SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
358  EmitImplicitIntegerSignChangeChecks(
359  SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
360  };
361  Value *
362  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
363  SourceLocation Loc,
364  ScalarConversionOpts Opts = ScalarConversionOpts());
365 
366  /// Convert between either a fixed point and other fixed point or fixed point
367  /// and an integer.
368  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
369  SourceLocation Loc);
370  Value *EmitFixedPointConversion(Value *Src, FixedPointSemantics &SrcFixedSema,
371  FixedPointSemantics &DstFixedSema,
372  SourceLocation Loc,
373  bool DstIsInteger = false);
374 
375  /// Emit a conversion from the specified complex type to the specified
376  /// destination type, where the destination type is an LLVM scalar type.
377  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
378  QualType SrcTy, QualType DstTy,
379  SourceLocation Loc);
380 
381  /// EmitNullValue - Emit a value that corresponds to null for the given type.
382  Value *EmitNullValue(QualType Ty);
383 
384  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
385  Value *EmitFloatToBoolConversion(Value *V) {
386  // Compare against 0.0 for fp scalars.
387  llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
388  return Builder.CreateFCmpUNE(V, Zero, "tobool");
389  }
390 
391  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
392  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
393  Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
394 
395  return Builder.CreateICmpNE(V, Zero, "tobool");
396  }
397 
398  Value *EmitIntToBoolConversion(Value *V) {
399  // Because of the type rules of C, we often end up computing a
400  // logical value, then zero extending it to int, then wanting it
401  // as a logical value again. Optimize this common case.
402  if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
403  if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
404  Value *Result = ZI->getOperand(0);
405  // If there aren't any more uses, zap the instruction to save space.
406  // Note that there can be more uses, for example if this
407  // is the result of an assignment.
408  if (ZI->use_empty())
409  ZI->eraseFromParent();
410  return Result;
411  }
412  }
413 
414  return Builder.CreateIsNotNull(V, "tobool");
415  }
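// E.g. in C, 'if (a == b)' first produces an i1 from the comparison, zero-extends
// it to 'int' per the language rules, and then needs an i1 again for the branch;
// the peephole above reuses the original i1 instead of emitting another compare.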
416 
417  //===--------------------------------------------------------------------===//
418  // Visitor Methods
419  //===--------------------------------------------------------------------===//
420 
421  Value *Visit(Expr *E) {
422  ApplyDebugLocation DL(CGF, E);
423  return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
424  }
425 
426  Value *VisitStmt(Stmt *S) {
427  S->dump(CGF.getContext().getSourceManager());
428  llvm_unreachable("Stmt can't have complex result type!");
429  }
430  Value *VisitExpr(Expr *S);
431 
432  Value *VisitConstantExpr(ConstantExpr *E) {
433  return Visit(E->getSubExpr());
434  }
435  Value *VisitParenExpr(ParenExpr *PE) {
436  return Visit(PE->getSubExpr());
437  }
438  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
439  return Visit(E->getReplacement());
440  }
441  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
442  return Visit(GE->getResultExpr());
443  }
444  Value *VisitCoawaitExpr(CoawaitExpr *S) {
445  return CGF.EmitCoawaitExpr(*S).getScalarVal();
446  }
447  Value *VisitCoyieldExpr(CoyieldExpr *S) {
448  return CGF.EmitCoyieldExpr(*S).getScalarVal();
449  }
450  Value *VisitUnaryCoawait(const UnaryOperator *E) {
451  return Visit(E->getSubExpr());
452  }
453 
454  // Leaves.
455  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
456  return Builder.getInt(E->getValue());
457  }
458  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
459  return Builder.getInt(E->getValue());
460  }
461  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
462  return llvm::ConstantFP::get(VMContext, E->getValue());
463  }
464  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
465  return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
466  }
467  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
468  return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
469  }
470  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
471  return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
472  }
473  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
474  return EmitNullValue(E->getType());
475  }
476  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
477  return EmitNullValue(E->getType());
478  }
479  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
480  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
481  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
482  llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
483  return Builder.CreateBitCast(V, ConvertType(E->getType()));
484  }
485 
486  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
487  return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
488  }
489 
490  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
491  return CGF.EmitPseudoObjectRValue(E).getScalarVal();
492  }
493 
494  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
495  if (E->isGLValue())
496  return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
497  E->getExprLoc());
498 
499  // Otherwise, assume the mapping is the scalar directly.
500  return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
501  }
502 
503  // l-values.
504  Value *VisitDeclRefExpr(DeclRefExpr *E) {
505  if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
506  return CGF.emitScalarConstant(Constant, E);
507  return EmitLoadOfLValue(E);
508  }
509 
510  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
511  return CGF.EmitObjCSelectorExpr(E);
512  }
513  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
514  return CGF.EmitObjCProtocolExpr(E);
515  }
516  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
517  return EmitLoadOfLValue(E);
518  }
519  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
520  if (E->getMethodDecl() &&
521  E->getMethodDecl()->getReturnType()->isReferenceType())
522  return EmitLoadOfLValue(E);
523  return CGF.EmitObjCMessageExpr(E).getScalarVal();
524  }
525 
526  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
527  LValue LV = CGF.EmitObjCIsaExpr(E);
528  Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
529  return V;
530  }
531 
532  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
533  VersionTuple Version = E->getVersion();
534 
535  // If we're checking for a platform older than our minimum deployment
536  // target, we can fold the check away.
537  if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
538  return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
539 
540  Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
541  llvm::Value *Args[] = {
542  llvm::ConstantInt::get(CGF.CGM.Int32Ty, Version.getMajor()),
543  llvm::ConstantInt::get(CGF.CGM.Int32Ty, Min ? *Min : 0),
544  llvm::ConstantInt::get(CGF.CGM.Int32Ty, SMin ? *SMin : 0),
545  };
546 
547  return CGF.EmitBuiltinAvailable(Args);
548  }
549 
550  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
551  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
552  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
553  Value *VisitMemberExpr(MemberExpr *E);
554  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
555  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
556  return EmitLoadOfLValue(E);
557  }
558 
559  Value *VisitInitListExpr(InitListExpr *E);
560 
561  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
562  assert(CGF.getArrayInitIndex() &&
563  "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
564  return CGF.getArrayInitIndex();
565  }
566 
567  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
568  return EmitNullValue(E->getType());
569  }
570  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
571  CGF.CGM.EmitExplicitCastExprType(E, &CGF);
572  return VisitCastExpr(E);
573  }
574  Value *VisitCastExpr(CastExpr *E);
575 
576  Value *VisitCallExpr(const CallExpr *E) {
577  if (E->getCallReturnType(CGF.getContext())->isReferenceType())
578  return EmitLoadOfLValue(E);
579 
580  Value *V = CGF.EmitCallExpr(E).getScalarVal();
581 
582  EmitLValueAlignmentAssumption(E, V);
583  return V;
584  }
585 
586  Value *VisitStmtExpr(const StmtExpr *E);
587 
588  // Unary Operators.
589  Value *VisitUnaryPostDec(const UnaryOperator *E) {
590  LValue LV = EmitLValue(E->getSubExpr());
591  return EmitScalarPrePostIncDec(E, LV, false, false);
592  }
593  Value *VisitUnaryPostInc(const UnaryOperator *E) {
594  LValue LV = EmitLValue(E->getSubExpr());
595  return EmitScalarPrePostIncDec(E, LV, true, false);
596  }
597  Value *VisitUnaryPreDec(const UnaryOperator *E) {
598  LValue LV = EmitLValue(E->getSubExpr());
599  return EmitScalarPrePostIncDec(E, LV, false, true);
600  }
601  Value *VisitUnaryPreInc(const UnaryOperator *E) {
602  LValue LV = EmitLValue(E->getSubExpr());
603  return EmitScalarPrePostIncDec(E, LV, true, true);
604  }
605 
606  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
607  llvm::Value *InVal,
608  bool IsInc);
609 
610  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
611  bool isInc, bool isPre);
612 
613 
614  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
615  if (isa<MemberPointerType>(E->getType())) // never sugared
616  return CGF.CGM.getMemberPointerConstant(E);
617 
618  return EmitLValue(E->getSubExpr()).getPointer();
619  }
620  Value *VisitUnaryDeref(const UnaryOperator *E) {
621  if (E->getType()->isVoidType())
622  return Visit(E->getSubExpr()); // the actual value should be unused
623  return EmitLoadOfLValue(E);
624  }
625  Value *VisitUnaryPlus(const UnaryOperator *E) {
626  // This differs from gcc, though, most likely due to a bug in gcc.
627  TestAndClearIgnoreResultAssign();
628  return Visit(E->getSubExpr());
629  }
630  Value *VisitUnaryMinus (const UnaryOperator *E);
631  Value *VisitUnaryNot (const UnaryOperator *E);
632  Value *VisitUnaryLNot (const UnaryOperator *E);
633  Value *VisitUnaryReal (const UnaryOperator *E);
634  Value *VisitUnaryImag (const UnaryOperator *E);
635  Value *VisitUnaryExtension(const UnaryOperator *E) {
636  return Visit(E->getSubExpr());
637  }
638 
639  // C++
640  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
641  return EmitLoadOfLValue(E);
642  }
643 
644  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
645  return Visit(DAE->getExpr());
646  }
647  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
648  CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
649  return Visit(DIE->getExpr());
650  }
651  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
652  return CGF.LoadCXXThis();
653  }
654 
655  Value *VisitExprWithCleanups(ExprWithCleanups *E);
656  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
657  return CGF.EmitCXXNewExpr(E);
658  }
659  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
660  CGF.EmitCXXDeleteExpr(E);
661  return nullptr;
662  }
663 
664  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
665  return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
666  }
667 
668  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
669  return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
670  }
671 
672  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
673  return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
674  }
675 
676  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
677  // C++ [expr.pseudo]p1:
678  // The result shall only be used as the operand for the function call
679  // operator (), and the result of such a call has type void. The only
680  // effect is the evaluation of the postfix-expression before the dot or
681  // arrow.
682  CGF.EmitScalarExpr(E->getBase());
683  return nullptr;
684  }
685 
686  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
687  return EmitNullValue(E->getType());
688  }
689 
690  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
691  CGF.EmitCXXThrowExpr(E);
692  return nullptr;
693  }
694 
695  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
696  return Builder.getInt1(E->getValue());
697  }
698 
699  // Binary Operators.
700  Value *EmitMul(const BinOpInfo &Ops) {
701  if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
702  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
703  case LangOptions::SOB_Defined:
704  return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
705  case LangOptions::SOB_Undefined:
706  if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
707  return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
708  LLVM_FALLTHROUGH;
709  case LangOptions::SOB_Trapping:
710  if (CanElideOverflowCheck(CGF.getContext(), Ops))
711  return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
712  return EmitOverflowCheckedBinOp(Ops);
713  }
714  }
715 
716  if (Ops.Ty->isUnsignedIntegerType() &&
717  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
718  !CanElideOverflowCheck(CGF.getContext(), Ops))
719  return EmitOverflowCheckedBinOp(Ops);
720 
721  if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
722  Value *V = Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
723  return propagateFMFlags(V, Ops);
724  }
725  return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
726  }
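// In practice the switch above maps -fwrapv (SOB_Defined) to a plain 'mul', the
// default UB model (SOB_Undefined) to 'mul nsw' unless
// -fsanitize=signed-integer-overflow is enabled, and -ftrapv (SOB_Trapping) to
// the overflow-checked path whenever the check cannot be elided.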
727  /// Create a binary op that checks for overflow.
728  /// Currently only supports +, - and *.
729  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
730 
731  // Check for undefined division and modulus behaviors.
732  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
733  llvm::Value *Zero,bool isDiv);
734  // Common helper for getting how wide LHS of shift is.
735  static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);
736  Value *EmitDiv(const BinOpInfo &Ops);
737  Value *EmitRem(const BinOpInfo &Ops);
738  Value *EmitAdd(const BinOpInfo &Ops);
739  Value *EmitSub(const BinOpInfo &Ops);
740  Value *EmitShl(const BinOpInfo &Ops);
741  Value *EmitShr(const BinOpInfo &Ops);
742  Value *EmitAnd(const BinOpInfo &Ops) {
743  return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
744  }
745  Value *EmitXor(const BinOpInfo &Ops) {
746  return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
747  }
748  Value *EmitOr (const BinOpInfo &Ops) {
749  return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
750  }
751 
752  // Helper functions for fixed point binary operations.
753  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);
754 
755  BinOpInfo EmitBinOps(const BinaryOperator *E);
756  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
757  Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
758  Value *&Result);
759 
760  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
761  Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
762 
763  // Binary operators and binary compound assignment operators.
764 #define HANDLEBINOP(OP) \
765  Value *VisitBin ## OP(const BinaryOperator *E) { \
766  return Emit ## OP(EmitBinOps(E)); \
767  } \
768  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
769  return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
770  }
771  HANDLEBINOP(Mul)
772  HANDLEBINOP(Div)
773  HANDLEBINOP(Rem)
774  HANDLEBINOP(Add)
775  HANDLEBINOP(Sub)
776  HANDLEBINOP(Shl)
777  HANDLEBINOP(Shr)
778  HANDLEBINOP(And)
779  HANDLEBINOP(Xor)
780  HANDLEBINOP(Or)
781 #undef HANDLEBINOP
782 
783  // Comparisons.
784  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
785  llvm::CmpInst::Predicate SICmpOpc,
786  llvm::CmpInst::Predicate FCmpOpc);
787 #define VISITCOMP(CODE, UI, SI, FP) \
788  Value *VisitBin##CODE(const BinaryOperator *E) { \
789  return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
790  llvm::FCmpInst::FP); }
791  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT)
792  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT)
793  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE)
794  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE)
795  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ)
796  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE)
797 #undef VISITCOMP
798 
799  Value *VisitBinAssign (const BinaryOperator *E);
800 
801  Value *VisitBinLAnd (const BinaryOperator *E);
802  Value *VisitBinLOr (const BinaryOperator *E);
803  Value *VisitBinComma (const BinaryOperator *E);
804 
805  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
806  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
807 
808  // Other Operators.
809  Value *VisitBlockExpr(const BlockExpr *BE);
810  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
811  Value *VisitChooseExpr(ChooseExpr *CE);
812  Value *VisitVAArgExpr(VAArgExpr *VE);
813  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
814  return CGF.EmitObjCStringLiteral(E);
815  }
816  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
817  return CGF.EmitObjCBoxedExpr(E);
818  }
819  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
820  return CGF.EmitObjCArrayLiteral(E);
821  }
822  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
823  return CGF.EmitObjCDictionaryLiteral(E);
824  }
825  Value *VisitAsTypeExpr(AsTypeExpr *CE);
826  Value *VisitAtomicExpr(AtomicExpr *AE);
827 };
828 } // end anonymous namespace.
829 
830 //===----------------------------------------------------------------------===//
831 // Utilities
832 //===----------------------------------------------------------------------===//
833 
834 /// EmitConversionToBool - Convert the specified expression value to a
835 /// boolean (i1) truth value. This is equivalent to "Val != 0".
836 Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
837  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
838 
839  if (SrcType->isRealFloatingType())
840  return EmitFloatToBoolConversion(Src);
841 
842  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
843  return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
844 
845  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
846  "Unknown scalar type to convert");
847 
848  if (isa<llvm::IntegerType>(Src->getType()))
849  return EmitIntToBoolConversion(Src);
850 
851  assert(isa<llvm::PointerType>(Src->getType()));
852  return EmitPointerToBoolConversion(Src, SrcType);
853 }
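// Roughly: a floating-point source becomes 'fcmp une %v, 0.0', a member pointer
// is tested for non-null through the C++ ABI, an integer becomes an is-not-null
// compare, and a pointer is compared against the target's null pointer value.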
854 
855 void ScalarExprEmitter::EmitFloatConversionCheck(
856  Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
857  QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
858  CodeGenFunction::SanitizerScope SanScope(&CGF);
859  using llvm::APFloat;
860  using llvm::APSInt;
861 
862  llvm::Type *SrcTy = Src->getType();
863 
864  llvm::Value *Check = nullptr;
865  if (llvm::IntegerType *IntTy = dyn_cast<llvm::IntegerType>(SrcTy)) {
866  // Integer to floating-point. This can fail for unsigned short -> __half
867  // or unsigned __int128 -> float.
868  assert(DstType->isFloatingType());
869  bool SrcIsUnsigned = OrigSrcType->isUnsignedIntegerOrEnumerationType();
870 
871  APFloat LargestFloat =
872  APFloat::getLargest(CGF.getContext().getFloatTypeSemantics(DstType));
873  APSInt LargestInt(IntTy->getBitWidth(), SrcIsUnsigned);
874 
875  bool IsExact;
876  if (LargestFloat.convertToInteger(LargestInt, APFloat::rmTowardZero,
877  &IsExact) != APFloat::opOK)
878  // The range of representable values of this floating point type includes
879  // all values of this integer type. Don't need an overflow check.
880  return;
881 
882  llvm::Value *Max = llvm::ConstantInt::get(VMContext, LargestInt);
883  if (SrcIsUnsigned)
884  Check = Builder.CreateICmpULE(Src, Max);
885  else {
886  llvm::Value *Min = llvm::ConstantInt::get(VMContext, -LargestInt);
887  llvm::Value *GE = Builder.CreateICmpSGE(Src, Min);
888  llvm::Value *LE = Builder.CreateICmpSLE(Src, Max);
889  Check = Builder.CreateAnd(GE, LE);
890  }
891  } else {
892  const llvm::fltSemantics &SrcSema =
893  CGF.getContext().getFloatTypeSemantics(OrigSrcType);
894  if (isa<llvm::IntegerType>(DstTy)) {
895  // Floating-point to integer. This has undefined behavior if the source is
896  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
897  // to an integer).
898  unsigned Width = CGF.getContext().getIntWidth(DstType);
899  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
900 
901  APSInt Min = APSInt::getMinValue(Width, Unsigned);
902  APFloat MinSrc(SrcSema, APFloat::uninitialized);
903  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
904  APFloat::opOverflow)
905  // Don't need an overflow check for lower bound. Just check for
906  // -Inf/NaN.
907  MinSrc = APFloat::getInf(SrcSema, true);
908  else
909  // Find the largest value which is too small to represent (before
910  // truncation toward zero).
911  MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
912 
913  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
914  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
915  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
916  APFloat::opOverflow)
917  // Don't need an overflow check for upper bound. Just check for
918  // +Inf/NaN.
919  MaxSrc = APFloat::getInf(SrcSema, false);
920  else
921  // Find the smallest value which is too large to represent (before
922  // truncation toward zero).
923  MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
924 
925  // If we're converting from __half, convert the range to float to match
926  // the type of src.
927  if (OrigSrcType->isHalfType()) {
928  const llvm::fltSemantics &Sema =
929  CGF.getContext().getFloatTypeSemantics(SrcType);
930  bool IsInexact;
931  MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
932  MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
933  }
934 
935  llvm::Value *GE =
936  Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
937  llvm::Value *LE =
938  Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
939  Check = Builder.CreateAnd(GE, LE);
940  } else {
941  // FIXME: Maybe split this sanitizer out from float-cast-overflow.
942  //
943  // Floating-point to floating-point. This has undefined behavior if the
944  // source is not in the range of representable values of the destination
945  // type. The C and C++ standards are spectacularly unclear here. We
946  // diagnose finite out-of-range conversions, but allow infinities and NaNs
947  // to convert to the corresponding value in the smaller type.
948  //
949  // C11 Annex F gives all such conversions defined behavior for IEC 60559
950  // conforming implementations. Unfortunately, LLVM's fptrunc instruction
951  // does not.
952 
953  // Converting from a lower rank to a higher rank can never have
954  // undefined behavior, since higher-rank types must have a superset
955  // of values of lower-rank types.
956  if (CGF.getContext().getFloatingTypeOrder(OrigSrcType, DstType) != 1)
957  return;
958 
959  assert(!OrigSrcType->isHalfType() &&
960  "should not check conversion from __half, it has the lowest rank");
961 
962  const llvm::fltSemantics &DstSema =
963  CGF.getContext().getFloatTypeSemantics(DstType);
964  APFloat MinBad = APFloat::getLargest(DstSema, false);
965  APFloat MaxBad = APFloat::getInf(DstSema, false);
966 
967  bool IsInexact;
968  MinBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);
969  MaxBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);
970 
971  Value *AbsSrc = CGF.EmitNounwindRuntimeCall(
972  CGF.CGM.getIntrinsic(llvm::Intrinsic::fabs, Src->getType()), Src);
973  llvm::Value *GE =
974  Builder.CreateFCmpOGT(AbsSrc, llvm::ConstantFP::get(VMContext, MinBad));
975  llvm::Value *LE =
976  Builder.CreateFCmpOLT(AbsSrc, llvm::ConstantFP::get(VMContext, MaxBad));
977  Check = Builder.CreateNot(Builder.CreateAnd(GE, LE));
978  }
979  }
980 
981  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
982  CGF.EmitCheckTypeDescriptor(OrigSrcType),
983  CGF.EmitCheckTypeDescriptor(DstType)};
984  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
985  SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
986 }
987 
988 // Should be called within CodeGenFunction::SanitizerScope RAII scope.
989 // Returns 'i1 false' when the truncation Src -> Dst was lossy.
990 static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
991  std::pair<llvm::Value *, SanitizerMask>>
992 EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
993  QualType DstType, CGBuilderTy &Builder) {
994  llvm::Type *SrcTy = Src->getType();
995  llvm::Type *DstTy = Dst->getType();
996  (void)DstTy; // Only used in assert()
997 
998  // This should be truncation of integral types.
999  assert(Src != Dst);
1000  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
1001  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1002  "non-integer llvm type");
1003 
1004  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1005  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1006 
1007  // If both (src and dst) types are unsigned, then it's an unsigned truncation.
1008  // Else, it is a signed truncation.
1009  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1010  SanitizerMask Mask;
1011  if (!SrcSigned && !DstSigned) {
1012  Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1013  Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
1014  } else {
1015  Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1016  Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
1017  }
1018 
1019  llvm::Value *Check = nullptr;
1020  // 1. Extend the truncated value back to the same width as the Src.
1021  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
1022  // 2. Equality-compare with the original source value
1023  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1024  // If the comparison result is 'i1 false', then the truncation was lossy.
1025  return std::make_pair(Kind, std::make_pair(Check, Mask));
1026 }
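// Sketch of the emitted check for an i32 -> i16 truncation (names follow the
// CreateIntCast/CreateICmpEQ calls above):
//   %anyext    = sext/zext i16 %dst to i32
//   %truncheck = icmp eq i32 %anyext, %src   ; i1 false => bits were lost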
1027 
1028 void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1029  Value *Dst, QualType DstType,
1030  SourceLocation Loc) {
1031  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
1032  return;
1033 
1034  // We only care about int->int conversions here.
1035  // We ignore conversions to/from pointer and/or bool.
1036  if (!(SrcType->isIntegerType() && DstType->isIntegerType()))
1037  return;
1038 
1039  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1040  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1041  // This must be truncation. Else we do not care.
1042  if (SrcBits <= DstBits)
1043  return;
1044 
1045  assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1046 
1047  // If the integer sign change sanitizer is enabled,
1048  // and we are truncating from larger unsigned type to smaller signed type,
1049  // let that next sanitizer deal with it.
1050  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1051  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1052  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1053  (!SrcSigned && DstSigned))
1054  return;
1055 
1056  CodeGenFunction::SanitizerScope SanScope(&CGF);
1057 
1058  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1059  std::pair<llvm::Value *, SanitizerMask>>
1060  Check =
1061  EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1062  // If the comparison result is 'i1 false', then the truncation was lossy.
1063 
1064  // Do we care about this type of truncation?
1065  if (!CGF.SanOpts.has(Check.second.second))
1066  return;
1067 
1068  llvm::Constant *StaticArgs[] = {
1069  CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1070  CGF.EmitCheckTypeDescriptor(DstType),
1071  llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
1072  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
1073  {Src, Dst});
1074 }
1075 
1076 // Should be called within CodeGenFunction::SanitizerScope RAII scope.
1077 // Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1078 static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1079  std::pair<llvm::Value *, SanitizerMask>>
1080 EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1081  QualType DstType, CGBuilderTy &Builder) {
1082  llvm::Type *SrcTy = Src->getType();
1083  llvm::Type *DstTy = Dst->getType();
1084 
1085  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1086  "non-integer llvm type");
1087 
1088  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1089  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1090  (void)SrcSigned; // Only used in assert()
1091  (void)DstSigned; // Only used in assert()
1092  unsigned SrcBits = SrcTy->getScalarSizeInBits();
1093  unsigned DstBits = DstTy->getScalarSizeInBits();
1094  (void)SrcBits; // Only used in assert()
1095  (void)DstBits; // Only used in assert()
1096 
1097  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1098  "either the widths should be different, or the signednesses.");
1099 
1100  // NOTE: zero value is considered to be non-negative.
1101  auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
1102  const char *Name) -> Value * {
1103  // Is this value a signed type?
1104  bool VSigned = VType->isSignedIntegerOrEnumerationType();
1105  llvm::Type *VTy = V->getType();
1106  if (!VSigned) {
1107  // If the value is unsigned, then it is never negative.
1108  // FIXME: can we encounter non-scalar VTy here?
1109  return llvm::ConstantInt::getFalse(VTy->getContext());
1110  }
1111  // Get the zero of the same type with which we will be comparing.
1112  llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1113  // %V.isnegative = icmp slt %V, 0
1114  // I.e is %V *strictly* less than zero, does it have negative value?
1115  return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1116  llvm::Twine(Name) + "." + V->getName() +
1117  ".negativitycheck");
1118  };
1119 
1120  // 1. Was the old Value negative?
1121  llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
1122  // 2. Is the new Value negative?
1123  llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
1124  // 3. Now, was the 'negativity status' preserved during the conversion?
1125  // NOTE: conversion from negative to zero is considered to change the sign.
1126  // (We want to get 'false' when the conversion changed the sign)
1127  // So we should just equality-compare the negativity statuses.
1128  llvm::Value *Check = nullptr;
1129  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1130  // If the comparison result is 'false', then the conversion changed the sign.
1131  return std::make_pair(
1132  ScalarExprEmitter::ICCK_IntegerSignChange,
1133  std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
1134 }
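// Sketch for a signed i32 -> unsigned i32 conversion: the source side emits
//   %src.<name>.negativitycheck = icmp slt i32 %src, 0
// the unsigned destination folds to 'false', and 'signchangecheck' equality-
// compares the two, yielding i1 false exactly when a negative value changed sign.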
1135 
1136 void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1137  Value *Dst, QualType DstType,
1138  SourceLocation Loc) {
1139  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
1140  return;
1141 
1142  llvm::Type *SrcTy = Src->getType();
1143  llvm::Type *DstTy = Dst->getType();
1144 
1145  // We only care about int->int conversions here.
1146  // We ignore conversions to/from pointer and/or bool.
1147  if (!(SrcType->isIntegerType() && DstType->isIntegerType()))
1148  return;
1149 
1150  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1151  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1152  unsigned SrcBits = SrcTy->getScalarSizeInBits();
1153  unsigned DstBits = DstTy->getScalarSizeInBits();
1154 
1155  // Now, we do not need to emit the check in *all* of the cases.
1156  // We can avoid emitting it in some obvious cases where it would have been
1157  // dropped by the opt passes (instcombine) always anyways.
1158  // If it's a cast between effectively the same type, no check.
1159  // NOTE: this is *not* equivalent to checking the canonical types.
1160  if (SrcSigned == DstSigned && SrcBits == DstBits)
1161  return;
1162  // At least one of the values needs to have signed type.
1163  // If both are unsigned, then obviously, neither of them can be negative.
1164  if (!SrcSigned && !DstSigned)
1165  return;
1166  // If the conversion is to *larger* *signed* type, then no check is needed.
1167  // Because either sign-extension happens (so the sign will remain),
1168  // or zero-extension will happen (the sign bit will be zero.)
1169  if ((DstBits > SrcBits) && DstSigned)
1170  return;
1171  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1172  (SrcBits > DstBits) && SrcSigned) {
1173  // If the signed integer truncation sanitizer is enabled,
1174  // and this is a truncation from signed type, then no check is needed.
1175  // Because here sign change check is interchangeable with truncation check.
1176  return;
1177  }
1178  // That's it. We can't rule out any more cases with the data we have.
1179 
1180  CodeGenFunction::SanitizerScope SanScope(&CGF);
1181 
1182  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1183  std::pair<llvm::Value *, SanitizerMask>>
1184  Check;
1185 
1186  // Each of these checks needs to return 'false' when an issue was detected.
1187  ImplicitConversionCheckKind CheckKind;
1188  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
1189  // So we can 'and' all the checks together, and still get 'false',
1190  // if at least one of the checks detected an issue.
1191 
1192  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1193  CheckKind = Check.first;
1194  Checks.emplace_back(Check.second);
1195 
1196  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1197  (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1198  // If the signed integer truncation sanitizer was enabled,
1199  // and we are truncating from larger unsigned type to smaller signed type,
1200  // let's handle the case we skipped in that check.
1201  Check =
1202  EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1203  CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1204  Checks.emplace_back(Check.second);
1205  // If the comparison result is 'i1 false', then the truncation was lossy.
1206  }
1207 
1208  llvm::Constant *StaticArgs[] = {
1209  CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1210  CGF.EmitCheckTypeDescriptor(DstType),
1211  llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
1212  // EmitCheck() will 'and' all the checks together.
1213  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
1214  {Src, Dst});
1215 }
1216 
1217 /// Emit a conversion from the specified type to the specified destination type,
1218 /// both of which are LLVM scalar types.
1219 Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1220  QualType DstType,
1221  SourceLocation Loc,
1222  ScalarConversionOpts Opts) {
1223  // All conversions involving fixed point types should be handled by the
1224  // EmitFixedPoint family functions. This is done to prevent bloating up this
1225  // function more, and although fixed point numbers are represented by
1226  // integers, we do not want to follow any logic that assumes they should be
1227  // treated as integers.
1228  // TODO(leonardchan): When necessary, add another if statement checking for
1229  // conversions to fixed point types from other types.
1230  if (SrcType->isFixedPointType()) {
1231  if (DstType->isBooleanType())
1232  // It is important that we check this before checking if the dest type is
1233  // an integer because booleans are technically integer types.
1234  // We do not need to check the padding bit on unsigned types if unsigned
1235  // padding is enabled because overflow into this bit is undefined
1236  // behavior.
1237  return Builder.CreateIsNotNull(Src, "tobool");
1238  if (DstType->isFixedPointType() || DstType->isIntegerType())
1239  return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1240 
1241  llvm_unreachable(
1242  "Unhandled scalar conversion from a fixed point type to another type.");
1243  } else if (DstType->isFixedPointType()) {
1244  if (SrcType->isIntegerType())
1245  // This also includes converting booleans and enums to fixed point types.
1246  return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1247 
1248  llvm_unreachable(
1249  "Unhandled scalar conversion to a fixed point type from another type.");
1250  }
1251 
1252  QualType NoncanonicalSrcType = SrcType;
1253  QualType NoncanonicalDstType = DstType;
1254 
1255  SrcType = CGF.getContext().getCanonicalType(SrcType);
1256  DstType = CGF.getContext().getCanonicalType(DstType);
1257  if (SrcType == DstType) return Src;
1258 
1259  if (DstType->isVoidType()) return nullptr;
1260 
1261  llvm::Value *OrigSrc = Src;
1262  QualType OrigSrcType = SrcType;
1263  llvm::Type *SrcTy = Src->getType();
1264 
1265  // Handle conversions to bool first, they are special: comparisons against 0.
1266  if (DstType->isBooleanType())
1267  return EmitConversionToBool(Src, SrcType);
1268 
1269  llvm::Type *DstTy = ConvertType(DstType);
1270 
1271  // Cast from half through float if half isn't a native type.
1272  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1273  // Cast to FP using the intrinsic if the half type itself isn't supported.
1274  if (DstTy->isFloatingPointTy()) {
1275  if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1276  return Builder.CreateCall(
1277  CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1278  Src);
1279  } else {
1280  // Cast to other types through float, using either the intrinsic or FPExt,
1281  // depending on whether the half type itself is supported
1282  // (as opposed to operations on half, available with NativeHalfType).
1283  if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1284  Src = Builder.CreateCall(
1285  CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1286  CGF.CGM.FloatTy),
1287  Src);
1288  } else {
1289  Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1290  }
1291  SrcType = CGF.getContext().FloatTy;
1292  SrcTy = CGF.FloatTy;
1293  }
1294  }
1295 
1296  // Ignore conversions like int -> uint.
1297  if (SrcTy == DstTy) {
1298  if (Opts.EmitImplicitIntegerSignChangeChecks)
1299  EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1300  NoncanonicalDstType, Loc);
1301 
1302  return Src;
1303  }
1304 
1305  // Handle pointer conversions next: pointers can only be converted to/from
1306  // other pointers and integers. Check for pointer types in terms of LLVM, as
1307  // some native types (like Obj-C id) may map to a pointer type.
1308  if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1309  // The source value may be an integer, or a pointer.
1310  if (isa<llvm::PointerType>(SrcTy))
1311  return Builder.CreateBitCast(Src, DstTy, "conv");
1312 
1313  assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1314  // First, convert to the correct width so that we control the kind of
1315  // extension.
1316  llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1317  bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1318  llvm::Value* IntResult =
1319  Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1320  // Then, cast to pointer.
1321  return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1322  }
1323 
1324  if (isa<llvm::PointerType>(SrcTy)) {
1325  // Must be an ptr to int cast.
1326  assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1327  return Builder.CreatePtrToInt(Src, DstTy, "conv");
1328  }
1329 
1330  // A scalar can be splatted to an extended vector of the same element type
1331  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1332  // Sema should add casts to make sure that the source expression's type is
1333  // the same as the vector's element type (sans qualifiers)
1334  assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1335  SrcType.getTypePtr() &&
1336  "Splatted expr doesn't match with vector element type?");
1337 
1338  // Splat the element across to all elements
1339  unsigned NumElements = DstTy->getVectorNumElements();
1340  return Builder.CreateVectorSplat(NumElements, Src, "splat");
1341  }
1342 
1343  if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1344  // Allow bitcast from vector to integer/fp of the same size.
1345  unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
1346  unsigned DstSize = DstTy->getPrimitiveSizeInBits();
1347  if (SrcSize == DstSize)
1348  return Builder.CreateBitCast(Src, DstTy, "conv");
1349 
1350  // Conversions between vectors of different sizes are not allowed except
1351  // when vectors of half are involved. Operations on storage-only half
1352  // vectors require promoting half vector operands to float vectors and
1353  // truncating the result, which is either an int or float vector, to a
1354  // short or half vector.
1355 
1356  // Source and destination are both expected to be vectors.
1357  llvm::Type *SrcElementTy = SrcTy->getVectorElementType();
1358  llvm::Type *DstElementTy = DstTy->getVectorElementType();
1359  (void)DstElementTy;
1360 
1361  assert(((SrcElementTy->isIntegerTy() &&
1362  DstElementTy->isIntegerTy()) ||
1363  (SrcElementTy->isFloatingPointTy() &&
1364  DstElementTy->isFloatingPointTy())) &&
1365  "unexpected conversion between a floating-point vector and an "
1366  "integer vector");
1367 
1368  // Truncate an i32 vector to an i16 vector.
1369  if (SrcElementTy->isIntegerTy())
1370  return Builder.CreateIntCast(Src, DstTy, false, "conv");
1371 
1372  // Truncate a float vector to a half vector.
1373  if (SrcSize > DstSize)
1374  return Builder.CreateFPTrunc(Src, DstTy, "conv");
1375 
1376  // Promote a half vector to a float vector.
1377  return Builder.CreateFPExt(Src, DstTy, "conv");
1378  }
1379 
1380  // Finally, we have the arithmetic types: real int/float.
1381  Value *Res = nullptr;
1382  llvm::Type *ResTy = DstTy;
1383 
1384  // An overflowing conversion has undefined behavior if either the source type
1385  // or the destination type is a floating-point type.
1386  if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1387  (OrigSrcType->isFloatingType() || DstType->isFloatingType()))
1388  EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1389  Loc);
1390 
1391  // Cast to half through float if half isn't a native type.
1392  if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1393  // Make sure we cast in a single step if from another FP type.
1394  if (SrcTy->isFloatingPointTy()) {
1395  // Use the intrinsic if the half type itself isn't supported
1396  // (as opposed to operations on half, available with NativeHalfType).
1397  if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1398  return Builder.CreateCall(
1399  CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1400  // If the half type is supported, just use an fptrunc.
1401  return Builder.CreateFPTrunc(Src, DstTy);
1402  }
1403  DstTy = CGF.FloatTy;
1404  }
1405 
1406  if (isa<llvm::IntegerType>(SrcTy)) {
1407  bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1408  if (SrcType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1409  InputSigned = true;
1410  }
1411  if (isa<llvm::IntegerType>(DstTy))
1412  Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1413  else if (InputSigned)
1414  Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1415  else
1416  Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1417  } else if (isa<llvm::IntegerType>(DstTy)) {
1418  assert(SrcTy->isFloatingPointTy() && "Unknown real conversion");
1419  if (DstType->isSignedIntegerOrEnumerationType())
1420  Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1421  else
1422  Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1423  } else {
1424  assert(SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() &&
1425  "Unknown real conversion");
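 // LLVM orders its floating-point TypeIDs roughly by increasing width
 // (half < float < double < x86_fp80 < fp128), so a smaller destination
 // TypeID indicates a truncation rather than an extension.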
1426  if (DstTy->getTypeID() < SrcTy->getTypeID())
1427  Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1428  else
1429  Res = Builder.CreateFPExt(Src, DstTy, "conv");
1430  }
1431 
1432  if (DstTy != ResTy) {
1433  if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1434  assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1435  Res = Builder.CreateCall(
1436  CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1437  Res);
1438  } else {
1439  Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1440  }
1441  }
1442 
1443  if (Opts.EmitImplicitIntegerTruncationChecks)
1444  EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1445  NoncanonicalDstType, Loc);
1446 
1447  if (Opts.EmitImplicitIntegerSignChangeChecks)
1448  EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1449  NoncanonicalDstType, Loc);
1450 
1451  return Res;
1452 }
1453 
1454 Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1455  QualType DstTy,
1456  SourceLocation Loc) {
1457  FixedPointSemantics SrcFPSema =
1458  CGF.getContext().getFixedPointSemantics(SrcTy);
1459  FixedPointSemantics DstFPSema =
1460  CGF.getContext().getFixedPointSemantics(DstTy);
1461  return EmitFixedPointConversion(Src, SrcFPSema, DstFPSema, Loc,
1462  DstTy->isIntegerType());
1463 }
1464 
1465 Value *ScalarExprEmitter::EmitFixedPointConversion(
1466  Value *Src, FixedPointSemantics &SrcFPSema, FixedPointSemantics &DstFPSema,
1467  SourceLocation Loc, bool DstIsInteger) {
1468  using llvm::APInt;
1469  using llvm::ConstantInt;
1470  using llvm::Value;
1471 
1472  unsigned SrcWidth = SrcFPSema.getWidth();
1473  unsigned DstWidth = DstFPSema.getWidth();
1474  unsigned SrcScale = SrcFPSema.getScale();
1475  unsigned DstScale = DstFPSema.getScale();
1476  bool SrcIsSigned = SrcFPSema.isSigned();
1477  bool DstIsSigned = DstFPSema.isSigned();
1478 
1479  llvm::Type *DstIntTy = Builder.getIntNTy(DstWidth);
1480 
1481  Value *Result = Src;
1482  unsigned ResultWidth = SrcWidth;
1483 
1484  // Downscale.
1485  if (DstScale < SrcScale) {
1486  // When converting to integers, we round towards zero. For negative numbers,
1487  // right shifting rounds towards negative infinity. In this case, we can
1488  // just round up before shifting.
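 // For example, with SrcScale == 4 and an integer destination, the value -1
 // (representing -1/16) becomes (-1 + 0b1111) >> 4 == 0 instead of
 // (-1) >> 4 == -1, i.e. the truncation rounds towards zero.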
1489  if (DstIsInteger && SrcIsSigned) {
1490  Value *Zero = llvm::Constant::getNullValue(Result->getType());
1491  Value *IsNegative = Builder.CreateICmpSLT(Result, Zero);
1492  Value *LowBits = ConstantInt::get(
1493  CGF.getLLVMContext(), APInt::getLowBitsSet(ResultWidth, SrcScale));
1494  Value *Rounded = Builder.CreateAdd(Result, LowBits);
1495  Result = Builder.CreateSelect(IsNegative, Rounded, Result);
1496  }
1497 
1498  Result = SrcIsSigned
1499  ? Builder.CreateAShr(Result, SrcScale - DstScale, "downscale")
1500  : Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");
1501  }
1502 
1503  if (!DstFPSema.isSaturated()) {
1504  // Resize.
1505  Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
1506 
1507  // Upscale.
1508  if (DstScale > SrcScale)
1509  Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
1510  } else {
1511  // Adjust the number of fractional bits.
1512  if (DstScale > SrcScale) {
1513  // Compare to DstWidth to prevent resizing twice.
1514  ResultWidth = std::max(SrcWidth + DstScale - SrcScale, DstWidth);
1515  llvm::Type *UpscaledTy = Builder.getIntNTy(ResultWidth);
1516  Result = Builder.CreateIntCast(Result, UpscaledTy, SrcIsSigned, "resize");
1517  Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
1518  }
1519 
1520  // Handle saturation.
1521  bool LessIntBits = DstFPSema.getIntegralBits() < SrcFPSema.getIntegralBits();
1522  if (LessIntBits) {
1523  Value *Max = ConstantInt::get(
1524  CGF.getLLVMContext(),
1525  APFixedPoint::getMax(DstFPSema).getValue().extOrTrunc(ResultWidth));
1526  Value *TooHigh = SrcIsSigned ? Builder.CreateICmpSGT(Result, Max)
1527  : Builder.CreateICmpUGT(Result, Max);
1528  Result = Builder.CreateSelect(TooHigh, Max, Result, "satmax");
1529  }
1530  // Cannot overflow min to dest type if src is unsigned since all fixed
1531  // point types can cover the unsigned min of 0.
1532  if (SrcIsSigned && (LessIntBits || !DstIsSigned)) {
1533  Value *Min = ConstantInt::get(
1534  CGF.getLLVMContext(),
1535  APFixedPoint::getMin(DstFPSema).getValue().extOrTrunc(ResultWidth));
1536  Value *TooLow = Builder.CreateICmpSLT(Result, Min);
1537  Result = Builder.CreateSelect(TooLow, Min, Result, "satmin");
1538  }
1539 
1540  // Resize the integer part to get the final destination size.
1541  if (ResultWidth != DstWidth)
1542  Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
1543  }
1544  return Result;
1545 }
1546 
1547 /// Emit a conversion from the specified complex type to the specified
1548 /// destination type, where the destination type is an LLVM scalar type.
1549 Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1550  CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1551  SourceLocation Loc) {
1552  // Get the source element type.
1553  SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1554 
1555  // Handle conversions to bool first, they are special: comparisons against 0.
1556  if (DstTy->isBooleanType()) {
1557  // Complex != 0 -> (Real != 0) | (Imag != 0)
1558  Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1559  Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1560  return Builder.CreateOr(Src.first, Src.second, "tobool");
1561  }
1562 
1563  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1564  // the imaginary part of the complex value is discarded and the value of the
1565  // real part is converted according to the conversion rules for the
1566  // corresponding real type."
1567  return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1568 }
1569 
1570 Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1571  return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1572 }
1573 
1574 /// Emit a sanitization check for the given "binary" operation (which
1575 /// might actually be a unary increment which has been lowered to a binary
1576 /// operation). The check passes if all values in \p Checks (which are \c i1),
1577 /// are \c true.
1578 void ScalarExprEmitter::EmitBinOpCheck(
1579  ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1580  assert(CGF.IsSanitizerScope);
1581  SanitizerHandler Check;
1582  SmallVector<llvm::Constant *, 4> StaticData;
1583  SmallVector<llvm::Value *, 2> DynamicData;
1584 
1585  BinaryOperatorKind Opcode = Info.Opcode;
1586  if (BinaryOperator::isCompoundAssignmentOp(Opcode))
1587  Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
1588 
1589  StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1590  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1591  if (UO && UO->getOpcode() == UO_Minus) {
1592  Check = SanitizerHandler::NegateOverflow;
1593  StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1594  DynamicData.push_back(Info.RHS);
1595  } else {
1596  if (BinaryOperator::isShiftOp(Opcode)) {
1597  // Shift LHS negative or too large, or RHS out of bounds.
1598  Check = SanitizerHandler::ShiftOutOfBounds;
1599  const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1600  StaticData.push_back(
1601  CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1602  StaticData.push_back(
1603  CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1604  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1605  // Divide or modulo by zero, or signed overflow (e.g. INT_MIN / -1).
1606  Check = SanitizerHandler::DivremOverflow;
1607  StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1608  } else {
1609  // Arithmetic overflow (+, -, *).
1610  switch (Opcode) {
1611  case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1612  case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1613  case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1614  default: llvm_unreachable("unexpected opcode for bin op check");
1615  }
1616  StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1617  }
1618  DynamicData.push_back(Info.LHS);
1619  DynamicData.push_back(Info.RHS);
1620  }
1621 
1622  CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
1623 }
1624 
1625 //===----------------------------------------------------------------------===//
1626 // Visitor Methods
1627 //===----------------------------------------------------------------------===//
1628 
1629 Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1630  CGF.ErrorUnsupported(E, "scalar expression");
1631  if (E->getType()->isVoidType())
1632  return nullptr;
1633  return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1634 }
1635 
1636 Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1637  // Vector Mask Case
1638  if (E->getNumSubExprs() == 2) {
1639  Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1640  Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1641  Value *Mask;
1642 
1643  llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
1644  unsigned LHSElts = LTy->getNumElements();
1645 
1646  Mask = RHS;
1647 
1648  llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
1649 
1650  // Mask off the high bits of each shuffle index.
1651  Value *MaskBits =
1652  llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
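 // e.g. for a 4-element LHS the mask constant is NextPowerOf2(3) - 1 == 0b11,
 // so only the low bits of each shuffle index are kept.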
1653  Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1654 
1655  // newv = undef
1656  // mask = mask & maskbits
1657  // for each elt
1658  // n = extract mask i
1659  // x = extract val n
1660  // newv = insert newv, x, i
1661  llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
1662  MTy->getNumElements());
1663  Value* NewV = llvm::UndefValue::get(RTy);
1664  for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1665  Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1666  Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1667 
1668  Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1669  NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1670  }
1671  return NewV;
1672  }
1673 
1674  Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1675  Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1676 
1677  SmallVector<llvm::Constant*, 32> indices;
1678  for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1679  llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
1680  // Check for -1 and output it as undef in the IR.
1681  if (Idx.isSigned() && Idx.isAllOnesValue())
1682  indices.push_back(llvm::UndefValue::get(CGF.Int32Ty));
1683  else
1684  indices.push_back(Builder.getInt32(Idx.getZExtValue()));
1685  }
1686 
1687  Value *SV = llvm::ConstantVector::get(indices);
1688  return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
1689 }
1690 
1691 Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1692  QualType SrcType = E->getSrcExpr()->getType(),
1693  DstType = E->getType();
1694 
1695  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1696 
1697  SrcType = CGF.getContext().getCanonicalType(SrcType);
1698  DstType = CGF.getContext().getCanonicalType(DstType);
1699  if (SrcType == DstType) return Src;
1700 
1701  assert(SrcType->isVectorType() &&
1702  "ConvertVector source type must be a vector");
1703  assert(DstType->isVectorType() &&
1704  "ConvertVector destination type must be a vector");
1705 
1706  llvm::Type *SrcTy = Src->getType();
1707  llvm::Type *DstTy = ConvertType(DstType);
1708 
1709  // Ignore conversions like int -> uint.
1710  if (SrcTy == DstTy)
1711  return Src;
1712 
1713  QualType SrcEltType = SrcType->getAs<VectorType>()->getElementType(),
1714  DstEltType = DstType->getAs<VectorType>()->getElementType();
1715 
1716  assert(SrcTy->isVectorTy() &&
1717  "ConvertVector source IR type must be a vector");
1718  assert(DstTy->isVectorTy() &&
1719  "ConvertVector destination IR type must be a vector");
1720 
1721  llvm::Type *SrcEltTy = SrcTy->getVectorElementType(),
1722  *DstEltTy = DstTy->getVectorElementType();
1723 
1724  if (DstEltType->isBooleanType()) {
1725  assert((SrcEltTy->isFloatingPointTy() ||
1726  isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1727 
1728  llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
1729  if (SrcEltTy->isFloatingPointTy()) {
1730  return Builder.CreateFCmpUNE(Src, Zero, "tobool");
1731  } else {
1732  return Builder.CreateICmpNE(Src, Zero, "tobool");
1733  }
1734  }
1735 
1736  // We have the arithmetic types: real int/float.
1737  Value *Res = nullptr;
1738 
1739  if (isa<llvm::IntegerType>(SrcEltTy)) {
1740  bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1741  if (isa<llvm::IntegerType>(DstEltTy))
1742  Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1743  else if (InputSigned)
1744  Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1745  else
1746  Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1747  } else if (isa<llvm::IntegerType>(DstEltTy)) {
1748  assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1749  if (DstEltType->isSignedIntegerOrEnumerationType())
1750  Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1751  else
1752  Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1753  } else {
1754  assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1755  "Unknown real conversion");
1756  if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1757  Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1758  else
1759  Res = Builder.CreateFPExt(Src, DstTy, "conv");
1760  }
1761 
1762  return Res;
1763 }
1764 
1765 Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1766  if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
1767  CGF.EmitIgnoredExpr(E->getBase());
1768  return CGF.emitScalarConstant(Constant, E);
1769  } else {
1770  Expr::EvalResult Result;
1771  if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1772  llvm::APSInt Value = Result.Val.getInt();
1773  CGF.EmitIgnoredExpr(E->getBase());
1774  return Builder.getInt(Value);
1775  }
1776  }
1777 
1778  return EmitLoadOfLValue(E);
1779 }
1780 
1781 Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1782  TestAndClearIgnoreResultAssign();
1783 
1784  // Emit subscript expressions in rvalue contexts. For most cases, this just
1785  // loads the lvalue formed by the subscript expr. However, we have to be
1786  // careful, because the base of a vector subscript is occasionally an rvalue,
1787  // so we can't get it as an lvalue.
1788  if (!E->getBase()->getType()->isVectorType())
1789  return EmitLoadOfLValue(E);
1790 
1791  // Handle the vector case. The base must be a vector, the index must be an
1792  // integer value.
1793  Value *Base = Visit(E->getBase());
1794  Value *Idx = Visit(E->getIdx());
1795  QualType IdxTy = E->getIdx()->getType();
1796 
1797  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1798  CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1799 
1800  return Builder.CreateExtractElement(Base, Idx, "vecext");
1801 }
1802 
1803 static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1804  unsigned Off, llvm::Type *I32Ty) {
1805  int MV = SVI->getMaskValue(Idx);
1806  if (MV == -1)
1807  return llvm::UndefValue::get(I32Ty);
1808  return llvm::ConstantInt::get(I32Ty, Off+MV);
1809 }
1810 
1811 static llvm::Constant *getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1812  if (C->getBitWidth() != 32) {
1813  assert(llvm::ConstantInt::isValueValidForType(I32Ty,
1814  C->getZExtValue()) &&
1815  "Index operand too large for shufflevector mask!");
1816  return llvm::ConstantInt::get(I32Ty, C->getZExtValue());
1817  }
1818  return C;
1819 }
1820 
1821 Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
1822  bool Ignore = TestAndClearIgnoreResultAssign();
1823  (void)Ignore;
1824  assert (Ignore == false && "init list ignored");
1825  unsigned NumInitElements = E->getNumInits();
1826 
1827  if (E->hadArrayRangeDesignator())
1828  CGF.ErrorUnsupported(E, "GNU array range designator extension");
1829 
1830  llvm::VectorType *VType =
1831  dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
1832 
1833  if (!VType) {
1834  if (NumInitElements == 0) {
1835  // C++11 value-initialization for the scalar.
1836  return EmitNullValue(E->getType());
1837  }
1838  // We have a scalar in braces. Just use the first element.
1839  return Visit(E->getInit(0));
1840  }
1841 
1842  unsigned ResElts = VType->getNumElements();
1843 
1844  // Loop over initializers collecting the Value for each, and remembering
1845  // whether the source was a swizzle (ExtVectorElementExpr). This will allow
1846  // us to fold the shuffle for the swizzle into the shuffle for the vector
1847  // initializer, since LLVM optimizers generally do not want to touch
1848  // shuffles.
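 // e.g. for 'float4 v = (float4)(u.xy, a, b);' with u itself a float4, the
 // u.xy swizzle shuffle can be folded into the shuffle that builds v.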
1849  unsigned CurIdx = 0;
1850  bool VIsUndefShuffle = false;
1851  llvm::Value *V = llvm::UndefValue::get(VType);
1852  for (unsigned i = 0; i != NumInitElements; ++i) {
1853  Expr *IE = E->getInit(i);
1854  Value *Init = Visit(IE);
1855  SmallVector<llvm::Constant*, 16> Args;
1856 
1857  llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
1858 
1859  // Handle scalar elements. If the scalar initializer is actually one
1860  // element of a different vector of the same width, use shuffle instead of
1861  // extract+insert.
1862  if (!VVT) {
1863  if (isa<ExtVectorElementExpr>(IE)) {
1864  llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
1865 
1866  if (EI->getVectorOperandType()->getNumElements() == ResElts) {
1867  llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
1868  Value *LHS = nullptr, *RHS = nullptr;
1869  if (CurIdx == 0) {
1870  // insert into undef -> shuffle (src, undef)
1871  // shufflemask must use an i32
1872  Args.push_back(getAsInt32(C, CGF.Int32Ty));
1873  Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1874 
1875  LHS = EI->getVectorOperand();
1876  RHS = V;
1877  VIsUndefShuffle = true;
1878  } else if (VIsUndefShuffle) {
1879  // insert into undefshuffle && size match -> shuffle (v, src)
1880  llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
1881  for (unsigned j = 0; j != CurIdx; ++j)
1882  Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
1883  Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
1884  Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1885 
1886  LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1887  RHS = EI->getVectorOperand();
1888  VIsUndefShuffle = false;
1889  }
1890  if (!Args.empty()) {
1891  llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1892  V = Builder.CreateShuffleVector(LHS, RHS, Mask);
1893  ++CurIdx;
1894  continue;
1895  }
1896  }
1897  }
1898  V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
1899  "vecinit");
1900  VIsUndefShuffle = false;
1901  ++CurIdx;
1902  continue;
1903  }
1904 
1905  unsigned InitElts = VVT->getNumElements();
1906 
1907  // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
1908  // input is the same width as the vector being constructed, generate an
1909  // optimized shuffle of the swizzle input into the result.
1910  unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
1911  if (isa<ExtVectorElementExpr>(IE)) {
1912  llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
1913  Value *SVOp = SVI->getOperand(0);
1914  llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
1915 
1916  if (OpTy->getNumElements() == ResElts) {
1917  for (unsigned j = 0; j != CurIdx; ++j) {
1918  // If the current vector initializer is a shuffle with undef, merge
1919  // this shuffle directly into it.
1920  if (VIsUndefShuffle) {
1921  Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
1922  CGF.Int32Ty));
1923  } else {
1924  Args.push_back(Builder.getInt32(j));
1925  }
1926  }
1927  for (unsigned j = 0, je = InitElts; j != je; ++j)
1928  Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
1929  Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1930 
1931  if (VIsUndefShuffle)
1932  V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1933 
1934  Init = SVOp;
1935  }
1936  }
1937 
1938  // Extend init to result vector length, and then shuffle its contribution
1939  // to the vector initializer into V.
1940  if (Args.empty()) {
1941  for (unsigned j = 0; j != InitElts; ++j)
1942  Args.push_back(Builder.getInt32(j));
1943  Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1944  llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1945  Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
1946  Mask, "vext");
1947 
1948  Args.clear();
1949  for (unsigned j = 0; j != CurIdx; ++j)
1950  Args.push_back(Builder.getInt32(j));
1951  for (unsigned j = 0; j != InitElts; ++j)
1952  Args.push_back(Builder.getInt32(j+Offset));
1953  Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1954  }
1955 
1956  // If V is undef, make sure it ends up on the RHS of the shuffle to aid
1957  // merging subsequent shuffles into this one.
1958  if (CurIdx == 0)
1959  std::swap(V, Init);
1960  llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1961  V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
1962  VIsUndefShuffle = isa<llvm::UndefValue>(Init);
1963  CurIdx += InitElts;
1964  }
1965 
1966  // FIXME: evaluate codegen vs. shuffling against constant null vector.
1967  // Emit remaining default initializers.
1968  llvm::Type *EltTy = VType->getElementType();
1969 
1970  // Emit remaining default initializers
1971  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
1972  Value *Idx = Builder.getInt32(CurIdx);
1973  llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
1974  V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
1975  }
1976  return V;
1977 }
1978 
1979 bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
1980  const Expr *E = CE->getSubExpr();
1981 
1982  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
1983  return false;
1984 
1985  if (isa<CXXThisExpr>(E->IgnoreParens())) {
1986  // We always assume that 'this' is never null.
1987  return false;
1988  }
1989 
1990  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
1991  // And that glvalue casts are never null.
1992  if (ICE->getValueKind() != VK_RValue)
1993  return false;
1994  }
1995 
1996  return true;
1997 }
1998 
1999 // VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
2000 // have to handle a more broad range of conversions than explicit casts, as they
2001 // handle things like function to ptr-to-function decay etc.
2002 Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
2003  Expr *E = CE->getSubExpr();
2004  QualType DestTy = CE->getType();
2005  CastKind Kind = CE->getCastKind();
2006 
2007  // These cases are generally not written to ignore the result of
2008  // evaluating their sub-expressions, so we clear this now.
2009  bool Ignored = TestAndClearIgnoreResultAssign();
2010 
2011  // Since almost all cast kinds apply to scalars, this switch doesn't have
2012  // a default case, so the compiler will warn on a missing case. The cases
2013  // are in the same order as in the CastKind enum.
2014  switch (Kind) {
2015  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2016  case CK_BuiltinFnToFnPtr:
2017  llvm_unreachable("builtin functions are handled elsewhere");
2018 
2019  case CK_LValueBitCast:
2020  case CK_ObjCObjectLValueCast: {
2021  Address Addr = EmitLValue(E).getAddress();
2022  Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
2023  LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
2024  return EmitLoadOfLValue(LV, CE->getExprLoc());
2025  }
2026 
2027  case CK_CPointerToObjCPointerCast:
2028  case CK_BlockPointerToObjCPointerCast:
2029  case CK_AnyPointerToBlockPointerCast:
2030  case CK_BitCast: {
2031  Value *Src = Visit(const_cast<Expr*>(E));
2032  llvm::Type *SrcTy = Src->getType();
2033  llvm::Type *DstTy = ConvertType(DestTy);
2034  if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
2035  SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
2036  llvm_unreachable("wrong cast for pointers in different address spaces"
2037  "(must be an address space cast)!");
2038  }
2039 
2040  if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2041  if (auto PT = DestTy->getAs<PointerType>())
2042  CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
2043  /*MayBeNull=*/true,
2044  CodeGenFunction::CFITCK_UnrelatedCast,
2045  CE->getBeginLoc());
2046  }
2047 
2048  if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2049  const QualType SrcType = E->getType();
2050 
2051  if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2052  // Casting to a pointer that could carry dynamic information (provided by
2053  // invariant.group) requires a launder.
2054  Src = Builder.CreateLaunderInvariantGroup(Src);
2055  } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2056  // Casting to a pointer that does not carry dynamic information (provided
2057  // by invariant.group) requires stripping it. Note that we don't strip
2058  // when the source could not be a dynamic type but the destination could
2059  // be, because the dynamic information is laundered anyway: since
2060  // launder(strip(src)) == launder(src), there is no need to add an extra
2061  // strip before the launder.
2062  Src = Builder.CreateStripInvariantGroup(Src);
2063  }
2064  }
2065 
2066  return Builder.CreateBitCast(Src, DstTy);
2067  }
2068  case CK_AddressSpaceConversion: {
2069  Expr::EvalResult Result;
2070  if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2071  Result.Val.isNullPointer()) {
2072  // If E has side effect, it is emitted even if its final result is a
2073  // null pointer. In that case, a DCE pass should be able to
2074  // eliminate the useless instructions emitted during translating E.
2075  if (Result.HasSideEffects)
2076  Visit(E);
2077  return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
2078  ConvertType(DestTy)), DestTy);
2079  }
2080  // Since target may map different address spaces in AST to the same address
2081  // space, an address space conversion may end up as a bitcast.
2082  return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2083  CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2084  DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
2085  }
2086  case CK_AtomicToNonAtomic:
2087  case CK_NonAtomicToAtomic:
2088  case CK_NoOp:
2089  case CK_UserDefinedConversion:
2090  return Visit(const_cast<Expr*>(E));
2091 
2092  case CK_BaseToDerived: {
2093  const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2094  assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2095 
2096  Address Base = CGF.EmitPointerWithAlignment(E);
2097  Address Derived =
2098  CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2099  CE->path_begin(), CE->path_end(),
2100  CGF.ShouldNullCheckClassCastValue(CE));
2101 
2102  // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2103  // performed and the object is not of the derived type.
2104  if (CGF.sanitizePerformTypeCheck())
2105  CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
2106  Derived.getPointer(), DestTy->getPointeeType());
2107 
2108  if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2109  CGF.EmitVTablePtrCheckForCast(
2110  DestTy->getPointeeType(), Derived.getPointer(),
2111  /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
2112  CE->getBeginLoc());
2113 
2114  return Derived.getPointer();
2115  }
2116  case CK_UncheckedDerivedToBase:
2117  case CK_DerivedToBase: {
2118  // The EmitPointerWithAlignment path does this fine; just discard
2119  // the alignment.
2120  return CGF.EmitPointerWithAlignment(CE).getPointer();
2121  }
2122 
2123  case CK_Dynamic: {
2124  Address V = CGF.EmitPointerWithAlignment(E);
2125  const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2126  return CGF.EmitDynamicCast(V, DCE);
2127  }
2128 
2129  case CK_ArrayToPointerDecay:
2130  return CGF.EmitArrayToPointerDecay(E).getPointer();
2131  case CK_FunctionToPointerDecay:
2132  return EmitLValue(E).getPointer();
2133 
2134  case CK_NullToPointer:
2135  if (MustVisitNullValue(E))
2136  (void) Visit(E);
2137 
2138  return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2139  DestTy);
2140 
2141  case CK_NullToMemberPointer: {
2142  if (MustVisitNullValue(E))
2143  (void) Visit(E);
2144 
2145  const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2146  return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2147  }
2148 
2149  case CK_ReinterpretMemberPointer:
2150  case CK_BaseToDerivedMemberPointer:
2151  case CK_DerivedToBaseMemberPointer: {
2152  Value *Src = Visit(E);
2153 
2154  // Note that the AST doesn't distinguish between checked and
2155  // unchecked member pointer conversions, so we always have to
2156  // implement checked conversions here. This is inefficient when
2157  // actual control flow may be required in order to perform the
2158  // check, which it is for data member pointers (but not member
2159  // function pointers on Itanium and ARM).
2160  return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2161  }
2162 
2163  case CK_ARCProduceObject:
2164  return CGF.EmitARCRetainScalarExpr(E);
2165  case CK_ARCConsumeObject:
2166  return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2167  case CK_ARCReclaimReturnedObject:
2168  return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2169  case CK_ARCExtendBlockObject:
2170  return CGF.EmitARCExtendBlockObject(E);
2171 
2172  case CK_CopyAndAutoreleaseBlockObject:
2173  return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2174 
2175  case CK_FloatingRealToComplex:
2176  case CK_FloatingComplexCast:
2177  case CK_IntegralRealToComplex:
2178  case CK_IntegralComplexCast:
2179  case CK_IntegralComplexToFloatingComplex:
2180  case CK_FloatingComplexToIntegralComplex:
2181  case CK_ConstructorConversion:
2182  case CK_ToUnion:
2183  llvm_unreachable("scalar cast to non-scalar value");
2184 
2185  case CK_LValueToRValue:
2186  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2187  assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2188  return Visit(const_cast<Expr*>(E));
2189 
2190  case CK_IntegralToPointer: {
2191  Value *Src = Visit(const_cast<Expr*>(E));
2192 
2193  // First, convert to the correct width so that we control the kind of
2194  // extension.
2195  auto DestLLVMTy = ConvertType(DestTy);
2196  llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2197  bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2198  llvm::Value* IntResult =
2199  Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2200 
2201  auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2202 
2203  if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2204  // Going from an integer to a pointer that could be dynamic requires reloading
2205  // dynamic information from invariant.group.
2206  if (DestTy.mayBeDynamicClass())
2207  IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2208  }
2209  return IntToPtr;
2210  }
2211  case CK_PointerToIntegral: {
2212  assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2213  auto *PtrExpr = Visit(E);
2214 
2215  if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2216  const QualType SrcType = E->getType();
2217 
2218  // Casting to integer requires stripping dynamic information as it does
2219  // not carry it.
2220  if (SrcType.mayBeDynamicClass())
2221  PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2222  }
2223 
2224  return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2225  }
2226  case CK_ToVoid: {
2227  CGF.EmitIgnoredExpr(E);
2228  return nullptr;
2229  }
2230  case CK_VectorSplat: {
2231  llvm::Type *DstTy = ConvertType(DestTy);
2232  Value *Elt = Visit(const_cast<Expr*>(E));
2233  // Splat the element across to all elements
2234  unsigned NumElements = DstTy->getVectorNumElements();
2235  return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2236  }
2237 
2238  case CK_FixedPointCast:
2239  return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2240  CE->getExprLoc());
2241 
2242  case CK_FixedPointToBoolean:
2243  assert(E->getType()->isFixedPointType() &&
2244  "Expected src type to be fixed point type");
2245  assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2246  return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2247  CE->getExprLoc());
2248 
2249  case CK_FixedPointToIntegral:
2250  assert(E->getType()->isFixedPointType() &&
2251  "Expected src type to be fixed point type");
2252  assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2253  return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2254  CE->getExprLoc());
2255 
2256  case CK_IntegralToFixedPoint:
2257  assert(E->getType()->isIntegerType() &&
2258  "Expected src type to be an integer");
2259  assert(DestTy->isFixedPointType() &&
2260  "Expected dest type to be fixed point type");
2261  return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2262  CE->getExprLoc());
2263 
2264  case CK_IntegralCast: {
2265  ScalarConversionOpts Opts;
2266  if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2267  if (!ICE->isPartOfExplicitCast())
2268  Opts = ScalarConversionOpts(CGF.SanOpts);
2269  }
2270  return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2271  CE->getExprLoc(), Opts);
2272  }
2273  case CK_IntegralToFloating:
2274  case CK_FloatingToIntegral:
2275  case CK_FloatingCast:
2276  return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2277  CE->getExprLoc());
2278  case CK_BooleanToSignedIntegral: {
2279  ScalarConversionOpts Opts;
2280  Opts.TreatBooleanAsSigned = true;
2281  return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2282  CE->getExprLoc(), Opts);
2283  }
2284  case CK_IntegralToBoolean:
2285  return EmitIntToBoolConversion(Visit(E));
2286  case CK_PointerToBoolean:
2287  return EmitPointerToBoolConversion(Visit(E), E->getType());
2288  case CK_FloatingToBoolean:
2289  return EmitFloatToBoolConversion(Visit(E));
2290  case CK_MemberPointerToBoolean: {
2291  llvm::Value *MemPtr = Visit(E);
2292  const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2293  return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2294  }
2295 
2296  case CK_FloatingComplexToReal:
2297  case CK_IntegralComplexToReal:
2298  return CGF.EmitComplexExpr(E, false, true).first;
2299 
2300  case CK_FloatingComplexToBoolean:
2301  case CK_IntegralComplexToBoolean: {
2302  CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2303 
2304  // TODO: kill this function off, inline appropriate case here
2305  return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2306  CE->getExprLoc());
2307  }
2308 
2309  case CK_ZeroToOCLOpaqueType: {
2310  assert((DestTy->isEventT() || DestTy->isQueueT() ||
2311  DestTy->isOCLIntelSubgroupAVCType()) &&
2312  "CK_ZeroToOCLEvent cast on non-event type");
2313  return llvm::Constant::getNullValue(ConvertType(DestTy));
2314  }
2315 
2316  case CK_IntToOCLSampler:
2317  return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2318 
2319  } // end of switch
2320 
2321  llvm_unreachable("unknown scalar cast");
2322 }
2323 
2324 Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2325  CodeGenFunction::StmtExprEvaluation eval(CGF);
2326  Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2327  !E->getType()->isVoidType());
2328  if (!RetAlloca.isValid())
2329  return nullptr;
2330  return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2331  E->getExprLoc());
2332 }
2333 
2334 Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2335  CGF.enterFullExpression(E);
2336  CodeGenFunction::RunCleanupsScope Scope(CGF);
2337  Value *V = Visit(E->getSubExpr());
2338  // Defend against dominance problems caused by jumps out of expression
2339  // evaluation through the shared cleanup block.
2340  Scope.ForceCleanup({&V});
2341  return V;
2342 }
2343 
2344 //===----------------------------------------------------------------------===//
2345 // Unary Operators
2346 //===----------------------------------------------------------------------===//
2347 
2348 static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2349  llvm::Value *InVal, bool IsInc) {
2350  BinOpInfo BinOp;
2351  BinOp.LHS = InVal;
2352  BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2353  BinOp.Ty = E->getType();
2354  BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2355  // FIXME: once UnaryOperator carries FPFeatures, copy it here.
2356  BinOp.E = E;
2357  return BinOp;
2358 }
2359 
2360 llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2361  const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2362  llvm::Value *Amount =
2363  llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
2364  StringRef Name = IsInc ? "inc" : "dec";
2365  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2366  case LangOptions::SOB_Defined:
2367  return Builder.CreateAdd(InVal, Amount, Name);
2368  case LangOptions::SOB_Undefined:
2369  if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2370  return Builder.CreateNSWAdd(InVal, Amount, Name);
2371  LLVM_FALLTHROUGH;
2372  case LangOptions::SOB_Trapping:
2373  if (!E->canOverflow())
2374  return Builder.CreateNSWAdd(InVal, Amount, Name);
2375  return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, InVal, IsInc));
2376  }
2377  llvm_unreachable("Unknown SignedOverflowBehaviorTy");
2378 }
2379 
2380 llvm::Value *
2381 ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2382  bool isInc, bool isPre) {
2383 
2384  QualType type = E->getSubExpr()->getType();
2385  llvm::PHINode *atomicPHI = nullptr;
2386  llvm::Value *value;
2387  llvm::Value *input;
2388 
2389  int amount = (isInc ? 1 : -1);
2390  bool isSubtraction = !isInc;
2391 
2392  if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2393  type = atomicTy->getValueType();
2394  if (isInc && type->isBooleanType()) {
2395  llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
2396  if (isPre) {
2397  Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
2398  ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
2399  return Builder.getTrue();
2400  }
2401  // For atomic bool increment, we just store true and return it for
2402  // preincrement (handled above); for postincrement, do an atomic swap with true.
2403  return Builder.CreateAtomicRMW(
2404  llvm::AtomicRMWInst::Xchg, LV.getPointer(), True,
2405  llvm::AtomicOrdering::SequentiallyConsistent);
2406  }
2407  // Special case atomic increment / decrement on integers: emit atomicrmw
2408  // instructions. We skip this when overflow checking is requested and fall
2409  // into the slow path with the atomic cmpxchg loop.
2410  if (!type->isBooleanType() && type->isIntegerType() &&
2411  !(type->isUnsignedIntegerType() &&
2412  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2413  CGF.getLangOpts().getSignedOverflowBehavior() !=
2414  LangOptions::SOB_Trapping) {
2415  llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2416  llvm::AtomicRMWInst::Sub;
2417  llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2418  llvm::Instruction::Sub;
2419  llvm::Value *amt = CGF.EmitToMemory(
2420  llvm::ConstantInt::get(ConvertType(type), 1, true), type);
2421  llvm::Value *old = Builder.CreateAtomicRMW(aop,
2422  LV.getPointer(), amt, llvm::AtomicOrdering::SequentiallyConsistent);
2423  return isPre ? Builder.CreateBinOp(op, old, amt) : old;
2424  }
2425  value = EmitLoadOfLValue(LV, E->getExprLoc());
2426  input = value;
2427  // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2428  llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2429  llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2430  value = CGF.EmitToMemory(value, type);
2431  Builder.CreateBr(opBB);
2432  Builder.SetInsertPoint(opBB);
2433  atomicPHI = Builder.CreatePHI(value->getType(), 2);
2434  atomicPHI->addIncoming(value, startBB);
2435  value = atomicPHI;
2436  } else {
2437  value = EmitLoadOfLValue(LV, E->getExprLoc());
2438  input = value;
2439  }
2440 
2441  // Special case of integer increment that we have to check first: bool++.
2442  // Due to promotion rules, we get:
2443  // bool++ -> bool = bool + 1
2444  // -> bool = (int)bool + 1
2445  // -> bool = ((int)bool + 1 != 0)
2446  // An interesting aspect of this is that increment is always true.
2447  // Decrement does not have this property.
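 // (For increment, even a false bool ends up true: (int)false + 1 == 1 != 0.)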
2448  if (isInc && type->isBooleanType()) {
2449  value = Builder.getTrue();
2450 
2451  // Most common case by far: integer increment.
2452  } else if (type->isIntegerType()) {
2453  // Note that signed integer inc/dec with width less than int can't
2454  // overflow because of promotion rules; we're just eliding a few steps here.
2455  if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2456  value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2457  } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2458  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2459  value =
2460  EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, value, isInc));
2461  } else {
2462  llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2463  value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2464  }
2465 
2466  // Next most common: pointer increment.
2467  } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2468  QualType type = ptr->getPointeeType();
2469 
2470  // VLA types don't have constant size.
2471  if (const VariableArrayType *vla
2472  = CGF.getContext().getAsVariableArrayType(type)) {
2473  llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2474  if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
2475  if (CGF.getLangOpts().isSignedOverflowDefined())
2476  value = Builder.CreateGEP(value, numElts, "vla.inc");
2477  else
2478  value = CGF.EmitCheckedInBoundsGEP(
2479  value, numElts, /*SignedIndices=*/false, isSubtraction,
2480  E->getExprLoc(), "vla.inc");
2481 
2482  // Arithmetic on function pointers (!) is just +-1.
2483  } else if (type->isFunctionType()) {
2484  llvm::Value *amt = Builder.getInt32(amount);
2485 
2486  value = CGF.EmitCastToVoidPtr(value);
2487  if (CGF.getLangOpts().isSignedOverflowDefined())
2488  value = Builder.CreateGEP(value, amt, "incdec.funcptr");
2489  else
2490  value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2491  isSubtraction, E->getExprLoc(),
2492  "incdec.funcptr");
2493  value = Builder.CreateBitCast(value, input->getType());
2494 
2495  // For everything else, we can just do a simple increment.
2496  } else {
2497  llvm::Value *amt = Builder.getInt32(amount);
2498  if (CGF.getLangOpts().isSignedOverflowDefined())
2499  value = Builder.CreateGEP(value, amt, "incdec.ptr");
2500  else
2501  value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2502  isSubtraction, E->getExprLoc(),
2503  "incdec.ptr");
2504  }
2505 
2506  // Vector increment/decrement.
2507  } else if (type->isVectorType()) {
2508  if (type->hasIntegerRepresentation()) {
2509  llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
2510 
2511  value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2512  } else {
2513  value = Builder.CreateFAdd(
2514  value,
2515  llvm::ConstantFP::get(value->getType(), amount),
2516  isInc ? "inc" : "dec");
2517  }
2518 
2519  // Floating point.
2520  } else if (type->isRealFloatingType()) {
2521  // Add the inc/dec to the real part.
2522  llvm::Value *amt;
2523 
2524  if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2525  // Another special case: half FP increment should be done via float
2526  if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2527  value = Builder.CreateCall(
2528  CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
2529  CGF.CGM.FloatTy),
2530  input, "incdec.conv");
2531  } else {
2532  value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
2533  }
2534  }
2535 
2536  if (value->getType()->isFloatTy())
2537  amt = llvm::ConstantFP::get(VMContext,
2538  llvm::APFloat(static_cast<float>(amount)));
2539  else if (value->getType()->isDoubleTy())
2540  amt = llvm::ConstantFP::get(VMContext,
2541  llvm::APFloat(static_cast<double>(amount)));
2542  else {
2543  // Remaining types are Half, LongDouble or __float128. Convert from float.
2544  llvm::APFloat F(static_cast<float>(amount));
2545  bool ignored;
2546  const llvm::fltSemantics *FS;
2547  // Don't use getFloatTypeSemantics because Half isn't
2548  // necessarily represented using the "half" LLVM type.
2549  if (value->getType()->isFP128Ty())
2550  FS = &CGF.getTarget().getFloat128Format();
2551  else if (value->getType()->isHalfTy())
2552  FS = &CGF.getTarget().getHalfFormat();
2553  else
2554  FS = &CGF.getTarget().getLongDoubleFormat();
2555  F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
2556  amt = llvm::ConstantFP::get(VMContext, F);
2557  }
2558  value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
2559 
2560  if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2561  if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2562  value = Builder.CreateCall(
2563  CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
2564  CGF.CGM.FloatTy),
2565  value, "incdec.conv");
2566  } else {
2567  value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
2568  }
2569  }
2570 
2571  // Objective-C pointer types.
2572  } else {
2573  const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
2574  value = CGF.EmitCastToVoidPtr(value);
2575 
2576  CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
2577  if (!isInc) size = -size;
2578  llvm::Value *sizeValue =
2579  llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
2580 
2581  if (CGF.getLangOpts().isSignedOverflowDefined())
2582  value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
2583  else
2584  value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
2585  /*SignedIndices=*/false, isSubtraction,
2586  E->getExprLoc(), "incdec.objptr");
2587  value = Builder.CreateBitCast(value, input->getType());
2588  }
2589 
2590  if (atomicPHI) {
2591  llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
2592  llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2593  auto Pair = CGF.EmitAtomicCompareExchange(
2594  LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
2595  llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
2596  llvm::Value *success = Pair.second;
2597  atomicPHI->addIncoming(old, curBlock);
2598  Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
2599  Builder.SetInsertPoint(contBB);
2600  return isPre ? value : input;
2601  }
2602 
2603  // Store the updated result through the lvalue.
2604  if (LV.isBitField())
2605  CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
2606  else
2607  CGF.EmitStoreThroughLValue(RValue::get(value), LV);
2608 
2609  // If this is a postinc, return the value read from memory, otherwise use the
2610  // updated value.
2611  return isPre ? value : input;
2612 }
2613 
2614 
2615 
2616 Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
2617  TestAndClearIgnoreResultAssign();
2618  // Emit unary minus with EmitSub so we handle overflow cases etc.
2619  BinOpInfo BinOp;
2620  BinOp.RHS = Visit(E->getSubExpr());
2621 
2622  if (BinOp.RHS->getType()->isFPOrFPVectorTy())
2623  BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType());
2624  else
2625  BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
2626  BinOp.Ty = E->getType();
2627  BinOp.Opcode = BO_Sub;
2628  // FIXME: once UnaryOperator carries FPFeatures, copy it here.
2629  BinOp.E = E;
2630  return EmitSub(BinOp);
2631 }
2632 
2633 Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
2634  TestAndClearIgnoreResultAssign();
2635  Value *Op = Visit(E->getSubExpr());
2636  return Builder.CreateNot(Op, "neg");
2637 }
2638 
2639 Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
2640  // Perform vector logical not on comparison with zero vector.
2641  if (E->getType()->isExtVectorType()) {
2642  Value *Oper = Visit(E->getSubExpr());
2643  Value *Zero = llvm::Constant::getNullValue(Oper->getType());
2644  Value *Result;
2645  if (Oper->getType()->isFPOrFPVectorTy())
2646  Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
2647  else
2648  Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
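 // Sign-extend the i1 result so true lanes become all-ones (-1) and false
 // lanes become 0, matching ext_vector boolean semantics.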
2649  return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
2650  }
2651 
2652  // Compare operand to zero.
2653  Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
2654 
2655  // Invert value.
2656  // TODO: Could dynamically modify easy computations here. For example, if
2657  // the operand is an icmp ne, turn into icmp eq.
2658  BoolVal = Builder.CreateNot(BoolVal, "lnot");
2659 
2660  // ZExt result to the expr type.
2661  return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
2662 }
2663 
2664 Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
2665  // Try folding the offsetof to a constant.
2666  Expr::EvalResult EVResult;
2667  if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
2668  llvm::APSInt Value = EVResult.Val.getInt();
2669  return Builder.getInt(Value);
2670  }
2671 
2672  // Loop over the components of the offsetof to compute the value.
2673  unsigned n = E->getNumComponents();
2674  llvm::Type* ResultType = ConvertType(E->getType());
2675  llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
2676  QualType CurrentType = E->getTypeSourceInfo()->getType();
2677  for (unsigned i = 0; i != n; ++i) {
2678  OffsetOfNode ON = E->getComponent(i);
2679  llvm::Value *Offset = nullptr;
2680  switch (ON.getKind()) {
2681  case OffsetOfNode::Array: {
2682  // Compute the index
2683  Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
2684  llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
2685  bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
2686  Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
2687 
2688  // Save the element type
2689  CurrentType =
2690  CGF.getContext().getAsArrayType(CurrentType)->getElementType();
2691 
2692  // Compute the element size
2693  llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
2694  CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
2695 
2696  // Multiply out to compute the result
2697  Offset = Builder.CreateMul(Idx, ElemSize);
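 // e.g. for offsetof(S, a[3]) with 4-byte elements this contributes
 // 3 * 4 == 12 to the running offset.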
2698  break;
2699  }
2700 
2701  case OffsetOfNode::Field: {
2702  FieldDecl *MemberDecl = ON.getField();
2703  RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
2704  const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2705 
2706  // Compute the index of the field in its parent.
2707  unsigned i = 0;
2708  // FIXME: It would be nice if we didn't have to loop here!
2709  for (RecordDecl::field_iterator Field = RD->field_begin(),
2710  FieldEnd = RD->field_end();
2711  Field != FieldEnd; ++Field, ++i) {
2712  if (*Field == MemberDecl)
2713  break;
2714  }
2715  assert(i < RL.getFieldCount() && "offsetof field in wrong type");
2716 
2717  // Compute the offset to the field
2718  int64_t OffsetInt = RL.getFieldOffset(i) /
2719  CGF.getContext().getCharWidth();
2720  Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
2721 
2722  // Save the element type.
2723  CurrentType = MemberDecl->getType();
2724  break;
2725  }
2726 
2727  case OffsetOfNode::Identifier:
2728  llvm_unreachable("dependent __builtin_offsetof");
2729 
2730  case OffsetOfNode::Base: {
2731  if (ON.getBase()->isVirtual()) {
2732  CGF.ErrorUnsupported(E, "virtual base in offsetof");
2733  continue;
2734  }
2735 
2736  RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
2737  const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2738 
2739  // Save the element type.
2740  CurrentType = ON.getBase()->getType();
2741 
2742  // Compute the offset to the base.
2743  const RecordType *BaseRT = CurrentType->getAs<RecordType>();
2744  CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
2745  CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
2746  Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
2747  break;
2748  }
2749  }
2750  Result = Builder.CreateAdd(Result, Offset);
2751  }
2752  return Result;
2753 }
2754 
2755 /// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
2756 /// argument of the sizeof expression as an integer.
2757 Value *
2758 ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2759  const UnaryExprOrTypeTraitExpr *E) {
2760  QualType TypeToSize = E->getTypeOfArgument();
2761  if (E->getKind() == UETT_SizeOf) {
2762  if (const VariableArrayType *VAT =
2763  CGF.getContext().getAsVariableArrayType(TypeToSize)) {
2764  if (E->isArgumentType()) {
2765  // sizeof(type) - make sure to emit the VLA size.
2766  CGF.EmitVariablyModifiedType(TypeToSize);
2767  } else {
2768  // C99 6.5.3.4p2: If the argument is an expression of type
2769  // VLA, it is evaluated.
2770  CGF.EmitIgnoredExpr(E->getArgumentExpr());
2771  }
2772 
2773  auto VlaSize = CGF.getVLASize(VAT);
2774  llvm::Value *size = VlaSize.NumElts;
2775 
2776  // Scale the number of non-VLA elements by the non-VLA element size.
2777  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
2778  if (!eltSize.isOne())
2779  size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);
2780 
2781  return size;
2782  }
2783  } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
2784  auto Alignment =
2785  CGF.getContext()
2788  .getQuantity();
2789  return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
2790  }
2791 
2792  // If this isn't sizeof(vla), the result must be constant; use the constant
2793  // folding logic so we don't have to duplicate it here.
2794  return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
2795 }
2796 
2797 Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
2798  Expr *Op = E->getSubExpr();
2799  if (Op->getType()->isAnyComplexType()) {
2800  // If it's an l-value, load through the appropriate subobject l-value.
2801  // Note that we have to ask E because Op might be an l-value that
2802  // this won't work for, e.g. an Obj-C property.
2803  if (E->isGLValue())
2804  return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2805  E->getExprLoc()).getScalarVal();
2806 
2807  // Otherwise, calculate and project.
2808  return CGF.EmitComplexExpr(Op, false, true).first;
2809  }
2810 
2811  return Visit(Op);
2812 }
2813 
2814 Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
2815  Expr *Op = E->getSubExpr();
2816  if (Op->getType()->isAnyComplexType()) {
2817  // If it's an l-value, load through the appropriate subobject l-value.
2818  // Note that we have to ask E because Op might be an l-value that
2819  // this won't work for, e.g. an Obj-C property.
2820  if (Op->isGLValue())
2821  return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2822  E->getExprLoc()).getScalarVal();
2823 
2824  // Otherwise, calculate and project.
2825  return CGF.EmitComplexExpr(Op, true, false).second;
2826  }
2827 
2828  // __imag on a scalar returns zero. Emit the subexpr to ensure side
2829  // effects are evaluated, but not the actual value.
2830  if (Op->isGLValue())
2831  CGF.EmitLValue(Op);
2832  else
2833  CGF.EmitScalarExpr(Op, true);
2834  return llvm::Constant::getNullValue(ConvertType(E->getType()));
2835 }
2836 
2837 //===----------------------------------------------------------------------===//
2838 // Binary Operators
2839 //===----------------------------------------------------------------------===//
2840 
2841 BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
2842  TestAndClearIgnoreResultAssign();
2843  BinOpInfo Result;
2844  Result.LHS = Visit(E->getLHS());
2845  Result.RHS = Visit(E->getRHS());
2846  Result.Ty = E->getType();
2847  Result.Opcode = E->getOpcode();
2848  Result.FPFeatures = E->getFPFeatures();
2849  Result.E = E;
2850  return Result;
2851 }
2852 
2853 LValue ScalarExprEmitter::EmitCompoundAssignLValue(
2854  const CompoundAssignOperator *E,
2855  Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
2856  Value *&Result) {
2857  QualType LHSTy = E->getLHS()->getType();
2858  BinOpInfo OpInfo;
2859 
2860  if (E->getComputationResultType()->isAnyComplexType())
2861  return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
2862 
2863  // Emit the RHS first. __block variables need to have the rhs evaluated
2864  // first, plus this should improve codegen a little.
2865  OpInfo.RHS = Visit(E->getRHS());
2866  OpInfo.Ty = E->getComputationResultType();
2867  OpInfo.Opcode = E->getOpcode();
2868  OpInfo.FPFeatures = E->getFPFeatures();
2869  OpInfo.E = E;
2870  // Load/convert the LHS.
2871  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
2872 
2873  llvm::PHINode *atomicPHI = nullptr;
2874  if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
2875  QualType type = atomicTy->getValueType();
2876  if (!type->isBooleanType() && type->isIntegerType() &&
2877  !(type->isUnsignedIntegerType() &&
2878  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2879  CGF.getLangOpts().getSignedOverflowBehavior() !=
2880  LangOptions::SOB_Trapping) {
2881  llvm::AtomicRMWInst::BinOp aop = llvm::AtomicRMWInst::BAD_BINOP;
2882  switch (OpInfo.Opcode) {
2883  // We don't have atomicrmw operands for *, %, /, <<, >>
2884  case BO_MulAssign: case BO_DivAssign:
2885  case BO_RemAssign:
2886  case BO_ShlAssign:
2887  case BO_ShrAssign:
2888  break;
2889  case BO_AddAssign:
2890  aop = llvm::AtomicRMWInst::Add;
2891  break;
2892  case BO_SubAssign:
2893  aop = llvm::AtomicRMWInst::Sub;
2894  break;
2895  case BO_AndAssign:
2896  aop = llvm::AtomicRMWInst::And;
2897  break;
2898  case BO_XorAssign:
2899  aop = llvm::AtomicRMWInst::Xor;
2900  break;
2901  case BO_OrAssign:
2902  aop = llvm::AtomicRMWInst::Or;
2903  break;
2904  default:
2905  llvm_unreachable("Invalid compound assignment type");
2906  }
2907  if (aop != llvm::AtomicRMWInst::BAD_BINOP) {
2908  llvm::Value *amt = CGF.EmitToMemory(
2909  EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
2910  E->getExprLoc()),
2911  LHSTy);
2912  Builder.CreateAtomicRMW(aop, LHSLV.getPointer(), amt,
2913  llvm::AtomicOrdering::SequentiallyConsistent);
2914  return LHSLV;
2915  }
2916  }
2917  // FIXME: For floating point types, we should be saving and restoring the
2918  // floating point environment in the loop.
2919  llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2920  llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2921  OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
2922  OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
2923  Builder.CreateBr(opBB);
2924  Builder.SetInsertPoint(opBB);
2925  atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
2926  atomicPHI->addIncoming(OpInfo.LHS, startBB);
2927  OpInfo.LHS = atomicPHI;
2928  }
2929  else
2930  OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
2931 
2932  SourceLocation Loc = E->getExprLoc();
2933  OpInfo.LHS =
2934  EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);
2935 
2936  // Expand the binary operator.
2937  Result = (this->*Func)(OpInfo);
2938 
2939  // Convert the result back to the LHS type,
2940  // potentially with Implicit Conversion sanitizer check.
2941  Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy,
2942  Loc, ScalarConversionOpts(CGF.SanOpts));
2943 
2944  if (atomicPHI) {
2945  llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
2946  llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2947  auto Pair = CGF.EmitAtomicCompareExchange(
2948  LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
2949  llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
2950  llvm::Value *success = Pair.second;
2951  atomicPHI->addIncoming(old, curBlock);
2952  Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
2953  Builder.SetInsertPoint(contBB);
2954  return LHSLV;
2955  }
2956 
2957  // Store the result value into the LHS lvalue. Bit-fields are handled
2958  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
2959  // 'An assignment expression has the value of the left operand after the
2960  // assignment...'.
2961  if (LHSLV.isBitField())
2962  CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
2963  else
2964  CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
2965 
2966  return LHSLV;
2967 }
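// Example (sketch): for a compound assignment whose LHS is _Atomic and whose
// operator maps onto an atomicrmw opcode, the fast path above emits a single
// atomicrmw; other operators fall back to the compare-exchange loop built
// around 'atomicPHI'. Assuming a typical target with inline atomics:
//
//   _Atomic int counter;
//   void bump(void)  { counter += 5; }  // -> atomicrmw add ... seq_cst
//   void scale(void) { counter *= 3; }  // -> load, then { mul; cmpxchg } loop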
2968 
2969 Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
2970  Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
2971  bool Ignore = TestAndClearIgnoreResultAssign();
2972  Value *RHS;
2973  LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
2974 
2975  // If the result is clearly ignored, return now.
2976  if (Ignore)
2977  return nullptr;
2978 
2979  // The result of an assignment in C is the assigned r-value.
2980  if (!CGF.getLangOpts().CPlusPlus)
2981  return RHS;
2982 
2983  // If the lvalue is non-volatile, return the computed value of the assignment.
2984  if (!LHS.isVolatileQualified())
2985  return RHS;
2986 
2987  // Otherwise, reload the value.
2988  return EmitLoadOfLValue(LHS, E->getExprLoc());
2989 }
2990 
2991 void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
2992  const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
2993  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
2994 
2995  if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
2996  Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
2997  SanitizerKind::IntegerDivideByZero));
2998  }
2999 
3000  const auto *BO = cast<BinaryOperator>(Ops.E);
3001  if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
3002  Ops.Ty->hasSignedIntegerRepresentation() &&
3003  !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
3004  Ops.mayHaveIntegerOverflow()) {
3005  llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
3006 
3007  llvm::Value *IntMin =
3008  Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
3009  llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
3010 
3011  llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
3012  llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
3013  llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
3014  Checks.push_back(
3015  std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
3016  }
3017 
3018  if (Checks.size() > 0)
3019  EmitBinOpCheck(Checks, Ops);
3020 }
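// Example (sketch): with -fsanitize=integer-divide-by-zero,signed-integer-overflow,
// a signed division is guarded by the two checks assembled above before the
// actual sdiv/srem is emitted:
//
//   int div(int a, int b) { return a / b; }
//   //   check 1: b != 0
//   //   check 2: (a != INT_MIN) | (b != -1)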
3021 
3022 Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
3023  {
3024  CodeGenFunction::SanitizerScope SanScope(&CGF);
3025  if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3026  CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3027  Ops.Ty->isIntegerType() &&
3028  (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3029  llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3030  EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
3031  } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
3032  Ops.Ty->isRealFloatingType() &&
3033  Ops.mayHaveFloatDivisionByZero()) {
3034  llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3035  llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
3036  EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
3037  Ops);
3038  }
3039  }
3040 
3041  if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
3042  llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
3043  if (CGF.getLangOpts().OpenCL &&
3044  !CGF.CGM.getCodeGenOpts().CorrectlyRoundedDivSqrt) {
3045  // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
3046  // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
3047  // build option allows an application to specify that single precision
3048  // floating-point divide (x/y and 1/x) and sqrt used in the program
3049  // source are correctly rounded.
3050  llvm::Type *ValTy = Val->getType();
3051  if (ValTy->isFloatTy() ||
3052  (isa<llvm::VectorType>(ValTy) &&
3053  cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
3054  CGF.SetFPAccuracy(Val, 2.5);
3055  }
3056  return Val;
3057  }
3058  else if (Ops.Ty->hasUnsignedIntegerRepresentation())
3059  return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
3060  else
3061  return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
3062 }
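// Example (sketch): for OpenCL single-precision division compiled without
// -cl-fp32-correctly-rounded-divide-sqrt, SetFPAccuracy above attaches
// !fpmath metadata recording the allowed 2.5 ulp error, roughly:
//
//   %div = fdiv float %x, %y, !fpmath !0
//   !0 = !{float 2.500000e+00}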
3063 
3064 Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
3065  // Rem in C can't be a floating point type: C99 6.5.5p2.
3066  if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3067  CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3068  Ops.Ty->isIntegerType() &&
3069  (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3070  CodeGenFunction::SanitizerScope SanScope(&CGF);
3071  llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3072  EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
3073  }
3074 
3075  if (Ops.Ty->hasUnsignedIntegerRepresentation())
3076  return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
3077  else
3078  return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
3079 }
3080 
3081 Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
3082  unsigned IID;
3083  unsigned OpID = 0;
3084 
3085  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
3086  switch (Ops.Opcode) {
3087  case BO_Add:
3088  case BO_AddAssign:
3089  OpID = 1;
3090  IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
3091  llvm::Intrinsic::uadd_with_overflow;
3092  break;
3093  case BO_Sub:
3094  case BO_SubAssign:
3095  OpID = 2;
3096  IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
3097  llvm::Intrinsic::usub_with_overflow;
3098  break;
3099  case BO_Mul:
3100  case BO_MulAssign:
3101  OpID = 3;
3102  IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
3103  llvm::Intrinsic::umul_with_overflow;
3104  break;
3105  default:
3106  llvm_unreachable("Unsupported operation for overflow detection");
3107  }
3108  OpID <<= 1;
3109  if (isSigned)
3110  OpID |= 1;
3111 
3112  CodeGenFunction::SanitizerScope SanScope(&CGF);
3113  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
3114 
3115  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
3116 
3117  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
3118  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
3119  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
3120 
3121  // Handle overflow with llvm.trap if no custom handler has been specified.
3122  const std::string *handlerName =
3123  &CGF.getLangOpts().OverflowHandler;
3124  if (handlerName->empty()) {
3125  // If the signed-integer-overflow sanitizer is enabled, emit a call to its
3126  // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
3127  if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
3128  llvm::Value *NotOverflow = Builder.CreateNot(overflow);
3129  SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
3130  : SanitizerKind::UnsignedIntegerOverflow;
3131  EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
3132  } else
3133  CGF.EmitTrapCheck(Builder.CreateNot(overflow));
3134  return result;
3135  }
3136 
3137  // Branch in case of overflow.
3138  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
3139  llvm::BasicBlock *continueBB =
3140  CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
3141  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
3142 
3143  Builder.CreateCondBr(overflow, overflowBB, continueBB);
3144 
3145  // If an overflow handler is set, then we want to call it and then use its
3146  // result, if it returns.
3147  Builder.SetInsertPoint(overflowBB);
3148 
3149  // Get the overflow handler.
3150  llvm::Type *Int8Ty = CGF.Int8Ty;
3151  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
3152  llvm::FunctionType *handlerTy =
3153  llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
3154  llvm::FunctionCallee handler =
3155  CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
3156 
3157  // Sign extend the args to 64-bit, so that we can use the same handler for
3158  // all types of overflow.
3159  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
3160  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
3161 
3162  // Call the handler with the two arguments, the operation, and the size of
3163  // the result.
3164  llvm::Value *handlerArgs[] = {
3165  lhs,
3166  rhs,
3167  Builder.getInt8(OpID),
3168  Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
3169  };
3170  llvm::Value *handlerResult =
3171  CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
3172 
3173  // Truncate the result back to the desired size.
3174  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
3175  Builder.CreateBr(continueBB);
3176 
3177  Builder.SetInsertPoint(continueBB);
3178  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
3179  phi->addIncoming(result, initialBB);
3180  phi->addIncoming(handlerResult, overflowBB);
3181 
3182  return phi;
3183 }
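// Example (sketch): under -ftrapv (or the integer-overflow sanitizers), a
// plain signed addition is routed through the overflow intrinsics handled
// above:
//
//   int add(int a, int b) { return a + b; }
//   //   %r = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   //   overflow bit -> trap or sanitizer handler; otherwise extract result
//
// If -ftrapv-handler=<name> is given, the overflow branch instead calls
// <name>(i64, i64, i8 opcode, i8 width) and uses its (truncated) return value.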
3184 
3185 /// Emit pointer + index arithmetic.
3186 static Value *emitPointerArithmetic(CodeGenFunction &CGF,
3187  const BinOpInfo &op,
3188  bool isSubtraction) {
3189  // Must have binary (not unary) expr here. Unary pointer
3190  // increment/decrement doesn't use this path.
3191  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3192 
3193  Value *pointer = op.LHS;
3194  Expr *pointerOperand = expr->getLHS();
3195  Value *index = op.RHS;
3196  Expr *indexOperand = expr->getRHS();
3197 
3198  // In a subtraction, the LHS is always the pointer.
3199  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
3200  std::swap(pointer, index);
3201  std::swap(pointerOperand, indexOperand);
3202  }
3203 
3204  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
3205 
3206  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
3207  auto &DL = CGF.CGM.getDataLayout();
3208  auto PtrTy = cast<llvm::PointerType>(pointer->getType());
3209 
3210  // Some versions of glibc and gcc use idioms (particularly in their malloc
3211  // routines) that add a pointer-sized integer (known to be a pointer value)
3212  // to a null pointer in order to cast the value back to an integer or as
3213  // part of a pointer alignment algorithm. This is undefined behavior, but
3214  // we'd like to be able to compile programs that use it.
3215  //
3216  // Normally, we'd generate a GEP with a null-pointer base here in response
3217  // to that code, but it's also UB to dereference a pointer created that
3218  // way. Instead (as an acknowledged hack to tolerate the idiom) we will
3219  // generate a direct cast of the integer value to a pointer.
3220  //
3221  // The idiom (p = nullptr + N) is not met if any of the following are true:
3222  //
3223  // The operation is subtraction.
3224  // The index is not pointer-sized.
3225  // The pointer type is not byte-sized.
3226  //
3227  if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
3228  op.Opcode,
3229  expr->getLHS(),
3230  expr->getRHS()))
3231  return CGF.Builder.CreateIntToPtr(index, pointer->getType());
3232 
3233  if (width != DL.getTypeSizeInBits(PtrTy)) {
3234  // Zero-extend or sign-extend the pointer value according to
3235  // whether the index is signed or not.
3236  index = CGF.Builder.CreateIntCast(index, DL.getIntPtrType(PtrTy), isSigned,
3237  "idx.ext");
3238  }
3239 
3240  // If this is subtraction, negate the index.
3241  if (isSubtraction)
3242  index = CGF.Builder.CreateNeg(index, "idx.neg");
3243 
3244  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
3245  CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
3246  /*Accessed*/ false);
3247 
3248  const PointerType *pointerType
3249  = pointerOperand->getType()->getAs<PointerType>();
3250  if (!pointerType) {
3251  QualType objectType = pointerOperand->getType()
3252  ->castAs<ObjCObjectPointerType>()
3253  ->getPointeeType();
3254  llvm::Value *objectSize
3255  = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
3256 
3257  index = CGF.Builder.CreateMul(index, objectSize);
3258 
3259  Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
3260  result = CGF.Builder.CreateGEP(result, index, "add.ptr");
3261  return CGF.Builder.CreateBitCast(result, pointer->getType());
3262  }
3263 
3264  QualType elementType = pointerType->getPointeeType();
3265  if (const VariableArrayType *vla
3266  = CGF.getContext().getAsVariableArrayType(elementType)) {
3267  // The element count here is the total number of non-VLA elements.
3268  llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
3269 
3270  // Effectively, the multiply by the VLA size is part of the GEP.
3271  // GEP indexes are signed, and scaling an index isn't permitted to
3272  // signed-overflow, so we use the same semantics for our explicit
3273  // multiply. We suppress this if overflow is not undefined behavior.
3274  if (CGF.getLangOpts().isSignedOverflowDefined()) {
3275  index = CGF.Builder.CreateMul(index, numElements, "vla.index");
3276  pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
3277  } else {
3278  index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
3279  pointer =
3280  CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
3281  op.E->getExprLoc(), "add.ptr");
3282  }
3283  return pointer;
3284  }
3285 
3286  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
3287  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
3288  // future proof.
3289  if (elementType->isVoidType() || elementType->isFunctionType()) {
3290  Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
3291  result = CGF.Builder.CreateGEP(result, index, "add.ptr");
3292  return CGF.Builder.CreateBitCast(result, pointer->getType());
3293  }
3294 
3295  if (CGF.getLangOpts().isSignedOverflowDefined())
3296  return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
3297 
3298  return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
3299  op.E->getExprLoc(), "add.ptr");
3300 }
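// Example (sketch): the tolerated "null plus offset" idiom discussed above is
// lowered to a plain inttoptr rather than a GEP on a null base. Assuming an
// LP64 target where the index is pointer-sized:
//
//   void *make_ptr(unsigned long n) { return (char *)0 + n; }
//   //   %p = inttoptr i64 %n to i8*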
3301 
3302 // Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
3303 // Addend. Use negMul and negAdd to negate the first operand of the Mul or
3304 // the add operand respectively. This allows fmuladd to represent a*b-c, or
3305 // c-a*b. Patterns in LLVM should catch the negated forms and translate them to
3306 // efficient operations.
3307 static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
3308  const CodeGenFunction &CGF, CGBuilderTy &Builder,
3309  bool negMul, bool negAdd) {
3310  assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
3311 
3312  Value *MulOp0 = MulOp->getOperand(0);
3313  Value *MulOp1 = MulOp->getOperand(1);
3314  if (negMul) {
3315  MulOp0 =
3316  Builder.CreateFSub(
3317  llvm::ConstantFP::getZeroValueForNegation(MulOp0->getType()), MulOp0,
3318  "neg");
3319  } else if (negAdd) {
3320  Addend =
3321  Builder.CreateFSub(
3322  llvm::ConstantFP::getZeroValueForNegation(Addend->getType()), Addend,
3323  "neg");
3324  }
3325 
3326  Value *FMulAdd = Builder.CreateCall(
3327  CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
3328  {MulOp0, MulOp1, Addend});
3329  MulOp->eraseFromParent();
3330 
3331  return FMulAdd;
3332 }
3333 
3334 // Check whether it would be legal to emit an fmuladd intrinsic call to
3335 // represent op and if so, build the fmuladd.
3336 //
3337 // Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
3338 // Does NOT check the type of the operation - it's assumed that this function
3339 // will be called from contexts where it's known that the type is contractable.
3340 static Value* tryEmitFMulAdd(const BinOpInfo &op,
3341  const CodeGenFunction &CGF, CGBuilderTy &Builder,
3342  bool isSub=false) {
3343 
3344  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
3345  op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
3346  "Only fadd/fsub can be the root of an fmuladd.");
3347 
3348  // Check whether this op is marked as fusable.
3349  if (!op.FPFeatures.allowFPContractWithinStatement())
3350  return nullptr;
3351 
3352  // We have a potentially fusable op. Look for a mul on one of the operands.
3353  // Also, make sure that the mul result isn't used directly. In that case,
3354  // there's no point creating a muladd operation.
3355  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
3356  if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3357  LHSBinOp->use_empty())
3358  return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
3359  }
3360  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
3361  if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3362  RHSBinOp->use_empty())
3363  return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
3364  }
3365 
3366  return nullptr;
3367 }
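// Example (sketch): when contraction within a statement is permitted (for
// instance with -ffp-contract=on), a multiply feeding directly into an add
// with no other uses is folded into llvm.fmuladd:
//
//   float mac(float a, float b, float c) { return a * b + c; }
//   //   %r = call float @llvm.fmuladd.f32(float %a, float %b, float %c)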
3368 
3369 Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
3370  if (op.LHS->getType()->isPointerTy() ||
3371  op.RHS->getType()->isPointerTy())
3372  return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);
3373 
3374  if (op.Ty->isSignedIntegerOrEnumerationType()) {
3375  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3376  case LangOptions::SOB_Defined:
3377  return Builder.CreateAdd(op.LHS, op.RHS, "add");
3378  case LangOptions::SOB_Undefined:
3379  if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3380  return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3381  LLVM_FALLTHROUGH;
3382  case LangOptions::SOB_Trapping:
3383  if (CanElideOverflowCheck(CGF.getContext(), op))
3384  return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3385  return EmitOverflowCheckedBinOp(op);
3386  }
3387  }
3388 
3389  if (op.Ty->isUnsignedIntegerType() &&
3390  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3391  !CanElideOverflowCheck(CGF.getContext(), op))
3392  return EmitOverflowCheckedBinOp(op);
3393 
3394  if (op.LHS->getType()->isFPOrFPVectorTy()) {
3395  // Try to form an fmuladd.
3396  if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
3397  return FMulAdd;
3398 
3399  Value *V = Builder.CreateFAdd(op.LHS, op.RHS, "add");
3400  return propagateFMFlags(V, op);
3401  }
3402 
3403  if (op.isFixedPointBinOp())
3404  return EmitFixedPointBinOp(op);
3405 
3406  return Builder.CreateAdd(op.LHS, op.RHS, "add");
3407 }
3408 
3409 /// The resulting value must be calculated with exact precision, so the operands
3410 /// may not be the same type.
3411 Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
3412  using llvm::APSInt;
3413  using llvm::ConstantInt;
3414 
3415  const auto *BinOp = cast<BinaryOperator>(op.E);
3416 
3417  // The result is a fixed point type and at least one of the operands is fixed
3418  // point while the other is either fixed point or an int. This resulting type
3419  // should be determined by Sema::handleFixedPointConversions().
3420  QualType ResultTy = op.Ty;
3421  QualType LHSTy = BinOp->getLHS()->getType();
3422  QualType RHSTy = BinOp->getRHS()->getType();
3423  ASTContext &Ctx = CGF.getContext();
3424  Value *LHS = op.LHS;
3425  Value *RHS = op.RHS;
3426 
3427  auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
3428  auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
3429  auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
3430  auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
3431 
3432  // Convert the operands to the full precision type.
3433  Value *FullLHS = EmitFixedPointConversion(LHS, LHSFixedSema, CommonFixedSema,
3434  BinOp->getExprLoc());
3435  Value *FullRHS = EmitFixedPointConversion(RHS, RHSFixedSema, CommonFixedSema,
3436  BinOp->getExprLoc());
3437 
3438  // Perform the actual addition.
3439  Value *Result;
3440  switch (BinOp->getOpcode()) {
3441  case BO_Add: {
3442  if (ResultFixedSema.isSaturated()) {
3443  llvm::Intrinsic::ID IID = ResultFixedSema.isSigned()
3444  ? llvm::Intrinsic::sadd_sat
3445  : llvm::Intrinsic::uadd_sat;
3446  Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
3447  } else {
3448  Result = Builder.CreateAdd(FullLHS, FullRHS);
3449  }
3450  break;
3451  }
3452  case BO_Sub: {
3453  if (ResultFixedSema.isSaturated()) {
3454  llvm::Intrinsic::ID IID = ResultFixedSema.isSigned()
3455  ? llvm::Intrinsic::ssub_sat
3456  : llvm::Intrinsic::usub_sat;
3457  Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
3458  } else {
3459  Result = Builder.CreateSub(FullLHS, FullRHS);
3460  }
3461  break;
3462  }
3463  case BO_LT:
3464  return CommonFixedSema.isSigned() ? Builder.CreateICmpSLT(FullLHS, FullRHS)
3465  : Builder.CreateICmpULT(FullLHS, FullRHS);
3466  case BO_GT:
3467  return CommonFixedSema.isSigned() ? Builder.CreateICmpSGT(FullLHS, FullRHS)
3468  : Builder.CreateICmpUGT(FullLHS, FullRHS);
3469  case BO_LE:
3470  return CommonFixedSema.isSigned() ? Builder.CreateICmpSLE(FullLHS, FullRHS)
3471  : Builder.CreateICmpULE(FullLHS, FullRHS);
3472  case BO_GE:
3473  return CommonFixedSema.isSigned() ? Builder.CreateICmpSGE(FullLHS, FullRHS)
3474  : Builder.CreateICmpUGE(FullLHS, FullRHS);
3475  case BO_EQ:
3476  // For equality operations, we assume any padding bits on unsigned types are
3477  // zero'd out. They could be overwritten through non-saturating operations
3478  // that cause overflow, but this leads to undefined behavior.
3479  return Builder.CreateICmpEQ(FullLHS, FullRHS);
3480  case BO_NE:
3481  return Builder.CreateICmpNE(FullLHS, FullRHS);
3482  case BO_Mul:
3483  case BO_Div:
3484  case BO_Shl:
3485  case BO_Shr:
3486  case BO_Cmp:
3487  case BO_LAnd:
3488  case BO_LOr:
3489  case BO_MulAssign:
3490  case BO_DivAssign:
3491  case BO_AddAssign:
3492  case BO_SubAssign:
3493  case BO_ShlAssign:
3494  case BO_ShrAssign:
3495  llvm_unreachable("Found unimplemented fixed point binary operation");
3496  case BO_PtrMemD:
3497  case BO_PtrMemI:
3498  case BO_Rem:
3499  case BO_Xor:
3500  case BO_And:
3501  case BO_Or:
3502  case BO_Assign:
3503  case BO_RemAssign:
3504  case BO_AndAssign:
3505  case BO_XorAssign:
3506  case BO_OrAssign:
3507  case BO_Comma:
3508  llvm_unreachable("Found unsupported binary operation for fixed point types.");
3509  }
3510 
3511  // Convert to the result type.
3512  return EmitFixedPointConversion(Result, CommonFixedSema, ResultFixedSema,
3513  BinOp->getExprLoc());
3514 }
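// Example (sketch): with -ffixed-point enabled, a saturating fixed-point
// addition maps onto the saturating intrinsics selected above; the exact
// width depends on the target's fixed-point semantics (i16 is typical for
// _Fract):
//
//   _Sat _Fract add(_Sat _Fract a, _Sat _Fract b) { return a + b; }
//   //   %r = call i16 @llvm.sadd.sat.i16(i16 %a, i16 %b)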
3515 
3516 Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
3517  // The LHS is always a pointer if either side is.
3518  if (!op.LHS->getType()->isPointerTy()) {
3519  if (op.Ty->isSignedIntegerOrEnumerationType()) {
3520  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3521  case LangOptions::SOB_Defined:
3522  return Builder.CreateSub(op.LHS, op.RHS, "sub");
3523  case LangOptions::SOB_Undefined:
3524  if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3525  return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3526  LLVM_FALLTHROUGH;
3527  case LangOptions::SOB_Trapping:
3528  if (CanElideOverflowCheck(CGF.getContext(), op))
3529  return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3530  return EmitOverflowCheckedBinOp(op);
3531  }
3532  }
3533 
3534  if (op.Ty->isUnsignedIntegerType() &&
3535  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3536  !CanElideOverflowCheck(CGF.getContext(), op))
3537  return EmitOverflowCheckedBinOp(op);
3538 
3539  if (op.LHS->getType()->isFPOrFPVectorTy()) {
3540  // Try to form an fmuladd.
3541  if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
3542  return FMulAdd;
3543  Value *V = Builder.CreateFSub(op.LHS, op.RHS, "sub");
3544  return propagateFMFlags(V, op);
3545  }
3546 
3547  if (op.isFixedPointBinOp())
3548  return EmitFixedPointBinOp(op);
3549 
3550  return Builder.CreateSub(op.LHS, op.RHS, "sub");
3551  }
3552 
3553  // If the RHS is not a pointer, then we have normal pointer
3554  // arithmetic.
3555  if (!op.RHS->getType()->isPointerTy())
3556  return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);
3557 
3558  // Otherwise, this is a pointer subtraction.
3559 
3560  // Do the raw subtraction part.
3561  llvm::Value *LHS
3562  = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
3563  llvm::Value *RHS
3564  = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
3565  Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
3566 
3567  // Okay, figure out the element size.
3568  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3569  QualType elementType = expr->getLHS()->getType()->getPointeeType();
3570 
3571  llvm::Value *divisor = nullptr;
3572 
3573  // For a variable-length array, this is going to be non-constant.
3574  if (const VariableArrayType *vla
3575  = CGF.getContext().getAsVariableArrayType(elementType)) {
3576  auto VlaSize = CGF.getVLASize(vla);
3577  elementType = VlaSize.Type;
3578  divisor = VlaSize.NumElts;
3579 
3580  // Scale the number of non-VLA elements by the non-VLA element size.
3581  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
3582  if (!eltSize.isOne())
3583  divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
3584 
3585  // For everything else, we can just compute it, safe in the
3586  // assumption that Sema won't let anything through that we can't
3587  // safely compute the size of.
3588  } else {
3589  CharUnits elementSize;
3590  // Handle GCC extension for pointer arithmetic on void* and
3591  // function pointer types.
3592  if (elementType->isVoidType() || elementType->isFunctionType())
3593  elementSize = CharUnits::One();
3594  else
3595  elementSize = CGF.getContext().getTypeSizeInChars(elementType);
3596 
3597  // Don't even emit the divide for element size of 1.
3598  if (elementSize.isOne())
3599  return diffInChars;
3600 
3601  divisor = CGF.CGM.getSize(elementSize);
3602  }
3603 
3604  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
3605  // pointer difference in C is only defined in the case where both operands
3606  // are pointing to elements of an array.
3607  return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
3608 }
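// Example (sketch): a pointer difference is the raw byte difference divided
// exactly by the element size. Assuming 4-byte int and 64-bit pointers:
//
//   long dist(int *p, int *q) { return p - q; }
//   //   %d = sub i64 %p.int, %q.int
//   //   %r = sdiv exact i64 %d, 4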
3609 
3610 Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) {
3611  llvm::IntegerType *Ty;
3612  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
3613  Ty = cast<llvm::IntegerType>(VT->getElementType());
3614  else
3615  Ty = cast<llvm::IntegerType>(LHS->getType());
3616  return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
3617 }
3618 
3619 Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
3620  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3621  // RHS to the same size as the LHS.
3622  Value *RHS = Ops.RHS;
3623  if (Ops.LHS->getType() != RHS->getType())
3624  RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
3625 
3626  bool SanitizeBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
3627  Ops.Ty->hasSignedIntegerRepresentation() &&
3628  !CGF.getLangOpts().CPlusPlus2a;
3629  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
3630  // OpenCL 6.3j: shift values are effectively % word size of LHS.
3631  if (CGF.getLangOpts().OpenCL)
3632  RHS =
3633  Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shl.mask");
3634  else if ((SanitizeBase || SanitizeExponent) &&
3635  isa<llvm::IntegerType>(Ops.LHS->getType())) {
3636  CodeGenFunction::SanitizerScope SanScope(&CGF);
3637  SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
3638  llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
3639  llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
3640 
3641  if (SanitizeExponent) {
3642  Checks.push_back(
3643  std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
3644  }
3645 
3646  if (SanitizeBase) {
3647  // Check whether we are shifting any non-zero bits off the top of the
3648  // integer. We only emit this check if exponent is valid - otherwise
3649  // instructions below will have undefined behavior themselves.
3650  llvm::BasicBlock *Orig = Builder.GetInsertBlock();
3651  llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
3652  llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
3653  Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
3654  llvm::Value *PromotedWidthMinusOne =
3655  (RHS == Ops.RHS) ? WidthMinusOne
3656  : GetWidthMinusOneValue(Ops.LHS, RHS);
3657  CGF.EmitBlock(CheckShiftBase);
3658  llvm::Value *BitsShiftedOff = Builder.CreateLShr(
3659  Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
3660  /*NUW*/ true, /*NSW*/ true),
3661  "shl.check");
3662  if (CGF.getLangOpts().CPlusPlus) {
3663  // In C99, we are not permitted to shift a 1 bit into the sign bit.
3664  // Under C++11's rules, shifting a 1 bit into the sign bit is
3665  // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
3666  // define signed left shifts, so we use the C99 and C++11 rules there).
3667  llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
3668  BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
3669  }
3670  llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
3671  llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
3672  CGF.EmitBlock(Cont);
3673  llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
3674  BaseCheck->addIncoming(Builder.getTrue(), Orig);
3675  BaseCheck->addIncoming(ValidBase, CheckShiftBase);
3676  Checks.push_back(std::make_pair(BaseCheck, SanitizerKind::ShiftBase));
3677  }
3678 
3679  assert(!Checks.empty());
3680  EmitBinOpCheck(Checks, Ops);
3681  }
3682 
3683  return Builder.CreateShl(Ops.LHS, RHS, "shl");
3684 }
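// Example (sketch): with -fsanitize=shift, a signed left shift is guarded by
// the exponent and base checks built above:
//
//   int shl(int x, int y) { return x << y; }
//   //   exponent check: y <= 31
//   //   base check (only if the exponent is valid): no disallowed bits are
//   //   shifted off the top of x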
3685 
3686 Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
3687  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3688  // RHS to the same size as the LHS.
3689  Value *RHS = Ops.RHS;
3690  if (Ops.LHS->getType() != RHS->getType())
3691  RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
3692 
3693  // OpenCL 6.3j: shift values are effectively % word size of LHS.
3694  if (CGF.getLangOpts().OpenCL)
3695  RHS =
3696  Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shr.mask");
3697  else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
3698  isa<llvm::IntegerType>(Ops.LHS->getType())) {
3699  CodeGenFunction::SanitizerScope SanScope(&CGF);
3700  llvm::Value *Valid =
3701  Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
3702  EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
3703  }
3704 
3705  if (Ops.Ty->hasUnsignedIntegerRepresentation())
3706  return Builder.CreateLShr(Ops.LHS, RHS, "shr");
3707  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
3708 }
3709 
3710 enum IntrinsicType { VCMPEQ, VCMPGT };
3711 // return corresponding comparison intrinsic for given vector type
3712 static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
3713  BuiltinType::Kind ElemKind) {
3714  switch (ElemKind) {
3715  default: llvm_unreachable("unexpected element type");
3716  case BuiltinType::Char_U:
3717  case BuiltinType::UChar:
3718  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
3719  llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
3720  case BuiltinType::Char_S:
3721  case BuiltinType::SChar:
3722  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
3723  llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
3724  case BuiltinType::UShort:
3725  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
3726  llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
3727  case BuiltinType::Short:
3728  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
3729  llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
3730  case BuiltinType::UInt:
3731  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
3732  llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
3733  case BuiltinType::Int:
3734  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
3735  llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
3736  case BuiltinType::ULong:
3737  case BuiltinType::ULongLong:
3738  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
3739  llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
3740  case BuiltinType::Long:
3741  case BuiltinType::LongLong:
3742  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
3743  llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
3744  case BuiltinType::Float:
3745  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
3746  llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
3747  case BuiltinType::Double:
3748  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
3749  llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
3750  }
3751 }
3752 
3753 Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
3754  llvm::CmpInst::Predicate UICmpOpc,
3755  llvm::CmpInst::Predicate SICmpOpc,
3756  llvm::CmpInst::Predicate FCmpOpc) {
3757  TestAndClearIgnoreResultAssign();
3758  Value *Result;
3759  QualType LHSTy = E->getLHS()->getType();
3760  QualType RHSTy = E->getRHS()->getType();
3761  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
3762  assert(E->getOpcode() == BO_EQ ||
3763  E->getOpcode() == BO_NE);
3764  Value *LHS = CGF.EmitScalarExpr(E->getLHS());
3765  Value *RHS = CGF.EmitScalarExpr(E->getRHS());
3766  Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
3767  CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
3768  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
3769  BinOpInfo BOInfo = EmitBinOps(E);
3770  Value *LHS = BOInfo.LHS;
3771  Value *RHS = BOInfo.RHS;
3772 
3773  // If AltiVec, the comparison results in a numeric type, so we use
3774  // intrinsics comparing vectors and giving 0 or 1 as a result
3775  if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
3776  // constants for mapping CR6 register bits to predicate result
3777  enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
3778 
3779  llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
3780 
3781  // in several cases the vector argument order will be reversed
3782  Value *FirstVecArg = LHS,
3783  *SecondVecArg = RHS;
3784 
3785  QualType ElTy = LHSTy->getAs<VectorType>()->getElementType();
3786  const BuiltinType *BTy = ElTy->getAs<BuiltinType>();
3787  BuiltinType::Kind ElementKind = BTy->getKind();
3788 
3789  switch(E->getOpcode()) {
3790  default: llvm_unreachable("is not a comparison operation");
3791  case BO_EQ:
3792  CR6 = CR6_LT;
3793  ID = GetIntrinsic(VCMPEQ, ElementKind);
3794  break;
3795  case BO_NE:
3796  CR6 = CR6_EQ;
3797  ID = GetIntrinsic(VCMPEQ, ElementKind);
3798  break;
3799  case BO_LT:
3800  CR6 = CR6_LT;
3801  ID = GetIntrinsic(VCMPGT, ElementKind);
3802  std::swap(FirstVecArg, SecondVecArg);
3803  break;
3804  case BO_GT:
3805  CR6 = CR6_LT;
3806  ID = GetIntrinsic(VCMPGT, ElementKind);
3807  break;
3808  case BO_LE:
3809  if (ElementKind == BuiltinType::Float) {
3810  CR6 = CR6_LT;
3811  ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
3812  std::swap(FirstVecArg, SecondVecArg);
3813  }
3814  else {
3815  CR6 = CR6_EQ;
3816  ID = GetIntrinsic(VCMPGT, ElementKind);
3817  }
3818  break;
3819  case BO_GE:
3820  if (ElementKind == BuiltinType::Float) {
3821  CR6 = CR6_LT;
3822  ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
3823  }
3824  else {
3825  CR6 = CR6_EQ;
3826  ID = GetIntrinsic(VCMPGT, ElementKind);
3827  std::swap(FirstVecArg, SecondVecArg);
3828  }
3829  break;
3830  }
3831 
3832  Value *CR6Param = Builder.getInt32(CR6);
3833  llvm::Function *F = CGF.CGM.getIntrinsic(ID);
3834  Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
3835 
3836  // The result type of intrinsic may not be same as E->getType().
3837  // If E->getType() is not BoolTy, EmitScalarConversion will do the
3838  // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
3839  // do nothing, if ResultTy is not i1 at the same time, it will cause
3840  // crash later.
3841  llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
3842  if (ResultTy->getBitWidth() > 1 &&
3843  E->getType() == CGF.getContext().BoolTy)
3844  Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
3845  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
3846  E->getExprLoc());
3847  }
3848 
3849  if (BOInfo.isFixedPointBinOp()) {
3850  Result = EmitFixedPointBinOp(BOInfo);
3851  } else if (LHS->getType()->isFPOrFPVectorTy()) {
3852  Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
3853  } else if (LHSTy->hasSignedIntegerRepresentation()) {
3854  Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
3855  } else {
3856  // Unsigned integers and pointers.
3857 
3858  if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
3859  !isa<llvm::ConstantPointerNull>(LHS) &&
3860  !isa<llvm::ConstantPointerNull>(RHS)) {
3861 
3862  // Dynamic information is required to be stripped for comparisons,
3863  // because it could leak the dynamic information. Based on comparisons
3864  // of pointers to dynamic objects, the optimizer can replace one pointer
3865  // with another, which might be incorrect in presence of invariant
3866  // groups. Comparison with null is safe because null does not carry any
3867  // dynamic information.
3868  if (LHSTy.mayBeDynamicClass())
3869  LHS = Builder.CreateStripInvariantGroup(LHS);
3870  if (RHSTy.mayBeDynamicClass())
3871  RHS = Builder.CreateStripInvariantGroup(RHS);
3872  }
3873 
3874  Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
3875  }
3876 
3877  // If this is a vector comparison, sign extend the result to the appropriate
3878  // vector integer type and return it (don't convert to bool).
3879  if (LHSTy->isVectorType())
3880  return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
3881 
3882  } else {
3883  // Complex Comparison: can only be an equality comparison.
3884  CodeGenFunction::ComplexPairTy LHS, RHS;
3885  QualType CETy;
3886  if (auto *CTy = LHSTy->getAs<ComplexType>()) {
3887  LHS = CGF.EmitComplexExpr(E->getLHS());
3888  CETy = CTy->getElementType();
3889  } else {
3890  LHS.first = Visit(E->getLHS());
3891  LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
3892  CETy = LHSTy;
3893  }
3894  if (auto *CTy = RHSTy->getAs<ComplexType>()) {
3895  RHS = CGF.EmitComplexExpr(E->getRHS());
3896  assert(CGF.getContext().hasSameUnqualifiedType(CETy,
3897  CTy->getElementType()) &&
3898  "The element types must always match.");
3899  (void)CTy;
3900  } else {
3901  RHS.first = Visit(E->getRHS());
3902  RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
3903  assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
3904  "The element types must always match.");
3905  }
3906 
3907  Value *ResultR, *ResultI;
3908  if (CETy->isRealFloatingType()) {
3909  ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
3910  ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
3911  } else {
3912  // Complex comparisons can only be equality comparisons. As such, signed
3913  // and unsigned opcodes are the same.
3914  ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
3915  ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
3916  }
3917 
3918  if (E->getOpcode() == BO_EQ) {
3919  Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
3920  } else {
3921  assert(E->getOpcode() == BO_NE &&
3922  "Complex comparison other than == or != ?");
3923  Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
3924  }
3925  }
3926 
3927  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
3928  E->getExprLoc());
3929 }
3930 
3931 Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
3932  bool Ignore = TestAndClearIgnoreResultAssign();
3933 
3934  Value *RHS;
3935  LValue LHS;
3936 
3937  switch (E->getLHS()->getType().getObjCLifetime()) {
3938  case Qualifiers::OCL_Strong:
3939  std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
3940  break;
3941 
3942  case Qualifiers::OCL_Autoreleasing:
3943  std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
3944  break;
3945 
3946  case Qualifiers::OCL_ExplicitNone:
3947  std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
3948  break;
3949 
3950  case Qualifiers::OCL_Weak:
3951  RHS = Visit(E->getRHS());
3952  LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3953  RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
3954  break;
3955 
3956  case Qualifiers::OCL_None:
3957  // __block variables need to have the rhs evaluated first, plus
3958  // this should improve codegen just a little.
3959  RHS = Visit(E->getRHS());
3960  LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3961 
3962  // Store the value into the LHS. Bit-fields are handled specially
3963  // because the result is altered by the store, i.e., [C99 6.5.16p1]
3964  // 'An assignment expression has the value of the left operand after
3965  // the assignment...'.
3966  if (LHS.isBitField()) {
3967  CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
3968  } else {
3969  CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
3970  CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
3971  }
3972  }
3973 
3974  // If the result is clearly ignored, return now.
3975  if (Ignore)
3976  return nullptr;
3977 
3978  // The result of an assignment in C is the assigned r-value.
3979  if (!CGF.getLangOpts().CPlusPlus)
3980  return RHS;
3981 
3982  // If the lvalue is non-volatile, return the computed value of the assignment.
3983  if (!LHS.isVolatileQualified())
3984  return RHS;
3985 
3986  // Otherwise, reload the value.
3987  return EmitLoadOfLValue(LHS, E->getExprLoc());
3988 }
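// Example (sketch): under -fobjc-arc the assignment above dispatches on the
// ownership qualifier of the LHS; storing into a __strong lvalue goes through
// EmitARCStoreStrong, which roughly retains the new value, stores it, and
// releases the old one (in some cases emitted as a single objc_storeStrong
// call):
//
//   void set(id __strong *slot, id value) { *slot = value; }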
3989 
3990 Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
3991  // Perform vector logical and on comparisons with zero vectors.
3992  if (E->getType()->isVectorType()) {
3993  CGF.incrementProfileCounter(E);
3994 
3995  Value *LHS = Visit(E->getLHS());
3996  Value *RHS = Visit(E->getRHS());
3997  Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
3998  if (LHS->getType()->isFPOrFPVectorTy()) {
3999  LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4000  RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4001  } else {
4002  LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4003  RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4004  }
4005  Value *And = Builder.CreateAnd(LHS, RHS);
4006  return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
4007  }
4008 
4009  llvm::Type *ResTy = ConvertType(E->getType());
4010 
4011  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
4012  // If we have 1 && X, just emit X without inserting the control flow.
4013  bool LHSCondVal;
4014  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4015  if (LHSCondVal) { // If we have 1 && X, just emit X.
4016  CGF.incrementProfileCounter(E);
4017 
4018  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4019  // ZExt result to int or bool.
4020  return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
4021  }
4022 
4023  // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
4024  if (!CGF.ContainsLabel(E->getRHS()))
4025  return llvm::Constant::getNullValue(ResTy);
4026  }
4027 
4028  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
4029  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
4030 
4031  CodeGenFunction::ConditionalEvaluation eval(CGF);
4032 
4033  // Branch on the LHS first. If it is false, go to the failure (cont) block.
4034  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
4035  CGF.getProfileCount(E->getRHS()));
4036 
4037  // Any edges into the ContBlock are now from an (indeterminate number of)
4038  // edges from this first condition. All of these values will be false. Start
4039  // setting up the PHI node in the Cont Block for this.
4040  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4041  "", ContBlock);
4042  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4043  PI != PE; ++PI)
4044  PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
4045 
4046  eval.begin(CGF);
4047  CGF.EmitBlock(RHSBlock);
4048  CGF.incrementProfileCounter(E);
4049  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4050  eval.end(CGF);
4051 
4052  // Reacquire the RHS block, as there may be subblocks inserted.
4053  RHSBlock = Builder.GetInsertBlock();
4054 
4055  // Emit an unconditional branch from this block to ContBlock.
4056  {
4057  // There is no need to emit line number for unconditional branch.
4058  auto NL = ApplyDebugLocation::CreateEmpty(CGF);
4059  CGF.EmitBlock(ContBlock);
4060  }
4061  // Insert an entry into the phi node for the edge with the value of RHSCond.
4062  PN->addIncoming(RHSCond, RHSBlock);
4063 
4064  // Artificial location to preserve the scope information
4065  {
4066  auto NL = ApplyDebugLocation::CreateArtificial(CGF);
4067  PN->setDebugLoc(Builder.getCurrentDebugLocation());
4068  }
4069 
4070  // ZExt result to int.
4071  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
4072 }
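// Example (sketch): a scalar '&&' used as a value either constant-folds
// (0 && x, 1 && x) or becomes the branch-plus-phi pattern built above, with
// the i1 result zero-extended to the C 'int' type:
//
//   int both(int a, int b) { return a && b; }
//   //   br on (a != 0); land.rhs computes (b != 0)
//   //   phi i1 [ false, entry ], [ %rhs, land.rhs ]; zext i1 -> i32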
4073 
4074 Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
4075  // Perform vector logical or on comparisons with zero vectors.
4076  if (E->getType()->isVectorType()) {
4077  CGF.incrementProfileCounter(E);
4078 
4079  Value *LHS = Visit(E->getLHS());
4080  Value *RHS = Visit(E->getRHS());
4081  Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4082  if (LHS->getType()->isFPOrFPVectorTy()) {
4083  LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4084  RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4085  } else {
4086  LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4087  RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4088  }
4089  Value *Or = Builder.CreateOr(LHS, RHS);
4090  return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
4091  }
4092 
4093  llvm::Type *ResTy = ConvertType(E->getType());
4094 
4095  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
4096  // If we have 0 || X, just emit X without inserting the control flow.
4097  bool LHSCondVal;
4098  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4099  if (!LHSCondVal) { // If we have 0 || X, just emit X.
4100  CGF.incrementProfileCounter(E);
4101 
4102  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4103  // ZExt result to int or bool.
4104  return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
4105  }
4106 
4107  // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
4108  if (!CGF.ContainsLabel(E->getRHS()))
4109  return llvm::ConstantInt::get(ResTy, 1);
4110  }
4111 
4112  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
4113  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
4114 
4115  CodeGenFunction::ConditionalEvaluation eval(CGF);
4116 
4117  // Branch on the LHS first. If it is true, go to the success (cont) block.
4118  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
4119  CGF.getCurrentProfileCount() -
4120  CGF.getProfileCount(E->getRHS()));
4121 
4122  // Any edges into the ContBlock are now from an (indeterminate number of)
4123  // edges from this first condition. All of these values will be true. Start
4124  // setting up the PHI node in the Cont Block for this.
4125  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4126  "", ContBlock);
4127  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4128  PI != PE; ++PI)
4129  PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
4130 
4131  eval.begin(CGF);
4132 
4133  // Emit the RHS condition as a bool value.
4134  CGF.EmitBlock(RHSBlock);
4135  CGF.incrementProfileCounter(E);
4136  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4137 
4138  eval.end(CGF);
4139 
4140  // Reacquire the RHS block, as there may be subblocks inserted.
4141  RHSBlock = Builder.GetInsertBlock();
4142 
4143  // Emit an unconditional branch from this block to ContBlock. Insert an entry
4144  // into the phi node for the edge with the value of RHSCond.
4145  CGF.EmitBlock(ContBlock);
4146  PN->addIncoming(RHSCond, RHSBlock);
4147 
4148  // ZExt result to int.
4149  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
4150 }
4151 
4152 Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
4153  CGF.EmitIgnoredExpr(E->getLHS());
4154  CGF.EnsureInsertPoint();
4155  return Visit(E->getRHS());
4156 }
4157 
4158 //===----------------------------------------------------------------------===//
4159 // Other Operators
4160 //===----------------------------------------------------------------------===//
4161 
4162 /// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
4163 /// expression is cheap enough and side-effect-free enough to evaluate
4164 /// unconditionally instead of conditionally. This is used to convert control
4165 /// flow into selects in some cases.
4166 static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
4167  CodeGenFunction &CGF) {
4168  // Anything that is an integer or floating point constant is fine.
4169  return E->IgnoreParens()->isEvaluatable(CGF.getContext());
4170 
4171  // Even non-volatile automatic variables can't be evaluated unconditionally.
4172  // Referencing a thread_local may cause non-trivial initialization work to
4173  // occur. If we're inside a lambda and one of the variables is from the scope
4174  // outside the lambda, that function may have returned already. Reading its
4175  // locals is a bad idea. Also, these reads may introduce races that didn't
4176  // exist in the source-level program.
4177 }
4178 
4179 
4180 Value *ScalarExprEmitter::
4181 VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
4182  TestAndClearIgnoreResultAssign();
4183 
4184  // Bind the common expression if necessary.
4185  CodeGenFunction::OpaqueValueMapping binding(CGF, E);
4186 
4187  Expr *condExpr = E->getCond();
4188  Expr *lhsExpr = E->getTrueExpr();
4189  Expr *rhsExpr = E->getFalseExpr();
4190 
4191  // If the condition constant folds and can be elided, try to avoid emitting
4192  // the condition and the dead arm.
4193  bool CondExprBool;
4194  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
4195  Expr *live = lhsExpr, *dead = rhsExpr;
4196  if (!CondExprBool) std::swap(live, dead);
4197 
4198  // If the dead side doesn't have labels we need, just emit the Live part.
4199  if (!CGF.ContainsLabel(dead)) {
4200  if (CondExprBool)
4201  CGF.incrementProfileCounter(E);
4202  Value *Result = Visit(live);
4203 
4204  // If the live part is a throw expression, it acts like it has a void
4205  // type, so evaluating it returns a null Value*. However, a conditional
4206  // with non-void type must return a non-null Value*.
4207  if (!Result && !E->getType()->isVoidType())
4208  Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
4209 
4210  return Result;
4211  }
4212  }
4213 
4214  // OpenCL: If the condition is a vector, we can treat this condition like
4215  // the select function.
4216  if (CGF.getLangOpts().OpenCL
4217  && condExpr->getType()->isVectorType()) {
4218  CGF.incrementProfileCounter(E);
4219 
4220  llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4221  llvm::Value *LHS = Visit(lhsExpr);
4222  llvm::Value *RHS = Visit(rhsExpr);
4223 
4224  llvm::Type *condType = ConvertType(condExpr->getType());
4225  llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
4226 
4227  unsigned numElem = vecTy->getNumElements();
4228  llvm::Type *elemType = vecTy->getElementType();
4229 
4230  llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
4231  llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
4232  llvm::Value *tmp = Builder.CreateSExt(TestMSB,
4233  llvm::VectorType::get(elemType,
4234  numElem),
4235  "sext");
4236  llvm::Value *tmp2 = Builder.CreateNot(tmp);
4237 
4238  // Cast float to int to perform ANDs if necessary.
4239  llvm::Value *RHSTmp = RHS;
4240  llvm::Value *LHSTmp = LHS;
4241  bool wasCast = false;
4242  llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
4243  if (rhsVTy->getElementType()->isFloatingPointTy()) {
4244  RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
4245  LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
4246  wasCast = true;
4247  }
4248 
4249  llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
4250  llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
4251  llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
4252  if (wasCast)
4253  tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
4254 
4255  return tmp5;
4256  }
4257 
4258  // If this is a really simple expression (like x ? 4 : 5), emit this as a
4259  // select instead of as control flow. We can only do this if it is cheap and
4260  // safe to evaluate the LHS and RHS unconditionally.
4261  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
4262  isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
4263  llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
4264  llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
4265 
4266  CGF.incrementProfileCounter(E, StepV);
4267 
4268  llvm::Value *LHS = Visit(lhsExpr);
4269  llvm::Value *RHS = Visit(rhsExpr);
4270  if (!LHS) {
4271  // If the conditional has void type, make sure we return a null Value*.
4272  assert(!RHS && "LHS and RHS types must match");
4273  return nullptr;
4274  }
4275  return Builder.CreateSelect(CondV, LHS, RHS, "cond");
4276  }
4277 
4278  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
4279  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
4280  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
4281 
4282  CodeGenFunction::ConditionalEvaluation eval(CGF);
4283  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
4284  CGF.getProfileCount(lhsExpr));
4285 
4286  CGF.EmitBlock(LHSBlock);
4287  CGF.incrementProfileCounter(E);
4288  eval.begin(CGF);
4289  Value *LHS = Visit(lhsExpr);
4290  eval.end(CGF);
4291 
4292  LHSBlock = Builder.GetInsertBlock();
4293  Builder.CreateBr(ContBlock);
4294 
4295  CGF.EmitBlock(RHSBlock);
4296  eval.begin(CGF);
4297  Value *RHS = Visit(rhsExpr);
4298  eval.end(CGF);
4299 
4300  RHSBlock = Builder.GetInsertBlock();
4301  CGF.EmitBlock(ContBlock);
4302 
4303  // If the LHS or RHS is a throw expression, it will be legitimately null.
4304  if (!LHS)
4305  return RHS;
4306  if (!RHS)
4307  return LHS;
4308 
4309  // Create a PHI node to merge the values from the two arms.
4310  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
4311  PN->addIncoming(LHS, LHSBlock);
4312  PN->addIncoming(RHS, RHSBlock);
4313  return PN;
4314 }
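// Illustration (hypothetical snippets, not part of CGExprScalar.cpp): the three
// strategies above map onto small C examples, assuming default codegen options:
//
//   int cheap(bool c)  { return c ? 4 : 5; }          // both arms safe/cheap -> single 'select'
//   int costly(bool c, int (*f)(void), int (*g)(void)) {
//     return c ? f() : g();                           // side effects -> cond.true/cond.false blocks + phi
//   }
//   // Under OpenCL, a *vector* condition instead takes the lane-wise mask-and-or path above.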
4315 
4316 Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
4317  return Visit(E->getChosenSubExpr());
4318 }
4319 
4320 Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
4321  QualType Ty = VE->getType();
4322 
4323  if (Ty->isVariablyModifiedType())
4324  CGF.EmitVariablyModifiedType(Ty);
4325 
4326  Address ArgValue = Address::invalid();
4327  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
4328 
4329  llvm::Type *ArgTy = ConvertType(VE->getType());
4330 
4331  // If EmitVAArg fails, emit an error.
4332  if (!ArgPtr.isValid()) {
4333  CGF.ErrorUnsupported(VE, "va_arg expression");
4334  return llvm::UndefValue::get(ArgTy);
4335  }
4336 
4337  // FIXME Volatility.
4338  llvm::Value *Val = Builder.CreateLoad(ArgPtr);
4339 
4340  // If EmitVAArg promoted the type, we must truncate it.
4341  if (ArgTy != Val->getType()) {
4342  if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
4343  Val = Builder.CreateIntToPtr(Val, ArgTy);
4344  else
4345  Val = Builder.CreateTrunc(Val, ArgTy);
4346  }
4347 
4348  return Val;
4349 }
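// Illustration (hypothetical snippet, not part of CGExprScalar.cpp): each use of
// va_arg below becomes a VAArgExpr handled by the visitor above; EmitVAArg yields
// the argument slot, and the trunc/inttoptr fix-up covers targets whose lowering
// hands back a value wider than ConvertType(VE->getType()).
//
//   #include <cstdarg>
//   int sum2(int n, ...) {
//     va_list ap;
//     va_start(ap, n);
//     int a = va_arg(ap, int);
//     int b = va_arg(ap, int);
//     va_end(ap);
//     return a + b;
//   }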
4350 
4351 Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
4352  return CGF.EmitBlockLiteral(block);
4353 }
4354 
4355 // Convert a vec3 to vec4, or vice versa.
4356 static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
4357  Value *Src, unsigned NumElementsDst) {
4358  llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
4359  SmallVector<llvm::Constant*, 4> Args;
4360  Args.push_back(Builder.getInt32(0));
4361  Args.push_back(Builder.getInt32(1));
4362  Args.push_back(Builder.getInt32(2));
4363  if (NumElementsDst == 4)
4364  Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
4365  llvm::Constant *Mask = llvm::ConstantVector::get(Args);
4366  return Builder.CreateShuffleVector(Src, UnV, Mask);
4367 }
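// Illustration (not part of CGExprScalar.cpp): the mask built above is
// <0, 1, 2, undef> when widening a vec3 to a vec4 and <0, 1, 2> when narrowing
// a four-element value back to vec3, so the three live lanes are carried
// through unchanged and the fourth lane, if any, is left undefined.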
4368 
4369 // Create cast instructions for converting LLVM value \p Src to LLVM type \p
4370 // DstTy. \p Src has the same size as \p DstTy. Both are single value types
4371 // but could be scalar or vectors of different lengths, and either can be
4372 // pointer.
4373 // There are 4 cases:
4374 // 1. non-pointer -> non-pointer : needs 1 bitcast
4375 // 2. pointer -> pointer : needs 1 bitcast or addrspacecast
4376 // 3. pointer -> non-pointer
4377 // a) pointer -> intptr_t : needs 1 ptrtoint
4378 // b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
4379 // 4. non-pointer -> pointer
4380 // a) intptr_t -> pointer : needs 1 inttoptr
4381 // b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
4382 // Note: for cases 3b and 4b two casts are required since LLVM casts do not
4383 // allow casting directly between pointer types and non-integer non-pointer
4384 // types.
4385 static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
4386  const llvm::DataLayout &DL,
4387  Value *Src, llvm::Type *DstTy,
4388  StringRef Name = "") {
4389  auto SrcTy = Src->getType();
4390 
4391  // Case 1.
4392  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
4393  return Builder.CreateBitCast(Src, DstTy, Name);
4394 
4395  // Case 2.
4396  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
4397  return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
4398 
4399  // Case 3.
4400  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
4401  // Case 3b.
4402  if (!DstTy->isIntegerTy())
4403  Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
4404  // Cases 3a and 3b.
4405  return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
4406  }
4407 
4408  // Case 4b.
4409  if (!SrcTy->isIntegerTy())
4410  Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
4411  // Cases 4a and 4b.
4412  return Builder.CreateIntToPtr(Src, DstTy, Name);
4413 }
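// Illustration (not part of CGExprScalar.cpp): concrete shapes for the cases
// documented above, assuming a 64-bit target (i64 intptr_t):
//
//   i32    -> float  : one bitcast                        (case 1)
//   i8*    -> i32*   : one bitcast or addrspacecast       (case 2)
//   i8*    -> double : ptrtoint to i64, then bitcast      (case 3b)
//   double -> i8*    : bitcast to i64, then inttoptr      (case 4b)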
4414 
4415 Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
4416  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
4417  llvm::Type *DstTy = ConvertType(E->getType());
4418 
4419  llvm::Type *SrcTy = Src->getType();
4420  unsigned NumElementsSrc = isa<llvm::VectorType>(SrcTy) ?
4421  cast<llvm::VectorType>(SrcTy)->getNumElements() : 0;
4422  unsigned NumElementsDst = isa<llvm::VectorType>(DstTy) ?
4423  cast<llvm::VectorType>(DstTy)->getNumElements() : 0;
4424 
4425  // Going from vec3 to non-vec3 is a special case and requires a shuffle
4426  // vector to get a vec4, then a bitcast if the target type is different.
4427  if (NumElementsSrc == 3 && NumElementsDst != 3) {
4428  Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
4429 
4430  if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4431  Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
4432  DstTy);
4433  }
4434 
4435  Src->setName("astype");
4436  return Src;
4437  }
4438 
4439  // Going from non-vec3 to vec3 is a special case and requires a bitcast
4440  // to vec4 if the original type is not vec4, then a shuffle vector to
4441  // get a vec3.
4442  if (NumElementsSrc != 3 && NumElementsDst == 3) {
4443  if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4444  auto Vec4Ty = llvm::VectorType::get(DstTy->getVectorElementType(), 4);
4445  Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
4446  Vec4Ty);
4447  }
4448 
4449  Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
4450  Src->setName("astype");
4451  return Src;
4452  }
4453 
4454  return Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
4455  Src, DstTy, "astype");
4456 }
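// Illustration (hypothetical OpenCL kernel code, not part of CGExprScalar.cpp):
// AsTypeExpr is produced by OpenCL's as_typen()/__builtin_astype reinterpreting
// casts, so the three paths above correspond to:
//
//   float4 same_size(uint4 u) { return as_float4(u); }   // plain same-size cast
//   uint4  widen(float3 f)    { return as_uint4(f);  }   // vec3 source: shuffle to vec4, then cast
//   float3 narrow(uint4 u)    { return as_float3(u); }   // vec3 destination: cast to vec4, then shuffle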
4457 
4458 Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
4459  return CGF.EmitAtomicExpr(E).getScalarVal();
4460 }
4461 
4462 //===----------------------------------------------------------------------===//
4463 // Entry Point into this File
4464 //===----------------------------------------------------------------------===//
4465 
4466 /// Emit the computation of the specified expression of scalar type,
4467 /// returning the result.
4468 Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
4469  assert(E && hasScalarEvaluationKind(E->getType()) &&
4470  "Invalid scalar expression to emit");
4471 
4472  return ScalarExprEmitter(*this, IgnoreResultAssign)
4473  .Visit(const_cast<Expr *>(E));
4474 }
4475 
4476 /// Emit a conversion from the specified type to the specified destination type,
4477 /// both of which are LLVM scalar types.
4478 Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
4479  QualType DstTy,
4480  SourceLocation Loc) {
4481  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
4482  "Invalid scalar expression to emit");
4483  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
4484 }
4485 
4486 /// Emit a conversion from the specified complex type to the specified
4487 /// destination type, where the destination type is an LLVM scalar type.
4488 Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
4489  QualType SrcTy,
4490  QualType DstTy,
4491  SourceLocation Loc) {
4492  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
4493  "Invalid complex -> scalar conversion");
4494  return ScalarExprEmitter(*this)
4495  .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
4496 }
4497 
4498 
4499 llvm::Value *CodeGenFunction::
4500 EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
4501  bool isInc, bool isPre) {
4502  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
4503 }
4504 
4505 LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
4506  // object->isa or (*object).isa
4507  // Generate code as for: *(Class*)object
4508 
4509  Expr *BaseExpr = E->getBase();
4510  Address Addr = Address::invalid();
4511  if (BaseExpr->isRValue()) {
4512  Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
4513  } else {
4514  Addr = EmitLValue(BaseExpr).getAddress();
4515  }
4516 
4517  // Cast the address to Class*.
4518  Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
4519  return MakeAddrLValue(Addr, E->getType());
4520 }
4521 
4522 
4523 LValue CodeGenFunction::EmitCompoundAssignmentLValue(
4524  const CompoundAssignOperator *E) {
4525  ScalarExprEmitter Scalar(*this);
4526  Value *Result = nullptr;
4527  switch (E->getOpcode()) {
4528 #define COMPOUND_OP(Op) \
4529  case BO_##Op##Assign: \
4530  return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
4531  Result)
4532  COMPOUND_OP(Mul);
4533  COMPOUND_OP(Div);
4534  COMPOUND_OP(Rem);
4535  COMPOUND_OP(Add);
4536  COMPOUND_OP(Sub);
4537  COMPOUND_OP(Shl);
4538  COMPOUND_OP(Shr);
4539  COMPOUND_OP(And);
4540  COMPOUND_OP(Xor);
4541  COMPOUND_OP(Or);
4542 #undef COMPOUND_OP
4543 
4544  case BO_PtrMemD:
4545  case BO_PtrMemI:
4546  case BO_Mul:
4547  case BO_Div:
4548  case BO_Rem:
4549  case BO_Add:
4550  case BO_Sub:
4551  case BO_Shl:
4552  case BO_Shr:
4553  case BO_LT:
4554  case BO_GT:
4555  case BO_LE:
4556  case BO_GE:
4557  case BO_EQ:
4558  case BO_NE:
4559  case BO_Cmp:
4560  case BO_And:
4561  case BO_Xor:
4562  case BO_Or:
4563  case BO_LAnd:
4564  case BO_LOr:
4565  case BO_Assign:
4566  case BO_Comma:
4567  llvm_unreachable("Not valid compound assignment operators");
4568  }
4569 
4570  llvm_unreachable("Unhandled compound assignment operator");
4571 }
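// Illustration (not part of CGExprScalar.cpp): each COMPOUND_OP(Op) case above
// expands to a forwarding call; e.g. COMPOUND_OP(Add) becomes:
//
//   case BO_AddAssign:
//     return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::EmitAdd,
//                                            Result);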
4572 
4573 Value *CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr,
4574  ArrayRef<Value *> IdxList,
4575  bool SignedIndices,
4576  bool IsSubtraction,
4577  SourceLocation Loc,
4578  const Twine &Name) {
4579  Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name);
4580 
4581  // If the pointer overflow sanitizer isn't enabled, do nothing.
4582  if (!SanOpts.has(SanitizerKind::PointerOverflow))
4583  return GEPVal;
4584 
4585  // If the GEP has already been reduced to a constant, leave it be.
4586  if (isa<llvm::Constant>(GEPVal))
4587  return GEPVal;
4588 
4589  // Only check for overflows in the default address space.
4590  if (GEPVal->getType()->getPointerAddressSpace())
4591  return GEPVal;
4592 
4593  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
4594  assert(GEP->isInBounds() && "Expected inbounds GEP");
4595 
4596  SanitizerScope SanScope(this);
4597  auto &VMContext = getLLVMContext();
4598  const auto &DL = CGM.getDataLayout();
4599  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
4600 
4601  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
4602  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
4603  auto *SAddIntrinsic =
4604  CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
4605  auto *SMulIntrinsic =
4606  CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
4607 
4608  // The total (signed) byte offset for the GEP.
4609  llvm::Value *TotalOffset = nullptr;
4610  // The offset overflow flag - true if the total offset overflows.
4611  llvm::Value *OffsetOverflows = Builder.getFalse();
4612 
4613  /// Return the result of the given binary operation.
4614  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
4615  llvm::Value *RHS) -> llvm::Value * {
4616  assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
4617 
4618  // If the operands are constants, return a constant result.
4619  if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
4620  if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
4621  llvm::APInt N;
4622  bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
4623  /*Signed=*/true, N);
4624  if (HasOverflow)
4625  OffsetOverflows = Builder.getTrue();
4626  return llvm::ConstantInt::get(VMContext, N);
4627  }
4628  }
4629 
4630  // Otherwise, compute the result with checked arithmetic.
4631  auto *ResultAndOverflow = Builder.CreateCall(
4632  (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
4633  OffsetOverflows = Builder.CreateOr(
4634  Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
4635  return Builder.CreateExtractValue(ResultAndOverflow, 0);
4636  };
4637 
4638  // Determine the total byte offset by looking at each GEP operand.
4639  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
4640  GTI != GTE; ++GTI) {
4641  llvm::Value *LocalOffset;
4642  auto *Index = GTI.getOperand();
4643  // Compute the local offset contributed by this indexing step:
4644  if (auto *STy = GTI.getStructTypeOrNull()) {
4645  // For struct indexing, the local offset is the byte position of the
4646  // specified field.
4647  unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
4648  LocalOffset = llvm::ConstantInt::get(
4649  IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
4650  } else {
4651  // Otherwise this is array-like indexing. The local offset is the index
4652  // multiplied by the element size.
4653  auto *ElementSize = llvm::ConstantInt::get(
4654  IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
4655  auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
4656  LocalOffset = eval(BO_Mul, ElementSize, IndexS);
4657  }
4658 
4659  // If this is the first offset, set it as the total offset. Otherwise, add
4660  // the local offset into the running total.
4661  if (!TotalOffset || TotalOffset == Zero)
4662  TotalOffset = LocalOffset;
4663  else
4664  TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
4665  }
4666 
4667  // Common case: if the total offset is zero, don't emit a check.
4668  if (TotalOffset == Zero)
4669  return GEPVal;
4670 
4671  // Now that we've computed the total offset, add it to the base pointer (with
4672  // wrapping semantics).
4673  auto *IntPtr = Builder.CreatePtrToInt(GEP->getPointerOperand(), IntPtrTy);
4674  auto *ComputedGEP = Builder.CreateAdd(IntPtr, TotalOffset);
4675 
4676  // The GEP is valid if:
4677  // 1) The total offset doesn't overflow, and
4678  // 2) The sign of the difference between the computed address and the base
4679  // pointer matches the sign of the total offset.
4680  llvm::Value *ValidGEP;
4681  auto *NoOffsetOverflow = Builder.CreateNot(OffsetOverflows);
4682  if (SignedIndices) {
4683  auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
4684  auto *PosOrZeroOffset = Builder.CreateICmpSGE(TotalOffset, Zero);
4685  llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
4686  ValidGEP = Builder.CreateAnd(
4687  Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid),
4688  NoOffsetOverflow);
4689  } else if (!SignedIndices && !IsSubtraction) {
4690  auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
4691  ValidGEP = Builder.CreateAnd(PosOrZeroValid, NoOffsetOverflow);
4692  } else {
4693  auto *NegOrZeroValid = Builder.CreateICmpULE(ComputedGEP, IntPtr);
4694  ValidGEP = Builder.CreateAnd(NegOrZeroValid, NoOffsetOverflow);
4695  }
4696 
4697  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
4698  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
4699  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
4700  EmitCheck(std::make_pair(ValidGEP, SanitizerKind::PointerOverflow),
4701  SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
4702 
4703  return GEPVal;
4704 }
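// Illustration (hypothetical snippets, not part of CGExprScalar.cpp): with
// -fsanitize=pointer-overflow, the check emitted above guards ordinary pointer
// arithmetic such as:
//
//   int *fwd(int *p, size_t n)  { return p + n; }  // unsigned add: result must not wrap below p
//   int *back(int *p, size_t n) { return p - n; }  // unsigned subtract: result must not wrap above p
//
// The byte offset is accumulated with the sadd/smul with-overflow intrinsics,
// and a failed predicate reaches EmitCheck with SanitizerHandler::PointerOverflow.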