clang  8.0.0svn
CGExprScalar.cpp
1 //===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGCXXABI.h"
15 #include "CGCleanup.h"
16 #include "CGDebugInfo.h"
17 #include "CGObjCRuntime.h"
18 #include "CodeGenFunction.h"
19 #include "CodeGenModule.h"
20 #include "TargetInfo.h"
21 #include "clang/AST/ASTContext.h"
22 #include "clang/AST/DeclObjC.h"
23 #include "clang/AST/Expr.h"
24 #include "clang/AST/RecordLayout.h"
25 #include "clang/AST/StmtVisitor.h"
26 #include "clang/Basic/FixedPoint.h"
27 #include "clang/Basic/TargetInfo.h"
29 #include "llvm/ADT/Optional.h"
30 #include "llvm/IR/CFG.h"
31 #include "llvm/IR/Constants.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/GetElementPtrTypeIterator.h"
35 #include "llvm/IR/GlobalVariable.h"
36 #include "llvm/IR/Intrinsics.h"
37 #include "llvm/IR/Module.h"
38 #include <cstdarg>
39 
40 using namespace clang;
41 using namespace CodeGen;
42 using llvm::Value;
43 
44 //===----------------------------------------------------------------------===//
45 // Scalar Expression Emitter
46 //===----------------------------------------------------------------------===//
47 
48 namespace {
49 
50 /// Determine whether the given binary operation may overflow.
51 /// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
52 /// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
53 /// the returned overflow check is precise. The returned value is 'true' for
54 /// all other opcodes, to be conservative.
55 bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
56  BinaryOperator::Opcode Opcode, bool Signed,
57  llvm::APInt &Result) {
58  // Assume overflow is possible, unless we can prove otherwise.
59  bool Overflow = true;
60  const auto &LHSAP = LHS->getValue();
61  const auto &RHSAP = RHS->getValue();
62  if (Opcode == BO_Add) {
63  if (Signed)
64  Result = LHSAP.sadd_ov(RHSAP, Overflow);
65  else
66  Result = LHSAP.uadd_ov(RHSAP, Overflow);
67  } else if (Opcode == BO_Sub) {
68  if (Signed)
69  Result = LHSAP.ssub_ov(RHSAP, Overflow);
70  else
71  Result = LHSAP.usub_ov(RHSAP, Overflow);
72  } else if (Opcode == BO_Mul) {
73  if (Signed)
74  Result = LHSAP.smul_ov(RHSAP, Overflow);
75  else
76  Result = LHSAP.umul_ov(RHSAP, Overflow);
77  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
78  if (Signed && !RHS->isZero())
79  Result = LHSAP.sdiv_ov(RHSAP, Overflow);
80  else
81  return false;
82  }
83  return Overflow;
84 }
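// Illustrative sketch: APInt's checked arithmetic reports overflow exactly for
// constant operands, e.g. for 32-bit signed values:
//   llvm::APInt A(32, 2147483647), B(32, 1);
//   bool Ov = false;
//   (void)A.sadd_ov(B, Ov);   // Ov == true: INT32_MAX + 1 wraps
// so the helper above can prove or disprove overflow whenever both sides of
// the binop fold to llvm::ConstantInt.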
85 
86 struct BinOpInfo {
87  Value *LHS;
88  Value *RHS;
89  QualType Ty; // Computation Type.
90  BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
91  FPOptions FPFeatures;
92  const Expr *E; // Entire expr, for error unsupported. May not be binop.
93 
94  /// Check if the binop can result in integer overflow.
95  bool mayHaveIntegerOverflow() const {
96  // Without constant input, we can't rule out overflow.
97  auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
98  auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
99  if (!LHSCI || !RHSCI)
100  return true;
101 
102  llvm::APInt Result;
103  return ::mayHaveIntegerOverflow(
104  LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
105  }
106 
107  /// Check if the binop computes a division or a remainder.
108  bool isDivremOp() const {
109  return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
110  Opcode == BO_RemAssign;
111  }
112 
113  /// Check if the binop can result in an integer division by zero.
114  bool mayHaveIntegerDivisionByZero() const {
115  if (isDivremOp())
116  if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
117  return CI->isZero();
118  return true;
119  }
120 
121  /// Check if the binop can result in a float division by zero.
122  bool mayHaveFloatDivisionByZero() const {
123  if (isDivremOp())
124  if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
125  return CFP->isZero();
126  return true;
127  }
128 };
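// For example, with a constant divisor such as 'x / 4' the RHS folds to a
// non-zero llvm::ConstantInt, so mayHaveIntegerDivisionByZero() returns false
// and the divide-by-zero sanitizer check for that expression can be skipped.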
129 
130 static bool MustVisitNullValue(const Expr *E) {
131  // If a null pointer expression's type is the C++0x nullptr_t, then
132  // it's not necessarily a simple constant and it must be evaluated
133  // for its potential side effects.
134  return E->getType()->isNullPtrType();
135 }
136 
137 /// If \p E is a widened promoted integer, get its base (unpromoted) type.
138 static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
139  const Expr *E) {
140  const Expr *Base = E->IgnoreImpCasts();
141  if (E == Base)
142  return llvm::None;
143 
144  QualType BaseTy = Base->getType();
145  if (!BaseTy->isPromotableIntegerType() ||
146  Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
147  return llvm::None;
148 
149  return BaseTy;
150 }
151 
152 /// Check if \p E is a widened promoted integer.
153 static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
154  return getUnwidenedIntegerType(Ctx, E).hasValue();
155 }
156 
157 /// Check if we can skip the overflow check for \p Op.
158 static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
159  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
160  "Expected a unary or binary operator");
161 
162  // If the binop has constant inputs and we can prove there is no overflow,
163  // we can elide the overflow check.
164  if (!Op.mayHaveIntegerOverflow())
165  return true;
166 
167  // If a unary op has a widened operand, the op cannot overflow.
168  if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
169  return !UO->canOverflow();
170 
171  // We usually don't need overflow checks for binops with widened operands.
172  // Multiplication with promoted unsigned operands is a special case.
173  const auto *BO = cast<BinaryOperator>(Op.E);
174  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
175  if (!OptionalLHSTy)
176  return false;
177 
178  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
179  if (!OptionalRHSTy)
180  return false;
181 
182  QualType LHSTy = *OptionalLHSTy;
183  QualType RHSTy = *OptionalRHSTy;
184 
185  // This is the simple case: binops without unsigned multiplication, and with
186  // widened operands. No overflow check is needed here.
187  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
188  !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
189  return true;
190 
191  // For unsigned multiplication the overflow check can be elided if either one
192  // of the unpromoted types are less than half the size of the promoted type.
193  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
194  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
195  (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
196 }
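// Worked example (assuming 32-bit int): for 'unsigned char a, b; ... a * b'
// both operands are promoted to int, PromotedSize == 32 and 2 * 8 < 32, so the
// product always fits and the overflow check is elided. For 'unsigned short'
// operands, 2 * 16 < 32 is false (e.g. 0xFFFF * 0xFFFF == 0xFFFE0001 does not
// fit in a signed 32-bit int), so the check is kept.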
197 
198 /// Update the FastMathFlags of LLVM IR from the FPOptions in LangOptions.
199 static void updateFastMathFlags(llvm::FastMathFlags &FMF,
200  FPOptions FPFeatures) {
201  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
202 }
203 
204 /// Propagate fast-math flags from \p Op to the instruction in \p V.
205 static Value *propagateFMFlags(Value *V, const BinOpInfo &Op) {
206  if (auto *I = dyn_cast<llvm::Instruction>(V)) {
207  llvm::FastMathFlags FMF = I->getFastMathFlags();
208  updateFastMathFlags(FMF, Op.FPFeatures);
209  I->setFastMathFlags(FMF);
210  }
211  return V;
212 }
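// E.g. under -ffp-contract=fast the 'contract' fast-math flag is attached to
// FP binops such as the CreateFMul in EmitMul below, which later permits the
// backend to fuse the multiply with an adjacent add into an fma.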
213 
214 class ScalarExprEmitter
215  : public StmtVisitor<ScalarExprEmitter, Value*> {
216  CodeGenFunction &CGF;
217  CGBuilderTy &Builder;
218  bool IgnoreResultAssign;
219  llvm::LLVMContext &VMContext;
220 public:
221 
222  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
223  : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
224  VMContext(cgf.getLLVMContext()) {
225  }
226 
227  //===--------------------------------------------------------------------===//
228  // Utilities
229  //===--------------------------------------------------------------------===//
230 
231  bool TestAndClearIgnoreResultAssign() {
232  bool I = IgnoreResultAssign;
233  IgnoreResultAssign = false;
234  return I;
235  }
236 
237  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
238  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
239  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
240  return CGF.EmitCheckedLValue(E, TCK);
241  }
242 
243  void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
244  const BinOpInfo &Info);
245 
246  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
247  return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
248  }
249 
250  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
251  const AlignValueAttr *AVAttr = nullptr;
252  if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
253  const ValueDecl *VD = DRE->getDecl();
254 
255  if (VD->getType()->isReferenceType()) {
256  if (const auto *TTy =
257  dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
258  AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
259  } else {
260  // Assumptions for function parameters are emitted at the start of the
261  // function, so there is no need to repeat that here.
262  if (isa<ParmVarDecl>(VD))
263  return;
264 
265  AVAttr = VD->getAttr<AlignValueAttr>();
266  }
267  }
268 
269  if (!AVAttr)
270  if (const auto *TTy =
271  dyn_cast<TypedefType>(E->getType()))
272  AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
273 
274  if (!AVAttr)
275  return;
276 
277  Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
278  llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
279  CGF.EmitAlignmentAssumption(V, AlignmentCI->getZExtValue());
280  }
281 
282  /// EmitLoadOfLValue - Given an expression with complex type that represents a
283  /// value l-value, this method emits the address of the l-value, then loads
284  /// and returns the result.
285  Value *EmitLoadOfLValue(const Expr *E) {
286  Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
287  E->getExprLoc());
288 
289  EmitLValueAlignmentAssumption(E, V);
290  return V;
291  }
292 
293  /// EmitConversionToBool - Convert the specified expression value to a
294  /// boolean (i1) truth value. This is equivalent to "Val != 0".
295  Value *EmitConversionToBool(Value *Src, QualType DstTy);
296 
297  /// Emit a check that a conversion to or from a floating-point type does not
298  /// overflow.
299  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
300  Value *Src, QualType SrcType, QualType DstType,
301  llvm::Type *DstTy, SourceLocation Loc);
302 
303  /// Known implicit conversion check kinds.
304  /// Keep in sync with the enum of the same name in ubsan_handlers.h
305  enum ImplicitConversionCheckKind : unsigned char {
306  ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
307  ICCK_UnsignedIntegerTruncation = 1,
308  ICCK_SignedIntegerTruncation = 2,
309  ICCK_IntegerSignChange = 3,
310  ICCK_SignedIntegerTruncationOrSignChange = 4,
311  };
312 
313  /// Emit a check that an [implicit] truncation of an integer does not
314  /// discard any bits. It is not UB, so we use the value after truncation.
315  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
316  QualType DstType, SourceLocation Loc);
317 
318  /// Emit a check that an [implicit] conversion of an integer does not change
319  /// the sign of the value. It is not UB, so we use the value after conversion.
320  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
321  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
322  QualType DstType, SourceLocation Loc);
323 
324  /// Emit a conversion from the specified type to the specified destination
325  /// type, both of which are LLVM scalar types.
326  struct ScalarConversionOpts {
327  bool TreatBooleanAsSigned;
328  bool EmitImplicitIntegerTruncationChecks;
329  bool EmitImplicitIntegerSignChangeChecks;
330 
331  ScalarConversionOpts()
332  : TreatBooleanAsSigned(false),
333  EmitImplicitIntegerTruncationChecks(false),
334  EmitImplicitIntegerSignChangeChecks(false) {}
335 
336  ScalarConversionOpts(clang::SanitizerSet SanOpts)
337  : TreatBooleanAsSigned(false),
338  EmitImplicitIntegerTruncationChecks(
339  SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
340  EmitImplicitIntegerSignChangeChecks(
341  SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
342  };
343  Value *
344  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
345  SourceLocation Loc,
346  ScalarConversionOpts Opts = ScalarConversionOpts());
347 
348  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
349  SourceLocation Loc);
350 
351  /// Emit a conversion from the specified complex type to the specified
352  /// destination type, where the destination type is an LLVM scalar type.
353  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
354  QualType SrcTy, QualType DstTy,
355  SourceLocation Loc);
356 
357  /// EmitNullValue - Emit a value that corresponds to null for the given type.
358  Value *EmitNullValue(QualType Ty);
359 
360  /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
361  Value *EmitFloatToBoolConversion(Value *V) {
362  // Compare against 0.0 for fp scalars.
363  llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
364  return Builder.CreateFCmpUNE(V, Zero, "tobool");
365  }
366 
367  /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
368  Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
369  Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
370 
371  return Builder.CreateICmpNE(V, Zero, "tobool");
372  }
373 
374  Value *EmitIntToBoolConversion(Value *V) {
375  // Because of the type rules of C, we often end up computing a
376  // logical value, then zero extending it to int, then wanting it
377  // as a logical value again. Optimize this common case.
378  if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
379  if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
380  Value *Result = ZI->getOperand(0);
381  // If there aren't any more uses, zap the instruction to save space.
382  // Note that there can be more uses, for example if this
383  // is the result of an assignment.
384  if (ZI->use_empty())
385  ZI->eraseFromParent();
386  return Result;
387  }
388  }
389 
390  return Builder.CreateIsNotNull(V, "tobool");
391  }
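// For instance, for 'if (a < b)' in C the comparison has type int, so the i1
// produced by the icmp is first zero-extended to i32; converting that int back
// to a branch condition hits the peephole above, which reuses the original i1
// and erases the now-dead zext.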
392 
393  //===--------------------------------------------------------------------===//
394  // Visitor Methods
395  //===--------------------------------------------------------------------===//
396 
397  Value *Visit(Expr *E) {
398  ApplyDebugLocation DL(CGF, E);
399  return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
400  }
401 
402  Value *VisitStmt(Stmt *S) {
403  S->dump(CGF.getContext().getSourceManager());
404  llvm_unreachable("Stmt can't have complex result type!");
405  }
406  Value *VisitExpr(Expr *S);
407 
408  Value *VisitConstantExpr(ConstantExpr *E) {
409  return Visit(E->getSubExpr());
410  }
411  Value *VisitParenExpr(ParenExpr *PE) {
412  return Visit(PE->getSubExpr());
413  }
414  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
415  return Visit(E->getReplacement());
416  }
417  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
418  return Visit(GE->getResultExpr());
419  }
420  Value *VisitCoawaitExpr(CoawaitExpr *S) {
421  return CGF.EmitCoawaitExpr(*S).getScalarVal();
422  }
423  Value *VisitCoyieldExpr(CoyieldExpr *S) {
424  return CGF.EmitCoyieldExpr(*S).getScalarVal();
425  }
426  Value *VisitUnaryCoawait(const UnaryOperator *E) {
427  return Visit(E->getSubExpr());
428  }
429 
430  // Leaves.
431  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
432  return Builder.getInt(E->getValue());
433  }
434  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
435  return Builder.getInt(E->getValue());
436  }
437  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
438  return llvm::ConstantFP::get(VMContext, E->getValue());
439  }
440  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
441  return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
442  }
443  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
444  return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
445  }
446  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
447  return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
448  }
449  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
450  return EmitNullValue(E->getType());
451  }
452  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
453  return EmitNullValue(E->getType());
454  }
455  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
456  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
457  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
458  llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
459  return Builder.CreateBitCast(V, ConvertType(E->getType()));
460  }
461 
462  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
463  return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
464  }
465 
466  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
467  return CGF.EmitPseudoObjectRValue(E).getScalarVal();
468  }
469 
470  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
471  if (E->isGLValue())
472  return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
473  E->getExprLoc());
474 
475  // Otherwise, assume the mapping is the scalar directly.
476  return CGF.getOrCreateOpaqueRValueMapping(E).getScalarVal();
477  }
478 
479  // l-values.
480  Value *VisitDeclRefExpr(DeclRefExpr *E) {
481  if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
482  return CGF.emitScalarConstant(Constant, E);
483  return EmitLoadOfLValue(E);
484  }
485 
486  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
487  return CGF.EmitObjCSelectorExpr(E);
488  }
489  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
490  return CGF.EmitObjCProtocolExpr(E);
491  }
492  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
493  return EmitLoadOfLValue(E);
494  }
495  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
496  if (E->getMethodDecl() &&
497  E->getMethodDecl()->getReturnType()->isReferenceType())
498  return EmitLoadOfLValue(E);
499  return CGF.EmitObjCMessageExpr(E).getScalarVal();
500  }
501 
502  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
503  LValue LV = CGF.EmitObjCIsaExpr(E);
504  Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
505  return V;
506  }
507 
508  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
509  VersionTuple Version = E->getVersion();
510 
511  // If we're checking for a platform older than our minimum deployment
512  // target, we can fold the check away.
513  if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
514  return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
515 
516  Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
517  llvm::Value *Args[] = {
518  llvm::ConstantInt::get(CGF.CGM.Int32Ty, Version.getMajor()),
519  llvm::ConstantInt::get(CGF.CGM.Int32Ty, Min ? *Min : 0),
520  llvm::ConstantInt::get(CGF.CGM.Int32Ty, SMin ? *SMin : 0),
521  };
522 
523  return CGF.EmitBuiltinAvailable(Args);
524  }
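// E.g. '@available(iOS 11, *)' built with a deployment target of iOS 12 or
// later folds to the constant i1 true; otherwise the (11, 0, 0) version triple
// is passed to the runtime availability check emitted by EmitBuiltinAvailable
// (which lowers to a compiler-rt version-check helper).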
525 
526  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
527  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
528  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
529  Value *VisitMemberExpr(MemberExpr *E);
530  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
531  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
532  return EmitLoadOfLValue(E);
533  }
534 
535  Value *VisitInitListExpr(InitListExpr *E);
536 
537  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
538  assert(CGF.getArrayInitIndex() &&
539  "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
540  return CGF.getArrayInitIndex();
541  }
542 
543  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
544  return EmitNullValue(E->getType());
545  }
546  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
547  CGF.CGM.EmitExplicitCastExprType(E, &CGF);
548  return VisitCastExpr(E);
549  }
550  Value *VisitCastExpr(CastExpr *E);
551 
552  Value *VisitCallExpr(const CallExpr *E) {
553  if (E->getCallReturnType(CGF.getContext())->isReferenceType())
554  return EmitLoadOfLValue(E);
555 
556  Value *V = CGF.EmitCallExpr(E).getScalarVal();
557 
558  EmitLValueAlignmentAssumption(E, V);
559  return V;
560  }
561 
562  Value *VisitStmtExpr(const StmtExpr *E);
563 
564  // Unary Operators.
565  Value *VisitUnaryPostDec(const UnaryOperator *E) {
566  LValue LV = EmitLValue(E->getSubExpr());
567  return EmitScalarPrePostIncDec(E, LV, false, false);
568  }
569  Value *VisitUnaryPostInc(const UnaryOperator *E) {
570  LValue LV = EmitLValue(E->getSubExpr());
571  return EmitScalarPrePostIncDec(E, LV, true, false);
572  }
573  Value *VisitUnaryPreDec(const UnaryOperator *E) {
574  LValue LV = EmitLValue(E->getSubExpr());
575  return EmitScalarPrePostIncDec(E, LV, false, true);
576  }
577  Value *VisitUnaryPreInc(const UnaryOperator *E) {
578  LValue LV = EmitLValue(E->getSubExpr());
579  return EmitScalarPrePostIncDec(E, LV, true, true);
580  }
581 
582  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
583  llvm::Value *InVal,
584  bool IsInc);
585 
586  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
587  bool isInc, bool isPre);
588 
589 
590  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
591  if (isa<MemberPointerType>(E->getType())) // never sugared
592  return CGF.CGM.getMemberPointerConstant(E);
593 
594  return EmitLValue(E->getSubExpr()).getPointer();
595  }
596  Value *VisitUnaryDeref(const UnaryOperator *E) {
597  if (E->getType()->isVoidType())
598  return Visit(E->getSubExpr()); // the actual value should be unused
599  return EmitLoadOfLValue(E);
600  }
601  Value *VisitUnaryPlus(const UnaryOperator *E) {
602  // This differs from gcc, though, most likely due to a bug in gcc.
603  TestAndClearIgnoreResultAssign();
604  return Visit(E->getSubExpr());
605  }
606  Value *VisitUnaryMinus (const UnaryOperator *E);
607  Value *VisitUnaryNot (const UnaryOperator *E);
608  Value *VisitUnaryLNot (const UnaryOperator *E);
609  Value *VisitUnaryReal (const UnaryOperator *E);
610  Value *VisitUnaryImag (const UnaryOperator *E);
611  Value *VisitUnaryExtension(const UnaryOperator *E) {
612  return Visit(E->getSubExpr());
613  }
614 
615  // C++
616  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
617  return EmitLoadOfLValue(E);
618  }
619 
620  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
621  return Visit(DAE->getExpr());
622  }
623  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
624  CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
625  return Visit(DIE->getExpr());
626  }
627  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
628  return CGF.LoadCXXThis();
629  }
630 
631  Value *VisitExprWithCleanups(ExprWithCleanups *E);
632  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
633  return CGF.EmitCXXNewExpr(E);
634  }
635  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
636  CGF.EmitCXXDeleteExpr(E);
637  return nullptr;
638  }
639 
640  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
641  return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
642  }
643 
644  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
645  return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
646  }
647 
648  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
649  return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
650  }
651 
652  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
653  // C++ [expr.pseudo]p1:
654  // The result shall only be used as the operand for the function call
655  // operator (), and the result of such a call has type void. The only
656  // effect is the evaluation of the postfix-expression before the dot or
657  // arrow.
658  CGF.EmitScalarExpr(E->getBase());
659  return nullptr;
660  }
661 
662  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
663  return EmitNullValue(E->getType());
664  }
665 
666  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
667  CGF.EmitCXXThrowExpr(E);
668  return nullptr;
669  }
670 
671  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
672  return Builder.getInt1(E->getValue());
673  }
674 
675  // Binary Operators.
676  Value *EmitMul(const BinOpInfo &Ops) {
677  if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
678  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
679  case LangOptions::SOB_Defined:
680  return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
681  case LangOptions::SOB_Undefined:
682  if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
683  return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
684  LLVM_FALLTHROUGH;
685  case LangOptions::SOB_Trapping:
686  if (CanElideOverflowCheck(CGF.getContext(), Ops))
687  return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
688  return EmitOverflowCheckedBinOp(Ops);
689  }
690  }
691 
692  if (Ops.Ty->isUnsignedIntegerType() &&
693  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
694  !CanElideOverflowCheck(CGF.getContext(), Ops))
695  return EmitOverflowCheckedBinOp(Ops);
696 
697  if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
698  Value *V = Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
699  return propagateFMFlags(V, Ops);
700  }
701  return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
702  }
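// Summary of the dispatch above: with -fwrapv (SOB_Defined) a plain wrapping
// 'mul' is emitted; by default (SOB_Undefined) an NSW mul is used unless
// -fsanitize=signed-integer-overflow is active; with -ftrapv or the sanitizer
// (SOB_Trapping path), EmitOverflowCheckedBinOp emits a checked multiply built
// on the llvm.*mul.with.overflow intrinsics unless CanElideOverflowCheck can
// prove the check unnecessary.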
703  /// Create a binary op that checks for overflow.
704  /// Currently only supports +, - and *.
705  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
706 
707  // Check for undefined division and modulus behaviors.
708  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
709  llvm::Value *Zero,bool isDiv);
710  // Common helper for getting how wide LHS of shift is.
711  static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);
712  Value *EmitDiv(const BinOpInfo &Ops);
713  Value *EmitRem(const BinOpInfo &Ops);
714  Value *EmitAdd(const BinOpInfo &Ops);
715  Value *EmitSub(const BinOpInfo &Ops);
716  Value *EmitShl(const BinOpInfo &Ops);
717  Value *EmitShr(const BinOpInfo &Ops);
718  Value *EmitAnd(const BinOpInfo &Ops) {
719  return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
720  }
721  Value *EmitXor(const BinOpInfo &Ops) {
722  return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
723  }
724  Value *EmitOr (const BinOpInfo &Ops) {
725  return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
726  }
727 
728  BinOpInfo EmitBinOps(const BinaryOperator *E);
729  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
730  Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
731  Value *&Result);
732 
733  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
734  Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
735 
736  // Binary operators and binary compound assignment operators.
737 #define HANDLEBINOP(OP) \
738  Value *VisitBin ## OP(const BinaryOperator *E) { \
739  return Emit ## OP(EmitBinOps(E)); \
740  } \
741  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
742  return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
743  }
744  HANDLEBINOP(Mul)
745  HANDLEBINOP(Div)
746  HANDLEBINOP(Rem)
747  HANDLEBINOP(Add)
748  HANDLEBINOP(Sub)
749  HANDLEBINOP(Shl)
750  HANDLEBINOP(Shr)
751  HANDLEBINOP(And)
752  HANDLEBINOP(Xor)
753  HANDLEBINOP(Or)
754 #undef HANDLEBINOP
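// For instance, HANDLEBINOP(Add) above expands to VisitBinAdd and
// VisitBinAddAssign, routing the plain operator and its compound assignment
// form ('a + b' and 'a += b') through the same EmitAdd helper.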
755 
756  // Comparisons.
757  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
758  llvm::CmpInst::Predicate SICmpOpc,
759  llvm::CmpInst::Predicate FCmpOpc);
760 #define VISITCOMP(CODE, UI, SI, FP) \
761  Value *VisitBin##CODE(const BinaryOperator *E) { \
762  return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
763  llvm::FCmpInst::FP); }
764  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT)
765  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT)
766  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE)
767  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE)
768  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ)
769  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE)
770 #undef VISITCOMP
771 
772  Value *VisitBinAssign (const BinaryOperator *E);
773 
774  Value *VisitBinLAnd (const BinaryOperator *E);
775  Value *VisitBinLOr (const BinaryOperator *E);
776  Value *VisitBinComma (const BinaryOperator *E);
777 
778  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
779  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
780 
781  // Other Operators.
782  Value *VisitBlockExpr(const BlockExpr *BE);
783  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
784  Value *VisitChooseExpr(ChooseExpr *CE);
785  Value *VisitVAArgExpr(VAArgExpr *VE);
786  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
787  return CGF.EmitObjCStringLiteral(E);
788  }
789  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
790  return CGF.EmitObjCBoxedExpr(E);
791  }
792  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
793  return CGF.EmitObjCArrayLiteral(E);
794  }
795  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
796  return CGF.EmitObjCDictionaryLiteral(E);
797  }
798  Value *VisitAsTypeExpr(AsTypeExpr *CE);
799  Value *VisitAtomicExpr(AtomicExpr *AE);
800 };
801 } // end anonymous namespace.
802 
803 //===----------------------------------------------------------------------===//
804 // Utilities
805 //===----------------------------------------------------------------------===//
806 
807 /// EmitConversionToBool - Convert the specified expression value to a
808 /// boolean (i1) truth value. This is equivalent to "Val != 0".
809 Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
810  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
811 
812  if (SrcType->isRealFloatingType())
813  return EmitFloatToBoolConversion(Src);
814 
815  if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
816  return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
817 
818  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
819  "Unknown scalar type to convert");
820 
821  if (isa<llvm::IntegerType>(Src->getType()))
822  return EmitIntToBoolConversion(Src);
823 
824  assert(isa<llvm::PointerType>(Src->getType()));
825  return EmitPointerToBoolConversion(Src, SrcType);
826 }
827 
828 void ScalarExprEmitter::EmitFloatConversionCheck(
829  Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
830  QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
831  CodeGenFunction::SanitizerScope SanScope(&CGF);
832  using llvm::APFloat;
833  using llvm::APSInt;
834 
835  llvm::Type *SrcTy = Src->getType();
836 
837  llvm::Value *Check = nullptr;
838  if (llvm::IntegerType *IntTy = dyn_cast<llvm::IntegerType>(SrcTy)) {
839  // Integer to floating-point. This can fail for unsigned short -> __half
840  // or unsigned __int128 -> float.
841  assert(DstType->isFloatingType());
842  bool SrcIsUnsigned = OrigSrcType->isUnsignedIntegerOrEnumerationType();
843 
844  APFloat LargestFloat =
845  APFloat::getLargest(CGF.getContext().getFloatTypeSemantics(DstType));
846  APSInt LargestInt(IntTy->getBitWidth(), SrcIsUnsigned);
847 
848  bool IsExact;
849  if (LargestFloat.convertToInteger(LargestInt, APFloat::rmTowardZero,
850  &IsExact) != APFloat::opOK)
851  // The range of representable values of this floating point type includes
852  // all values of this integer type. Don't need an overflow check.
853  return;
854 
855  llvm::Value *Max = llvm::ConstantInt::get(VMContext, LargestInt);
856  if (SrcIsUnsigned)
857  Check = Builder.CreateICmpULE(Src, Max);
858  else {
859  llvm::Value *Min = llvm::ConstantInt::get(VMContext, -LargestInt);
860  llvm::Value *GE = Builder.CreateICmpSGE(Src, Min);
861  llvm::Value *LE = Builder.CreateICmpSLE(Src, Max);
862  Check = Builder.CreateAnd(GE, LE);
863  }
864  } else {
865  const llvm::fltSemantics &SrcSema =
866  CGF.getContext().getFloatTypeSemantics(OrigSrcType);
867  if (isa<llvm::IntegerType>(DstTy)) {
868  // Floating-point to integer. This has undefined behavior if the source is
869  // +-Inf, NaN, or doesn't fit into the destination type (after truncation
870  // to an integer).
871  unsigned Width = CGF.getContext().getIntWidth(DstType);
872  bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
873 
874  APSInt Min = APSInt::getMinValue(Width, Unsigned);
875  APFloat MinSrc(SrcSema, APFloat::uninitialized);
876  if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
877  APFloat::opOverflow)
878  // Don't need an overflow check for lower bound. Just check for
879  // -Inf/NaN.
880  MinSrc = APFloat::getInf(SrcSema, true);
881  else
882  // Find the largest value which is too small to represent (before
883  // truncation toward zero).
884  MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
885 
886  APSInt Max = APSInt::getMaxValue(Width, Unsigned);
887  APFloat MaxSrc(SrcSema, APFloat::uninitialized);
888  if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
889  APFloat::opOverflow)
890  // Don't need an overflow check for upper bound. Just check for
891  // +Inf/NaN.
892  MaxSrc = APFloat::getInf(SrcSema, false);
893  else
894  // Find the smallest value which is too large to represent (before
895  // truncation toward zero).
896  MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
897 
898  // If we're converting from __half, convert the range to float to match
899  // the type of src.
900  if (OrigSrcType->isHalfType()) {
901  const llvm::fltSemantics &Sema =
902  CGF.getContext().getFloatTypeSemantics(SrcType);
903  bool IsInexact;
904  MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
905  MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
906  }
907 
908  llvm::Value *GE =
909  Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
910  llvm::Value *LE =
911  Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
912  Check = Builder.CreateAnd(GE, LE);
913  } else {
914  // FIXME: Maybe split this sanitizer out from float-cast-overflow.
915  //
916  // Floating-point to floating-point. This has undefined behavior if the
917  // source is not in the range of representable values of the destination
918  // type. The C and C++ standards are spectacularly unclear here. We
919  // diagnose finite out-of-range conversions, but allow infinities and NaNs
920  // to convert to the corresponding value in the smaller type.
921  //
922  // C11 Annex F gives all such conversions defined behavior for IEC 60559
923  // conforming implementations. Unfortunately, LLVM's fptrunc instruction
924  // does not.
925 
926  // Converting from a lower rank to a higher rank can never have
927  // undefined behavior, since higher-rank types must have a superset
928  // of values of lower-rank types.
929  if (CGF.getContext().getFloatingTypeOrder(OrigSrcType, DstType) != 1)
930  return;
931 
932  assert(!OrigSrcType->isHalfType() &&
933  "should not check conversion from __half, it has the lowest rank");
934 
935  const llvm::fltSemantics &DstSema =
936  CGF.getContext().getFloatTypeSemantics(DstType);
937  APFloat MinBad = APFloat::getLargest(DstSema, false);
938  APFloat MaxBad = APFloat::getInf(DstSema, false);
939 
940  bool IsInexact;
941  MinBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);
942  MaxBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);
943 
944  Value *AbsSrc = CGF.EmitNounwindRuntimeCall(
945  CGF.CGM.getIntrinsic(llvm::Intrinsic::fabs, Src->getType()), Src);
946  llvm::Value *GE =
947  Builder.CreateFCmpOGT(AbsSrc, llvm::ConstantFP::get(VMContext, MinBad));
948  llvm::Value *LE =
949  Builder.CreateFCmpOLT(AbsSrc, llvm::ConstantFP::get(VMContext, MaxBad));
950  Check = Builder.CreateNot(Builder.CreateAnd(GE, LE));
951  }
952  }
953 
954  llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
955  CGF.EmitCheckTypeDescriptor(OrigSrcType),
956  CGF.EmitCheckTypeDescriptor(DstType)};
957  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
958  SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
959 }
960 
961 // Should be called within CodeGenFunction::SanitizerScope RAII scope.
962 // Returns 'i1 false' when the truncation Src -> Dst was lossy.
963 static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
964  std::pair<llvm::Value *, SanitizerMask>>
965 EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
966  QualType DstType, CGBuilderTy &Builder) {
967  llvm::Type *SrcTy = Src->getType();
968  llvm::Type *DstTy = Dst->getType();
969  (void)DstTy; // Only used in assert()
970 
971  // This should be truncation of integral types.
972  assert(Src != Dst);
973  assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
974  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
975  "non-integer llvm type");
976 
977  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
978  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
979 
980  // If both (src and dst) types are unsigned, then it's an unsigned truncation.
981  // Else, it is a signed truncation.
982  ScalarExprEmitter::ImplicitConversionCheckKind Kind;
983  SanitizerMask Mask;
984  if (!SrcSigned && !DstSigned) {
985  Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
986  Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
987  } else {
988  Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
989  Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
990  }
991 
992  llvm::Value *Check = nullptr;
993  // 1. Extend the truncated value back to the same width as the Src.
994  Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
995  // 2. Equality-compare with the original source value
996  Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
997  // If the comparison result is 'i1 false', then the truncation was lossy.
998  return std::make_pair(Kind, std::make_pair(Check, Mask));
999 }
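// Roughly, for an i32 -> i8 truncation this produces:
//   %anyext    = sext i8 %dst to i32      ; or zext, if Dst is unsigned
//   %truncheck = icmp eq i32 %anyext, %src
// and %truncheck being false means bits were discarded.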
1000 
1001 void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1002  Value *Dst, QualType DstType,
1003  SourceLocation Loc) {
1004  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
1005  return;
1006 
1007  // We only care about int->int conversions here.
1008  // We ignore conversions to/from pointer and/or bool.
1009  if (!(SrcType->isIntegerType() && DstType->isIntegerType()))
1010  return;
1011 
1012  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1013  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1014  // This must be truncation. Else we do not care.
1015  if (SrcBits <= DstBits)
1016  return;
1017 
1018  assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1019 
1020  // If the integer sign change sanitizer is enabled,
1021  // and we are truncating from larger unsigned type to smaller signed type,
1022  // let that next sanitizer deal with it.
1023  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1024  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1025  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1026  (!SrcSigned && DstSigned))
1027  return;
1028 
1029  CodeGenFunction::SanitizerScope SanScope(&CGF);
1030 
1031  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1032  std::pair<llvm::Value *, SanitizerMask>>
1033  Check =
1034  EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1035  // If the comparison result is 'i1 false', then the truncation was lossy.
1036 
1037  // Do we care about this type of truncation?
1038  if (!CGF.SanOpts.has(Check.second.second))
1039  return;
1040 
1041  llvm::Constant *StaticArgs[] = {
1042  CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1043  CGF.EmitCheckTypeDescriptor(DstType),
1044  llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first)};
1045  CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
1046  {Src, Dst});
1047 }
1048 
1049 // Should be called within CodeGenFunction::SanitizerScope RAII scope.
1050 // Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1051 static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1052  std::pair<llvm::Value *, SanitizerMask>>
1053 EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1054  QualType DstType, CGBuilderTy &Builder) {
1055  llvm::Type *SrcTy = Src->getType();
1056  llvm::Type *DstTy = Dst->getType();
1057 
1058  assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1059  "non-integer llvm type");
1060 
1061  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1062  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1063  (void)SrcSigned; // Only used in assert()
1064  (void)DstSigned; // Only used in assert()
1065  unsigned SrcBits = SrcTy->getScalarSizeInBits();
1066  unsigned DstBits = DstTy->getScalarSizeInBits();
1067  (void)SrcBits; // Only used in assert()
1068  (void)DstBits; // Only used in assert()
1069 
1070  assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1071  "either the widths should be different, or the signednesses.");
1072 
1073  // NOTE: zero value is considered to be non-negative.
1074  auto EmitIsNegativeTest = [&Builder](Value *V, QualType VType,
1075  const char *Name) -> Value * {
1076  // Is this value a signed type?
1077  bool VSigned = VType->isSignedIntegerOrEnumerationType();
1078  llvm::Type *VTy = V->getType();
1079  if (!VSigned) {
1080  // If the value is unsigned, then it is never negative.
1081  // FIXME: can we encounter non-scalar VTy here?
1082  return llvm::ConstantInt::getFalse(VTy->getContext());
1083  }
1084  // Get the zero of the same type with which we will be comparing.
1085  llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1086  // %V.isnegative = icmp slt %V, 0
1087  // I.e is %V *strictly* less than zero, does it have negative value?
1088  return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1089  llvm::Twine(Name) + "." + V->getName() +
1090  ".negativitycheck");
1091  };
1092 
1093  // 1. Was the old Value negative?
1094  llvm::Value *SrcIsNegative = EmitIsNegativeTest(Src, SrcType, "src");
1095  // 2. Is the new Value negative?
1096  llvm::Value *DstIsNegative = EmitIsNegativeTest(Dst, DstType, "dst");
1097  // 3. Now, was the 'negativity status' preserved during the conversion?
1098  // NOTE: conversion from negative to zero is considered to change the sign.
1099  // (We want to get 'false' when the conversion changed the sign)
1100  // So we should just equality-compare the negativity statuses.
1101  llvm::Value *Check = nullptr;
1102  Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1103  // If the comparison result is 'false', then the conversion changed the sign.
1104  return std::make_pair(
1105  ScalarExprEmitter::ICCK_IntegerSignChange,
1106  std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
1107 }
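// Example: for an implicit 'int -> unsigned' conversion of the value -1, the
// source negativity test ('icmp slt %src, 0') is true while the destination is
// unsigned and therefore never negative, so 'signchangecheck' compares true
// against false and yields i1 false, i.e. the sign changed.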
1108 
1109 void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1110  Value *Dst, QualType DstType,
1111  SourceLocation Loc) {
1112  if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
1113  return;
1114 
1115  llvm::Type *SrcTy = Src->getType();
1116  llvm::Type *DstTy = Dst->getType();
1117 
1118  // We only care about int->int conversions here.
1119  // We ignore conversions to/from pointer and/or bool.
1120  if (!(SrcType->isIntegerType() && DstType->isIntegerType()))
1121  return;
1122 
1123  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1124  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1125  unsigned SrcBits = SrcTy->getScalarSizeInBits();
1126  unsigned DstBits = DstTy->getScalarSizeInBits();
1127 
1128  // Now, we do not need to emit the check in *all* of the cases.
1129  // We can avoid emitting it in some obvious cases where it would always have
1130  // been dropped by the opt passes (instcombine) anyway.
1131  // If it's a cast between effectively the same type, no check.
1132  // NOTE: this is *not* equivalent to checking the canonical types.
1133  if (SrcSigned == DstSigned && SrcBits == DstBits)
1134  return;
1135  // At least one of the values needs to have signed type.
1136  // If both are unsigned, then obviously, neither of them can be negative.
1137  if (!SrcSigned && !DstSigned)
1138  return;
1139  // If the conversion is to *larger* *signed* type, then no check is needed.
1140  // Because either sign-extension happens (so the sign will remain),
1141  // or zero-extension will happen (the sign bit will be zero.)
1142  if ((DstBits > SrcBits) && DstSigned)
1143  return;
1144  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1145  (SrcBits > DstBits) && SrcSigned) {
1146  // If the signed integer truncation sanitizer is enabled,
1147  // and this is a truncation from signed type, then no check is needed.
1148  // Because here sign change check is interchangeable with truncation check.
1149  return;
1150  }
1151  // That's it. We can't rule out any more cases with the data we have.
1152 
1153  CodeGenFunction::SanitizerScope SanScope(&CGF);
1154 
1155  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1156  std::pair<llvm::Value *, SanitizerMask>>
1157  Check;
1158 
1159  // Each of these checks needs to return 'false' when an issue was detected.
1160  ImplicitConversionCheckKind CheckKind;
1161  llvm::SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
1162  // So we can 'and' all the checks together, and still get 'false',
1163  // if at least one of the checks detected an issue.
1164 
1165  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1166  CheckKind = Check.first;
1167  Checks.emplace_back(Check.second);
1168 
1169  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1170  (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1171  // If the signed integer truncation sanitizer was enabled,
1172  // and we are truncating from larger unsigned type to smaller signed type,
1173  // let's handle the case we skipped in that check.
1174  Check =
1175  EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1176  CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1177  Checks.emplace_back(Check.second);
1178  // If the comparison result is 'i1 false', then the truncation was lossy.
1179  }
1180 
1181  llvm::Constant *StaticArgs[] = {
1182  CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1183  CGF.EmitCheckTypeDescriptor(DstType),
1184  llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind)};
1185  // EmitCheck() will 'and' all the checks together.
1186  CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
1187  {Src, Dst});
1188 }
1189 
1190 /// Emit a conversion from the specified type to the specified destination type,
1191 /// both of which are LLVM scalar types.
1192 Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1193  QualType DstType,
1194  SourceLocation Loc,
1195  ScalarConversionOpts Opts) {
1196  // All conversions involving fixed point types should be handled by the
1197  // EmitFixedPoint family functions. This is done to prevent bloating up this
1198  // function more, and although fixed point numbers are represented by
1199  // integers, we do not want to follow any logic that assumes they should be
1200  // treated as integers.
1201  // TODO(leonardchan): When necessary, add another if statement checking for
1202  // conversions to fixed point types from other types.
1203  if (SrcType->isFixedPointType()) {
1204  if (DstType->isFixedPointType()) {
1205  return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1206  } else if (DstType->isBooleanType()) {
1207  // We do not need to check the padding bit on unsigned types if unsigned
1208  // padding is enabled because overflow into this bit is undefined
1209  // behavior.
1210  return Builder.CreateIsNotNull(Src, "tobool");
1211  }
1212 
1213  llvm_unreachable(
1214  "Unhandled scalar conversion involving a fixed point type.");
1215  }
1216 
1217  QualType NoncanonicalSrcType = SrcType;
1218  QualType NoncanonicalDstType = DstType;
1219 
1220  SrcType = CGF.getContext().getCanonicalType(SrcType);
1221  DstType = CGF.getContext().getCanonicalType(DstType);
1222  if (SrcType == DstType) return Src;
1223 
1224  if (DstType->isVoidType()) return nullptr;
1225 
1226  llvm::Value *OrigSrc = Src;
1227  QualType OrigSrcType = SrcType;
1228  llvm::Type *SrcTy = Src->getType();
1229 
1230  // Handle conversions to bool first, they are special: comparisons against 0.
1231  if (DstType->isBooleanType())
1232  return EmitConversionToBool(Src, SrcType);
1233 
1234  llvm::Type *DstTy = ConvertType(DstType);
1235 
1236  // Cast from half through float if half isn't a native type.
1237  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1238  // Cast to FP using the intrinsic if the half type itself isn't supported.
1239  if (DstTy->isFloatingPointTy()) {
1240  if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1241  return Builder.CreateCall(
1242  CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1243  Src);
1244  } else {
1245  // Cast to other types through float, using either the intrinsic or FPExt,
1246  // depending on whether the half type itself is supported
1247  // (as opposed to operations on half, available with NativeHalfType).
1248  if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1249  Src = Builder.CreateCall(
1250  CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1251  CGF.CGM.FloatTy),
1252  Src);
1253  } else {
1254  Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1255  }
1256  SrcType = CGF.getContext().FloatTy;
1257  SrcTy = CGF.FloatTy;
1258  }
1259  }
1260 
1261  // Ignore conversions like int -> uint.
1262  if (SrcTy == DstTy) {
1263  if (Opts.EmitImplicitIntegerSignChangeChecks)
1264  EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1265  NoncanonicalDstType, Loc);
1266 
1267  return Src;
1268  }
1269 
1270  // Handle pointer conversions next: pointers can only be converted to/from
1271  // other pointers and integers. Check for pointer types in terms of LLVM, as
1272  // some native types (like Obj-C id) may map to a pointer type.
1273  if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1274  // The source value may be an integer, or a pointer.
1275  if (isa<llvm::PointerType>(SrcTy))
1276  return Builder.CreateBitCast(Src, DstTy, "conv");
1277 
1278  assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1279  // First, convert to the correct width so that we control the kind of
1280  // extension.
1281  llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1282  bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1283  llvm::Value* IntResult =
1284  Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1285  // Then, cast to pointer.
1286  return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1287  }
1288 
1289  if (isa<llvm::PointerType>(SrcTy)) {
1290  // Must be a ptr to int cast.
1291  assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1292  return Builder.CreatePtrToInt(Src, DstTy, "conv");
1293  }
1294 
1295  // A scalar can be splatted to an extended vector of the same element type
1296  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1297  // Sema should add casts to make sure that the source expression's type is
1298  // the same as the vector's element type (sans qualifiers)
1299  assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1300  SrcType.getTypePtr() &&
1301  "Splatted expr doesn't match with vector element type?");
1302 
1303  // Splat the element across to all elements
1304  unsigned NumElements = DstTy->getVectorNumElements();
1305  return Builder.CreateVectorSplat(NumElements, Src, "splat");
1306  }
1307 
1308  if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1309  // Allow bitcast from vector to integer/fp of the same size.
1310  unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
1311  unsigned DstSize = DstTy->getPrimitiveSizeInBits();
1312  if (SrcSize == DstSize)
1313  return Builder.CreateBitCast(Src, DstTy, "conv");
1314 
1315  // Conversions between vectors of different sizes are not allowed except
1316  // when vectors of half are involved. Operations on storage-only half
1317  // vectors require promoting half vector operands to float vectors and
1318  // truncating the result, which is either an int or float vector, to a
1319  // short or half vector.
1320 
1321  // Source and destination are both expected to be vectors.
1322  llvm::Type *SrcElementTy = SrcTy->getVectorElementType();
1323  llvm::Type *DstElementTy = DstTy->getVectorElementType();
1324  (void)DstElementTy;
1325 
1326  assert(((SrcElementTy->isIntegerTy() &&
1327  DstElementTy->isIntegerTy()) ||
1328  (SrcElementTy->isFloatingPointTy() &&
1329  DstElementTy->isFloatingPointTy())) &&
1330  "unexpected conversion between a floating-point vector and an "
1331  "integer vector");
1332 
1333  // Truncate an i32 vector to an i16 vector.
1334  if (SrcElementTy->isIntegerTy())
1335  return Builder.CreateIntCast(Src, DstTy, false, "conv");
1336 
1337  // Truncate a float vector to a half vector.
1338  if (SrcSize > DstSize)
1339  return Builder.CreateFPTrunc(Src, DstTy, "conv");
1340 
1341  // Promote a half vector to a float vector.
1342  return Builder.CreateFPExt(Src, DstTy, "conv");
1343  }
1344 
1345  // Finally, we have the arithmetic types: real int/float.
1346  Value *Res = nullptr;
1347  llvm::Type *ResTy = DstTy;
1348 
1349  // An overflowing conversion has undefined behavior if either the source type
1350  // or the destination type is a floating-point type.
1351  if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1352  (OrigSrcType->isFloatingType() || DstType->isFloatingType()))
1353  EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1354  Loc);
1355 
1356  // Cast to half through float if half isn't a native type.
1357  if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1358  // Make sure we cast in a single step if from another FP type.
1359  if (SrcTy->isFloatingPointTy()) {
1360  // Use the intrinsic if the half type itself isn't supported
1361  // (as opposed to operations on half, available with NativeHalfType).
1362  if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics())
1363  return Builder.CreateCall(
1364  CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1365  // If the half type is supported, just use an fptrunc.
1366  return Builder.CreateFPTrunc(Src, DstTy);
1367  }
1368  DstTy = CGF.FloatTy;
1369  }
1370 
1371  if (isa<llvm::IntegerType>(SrcTy)) {
1372  bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1373  if (SrcType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1374  InputSigned = true;
1375  }
1376  if (isa<llvm::IntegerType>(DstTy))
1377  Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1378  else if (InputSigned)
1379  Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1380  else
1381  Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1382  } else if (isa<llvm::IntegerType>(DstTy)) {
1383  assert(SrcTy->isFloatingPointTy() && "Unknown real conversion");
1384  if (DstType->isSignedIntegerOrEnumerationType())
1385  Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1386  else
1387  Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1388  } else {
1389  assert(SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() &&
1390  "Unknown real conversion");
1391  if (DstTy->getTypeID() < SrcTy->getTypeID())
1392  Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1393  else
1394  Res = Builder.CreateFPExt(Src, DstTy, "conv");
1395  }
1396 
1397  if (DstTy != ResTy) {
1398  if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1399  assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1400  Res = Builder.CreateCall(
1401  CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1402  Res);
1403  } else {
1404  Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1405  }
1406  }
1407 
1408  if (Opts.EmitImplicitIntegerTruncationChecks)
1409  EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1410  NoncanonicalDstType, Loc);
1411 
1412  if (Opts.EmitImplicitIntegerSignChangeChecks)
1413  EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1414  NoncanonicalDstType, Loc);
1415 
1416  return Res;
1417 }
1418 
1419 Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1420  QualType DstTy,
1421  SourceLocation Loc) {
1422  using llvm::APInt;
1423  using llvm::ConstantInt;
1424  using llvm::Value;
1425 
1426  assert(SrcTy->isFixedPointType());
1427  assert(DstTy->isFixedPointType());
1428 
1429  FixedPointSemantics SrcFPSema =
1430  CGF.getContext().getFixedPointSemantics(SrcTy);
1431  FixedPointSemantics DstFPSema =
1432  CGF.getContext().getFixedPointSemantics(DstTy);
1433  unsigned SrcWidth = SrcFPSema.getWidth();
1434  unsigned DstWidth = DstFPSema.getWidth();
1435  unsigned SrcScale = SrcFPSema.getScale();
1436  unsigned DstScale = DstFPSema.getScale();
1437  bool SrcIsSigned = SrcFPSema.isSigned();
1438  bool DstIsSigned = DstFPSema.isSigned();
1439 
1440  llvm::Type *DstIntTy = Builder.getIntNTy(DstWidth);
1441 
1442  Value *Result = Src;
1443  unsigned ResultWidth = SrcWidth;
1444 
1445  if (!DstFPSema.isSaturated()) {
1446  // Downscale.
1447  if (DstScale < SrcScale)
1448  Result = SrcIsSigned ?
1449  Builder.CreateAShr(Result, SrcScale - DstScale, "downscale") :
1450  Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");
1451 
1452  // Resize.
1453  Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
1454 
1455  // Upscale.
1456  if (DstScale > SrcScale)
1457  Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
1458  } else {
1459  // Adjust the number of fractional bits.
1460  if (DstScale > SrcScale) {
1461  ResultWidth = SrcWidth + DstScale - SrcScale;
1462  llvm::Type *UpscaledTy = Builder.getIntNTy(ResultWidth);
1463  Result = Builder.CreateIntCast(Result, UpscaledTy, SrcIsSigned, "resize");
1464  Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
1465  } else if (DstScale < SrcScale) {
1466  Result = SrcIsSigned ?
1467  Builder.CreateAShr(Result, SrcScale - DstScale, "downscale") :
1468  Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");
1469  }
1470 
1471  // Handle saturation.
1472  bool LessIntBits = DstFPSema.getIntegralBits() < SrcFPSema.getIntegralBits();
1473  if (LessIntBits) {
1474  Value *Max = ConstantInt::get(
1475  CGF.getLLVMContext(),
1476  APFixedPoint::getMax(DstFPSema).getValue().extOrTrunc(ResultWidth));
1477  Value *TooHigh = SrcIsSigned ? Builder.CreateICmpSGT(Result, Max)
1478  : Builder.CreateICmpUGT(Result, Max);
1479  Result = Builder.CreateSelect(TooHigh, Max, Result, "satmax");
1480  }
1481  // Cannot overflow min to dest type if src is unsigned since all fixed
1482  // point types can cover the unsigned min of 0.
1483  if (SrcIsSigned && (LessIntBits || !DstIsSigned)) {
1484  Value *Min = ConstantInt::get(
1485  CGF.getLLVMContext(),
1486  APFixedPoint::getMin(DstFPSema).getValue().extOrTrunc(ResultWidth));
1487  Value *TooLow = Builder.CreateICmpSLT(Result, Min);
1488  Result = Builder.CreateSelect(TooLow, Min, Result, "satmin");
1489  }
1490 
1491  // Resize the integer part to get the final destination size.
1492  Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
1493  }
1494  return Result;
1495 }
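// --- Editorial illustration, not part of the original CGExprScalar.cpp -----
// A minimal sketch of the non-saturating rescale performed above, written as
// plain C++ over explicit widths and scales; the concrete widths and scales
// are made up for the example and do not come from any particular target.
static int rescaleFixedPoint(short Src, unsigned SrcScale, unsigned DstScale) {
  int Result = Src;                      // "resize" (sign extension)
  if (DstScale > SrcScale)
    Result <<= (DstScale - SrcScale);    // "upscale"
  else if (SrcScale > DstScale)
    Result >>= (SrcScale - DstScale);    // "downscale" (arithmetic shift)
  return Result;
}
// Example: rescaleFixedPoint(0x0080 /* 1.0 at scale 7 */, 7, 15) == 0x8000.
// ----------------------------------------------------------------------------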
1496 
1497 /// Emit a conversion from the specified complex type to the specified
1498 /// destination type, where the destination type is an LLVM scalar type.
1499 Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1500  CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1501  SourceLocation Loc) {
1502  // Get the source element type.
1503  SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1504 
1505  // Handle conversions to bool first, they are special: comparisons against 0.
1506  if (DstTy->isBooleanType()) {
1507  // Complex != 0 -> (Real != 0) | (Imag != 0)
1508  Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1509  Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1510  return Builder.CreateOr(Src.first, Src.second, "tobool");
1511  }
1512 
1513  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1514  // the imaginary part of the complex value is discarded and the value of the
1515  // real part is converted according to the conversion rules for the
1516  // corresponding real type."
1517  return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1518 }
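// --- Editorial illustration, not part of the original CGExprScalar.cpp -----
// The two paths above, restated in plain C++ (std::complex is only a stand-in
// for _Complex): complex -> bool tests both parts against zero, while
// complex -> real keeps only the real part (C99 6.3.1.7p2).
#include <complex>
static bool complexToBool(std::complex<double> Z) {
  return Z.real() != 0.0 || Z.imag() != 0.0;   // (Real != 0) | (Imag != 0)
}
static double complexToReal(std::complex<double> Z) {
  return Z.real();                             // imaginary part discarded
}
// ----------------------------------------------------------------------------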
1519 
1520 Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1521  return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1522 }
1523 
1524 /// Emit a sanitization check for the given "binary" operation (which
1525 /// might actually be a unary increment which has been lowered to a binary
1526 /// operation). The check passes if all values in \p Checks (which are \c i1)
1527 /// are \c true.
1528 void ScalarExprEmitter::EmitBinOpCheck(
1529  ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1530  assert(CGF.IsSanitizerScope);
1531  SanitizerHandler Check;
1532  SmallVector<llvm::Constant *, 4> StaticData;
1533  SmallVector<llvm::Value *, 2> DynamicData;
1534 
1535  BinaryOperatorKind Opcode = Info.Opcode;
1536  if (BinaryOperator::isCompoundAssignmentOp(Opcode))
1537  Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);
1538 
1539  StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1540  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1541  if (UO && UO->getOpcode() == UO_Minus) {
1542  Check = SanitizerHandler::NegateOverflow;
1543  StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1544  DynamicData.push_back(Info.RHS);
1545  } else {
1546  if (BinaryOperator::isShiftOp(Opcode)) {
1547  // Shift LHS negative or too large, or RHS out of bounds.
1548  Check = SanitizerHandler::ShiftOutOfBounds;
1549  const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1550  StaticData.push_back(
1551  CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1552  StaticData.push_back(
1553  CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1554  } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1555  // Divide or modulo by zero, or signed overflow (e.g. INT_MIN / -1).
1556  Check = SanitizerHandler::DivremOverflow;
1557  StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1558  } else {
1559  // Arithmetic overflow (+, -, *).
1560  switch (Opcode) {
1561  case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1562  case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1563  case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1564  default: llvm_unreachable("unexpected opcode for bin op check");
1565  }
1566  StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1567  }
1568  DynamicData.push_back(Info.LHS);
1569  DynamicData.push_back(Info.RHS);
1570  }
1571 
1572  CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
1573 }
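// --- Editorial illustration, not part of the original CGExprScalar.cpp -----
// A sketch of source operations that select the handlers above when the usual
// UBSan groups (signed-integer-overflow, shift, integer-divide-by-zero) are
// enabled; treat the exact flag spellings as assumptions of this example.
static int binOpCheckExamples(int x, int y) {
  int a = -x;       // NegateOverflow      (x == INT_MIN)
  int b = x << y;   // ShiftOutOfBounds    (y out of range, or x negative)
  int c = x / y;    // DivremOverflow      (y == 0, or INT_MIN / -1)
  return a + b + c; // AddOverflow for the additions
}
// ----------------------------------------------------------------------------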
1574 
1575 //===----------------------------------------------------------------------===//
1576 // Visitor Methods
1577 //===----------------------------------------------------------------------===//
1578 
1579 Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1580  CGF.ErrorUnsupported(E, "scalar expression");
1581  if (E->getType()->isVoidType())
1582  return nullptr;
1583  return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1584 }
1585 
1586 Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1587  // Vector Mask Case
1588  if (E->getNumSubExprs() == 2) {
1589  Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1590  Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1591  Value *Mask;
1592 
1593  llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
1594  unsigned LHSElts = LTy->getNumElements();
1595 
1596  Mask = RHS;
1597 
1598  llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
1599 
1600  // Mask off the high bits of each shuffle index.
1601  Value *MaskBits =
1602  llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1603  Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1604 
1605  // newv = undef
1606  // mask = mask & maskbits
1607  // for each elt
1608  // n = extract mask i
1609  // x = extract val n
1610  // newv = insert newv, x, i
1611  llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
1612  MTy->getNumElements());
1613  Value* NewV = llvm::UndefValue::get(RTy);
1614  for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1615  Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1616  Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1617 
1618  Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1619  NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1620  }
1621  return NewV;
1622  }
1623 
1624  Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1625  Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1626 
1627  SmallVector<llvm::Constant*, 32> indices;
1628  for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1629  llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
1630  // Check for -1 and output it as undef in the IR.
1631  if (Idx.isSigned() && Idx.isAllOnesValue())
1632  indices.push_back(llvm::UndefValue::get(CGF.Int32Ty));
1633  else
1634  indices.push_back(Builder.getInt32(Idx.getZExtValue()));
1635  }
1636 
1637  Value *SV = llvm::ConstantVector::get(indices);
1638  return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
1639 }
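// --- Editorial illustration, not part of the original CGExprScalar.cpp -----
// The two forms handled above, as written with clang's vector extensions. The
// typedef is made up, and the two-argument variable-mask spelling (the
// "Vector Mask Case" above) should be treated as an assumption of the sketch.
typedef int shuf_v4i __attribute__((ext_vector_type(4)));
static shuf_v4i shuffleExamples(shuf_v4i a, shuf_v4i b, shuf_v4i mask) {
  shuf_v4i byConstant = __builtin_shufflevector(a, b, 3, 2, 5, 7); // constant indices
  shuf_v4i byMask     = __builtin_shufflevector(a, mask);          // vector mask case
  return byConstant + byMask;
}
// ----------------------------------------------------------------------------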
1640 
1641 Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1642  QualType SrcType = E->getSrcExpr()->getType(),
1643  DstType = E->getType();
1644 
1645  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1646 
1647  SrcType = CGF.getContext().getCanonicalType(SrcType);
1648  DstType = CGF.getContext().getCanonicalType(DstType);
1649  if (SrcType == DstType) return Src;
1650 
1651  assert(SrcType->isVectorType() &&
1652  "ConvertVector source type must be a vector");
1653  assert(DstType->isVectorType() &&
1654  "ConvertVector destination type must be a vector");
1655 
1656  llvm::Type *SrcTy = Src->getType();
1657  llvm::Type *DstTy = ConvertType(DstType);
1658 
1659  // Ignore conversions like int -> uint.
1660  if (SrcTy == DstTy)
1661  return Src;
1662 
1663  QualType SrcEltType = SrcType->getAs<VectorType>()->getElementType(),
1664  DstEltType = DstType->getAs<VectorType>()->getElementType();
1665 
1666  assert(SrcTy->isVectorTy() &&
1667  "ConvertVector source IR type must be a vector");
1668  assert(DstTy->isVectorTy() &&
1669  "ConvertVector destination IR type must be a vector");
1670 
1671  llvm::Type *SrcEltTy = SrcTy->getVectorElementType(),
1672  *DstEltTy = DstTy->getVectorElementType();
1673 
1674  if (DstEltType->isBooleanType()) {
1675  assert((SrcEltTy->isFloatingPointTy() ||
1676  isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1677 
1678  llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
1679  if (SrcEltTy->isFloatingPointTy()) {
1680  return Builder.CreateFCmpUNE(Src, Zero, "tobool");
1681  } else {
1682  return Builder.CreateICmpNE(Src, Zero, "tobool");
1683  }
1684  }
1685 
1686  // We have the arithmetic types: real int/float.
1687  Value *Res = nullptr;
1688 
1689  if (isa<llvm::IntegerType>(SrcEltTy)) {
1690  bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1691  if (isa<llvm::IntegerType>(DstEltTy))
1692  Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1693  else if (InputSigned)
1694  Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1695  else
1696  Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1697  } else if (isa<llvm::IntegerType>(DstEltTy)) {
1698  assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1699  if (DstEltType->isSignedIntegerOrEnumerationType())
1700  Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1701  else
1702  Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1703  } else {
1704  assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1705  "Unknown real conversion");
1706  if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1707  Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1708  else
1709  Res = Builder.CreateFPExt(Src, DstTy, "conv");
1710  }
1711 
1712  return Res;
1713 }
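// --- Editorial illustration, not part of the original CGExprScalar.cpp -----
// __builtin_convertvector performs the element-wise conversions dispatched
// above (int<->int, int<->float, float<->float); the typedefs are assumptions.
typedef int   cv_v4i __attribute__((ext_vector_type(4)));
typedef float cv_v4f __attribute__((ext_vector_type(4)));
static cv_v4f convertVectorExample(cv_v4i ints) {
  return __builtin_convertvector(ints, cv_v4f);  // sitofp on each element
}
// ----------------------------------------------------------------------------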
1714 
1715 Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1716  if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
1717  CGF.EmitIgnoredExpr(E->getBase());
1718  return CGF.emitScalarConstant(Constant, E);
1719  } else {
1720  Expr::EvalResult Result;
1721  if (E->EvaluateAsInt(Result, CGF.getContext(), Expr::SE_AllowSideEffects)) {
1722  llvm::APSInt Value = Result.Val.getInt();
1723  CGF.EmitIgnoredExpr(E->getBase());
1724  return Builder.getInt(Value);
1725  }
1726  }
1727 
1728  return EmitLoadOfLValue(E);
1729 }
1730 
1731 Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1732  TestAndClearIgnoreResultAssign();
1733 
1734  // Emit subscript expressions in rvalue contexts. For most cases, this just
1735  // loads the lvalue formed by the subscript expr. However, we have to be
1736  // careful, because the base of a vector subscript is occasionally an rvalue,
1737  // so we can't get it as an lvalue.
1738  if (!E->getBase()->getType()->isVectorType())
1739  return EmitLoadOfLValue(E);
1740 
1741  // Handle the vector case. The base must be a vector, the index must be an
1742  // integer value.
1743  Value *Base = Visit(E->getBase());
1744  Value *Idx = Visit(E->getIdx());
1745  QualType IdxTy = E->getIdx()->getType();
1746 
1747  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1748  CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1749 
1750  return Builder.CreateExtractElement(Base, Idx, "vecext");
1751 }
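// --- Editorial illustration, not part of the original CGExprScalar.cpp -----
// The special case above: subscripting a vector rvalue cannot go through an
// lvalue, so it is emitted as a direct extractelement. The typedef is an
// assumption (clang ext_vector_type extension).
typedef int sub_v4i __attribute__((ext_vector_type(4)));
static int vectorSubscriptExample(sub_v4i a, sub_v4i b, int i) {
  return (a + b)[i];   // rvalue vector base -> "vecext" extractelement
}
// ----------------------------------------------------------------------------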
1752 
1753 static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1754  unsigned Off, llvm::Type *I32Ty) {
1755  int MV = SVI->getMaskValue(Idx);
1756  if (MV == -1)
1757  return llvm::UndefValue::get(I32Ty);
1758  return llvm::ConstantInt::get(I32Ty, Off+MV);
1759 }
1760 
1761 static llvm::Constant *getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1762  if (C->getBitWidth() != 32) {
1763  assert(llvm::ConstantInt::isValueValidForType(I32Ty,
1764  C->getZExtValue()) &&
1765  "Index operand too large for shufflevector mask!");
1766  return llvm::ConstantInt::get(I32Ty, C->getZExtValue());
1767  }
1768  return C;
1769 }
1770 
1771 Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
1772  bool Ignore = TestAndClearIgnoreResultAssign();
1773  (void)Ignore;
1774  assert (Ignore == false && "init list ignored");
1775  unsigned NumInitElements = E->getNumInits();
1776 
1777  if (E->hadArrayRangeDesignator())
1778  CGF.ErrorUnsupported(E, "GNU array range designator extension");
1779 
1780  llvm::VectorType *VType =
1781  dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
1782 
1783  if (!VType) {
1784  if (NumInitElements == 0) {
1785  // C++11 value-initialization for the scalar.
1786  return EmitNullValue(E->getType());
1787  }
1788  // We have a scalar in braces. Just use the first element.
1789  return Visit(E->getInit(0));
1790  }
1791 
1792  unsigned ResElts = VType->getNumElements();
1793 
1794  // Loop over initializers collecting the Value for each, and remembering
1795  // whether the source was a swizzle (ExtVectorElementExpr). This will allow
1796  // us to fold the shuffle for the swizzle into the shuffle for the vector
1797  // initializer, since LLVM optimizers generally do not want to touch
1798  // shuffles.
1799  unsigned CurIdx = 0;
1800  bool VIsUndefShuffle = false;
1801  llvm::Value *V = llvm::UndefValue::get(VType);
1802  for (unsigned i = 0; i != NumInitElements; ++i) {
1803  Expr *IE = E->getInit(i);
1804  Value *Init = Visit(IE);
1805  SmallVector<llvm::Constant*, 16> Args;
1806 
1807  llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
1808 
1809  // Handle scalar elements. If the scalar initializer is actually one
1810  // element of a different vector of the same width, use shuffle instead of
1811  // extract+insert.
1812  if (!VVT) {
1813  if (isa<ExtVectorElementExpr>(IE)) {
1814  llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
1815 
1816  if (EI->getVectorOperandType()->getNumElements() == ResElts) {
1817  llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
1818  Value *LHS = nullptr, *RHS = nullptr;
1819  if (CurIdx == 0) {
1820  // insert into undef -> shuffle (src, undef)
1821  // shufflemask must use an i32
1822  Args.push_back(getAsInt32(C, CGF.Int32Ty));
1823  Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1824 
1825  LHS = EI->getVectorOperand();
1826  RHS = V;
1827  VIsUndefShuffle = true;
1828  } else if (VIsUndefShuffle) {
1829  // insert into undefshuffle && size match -> shuffle (v, src)
1830  llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
1831  for (unsigned j = 0; j != CurIdx; ++j)
1832  Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
1833  Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
1834  Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1835 
1836  LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1837  RHS = EI->getVectorOperand();
1838  VIsUndefShuffle = false;
1839  }
1840  if (!Args.empty()) {
1841  llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1842  V = Builder.CreateShuffleVector(LHS, RHS, Mask);
1843  ++CurIdx;
1844  continue;
1845  }
1846  }
1847  }
1848  V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
1849  "vecinit");
1850  VIsUndefShuffle = false;
1851  ++CurIdx;
1852  continue;
1853  }
1854 
1855  unsigned InitElts = VVT->getNumElements();
1856 
1857  // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
1858  // input is the same width as the vector being constructed, generate an
1859  // optimized shuffle of the swizzle input into the result.
1860  unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
1861  if (isa<ExtVectorElementExpr>(IE)) {
1862  llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
1863  Value *SVOp = SVI->getOperand(0);
1864  llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
1865 
1866  if (OpTy->getNumElements() == ResElts) {
1867  for (unsigned j = 0; j != CurIdx; ++j) {
1868  // If the current vector initializer is a shuffle with undef, merge
1869  // this shuffle directly into it.
1870  if (VIsUndefShuffle) {
1871  Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
1872  CGF.Int32Ty));
1873  } else {
1874  Args.push_back(Builder.getInt32(j));
1875  }
1876  }
1877  for (unsigned j = 0, je = InitElts; j != je; ++j)
1878  Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
1879  Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1880 
1881  if (VIsUndefShuffle)
1882  V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
1883 
1884  Init = SVOp;
1885  }
1886  }
1887 
1888  // Extend init to result vector length, and then shuffle its contribution
1889  // to the vector initializer into V.
1890  if (Args.empty()) {
1891  for (unsigned j = 0; j != InitElts; ++j)
1892  Args.push_back(Builder.getInt32(j));
1893  Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1894  llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1895  Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
1896  Mask, "vext");
1897 
1898  Args.clear();
1899  for (unsigned j = 0; j != CurIdx; ++j)
1900  Args.push_back(Builder.getInt32(j));
1901  for (unsigned j = 0; j != InitElts; ++j)
1902  Args.push_back(Builder.getInt32(j+Offset));
1903  Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
1904  }
1905 
1906  // If V is undef, make sure it ends up on the RHS of the shuffle to aid
1907  // merging subsequent shuffles into this one.
1908  if (CurIdx == 0)
1909  std::swap(V, Init);
1910  llvm::Constant *Mask = llvm::ConstantVector::get(Args);
1911  V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
1912  VIsUndefShuffle = isa<llvm::UndefValue>(Init);
1913  CurIdx += InitElts;
1914  }
1915 
1916  // FIXME: evaluate codegen vs. shuffling against constant null vector.
1918  llvm::Type *EltTy = VType->getElementType();
1919 
1920  // Emit remaining default initializers
1921  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
1922  Value *Idx = Builder.getInt32(CurIdx);
1923  llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
1924  V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
1925  }
1926  return V;
1927 }
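// --- Editorial illustration, not part of the original CGExprScalar.cpp -----
// A vector initializer that exercises the loop above; the typedef is an
// assumption (clang ext_vector_type extension). If fewer initializers than
// elements are given, the trailing elements fall through to the "remaining
// default initializers" loop and become zero; an element spelled like src.xy
// whose source has the full result width would take the swizzle-folding path.
typedef float il_v4f __attribute__((ext_vector_type(4)));
static il_v4f initListExample(float a, float b) {
  il_v4f v = {a, b, 0.0f, 1.0f};   // each scalar becomes an insertelement/shuffle
  return v;
}
// ----------------------------------------------------------------------------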
1928 
1929 static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
1930  const Expr *E = CE->getSubExpr();
1931 
1932  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
1933  return false;
1934 
1935  if (isa<CXXThisExpr>(E->IgnoreParens())) {
1936  // We always assume that 'this' is never null.
1937  return false;
1938  }
1939 
1940  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
1941  // And that glvalue casts are never null.
1942  if (ICE->getValueKind() != VK_RValue)
1943  return false;
1944  }
1945 
1946  return true;
1947 }
1948 
1949 // VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
1950 // have to handle a broader range of conversions than explicit casts, as they
1951 // handle things like function to ptr-to-function decay etc.
1952 Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
1953  Expr *E = CE->getSubExpr();
1954  QualType DestTy = CE->getType();
1955  CastKind Kind = CE->getCastKind();
1956 
1957  // These cases are generally not written to ignore the result of
1958  // evaluating their sub-expressions, so we clear this now.
1959  bool Ignored = TestAndClearIgnoreResultAssign();
1960 
1961  // Since almost all cast kinds apply to scalars, this switch doesn't have
1962  // a default case, so the compiler will warn on a missing case. The cases
1963  // are in the same order as in the CastKind enum.
1964  switch (Kind) {
1965  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
1966  case CK_BuiltinFnToFnPtr:
1967  llvm_unreachable("builtin functions are handled elsewhere");
1968 
1969  case CK_LValueBitCast:
1970  case CK_ObjCObjectLValueCast: {
1971  Address Addr = EmitLValue(E).getAddress();
1972  Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
1973  LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
1974  return EmitLoadOfLValue(LV, CE->getExprLoc());
1975  }
1976 
1977  case CK_CPointerToObjCPointerCast:
1978  case CK_BlockPointerToObjCPointerCast:
1979  case CK_AnyPointerToBlockPointerCast:
1980  case CK_BitCast: {
1981  Value *Src = Visit(const_cast<Expr*>(E));
1982  llvm::Type *SrcTy = Src->getType();
1983  llvm::Type *DstTy = ConvertType(DestTy);
1984  if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
1985  SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
1986  llvm_unreachable("wrong cast for pointers in different address spaces "
1987  "(must be an address space cast)!");
1988  }
1989 
1990  if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
1991  if (auto PT = DestTy->getAs<PointerType>())
1992  CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
1993  /*MayBeNull=*/true,
1994  CodeGenFunction::CFITCK_UnrelatedCast,
1995  CE->getBeginLoc());
1996  }
1997 
1998  if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
1999  const QualType SrcType = E->getType();
2000 
2001  if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2002  // Casting to a pointer that could carry dynamic information (provided by
2003  // invariant.group) requires a launder.
2004  Src = Builder.CreateLaunderInvariantGroup(Src);
2005  } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2006  // Casting to a pointer that does not carry dynamic information (provided
2007  // by invariant.group) requires stripping it. Note that we do not also
2008  // strip in the opposite case (source cannot be dynamic, destination can
2009  // be): the launder emitted there already covers it, because
2010  // launder(strip(src)) == launder(src), so there is no need to add an
2011  // extra strip before the launder.
2012  Src = Builder.CreateStripInvariantGroup(Src);
2013  }
2014  }
2015 
2016  return Builder.CreateBitCast(Src, DstTy);
2017  }
2018  case CK_AddressSpaceConversion: {
2019  Expr::EvalResult Result;
2020  if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2021  Result.Val.isNullPointer()) {
2022  // If E has side effects, it is emitted even if its final result is a
2023  // null pointer. In that case, a DCE pass should be able to
2024  // eliminate the useless instructions emitted while translating E.
2025  if (Result.HasSideEffects)
2026  Visit(E);
2027  return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
2028  ConvertType(DestTy)), DestTy);
2029  }
2030  // Since target may map different address spaces in AST to the same address
2031  // space, an address space conversion may end up as a bitcast.
2032  return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
2033  CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2034  DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
2035  }
2036  case CK_AtomicToNonAtomic:
2037  case CK_NonAtomicToAtomic:
2038  case CK_NoOp:
2039  case CK_UserDefinedConversion:
2040  return Visit(const_cast<Expr*>(E));
2041 
2042  case CK_BaseToDerived: {
2043  const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2044  assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2045 
2046  Address Base = CGF.EmitPointerWithAlignment(E);
2047  Address Derived =
2048  CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2049  CE->path_begin(), CE->path_end(),
2050  ShouldNullCheckClassCastValue(CE));
2051 
2052  // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2053  // performed and the object is not of the derived type.
2054  if (CGF.sanitizePerformTypeCheck())
2055  CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
2056  Derived.getPointer(), DestTy->getPointeeType());
2057 
2058  if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2059  CGF.EmitVTablePtrCheckForCast(
2060  DestTy->getPointeeType(), Derived.getPointer(),
2061  /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
2062  CE->getBeginLoc());
2063 
2064  return Derived.getPointer();
2065  }
2066  case CK_UncheckedDerivedToBase:
2067  case CK_DerivedToBase: {
2068  // The EmitPointerWithAlignment path does this fine; just discard
2069  // the alignment.
2070  return CGF.EmitPointerWithAlignment(CE).getPointer();
2071  }
2072 
2073  case CK_Dynamic: {
2074  Address V = CGF.EmitPointerWithAlignment(E);
2075  const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2076  return CGF.EmitDynamicCast(V, DCE);
2077  }
2078 
2079  case CK_ArrayToPointerDecay:
2080  return CGF.EmitArrayToPointerDecay(E).getPointer();
2081  case CK_FunctionToPointerDecay:
2082  return EmitLValue(E).getPointer();
2083 
2084  case CK_NullToPointer:
2085  if (MustVisitNullValue(E))
2086  (void) Visit(E);
2087 
2088  return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2089  DestTy);
2090 
2091  case CK_NullToMemberPointer: {
2092  if (MustVisitNullValue(E))
2093  (void) Visit(E);
2094 
2095  const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2096  return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2097  }
2098 
2099  case CK_ReinterpretMemberPointer:
2100  case CK_BaseToDerivedMemberPointer:
2101  case CK_DerivedToBaseMemberPointer: {
2102  Value *Src = Visit(E);
2103 
2104  // Note that the AST doesn't distinguish between checked and
2105  // unchecked member pointer conversions, so we always have to
2106  // implement checked conversions here. This is inefficient when
2107  // actual control flow may be required in order to perform the
2108  // check, which it is for data member pointers (but not member
2109  // function pointers on Itanium and ARM).
2110  return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2111  }
2112 
2113  case CK_ARCProduceObject:
2114  return CGF.EmitARCRetainScalarExpr(E);
2115  case CK_ARCConsumeObject:
2116  return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2117  case CK_ARCReclaimReturnedObject:
2118  return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2119  case CK_ARCExtendBlockObject:
2120  return CGF.EmitARCExtendBlockObject(E);
2121 
2122  case CK_CopyAndAutoreleaseBlockObject:
2123  return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2124 
2125  case CK_FloatingRealToComplex:
2126  case CK_FloatingComplexCast:
2127  case CK_IntegralRealToComplex:
2128  case CK_IntegralComplexCast:
2129  case CK_IntegralComplexToFloatingComplex:
2130  case CK_FloatingComplexToIntegralComplex:
2131  case CK_ConstructorConversion:
2132  case CK_ToUnion:
2133  llvm_unreachable("scalar cast to non-scalar value");
2134 
2135  case CK_LValueToRValue:
2136  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2137  assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2138  return Visit(const_cast<Expr*>(E));
2139 
2140  case CK_IntegralToPointer: {
2141  Value *Src = Visit(const_cast<Expr*>(E));
2142 
2143  // First, convert to the correct width so that we control the kind of
2144  // extension.
2145  auto DestLLVMTy = ConvertType(DestTy);
2146  llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2147  bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2148  llvm::Value* IntResult =
2149  Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2150 
2151  auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2152 
2153  if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2154  // Going from an integer to a pointer that could be dynamic requires
2155  // reloading the dynamic information from invariant.group.
2156  if (DestTy.mayBeDynamicClass())
2157  IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2158  }
2159  return IntToPtr;
2160  }
2161  case CK_PointerToIntegral: {
2162  assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2163  auto *PtrExpr = Visit(E);
2164 
2165  if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2166  const QualType SrcType = E->getType();
2167 
2168  // Casting to an integer requires stripping dynamic information, as an
2169  // integer does not carry it.
2170  if (SrcType.mayBeDynamicClass())
2171  PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2172  }
2173 
2174  return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2175  }
2176  case CK_ToVoid: {
2177  CGF.EmitIgnoredExpr(E);
2178  return nullptr;
2179  }
2180  case CK_VectorSplat: {
2181  llvm::Type *DstTy = ConvertType(DestTy);
2182  Value *Elt = Visit(const_cast<Expr*>(E));
2183  // Splat the element across to all elements
2184  unsigned NumElements = DstTy->getVectorNumElements();
2185  return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2186  }
2187 
2188  case CK_FixedPointCast:
2189  return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2190  CE->getExprLoc());
2191 
2192  case CK_FixedPointToBoolean:
2193  assert(E->getType()->isFixedPointType() &&
2194  "Expected src type to be fixed point type");
2195  assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2196  return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2197  CE->getExprLoc());
2198 
2199  case CK_IntegralCast: {
2200  ScalarConversionOpts Opts;
2201  if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2202  if (!ICE->isPartOfExplicitCast())
2203  Opts = ScalarConversionOpts(CGF.SanOpts);
2204  }
2205  return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2206  CE->getExprLoc(), Opts);
2207  }
2208  case CK_IntegralToFloating:
2209  case CK_FloatingToIntegral:
2210  case CK_FloatingCast:
2211  return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2212  CE->getExprLoc());
2213  case CK_BooleanToSignedIntegral: {
2214  ScalarConversionOpts Opts;
2215  Opts.TreatBooleanAsSigned = true;
2216  return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2217  CE->getExprLoc(), Opts);
2218  }
2219  case CK_IntegralToBoolean:
2220  return EmitIntToBoolConversion(Visit(E));
2221  case CK_PointerToBoolean:
2222  return EmitPointerToBoolConversion(Visit(E), E->getType());
2223  case CK_FloatingToBoolean:
2224  return EmitFloatToBoolConversion(Visit(E));
2225  case CK_MemberPointerToBoolean: {
2226  llvm::Value *MemPtr = Visit(E);
2227  const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2228  return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2229  }
2230 
2231  case CK_FloatingComplexToReal:
2232  case CK_IntegralComplexToReal:
2233  return CGF.EmitComplexExpr(E, false, true).first;
2234 
2235  case CK_FloatingComplexToBoolean:
2236  case CK_IntegralComplexToBoolean: {
2237  CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
2238 
2239  // TODO: kill this function off, inline appropriate case here
2240  return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2241  CE->getExprLoc());
2242  }
2243 
2244  case CK_ZeroToOCLOpaqueType: {
2245  assert((DestTy->isEventT() || DestTy->isQueueT() ||
2246  DestTy->isOCLIntelSubgroupAVCType()) &&
2247  "CK_ZeroToOCLEvent cast on non-event type");
2248  return llvm::Constant::getNullValue(ConvertType(DestTy));
2249  }
2250 
2251  case CK_IntToOCLSampler:
2252  return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2253 
2254  } // end of switch
2255 
2256  llvm_unreachable("unknown scalar cast");
2257 }
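// --- Editorial illustration, not part of the original CGExprScalar.cpp -----
// A few cast kinds dispatched by the switch above, as they arise from plain
// C++ source; the function and variable names are made up for the example.
static void castKindExamples(int i, float f, int *p) {
  float     a = i;             // CK_IntegralToFloating
  long      b = f;             // CK_FloatingToIntegral
  bool      c = p;             // CK_PointerToBoolean
  void     *d = p;             // CK_BitCast
  long long e = (long long)p;  // CK_PointerToIntegral
  (void)a; (void)b; (void)c; (void)d; (void)e;
}
// ----------------------------------------------------------------------------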
2258 
2259 Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2260  CodeGenFunction::StmtExprEvaluation eval(CGF);
2261  Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2262  !E->getType()->isVoidType());
2263  if (!RetAlloca.isValid())
2264  return nullptr;
2265  return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2266  E->getExprLoc());
2267 }
2268 
2269 Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2270  CGF.enterFullExpression(E);
2271  CodeGenFunction::RunCleanupsScope Scope(CGF);
2272  Value *V = Visit(E->getSubExpr());
2273  // Defend against dominance problems caused by jumps out of expression
2274  // evaluation through the shared cleanup block.
2275  Scope.ForceCleanup({&V});
2276  return V;
2277 }
2278 
2279 //===----------------------------------------------------------------------===//
2280 // Unary Operators
2281 //===----------------------------------------------------------------------===//
2282 
2283 static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
2284  llvm::Value *InVal, bool IsInc) {
2285  BinOpInfo BinOp;
2286  BinOp.LHS = InVal;
2287  BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2288  BinOp.Ty = E->getType();
2289  BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2290  // FIXME: once UnaryOperator carries FPFeatures, copy it here.
2291  BinOp.E = E;
2292  return BinOp;
2293 }
2294 
2295 llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2296  const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
2297  llvm::Value *Amount =
2298  llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
2299  StringRef Name = IsInc ? "inc" : "dec";
2300  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
2301  case LangOptions::SOB_Defined:
2302  return Builder.CreateAdd(InVal, Amount, Name);
2303  case LangOptions::SOB_Undefined:
2304  if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2305  return Builder.CreateNSWAdd(InVal, Amount, Name);
2306  LLVM_FALLTHROUGH;
2307  case LangOptions::SOB_Trapping:
2308  if (!E->canOverflow())
2309  return Builder.CreateNSWAdd(InVal, Amount, Name);
2310  return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, InVal, IsInc));
2311  }
2312  llvm_unreachable("Unknown SignedOverflowBehaviorTy");
2313 }
2314 
2315 llvm::Value *
2316 ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2317  bool isInc, bool isPre) {
2318 
2319  QualType type = E->getSubExpr()->getType();
2320  llvm::PHINode *atomicPHI = nullptr;
2321  llvm::Value *value;
2322  llvm::Value *input;
2323 
2324  int amount = (isInc ? 1 : -1);
2325  bool isSubtraction = !isInc;
2326 
2327  if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2328  type = atomicTy->getValueType();
2329  if (isInc && type->isBooleanType()) {
2330  llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
2331  if (isPre) {
2332  Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
2333  ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
2334  return Builder.getTrue();
2335  }
2336  // For an atomic bool pre-increment we just store true and return it; for
2337  // a post-increment we do an atomic swap with true and return the old value.
2338  return Builder.CreateAtomicRMW(
2339  llvm::AtomicRMWInst::Xchg, LV.getPointer(), True,
2340  llvm::AtomicOrdering::SequentiallyConsistent);
2341  }
2342  // Special case for atomic increment / decrement on integers, emit
2343  // atomicrmw instructions. We skip this if we want to be doing overflow
2344  // checking, and fall into the slow path with the atomic cmpxchg loop.
2345  if (!type->isBooleanType() && type->isIntegerType() &&
2346  !(type->isUnsignedIntegerType() &&
2347  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2348  CGF.getLangOpts().getSignedOverflowBehavior() !=
2349  LangOptions::SOB_Trapping) {
2350  llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2351  llvm::AtomicRMWInst::Sub;
2352  llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2353  llvm::Instruction::Sub;
2354  llvm::Value *amt = CGF.EmitToMemory(
2355  llvm::ConstantInt::get(ConvertType(type), 1, true), type);
2356  llvm::Value *old = Builder.CreateAtomicRMW(aop,
2357  LV.getPointer(), amt, llvm::AtomicOrdering::SequentiallyConsistent);
2358  return isPre ? Builder.CreateBinOp(op, old, amt) : old;
2359  }
2360  value = EmitLoadOfLValue(LV, E->getExprLoc());
2361  input = value;
2362  // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2363  llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2364  llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2365  value = CGF.EmitToMemory(value, type);
2366  Builder.CreateBr(opBB);
2367  Builder.SetInsertPoint(opBB);
2368  atomicPHI = Builder.CreatePHI(value->getType(), 2);
2369  atomicPHI->addIncoming(value, startBB);
2370  value = atomicPHI;
2371  } else {
2372  value = EmitLoadOfLValue(LV, E->getExprLoc());
2373  input = value;
2374  }
2375 
2376  // Special case of integer increment that we have to check first: bool++.
2377  // Due to promotion rules, we get:
2378  // bool++ -> bool = bool + 1
2379  // -> bool = (int)bool + 1
2380  // -> bool = ((int)bool + 1 != 0)
2381  // An interesting aspect of this is that increment is always true.
2382  // Decrement does not have this property.
2383  if (isInc && type->isBooleanType()) {
2384  value = Builder.getTrue();
2385 
2386  // Most common case by far: integer increment.
2387  } else if (type->isIntegerType()) {
2388  // Note that signed integer inc/dec with width less than int can't
2389  // overflow because of promotion rules; we're just eliding a few steps here.
2390  if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2391  value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2392  } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2393  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2394  value =
2395  EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, value, isInc));
2396  } else {
2397  llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2398  value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2399  }
2400 
2401  // Next most common: pointer increment.
2402  } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2403  QualType type = ptr->getPointeeType();
2404 
2405  // VLA types don't have constant size.
2406  if (const VariableArrayType *vla
2407  = CGF.getContext().getAsVariableArrayType(type)) {
2408  llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2409  if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
2410  if (CGF.getLangOpts().isSignedOverflowDefined())
2411  value = Builder.CreateGEP(value, numElts, "vla.inc");
2412  else
2413  value = CGF.EmitCheckedInBoundsGEP(
2414  value, numElts, /*SignedIndices=*/false, isSubtraction,
2415  E->getExprLoc(), "vla.inc");
2416 
2417  // Arithmetic on function pointers (!) is just +-1.
2418  } else if (type->isFunctionType()) {
2419  llvm::Value *amt = Builder.getInt32(amount);
2420 
2421  value = CGF.EmitCastToVoidPtr(value);
2422  if (CGF.getLangOpts().isSignedOverflowDefined())
2423  value = Builder.CreateGEP(value, amt, "incdec.funcptr");
2424  else
2425  value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2426  isSubtraction, E->getExprLoc(),
2427  "incdec.funcptr");
2428  value = Builder.CreateBitCast(value, input->getType());
2429 
2430  // For everything else, we can just do a simple increment.
2431  } else {
2432  llvm::Value *amt = Builder.getInt32(amount);
2433  if (CGF.getLangOpts().isSignedOverflowDefined())
2434  value = Builder.CreateGEP(value, amt, "incdec.ptr");
2435  else
2436  value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
2437  isSubtraction, E->getExprLoc(),
2438  "incdec.ptr");
2439  }
2440 
2441  // Vector increment/decrement.
2442  } else if (type->isVectorType()) {
2443  if (type->hasIntegerRepresentation()) {
2444  llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
2445 
2446  value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2447  } else {
2448  value = Builder.CreateFAdd(
2449  value,
2450  llvm::ConstantFP::get(value->getType(), amount),
2451  isInc ? "inc" : "dec");
2452  }
2453 
2454  // Floating point.
2455  } else if (type->isRealFloatingType()) {
2456  // Add the inc/dec to the real part.
2457  llvm::Value *amt;
2458 
2459  if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2460  // Another special case: half FP increment should be done via float.
2461  if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2462  value = Builder.CreateCall(
2463  CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
2464  CGF.CGM.FloatTy),
2465  input, "incdec.conv");
2466  } else {
2467  value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
2468  }
2469  }
2470 
2471  if (value->getType()->isFloatTy())
2472  amt = llvm::ConstantFP::get(VMContext,
2473  llvm::APFloat(static_cast<float>(amount)));
2474  else if (value->getType()->isDoubleTy())
2475  amt = llvm::ConstantFP::get(VMContext,
2476  llvm::APFloat(static_cast<double>(amount)));
2477  else {
2478  // Remaining types are Half, LongDouble or __float128. Convert from float.
2479  llvm::APFloat F(static_cast<float>(amount));
2480  bool ignored;
2481  const llvm::fltSemantics *FS;
2482  // Don't use getFloatTypeSemantics because Half isn't
2483  // necessarily represented using the "half" LLVM type.
2484  if (value->getType()->isFP128Ty())
2485  FS = &CGF.getTarget().getFloat128Format();
2486  else if (value->getType()->isHalfTy())
2487  FS = &CGF.getTarget().getHalfFormat();
2488  else
2489  FS = &CGF.getTarget().getLongDoubleFormat();
2490  F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
2491  amt = llvm::ConstantFP::get(VMContext, F);
2492  }
2493  value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
2494 
2495  if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2496  if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
2497  value = Builder.CreateCall(
2498  CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
2499  CGF.CGM.FloatTy),
2500  value, "incdec.conv");
2501  } else {
2502  value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
2503  }
2504  }
2505 
2506  // Objective-C pointer types.
2507  } else {
2508  const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
2509  value = CGF.EmitCastToVoidPtr(value);
2510 
2512  if (!isInc) size = -size;
2513  llvm::Value *sizeValue =
2514  llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
2515 
2517  value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
2518  else
2519  value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
2520  /*SignedIndices=*/false, isSubtraction,
2521  E->getExprLoc(), "incdec.objptr");
2522  value = Builder.CreateBitCast(value, input->getType());
2523  }
2524 
2525  if (atomicPHI) {
2526  llvm::BasicBlock *opBB = Builder.GetInsertBlock();
2527  llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2528  auto Pair = CGF.EmitAtomicCompareExchange(
2529  LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
2530  llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
2531  llvm::Value *success = Pair.second;
2532  atomicPHI->addIncoming(old, opBB);
2533  Builder.CreateCondBr(success, contBB, opBB);
2534  Builder.SetInsertPoint(contBB);
2535  return isPre ? value : input;
2536  }
2537 
2538  // Store the updated result through the lvalue.
2539  if (LV.isBitField())
2540  CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
2541  else
2542  CGF.EmitStoreThroughLValue(RValue::get(value), LV);
2543 
2544  // If this is a postinc, return the value read from memory, otherwise use the
2545  // updated value.
2546  return isPre ? value : input;
2547 }
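// --- Editorial illustration, not part of the original CGExprScalar.cpp -----
// Source-level forms that exercise the main branches above (a sketch; the
// bool and _Atomic cases are described only in comments because their
// spelling is dialect-dependent):
static void incDecExamples(int i, int *p, float f) {
  ++i;   // integer path: plain/nsw add, or an overflow-checked add
  ++p;   // pointer path: (checked) inbounds getelementptr by one element
  ++f;   // floating-point path: fadd with 1.0
  // bool b; b++;          -> always stores true (see the isBooleanType case)
  // _Atomic int a; a++;   -> atomicrmw add when no overflow check is needed
}
// ----------------------------------------------------------------------------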
2548 
2549 
2550 
2551 Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
2552  TestAndClearIgnoreResultAssign();
2553  // Emit unary minus with EmitSub so we handle overflow cases etc.
2554  BinOpInfo BinOp;
2555  BinOp.RHS = Visit(E->getSubExpr());
2556 
2557  if (BinOp.RHS->getType()->isFPOrFPVectorTy())
2558  BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType());
2559  else
2560  BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
2561  BinOp.Ty = E->getType();
2562  BinOp.Opcode = BO_Sub;
2563  // FIXME: once UnaryOperator carries FPFeatures, copy it here.
2564  BinOp.E = E;
2565  return EmitSub(BinOp);
2566 }
2567 
2568 Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
2569  TestAndClearIgnoreResultAssign();
2570  Value *Op = Visit(E->getSubExpr());
2571  return Builder.CreateNot(Op, "neg");
2572 }
2573 
2574 Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
2575  // Perform vector logical not on comparison with zero vector.
2576  if (E->getType()->isExtVectorType()) {
2577  Value *Oper = Visit(E->getSubExpr());
2578  Value *Zero = llvm::Constant::getNullValue(Oper->getType());
2579  Value *Result;
2580  if (Oper->getType()->isFPOrFPVectorTy())
2581  Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
2582  else
2583  Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
2584  return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
2585  }
2586 
2587  // Compare operand to zero.
2588  Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
2589 
2590  // Invert value.
2591  // TODO: Could dynamically modify easy computations here. For example, if
2592  // the operand is an icmp ne, turn into icmp eq.
2593  BoolVal = Builder.CreateNot(BoolVal, "lnot");
2594 
2595  // ZExt result to the expr type.
2596  return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
2597 }
2598 
2599 Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
2600  // Try folding the offsetof to a constant.
2601  Expr::EvalResult EVResult;
2602  if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
2603  llvm::APSInt Value = EVResult.Val.getInt();
2604  return Builder.getInt(Value);
2605  }
2606 
2607  // Loop over the components of the offsetof to compute the value.
2608  unsigned n = E->getNumComponents();
2609  llvm::Type* ResultType = ConvertType(E->getType());
2610  llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
2611  QualType CurrentType = E->getTypeSourceInfo()->getType();
2612  for (unsigned i = 0; i != n; ++i) {
2613  OffsetOfNode ON = E->getComponent(i);
2614  llvm::Value *Offset = nullptr;
2615  switch (ON.getKind()) {
2616  case OffsetOfNode::Array: {
2617  // Compute the index
2618  Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
2619  llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
2620  bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
2621  Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
2622 
2623  // Save the element type
2624  CurrentType =
2625  CGF.getContext().getAsArrayType(CurrentType)->getElementType();
2626 
2627  // Compute the element size
2628  llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
2629  CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
2630 
2631  // Multiply out to compute the result
2632  Offset = Builder.CreateMul(Idx, ElemSize);
2633  break;
2634  }
2635 
2636  case OffsetOfNode::Field: {
2637  FieldDecl *MemberDecl = ON.getField();
2638  RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
2639  const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2640 
2641  // Compute the index of the field in its parent.
2642  unsigned i = 0;
2643  // FIXME: It would be nice if we didn't have to loop here!
2644  for (RecordDecl::field_iterator Field = RD->field_begin(),
2645  FieldEnd = RD->field_end();
2646  Field != FieldEnd; ++Field, ++i) {
2647  if (*Field == MemberDecl)
2648  break;
2649  }
2650  assert(i < RL.getFieldCount() && "offsetof field in wrong type");
2651 
2652  // Compute the offset to the field
2653  int64_t OffsetInt = RL.getFieldOffset(i) /
2654  CGF.getContext().getCharWidth();
2655  Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
2656 
2657  // Save the element type.
2658  CurrentType = MemberDecl->getType();
2659  break;
2660  }
2661 
2662  case OffsetOfNode::Identifier:
2663  llvm_unreachable("dependent __builtin_offsetof");
2664 
2665  case OffsetOfNode::Base: {
2666  if (ON.getBase()->isVirtual()) {
2667  CGF.ErrorUnsupported(E, "virtual base in offsetof");
2668  continue;
2669  }
2670 
2671  RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
2672  const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
2673 
2674  // Save the element type.
2675  CurrentType = ON.getBase()->getType();
2676 
2677  // Compute the offset to the base.
2678  const RecordType *BaseRT = CurrentType->getAs<RecordType>();
2679  CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
2680  CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
2681  Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
2682  break;
2683  }
2684  }
2685  Result = Builder.CreateAdd(Result, Offset);
2686  }
2687  return Result;
2688 }
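// --- Editorial illustration, not part of the original CGExprScalar.cpp -----
// An offsetof whose array index is not a constant cannot be folded by the
// EvaluateAsInt call above, so the Field and Array components are walked at
// run time. The struct and names are made up; the variable-index form is a
// GNU/clang extension.
#include <cstddef>
struct OffsetofExampleS { int tag; int arr[8]; };
static std::size_t offsetofExample(int i) {
  return __builtin_offsetof(OffsetofExampleS, arr[i]);  // offsetof(arr) + i*sizeof(int)
}
// ----------------------------------------------------------------------------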
2689 
2690 /// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type
2691 /// of the argument of the sizeof/alignof expression as an integer.
2692 Value *
2693 ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2694  const UnaryExprOrTypeTraitExpr *E) {
2695  QualType TypeToSize = E->getTypeOfArgument();
2696  if (E->getKind() == UETT_SizeOf) {
2697  if (const VariableArrayType *VAT =
2698  CGF.getContext().getAsVariableArrayType(TypeToSize)) {
2699  if (E->isArgumentType()) {
2700  // sizeof(type) - make sure to emit the VLA size.
2701  CGF.EmitVariablyModifiedType(TypeToSize);
2702  } else {
2703  // C99 6.5.3.4p2: If the argument is an expression of type
2704  // VLA, it is evaluated.
2705  CGF.EmitIgnoredExpr(E->getArgumentExpr());
2706  }
2707 
2708  auto VlaSize = CGF.getVLASize(VAT);
2709  llvm::Value *size = VlaSize.NumElts;
2710 
2711  // Scale the number of non-VLA elements by the non-VLA element size.
2712  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
2713  if (!eltSize.isOne())
2714  size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);
2715 
2716  return size;
2717  }
2718  } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
2719  auto Alignment =
2720  CGF.getContext()
2721  .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
2722  E->getTypeOfArgument()->getPointeeType()))
2723  .getQuantity();
2724  return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
2725  }
2726 
2727  // If this isn't sizeof(vla), the result must be constant; use the constant
2728  // folding logic so we don't have to duplicate it here.
2729  return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
2730 }
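// --- Editorial illustration, not part of the original CGExprScalar.cpp -----
// sizeof of a variable-length array cannot be constant-folded, so the stored
// VLA element count is scaled by the element size as above (C99 6.5.3.4p2).
// VLAs in C++ are a clang/GCC extension, and the names are made up.
static unsigned long vlaSizeofExample(int n) {
  int vla[n][4];
  return sizeof(vla);   // n * 4 * sizeof(int), computed at run time
}
// ----------------------------------------------------------------------------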
2731 
2732 Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
2733  Expr *Op = E->getSubExpr();
2734  if (Op->getType()->isAnyComplexType()) {
2735  // If it's an l-value, load through the appropriate subobject l-value.
2736  // Note that we have to ask E because Op might be an l-value that
2737  // this won't work for, e.g. an Obj-C property.
2738  if (E->isGLValue())
2739  return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2740  E->getExprLoc()).getScalarVal();
2741 
2742  // Otherwise, calculate and project.
2743  return CGF.EmitComplexExpr(Op, false, true).first;
2744  }
2745 
2746  return Visit(Op);
2747 }
2748 
2749 Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
2750  Expr *Op = E->getSubExpr();
2751  if (Op->getType()->isAnyComplexType()) {
2752  // If it's an l-value, load through the appropriate subobject l-value.
2753  // Note that we have to ask E because Op might be an l-value that
2754  // this won't work for, e.g. an Obj-C property.
2755  if (Op->isGLValue())
2756  return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
2757  E->getExprLoc()).getScalarVal();
2758 
2759  // Otherwise, calculate and project.
2760  return CGF.EmitComplexExpr(Op, true, false).second;
2761  }
2762 
2763  // __imag on a scalar returns zero. Emit the subexpr to ensure side
2764  // effects are evaluated, but not the actual value.
2765  if (Op->isGLValue())
2766  CGF.EmitLValue(Op);
2767  else
2768  CGF.EmitScalarExpr(Op, true);
2769  return llvm::Constant::getNullValue(ConvertType(E->getType()));
2770 }
2771 
2772 //===----------------------------------------------------------------------===//
2773 // Binary Operators
2774 //===----------------------------------------------------------------------===//
2775 
2776 BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
2777  TestAndClearIgnoreResultAssign();
2778  BinOpInfo Result;
2779  Result.LHS = Visit(E->getLHS());
2780  Result.RHS = Visit(E->getRHS());
2781  Result.Ty = E->getType();
2782  Result.Opcode = E->getOpcode();
2783  Result.FPFeatures = E->getFPFeatures();
2784  Result.E = E;
2785  return Result;
2786 }
2787 
2788 LValue ScalarExprEmitter::EmitCompoundAssignLValue(
2789  const CompoundAssignOperator *E,
2790  Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
2791  Value *&Result) {
2792  QualType LHSTy = E->getLHS()->getType();
2793  BinOpInfo OpInfo;
2794 
2796  return CGF.EmitScalarCompoundAssignWithComplex(E, Result);
2797 
2798  // Emit the RHS first. __block variables need to have the rhs evaluated
2799  // first, plus this should improve codegen a little.
2800  OpInfo.RHS = Visit(E->getRHS());
2801  OpInfo.Ty = E->getComputationResultType();
2802  OpInfo.Opcode = E->getOpcode();
2803  OpInfo.FPFeatures = E->getFPFeatures();
2804  OpInfo.E = E;
2805  // Load/convert the LHS.
2806  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
2807 
2808  llvm::PHINode *atomicPHI = nullptr;
2809  if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
2810  QualType type = atomicTy->getValueType();
2811  if (!type->isBooleanType() && type->isIntegerType() &&
2812  !(type->isUnsignedIntegerType() &&
2813  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2814  CGF.getLangOpts().getSignedOverflowBehavior() !=
2815  LangOptions::SOB_Trapping) {
2816  llvm::AtomicRMWInst::BinOp aop = llvm::AtomicRMWInst::BAD_BINOP;
2817  switch (OpInfo.Opcode) {
2818  // We don't have atomicrmw operands for *, %, /, <<, >>
2819  case BO_MulAssign: case BO_DivAssign:
2820  case BO_RemAssign:
2821  case BO_ShlAssign:
2822  case BO_ShrAssign:
2823  break;
2824  case BO_AddAssign:
2825  aop = llvm::AtomicRMWInst::Add;
2826  break;
2827  case BO_SubAssign:
2828  aop = llvm::AtomicRMWInst::Sub;
2829  break;
2830  case BO_AndAssign:
2831  aop = llvm::AtomicRMWInst::And;
2832  break;
2833  case BO_XorAssign:
2834  aop = llvm::AtomicRMWInst::Xor;
2835  break;
2836  case BO_OrAssign:
2837  aop = llvm::AtomicRMWInst::Or;
2838  break;
2839  default:
2840  llvm_unreachable("Invalid compound assignment type");
2841  }
2842  if (aop != llvm::AtomicRMWInst::BAD_BINOP) {
2843  llvm::Value *amt = CGF.EmitToMemory(
2844  EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
2845  E->getExprLoc()),
2846  LHSTy);
2847  Builder.CreateAtomicRMW(aop, LHSLV.getPointer(), amt,
2848  llvm::AtomicOrdering::SequentiallyConsistent);
2849  return LHSLV;
2850  }
2851  }
2852  // FIXME: For floating point types, we should be saving and restoring the
2853  // floating point environment in the loop.
2854  llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2855  llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2856  OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
2857  OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
2858  Builder.CreateBr(opBB);
2859  Builder.SetInsertPoint(opBB);
2860  atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
2861  atomicPHI->addIncoming(OpInfo.LHS, startBB);
2862  OpInfo.LHS = atomicPHI;
2863  }
2864  else
2865  OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
2866 
2867  SourceLocation Loc = E->getExprLoc();
2868  OpInfo.LHS =
2869  EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType(), Loc);
2870 
2871  // Expand the binary operator.
2872  Result = (this->*Func)(OpInfo);
2873 
2874  // Convert the result back to the LHS type,
2875  // potentially with an implicit-conversion sanitizer check.
2876  Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy,
2877  Loc, ScalarConversionOpts(CGF.SanOpts));
2878 
2879  if (atomicPHI) {
2880  llvm::BasicBlock *opBB = Builder.GetInsertBlock();
2881  llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
2882  auto Pair = CGF.EmitAtomicCompareExchange(
2883  LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
2884  llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
2885  llvm::Value *success = Pair.second;
2886  atomicPHI->addIncoming(old, opBB);
2887  Builder.CreateCondBr(success, contBB, opBB);
2888  Builder.SetInsertPoint(contBB);
2889  return LHSLV;
2890  }
2891 
2892  // Store the result value into the LHS lvalue. Bit-fields are handled
2893  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
2894  // 'An assignment expression has the value of the left operand after the
2895  // assignment...'.
2896  if (LHSLV.isBitField())
2897  CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
2898  else
2899  CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
2900 
2901  return LHSLV;
2902 }
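// [Editor's note: illustrative example, not part of the original CGExprScalar.cpp.]
// For C11 source such as
//
//   void bump(_Atomic int *p)  { *p += 2; }   // lowers to a single 'atomicrmw add'
//   void scale(_Atomic int *p) { *p *= 2; }   // no atomicrmw form: cmpxchg loop
//
// the fast path above emits one atomicrmw for +=, -=, &=, ^= and |=, while the
// remaining compound assignments (and the -ftrapv / unsigned-overflow-sanitized
// cases) fall back to the compare-exchange loop built around 'atomicPHI'.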
2903 
2904 Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
2905  Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
2906  bool Ignore = TestAndClearIgnoreResultAssign();
2907  Value *RHS;
2908  LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
2909 
2910  // If the result is clearly ignored, return now.
2911  if (Ignore)
2912  return nullptr;
2913 
2914  // The result of an assignment in C is the assigned r-value.
2915  if (!CGF.getLangOpts().CPlusPlus)
2916  return RHS;
2917 
2918  // If the lvalue is non-volatile, return the computed value of the assignment.
2919  if (!LHS.isVolatileQualified())
2920  return RHS;
2921 
2922  // Otherwise, reload the value.
2923  return EmitLoadOfLValue(LHS, E->getExprLoc());
2924 }
2925 
2926 void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
2927  const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
2928  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks;
2929 
2930  if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
2931  Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
2932  SanitizerKind::IntegerDivideByZero));
2933  }
2934 
2935  const auto *BO = cast<BinaryOperator>(Ops.E);
2936  if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
2937  Ops.Ty->hasSignedIntegerRepresentation() &&
2938  !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
2939  Ops.mayHaveIntegerOverflow()) {
2940  llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
2941 
2942  llvm::Value *IntMin =
2943  Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
2944  llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
2945 
2946  llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
2947  llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
2948  llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
2949  Checks.push_back(
2950  std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
2951  }
2952 
2953  if (Checks.size() > 0)
2954  EmitBinOpCheck(Checks, Ops);
2955 }
2956 
2957 Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
2958  {
2959  CodeGenFunction::SanitizerScope SanScope(&CGF);
2960  if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
2961  CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
2962  Ops.Ty->isIntegerType() &&
2963  (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
2964  llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
2965  EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
2966  } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
2967  Ops.Ty->isRealFloatingType() &&
2968  Ops.mayHaveFloatDivisionByZero()) {
2969  llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
2970  llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
2971  EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
2972  Ops);
2973  }
2974  }
2975 
2976  if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
2977  llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
2978  if (CGF.getLangOpts().OpenCL &&
2979  !CGF.CGM.getCodeGenOpts().CorrectlyRoundedDivSqrt) {
2980  // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
2981  // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
2982  // build option allows an application to specify that single precision
2983  // floating-point divide (x/y and 1/x) and sqrt used in the program
2984  // source are correctly rounded.
2985  llvm::Type *ValTy = Val->getType();
2986  if (ValTy->isFloatTy() ||
2987  (isa<llvm::VectorType>(ValTy) &&
2988  cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
2989  CGF.SetFPAccuracy(Val, 2.5);
2990  }
2991  return Val;
2992  }
2993  else if (Ops.Ty->hasUnsignedIntegerRepresentation())
2994  return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
2995  else
2996  return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
2997 }
2998 
2999 Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
3000  // Rem in C can't be a floating point type: C99 6.5.5p2.
3001  if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3002  CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3003  Ops.Ty->isIntegerType() &&
3004  (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3005  CodeGenFunction::SanitizerScope SanScope(&CGF);
3006  llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3007  EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
3008  }
3009 
3010  if (Ops.Ty->hasUnsignedIntegerRepresentation())
3011  return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
3012  else
3013  return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
3014 }
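// [Editor's note: illustrative example, not part of the original CGExprScalar.cpp.]
// With -fsanitize=integer-divide-by-zero,signed-integer-overflow, a plain signed
// division such as
//
//   int quot(int a, int b) { return a / b; }
//
// gets two guards from EmitUndefinedBehaviorIntegerDivAndRemCheck: b != 0, and
// !(a == INT_MIN && b == -1). Unsigned division only needs the divide-by-zero
// check, since unsigned a / b cannot overflow.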
3015 
3016 Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
3017  unsigned IID;
3018  unsigned OpID = 0;
3019 
3020  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
3021  switch (Ops.Opcode) {
3022  case BO_Add:
3023  case BO_AddAssign:
3024  OpID = 1;
3025  IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
3026  llvm::Intrinsic::uadd_with_overflow;
3027  break;
3028  case BO_Sub:
3029  case BO_SubAssign:
3030  OpID = 2;
3031  IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
3032  llvm::Intrinsic::usub_with_overflow;
3033  break;
3034  case BO_Mul:
3035  case BO_MulAssign:
3036  OpID = 3;
3037  IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
3038  llvm::Intrinsic::umul_with_overflow;
3039  break;
3040  default:
3041  llvm_unreachable("Unsupported operation for overflow detection");
3042  }
3043  OpID <<= 1;
3044  if (isSigned)
3045  OpID |= 1;
3046 
3047  CodeGenFunction::SanitizerScope SanScope(&CGF);
3048  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
3049 
3050  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
3051 
3052  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
3053  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
3054  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
3055 
3056  // Handle overflow with llvm.trap if no custom handler has been specified.
3057  const std::string *handlerName =
3058  &CGF.getLangOpts().OverflowHandler;
3059  if (handlerName->empty()) {
3060  // If the signed-integer-overflow sanitizer is enabled, emit a call to its
3061  // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
3062  if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
3063  llvm::Value *NotOverflow = Builder.CreateNot(overflow);
3064  SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
3065  : SanitizerKind::UnsignedIntegerOverflow;
3066  EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
3067  } else
3068  CGF.EmitTrapCheck(Builder.CreateNot(overflow));
3069  return result;
3070  }
3071 
3072  // Branch in case of overflow.
3073  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
3074  llvm::BasicBlock *continueBB =
3075  CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
3076  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
3077 
3078  Builder.CreateCondBr(overflow, overflowBB, continueBB);
3079 
3080  // If an overflow handler is set, then we want to call it and then use its
3081  // result, if it returns.
3082  Builder.SetInsertPoint(overflowBB);
3083 
3084  // Get the overflow handler.
3085  llvm::Type *Int8Ty = CGF.Int8Ty;
3086  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
3087  llvm::FunctionType *handlerTy =
3088  llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
3089  llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
3090 
3091  // Sign extend the args to 64-bit, so that we can use the same handler for
3092  // all types of overflow.
3093  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
3094  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
3095 
3096  // Call the handler with the two arguments, the operation, and the size of
3097  // the result.
3098  llvm::Value *handlerArgs[] = {
3099  lhs,
3100  rhs,
3101  Builder.getInt8(OpID),
3102  Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
3103  };
3104  llvm::Value *handlerResult =
3105  CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
3106 
3107  // Truncate the result back to the desired size.
3108  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
3109  Builder.CreateBr(continueBB);
3110 
3111  Builder.SetInsertPoint(continueBB);
3112  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
3113  phi->addIncoming(result, initialBB);
3114  phi->addIncoming(handlerResult, overflowBB);
3115 
3116  return phi;
3117 }
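// [Editor's note: illustrative sketch, not part of the original CGExprScalar.cpp.]
// EmitOverflowCheckedBinOp is what turns, for example,
//
//   int add(int a, int b) { return a + b; }
//
// into a call to @llvm.sadd.with.overflow.i32 under -ftrapv or
// -fsanitize=signed-integer-overflow. Without a custom handler the overflow
// branch traps (or calls the UBSan runtime); with an overflow handler configured
// (the LangOption filled in by -ftrapv-handler=NAME, where NAME is arbitrary
// here) the handler is called with the operands sign-extended to 64 bits, the
// encoded opcode/signedness, and the result width, and its return value,
// truncated back to the operand type, is used on the overflow path.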
3118 
3119 /// Emit pointer + index arithmetic.
3120 static Value *emitPointerArithmetic(CodeGenFunction &CGF,
3121  const BinOpInfo &op,
3122  bool isSubtraction) {
3123  // Must have binary (not unary) expr here. Unary pointer
3124  // increment/decrement doesn't use this path.
3125  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3126 
3127  Value *pointer = op.LHS;
3128  Expr *pointerOperand = expr->getLHS();
3129  Value *index = op.RHS;
3130  Expr *indexOperand = expr->getRHS();
3131 
3132  // In a subtraction, the LHS is always the pointer.
3133  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
3134  std::swap(pointer, index);
3135  std::swap(pointerOperand, indexOperand);
3136  }
3137 
3138  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
3139 
3140  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
3141  auto &DL = CGF.CGM.getDataLayout();
3142  auto PtrTy = cast<llvm::PointerType>(pointer->getType());
3143 
3144  // Some versions of glibc and gcc use idioms (particularly in their malloc
3145  // routines) that add a pointer-sized integer (known to be a pointer value)
3146  // to a null pointer in order to cast the value back to an integer or as
3147  // part of a pointer alignment algorithm. This is undefined behavior, but
3148  // we'd like to be able to compile programs that use it.
3149  //
3150  // Normally, we'd generate a GEP with a null-pointer base here in response
3151  // to that code, but it's also UB to dereference a pointer created that
3152  // way. Instead (as an acknowledged hack to tolerate the idiom) we will
3153  // generate a direct cast of the integer value to a pointer.
3154  //
3155  // The idiom (p = nullptr + N) is not met if any of the following are true:
3156  //
3157  // The operation is subtraction.
3158  // The index is not pointer-sized.
3159  // The pointer type is not byte-sized.
3160  //
3161  if (BinaryOperator::isNullPointerArithmeticExtension(CGF.getContext(),
3162  op.Opcode,
3163  expr->getLHS(),
3164  expr->getRHS()))
3165  return CGF.Builder.CreateIntToPtr(index, pointer->getType());
3166 
3167  if (width != DL.getTypeSizeInBits(PtrTy)) {
3168  // Zero-extend or sign-extend the pointer value according to
3169  // whether the index is signed or not.
3170  index = CGF.Builder.CreateIntCast(index, DL.getIntPtrType(PtrTy), isSigned,
3171  "idx.ext");
3172  }
3173 
3174  // If this is subtraction, negate the index.
3175  if (isSubtraction)
3176  index = CGF.Builder.CreateNeg(index, "idx.neg");
3177 
3178  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
3179  CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
3180  /*Accessed*/ false);
3181 
3182  const PointerType *pointerType
3183  = pointerOperand->getType()->getAs<PointerType>();
3184  if (!pointerType) {
3185  QualType objectType = pointerOperand->getType()
3186  ->castAs<ObjCObjectPointerType>()
3187  ->getPointeeType();
3188  llvm::Value *objectSize
3189  = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
3190 
3191  index = CGF.Builder.CreateMul(index, objectSize);
3192 
3193  Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
3194  result = CGF.Builder.CreateGEP(result, index, "add.ptr");
3195  return CGF.Builder.CreateBitCast(result, pointer->getType());
3196  }
3197 
3198  QualType elementType = pointerType->getPointeeType();
3199  if (const VariableArrayType *vla
3200  = CGF.getContext().getAsVariableArrayType(elementType)) {
3201  // The element count here is the total number of non-VLA elements.
3202  llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
3203 
3204  // Effectively, the multiply by the VLA size is part of the GEP.
3205  // GEP indexes are signed, and scaling an index isn't permitted to
3206  // signed-overflow, so we use the same semantics for our explicit
3207  // multiply. We suppress this if overflow is not undefined behavior.
3208  if (CGF.getLangOpts().isSignedOverflowDefined()) {
3209  index = CGF.Builder.CreateMul(index, numElements, "vla.index");
3210  pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
3211  } else {
3212  index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
3213  pointer =
3214  CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
3215  op.E->getExprLoc(), "add.ptr");
3216  }
3217  return pointer;
3218  }
3219 
3220  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
3221  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
3222  // future proof.
3223  if (elementType->isVoidType() || elementType->isFunctionType()) {
3224  Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
3225  result = CGF.Builder.CreateGEP(result, index, "add.ptr");
3226  return CGF.Builder.CreateBitCast(result, pointer->getType());
3227  }
3228 
3229  if (CGF.getLangOpts().isSignedOverflowDefined())
3230  return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
3231 
3232  return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
3233  op.E->getExprLoc(), "add.ptr");
3234 }
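// [Editor's note: illustrative example, not part of the original CGExprScalar.cpp.]
// The tolerated null-pointer idiom discussed above looks roughly like
//
//   char *as_ptr(unsigned long bits) { return (char *)0 + bits; }
//
// (on a typical 64-bit target where unsigned long is pointer-sized). Instead of
// a GEP on a null base, the addition is emitted as a plain inttoptr of 'bits',
// which is what the glibc/gcc idiom expects. Any disqualifying condition
// (subtraction, a non-pointer-sized index, a non-byte-sized pointee) falls
// through to the normal GEP path.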
3235 
3236 // Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
3237 // Addend. Use negMul and negAdd to negate the first operand of the Mul or
3238 // the add operand respectively. This allows fmuladd to represent a*b-c, or
3239 // c-a*b. Patterns in LLVM should catch the negated forms and translate them to
3240 // efficient operations.
3241 static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
3242  const CodeGenFunction &CGF, CGBuilderTy &Builder,
3243  bool negMul, bool negAdd) {
3244  assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
3245 
3246  Value *MulOp0 = MulOp->getOperand(0);
3247  Value *MulOp1 = MulOp->getOperand(1);
3248  if (negMul) {
3249  MulOp0 =
3250  Builder.CreateFSub(
3251  llvm::ConstantFP::getZeroValueForNegation(MulOp0->getType()), MulOp0,
3252  "neg");
3253  } else if (negAdd) {
3254  Addend =
3255  Builder.CreateFSub(
3256  llvm::ConstantFP::getZeroValueForNegation(Addend->getType()), Addend,
3257  "neg");
3258  }
3259 
3260  Value *FMulAdd = Builder.CreateCall(
3261  CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
3262  {MulOp0, MulOp1, Addend});
3263  MulOp->eraseFromParent();
3264 
3265  return FMulAdd;
3266 }
3267 
3268 // Check whether it would be legal to emit an fmuladd intrinsic call to
3269 // represent op and if so, build the fmuladd.
3270 //
3271 // Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
3272 // Does NOT check the type of the operation - it's assumed that this function
3273 // will be called from contexts where it's known that the type is contractable.
3274 static Value* tryEmitFMulAdd(const BinOpInfo &op,
3275  const CodeGenFunction &CGF, CGBuilderTy &Builder,
3276  bool isSub=false) {
3277 
3278  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
3279  op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
3280  "Only fadd/fsub can be the root of an fmuladd.");
3281 
3282  // Check whether this op is marked as fusable.
3283  if (!op.FPFeatures.allowFPContractWithinStatement())
3284  return nullptr;
3285 
3286  // We have a potentially fusable op. Look for a mul on one of the operands.
3287  // Also, make sure that the mul result isn't used directly. In that case,
3288  // there's no point creating a muladd operation.
3289  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) {
3290  if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3291  LHSBinOp->use_empty())
3292  return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub);
3293  }
3294  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) {
3295  if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
3296  RHSBinOp->use_empty())
3297  return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false);
3298  }
3299 
3300  return nullptr;
3301 }
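// [Editor's note: illustrative example, not part of the original CGExprScalar.cpp.]
// When contraction within a statement is allowed (e.g. -ffp-contract=on or
// -ffp-contract=fast), a multiply feeding an add in one statement, such as
//
//   double mac(double a, double b, double c) { return a * b + c; }
//
// is emitted as a single @llvm.fmuladd.f64 call by the helpers above. If the
// intermediate product is also used elsewhere, the mul is not erased and no
// fmuladd is formed.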
3302 
3303 Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
3304  if (op.LHS->getType()->isPointerTy() ||
3305  op.RHS->getType()->isPointerTy())
3306  return emitPointerArithmetic(CGF, op, CodeGenFunction::NotSubtraction);
3307 
3308  if (op.Ty->isSignedIntegerOrEnumerationType()) {
3309  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3310  case LangOptions::SOB_Defined:
3311  return Builder.CreateAdd(op.LHS, op.RHS, "add");
3312  case LangOptions::SOB_Undefined:
3313  if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3314  return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3315  LLVM_FALLTHROUGH;
3316  case LangOptions::SOB_Trapping:
3317  if (CanElideOverflowCheck(CGF.getContext(), op))
3318  return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
3319  return EmitOverflowCheckedBinOp(op);
3320  }
3321  }
3322 
3323  if (op.Ty->isUnsignedIntegerType() &&
3324  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3325  !CanElideOverflowCheck(CGF.getContext(), op))
3326  return EmitOverflowCheckedBinOp(op);
3327 
3328  if (op.LHS->getType()->isFPOrFPVectorTy()) {
3329  // Try to form an fmuladd.
3330  if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
3331  return FMulAdd;
3332 
3333  Value *V = Builder.CreateFAdd(op.LHS, op.RHS, "add");
3334  return propagateFMFlags(V, op);
3335  }
3336 
3337  return Builder.CreateAdd(op.LHS, op.RHS, "add");
3338 }
3339 
3340 Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
3341  // The LHS is always a pointer if either side is.
3342  if (!op.LHS->getType()->isPointerTy()) {
3343  if (op.Ty->isSignedIntegerOrEnumerationType()) {
3344  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3345  case LangOptions::SOB_Defined:
3346  return Builder.CreateSub(op.LHS, op.RHS, "sub");
3347  case LangOptions::SOB_Undefined:
3348  if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3349  return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3350  LLVM_FALLTHROUGH;
3351  case LangOptions::SOB_Trapping:
3352  if (CanElideOverflowCheck(CGF.getContext(), op))
3353  return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
3354  return EmitOverflowCheckedBinOp(op);
3355  }
3356  }
3357 
3358  if (op.Ty->isUnsignedIntegerType() &&
3359  CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3360  !CanElideOverflowCheck(CGF.getContext(), op))
3361  return EmitOverflowCheckedBinOp(op);
3362 
3363  if (op.LHS->getType()->isFPOrFPVectorTy()) {
3364  // Try to form an fmuladd.
3365  if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
3366  return FMulAdd;
3367  Value *V = Builder.CreateFSub(op.LHS, op.RHS, "sub");
3368  return propagateFMFlags(V, op);
3369  }
3370 
3371  return Builder.CreateSub(op.LHS, op.RHS, "sub");
3372  }
3373 
3374  // If the RHS is not a pointer, then we have normal pointer
3375  // arithmetic.
3376  if (!op.RHS->getType()->isPointerTy())
3377  return emitPointerArithmetic(CGF, op, CodeGenFunction::IsSubtraction);
3378 
3379  // Otherwise, this is a pointer subtraction.
3380 
3381  // Do the raw subtraction part.
3382  llvm::Value *LHS
3383  = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
3384  llvm::Value *RHS
3385  = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
3386  Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
3387 
3388  // Okay, figure out the element size.
3389  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3390  QualType elementType = expr->getLHS()->getType()->getPointeeType();
3391 
3392  llvm::Value *divisor = nullptr;
3393 
3394  // For a variable-length array, this is going to be non-constant.
3395  if (const VariableArrayType *vla
3396  = CGF.getContext().getAsVariableArrayType(elementType)) {
3397  auto VlaSize = CGF.getVLASize(vla);
3398  elementType = VlaSize.Type;
3399  divisor = VlaSize.NumElts;
3400 
3401  // Scale the number of non-VLA elements by the non-VLA element size.
3402  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
3403  if (!eltSize.isOne())
3404  divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
3405 
3406  // For everything else, we can just compute it, safe in the
3407  // assumption that Sema won't let anything through that we can't
3408  // safely compute the size of.
3409  } else {
3410  CharUnits elementSize;
3411  // Handle GCC extension for pointer arithmetic on void* and
3412  // function pointer types.
3413  if (elementType->isVoidType() || elementType->isFunctionType())
3414  elementSize = CharUnits::One();
3415  else
3416  elementSize = CGF.getContext().getTypeSizeInChars(elementType);
3417 
3418  // Don't even emit the divide for element size of 1.
3419  if (elementSize.isOne())
3420  return diffInChars;
3421 
3422  divisor = CGF.CGM.getSize(elementSize);
3423  }
3424 
3425  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
3426  // pointer difference in C is only defined in the case where both operands
3427  // are pointing to elements of an array.
3428  return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
3429 }
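// [Editor's note: illustrative example, not part of the original CGExprScalar.cpp.]
// For pointer subtraction the raw byte difference is divided by the element
// size with an exact sdiv:
//
//   long dist(int *a, int *b) { return a - b; }  // assuming 4-byte int on an
//                                                // LP64 target: byte diff / 4
//
// The 'exact' flag is justified because C only defines p - q for pointers into
// the same array, so the byte difference is always a multiple of the element
// size.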
3430 
3431 Value *ScalarExprEmitter::GetWidthMinusOneValue(Value *LHS, Value *RHS) {
3432  llvm::IntegerType *Ty;
3433  if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
3434  Ty = cast<llvm::IntegerType>(VT->getElementType());
3435  else
3436  Ty = cast<llvm::IntegerType>(LHS->getType());
3437  return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1);
3438 }
3439 
3440 Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
3441  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3442  // RHS to the same size as the LHS.
3443  Value *RHS = Ops.RHS;
3444  if (Ops.LHS->getType() != RHS->getType())
3445  RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
3446 
3447  bool SanitizeBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
3448  Ops.Ty->hasSignedIntegerRepresentation() &&
3449  !CGF.getLangOpts().isSignedOverflowDefined();
3450  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
3451  // OpenCL 6.3j: shift values are effectively % word size of LHS.
3452  if (CGF.getLangOpts().OpenCL)
3453  RHS =
3454  Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shl.mask");
3455  else if ((SanitizeBase || SanitizeExponent) &&
3456  isa<llvm::IntegerType>(Ops.LHS->getType())) {
3457  CodeGenFunction::SanitizerScope SanScope(&CGF);
3458  SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
3459  llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
3460  llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
3461 
3462  if (SanitizeExponent) {
3463  Checks.push_back(
3464  std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
3465  }
3466 
3467  if (SanitizeBase) {
3468  // Check whether we are shifting any non-zero bits off the top of the
3469  // integer. We only emit this check if exponent is valid - otherwise
3470  // instructions below will have undefined behavior themselves.
3471  llvm::BasicBlock *Orig = Builder.GetInsertBlock();
3472  llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
3473  llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
3474  Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
3475  llvm::Value *PromotedWidthMinusOne =
3476  (RHS == Ops.RHS) ? WidthMinusOne
3477  : GetWidthMinusOneValue(Ops.LHS, RHS);
3478  CGF.EmitBlock(CheckShiftBase);
3479  llvm::Value *BitsShiftedOff = Builder.CreateLShr(
3480  Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
3481  /*NUW*/ true, /*NSW*/ true),
3482  "shl.check");
3483  if (CGF.getLangOpts().CPlusPlus) {
3484  // In C99, we are not permitted to shift a 1 bit into the sign bit.
3485  // Under C++11's rules, shifting a 1 bit into the sign bit is
3486  // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
3487  // define signed left shifts, so we use the C99 and C++11 rules there).
3488  llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
3489  BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
3490  }
3491  llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
3492  llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
3493  CGF.EmitBlock(Cont);
3494  llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
3495  BaseCheck->addIncoming(Builder.getTrue(), Orig);
3496  BaseCheck->addIncoming(ValidBase, CheckShiftBase);
3497  Checks.push_back(std::make_pair(BaseCheck, SanitizerKind::ShiftBase));
3498  }
3499 
3500  assert(!Checks.empty());
3501  EmitBinOpCheck(Checks, Ops);
3502  }
3503 
3504  return Builder.CreateShl(Ops.LHS, RHS, "shl");
3505 }
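// [Editor's note: illustrative example, not part of the original CGExprScalar.cpp.]
// In OpenCL the shift amount is masked to the LHS width, so for a 32-bit int
// 'x << 33' behaves like 'x << 1'. Outside OpenCL, with -fsanitize=shift,
//
//   int shl(int x, int n) { return x << n; }
//
// gets an exponent check (n <= 31) and, for signed x, a base check that no
// one-bits are shifted out (under C++ rules shifting a 1 into the sign bit is
// tolerated; under the C99 rules it is not).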
3506 
3507 Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
3508  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
3509  // RHS to the same size as the LHS.
3510  Value *RHS = Ops.RHS;
3511  if (Ops.LHS->getType() != RHS->getType())
3512  RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
3513 
3514  // OpenCL 6.3j: shift values are effectively % word size of LHS.
3515  if (CGF.getLangOpts().OpenCL)
3516  RHS =
3517  Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shr.mask");
3518  else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
3519  isa<llvm::IntegerType>(Ops.LHS->getType())) {
3520  CodeGenFunction::SanitizerScope SanScope(&CGF);
3521  llvm::Value *Valid =
3522  Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
3523  EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
3524  }
3525 
3526  if (Ops.Ty->hasUnsignedIntegerRepresentation())
3527  return Builder.CreateLShr(Ops.LHS, RHS, "shr");
3528  return Builder.CreateAShr(Ops.LHS, RHS, "shr");
3529 }
3530 
3531 enum IntrinsicType { VCMPEQ, VCMPGT };
3532 // return corresponding comparison intrinsic for given vector type
3533 static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
3534  BuiltinType::Kind ElemKind) {
3535  switch (ElemKind) {
3536  default: llvm_unreachable("unexpected element type");
3537  case BuiltinType::Char_U:
3538  case BuiltinType::UChar:
3539  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
3540  llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
3541  case BuiltinType::Char_S:
3542  case BuiltinType::SChar:
3543  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
3544  llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
3545  case BuiltinType::UShort:
3546  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
3547  llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
3548  case BuiltinType::Short:
3549  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
3550  llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
3551  case BuiltinType::UInt:
3552  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
3553  llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
3554  case BuiltinType::Int:
3555  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
3556  llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
3557  case BuiltinType::ULong:
3558  case BuiltinType::ULongLong:
3559  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
3560  llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
3561  case BuiltinType::Long:
3562  case BuiltinType::LongLong:
3563  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
3564  llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
3565  case BuiltinType::Float:
3566  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
3567  llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
3568  case BuiltinType::Double:
3569  return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
3570  llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
3571  }
3572 }
3573 
3574 Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
3575  llvm::CmpInst::Predicate UICmpOpc,
3576  llvm::CmpInst::Predicate SICmpOpc,
3577  llvm::CmpInst::Predicate FCmpOpc) {
3578  TestAndClearIgnoreResultAssign();
3579  Value *Result;
3580  QualType LHSTy = E->getLHS()->getType();
3581  QualType RHSTy = E->getRHS()->getType();
3582  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
3583  assert(E->getOpcode() == BO_EQ ||
3584  E->getOpcode() == BO_NE);
3585  Value *LHS = CGF.EmitScalarExpr(E->getLHS());
3586  Value *RHS = CGF.EmitScalarExpr(E->getRHS());
3587  Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
3588  CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
3589  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
3590  Value *LHS = Visit(E->getLHS());
3591  Value *RHS = Visit(E->getRHS());
3592 
3593  // If AltiVec, the comparison results in a numeric type, so we use
3594  // intrinsics comparing vectors and giving 0 or 1 as a result
3595  if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
3596  // constants for mapping CR6 register bits to predicate result
3597  enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
3598 
3599  llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
3600 
3601  // in several cases the vector argument order will be reversed
3602  Value *FirstVecArg = LHS,
3603  *SecondVecArg = RHS;
3604 
3605  QualType ElTy = LHSTy->getAs<VectorType>()->getElementType();
3606  const BuiltinType *BTy = ElTy->getAs<BuiltinType>();
3607  BuiltinType::Kind ElementKind = BTy->getKind();
3608 
3609  switch(E->getOpcode()) {
3610  default: llvm_unreachable("is not a comparison operation");
3611  case BO_EQ:
3612  CR6 = CR6_LT;
3613  ID = GetIntrinsic(VCMPEQ, ElementKind);
3614  break;
3615  case BO_NE:
3616  CR6 = CR6_EQ;
3617  ID = GetIntrinsic(VCMPEQ, ElementKind);
3618  break;
3619  case BO_LT:
3620  CR6 = CR6_LT;
3621  ID = GetIntrinsic(VCMPGT, ElementKind);
3622  std::swap(FirstVecArg, SecondVecArg);
3623  break;
3624  case BO_GT:
3625  CR6 = CR6_LT;
3626  ID = GetIntrinsic(VCMPGT, ElementKind);
3627  break;
3628  case BO_LE:
3629  if (ElementKind == BuiltinType::Float) {
3630  CR6 = CR6_LT;
3631  ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
3632  std::swap(FirstVecArg, SecondVecArg);
3633  }
3634  else {
3635  CR6 = CR6_EQ;
3636  ID = GetIntrinsic(VCMPGT, ElementKind);
3637  }
3638  break;
3639  case BO_GE:
3640  if (ElementKind == BuiltinType::Float) {
3641  CR6 = CR6_LT;
3642  ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
3643  }
3644  else {
3645  CR6 = CR6_EQ;
3646  ID = GetIntrinsic(VCMPGT, ElementKind);
3647  std::swap(FirstVecArg, SecondVecArg);
3648  }
3649  break;
3650  }
3651 
3652  Value *CR6Param = Builder.getInt32(CR6);
3653  llvm::Function *F = CGF.CGM.getIntrinsic(ID);
3654  Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
3655 
3656  // The result type of the intrinsic may not be the same as E->getType().
3657  // If E->getType() is not BoolTy, EmitScalarConversion will do the
3658  // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
3659  // do nothing; in that case, if ResultTy is not i1, it would cause a
3660  // crash later, so truncate the result to i1 here.
3661  llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
3662  if (ResultTy->getBitWidth() > 1 &&
3663  E->getType() == CGF.getContext().BoolTy)
3664  Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
3665  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
3666  E->getExprLoc());
3667  }
3668 
3669  if (LHS->getType()->isFPOrFPVectorTy()) {
3670  Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
3671  } else if (LHSTy->hasSignedIntegerRepresentation()) {
3672  Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
3673  } else {
3674  // Unsigned integers and pointers.
3675 
3676  if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
3677  !isa<llvm::ConstantPointerNull>(LHS) &&
3678  !isa<llvm::ConstantPointerNull>(RHS)) {
3679 
3680  // Dynamic information is required to be stripped for comparisons,
3681  // because it could leak the dynamic information. Based on comparisons
3682  // of pointers to dynamic objects, the optimizer can replace one pointer
3683  // with another, which might be incorrect in presence of invariant
3684  // groups. Comparison with null is safe because null does not carry any
3685  // dynamic information.
3686  if (LHSTy.mayBeDynamicClass())
3687  LHS = Builder.CreateStripInvariantGroup(LHS);
3688  if (RHSTy.mayBeDynamicClass())
3689  RHS = Builder.CreateStripInvariantGroup(RHS);
3690  }
3691 
3692  Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
3693  }
3694 
3695  // If this is a vector comparison, sign extend the result to the appropriate
3696  // vector integer type and return it (don't convert to bool).
3697  if (LHSTy->isVectorType())
3698  return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
3699 
3700  } else {
3701  // Complex Comparison: can only be an equality comparison.
3702  CodeGenFunction::ComplexPairTy LHS, RHS;
3703  QualType CETy;
3704  if (auto *CTy = LHSTy->getAs<ComplexType>()) {
3705  LHS = CGF.EmitComplexExpr(E->getLHS());
3706  CETy = CTy->getElementType();
3707  } else {
3708  LHS.first = Visit(E->getLHS());
3709  LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
3710  CETy = LHSTy;
3711  }
3712  if (auto *CTy = RHSTy->getAs<ComplexType>()) {
3713  RHS = CGF.EmitComplexExpr(E->getRHS());
3714  assert(CGF.getContext().hasSameUnqualifiedType(CETy,
3715  CTy->getElementType()) &&
3716  "The element types must always match.");
3717  (void)CTy;
3718  } else {
3719  RHS.first = Visit(E->getRHS());
3720  RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
3721  assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
3722  "The element types must always match.");
3723  }
3724 
3725  Value *ResultR, *ResultI;
3726  if (CETy->isRealFloatingType()) {
3727  ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
3728  ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
3729  } else {
3730  // Complex comparisons can only be equality comparisons. As such, signed
3731  // and unsigned opcodes are the same.
3732  ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
3733  ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
3734  }
3735 
3736  if (E->getOpcode() == BO_EQ) {
3737  Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
3738  } else {
3739  assert(E->getOpcode() == BO_NE &&
3740  "Complex comparison other than == or != ?");
3741  Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
3742  }
3743  }
3744 
3745  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
3746  E->getExprLoc());
3747 }
3748 
3749 Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
3750  bool Ignore = TestAndClearIgnoreResultAssign();
3751 
3752  Value *RHS;
3753  LValue LHS;
3754 
3755  switch (E->getLHS()->getType().getObjCLifetime()) {
3756  case Qualifiers::OCL_Strong:
3757  std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
3758  break;
3759 
3760  case Qualifiers::OCL_Autoreleasing:
3761  std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
3762  break;
3763 
3764  case Qualifiers::OCL_ExplicitNone:
3765  std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
3766  break;
3767 
3768  case Qualifiers::OCL_Weak:
3769  RHS = Visit(E->getRHS());
3770  LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3771  RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
3772  break;
3773 
3774  case Qualifiers::OCL_None:
3775  // __block variables need to have the rhs evaluated first, plus
3776  // this should improve codegen just a little.
3777  RHS = Visit(E->getRHS());
3778  LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3779 
3780  // Store the value into the LHS. Bit-fields are handled specially
3781  // because the result is altered by the store, i.e., [C99 6.5.16p1]
3782  // 'An assignment expression has the value of the left operand after
3783  // the assignment...'.
3784  if (LHS.isBitField()) {
3785  CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
3786  } else {
3787  CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
3788  CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
3789  }
3790  }
3791 
3792  // If the result is clearly ignored, return now.
3793  if (Ignore)
3794  return nullptr;
3795 
3796  // The result of an assignment in C is the assigned r-value.
3797  if (!CGF.getLangOpts().CPlusPlus)
3798  return RHS;
3799 
3800  // If the lvalue is non-volatile, return the computed value of the assignment.
3801  if (!LHS.isVolatileQualified())
3802  return RHS;
3803 
3804  // Otherwise, reload the value.
3805  return EmitLoadOfLValue(LHS, E->getExprLoc());
3806 }
3807 
3808 Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
3809  // Perform vector logical and on comparisons with zero vectors.
3810  if (E->getType()->isVectorType()) {
3811  CGF.incrementProfileCounter(E);
3812 
3813  Value *LHS = Visit(E->getLHS());
3814  Value *RHS = Visit(E->getRHS());
3815  Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
3816  if (LHS->getType()->isFPOrFPVectorTy()) {
3817  LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
3818  RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
3819  } else {
3820  LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
3821  RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
3822  }
3823  Value *And = Builder.CreateAnd(LHS, RHS);
3824  return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
3825  }
3826 
3827  llvm::Type *ResTy = ConvertType(E->getType());
3828 
3829  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
3830  // If we have 1 && X, just emit X without inserting the control flow.
3831  bool LHSCondVal;
3832  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
3833  if (LHSCondVal) { // If we have 1 && X, just emit X.
3834  CGF.incrementProfileCounter(E);
3835 
3836  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
3837  // ZExt result to int or bool.
3838  return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
3839  }
3840 
3841  // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
3842  if (!CGF.ContainsLabel(E->getRHS()))
3843  return llvm::Constant::getNullValue(ResTy);
3844  }
3845 
3846  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
3847  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
3848 
3849  CodeGenFunction::ConditionalEvaluation eval(CGF);
3850 
3851  // Branch on the LHS first. If it is false, go to the failure (cont) block.
3852  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
3853  CGF.getProfileCount(E->getRHS()));
3854 
3855  // Any edges into the ContBlock are now from an (indeterminate number of)
3856  // edges from this first condition. All of these values will be false. Start
3857  // setting up the PHI node in the Cont Block for this.
3858  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
3859  "", ContBlock);
3860  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
3861  PI != PE; ++PI)
3862  PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
3863 
3864  eval.begin(CGF);
3865  CGF.EmitBlock(RHSBlock);
3866  CGF.incrementProfileCounter(E);
3867  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
3868  eval.end(CGF);
3869 
3870  // Reacquire the RHS block, as there may be subblocks inserted.
3871  RHSBlock = Builder.GetInsertBlock();
3872 
3873  // Emit an unconditional branch from this block to ContBlock.
3874  {
3875  // There is no need to emit line number for unconditional branch.
3876  auto NL = ApplyDebugLocation::CreateEmpty(CGF);
3877  CGF.EmitBlock(ContBlock);
3878  }
3879  // Insert an entry into the phi node for the edge with the value of RHSCond.
3880  PN->addIncoming(RHSCond, RHSBlock);
3881 
3882  // Artificial location to preserve the scope information
3883  {
3884  auto NL = ApplyDebugLocation::CreateArtificial(CGF);
3885  PN->setDebugLoc(Builder.getCurrentDebugLocation());
3886  }
3887 
3888  // ZExt result to int.
3889  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
3890 }
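// [Editor's note: illustrative example, not part of the original CGExprScalar.cpp.]
// The constant folding above means '0 && f()' collapses to the constant 0
// (provided the RHS contains no labels) without ever emitting f(), while
//
//   int g(int x) { return 1 && x; }
//
// emits only the RHS as a bool, zero-extended to int; the branch-and-PHI form
// is reserved for a genuinely unknown LHS.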
3891 
3892 Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
3893  // Perform vector logical or on comparisons with zero vectors.
3894  if (E->getType()->isVectorType()) {
3895  CGF.incrementProfileCounter(E);
3896 
3897  Value *LHS = Visit(E->getLHS());
3898  Value *RHS = Visit(E->getRHS());
3899  Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
3900  if (LHS->getType()->isFPOrFPVectorTy()) {
3901  LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
3902  RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
3903  } else {
3904  LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
3905  RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
3906  }
3907  Value *Or = Builder.CreateOr(LHS, RHS);
3908  return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
3909  }
3910 
3911  llvm::Type *ResTy = ConvertType(E->getType());
3912 
3913  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
3914  // If we have 0 || X, just emit X without inserting the control flow.
3915  bool LHSCondVal;
3916  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
3917  if (!LHSCondVal) { // If we have 0 || X, just emit X.
3918  CGF.incrementProfileCounter(E);
3919 
3920  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
3921  // ZExt result to int or bool.
3922  return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
3923  }
3924 
3925  // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
3926  if (!CGF.ContainsLabel(E->getRHS()))
3927  return llvm::ConstantInt::get(ResTy, 1);
3928  }
3929 
3930  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
3931  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
3932 
3933  CodeGenFunction::ConditionalEvaluation eval(CGF);
3934 
3935  // Branch on the LHS first. If it is true, go to the success (cont) block.
3936  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
3937  CGF.getCurrentProfileCount() -
3938  CGF.getProfileCount(E->getRHS()));
3939 
3940  // Any edges into the ContBlock are now from an (indeterminate number of)
3941  // edges from this first condition. All of these values will be true. Start
3942  // setting up the PHI node in the Cont Block for this.
3943  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
3944  "", ContBlock);
3945  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
3946  PI != PE; ++PI)
3947  PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
3948 
3949  eval.begin(CGF);
3950 
3951  // Emit the RHS condition as a bool value.
3952  CGF.EmitBlock(RHSBlock);
3953  CGF.incrementProfileCounter(E);
3954  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
3955 
3956  eval.end(CGF);
3957 
3958  // Reacquire the RHS block, as there may be subblocks inserted.
3959  RHSBlock = Builder.GetInsertBlock();
3960 
3961  // Emit an unconditional branch from this block to ContBlock. Insert an entry
3962  // into the phi node for the edge with the value of RHSCond.
3963  CGF.EmitBlock(ContBlock);
3964  PN->addIncoming(RHSCond, RHSBlock);
3965 
3966  // ZExt result to int.
3967  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
3968 }
3969 
3970 Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
3971  CGF.EmitIgnoredExpr(E->getLHS());
3972  CGF.EnsureInsertPoint();
3973  return Visit(E->getRHS());
3974 }
3975 
3976 //===----------------------------------------------------------------------===//
3977 // Other Operators
3978 //===----------------------------------------------------------------------===//
3979 
3980 /// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
3981 /// expression is cheap enough and side-effect-free enough to evaluate
3982 /// unconditionally instead of conditionally. This is used to convert control
3983 /// flow into selects in some cases.
3984 static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
3985  CodeGenFunction &CGF) {
3986  // Anything that is an integer or floating point constant is fine.
3987  return E->IgnoreParens()->isEvaluatable(CGF.getContext());
3988 
3989  // Even non-volatile automatic variables can't be evaluated unconditionally.
3990  // Referencing a thread_local may cause non-trivial initialization work to
3991  // occur. If we're inside a lambda and one of the variables is from the scope
3992  // outside the lambda, that function may have returned already. Reading its
3993  // locals is a bad idea. Also, these reads may introduce races that didn't
3994  // exist in the source-level program.
3995 }
3996 
3997 
3998 Value *ScalarExprEmitter::
3999 VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
4000  TestAndClearIgnoreResultAssign();
4001 
4002  // Bind the common expression if necessary.
4003  CodeGenFunction::OpaqueValueMapping binding(CGF, E);
4004 
4005  Expr *condExpr = E->getCond();
4006  Expr *lhsExpr = E->getTrueExpr();
4007  Expr *rhsExpr = E->getFalseExpr();
4008 
4009  // If the condition constant folds and can be elided, try to avoid emitting
4010  // the condition and the dead arm.
4011  bool CondExprBool;
4012  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
4013  Expr *live = lhsExpr, *dead = rhsExpr;
4014  if (!CondExprBool) std::swap(live, dead);
4015 
4016  // If the dead side doesn't have labels we need, just emit the Live part.
4017  if (!CGF.ContainsLabel(dead)) {
4018  if (CondExprBool)
4019  CGF.incrementProfileCounter(E);
4020  Value *Result = Visit(live);
4021 
4022  // If the live part is a throw expression, it acts like it has a void
4023  // type, so evaluating it returns a null Value*. However, a conditional
4024  // with non-void type must return a non-null Value*.
4025  if (!Result && !E->getType()->isVoidType())
4026  Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
4027 
4028  return Result;
4029  }
4030  }
4031 
4032  // OpenCL: If the condition is a vector, we can treat this condition like
4033  // the select function.
4034  if (CGF.getLangOpts().OpenCL
4035  && condExpr->getType()->isVectorType()) {
4036  CGF.incrementProfileCounter(E);
4037 
4038  llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
4039  llvm::Value *LHS = Visit(lhsExpr);
4040  llvm::Value *RHS = Visit(rhsExpr);
4041 
4042  llvm::Type *condType = ConvertType(condExpr->getType());
4043  llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
4044 
4045  unsigned numElem = vecTy->getNumElements();
4046  llvm::Type *elemType = vecTy->getElementType();
4047 
4048  llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
4049  llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
4050  llvm::Value *tmp = Builder.CreateSExt(TestMSB,
4051  llvm::VectorType::get(elemType,
4052  numElem),
4053  "sext");
4054  llvm::Value *tmp2 = Builder.CreateNot(tmp);
4055 
4056  // Cast float to int to perform ANDs if necessary.
4057  llvm::Value *RHSTmp = RHS;
4058  llvm::Value *LHSTmp = LHS;
4059  bool wasCast = false;
4060  llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
4061  if (rhsVTy->getElementType()->isFloatingPointTy()) {
4062  RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
4063  LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
4064  wasCast = true;
4065  }
4066 
4067  llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
4068  llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
4069  llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
4070  if (wasCast)
4071  tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
4072 
4073  return tmp5;
4074  }
4075 
4076  // If this is a really simple expression (like x ? 4 : 5), emit this as a
4077  // select instead of as control flow. We can only do this if it is cheap and
4078  // safe to evaluate the LHS and RHS unconditionally.
4079  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
4080  isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
4081  llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
4082  llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
4083 
4084  CGF.incrementProfileCounter(E, StepV);
4085 
4086  llvm::Value *LHS = Visit(lhsExpr);
4087  llvm::Value *RHS = Visit(rhsExpr);
4088  if (!LHS) {
4089  // If the conditional has void type, make sure we return a null Value*.
4090  assert(!RHS && "LHS and RHS types must match");
4091  return nullptr;
4092  }
4093  return Builder.CreateSelect(CondV, LHS, RHS, "cond");
4094  }
4095 
4096  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
4097  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
4098  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
4099 
4100  CodeGenFunction::ConditionalEvaluation eval(CGF);
4101  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
4102  CGF.getProfileCount(lhsExpr));
4103 
4104  CGF.EmitBlock(LHSBlock);
4105  CGF.incrementProfileCounter(E);
4106  eval.begin(CGF);
4107  Value *LHS = Visit(lhsExpr);
4108  eval.end(CGF);
4109 
4110  LHSBlock = Builder.GetInsertBlock();
4111  Builder.CreateBr(ContBlock);
4112 
4113  CGF.EmitBlock(RHSBlock);
4114  eval.begin(CGF);
4115  Value *RHS = Visit(rhsExpr);
4116  eval.end(CGF);
4117 
4118  RHSBlock = Builder.GetInsertBlock();
4119  CGF.EmitBlock(ContBlock);
4120 
4121  // If the LHS or RHS is a throw expression, it will be legitimately null.
4122  if (!LHS)
4123  return RHS;
4124  if (!RHS)
4125  return LHS;
4126 
4127  // Create a PHI node for the real part.
4128  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
4129  PN->addIncoming(LHS, LHSBlock);
4130  PN->addIncoming(RHS, RHSBlock);
4131  return PN;
4132 }
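// [Editor's note: illustrative example, not part of the original CGExprScalar.cpp.]
// A conditional whose arms are cheap and side-effect free, e.g.
//
//   int pick(int c) { return c ? 4 : 5; }
//
// is emitted as a single 'select' instruction instead of branches; arms that
// are expensive, have side effects, or contain labels keep the control-flow
// lowering above.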
4133 
4134 Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
4135  return Visit(E->getChosenSubExpr());
4136 }
4137 
4138 Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
4139  QualType Ty = VE->getType();
4140 
4141  if (Ty->isVariablyModifiedType())
4142  CGF.EmitVariablyModifiedType(Ty);
4143 
4144  Address ArgValue = Address::invalid();
4145  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
4146 
4147  llvm::Type *ArgTy = ConvertType(VE->getType());
4148 
4149  // If EmitVAArg fails, emit an error.
4150  if (!ArgPtr.isValid()) {
4151  CGF.ErrorUnsupported(VE, "va_arg expression");
4152  return llvm::UndefValue::get(ArgTy);
4153  }
4154 
4155  // FIXME Volatility.
4156  llvm::Value *Val = Builder.CreateLoad(ArgPtr);
4157 
4158  // If EmitVAArg promoted the type, we must truncate it.
4159  if (ArgTy != Val->getType()) {
4160  if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
4161  Val = Builder.CreateIntToPtr(Val, ArgTy);
4162  else
4163  Val = Builder.CreateTrunc(Val, ArgTy);
4164  }
4165 
4166  return Val;
4167 }
4168 
4169 Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
4170  return CGF.EmitBlockLiteral(block);
4171 }
4172 
4173 // Convert a vec3 to vec4, or vice versa.
4174 static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
4175  Value *Src, unsigned NumElementsDst) {
4176  llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
4177  SmallVector<llvm::Constant*, 4> Args;
4178  Args.push_back(Builder.getInt32(0));
4179  Args.push_back(Builder.getInt32(1));
4180  Args.push_back(Builder.getInt32(2));
4181  if (NumElementsDst == 4)
4182  Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
4183  llvm::Constant *Mask = llvm::ConstantVector::get(Args);
4184  return Builder.CreateShuffleVector(Src, UnV, Mask);
4185 }
4186 
4187 // Create cast instructions for converting LLVM value \p Src to LLVM type \p
4188 // DstTy. \p Src has the same size as \p DstTy. Both are single value types
4189 // but could be scalar or vectors of different lengths, and either can be
4190 // pointer.
4191 // There are 4 cases:
4192 // 1. non-pointer -> non-pointer : needs 1 bitcast
4193 // 2. pointer -> pointer : needs 1 bitcast or addrspacecast
4194 // 3. pointer -> non-pointer
4195 // a) pointer -> intptr_t : needs 1 ptrtoint
4196 // b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
4197 // 4. non-pointer -> pointer
4198 // a) intptr_t -> pointer : needs 1 inttoptr
4199 // b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
4200 // Note: for cases 3b and 4b two casts are required since LLVM casts do not
4201 // allow casting directly between pointer types and non-integer non-pointer
4202 // types.
4203 static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
4204  const llvm::DataLayout &DL,
4205  Value *Src, llvm::Type *DstTy,
4206  StringRef Name = "") {
4207  auto SrcTy = Src->getType();
4208 
4209  // Case 1.
4210  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
4211  return Builder.CreateBitCast(Src, DstTy, Name);
4212 
4213  // Case 2.
4214  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
4215  return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
4216 
4217  // Case 3.
4218  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
4219  // Case 3b.
4220  if (!DstTy->isIntegerTy())
4221  Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
4222  // Cases 3a and 3b.
4223  return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
4224  }
4225 
4226  // Case 4b.
4227  if (!SrcTy->isIntegerTy())
4228  Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
4229  // Cases 4a and 4b.
4230  return Builder.CreateIntToPtr(Src, DstTy, Name);
4231 }
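// [Editor's note: illustrative example, not part of the original CGExprScalar.cpp.]
// __builtin_astype reinterprets a value as a same-sized type; OpenCL C source
// such as (names here are made up for the example)
//
//   float4 bits_to_float4(uint4 v) { return __builtin_astype(v, float4); }
//
// hits case 1 above and becomes a single bitcast of <4 x i32> to <4 x float>,
// while pointer/integer mixtures need the ptrtoint/inttoptr forms described in
// cases 3 and 4.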
4232 
4233 Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
4234  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
4235  llvm::Type *DstTy = ConvertType(E->getType());
4236 
4237  llvm::Type *SrcTy = Src->getType();
4238  unsigned NumElementsSrc = isa<llvm::VectorType>(SrcTy) ?
4239  cast<llvm::VectorType>(SrcTy)->getNumElements() : 0;
4240  unsigned NumElementsDst = isa<llvm::VectorType>(DstTy) ?
4241  cast<llvm::VectorType>(DstTy)->getNumElements() : 0;
4242 
4243  // Going from vec3 to non-vec3 is a special case and requires a shuffle
4244  // vector to get a vec4, then a bitcast if the target type is different.
4245  if (NumElementsSrc == 3 && NumElementsDst != 3) {
4246  Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
4247 
4248  if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4249  Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
4250  DstTy);
4251  }
4252 
4253  Src->setName("astype");
4254  return Src;
4255  }
4256 
4257  // Going from non-vec3 to vec3 is a special case and requires a bitcast
4258  // to vec4 if the original type is not vec4, then a shuffle vector to
4259  // get a vec3.
4260  if (NumElementsSrc != 3 && NumElementsDst == 3) {
4261  if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
4262  auto Vec4Ty = llvm::VectorType::get(DstTy->getVectorElementType(), 4);
4263  Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
4264  Vec4Ty);
4265  }
4266 
4267  Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
4268  Src->setName("astype");
4269  return Src;
4270  }
4271 
4272  return Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
4273  Src, DstTy, "astype");
4274 }
4275 
4276 Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
4277  return CGF.EmitAtomicExpr(E).getScalarVal();
4278 }
4279 
4280 //===----------------------------------------------------------------------===//
4281 // Entry Point into this File
4282 //===----------------------------------------------------------------------===//
4283 
4284 /// Emit the computation of the specified expression of scalar type, ignoring
4285 /// the result.
4286 Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
4287  assert(E && hasScalarEvaluationKind(E->getType()) &&
4288  "Invalid scalar expression to emit");
4289 
4290  return ScalarExprEmitter(*this, IgnoreResultAssign)
4291  .Visit(const_cast<Expr *>(E));
4292 }
4293 
4294 /// Emit a conversion from the specified type to the specified destination type,
4295 /// both of which are LLVM scalar types.
4296 Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
4297  QualType DstTy,
4298  SourceLocation Loc) {
4299  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
4300  "Invalid scalar expression to emit");
4301  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
4302 }
4303 
4304 /// Emit a conversion from the specified complex type to the specified
4305 /// destination type, where the destination type is an LLVM scalar type.
4306 Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
4307  QualType SrcTy,
4308  QualType DstTy,
4309  SourceLocation Loc) {
4310  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
4311  "Invalid complex -> scalar conversion");
4312  return ScalarExprEmitter(*this)
4313  .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
4314 }
4315 
4316 
4317 llvm::Value *CodeGenFunction::
4318 EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
4319  bool isInc, bool isPre) {
4320  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
4321 }
4322 
4323 LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
4324  // object->isa or (*object).isa
4325  // Generate code as for: *(Class*)object
4326 
4327  Expr *BaseExpr = E->getBase();
4328  Address Addr = Address::invalid();
4329  if (BaseExpr->isRValue()) {
4330  Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
4331  } else {
4332  Addr = EmitLValue(BaseExpr).getAddress();
4333  }
4334 
4335  // Cast the address to Class*.
4336  Addr = Builder.CreateElementBitCast(Addr, ConvertType(E->getType()));
4337  return MakeAddrLValue(Addr, E->getType());
4338 }
4339 
4340 
4341 LValue CodeGenFunction::EmitCompoundAssignmentLValue(
4342  const CompoundAssignOperator *E) {
4343  ScalarExprEmitter Scalar(*this);
4344  Value *Result = nullptr;
4345  switch (E->getOpcode()) {
4346 #define COMPOUND_OP(Op) \
4347  case BO_##Op##Assign: \
4348  return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
4349  Result)
4350  COMPOUND_OP(Mul);
4351  COMPOUND_OP(Div);
4352  COMPOUND_OP(Rem);
4353  COMPOUND_OP(Add);
4354  COMPOUND_OP(Sub);
4355  COMPOUND_OP(Shl);
4356  COMPOUND_OP(Shr);
4357  COMPOUND_OP(And);
4358  COMPOUND_OP(Xor);
4359  COMPOUND_OP(Or);
4360 #undef COMPOUND_OP
4361 
4362  case BO_PtrMemD:
4363  case BO_PtrMemI:
4364  case BO_Mul:
4365  case BO_Div:
4366  case BO_Rem:
4367  case BO_Add:
4368  case BO_Sub:
4369  case BO_Shl:
4370  case BO_Shr:
4371  case BO_LT:
4372  case BO_GT:
4373  case BO_LE:
4374  case BO_GE:
4375  case BO_EQ:
4376  case BO_NE:
4377  case BO_Cmp:
4378  case BO_And:
4379  case BO_Xor:
4380  case BO_Or:
4381  case BO_LAnd:
4382  case BO_LOr:
4383  case BO_Assign:
4384  case BO_Comma:
4385  llvm_unreachable("Not valid compound assignment operators");
4386  }
4387 
4388  llvm_unreachable("Unhandled compound assignment operator");
4389 }
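
For reference, each COMPOUND_OP(Op) line above is a purely mechanical macro expansion; for example, COMPOUND_OP(Add) expands to a case that forwards to EmitCompoundAssignLValue with the matching member-function pointer:

case BO_AddAssign:
  return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::EmitAdd,
                                         Result);

so every compound-assignment opcode is routed through the same helper, differing only in which member function computes the value.
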
4390 
4391 Value *CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr,
4392  ArrayRef<Value *> IdxList,
4393  bool SignedIndices,
4394  bool IsSubtraction,
4395  SourceLocation Loc,
4396  const Twine &Name) {
4397  Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name);
4398 
4399  // If the pointer overflow sanitizer isn't enabled, do nothing.
4400  if (!SanOpts.has(SanitizerKind::PointerOverflow))
4401  return GEPVal;
4402 
4403  // If the GEP has already been reduced to a constant, leave it be.
4404  if (isa<llvm::Constant>(GEPVal))
4405  return GEPVal;
4406 
4407  // Only check for overflows in the default address space.
4408  if (GEPVal->getType()->getPointerAddressSpace())
4409  return GEPVal;
4410 
4411  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
4412  assert(GEP->isInBounds() && "Expected inbounds GEP");
4413 
4414  SanitizerScope SanScope(this);
4415  auto &VMContext = getLLVMContext();
4416  const auto &DL = CGM.getDataLayout();
4417  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
4418 
4419  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
4420  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
4421  auto *SAddIntrinsic =
4422  CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
4423  auto *SMulIntrinsic =
4424  CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
4425 
4426  // The total (signed) byte offset for the GEP.
4427  llvm::Value *TotalOffset = nullptr;
4428  // The offset overflow flag - true if the total offset overflows.
4429  llvm::Value *OffsetOverflows = Builder.getFalse();
4430 
4431  /// Return the result of the given binary operation.
4432  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
4433  llvm::Value *RHS) -> llvm::Value * {
4434  assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
4435 
4436  // If the operands are constants, return a constant result.
4437  if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
4438  if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
4439  llvm::APInt N;
4440  bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
4441  /*Signed=*/true, N);
4442  if (HasOverflow)
4443  OffsetOverflows = Builder.getTrue();
4444  return llvm::ConstantInt::get(VMContext, N);
4445  }
4446  }
4447 
4448  // Otherwise, compute the result with checked arithmetic.
4449  auto *ResultAndOverflow = Builder.CreateCall(
4450  (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
4451  OffsetOverflows = Builder.CreateOr(
4452  Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
4453  return Builder.CreateExtractValue(ResultAndOverflow, 0);
4454  };
4455 
4456  // Determine the total byte offset by looking at each GEP operand.
4457  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
4458  GTI != GTE; ++GTI) {
4459  llvm::Value *LocalOffset;
4460  auto *Index = GTI.getOperand();
4461  // Compute the local offset contributed by this indexing step:
4462  if (auto *STy = GTI.getStructTypeOrNull()) {
4463  // For struct indexing, the local offset is the byte position of the
4464  // specified field.
4465  unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
4466  LocalOffset = llvm::ConstantInt::get(
4467  IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
4468  } else {
4469  // Otherwise this is array-like indexing. The local offset is the index
4470  // multiplied by the element size.
4471  auto *ElementSize = llvm::ConstantInt::get(
4472  IntPtrTy, DL.getTypeAllocSize(GTI.getIndexedType()));
4473  auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
4474  LocalOffset = eval(BO_Mul, ElementSize, IndexS);
4475  }
4476 
4477  // If this is the first offset, set it as the total offset. Otherwise, add
4478  // the local offset into the running total.
4479  if (!TotalOffset || TotalOffset == Zero)
4480  TotalOffset = LocalOffset;
4481  else
4482  TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
4483  }
4484 
4485  // Common case: if the total offset is zero, don't emit a check.
4486  if (TotalOffset == Zero)
4487  return GEPVal;
4488 
4489  // Now that we've computed the total offset, add it to the base pointer (with
4490  // wrapping semantics).
4491  auto *IntPtr = Builder.CreatePtrToInt(GEP->getPointerOperand(), IntPtrTy);
4492  auto *ComputedGEP = Builder.CreateAdd(IntPtr, TotalOffset);
4493 
4494  // The GEP is valid if:
4495  // 1) The total offset doesn't overflow, and
4496  // 2) The sign of the difference between the computed address and the base
4497  // pointer matches the sign of the total offset.
4498  llvm::Value *ValidGEP;
4499  auto *NoOffsetOverflow = Builder.CreateNot(OffsetOverflows);
4500  if (SignedIndices) {
4501  auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
4502  auto *PosOrZeroOffset = Builder.CreateICmpSGE(TotalOffset, Zero);
4503  llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
4504  ValidGEP = Builder.CreateAnd(
4505  Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid),
4506  NoOffsetOverflow);
4507  } else if (!SignedIndices && !IsSubtraction) {
4508  auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
4509  ValidGEP = Builder.CreateAnd(PosOrZeroValid, NoOffsetOverflow);
4510  } else {
4511  auto *NegOrZeroValid = Builder.CreateICmpULE(ComputedGEP, IntPtr);
4512  ValidGEP = Builder.CreateAnd(NegOrZeroValid, NoOffsetOverflow);
4513  }
4514 
4515  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
4516  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
4517  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
4518  EmitCheck(std::make_pair(ValidGEP, SanitizerKind::PointerOverflow),
4519  SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
4520 
4521  return GEPVal;
4522 }
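
As a rough illustration of what this check guards against, the standalone program below is hypothetical (not part of clang, and the exact diagnostic depends on compiler version and optimization level). When built with -fsanitize=pointer-overflow, the pointer addition wraps around the address space, so the computed address compares below the base pointer even though the indices were not a subtraction, which is the situation EmitCheckedInBoundsGEP reports.

#include <cstdint>
#include <cstdio>

int main() {
  char buf[16];
  char *p = buf;
  // An unsigned index this large makes base + offset wrap, so the computed
  // address ends up below the base pointer.
  std::uintptr_t huge = UINTPTR_MAX - 4;
  char *q = p + huge; // expected to be flagged by -fsanitize=pointer-overflow
  std::printf("%p\n", static_cast<void *>(q));
  return 0;
}
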