// clang 19.0.0git — CGExprScalar.cpp (extracted from the doxygen source browser)
//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGObjCRuntime.h"
17#include "CGOpenMPRuntime.h"
18#include "CGRecordLayout.h"
19#include "CodeGenFunction.h"
20#include "CodeGenModule.h"
21#include "ConstantEmitter.h"
22#include "TargetInfo.h"
24#include "clang/AST/Attr.h"
25#include "clang/AST/DeclObjC.h"
26#include "clang/AST/Expr.h"
31#include "llvm/ADT/APFixedPoint.h"
32#include "llvm/IR/CFG.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/DerivedTypes.h"
36#include "llvm/IR/FixedPointBuilder.h"
37#include "llvm/IR/Function.h"
38#include "llvm/IR/GetElementPtrTypeIterator.h"
39#include "llvm/IR/GlobalVariable.h"
40#include "llvm/IR/Intrinsics.h"
41#include "llvm/IR/IntrinsicsPowerPC.h"
42#include "llvm/IR/MatrixBuilder.h"
43#include "llvm/IR/Module.h"
44#include "llvm/Support/TypeSize.h"
45#include <cstdarg>
46#include <optional>
47
48using namespace clang;
49using namespace CodeGen;
50using llvm::Value;
51
52//===----------------------------------------------------------------------===//
53// Scalar Expression Emitter
54//===----------------------------------------------------------------------===//
55
56namespace llvm {
57extern cl::opt<bool> EnableSingleByteCoverage;
58} // namespace llvm
59
60namespace {
61
62/// Determine whether the given binary operation may overflow.
63/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
64/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
65/// the returned overflow check is precise. The returned value is 'true' for
66/// all other opcodes, to be conservative.
67bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
68 BinaryOperator::Opcode Opcode, bool Signed,
69 llvm::APInt &Result) {
70 // Assume overflow is possible, unless we can prove otherwise.
71 bool Overflow = true;
72 const auto &LHSAP = LHS->getValue();
73 const auto &RHSAP = RHS->getValue();
74 if (Opcode == BO_Add) {
75 Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
76 : LHSAP.uadd_ov(RHSAP, Overflow);
77 } else if (Opcode == BO_Sub) {
78 Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
79 : LHSAP.usub_ov(RHSAP, Overflow);
80 } else if (Opcode == BO_Mul) {
81 Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
82 : LHSAP.umul_ov(RHSAP, Overflow);
83 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
84 if (Signed && !RHS->isZero())
85 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
86 else
87 return false;
88 }
89 return Overflow;
90}
91
92struct BinOpInfo {
93 Value *LHS;
94 Value *RHS;
95 QualType Ty; // Computation Type.
96 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
97 FPOptions FPFeatures;
98 const Expr *E; // Entire expr, for error unsupported. May not be binop.
99
100 /// Check if the binop can result in integer overflow.
101 bool mayHaveIntegerOverflow() const {
102 // Without constant input, we can't rule out overflow.
103 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
104 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
105 if (!LHSCI || !RHSCI)
106 return true;
107
108 llvm::APInt Result;
109 return ::mayHaveIntegerOverflow(
110 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
111 }
112
113 /// Check if the binop computes a division or a remainder.
114 bool isDivremOp() const {
115 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
116 Opcode == BO_RemAssign;
117 }
118
119 /// Check if the binop can result in an integer division by zero.
120 bool mayHaveIntegerDivisionByZero() const {
121 if (isDivremOp())
122 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
123 return CI->isZero();
124 return true;
125 }
126
127 /// Check if the binop can result in a float division by zero.
128 bool mayHaveFloatDivisionByZero() const {
129 if (isDivremOp())
130 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
131 return CFP->isZero();
132 return true;
133 }
134
135 /// Check if at least one operand is a fixed point type. In such cases, this
136 /// operation did not follow usual arithmetic conversion and both operands
137 /// might not be of the same type.
138 bool isFixedPointOp() const {
139 // We cannot simply check the result type since comparison operations return
140 // an int.
141 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
142 QualType LHSType = BinOp->getLHS()->getType();
143 QualType RHSType = BinOp->getRHS()->getType();
144 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
145 }
146 if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
147 return UnOp->getSubExpr()->getType()->isFixedPointType();
148 return false;
149 }
150};
151
152static bool MustVisitNullValue(const Expr *E) {
153 // If a null pointer expression's type is the C++0x nullptr_t, then
154 // it's not necessarily a simple constant and it must be evaluated
155 // for its potential side effects.
156 return E->getType()->isNullPtrType();
157}
158
159/// If \p E is a widened promoted integer, get its base (unpromoted) type.
160static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
161 const Expr *E) {
162 const Expr *Base = E->IgnoreImpCasts();
163 if (E == Base)
164 return std::nullopt;
165
166 QualType BaseTy = Base->getType();
167 if (!Ctx.isPromotableIntegerType(BaseTy) ||
168 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
169 return std::nullopt;
170
171 return BaseTy;
172}
173
174/// Check if \p E is a widened promoted integer.
175static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
176 return getUnwidenedIntegerType(Ctx, E).has_value();
177}
178
179/// Check if we can skip the overflow check for \p Op.
180static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
181 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
182 "Expected a unary or binary operator");
183
184 // If the binop has constant inputs and we can prove there is no overflow,
185 // we can elide the overflow check.
186 if (!Op.mayHaveIntegerOverflow())
187 return true;
188
189 // If a unary op has a widened operand, the op cannot overflow.
190 if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
191 return !UO->canOverflow();
192
193 // We usually don't need overflow checks for binops with widened operands.
194 // Multiplication with promoted unsigned operands is a special case.
195 const auto *BO = cast<BinaryOperator>(Op.E);
196 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
197 if (!OptionalLHSTy)
198 return false;
199
200 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
201 if (!OptionalRHSTy)
202 return false;
203
204 QualType LHSTy = *OptionalLHSTy;
205 QualType RHSTy = *OptionalRHSTy;
206
207 // This is the simple case: binops without unsigned multiplication, and with
208 // widened operands. No overflow check is needed here.
209 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
210 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
211 return true;
212
213 // For unsigned multiplication the overflow check can be elided if either one
214 // of the unpromoted types are less than half the size of the promoted type.
215 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
216 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
217 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
218}
219
220class ScalarExprEmitter
221 : public StmtVisitor<ScalarExprEmitter, Value*> {
222 CodeGenFunction &CGF;
223 CGBuilderTy &Builder;
224 bool IgnoreResultAssign;
225 llvm::LLVMContext &VMContext;
226public:
227
228 ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
229 : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
230 VMContext(cgf.getLLVMContext()) {
231 }
232
233 //===--------------------------------------------------------------------===//
234 // Utilities
235 //===--------------------------------------------------------------------===//
236
237 bool TestAndClearIgnoreResultAssign() {
238 bool I = IgnoreResultAssign;
239 IgnoreResultAssign = false;
240 return I;
241 }
242
243 llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
244 LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
245 LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
246 return CGF.EmitCheckedLValue(E, TCK);
247 }
248
249 void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
250 const BinOpInfo &Info);
251
252 Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
253 return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
254 }
255
256 void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
257 const AlignValueAttr *AVAttr = nullptr;
258 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
259 const ValueDecl *VD = DRE->getDecl();
260
261 if (VD->getType()->isReferenceType()) {
262 if (const auto *TTy =
264 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
265 } else {
266 // Assumptions for function parameters are emitted at the start of the
267 // function, so there is no need to repeat that here,
268 // unless the alignment-assumption sanitizer is enabled,
269 // then we prefer the assumption over alignment attribute
270 // on IR function param.
271 if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
272 return;
273
274 AVAttr = VD->getAttr<AlignValueAttr>();
275 }
276 }
277
278 if (!AVAttr)
279 if (const auto *TTy = E->getType()->getAs<TypedefType>())
280 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
281
282 if (!AVAttr)
283 return;
284
285 Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
286 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
287 CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
288 }
289
290 /// EmitLoadOfLValue - Given an expression with complex type that represents a
291 /// value l-value, this method emits the address of the l-value, then loads
292 /// and returns the result.
293 Value *EmitLoadOfLValue(const Expr *E) {
294 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
295 E->getExprLoc());
296
297 EmitLValueAlignmentAssumption(E, V);
298 return V;
299 }
300
301 /// EmitConversionToBool - Convert the specified expression value to a
302 /// boolean (i1) truth value. This is equivalent to "Val != 0".
303 Value *EmitConversionToBool(Value *Src, QualType DstTy);
304
305 /// Emit a check that a conversion from a floating-point type does not
306 /// overflow.
307 void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
308 Value *Src, QualType SrcType, QualType DstType,
309 llvm::Type *DstTy, SourceLocation Loc);
310
311 /// Known implicit conversion check kinds.
312 /// This is used for bitfield conversion checks as well.
313 /// Keep in sync with the enum of the same name in ubsan_handlers.h
314 enum ImplicitConversionCheckKind : unsigned char {
315 ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
316 ICCK_UnsignedIntegerTruncation = 1,
317 ICCK_SignedIntegerTruncation = 2,
318 ICCK_IntegerSignChange = 3,
319 ICCK_SignedIntegerTruncationOrSignChange = 4,
320 };
321
322 /// Emit a check that an [implicit] truncation of an integer does not
323 /// discard any bits. It is not UB, so we use the value after truncation.
324 void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
325 QualType DstType, SourceLocation Loc);
326
327 /// Emit a check that an [implicit] conversion of an integer does not change
328 /// the sign of the value. It is not UB, so we use the value after conversion.
329 /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
330 void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
331 QualType DstType, SourceLocation Loc);
332
333 /// Emit a conversion from the specified type to the specified destination
334 /// type, both of which are LLVM scalar types.
335 struct ScalarConversionOpts {
336 bool TreatBooleanAsSigned;
337 bool EmitImplicitIntegerTruncationChecks;
338 bool EmitImplicitIntegerSignChangeChecks;
339
340 ScalarConversionOpts()
341 : TreatBooleanAsSigned(false),
342 EmitImplicitIntegerTruncationChecks(false),
343 EmitImplicitIntegerSignChangeChecks(false) {}
344
345 ScalarConversionOpts(clang::SanitizerSet SanOpts)
346 : TreatBooleanAsSigned(false),
347 EmitImplicitIntegerTruncationChecks(
348 SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
349 EmitImplicitIntegerSignChangeChecks(
350 SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
351 };
352 Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
353 llvm::Type *SrcTy, llvm::Type *DstTy,
354 ScalarConversionOpts Opts);
355 Value *
356 EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
357 SourceLocation Loc,
358 ScalarConversionOpts Opts = ScalarConversionOpts());
359
360 /// Convert between either a fixed point and other fixed point or fixed point
361 /// and an integer.
362 Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
363 SourceLocation Loc);
364
365 /// Emit a conversion from the specified complex type to the specified
366 /// destination type, where the destination type is an LLVM scalar type.
367 Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
368 QualType SrcTy, QualType DstTy,
369 SourceLocation Loc);
370
371 /// EmitNullValue - Emit a value that corresponds to null for the given type.
372 Value *EmitNullValue(QualType Ty);
373
374 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
375 Value *EmitFloatToBoolConversion(Value *V) {
376 // Compare against 0.0 for fp scalars.
377 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
378 return Builder.CreateFCmpUNE(V, Zero, "tobool");
379 }
380
381 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
382 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
383 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
384
385 return Builder.CreateICmpNE(V, Zero, "tobool");
386 }
387
388 Value *EmitIntToBoolConversion(Value *V) {
389 // Because of the type rules of C, we often end up computing a
390 // logical value, then zero extending it to int, then wanting it
391 // as a logical value again. Optimize this common case.
392 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
393 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
394 Value *Result = ZI->getOperand(0);
395 // If there aren't any more uses, zap the instruction to save space.
396 // Note that there can be more uses, for example if this
397 // is the result of an assignment.
398 if (ZI->use_empty())
399 ZI->eraseFromParent();
400 return Result;
401 }
402 }
403
404 return Builder.CreateIsNotNull(V, "tobool");
405 }
406
407 //===--------------------------------------------------------------------===//
408 // Visitor Methods
409 //===--------------------------------------------------------------------===//
410
411 Value *Visit(Expr *E) {
412 ApplyDebugLocation DL(CGF, E);
414 }
415
416 Value *VisitStmt(Stmt *S) {
417 S->dump(llvm::errs(), CGF.getContext());
418 llvm_unreachable("Stmt can't have complex result type!");
419 }
420 Value *VisitExpr(Expr *S);
421
422 Value *VisitConstantExpr(ConstantExpr *E) {
423 // A constant expression of type 'void' generates no code and produces no
424 // value.
425 if (E->getType()->isVoidType())
426 return nullptr;
427
428 if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
429 if (E->isGLValue())
430 return CGF.Builder.CreateLoad(Address(
431 Result, CGF.ConvertTypeForMem(E->getType()),
433 return Result;
434 }
435 return Visit(E->getSubExpr());
436 }
437 Value *VisitParenExpr(ParenExpr *PE) {
438 return Visit(PE->getSubExpr());
439 }
440 Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
441 return Visit(E->getReplacement());
442 }
443 Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
444 return Visit(GE->getResultExpr());
445 }
446 Value *VisitCoawaitExpr(CoawaitExpr *S) {
447 return CGF.EmitCoawaitExpr(*S).getScalarVal();
448 }
449 Value *VisitCoyieldExpr(CoyieldExpr *S) {
450 return CGF.EmitCoyieldExpr(*S).getScalarVal();
451 }
452 Value *VisitUnaryCoawait(const UnaryOperator *E) {
453 return Visit(E->getSubExpr());
454 }
455
456 // Leaves.
457 Value *VisitIntegerLiteral(const IntegerLiteral *E) {
458 return Builder.getInt(E->getValue());
459 }
460 Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
461 return Builder.getInt(E->getValue());
462 }
463 Value *VisitFloatingLiteral(const FloatingLiteral *E) {
464 return llvm::ConstantFP::get(VMContext, E->getValue());
465 }
466 Value *VisitCharacterLiteral(const CharacterLiteral *E) {
467 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
468 }
469 Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
470 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
471 }
472 Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
473 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
474 }
475 Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
476 if (E->getType()->isVoidType())
477 return nullptr;
478
479 return EmitNullValue(E->getType());
480 }
481 Value *VisitGNUNullExpr(const GNUNullExpr *E) {
482 return EmitNullValue(E->getType());
483 }
484 Value *VisitOffsetOfExpr(OffsetOfExpr *E);
485 Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
486 Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
487 llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
488 return Builder.CreateBitCast(V, ConvertType(E->getType()));
489 }
490
491 Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
492 return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
493 }
494
495 Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
496 return CGF.EmitPseudoObjectRValue(E).getScalarVal();
497 }
498
499 Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
500
501 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
502 if (E->isGLValue())
503 return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
504 E->getExprLoc());
505
506 // Otherwise, assume the mapping is the scalar directly.
508 }
509
510 // l-values.
511 Value *VisitDeclRefExpr(DeclRefExpr *E) {
512 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
513 return CGF.emitScalarConstant(Constant, E);
514 return EmitLoadOfLValue(E);
515 }
516
517 Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
518 return CGF.EmitObjCSelectorExpr(E);
519 }
520 Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
521 return CGF.EmitObjCProtocolExpr(E);
522 }
523 Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
524 return EmitLoadOfLValue(E);
525 }
526 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
527 if (E->getMethodDecl() &&
529 return EmitLoadOfLValue(E);
530 return CGF.EmitObjCMessageExpr(E).getScalarVal();
531 }
532
533 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
534 LValue LV = CGF.EmitObjCIsaExpr(E);
536 return V;
537 }
538
539 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
540 VersionTuple Version = E->getVersion();
541
542 // If we're checking for a platform older than our minimum deployment
543 // target, we can fold the check away.
544 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
545 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
546
547 return CGF.EmitBuiltinAvailable(Version);
548 }
549
550 Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
551 Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
552 Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
553 Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
554 Value *VisitMemberExpr(MemberExpr *E);
555 Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
556 Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
557 // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
558 // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
559 // literals aren't l-values in C++. We do so simply because that's the
560 // cleanest way to handle compound literals in C++.
561 // See the discussion here: https://reviews.llvm.org/D64464
562 return EmitLoadOfLValue(E);
563 }
564
565 Value *VisitInitListExpr(InitListExpr *E);
566
567 Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
568 assert(CGF.getArrayInitIndex() &&
569 "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
570 return CGF.getArrayInitIndex();
571 }
572
573 Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
574 return EmitNullValue(E->getType());
575 }
576 Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
577 CGF.CGM.EmitExplicitCastExprType(E, &CGF);
578 return VisitCastExpr(E);
579 }
580 Value *VisitCastExpr(CastExpr *E);
581
582 Value *VisitCallExpr(const CallExpr *E) {
584 return EmitLoadOfLValue(E);
585
586 Value *V = CGF.EmitCallExpr(E).getScalarVal();
587
588 EmitLValueAlignmentAssumption(E, V);
589 return V;
590 }
591
592 Value *VisitStmtExpr(const StmtExpr *E);
593
594 // Unary Operators.
595 Value *VisitUnaryPostDec(const UnaryOperator *E) {
596 LValue LV = EmitLValue(E->getSubExpr());
597 return EmitScalarPrePostIncDec(E, LV, false, false);
598 }
599 Value *VisitUnaryPostInc(const UnaryOperator *E) {
600 LValue LV = EmitLValue(E->getSubExpr());
601 return EmitScalarPrePostIncDec(E, LV, true, false);
602 }
603 Value *VisitUnaryPreDec(const UnaryOperator *E) {
604 LValue LV = EmitLValue(E->getSubExpr());
605 return EmitScalarPrePostIncDec(E, LV, false, true);
606 }
607 Value *VisitUnaryPreInc(const UnaryOperator *E) {
608 LValue LV = EmitLValue(E->getSubExpr());
609 return EmitScalarPrePostIncDec(E, LV, true, true);
610 }
611
612 llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
613 llvm::Value *InVal,
614 bool IsInc);
615
616 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
617 bool isInc, bool isPre);
618
619
620 Value *VisitUnaryAddrOf(const UnaryOperator *E) {
621 if (isa<MemberPointerType>(E->getType())) // never sugared
622 return CGF.CGM.getMemberPointerConstant(E);
623
624 return EmitLValue(E->getSubExpr()).getPointer(CGF);
625 }
626 Value *VisitUnaryDeref(const UnaryOperator *E) {
627 if (E->getType()->isVoidType())
628 return Visit(E->getSubExpr()); // the actual value should be unused
629 return EmitLoadOfLValue(E);
630 }
631
632 Value *VisitUnaryPlus(const UnaryOperator *E,
633 QualType PromotionType = QualType());
634 Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
635 Value *VisitUnaryMinus(const UnaryOperator *E,
636 QualType PromotionType = QualType());
637 Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);
638
639 Value *VisitUnaryNot (const UnaryOperator *E);
640 Value *VisitUnaryLNot (const UnaryOperator *E);
641 Value *VisitUnaryReal(const UnaryOperator *E,
642 QualType PromotionType = QualType());
643 Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
644 Value *VisitUnaryImag(const UnaryOperator *E,
645 QualType PromotionType = QualType());
646 Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
647 Value *VisitUnaryExtension(const UnaryOperator *E) {
648 return Visit(E->getSubExpr());
649 }
650
651 // C++
652 Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
653 return EmitLoadOfLValue(E);
654 }
655 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
656 auto &Ctx = CGF.getContext();
660 SLE->getType());
661 }
662
663 Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
664 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
665 return Visit(DAE->getExpr());
666 }
667 Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
668 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
669 return Visit(DIE->getExpr());
670 }
671 Value *VisitCXXThisExpr(CXXThisExpr *TE) {
672 return CGF.LoadCXXThis();
673 }
674
675 Value *VisitExprWithCleanups(ExprWithCleanups *E);
676 Value *VisitCXXNewExpr(const CXXNewExpr *E) {
677 return CGF.EmitCXXNewExpr(E);
678 }
679 Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
680 CGF.EmitCXXDeleteExpr(E);
681 return nullptr;
682 }
683
684 Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
685 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
686 }
687
688 Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
689 return Builder.getInt1(E->isSatisfied());
690 }
691
692 Value *VisitRequiresExpr(const RequiresExpr *E) {
693 return Builder.getInt1(E->isSatisfied());
694 }
695
696 Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
697 return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
698 }
699
700 Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
701 return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
702 }
703
704 Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
705 // C++ [expr.pseudo]p1:
706 // The result shall only be used as the operand for the function call
707 // operator (), and the result of such a call has type void. The only
708 // effect is the evaluation of the postfix-expression before the dot or
709 // arrow.
710 CGF.EmitScalarExpr(E->getBase());
711 return nullptr;
712 }
713
714 Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
715 return EmitNullValue(E->getType());
716 }
717
718 Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
719 CGF.EmitCXXThrowExpr(E);
720 return nullptr;
721 }
722
723 Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
724 return Builder.getInt1(E->getValue());
725 }
726
727 // Binary Operators.
728 Value *EmitMul(const BinOpInfo &Ops) {
729 if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
730 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
732 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
733 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
734 [[fallthrough]];
736 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
737 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
738 [[fallthrough]];
740 if (CanElideOverflowCheck(CGF.getContext(), Ops))
741 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
742 return EmitOverflowCheckedBinOp(Ops);
743 }
744 }
745
746 if (Ops.Ty->isConstantMatrixType()) {
747 llvm::MatrixBuilder MB(Builder);
748 // We need to check the types of the operands of the operator to get the
749 // correct matrix dimensions.
750 auto *BO = cast<BinaryOperator>(Ops.E);
751 auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
752 BO->getLHS()->getType().getCanonicalType());
753 auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
754 BO->getRHS()->getType().getCanonicalType());
755 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
756 if (LHSMatTy && RHSMatTy)
757 return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
758 LHSMatTy->getNumColumns(),
759 RHSMatTy->getNumColumns());
760 return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
761 }
762
763 if (Ops.Ty->isUnsignedIntegerType() &&
764 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
765 !CanElideOverflowCheck(CGF.getContext(), Ops))
766 return EmitOverflowCheckedBinOp(Ops);
767
768 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
769 // Preserve the old values
770 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
771 return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
772 }
773 if (Ops.isFixedPointOp())
774 return EmitFixedPointBinOp(Ops);
775 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
776 }
777 /// Create a binary op that checks for overflow.
778 /// Currently only supports +, - and *.
779 Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
780
781 // Check for undefined division and modulus behaviors.
782 void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
783 llvm::Value *Zero,bool isDiv);
784 // Common helper for getting how wide LHS of shift is.
785 static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS);
786
787 // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
788 // non powers of two.
789 Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);
790
791 Value *EmitDiv(const BinOpInfo &Ops);
792 Value *EmitRem(const BinOpInfo &Ops);
793 Value *EmitAdd(const BinOpInfo &Ops);
794 Value *EmitSub(const BinOpInfo &Ops);
795 Value *EmitShl(const BinOpInfo &Ops);
796 Value *EmitShr(const BinOpInfo &Ops);
797 Value *EmitAnd(const BinOpInfo &Ops) {
798 return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
799 }
800 Value *EmitXor(const BinOpInfo &Ops) {
801 return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
802 }
803 Value *EmitOr (const BinOpInfo &Ops) {
804 return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
805 }
806
807 // Helper functions for fixed point binary operations.
808 Value *EmitFixedPointBinOp(const BinOpInfo &Ops);
809
810 BinOpInfo EmitBinOps(const BinaryOperator *E,
811 QualType PromotionTy = QualType());
812
813 Value *EmitPromotedValue(Value *result, QualType PromotionType);
814 Value *EmitUnPromotedValue(Value *result, QualType ExprType);
815 Value *EmitPromoted(const Expr *E, QualType PromotionType);
816
817 LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
818 Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
819 Value *&Result);
820
821 Value *EmitCompoundAssign(const CompoundAssignOperator *E,
822 Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
823
824 QualType getPromotionType(QualType Ty) {
825 const auto &Ctx = CGF.getContext();
826 if (auto *CT = Ty->getAs<ComplexType>()) {
827 QualType ElementType = CT->getElementType();
828 if (ElementType.UseExcessPrecision(Ctx))
829 return Ctx.getComplexType(Ctx.FloatTy);
830 }
831
832 if (Ty.UseExcessPrecision(Ctx)) {
833 if (auto *VT = Ty->getAs<VectorType>()) {
834 unsigned NumElements = VT->getNumElements();
835 return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
836 }
837 return Ctx.FloatTy;
838 }
839
840 return QualType();
841 }
842
843 // Binary operators and binary compound assignment operators.
844#define HANDLEBINOP(OP) \
845 Value *VisitBin##OP(const BinaryOperator *E) { \
846 QualType promotionTy = getPromotionType(E->getType()); \
847 auto result = Emit##OP(EmitBinOps(E, promotionTy)); \
848 if (result && !promotionTy.isNull()) \
849 result = EmitUnPromotedValue(result, E->getType()); \
850 return result; \
851 } \
852 Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) { \
853 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP); \
854 }
855 HANDLEBINOP(Mul)
856 HANDLEBINOP(Div)
857 HANDLEBINOP(Rem)
858 HANDLEBINOP(Add)
859 HANDLEBINOP(Sub)
860 HANDLEBINOP(Shl)
861 HANDLEBINOP(Shr)
863 HANDLEBINOP(Xor)
865#undef HANDLEBINOP
866
867 // Comparisons.
// EmitCompare selects among three predicates depending on the operand type:
// unsigned-integer, signed-integer, or floating-point. IsSignaling controls
// whether a signaling FP comparison is emitted (true for the ordering
// operators, false for equality).
868 Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
869 llvm::CmpInst::Predicate SICmpOpc,
870 llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
// VISITCOMP wires one comparison operator to EmitCompare with its three
// candidate predicates.
871#define VISITCOMP(CODE, UI, SI, FP, SIG) \
872 Value *VisitBin##CODE(const BinaryOperator *E) { \
873 return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
874 llvm::FCmpInst::FP, SIG); }
875 VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
876 VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
877 VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
878 VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
879 VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
880 VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
881#undef VISITCOMP
882
883 Value *VisitBinAssign (const BinaryOperator *E);
884
885 Value *VisitBinLAnd (const BinaryOperator *E);
886 Value *VisitBinLOr (const BinaryOperator *E);
887 Value *VisitBinComma (const BinaryOperator *E);
888
// Pointer-to-member access produces a scalar that can be loaded as an
// lvalue; both the direct (.*) and indirect (->*) forms delegate.
889 Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
890 Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
891
// C++20 rewritten operators (e.g. those synthesized from operator<=>) are
// emitted via their semantic form.
892 Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
893 return Visit(E->getSemanticForm());
894 }
895
896 // Other Operators.
897 Value *VisitBlockExpr(const BlockExpr *BE);
898 Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
899 Value *VisitChooseExpr(ChooseExpr *CE);
900 Value *VisitVAArgExpr(VAArgExpr *VE);
// Objective-C literals delegate directly to the CodeGenFunction helpers.
901 Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
902 return CGF.EmitObjCStringLiteral(E);
903 }
904 Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
905 return CGF.EmitObjCBoxedExpr(E);
906 }
907 Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
908 return CGF.EmitObjCArrayLiteral(E);
909 }
910 Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
911 return CGF.EmitObjCDictionaryLiteral(E);
912 }
913 Value *VisitAsTypeExpr(AsTypeExpr *CE);
914 Value *VisitAtomicExpr(AtomicExpr *AE);
// A pack indexing expression emits as whichever pack element was selected.
915 Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
916 return Visit(E->getSelectedExpr());
917 }
918};
919} // end anonymous namespace.
920
921//===----------------------------------------------------------------------===//
922// Utilities
923//===----------------------------------------------------------------------===//
924
925/// EmitConversionToBool - Convert the specified expression value to a
926/// boolean (i1) truth value. This is equivalent to "Val != 0".
927Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
928 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
929
930 if (SrcType->isRealFloatingType())
931 return EmitFloatToBoolConversion(Src);
932
933 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
934 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
935
936 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
937 "Unknown scalar type to convert");
938
939 if (isa<llvm::IntegerType>(Src->getType()))
940 return EmitIntToBoolConversion(Src);
941
942 assert(isa<llvm::PointerType>(Src->getType()));
943 return EmitPointerToBoolConversion(Src, SrcType);
944}
945
946void ScalarExprEmitter::EmitFloatConversionCheck(
947 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
948 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
949 assert(SrcType->isFloatingType() && "not a conversion from floating point");
// Only float -> integer conversions can overflow here; float -> float is
// treated as never overflowing (the representable range is [-inf,+inf]).
950 if (!isa<llvm::IntegerType>(DstTy))
951 return;
952
953 CodeGenFunction::SanitizerScope SanScope(&CGF);
954 using llvm::APFloat;
955 using llvm::APSInt;
956
957 llvm::Value *Check = nullptr;
// Use the semantics of the *original* source type so the bounds are
// computed before any half->float widening below.
958 const llvm::fltSemantics &SrcSema =
959 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
960
961 // Floating-point to integer. This has undefined behavior if the source is
962 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
963 // to an integer).
964 unsigned Width = CGF.getContext().getIntWidth(DstType);
966
// Compute the exclusive lower bound: the largest FP value that is still
// too small to be representable in the destination.
967 APSInt Min = APSInt::getMinValue(Width, Unsigned);
968 APFloat MinSrc(SrcSema, APFloat::uninitialized);
969 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
970 APFloat::opOverflow)
971 // Don't need an overflow check for lower bound. Just check for
972 // -Inf/NaN.
973 MinSrc = APFloat::getInf(SrcSema, true);
974 else
975 // Find the largest value which is too small to represent (before
976 // truncation toward zero).
977 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
978
// Likewise the exclusive upper bound: the smallest FP value too large to
// be representable in the destination.
979 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
980 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
981 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
982 APFloat::opOverflow)
983 // Don't need an overflow check for upper bound. Just check for
984 // +Inf/NaN.
985 MaxSrc = APFloat::getInf(SrcSema, false);
986 else
987 // Find the smallest value which is too large to represent (before
988 // truncation toward zero).
989 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
990
991 // If we're converting from __half, convert the range to float to match
992 // the type of src.
993 if (OrigSrcType->isHalfType()) {
994 const llvm::fltSemantics &Sema =
995 CGF.getContext().getFloatTypeSemantics(SrcType);
996 bool IsInexact;
997 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
998 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
999 }
1000
// Check is true only when Src lies strictly between the two bounds;
// ordered compares also reject NaN.
1001 llvm::Value *GE =
1002 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
1003 llvm::Value *LE =
1004 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
1005 Check = Builder.CreateAnd(GE, LE);
1006
1007 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
1008 CGF.EmitCheckTypeDescriptor(OrigSrcType),
1009 CGF.EmitCheckTypeDescriptor(DstType)};
1010 CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
1011 SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
1012}
1013
1014// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1015// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1016static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1017 std::pair<llvm::Value *, SanitizerMask>>
1019 QualType DstType, CGBuilderTy &Builder) {
1020 llvm::Type *SrcTy = Src->getType();
1021 llvm::Type *DstTy = Dst->getType();
1022 (void)DstTy; // Only used in assert()
1023
1024 // This should be truncation of integral types.
1025 assert(Src != Dst);
1026 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
1027 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1028 "non-integer llvm type");
1029
1030 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1031 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1032
1033 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
1034 // Else, it is a signed truncation.
1035 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1036 SanitizerMask Mask;
1037 if (!SrcSigned && !DstSigned) {
1038 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1039 Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
1040 } else {
1041 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1042 Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
1043 }
1044
// Round-trip test: widen the truncated value back to the source width and
// compare with the original; equality means no bits were lost.
1045 llvm::Value *Check = nullptr;
1046 // 1. Extend the truncated value back to the same width as the Src.
1047 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
1048 // 2. Equality-compare with the original source value
1049 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1050 // If the comparison result is 'i1 false', then the truncation was lossy.
1051 return std::make_pair(Kind, std::make_pair(Check, Mask));
1052}
1053
// Shared precondition for the implicit-conversion sanitizers: both the
// source and destination must be integer types. Pointer and boolean
// conversions are screened out by the callers.
1055 QualType SrcType, QualType DstType) {
1056 return SrcType->isIntegerType() && DstType->isIntegerType();
1057}
1058
1059void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1060 Value *Dst, QualType DstType,
1061 SourceLocation Loc) {
// Bail out early when neither implicit-truncation sanitizer is active.
1062 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
1063 return;
1064
1065 // We only care about int->int conversions here.
1066 // We ignore conversions to/from pointer and/or bool.
1068 DstType))
1069 return;
1070
1071 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1072 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1073 // This must be truncation. Else we do not care.
1074 if (SrcBits <= DstBits)
1075 return;
1076
1077 assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1078
1079 // If the integer sign change sanitizer is enabled,
1080 // and we are truncating from larger unsigned type to smaller signed type,
1081 // let that next sanitizer deal with it.
1082 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1083 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1084 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1085 (!SrcSigned && DstSigned))
1086 return;
1087
1088 CodeGenFunction::SanitizerScope SanScope(&CGF);
1089
1090 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1091 std::pair<llvm::Value *, SanitizerMask>>
1092 Check =
1093 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1094 // If the comparison result is 'i1 false', then the truncation was lossy.
1095
1096 // Do we care about this type of truncation?
// The helper classifies the truncation as signed or unsigned; only emit
// the check if the corresponding sanitizer is actually enabled.
1097 if (!CGF.SanOpts.has(Check.second.second))
1098 return;
1099
// Static data for the runtime handler: location, the two type
// descriptors, the check kind, and a zero bitfield-width placeholder.
1100 llvm::Constant *StaticArgs[] = {
1101 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1102 CGF.EmitCheckTypeDescriptor(DstType),
1103 llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
1104 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1105
1106 CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
1107 {Src, Dst});
1108}
1109
1110static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
1111 const char *Name,
1112 CGBuilderTy &Builder) {
1113 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1114 llvm::Type *VTy = V->getType();
1115 if (!VSigned) {
1116 // If the value is unsigned, then it is never negative.
1117 return llvm::ConstantInt::getFalse(VTy->getContext());
1118 }
1119 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1120 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1121 llvm::Twine(Name) + "." + V->getName() +
1122 ".negativitycheck");
1123}
1124
1125// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1126// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1127static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1128 std::pair<llvm::Value *, SanitizerMask>>
1130 QualType DstType, CGBuilderTy &Builder) {
1131 llvm::Type *SrcTy = Src->getType();
1132 llvm::Type *DstTy = Dst->getType();
1133
1134 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1135 "non-integer llvm type");
1136
1137 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1138 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1139 (void)SrcSigned; // Only used in assert()
1140 (void)DstSigned; // Only used in assert()
1141 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1142 unsigned DstBits = DstTy->getScalarSizeInBits();
1143 (void)SrcBits; // Only used in assert()
1144 (void)DstBits; // Only used in assert()
1145
// The caller is expected to have filtered out no-op conversions already.
1146 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1147 "either the widths should be different, or the signednesses.");
1148
1149 // 1. Was the old Value negative?
1150 llvm::Value *SrcIsNegative =
1151 EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
1152 // 2. Is the new Value negative?
1153 llvm::Value *DstIsNegative =
1154 EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
1155 // 3. Now, was the 'negativity status' preserved during the conversion?
1156 // NOTE: conversion from negative to zero is considered to change the sign.
1157 // (We want to get 'false' when the conversion changed the sign)
1158 // So we should just equality-compare the negativity statuses.
1159 llvm::Value *Check = nullptr;
1160 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1161 // If the comparison result is 'false', then the conversion changed the sign.
1162 return std::make_pair(
1163 ScalarExprEmitter::ICCK_IntegerSignChange,
1164 std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
1165}
1166
1167void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1168 Value *Dst, QualType DstType,
1169 SourceLocation Loc) {
1170 if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
1171 return;
1172
1173 llvm::Type *SrcTy = Src->getType();
1174 llvm::Type *DstTy = Dst->getType();
1175
1176 // We only care about int->int conversions here.
1177 // We ignore conversions to/from pointer and/or bool.
1179 DstType))
1180 return;
1181
1182 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1183 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1184 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1185 unsigned DstBits = DstTy->getScalarSizeInBits();
1186
1187 // Now, we do not need to emit the check in *all* of the cases.
1188 // We can avoid emitting it in some obvious cases where it would have been
1189 // dropped by the opt passes (instcombine) always anyways.
1190 // If it's a cast between effectively the same type, no check.
1191 // NOTE: this is *not* equivalent to checking the canonical types.
1192 if (SrcSigned == DstSigned && SrcBits == DstBits)
1193 return;
1194 // At least one of the values needs to have signed type.
1195 // If both are unsigned, then obviously, neither of them can be negative.
1196 if (!SrcSigned && !DstSigned)
1197 return;
1198 // If the conversion is to *larger* *signed* type, then no check is needed.
1199 // Because either sign-extension happens (so the sign will remain),
1200 // or zero-extension will happen (the sign bit will be zero.)
1201 if ((DstBits > SrcBits) && DstSigned)
1202 return;
1203 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1204 (SrcBits > DstBits) && SrcSigned) {
1205 // If the signed integer truncation sanitizer is enabled,
1206 // and this is a truncation from signed type, then no check is needed.
1207 // Because here sign change check is interchangeable with truncation check.
1208 return;
1209 }
1210 // That's it. We can't rule out any more cases with the data we have.
1211
1212 CodeGenFunction::SanitizerScope SanScope(&CGF);
1213
1214 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1215 std::pair<llvm::Value *, SanitizerMask>>
1216 Check;
1217
1218 // Each of these checks needs to return 'false' when an issue was detected.
1219 ImplicitConversionCheckKind CheckKind;
1221 // So we can 'and' all the checks together, and still get 'false',
1222 // if at least one of the checks detected an issue.
1223
// Always emit the sign-change check proper.
1224 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1225 CheckKind = Check.first;
1226 Checks.emplace_back(Check.second);
1227
1228 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1229 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1230 // If the signed integer truncation sanitizer was enabled,
1231 // and we are truncating from larger unsigned type to smaller signed type,
1232 // let's handle the case we skipped in that check.
1233 Check =
1234 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
// Report the combined kind so the diagnostic mentions both hazards.
1235 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1236 Checks.emplace_back(Check.second);
1237 // If the comparison result is 'i1 false', then the truncation was lossy.
1238 }
1239
1240 llvm::Constant *StaticArgs[] = {
1241 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1242 CGF.EmitCheckTypeDescriptor(DstType),
1243 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1244 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1245 // EmitCheck() will 'and' all the checks together.
1246 CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
1247 {Src, Dst});
1248}
1249
1250// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1251// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1252static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1253 std::pair<llvm::Value *, SanitizerMask>>
1255 QualType DstType, CGBuilderTy &Builder) {
1256 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1257 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1258
// As in the non-bitfield helper: unsigned truncation only when both sides
// are unsigned, signed truncation otherwise.
1259 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1260 if (!SrcSigned && !DstSigned)
1261 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1262 else
1263 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1264
// Round-trip test: widen the truncated value back to the source width and
// equality-compare with the original value.
1265 llvm::Value *Check = nullptr;
1266 // 1. Extend the truncated value back to the same width as the Src.
1267 Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
1268 // 2. Equality-compare with the original source value
1269 Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
1270 // If the comparison result is 'i1 false', then the truncation was lossy.
1271
1272 return std::make_pair(
1273 Kind, std::make_pair(Check, SanitizerKind::ImplicitBitfieldConversion));
1274}
1275
1276// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1277// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1278static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1279 std::pair<llvm::Value *, SanitizerMask>>
1281 QualType DstType, CGBuilderTy &Builder) {
1282 // 1. Was the old Value negative?
1283 llvm::Value *SrcIsNegative =
1284 EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
1285 // 2. Is the new Value negative?
1286 llvm::Value *DstIsNegative =
1287 EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
1288 // 3. Now, was the 'negativity status' preserved during the conversion?
1289 // NOTE: conversion from negative to zero is considered to change the sign.
1290 // (We want to get 'false' when the conversion changed the sign)
1291 // So we should just equality-compare the negativity statuses.
1292 llvm::Value *Check = nullptr;
1293 Check =
1294 Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
1295 // If the comparison result is 'false', then the conversion changed the sign.
1296 return std::make_pair(
1297 ScalarExprEmitter::ICCK_IntegerSignChange,
1298 std::make_pair(Check, SanitizerKind::ImplicitBitfieldConversion));
1299}
1300
1301void CodeGenFunction::EmitBitfieldConversionCheck(Value *Src, QualType SrcType,
1302 Value *Dst, QualType DstType,
1303 const CGBitFieldInfo &Info,
1304 SourceLocation Loc) {
1305
1306 if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
1307 return;
1308
1309 // We only care about int->int conversions here.
1310 // We ignore conversions to/from pointer and/or bool.
1312 DstType))
1313 return;
1314
1315 if (DstType->isBooleanType() || SrcType->isBooleanType())
1316 return;
1317
1318 // This should be truncation of integral types.
1319 assert(isa<llvm::IntegerType>(Src->getType()) &&
1320 isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");
1321
1322 // TODO: Calculate src width to avoid emitting code
1323 // for unnecessary cases.
// For bitfields the destination width is the declared bit width, not the
// width of the storage type.
1324 unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
1325 unsigned DstBits = Info.Size;
1326
1327 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1328 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1329
1330 CodeGenFunction::SanitizerScope SanScope(this);
1331
1332 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1333 std::pair<llvm::Value *, SanitizerMask>>
1334 Check;
1335
1336 // Truncation
1337 bool EmitTruncation = DstBits < SrcBits;
1338 // If Dst is signed and Src unsigned, we want to be more specific
1339 // about the CheckKind we emit, in this case we want to emit
1340 // ICCK_SignedIntegerTruncationOrSignChange.
1341 bool EmitTruncationFromUnsignedToSigned =
1342 EmitTruncation && DstSigned && !SrcSigned;
1343 // Sign change
1344 bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
1345 bool BothUnsigned = !SrcSigned && !DstSigned;
1346 bool LargerSigned = (DstBits > SrcBits) && DstSigned;
1347 // We can avoid emitting sign change checks in some obvious cases
1348 // 1. If Src and Dst have the same signedness and size
1349 // 2. If both are unsigned sign check is unnecessary!
1350 // 3. If Dst is signed and bigger than Src, either
1351 // sign-extension or zero-extension will make sure
1352 // the sign remains.
1353 bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;
1354
// Emit at most one check; truncation takes priority over sign change.
1355 if (EmitTruncation)
1356 Check =
1357 EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1358 else if (EmitSignChange) {
1359 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1360 "either the widths should be different, or the signednesses.");
1361 Check =
1362 EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1363 } else
1364 return;
1365
1366 ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
1367 if (EmitTruncationFromUnsignedToSigned)
1368 CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;
1369
// Static data for the runtime handler; the final argument carries the
// bitfield width so the diagnostic can report it.
1370 llvm::Constant *StaticArgs[] = {
1372 EmitCheckTypeDescriptor(DstType),
1373 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1374 llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};
1375
1376 EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
1377 {Src, Dst});
1378}
1379
1380Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1381 QualType DstType, llvm::Type *SrcTy,
1382 llvm::Type *DstTy,
1383 ScalarConversionOpts Opts) {
1384 // The Element types determine the type of cast to perform.
1385 llvm::Type *SrcElementTy;
1386 llvm::Type *DstElementTy;
1387 QualType SrcElementType;
1388 QualType DstElementType;
1389 if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1390 SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1391 DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1392 SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1393 DstElementType = DstType->castAs<MatrixType>()->getElementType();
1394 } else {
1395 assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
1396 "cannot cast between matrix and non-matrix types");
1397 SrcElementTy = SrcTy;
1398 DstElementTy = DstTy;
1399 SrcElementType = SrcType;
1400 DstElementType = DstType;
1401 }
1402
1403 if (isa<llvm::IntegerType>(SrcElementTy)) {
1404 bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1405 if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1406 InputSigned = true;
1407 }
1408
1409 if (isa<llvm::IntegerType>(DstElementTy))
1410 return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1411 if (InputSigned)
1412 return Builder.CreateSIToFP(Src, DstTy, "conv");
1413 return Builder.CreateUIToFP(Src, DstTy, "conv");
1414 }
1415
1416 if (isa<llvm::IntegerType>(DstElementTy)) {
1417 assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
1418 bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();
1419
1420 // If we can't recognize overflow as undefined behavior, assume that
1421 // overflow saturates. This protects against normal optimizations if we are
1422 // compiling with non-standard FP semantics.
1423 if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
1424 llvm::Intrinsic::ID IID =
1425 IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
1426 return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
1427 }
1428
1429 if (IsSigned)
1430 return Builder.CreateFPToSI(Src, DstTy, "conv");
1431 return Builder.CreateFPToUI(Src, DstTy, "conv");
1432 }
1433
1434 if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1435 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1436 return Builder.CreateFPExt(Src, DstTy, "conv");
1437}
1438
1439/// Emit a conversion from the specified type to the specified destination type,
1440/// both of which are LLVM scalar types.
1441Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1442 QualType DstType,
1443 SourceLocation Loc,
1444 ScalarConversionOpts Opts) {
1445 // All conversions involving fixed point types should be handled by the
1446 // EmitFixedPoint family functions. This is done to prevent bloating up this
1447 // function more, and although fixed point numbers are represented by
1448 // integers, we do not want to follow any logic that assumes they should be
1449 // treated as integers.
1450 // TODO(leonardchan): When necessary, add another if statement checking for
1451 // conversions to fixed point types from other types.
1452 if (SrcType->isFixedPointType()) {
1453 if (DstType->isBooleanType())
1454 // It is important that we check this before checking if the dest type is
1455 // an integer because booleans are technically integer types.
1456 // We do not need to check the padding bit on unsigned types if unsigned
1457 // padding is enabled because overflow into this bit is undefined
1458 // behavior.
1459 return Builder.CreateIsNotNull(Src, "tobool");
1460 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1461 DstType->isRealFloatingType())
1462 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1463
1464 llvm_unreachable(
1465 "Unhandled scalar conversion from a fixed point type to another type.");
1466 } else if (DstType->isFixedPointType()) {
1467 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1468 // This also includes converting booleans and enums to fixed point types.
1469 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1470
1471 llvm_unreachable(
1472 "Unhandled scalar conversion to a fixed point type from another type.");
1473 }
1474
// Keep the noncanonical types around: the implicit-conversion sanitizers
// want the spelled types for their diagnostics.
1475 QualType NoncanonicalSrcType = SrcType;
1476 QualType NoncanonicalDstType = DstType;
1477
1478 SrcType = CGF.getContext().getCanonicalType(SrcType);
1479 DstType = CGF.getContext().getCanonicalType(DstType);
1480 if (SrcType == DstType) return Src;
1481
1482 if (DstType->isVoidType()) return nullptr;
1483
// Remember the original value/type: the float-cast-overflow check needs
// them even after the half->float widening below.
1484 llvm::Value *OrigSrc = Src;
1485 QualType OrigSrcType = SrcType;
1486 llvm::Type *SrcTy = Src->getType();
1487
1488 // Handle conversions to bool first, they are special: comparisons against 0.
1489 if (DstType->isBooleanType())
1490 return EmitConversionToBool(Src, SrcType);
1491
1492 llvm::Type *DstTy = ConvertType(DstType);
1493
1494 // Cast from half through float if half isn't a native type.
1495 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1496 // Cast to FP using the intrinsic if the half type itself isn't supported.
1497 if (DstTy->isFloatingPointTy()) {
1499 return Builder.CreateCall(
1500 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1501 Src);
1502 } else {
1503 // Cast to other types through float, using either the intrinsic or FPExt,
1504 // depending on whether the half type itself is supported
1505 // (as opposed to operations on half, available with NativeHalfType).
1507 Src = Builder.CreateCall(
1508 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1509 CGF.CGM.FloatTy),
1510 Src);
1511 } else {
1512 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1513 }
1514 SrcType = CGF.getContext().FloatTy;
1515 SrcTy = CGF.FloatTy;
1516 }
1517 }
1518
1519 // Ignore conversions like int -> uint.
1520 if (SrcTy == DstTy) {
1521 if (Opts.EmitImplicitIntegerSignChangeChecks)
1522 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1523 NoncanonicalDstType, Loc);
1524
1525 return Src;
1526 }
1527
1528 // Handle pointer conversions next: pointers can only be converted to/from
1529 // other pointers and integers. Check for pointer types in terms of LLVM, as
1530 // some native types (like Obj-C id) may map to a pointer type.
1531 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1532 // The source value may be an integer, or a pointer.
1533 if (isa<llvm::PointerType>(SrcTy))
1534 return Builder.CreateBitCast(Src, DstTy, "conv");
1535
1536 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1537 // First, convert to the correct width so that we control the kind of
1538 // extension.
1539 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1540 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1541 llvm::Value* IntResult =
1542 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1543 // Then, cast to pointer.
1544 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1545 }
1546
1547 if (isa<llvm::PointerType>(SrcTy)) {
1548 // Must be an ptr to int cast.
1549 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1550 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1551 }
1552
1553 // A scalar can be splatted to an extended vector of the same element type
1554 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1555 // Sema should add casts to make sure that the source expression's type is
1556 // the same as the vector's element type (sans qualifiers)
1557 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1558 SrcType.getTypePtr() &&
1559 "Splatted expr doesn't match with vector element type?");
1560
1561 // Splat the element across to all elements
1562 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1563 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1564 }
1565
1566 if (SrcType->isMatrixType() && DstType->isMatrixType())
1567 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1568
1569 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1570 // Allow bitcast from vector to integer/fp of the same size.
1571 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1572 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1573 if (SrcSize == DstSize)
1574 return Builder.CreateBitCast(Src, DstTy, "conv");
1575
1576 // Conversions between vectors of different sizes are not allowed except
1577 // when vectors of half are involved. Operations on storage-only half
1578 // vectors require promoting half vector operands to float vectors and
1579 // truncating the result, which is either an int or float vector, to a
1580 // short or half vector.
1581
1582 // Source and destination are both expected to be vectors.
1583 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1584 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1585 (void)DstElementTy;
1586
1587 assert(((SrcElementTy->isIntegerTy() &&
1588 DstElementTy->isIntegerTy()) ||
1589 (SrcElementTy->isFloatingPointTy() &&
1590 DstElementTy->isFloatingPointTy())) &&
1591 "unexpected conversion between a floating-point vector and an "
1592 "integer vector");
1593
1594 // Truncate an i32 vector to an i16 vector.
1595 if (SrcElementTy->isIntegerTy())
1596 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1597
1598 // Truncate a float vector to a half vector.
1599 if (SrcSize > DstSize)
1600 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1601
1602 // Promote a half vector to a float vector.
1603 return Builder.CreateFPExt(Src, DstTy, "conv");
1604 }
1605
1606 // Finally, we have the arithmetic types: real int/float.
1607 Value *Res = nullptr;
1608 llvm::Type *ResTy = DstTy;
1609
1610 // An overflowing conversion has undefined behavior if either the source type
1611 // or the destination type is a floating-point type. However, we consider the
1612 // range of representable values for all floating-point types to be
1613 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1614 // floating-point type.
1615 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1616 OrigSrcType->isFloatingType())
1617 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1618 Loc);
1619
1620 // Cast to half through float if half isn't a native type.
1621 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1622 // Make sure we cast in a single step if from another FP type.
1623 if (SrcTy->isFloatingPointTy()) {
1624 // Use the intrinsic if the half type itself isn't supported
1625 // (as opposed to operations on half, available with NativeHalfType).
1627 return Builder.CreateCall(
1628 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1629 // If the half type is supported, just use an fptrunc.
1630 return Builder.CreateFPTrunc(Src, DstTy);
1631 }
1632 DstTy = CGF.FloatTy;
1633 }
1634
1635 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1636
// If we detoured through float for a non-native half destination, narrow
// the result back down to half now.
1637 if (DstTy != ResTy) {
1639 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1640 Res = Builder.CreateCall(
1641 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1642 Res);
1643 } else {
1644 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1645 }
1646 }
1647
// Sanitizer hooks run on the noncanonical types so diagnostics show the
// types as written.
1648 if (Opts.EmitImplicitIntegerTruncationChecks)
1649 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1650 NoncanonicalDstType, Loc);
1651
1652 if (Opts.EmitImplicitIntegerSignChangeChecks)
1653 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1654 NoncanonicalDstType, Loc);
1655
1656 return Res;
1657}
1658
1659Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1660 QualType DstTy,
1661 SourceLocation Loc) {
1662 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1663 llvm::Value *Result;
1664 if (SrcTy->isRealFloatingType())
1665 Result = FPBuilder.CreateFloatingToFixed(Src,
1666 CGF.getContext().getFixedPointSemantics(DstTy));
1667 else if (DstTy->isRealFloatingType())
1668 Result = FPBuilder.CreateFixedToFloating(Src,
1670 ConvertType(DstTy));
1671 else {
1672 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1673 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1674
1675 if (DstTy->isIntegerType())
1676 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1677 DstFPSema.getWidth(),
1678 DstFPSema.isSigned());
1679 else if (SrcTy->isIntegerType())
1680 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1681 DstFPSema);
1682 else
1683 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1684 }
1685 return Result;
1686}
1687
1688/// Emit a conversion from the specified complex type to the specified
1689/// destination type, where the destination type is an LLVM scalar type.
1690Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1692 SourceLocation Loc) {
1693 // Get the source element type.
1694 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1695
1696 // Handle conversions to bool first, they are special: comparisons against 0.
1697 if (DstTy->isBooleanType()) {
1698 // Complex != 0 -> (Real != 0) | (Imag != 0)
1699 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1700 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1701 return Builder.CreateOr(Src.first, Src.second, "tobool");
1702 }
1703
1704 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1705 // the imaginary part of the complex value is discarded and the value of the
1706 // real part is converted according to the conversion rules for the
1707 // corresponding real type.
1708 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1709}
1710
1711Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1712 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1713}
1714
1715/// Emit a sanitization check for the given "binary" operation (which
1716/// might actually be a unary increment which has been lowered to a binary
1717/// operation). The check passes if all values in \p Checks (which are \c i1),
1718/// are \c true.
1719void ScalarExprEmitter::EmitBinOpCheck(
1720 ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
1721 assert(CGF.IsSanitizerScope);
1722 SanitizerHandler Check;
1725
1726 BinaryOperatorKind Opcode = Info.Opcode;
1729
1730 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1731 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1732 if (UO && UO->getOpcode() == UO_Minus) {
1733 Check = SanitizerHandler::NegateOverflow;
1734 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1735 DynamicData.push_back(Info.RHS);
1736 } else {
1737 if (BinaryOperator::isShiftOp(Opcode)) {
1738 // Shift LHS negative or too large, or RHS out of bounds.
1739 Check = SanitizerHandler::ShiftOutOfBounds;
1740 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1741 StaticData.push_back(
1742 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1743 StaticData.push_back(
1744 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1745 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1746 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1747 Check = SanitizerHandler::DivremOverflow;
1748 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1749 } else {
1750 // Arithmetic overflow (+, -, *).
1751 switch (Opcode) {
1752 case BO_Add: Check = SanitizerHandler::AddOverflow; break;
1753 case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
1754 case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
1755 default: llvm_unreachable("unexpected opcode for bin op check");
1756 }
1757 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1758 }
1759 DynamicData.push_back(Info.LHS);
1760 DynamicData.push_back(Info.RHS);
1761 }
1762
1763 CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
1764}
1765
1766//===----------------------------------------------------------------------===//
1767// Visitor Methods
1768//===----------------------------------------------------------------------===//
1769
1770Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1771 CGF.ErrorUnsupported(E, "scalar expression");
1772 if (E->getType()->isVoidType())
1773 return nullptr;
1774 return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1775}
1776
1777Value *
1778ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1779 ASTContext &Context = CGF.getContext();
1780 unsigned AddrSpace =
1782 llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
1783 E->ComputeName(Context), "__usn_str", AddrSpace);
1784
1785 llvm::Type *ExprTy = ConvertType(E->getType());
1786 return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,
1787 "usn_addr_cast");
1788}
1789
1790Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1791 // Vector Mask Case
1792 if (E->getNumSubExprs() == 2) {
1793 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1794 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1795 Value *Mask;
1796
1797 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
1798 unsigned LHSElts = LTy->getNumElements();
1799
1800 Mask = RHS;
1801
1802 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
1803
1804 // Mask off the high bits of each shuffle index.
1805 Value *MaskBits =
1806 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1807 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1808
1809 // newv = undef
1810 // mask = mask & maskbits
1811 // for each elt
1812 // n = extract mask i
1813 // x = extract val n
1814 // newv = insert newv, x, i
1815 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
1816 MTy->getNumElements());
1817 Value* NewV = llvm::PoisonValue::get(RTy);
1818 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1819 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1820 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1821
1822 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1823 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1824 }
1825 return NewV;
1826 }
1827
1828 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1829 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1830
1831 SmallVector<int, 32> Indices;
1832 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1833 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
1834 // Check for -1 and output it as undef in the IR.
1835 if (Idx.isSigned() && Idx.isAllOnes())
1836 Indices.push_back(-1);
1837 else
1838 Indices.push_back(Idx.getZExtValue());
1839 }
1840
1841 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
1842}
1843
1844Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1845 QualType SrcType = E->getSrcExpr()->getType(),
1846 DstType = E->getType();
1847
1848 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1849
1850 SrcType = CGF.getContext().getCanonicalType(SrcType);
1851 DstType = CGF.getContext().getCanonicalType(DstType);
1852 if (SrcType == DstType) return Src;
1853
1854 assert(SrcType->isVectorType() &&
1855 "ConvertVector source type must be a vector");
1856 assert(DstType->isVectorType() &&
1857 "ConvertVector destination type must be a vector");
1858
1859 llvm::Type *SrcTy = Src->getType();
1860 llvm::Type *DstTy = ConvertType(DstType);
1861
1862 // Ignore conversions like int -> uint.
1863 if (SrcTy == DstTy)
1864 return Src;
1865
1866 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1867 DstEltType = DstType->castAs<VectorType>()->getElementType();
1868
1869 assert(SrcTy->isVectorTy() &&
1870 "ConvertVector source IR type must be a vector");
1871 assert(DstTy->isVectorTy() &&
1872 "ConvertVector destination IR type must be a vector");
1873
1874 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
1875 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
1876
1877 if (DstEltType->isBooleanType()) {
1878 assert((SrcEltTy->isFloatingPointTy() ||
1879 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1880
1881 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
1882 if (SrcEltTy->isFloatingPointTy()) {
1883 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
1884 } else {
1885 return Builder.CreateICmpNE(Src, Zero, "tobool");
1886 }
1887 }
1888
1889 // We have the arithmetic types: real int/float.
1890 Value *Res = nullptr;
1891
1892 if (isa<llvm::IntegerType>(SrcEltTy)) {
1893 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1894 if (isa<llvm::IntegerType>(DstEltTy))
1895 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1896 else if (InputSigned)
1897 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1898 else
1899 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1900 } else if (isa<llvm::IntegerType>(DstEltTy)) {
1901 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1902 if (DstEltType->isSignedIntegerOrEnumerationType())
1903 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1904 else
1905 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1906 } else {
1907 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1908 "Unknown real conversion");
1909 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1910 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1911 else
1912 Res = Builder.CreateFPExt(Src, DstTy, "conv");
1913 }
1914
1915 return Res;
1916}
1917
1918Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
1919 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
1920 CGF.EmitIgnoredExpr(E->getBase());
1921 return CGF.emitScalarConstant(Constant, E);
1922 } else {
1925 llvm::APSInt Value = Result.Val.getInt();
1926 CGF.EmitIgnoredExpr(E->getBase());
1927 return Builder.getInt(Value);
1928 }
1929 }
1930
1931 return EmitLoadOfLValue(E);
1932}
1933
1934Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
1935 TestAndClearIgnoreResultAssign();
1936
1937 // Emit subscript expressions in rvalue context's. For most cases, this just
1938 // loads the lvalue formed by the subscript expr. However, we have to be
1939 // careful, because the base of a vector subscript is occasionally an rvalue,
1940 // so we can't get it as an lvalue.
1941 if (!E->getBase()->getType()->isVectorType() &&
1943 return EmitLoadOfLValue(E);
1944
1945 // Handle the vector case. The base must be a vector, the index must be an
1946 // integer value.
1947 Value *Base = Visit(E->getBase());
1948 Value *Idx = Visit(E->getIdx());
1949 QualType IdxTy = E->getIdx()->getType();
1950
1951 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
1952 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
1953
1954 return Builder.CreateExtractElement(Base, Idx, "vecext");
1955}
1956
1957Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
1958 TestAndClearIgnoreResultAssign();
1959
1960 // Handle the vector case. The base must be a vector, the index must be an
1961 // integer value.
1962 Value *RowIdx = Visit(E->getRowIdx());
1963 Value *ColumnIdx = Visit(E->getColumnIdx());
1964
1965 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
1966 unsigned NumRows = MatrixTy->getNumRows();
1967 llvm::MatrixBuilder MB(Builder);
1968 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
1969 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
1970 MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());
1971
1972 Value *Matrix = Visit(E->getBase());
1973
1974 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
1975 return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
1976}
1977
1978static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1979 unsigned Off) {
1980 int MV = SVI->getMaskValue(Idx);
1981 if (MV == -1)
1982 return -1;
1983 return Off + MV;
1984}
1985
1986static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1987 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
1988 "Index operand too large for shufflevector mask!");
1989 return C->getZExtValue();
1990}
1991
1992Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
1993 bool Ignore = TestAndClearIgnoreResultAssign();
1994 (void)Ignore;
1995 assert (Ignore == false && "init list ignored");
1996 unsigned NumInitElements = E->getNumInits();
1997
1998 if (E->hadArrayRangeDesignator())
1999 CGF.ErrorUnsupported(E, "GNU array range designator extension");
2000
2001 llvm::VectorType *VType =
2002 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
2003
2004 if (!VType) {
2005 if (NumInitElements == 0) {
2006 // C++11 value-initialization for the scalar.
2007 return EmitNullValue(E->getType());
2008 }
2009 // We have a scalar in braces. Just use the first element.
2010 return Visit(E->getInit(0));
2011 }
2012
2013 if (isa<llvm::ScalableVectorType>(VType)) {
2014 if (NumInitElements == 0) {
2015 // C++11 value-initialization for the vector.
2016 return EmitNullValue(E->getType());
2017 }
2018
2019 if (NumInitElements == 1) {
2020 Expr *InitVector = E->getInit(0);
2021
2022 // Initialize from another scalable vector of the same type.
2023 if (InitVector->getType() == E->getType())
2024 return Visit(InitVector);
2025 }
2026
2027 llvm_unreachable("Unexpected initialization of a scalable vector!");
2028 }
2029
2030 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
2031
2032 // Loop over initializers collecting the Value for each, and remembering
2033 // whether the source was swizzle (ExtVectorElementExpr). This will allow
2034 // us to fold the shuffle for the swizzle into the shuffle for the vector
2035 // initializer, since LLVM optimizers generally do not want to touch
2036 // shuffles.
2037 unsigned CurIdx = 0;
2038 bool VIsPoisonShuffle = false;
2039 llvm::Value *V = llvm::PoisonValue::get(VType);
2040 for (unsigned i = 0; i != NumInitElements; ++i) {
2041 Expr *IE = E->getInit(i);
2042 Value *Init = Visit(IE);
2044
2045 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
2046
2047 // Handle scalar elements. If the scalar initializer is actually one
2048 // element of a different vector of the same width, use shuffle instead of
2049 // extract+insert.
2050 if (!VVT) {
2051 if (isa<ExtVectorElementExpr>(IE)) {
2052 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
2053
2054 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
2055 ->getNumElements() == ResElts) {
2056 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
2057 Value *LHS = nullptr, *RHS = nullptr;
2058 if (CurIdx == 0) {
2059 // insert into poison -> shuffle (src, poison)
2060 // shufflemask must use an i32
2061 Args.push_back(getAsInt32(C, CGF.Int32Ty));
2062 Args.resize(ResElts, -1);
2063
2064 LHS = EI->getVectorOperand();
2065 RHS = V;
2066 VIsPoisonShuffle = true;
2067 } else if (VIsPoisonShuffle) {
2068 // insert into poison shuffle && size match -> shuffle (v, src)
2069 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
2070 for (unsigned j = 0; j != CurIdx; ++j)
2071 Args.push_back(getMaskElt(SVV, j, 0));
2072 Args.push_back(ResElts + C->getZExtValue());
2073 Args.resize(ResElts, -1);
2074
2075 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
2076 RHS = EI->getVectorOperand();
2077 VIsPoisonShuffle = false;
2078 }
2079 if (!Args.empty()) {
2080 V = Builder.CreateShuffleVector(LHS, RHS, Args);
2081 ++CurIdx;
2082 continue;
2083 }
2084 }
2085 }
2086 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
2087 "vecinit");
2088 VIsPoisonShuffle = false;
2089 ++CurIdx;
2090 continue;
2091 }
2092
2093 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
2094
2095 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
2096 // input is the same width as the vector being constructed, generate an
2097 // optimized shuffle of the swizzle input into the result.
2098 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
2099 if (isa<ExtVectorElementExpr>(IE)) {
2100 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
2101 Value *SVOp = SVI->getOperand(0);
2102 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
2103
2104 if (OpTy->getNumElements() == ResElts) {
2105 for (unsigned j = 0; j != CurIdx; ++j) {
2106 // If the current vector initializer is a shuffle with poison, merge
2107 // this shuffle directly into it.
2108 if (VIsPoisonShuffle) {
2109 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
2110 } else {
2111 Args.push_back(j);
2112 }
2113 }
2114 for (unsigned j = 0, je = InitElts; j != je; ++j)
2115 Args.push_back(getMaskElt(SVI, j, Offset));
2116 Args.resize(ResElts, -1);
2117
2118 if (VIsPoisonShuffle)
2119 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
2120
2121 Init = SVOp;
2122 }
2123 }
2124
2125 // Extend init to result vector length, and then shuffle its contribution
2126 // to the vector initializer into V.
2127 if (Args.empty()) {
2128 for (unsigned j = 0; j != InitElts; ++j)
2129 Args.push_back(j);
2130 Args.resize(ResElts, -1);
2131 Init = Builder.CreateShuffleVector(Init, Args, "vext");
2132
2133 Args.clear();
2134 for (unsigned j = 0; j != CurIdx; ++j)
2135 Args.push_back(j);
2136 for (unsigned j = 0; j != InitElts; ++j)
2137 Args.push_back(j + Offset);
2138 Args.resize(ResElts, -1);
2139 }
2140
2141 // If V is poison, make sure it ends up on the RHS of the shuffle to aid
2142 // merging subsequent shuffles into this one.
2143 if (CurIdx == 0)
2144 std::swap(V, Init);
2145 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
2146 VIsPoisonShuffle = isa<llvm::PoisonValue>(Init);
2147 CurIdx += InitElts;
2148 }
2149
2150 // FIXME: evaluate codegen vs. shuffling against constant null vector.
2151 // Emit remaining default initializers.
2152 llvm::Type *EltTy = VType->getElementType();
2153
2154 // Emit remaining default initializers
2155 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
2156 Value *Idx = Builder.getInt32(CurIdx);
2157 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
2158 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
2159 }
2160 return V;
2161}
2162
2164 const Expr *E = CE->getSubExpr();
2165
2166 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
2167 return false;
2168
2169 if (isa<CXXThisExpr>(E->IgnoreParens())) {
2170 // We always assume that 'this' is never null.
2171 return false;
2172 }
2173
2174 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2175 // And that glvalue casts are never null.
2176 if (ICE->isGLValue())
2177 return false;
2178 }
2179
2180 return true;
2181}
2182
2183// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
2184// have to handle a more broad range of conversions than explicit casts, as they
2185// handle things like function to ptr-to-function decay etc.
2186Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
2187 Expr *E = CE->getSubExpr();
2188 QualType DestTy = CE->getType();
2189 CastKind Kind = CE->getCastKind();
2190 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);
2191
2192 // These cases are generally not written to ignore the result of
2193 // evaluating their sub-expressions, so we clear this now.
2194 bool Ignored = TestAndClearIgnoreResultAssign();
2195
2196 // Since almost all cast kinds apply to scalars, this switch doesn't have
2197 // a default case, so the compiler will warn on a missing case. The cases
2198 // are in the same order as in the CastKind enum.
2199 switch (Kind) {
2200 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2201 case CK_BuiltinFnToFnPtr:
2202 llvm_unreachable("builtin functions are handled elsewhere");
2203
2204 case CK_LValueBitCast:
2205 case CK_ObjCObjectLValueCast: {
2206 Address Addr = EmitLValue(E).getAddress(CGF);
2207 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2208 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
2209 return EmitLoadOfLValue(LV, CE->getExprLoc());
2210 }
2211
2212 case CK_LValueToRValueBitCast: {
2213 LValue SourceLVal = CGF.EmitLValue(E);
2214 Address Addr = SourceLVal.getAddress(CGF).withElementType(
2215 CGF.ConvertTypeForMem(DestTy));
2216 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2218 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2219 }
2220
2221 case CK_CPointerToObjCPointerCast:
2222 case CK_BlockPointerToObjCPointerCast:
2223 case CK_AnyPointerToBlockPointerCast:
2224 case CK_BitCast: {
2225 Value *Src = Visit(const_cast<Expr*>(E));
2226 llvm::Type *SrcTy = Src->getType();
2227 llvm::Type *DstTy = ConvertType(DestTy);
2228 assert(
2229 (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
2230 SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
2231 "Address-space cast must be used to convert address spaces");
2232
2233 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2234 if (auto *PT = DestTy->getAs<PointerType>()) {
2236 PT->getPointeeType(),
2237 Address(Src,
2240 CGF.getPointerAlign()),
2241 /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
2242 CE->getBeginLoc());
2243 }
2244 }
2245
2246 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2247 const QualType SrcType = E->getType();
2248
2249 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2250 // Casting to pointer that could carry dynamic information (provided by
2251 // invariant.group) requires launder.
2252 Src = Builder.CreateLaunderInvariantGroup(Src);
2253 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2254 // Casting to pointer that does not carry dynamic information (provided
2255 // by invariant.group) requires stripping it. Note that we don't do it
2256 // if the source could not be dynamic type and destination could be
2257 // dynamic because dynamic information is already laundered. It is
2258 // because launder(strip(src)) == launder(src), so there is no need to
2259 // add extra strip before launder.
2260 Src = Builder.CreateStripInvariantGroup(Src);
2261 }
2262 }
2263
2264 // Update heapallocsite metadata when there is an explicit pointer cast.
2265 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
2266 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
2267 !isa<CastExpr>(E)) {
2268 QualType PointeeType = DestTy->getPointeeType();
2269 if (!PointeeType.isNull())
2270 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
2271 CE->getExprLoc());
2272 }
2273 }
2274
2275 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2276 // same element type, use the llvm.vector.insert intrinsic to perform the
2277 // bitcast.
2278 if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2279 if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2280 // If we are casting a fixed i8 vector to a scalable i1 predicate
2281 // vector, use a vector insert and bitcast the result.
2282 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
2283 ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
2284 FixedSrcTy->getElementType()->isIntegerTy(8)) {
2285 ScalableDstTy = llvm::ScalableVectorType::get(
2286 FixedSrcTy->getElementType(),
2287 ScalableDstTy->getElementCount().getKnownMinValue() / 8);
2288 }
2289 if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
2290 llvm::Value *UndefVec = llvm::UndefValue::get(ScalableDstTy);
2291 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2292 llvm::Value *Result = Builder.CreateInsertVector(
2293 ScalableDstTy, UndefVec, Src, Zero, "cast.scalable");
2294 if (Result->getType() != DstTy)
2295 Result = Builder.CreateBitCast(Result, DstTy);
2296 return Result;
2297 }
2298 }
2299 }
2300
2301 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2302 // same element type, use the llvm.vector.extract intrinsic to perform the
2303 // bitcast.
2304 if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
2305 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2306 // If we are casting a scalable i1 predicate vector to a fixed i8
2307 // vector, bitcast the source and use a vector extract.
2308 if (ScalableSrcTy->getElementType()->isIntegerTy(1) &&
2309 ScalableSrcTy->getElementCount().isKnownMultipleOf(8) &&
2310 FixedDstTy->getElementType()->isIntegerTy(8)) {
2311 ScalableSrcTy = llvm::ScalableVectorType::get(
2312 FixedDstTy->getElementType(),
2313 ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
2314 Src = Builder.CreateBitCast(Src, ScalableSrcTy);
2315 }
2316 if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType()) {
2317 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2318 return Builder.CreateExtractVector(DstTy, Src, Zero, "cast.fixed");
2319 }
2320 }
2321 }
2322
2323 // Perform VLAT <-> VLST bitcast through memory.
2324 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
2325 // require the element types of the vectors to be the same, we
2326 // need to keep this around for bitcasts between VLAT <-> VLST where
2327 // the element types of the vectors are not the same, until we figure
2328 // out a better way of doing these casts.
2329 if ((isa<llvm::FixedVectorType>(SrcTy) &&
2330 isa<llvm::ScalableVectorType>(DstTy)) ||
2331 (isa<llvm::ScalableVectorType>(SrcTy) &&
2332 isa<llvm::FixedVectorType>(DstTy))) {
2333 Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
2334 LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
2335 CGF.EmitStoreOfScalar(Src, LV);
2336 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2337 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2339 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2340 }
2341 return Builder.CreateBitCast(Src, DstTy);
2342 }
2343 case CK_AddressSpaceConversion: {
2345 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2346 Result.Val.isNullPointer()) {
2347 // If E has side effect, it is emitted even if its final result is a
2348 // null pointer. In that case, a DCE pass should be able to
2349 // eliminate the useless instructions emitted during translating E.
2350 if (Result.HasSideEffects)
2351 Visit(E);
2352 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
2353 ConvertType(DestTy)), DestTy);
2354 }
2355 // Since target may map different address spaces in AST to the same address
2356 // space, an address space conversion may end up as a bitcast.
2358 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2359 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
2360 }
2361 case CK_AtomicToNonAtomic:
2362 case CK_NonAtomicToAtomic:
2363 case CK_UserDefinedConversion:
2364 return Visit(const_cast<Expr*>(E));
2365
2366 case CK_NoOp: {
2367 return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE)
2368 : Visit(const_cast<Expr *>(E));
2369 }
2370
2371 case CK_BaseToDerived: {
2372 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2373 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2374
2376 Address Derived =
2377 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2378 CE->path_begin(), CE->path_end(),
2380
2381 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2382 // performed and the object is not of the derived type.
2383 if (CGF.sanitizePerformTypeCheck())
2385 Derived, DestTy->getPointeeType());
2386
2387 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2388 CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
2389 /*MayBeNull=*/true,
2391 CE->getBeginLoc());
2392
2393 return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType());
2394 }
2395 case CK_UncheckedDerivedToBase:
2396 case CK_DerivedToBase: {
2397 // The EmitPointerWithAlignment path does this fine; just discard
2398 // the alignment.
2400 CE->getType()->getPointeeType());
2401 }
2402
2403 case CK_Dynamic: {
2405 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2406 return CGF.EmitDynamicCast(V, DCE);
2407 }
2408
2409 case CK_ArrayToPointerDecay:
2411 CE->getType()->getPointeeType());
2412 case CK_FunctionToPointerDecay:
2413 return EmitLValue(E).getPointer(CGF);
2414
2415 case CK_NullToPointer:
2416 if (MustVisitNullValue(E))
2417 CGF.EmitIgnoredExpr(E);
2418
2419 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2420 DestTy);
2421
2422 case CK_NullToMemberPointer: {
2423 if (MustVisitNullValue(E))
2424 CGF.EmitIgnoredExpr(E);
2425
2426 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2427 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2428 }
2429
2430 case CK_ReinterpretMemberPointer:
2431 case CK_BaseToDerivedMemberPointer:
2432 case CK_DerivedToBaseMemberPointer: {
2433 Value *Src = Visit(E);
2434
2435 // Note that the AST doesn't distinguish between checked and
2436 // unchecked member pointer conversions, so we always have to
2437 // implement checked conversions here. This is inefficient when
2438 // actual control flow may be required in order to perform the
2439 // check, which it is for data member pointers (but not member
2440 // function pointers on Itanium and ARM).
2441 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2442 }
2443
2444 case CK_ARCProduceObject:
2445 return CGF.EmitARCRetainScalarExpr(E);
2446 case CK_ARCConsumeObject:
2447 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2448 case CK_ARCReclaimReturnedObject:
2449 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2450 case CK_ARCExtendBlockObject:
2451 return CGF.EmitARCExtendBlockObject(E);
2452
2453 case CK_CopyAndAutoreleaseBlockObject:
2454 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2455
2456 case CK_FloatingRealToComplex:
2457 case CK_FloatingComplexCast:
2458 case CK_IntegralRealToComplex:
2459 case CK_IntegralComplexCast:
2460 case CK_IntegralComplexToFloatingComplex:
2461 case CK_FloatingComplexToIntegralComplex:
2462 case CK_ConstructorConversion:
2463 case CK_ToUnion:
2464 case CK_HLSLArrayRValue:
2465 llvm_unreachable("scalar cast to non-scalar value");
2466
2467 case CK_LValueToRValue:
2468 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2469 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2470 return Visit(const_cast<Expr*>(E));
2471
2472 case CK_IntegralToPointer: {
2473 Value *Src = Visit(const_cast<Expr*>(E));
2474
2475 // First, convert to the correct width so that we control the kind of
2476 // extension.
2477 auto DestLLVMTy = ConvertType(DestTy);
2478 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2479 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2480 llvm::Value* IntResult =
2481 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2482
2483 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2484
2485 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2486 // Going from integer to pointer that could be dynamic requires reloading
2487 // dynamic information from invariant.group.
2488 if (DestTy.mayBeDynamicClass())
2489 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2490 }
2491 return IntToPtr;
2492 }
2493 case CK_PointerToIntegral: {
2494 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2495 auto *PtrExpr = Visit(E);
2496
2497 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2498 const QualType SrcType = E->getType();
2499
2500 // Casting to integer requires stripping dynamic information as it does
2501 // not carries it.
2502 if (SrcType.mayBeDynamicClass())
2503 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2504 }
2505
2506 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2507 }
2508 case CK_ToVoid: {
2509 CGF.EmitIgnoredExpr(E);
2510 return nullptr;
2511 }
2512 case CK_MatrixCast: {
2513 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2514 CE->getExprLoc());
2515 }
2516 case CK_VectorSplat: {
2517 llvm::Type *DstTy = ConvertType(DestTy);
2518 Value *Elt = Visit(const_cast<Expr *>(E));
2519 // Splat the element across to all elements
2520 llvm::ElementCount NumElements =
2521 cast<llvm::VectorType>(DstTy)->getElementCount();
2522 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2523 }
2524
2525 case CK_FixedPointCast:
2526 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2527 CE->getExprLoc());
2528
2529 case CK_FixedPointToBoolean:
2530 assert(E->getType()->isFixedPointType() &&
2531 "Expected src type to be fixed point type");
2532 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2533 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2534 CE->getExprLoc());
2535
2536 case CK_FixedPointToIntegral:
2537 assert(E->getType()->isFixedPointType() &&
2538 "Expected src type to be fixed point type");
2539 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2540 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2541 CE->getExprLoc());
2542
2543 case CK_IntegralToFixedPoint:
2544 assert(E->getType()->isIntegerType() &&
2545 "Expected src type to be an integer");
2546 assert(DestTy->isFixedPointType() &&
2547 "Expected dest type to be fixed point type");
2548 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2549 CE->getExprLoc());
2550
2551 case CK_IntegralCast: {
2552 if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
2553 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2554 return Builder.CreateIntCast(Visit(E), ConvertType(DestTy),
2556 "conv");
2557 }
2558 ScalarConversionOpts Opts;
2559 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2560 if (!ICE->isPartOfExplicitCast())
2561 Opts = ScalarConversionOpts(CGF.SanOpts);
2562 }
2563 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2564 CE->getExprLoc(), Opts);
2565 }
2566 case CK_IntegralToFloating: {
2567 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2568 // TODO: Support constrained FP intrinsics.
2569 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2570 if (SrcElTy->isSignedIntegerOrEnumerationType())
2571 return Builder.CreateSIToFP(Visit(E), ConvertType(DestTy), "conv");
2572 return Builder.CreateUIToFP(Visit(E), ConvertType(DestTy), "conv");
2573 }
2574 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2575 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2576 CE->getExprLoc());
2577 }
2578 case CK_FloatingToIntegral: {
2579 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2580 // TODO: Support constrained FP intrinsics.
2581 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2582 if (DstElTy->isSignedIntegerOrEnumerationType())
2583 return Builder.CreateFPToSI(Visit(E), ConvertType(DestTy), "conv");
2584 return Builder.CreateFPToUI(Visit(E), ConvertType(DestTy), "conv");
2585 }
2586 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2587 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2588 CE->getExprLoc());
2589 }
2590 case CK_FloatingCast: {
2591 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2592 // TODO: Support constrained FP intrinsics.
2593 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2594 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2595 if (DstElTy->castAs<BuiltinType>()->getKind() <
2596 SrcElTy->castAs<BuiltinType>()->getKind())
2597 return Builder.CreateFPTrunc(Visit(E), ConvertType(DestTy), "conv");
2598 return Builder.CreateFPExt(Visit(E), ConvertType(DestTy), "conv");
2599 }
2600 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2601 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2602 CE->getExprLoc());
2603 }
2604 case CK_FixedPointToFloating:
2605 case CK_FloatingToFixedPoint: {
2606 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2607 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2608 CE->getExprLoc());
2609 }
2610 case CK_BooleanToSignedIntegral: {
2611 ScalarConversionOpts Opts;
2612 Opts.TreatBooleanAsSigned = true;
2613 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2614 CE->getExprLoc(), Opts);
2615 }
2616 case CK_IntegralToBoolean:
2617 return EmitIntToBoolConversion(Visit(E));
2618 case CK_PointerToBoolean:
2619 return EmitPointerToBoolConversion(Visit(E), E->getType());
2620 case CK_FloatingToBoolean: {
2621 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2622 return EmitFloatToBoolConversion(Visit(E));
2623 }
2624 case CK_MemberPointerToBoolean: {
2625 llvm::Value *MemPtr = Visit(E);
2626 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2627 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2628 }
2629
2630 case CK_FloatingComplexToReal:
2631 case CK_IntegralComplexToReal:
2632 return CGF.EmitComplexExpr(E, false, true).first;
2633
2634 case CK_FloatingComplexToBoolean:
2635 case CK_IntegralComplexToBoolean: {
2637
2638 // TODO: kill this function off, inline appropriate case here
2639 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2640 CE->getExprLoc());
2641 }
2642
2643 case CK_ZeroToOCLOpaqueType: {
2644 assert((DestTy->isEventT() || DestTy->isQueueT() ||
2645 DestTy->isOCLIntelSubgroupAVCType()) &&
2646 "CK_ZeroToOCLEvent cast on non-event type");
2647 return llvm::Constant::getNullValue(ConvertType(DestTy));
2648 }
2649
2650 case CK_IntToOCLSampler:
2651 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2652
2653 case CK_HLSLVectorTruncation: {
2654 assert(DestTy->isVectorType() && "Expected dest type to be vector type");
2655 Value *Vec = Visit(const_cast<Expr *>(E));
2657 unsigned NumElts = DestTy->castAs<VectorType>()->getNumElements();
2658 for (unsigned I = 0; I != NumElts; ++I)
2659 Mask.push_back(I);
2660
2661 return Builder.CreateShuffleVector(Vec, Mask, "trunc");
2662 }
2663
2664 } // end of switch
2665
2666 llvm_unreachable("unknown scalar cast");
2667}
2668
2669Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2670 CodeGenFunction::StmtExprEvaluation eval(CGF);
2671 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2672 !E->getType()->isVoidType());
2673 if (!RetAlloca.isValid())
2674 return nullptr;
2675 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2676 E->getExprLoc());
2677}
2678
2679Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2680 CodeGenFunction::RunCleanupsScope Scope(CGF);
2681 Value *V = Visit(E->getSubExpr());
2682 // Defend against dominance problems caused by jumps out of expression
2683 // evaluation through the shared cleanup block.
2684 Scope.ForceCleanup({&V});
2685 return V;
2686}
2687
2688//===----------------------------------------------------------------------===//
2689// Unary Operators
2690//===----------------------------------------------------------------------===//
2691
// Build a BinOpInfo that models a pre/post increment or decrement as the
// equivalent binary operation "InVal + 1" (add for ++, sub for --), so the
// shared overflow-checked binop emission path can be reused for inc/dec.
// NOTE(review): the line introducing this helper's name and first parameter
// (original line 2692) is missing from this excerpt — presumably
// `static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,`;
// confirm against the full source.
2693 llvm::Value *InVal, bool IsInc,
2694 FPOptions FPFeatures) {
2695 BinOpInfo BinOp;
2696 BinOp.LHS = InVal;
  // RHS is the constant 1 in the operand's LLVM type.
2697 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2698 BinOp.Ty = E->getType();
  // Increment lowers to addition, decrement to subtraction.
2699 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2700 BinOp.FPFeatures = FPFeatures;
2701 BinOp.E = E;
2702 return BinOp;
2703}
2704
// Emit ++/-- on a signed integer value, choosing between plain add, NSW add,
// or the fully overflow-checked binop depending on the language's signed
// overflow behavior and whether the signed-overflow sanitizer is enabled.
// NOTE(review): the three `case` labels of this switch (original lines 2711,
// 2715, 2719) are missing from this excerpt — presumably the
// LangOptions::SOB_* enumerators; confirm against the full source.
2705llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2706 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
  // The delta is +1 or -1 in the operand's own type.
2707 llvm::Value *Amount =
2708 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
2709 StringRef Name = IsInc ? "inc" : "dec";
2710 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
  // Without the sanitizer, a plain wrapping add suffices here.
2712 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2713 return Builder.CreateAdd(InVal, Amount, Name);
2714 [[fallthrough]];
  // Without the sanitizer, overflow is UB, so an NSW add is enough.
2716 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2717 return Builder.CreateNSWAdd(InVal, Amount, Name);
2718 [[fallthrough]];
  // If the AST says this inc/dec can't overflow, NSW is still fine;
  // otherwise emit the runtime overflow check.
2720 if (!E->canOverflow())
2721 return Builder.CreateNSWAdd(InVal, Amount, Name);
2722 return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2723 E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2724 }
2725 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
2726}
2727
2728namespace {
2729/// Handles check and update for lastprivate conditional variables.
///
/// Constructed around the emission of a unary inc/dec; on destruction,
/// when OpenMP is enabled, it notifies the OpenMP machinery about the
/// possible write to the operand so lastprivate(conditional:) tracking
/// stays correct.
2730class OMPLastprivateConditionalUpdateRAII {
2731private:
  // CodeGen context the check is emitted into.
2732 CodeGenFunction &CGF;
  // The inc/dec expression whose sub-expression may be a tracked variable.
2733 const UnaryOperator *E;
2734
2735public:
2736 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
2737 const UnaryOperator *E)
2738 : CGF(CGF), E(E) {}
2739 ~OMPLastprivateConditionalUpdateRAII() {
  // NOTE(review): the callee line (original 2741) is missing from this
  // excerpt — presumably a CGOpenMPRuntime "check and emit lastprivate
  // conditional" call taking (CGF, E->getSubExpr()); confirm against the
  // full source.
2740 if (CGF.getLangOpts().OpenMP)
2742 CGF, E->getSubExpr());
2743 }
2744};
2745} // namespace
2746
// Emit a scalar pre/post increment or decrement (++x, x++, --x, x--) on the
// lvalue LV. Handles atomics (lock-free RMW or a cmpxchg retry loop),
// booleans, integers (with sanitizer-driven promotion/demotion and overflow
// checking), pointers (including VLA and function pointers), vectors,
// floating point (including half via fp16 conversion intrinsics),
// fixed-point types, and Objective-C object pointers.
// Returns the new value for a pre-op, the original loaded value for a post-op.
//
// NOTE(review): this excerpt is missing several original source lines
// (e.g. 2783, 2825, 2830, 2833, 2898, 2902, 2913, 2925, 2954, 2994, 3033,
// 3038) — mostly `if` headers and condition right-hand sides dropped by the
// extraction. Statements that appear truncated below are artifacts of the
// excerpt, not of the real file; consult the full source before editing.
2747llvm::Value *
2748ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2749 bool isInc, bool isPre) {
2750 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
2751 QualType type = E->getSubExpr()->getType();
2752 llvm::PHINode *atomicPHI = nullptr;
2753 llvm::Value *value;
2754 llvm::Value *input;
  // Previous/SrcType feed the bitfield-conversion sanitizer check below.
2755 llvm::Value *Previous = nullptr;
2756 QualType SrcType = E->getType();
2757
2758 int amount = (isInc ? 1 : -1);
2759 bool isSubtraction = !isInc;
2760
  // --- Atomic operands: try lock-free RMW first, else fall into a
  // load-op-cmpxchg loop driven by atomicPHI. ---
2761 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2762 type = atomicTy->getValueType();
2763 if (isInc && type->isBooleanType()) {
2764 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
2765 if (isPre) {
2766 Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
2767 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
2768 return Builder.getTrue();
2769 }
2770 // For atomic bool increment, we just store true and return it for
2771 // preincrement, do an atomic swap with true for postincrement
2772 return Builder.CreateAtomicRMW(
2773 llvm::AtomicRMWInst::Xchg, LV.getAddress(CGF), True,
2774 llvm::AtomicOrdering::SequentiallyConsistent);
2775 }
2776 // Special case for atomic increment / decrement on integers, emit
2777 // atomicrmw instructions. We skip this if we want to be doing overflow
2778 // checking, and fall into the slow path with the atomic cmpxchg loop.
2779 if (!type->isBooleanType() && type->isIntegerType() &&
2780 !(type->isUnsignedIntegerType() &&
2781 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2782 CGF.getLangOpts().getSignedOverflowBehavior() !=
2784 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2785 llvm::AtomicRMWInst::Sub;
2786 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2787 llvm::Instruction::Sub;
2788 llvm::Value *amt = CGF.EmitToMemory(
2789 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
  // atomicrmw returns the OLD value; re-apply the op for pre-inc/dec.
2790 llvm::Value *old =
2791 Builder.CreateAtomicRMW(aop, LV.getAddress(CGF), amt,
2792 llvm::AtomicOrdering::SequentiallyConsistent);
2793 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
2794 }
2795 value = EmitLoadOfLValue(LV, E->getExprLoc());
2796 input = value;
2797 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2798 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2799 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2800 value = CGF.EmitToMemory(value, type);
2801 Builder.CreateBr(opBB);
2802 Builder.SetInsertPoint(opBB);
  // The PHI merges the initial load and each failed-cmpxchg reload.
2803 atomicPHI = Builder.CreatePHI(value->getType(), 2);
2804 atomicPHI->addIncoming(value, startBB);
2805 value = atomicPHI;
2806 } else {
2807 value = EmitLoadOfLValue(LV, E->getExprLoc());
2808 input = value;
2809 }
2810
2811 // Special case of integer increment that we have to check first: bool++.
2812 // Due to promotion rules, we get:
2813 // bool++ -> bool = bool + 1
2814 // -> bool = (int)bool + 1
2815 // -> bool = ((int)bool + 1 != 0)
2816 // An interesting aspect of this is that increment is always true.
2817 // Decrement does not have this property.
2818 if (isInc && type->isBooleanType()) {
2819 value = Builder.getTrue();
2820
2821 // Most common case by far: integer increment.
2822 } else if (type->isIntegerType()) {
2823 QualType promotedType;
2824 bool canPerformLossyDemotionCheck = false;
  // NOTE(review): the `if` header guarding this promotion block (original
  // line 2825) is missing from this excerpt — presumably a check that
  // `type` is a promotable integer type; confirm against the full source.
2826 promotedType = CGF.getContext().getPromotedIntegerType(type);
2827 assert(promotedType != type && "Shouldn't promote to the same type.");
2828 canPerformLossyDemotionCheck = true;
2829 canPerformLossyDemotionCheck &=
2831 CGF.getContext().getCanonicalType(promotedType);
2832 canPerformLossyDemotionCheck &=
2834 type, promotedType);
2835 assert((!canPerformLossyDemotionCheck ||
2836 type->isSignedIntegerOrEnumerationType() ||
2837 promotedType->isSignedIntegerOrEnumerationType() ||
2838 ConvertType(type)->getScalarSizeInBits() ==
2839 ConvertType(promotedType)->getScalarSizeInBits()) &&
2840 "The following check expects that if we do promotion to different "
2841 "underlying canonical type, at least one of the types (either "
2842 "base or promoted) will be signed, or the bitwidths will match.");
2843 }
2844 if (CGF.SanOpts.hasOneOf(
2845 SanitizerKind::ImplicitIntegerArithmeticValueChange |
2846 SanitizerKind::ImplicitBitfieldConversion) &&
2847 canPerformLossyDemotionCheck) {
2848 // While `x += 1` (for `x` with width less than int) is modeled as
2849 // promotion+arithmetics+demotion, and we can catch lossy demotion with
2850 // ease; inc/dec with width less than int can't overflow because of
2851 // promotion rules, so we omit promotion+demotion, which means that we can
2852 // not catch lossy "demotion". Because we still want to catch these cases
2853 // when the sanitizer is enabled, we perform the promotion, then perform
2854 // the increment/decrement in the wider type, and finally
2855 // perform the demotion. This will catch lossy demotions.
2856
2857 // We have a special case for bitfields defined using all the bits of the
2858 // type. In this case we need to do the same trick as for the integer
2859 // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.
2860
2861 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
2862 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2863 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2864 // Do pass non-default ScalarConversionOpts so that sanitizer check is
2865 // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
2866 // checks will take care of the conversion.
2867 ScalarConversionOpts Opts;
2868 if (!LV.isBitField())
2869 Opts = ScalarConversionOpts(CGF.SanOpts);
2870 else if (CGF.SanOpts.has(SanitizerKind::ImplicitBitfieldConversion)) {
2871 Previous = value;
2872 SrcType = promotedType;
2873 }
2874
2875 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
2876 Opts);
2877
2878 // Note that signed integer inc/dec with width less than int can't
2879 // overflow because of promotion rules; we're just eliding a few steps
2880 // here.
2881 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2882 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2883 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2884 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2885 value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2886 E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2887 } else {
  // No overflow concerns: a plain add of +/-1.
2888 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2889 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2890 }
2891
2892 // Next most common: pointer increment.
2893 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2894 QualType type = ptr->getPointeeType();
2895
2896 // VLA types don't have constant size.
2897 if (const VariableArrayType *vla
2899 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2900 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
2901 llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
  // NOTE(review): the condition selecting plain vs. checked GEP (original
  // lines 2902/2913/2925/3038) is missing from this excerpt — presumably
  // a signed-overflow-defined check; confirm against the full source.
2903 value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
2904 else
2905 value = CGF.EmitCheckedInBoundsGEP(
2906 elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
2907 E->getExprLoc(), "vla.inc");
2908
2909 // Arithmetic on function pointers (!) is just +-1.
2910 } else if (type->isFunctionType()) {
2911 llvm::Value *amt = Builder.getInt32(amount);
2912
2914 value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
2915 else
2916 value =
2917 CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
2918 /*SignedIndices=*/false, isSubtraction,
2919 E->getExprLoc(), "incdec.funcptr");
2920
2921 // For everything else, we can just do a simple increment.
2922 } else {
2923 llvm::Value *amt = Builder.getInt32(amount);
2924 llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
2926 value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
2927 else
2928 value = CGF.EmitCheckedInBoundsGEP(
2929 elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
2930 E->getExprLoc(), "incdec.ptr");
2931 }
2932
2933 // Vector increment/decrement.
2934 } else if (type->isVectorType()) {
2935 if (type->hasIntegerRepresentation()) {
2936 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
2937
2938 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2939 } else {
2940 value = Builder.CreateFAdd(
2941 value,
2942 llvm::ConstantFP::get(value->getType(), amount),
2943 isInc ? "inc" : "dec");
2944 }
2945
2946 // Floating point.
2947 } else if (type->isRealFloatingType()) {
2948 // Add the inc/dec to the real part.
2949 llvm::Value *amt;
2950 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
2951
  // Half without native support: widen to float first, either via the
  // convert_from_fp16 intrinsic or a plain fpext.
2952 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2953 // Another special case: half FP increment should be done via float
2955 value = Builder.CreateCall(
2956 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
2957 CGF.CGM.FloatTy),
2958 input, "incdec.conv");
2959 } else {
2960 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
2961 }
2962 }
2963
  // Build the +/-1 constant in whatever FP semantics `value` now has.
2964 if (value->getType()->isFloatTy())
2965 amt = llvm::ConstantFP::get(VMContext,
2966 llvm::APFloat(static_cast<float>(amount)));
2967 else if (value->getType()->isDoubleTy())
2968 amt = llvm::ConstantFP::get(VMContext,
2969 llvm::APFloat(static_cast<double>(amount)));
2970 else {
2971 // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
2972 // Convert from float.
2973 llvm::APFloat F(static_cast<float>(amount));
2974 bool ignored;
2975 const llvm::fltSemantics *FS;
2976 // Don't use getFloatTypeSemantics because Half isn't
2977 // necessarily represented using the "half" LLVM type.
2978 if (value->getType()->isFP128Ty())
2979 FS = &CGF.getTarget().getFloat128Format();
2980 else if (value->getType()->isHalfTy())
2981 FS = &CGF.getTarget().getHalfFormat();
2982 else if (value->getType()->isBFloatTy())
2983 FS = &CGF.getTarget().getBFloat16Format();
2984 else if (value->getType()->isPPC_FP128Ty())
2985 FS = &CGF.getTarget().getIbm128Format();
2986 else
2987 FS = &CGF.getTarget().getLongDoubleFormat();
2988 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
2989 amt = llvm::ConstantFP::get(VMContext, F);
2990 }
2991 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
2992
  // Narrow back to half if we widened above.
2993 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2995 value = Builder.CreateCall(
2996 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
2997 CGF.CGM.FloatTy),
2998 value, "incdec.conv");
2999 } else {
3000 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
3001 }
3002 }
3003
3004 // Fixed-point types.
3005 } else if (type->isFixedPointType()) {
3006 // Fixed-point types are tricky. In some cases, it isn't possible to
3007 // represent a 1 or a -1 in the type at all. Piggyback off of
3008 // EmitFixedPointBinOp to avoid having to reimplement saturation.
3009 BinOpInfo Info;
3010 Info.E = E;
3011 Info.Ty = E->getType();
3012 Info.Opcode = isInc ? BO_Add : BO_Sub;
3013 Info.LHS = value;
3014 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
3015 // If the type is signed, it's better to represent this as +(-1) or -(-1),
3016 // since -1 is guaranteed to be representable.
3017 if (type->isSignedFixedPointType()) {
3018 Info.Opcode = isInc ? BO_Sub : BO_Add;
3019 Info.RHS = Builder.CreateNeg(Info.RHS);
3020 }
3021 // Now, convert from our invented integer literal to the type of the unary
3022 // op. This will upscale and saturate if necessary. This value can become
3023 // undef in some cases.
3024 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3025 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
3026 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
3027 value = EmitFixedPointBinOp(Info);
3028
3029 // Objective-C pointer types.
3030 } else {
3031 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
3032
  // NOTE(review): the declaration of `size` (original line 3033) is missing
  // from this excerpt — presumably the CharUnits size of the pointee;
  // confirm against the full source.
3034 if (!isInc) size = -size;
3035 llvm::Value *sizeValue =
3036 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
3037
3039 value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
3040 else
3041 value = CGF.EmitCheckedInBoundsGEP(
3042 CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
3043 E->getExprLoc(), "incdec.objptr");
3044 value = Builder.CreateBitCast(value, input->getType());
3045 }
3046
  // --- Tail of the atomic cmpxchg loop: try to publish the new value; on
  // failure, feed the freshly observed value back into the PHI and retry. ---
3047 if (atomicPHI) {
3048 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3049 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3050 auto Pair = CGF.EmitAtomicCompareExchange(
3051 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
3052 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
3053 llvm::Value *success = Pair.second;
3054 atomicPHI->addIncoming(old, curBlock);
3055 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3056 Builder.SetInsertPoint(contBB);
3057 return isPre ? value : input;
3058 }
3059
3060 // Store the updated result through the lvalue.
3061 if (LV.isBitField()) {
3062 Value *Src = Previous ? Previous : value;
3063 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
3064 CGF.EmitBitfieldConversionCheck(Src, SrcType, value, E->getType(),
3065 LV.getBitFieldInfo(), E->getExprLoc());
3066 } else
3067 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
3068
3069 // If this is a postinc, return the value read from memory, otherwise use the
3070 // updated value.
3071 return isPre ? value : input;
3072}
3073
3074
3075Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3076 QualType PromotionType) {
3077 QualType promotionTy = PromotionType.isNull()
3078 ? getPromotionType(E->getSubExpr()->getType())
3079 : PromotionType;
3080 Value *result = VisitPlus(E, promotionTy);
3081 if (result && !promotionTy.isNull())
3082 result = EmitUnPromotedValue(result, E->getType());
3083 return result;
3084}
3085
3086Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3087 QualType PromotionType) {
3088 // This differs from gcc, though, most likely due to a bug in gcc.
3089 TestAndClearIgnoreResultAssign();
3090 if (!PromotionType.isNull())
3091 return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3092 return Visit(E->getSubExpr());
3093}
3094
3095Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3096 QualType PromotionType) {
3097 QualType promotionTy = PromotionType.isNull()
3098 ? getPromotionType(E->getSubExpr()->getType())
3099 : PromotionType;
3100 Value *result = VisitMinus(E, promotionTy);
3101 if (result && !promotionTy.isNull())
3102 result = EmitUnPromotedValue(result, E->getType());
3103 return result;
3104}
3105
3106Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3107 QualType PromotionType) {
3108 TestAndClearIgnoreResultAssign();
3109 Value *Op;
3110 if (!PromotionType.isNull())
3111 Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3112 else
3113 Op = Visit(E->getSubExpr());
3114
3115 // Generate a unary FNeg for FP ops.
3116 if (Op->getType()->isFPOrFPVectorTy())
3117 return Builder.CreateFNeg(Op, "fneg");
3118
3119 // Emit unary minus with EmitSub so we handle overflow cases etc.
3120 BinOpInfo BinOp;
3121 BinOp.RHS = Op;
3122 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
3123 BinOp.Ty = E->getType();
3124 BinOp.Opcode = BO_Sub;
3125 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3126 BinOp.E = E;
3127 return EmitSub(BinOp);
3128}
3129
3130Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3131 TestAndClearIgnoreResultAssign();
3132 Value *Op = Visit(E->getSubExpr());
3133 return Builder.CreateNot(Op, "not");
3134}
3135
// Emit logical not '!'. For vector operands (of the kind selected below) the
// result is an element-wise compare-against-zero sign-extended to the vector
// type; for everything else the operand is evaluated as a bool, inverted,
// and zero-extended to the expression's type.
// NOTE(review): the right-hand side of the vector-kind comparison (original
// line 3140) is missing from this excerpt — presumably a VectorKind
// enumerator such as the generic vector kind; confirm against the full
// source.
3136Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
3137 // Perform vector logical not on comparison with zero vector.
3138 if (E->getType()->isVectorType() &&
3139 E->getType()->castAs<VectorType>()->getVectorKind() ==
3141 Value *Oper = Visit(E->getSubExpr());
3142 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
3143 Value *Result;
3144 if (Oper->getType()->isFPOrFPVectorTy()) {
  // FP compares must honor the expression's FP environment/options.
3145 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
3146 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
3147 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
3148 } else
3149 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
  // Vector bools are all-ones/all-zeros lanes, hence sign extension.
3150 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
3151 }
3152
3153 // Compare operand to zero.
3154 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
3155
3156 // Invert value.
3157 // TODO: Could dynamically modify easy computations here. For example, if
3158 // the operand is an icmp ne, turn into icmp eq.
3159 BoolVal = Builder.CreateNot(BoolVal, "lnot");
3160
3161 // ZExt result to the expr type.
3162 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
3163}
3164
// Emit __builtin_offsetof. First tries full constant folding; otherwise
// walks the component list (array subscripts, fields, base classes),
// accumulating byte offsets into an integer of the expression's type.
// NOTE(review): a `case` label between the Field case and the
// llvm_unreachable below (original line 3228) is missing from this excerpt —
// presumably the OffsetOfNode identifier/dependent case; confirm against the
// full source.
3165Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
3166 // Try folding the offsetof to a constant.
3167 Expr::EvalResult EVResult;
3168 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
3169 llvm::APSInt Value = EVResult.Val.getInt();
3170 return Builder.getInt(Value);
3171 }
3172
3173 // Loop over the components of the offsetof to compute the value.
3174 unsigned n = E->getNumComponents();
3175 llvm::Type* ResultType = ConvertType(E->getType());
3176 llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
  // CurrentType tracks the type reached so far along the component chain.
3177 QualType CurrentType = E->getTypeSourceInfo()->getType();
3178 for (unsigned i = 0; i != n; ++i) {
3179 OffsetOfNode ON = E->getComponent(i);
3180 llvm::Value *Offset = nullptr;
3181 switch (ON.getKind()) {
  // Array component: offset = index * sizeof(element). The index need not
  // be constant, so this path emits runtime arithmetic.
3182 case OffsetOfNode::Array: {
3183 // Compute the index
3184 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
3185 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
3186 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
3187 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
3188
3189 // Save the element type
3190 CurrentType =
3191 CGF.getContext().getAsArrayType(CurrentType)->getElementType();
3192
3193 // Compute the element size
3194 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
3195 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
3196
3197 // Multiply out to compute the result
3198 Offset = Builder.CreateMul(Idx, ElemSize);
3199 break;
3200 }
3201
  // Field component: look the field up in the record layout and use its
  // constant byte offset.
3202 case OffsetOfNode::Field: {
3203 FieldDecl *MemberDecl = ON.getField();
3204 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
3205 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
3206
3207 // Compute the index of the field in its parent.
3208 unsigned i = 0;
3209 // FIXME: It would be nice if we didn't have to loop here!
3210 for (RecordDecl::field_iterator Field = RD->field_begin(),
3211 FieldEnd = RD->field_end();
3212 Field != FieldEnd; ++Field, ++i) {
3213 if (*Field == MemberDecl)
3214 break;
3215 }
3216 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
3217
3218 // Compute the offset to the field
3219 int64_t OffsetInt = RL.getFieldOffset(i) /
3220 CGF.getContext().getCharWidth();
3221 Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
3222
3223 // Save the element type.
3224 CurrentType = MemberDecl->getType();
3225 break;
3226 }
3227
3229 llvm_unreachable("dependent __builtin_offsetof");
3230
  // Base-class component: add the base's offset from the record layout.
  // Virtual bases have no fixed offset and are rejected.
3231 case OffsetOfNode::Base: {
3232 if (ON.getBase()->isVirtual()) {
3233 CGF.ErrorUnsupported(E, "virtual base in offsetof");
3234 continue;
3235 }
3236
3237 RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
3238 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
3239
3240 // Save the element type.
3241 CurrentType = ON.getBase()->getType();
3242
3243 // Compute the offset to the base.
3244 auto *BaseRT = CurrentType->castAs<RecordType>();
3245 auto *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
3246 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
3247 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
3248 break;
3249 }
3250 }
3251 Result = Builder.CreateAdd(Result, Offset);
3252 }
3253 return Result;
3254}
3255
3256/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
3257/// argument of the sizeof expression as an integer.
// Emit sizeof/__datasizeof/alignof-family expressions as an integer. VLA
// sizeof is the only genuinely runtime case (NumElts * element size); the
// OpenMP required-simd-align and __builtin_vectorelements traits are handled
// specially; everything else is constant-folded.
// NOTE(review): this excerpt is missing original lines 3272 (the else-branch
// statement — presumably evaluating the VLA argument expression for its side
// effects per C99 6.5.3.4p2) and 3288-3289 (the middle of the alignment
// computation chain); confirm against the full source.
3258Value *
3259ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
3260 const UnaryExprOrTypeTraitExpr *E) {
3261 QualType TypeToSize = E->getTypeOfArgument();
3262 if (auto Kind = E->getKind();
3263 Kind == UETT_SizeOf || Kind == UETT_DataSizeOf) {
3264 if (const VariableArrayType *VAT =
3265 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
3266 if (E->isArgumentType()) {
3267 // sizeof(type) - make sure to emit the VLA size.
3268 CGF.EmitVariablyModifiedType(TypeToSize);
3269 } else {
3270 // C99 6.5.3.4p2: If the argument is an expression of type
3271 // VLA, it is evaluated.
3273 }
3274
3275 auto VlaSize = CGF.getVLASize(VAT);
3276 llvm::Value *size = VlaSize.NumElts;
3277
3278 // Scale the number of non-VLA elements by the non-VLA element size.
3279 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
3280 if (!eltSize.isOne())
3281 size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);
3282
3283 return size;
3284 }
3285 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
3286 auto Alignment =
3287 CGF.getContext()
3290 .getQuantity();
3291 return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
3292 } else if (E->getKind() == UETT_VectorElements) {
  // __builtin_vectorelements: element count of the (possibly scalable)
  // vector type, emitted via CreateElementCount.
3293 auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));
3294 return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());
3295 }
3296
3297 // If this isn't sizeof(vla), the result must be constant; use the constant
3298 // folding logic so we don't have to duplicate it here.
3299 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
3300}
3301
3302Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3303 QualType PromotionType) {
3304 QualType promotionTy = PromotionType.isNull()
3305 ? getPromotionType(E->getSubExpr()->getType())
3306 : PromotionType;
3307 Value *result = VisitReal(E, promotionTy);
3308 if (result && !promotionTy.isNull())
3309 result = EmitUnPromotedValue(result, E->getType());
3310 return result;
3311}
3312
3313Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
3314 QualType PromotionType) {
3315 Expr *Op = E->getSubExpr();
3316 if (Op->getType()->isAnyComplexType()) {
3317 // If it's an l-value, load through the appropriate subobject l-value.
3318 // Note that we have to ask E because Op might be an l-value that
3319 // this won't work for, e.g. an Obj-C property.
3320 if (E->isGLValue()) {
3321 if (!PromotionType.isNull()) {
3323 Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
3324 if (result.first)
3325 result.first = CGF.EmitPromotedValue(result, PromotionType).first;
3326 return result.first;
3327 } else {
3328 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3329 .getScalarVal();
3330 }
3331 }
3332 // Otherwise, calculate and project.
3333 return CGF.EmitComplexExpr(Op, false, true).first;
3334 }
3335
3336 if (!PromotionType.isNull())
3337 return CGF.EmitPromotedScalarExpr(Op, PromotionType);
3338 return Visit(Op);
3339}
3340
3341Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3342 QualType PromotionType) {
3343 QualType promotionTy = PromotionType.isNull()
3344 ? getPromotionType(E->getSubExpr()->getType())
3345 : PromotionType;
3346 Value *result = VisitImag(E, promotionTy);
3347 if (result && !promotionTy.isNull())
3348 result = EmitUnPromotedValue(result, E->getType());
3349 return result;
3350}
3351
3352Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
3353 QualType PromotionType) {
3354 Expr *Op = E->getSubExpr();
3355 if (Op->getType()->isAnyComplexType()) {
3356 // If it's an l-value, load through the appropriate subobject l-value.
3357 // Note that we have to ask E because Op might be an l-value that
3358 // this won't work for, e.g. an Obj-C property.
3359 if (Op->isGLValue()) {
3360 if (!PromotionType.isNull()) {
3362 Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
3363 if (result.second)
3364 result.second = CGF.EmitPromotedValue(result, PromotionType).second;
3365 return result.second;
3366 } else {
3367 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3368 .getScalarVal();
3369 }
3370 }
3371 // Otherwise, calculate and project.
3372 return CGF.EmitComplexExpr(Op, true, false).second;
3373 }
3374
3375 // __imag on a scalar returns zero. Emit the subexpr to ensure side
3376 // effects are evaluated, but not the actual value.
3377 if (Op->isGLValue())
3378 CGF.EmitLValue(Op);
3379 else if (!PromotionType.isNull())
3380 CGF.EmitPromotedScalarExpr(Op, PromotionType);
3381 else
3382 CGF.EmitScalarExpr(Op, true);
3383 if (!PromotionType.isNull())
3384 return llvm::Constant::getNullValue(ConvertType(PromotionType));
3385 return llvm::Constant::getNullValue(ConvertType(E->getType()));
3386}
3387
3388//===----------------------------------------------------------------------===//
3389// Binary Operators
3390//===----------------------------------------------------------------------===//
3391
3392Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3393 QualType PromotionType) {
3394 return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");
3395}
3396
3397Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3398 QualType ExprType) {
3399 return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType), "unpromotion");
3400}
3401
3402Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3403 E = E->IgnoreParens();
3404 if (auto BO = dyn_cast<BinaryOperator>(E)) {
3405 switch (BO->getOpcode()) {
3406#define HANDLE_BINOP(OP) \
3407 case BO_##OP: \
3408 return Emit##OP(EmitBinOps(BO, PromotionType));
3409 HANDLE_BINOP(Add)
3410 HANDLE_BINOP(Sub)
3411 HANDLE_BINOP(Mul)
3412 HANDLE_BINOP(Div)
3413#undef HANDLE_BINOP
3414 default:
3415 break;
3416 }
3417 } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
3418 switch (UO->getOpcode()) {
3419 case UO_Imag:
3420 return VisitImag(UO, PromotionType);
3421 case UO_Real:
3422 return VisitReal(UO, PromotionType);
3423 case UO_Minus:
3424 return VisitMinus(UO, PromotionType);
3425 case UO_Plus:
3426 return VisitPlus(UO, PromotionType);
3427 default:
3428 break;
3429 }
3430 }
3431 auto result = Visit(const_cast<Expr *>(E));
3432 if (result) {
3433 if (!PromotionType.isNull())
3434 return EmitPromotedValue(result, PromotionType);
3435 else
3436 return EmitUnPromotedValue(result, E->getType());
3437 }
3438 return result;
3439}
3440
3441BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
3442 QualType PromotionType) {
3443 TestAndClearIgnoreResultAssign();
3444 BinOpInfo Result;
3445 Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);
3446 Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);
3447 if (!PromotionType.isNull())
3448 Result.Ty = PromotionType;
3449 else
3450 Result.Ty = E->getType();
3451 Result.Opcode = E->getOpcode();
3452 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3453 Result.E = E;
3454 return Result;
3455}
3456
3457LValue ScalarExprEmitter::EmitCompoundAssignLValue(
3458 const CompoundAssignOperator *E,
3459 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
3460 Value *&Result) {
3461 QualType LHSTy = E->getLHS()->getType();
3462 BinOpInfo OpInfo;
3463
3466
3467 // Emit the RHS first. __block variables need to have the rhs evaluated
3468 // first, plus this should improve codegen a little.
3469
3470 QualType PromotionTypeCR;
3471 PromotionTypeCR = getPromotionType(E->getComputationResultType());
3472 if (PromotionTypeCR.isNull())
3473 PromotionTypeCR = E->getComputationResultType();
3474 QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
3475 QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
3476 if (!PromotionTypeRHS.isNull())
3477 OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);
3478 else
3479 OpInfo.RHS = Visit(E->getRHS());
3480 OpInfo.Ty = PromotionTypeCR;
3481 OpInfo.Opcode = E->getOpcode();
3482 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3483 OpInfo.E = E;
3484 // Load/convert the LHS.
3485 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3486
3487 llvm::PHINode *atomicPHI = nullptr;
3488 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3489 QualType type = atomicTy->getValueType();
3490 if (!type->isBooleanType() && type->isIntegerType() &&
3491 !(type->isUnsignedIntegerType() &&
3492 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3493 CGF.getLangOpts().getSignedOverflowBehavior() !=
3495 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3496 llvm::Instruction::BinaryOps Op;
3497 switch (OpInfo.Opcode) {
3498 // We don't have atomicrmw operands for *, %, /, <<, >>
3499 case BO_MulAssign: case BO_DivAssign:
3500 case BO_RemAssign:
3501 case BO_ShlAssign:
3502 case BO_ShrAssign:
3503 break;
3504 case BO_AddAssign:
3505 AtomicOp = llvm::AtomicRMWInst::Add;
3506 Op = llvm::Instruction::Add;
3507 break;
3508 case BO_SubAssign:
3509 AtomicOp = llvm::AtomicRMWInst::Sub;
3510 Op = llvm::Instruction::Sub;
3511 break;
3512 case BO_AndAssign:
3513 AtomicOp = llvm::AtomicRMWInst::And;
3514 Op = llvm::Instruction::And;
3515 break;
3516 case BO_XorAssign:
3517 AtomicOp = llvm::AtomicRMWInst::Xor;
3518 Op = llvm::Instruction::Xor;
3519 break;
3520 case BO_OrAssign:
3521 AtomicOp = llvm::AtomicRMWInst::Or;
3522 Op = llvm::Instruction::Or;
3523 break;
3524 default:
3525 llvm_unreachable("Invalid compound assignment type");
3526 }
3527 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3528 llvm::Value *Amt = CGF.EmitToMemory(
3529 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
3530 E->getExprLoc()),
3531 LHSTy);
3532 Value *OldVal = Builder.CreateAtomicRMW(
3533 AtomicOp, LHSLV.getAddress(CGF), Amt,
3534 llvm::AtomicOrdering::SequentiallyConsistent);
3535
3536 // Since operation is atomic, the result type is guaranteed to be the
3537 // same as the input in LLVM terms.
3538 Result = Builder.CreateBinOp(Op, OldVal, Amt);
3539 return LHSLV;
3540 }
3541 }
3542 // FIXME: For floating point types, we should be saving and restoring the
3543 // floating point environment in the loop.
3544 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3545 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3546 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3547 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
3548 Builder.CreateBr(opBB);
3549 Builder.SetInsertPoint(opBB);
3550 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
3551 atomicPHI->addIncoming(OpInfo.LHS, startBB);
3552 OpInfo.LHS = atomicPHI;
3553 }
3554 else
3555 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3556
3557 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
3558 SourceLocation Loc = E->getExprLoc();
3559 if (!PromotionTypeLHS.isNull())
3560 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
3561 E->getExprLoc());
3562 else
3563 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
3564 E->getComputationLHSType(), Loc);
3565
3566 // Expand the binary operator.
3567 Result = (this->*Func)(OpInfo);
3568
3569 // Convert the result back to the LHS type,
3570 // potentially with Implicit Conversion sanitizer check.
3571 // If LHSLV is a bitfield, use default ScalarConversionOpts
3572 // to avoid emit any implicit integer checks.
3573 Value *Previous = nullptr;
3574 if (LHSLV.isBitField()) {
3575 Previous = Result;
3576 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc);
3577 } else
3578 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
3579 ScalarConversionOpts(CGF.SanOpts));
3580
3581 if (atomicPHI) {
3582 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3583 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3584 auto Pair = CGF.EmitAtomicCompareExchange(
3585 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
3586 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
3587 llvm::Value *success = Pair.second;
3588 atomicPHI->addIncoming(old, curBlock);
3589 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3590 Builder.SetInsertPoint(contBB);
3591 return LHSLV;
3592 }
3593
3594 // Store the result value into the LHS lvalue. Bit-fields are handled
3595 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
3596 // 'An assignment expression has the value of the left operand after the
3597 // assignment...'.
3598 if (LHSLV.isBitField()) {
3599 Value *Src = Previous ? Previous : Result;
3600 QualType SrcType = E->getRHS()->getType();
3601 QualType DstType = E->getLHS()->getType();
3603 CGF.EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
3604 LHSLV.getBitFieldInfo(), E->getExprLoc());
3605 } else
3607
3608 if (CGF.getLangOpts().OpenMP)
3610 E->getLHS());
3611 return LHSLV;
3612}
3613
3614Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3615 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3616 bool Ignore = TestAndClearIgnoreResultAssign();
3617 Value *RHS = nullptr;
3618 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
3619
3620 // If the result is clearly ignored, return now.
3621 if (Ignore)
3622 return nullptr;
3623
3624 // The result of an assignment in C is the assigned r-value.
3625 if (!CGF.getLangOpts().CPlusPlus)
3626 return RHS;
3627
3628 // If the lvalue is non-volatile, return the computed value of the assignment.
3629 if (!LHS.isVolatileQualified())
3630 return RHS;
3631
3632 // Otherwise, reload the value.
3633 return EmitLoadOfLValue(LHS, E->getExprLoc());
3634}
3635
3636void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
3637 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
3639
3640 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
3641 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
3642 SanitizerKind::IntegerDivideByZero));
3643 }
3644
3645 const auto *BO = cast<BinaryOperator>(Ops.E);
3646 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
3647 Ops.Ty->hasSignedIntegerRepresentation() &&
3648 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
3649 Ops.mayHaveIntegerOverflow()) {
3650 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
3651
3652 llvm::Value *IntMin =
3653 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
3654 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
3655
3656 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
3657 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
3658 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
3659 Checks.push_back(
3660 std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
3661 }
3662
3663 if (Checks.size() > 0)
3664 EmitBinOpCheck(Checks, Ops);
3665}
3666
3667Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
3668 {
3669 CodeGenFunction::SanitizerScope SanScope(&CGF);
3670 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3671 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3672 Ops.Ty->isIntegerType() &&
3673 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3674 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3675 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
3676 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
3677 Ops.Ty->isRealFloatingType() &&
3678 Ops.mayHaveFloatDivisionByZero()) {
3679 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3680 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
3681 EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
3682 Ops);
3683 }
3684 }
3685
3686 if (Ops.Ty->isConstantMatrixType()) {
3687 llvm::MatrixBuilder MB(Builder);
3688 // We need to check the types of the operands of the operator to get the
3689 // correct matrix dimensions.
3690 auto *BO = cast<BinaryOperator>(Ops.E);
3691 (void)BO;
3692 assert(
3693 isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
3694 "first operand must be a matrix");
3695 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
3696 "second operand must be an arithmetic type");
3697 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3698 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
3699 Ops.Ty->hasUnsignedIntegerRepresentation());
3700 }
3701
3702 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
3703 llvm::Value *Val;
3704 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3705 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
3706 CGF.SetDivFPAccuracy(Val);
3707 return Val;
3708 }
3709 else if (Ops.isFixedPointOp())
3710 return EmitFixedPointBinOp(Ops);
3711 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
3712 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
3713 else
3714 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
3715}
3716
3717Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
3718 // Rem in C can't be a floating point type: C99 6.5.5p2.
3719 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3720 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3721 Ops.Ty->isIntegerType() &&
3722 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3723 CodeGenFunction::SanitizerScope SanScope(&CGF);
3724 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3725 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
3726 }
3727
3728 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3729 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
3730 else
3731 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
3732}
3733
3734Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
3735 unsigned IID;
3736 unsigned OpID = 0;
3737 SanitizerHandler OverflowKind;
3738
3739 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
3740 switch (Ops.Opcode) {
3741 case BO_Add:
3742 case BO_AddAssign:
3743 OpID = 1;
3744 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
3745 llvm::Intrinsic::uadd_with_overflow;
3746 OverflowKind = SanitizerHandler::AddOverflow;
3747 break;
3748 case BO_Sub:
3749 case BO_SubAssign:
3750 OpID = 2;
3751 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
3752 llvm::Intrinsic::usub_with_overflow;
3753 OverflowKind = SanitizerHandler::SubOverflow;
3754 break;
3755 case BO_Mul:
3756 case BO_MulAssign:
3757 OpID = 3;
3758 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
3759 llvm::Intrinsic::umul_with_overflow;
3760 OverflowKind = SanitizerHandler::MulOverflow;
3761 break;
3762 default:
3763 llvm_unreachable("Unsupported operation for overflow detection");
3764 }
3765 OpID <<= 1;
3766 if (isSigned)
3767 OpID |= 1;
3768
3769 CodeGenFunction::SanitizerScope SanScope(&CGF);
3770 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
3771
3772 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
3773
3774 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
3775 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
3776 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
3777
3778 // Handle overflow with llvm.trap if no custom handler has been specified.
3779 const std::string *handlerName =
3781 if (handlerName->empty()) {
3782 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
3783 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
3784 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
3785 llvm::Value *NotOverflow = Builder.CreateNot(overflow);
3786 SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
3787 : SanitizerKind::UnsignedIntegerOverflow;
3788 EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
3789 } else
3790 CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
3791 return result;
3792 }
3793
3794 // Branch in case of overflow.
3795 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
3796 llvm::BasicBlock *continueBB =
3797 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
3798 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
3799
3800 Builder.CreateCondBr(overflow, overflowBB, continueBB);
3801
3802 // If an overflow handler is set, then we want to call it and then use its
3803 // result, if it returns.
3804 Builder.SetInsertPoint(overflowBB);
3805
3806 // Get the overflow handler.
3807 llvm::Type *Int8Ty = CGF.Int8Ty;
3808 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
3809 llvm::FunctionType *handlerTy =
3810 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
3811 llvm::FunctionCallee handler =
3812 CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
3813
3814 // Sign extend the args to 64-bit, so that we can use the same handler for
3815 // all types of overflow.
3816 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
3817 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
3818
3819 // Call the handler with the two arguments, the operation, and the size of
3820 // the result.
3821 llvm::Value *handlerArgs[] = {
3822 lhs,
3823 rhs,
3824 Builder.getInt8(OpID),
3825 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
3826 };
3827 llvm::Value *handlerResult =
3828 CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
3829
3830 // Truncate the result back to the desired size.
3831 handlerResult = Builder.CreateTrunc(handlerResult, opTy);
3832 Builder.CreateBr(continueBB);
3833
3834 Builder.SetInsertPoint(continueBB);
3835 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
3836 phi->addIncoming(result, initialBB);
3837 phi->addIncoming(handlerResult, overflowBB);
3838
3839 return phi;
3840}
3841
3842/// Emit pointer + index arithmetic.
3844 const BinOpInfo &op,
3845 bool isSubtraction) {
3846 // Must have binary (not unary) expr here. Unary pointer
3847 // increment/decrement doesn't use this path.
3848 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
3849
3850 Value *pointer = op.LHS;
3851 Expr *pointerOperand = expr->getLHS();
3852 Value *index = op.RHS;
3853 Expr *indexOperand = expr->getRHS();
3854
3855 // In a subtraction, the LHS is always the pointer.
3856 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
3857 std::swap(pointer, index);
3858 std::swap(pointerOperand, indexOperand);
3859 }
3860
3861 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
3862
3863 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
3864 auto &DL = CGF.CGM.getDataLayout();
3865 auto PtrTy = cast<llvm::PointerType>(pointer->getType());
3866
3867 // Some versions of glibc and gcc use idioms (particularly in their malloc
3868 // routines) that add a pointer-sized integer (known to be a pointer value)
3869 // to a null pointer in order to cast the value back to an integer or as
3870 // part of a pointer alignment algorithm. This is undefined behavior, but
3871 // we'd like to be able to compile programs that use it.
3872 //
3873 // Normally, we'd generate a GEP with a null-pointer base here in response
3874 // to that code, but it's also UB to dereference a pointer created that
3875 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
3876 // generate a direct cast of the integer value to a pointer.
3877 //
3878 // The idiom (p = nullptr + N) is not met if any of the following are true:
3879 //
3880 // The operation is subtraction.
3881 // The index is not pointer-sized.
3882 // The pointer type is not byte-sized.
3883 //
3885 op.Opcode,
3886 expr->getLHS(),
3887 expr->getRHS()))
3888 return CGF.Builder.CreateIntToPtr(index, pointer->getType());
3889
3890 if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
3891 // Zero-extend or sign-extend the pointer value according to
3892 // whether the index is signed or not.
3893 index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
3894 "idx.ext");
3895 }
3896
3897 // If this is subtraction, negate the index.
3898 if (isSubtraction)
3899 index = CGF.Builder.CreateNeg(index, "idx.neg");
3900
3901 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
3902 CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
3903 /*Accessed*/ false);
3904
3906 = pointerOperand->getType()->getAs<PointerType>();
3907 if (!pointerType) {
3908 QualType objectType = pointerOperand->getType()
3910 ->getPointeeType();
3911 llvm::Value *objectSize
3912 = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
3913
3914 index = CGF.Builder.CreateMul(index, objectSize);
3915
3916 Value *result =
3917 CGF.Builder.CreateGEP(CGF.Int8Ty, pointer, index, "add.ptr");
3918 return CGF.Builder.CreateBitCast(result, pointer->getType());
3919 }
3920
3921 QualType elementType = pointerType->getPointeeType();
3922 if (const VariableArrayType *vla
3923 = CGF.getContext().getAsVariableArrayType(elementType)) {
3924 // The element count here is the total number of non-VLA elements.
3925 llvm::Value *numElements = CGF.getVLASize(vla).NumElts;
3926
3927 // Effectively, the multiply by the VLA size is part of the GEP.
3928 // GEP indexes are signed, and scaling an index isn't permitted to
3929 // signed-overflow, so we use the same semantics for our explicit
3930 // multiply. We suppress this if overflow is not undefined behavior.
3931 llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
3933 index = CGF.Builder.CreateMul(index, numElements, "vla.index");
3934 pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
3935 } else {
3936 index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
3937 pointer = CGF.EmitCheckedInBoundsGEP(
3938 elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
3939 "add.ptr");
3940 }
3941 return pointer;
3942 }
3943
3944 // Explicitly handle GNU void* and function pointer arithmetic extensions. The
3945 // GNU void* casts amount to no-ops since our void* type is i8*, but this is
3946 // future proof.
3947 llvm::Type *elemTy;
3948 if (elementType->isVoidType() || elementType->isFunctionType())
3949 elemTy = CGF.Int8Ty;
3950 else
3951 elemTy = CGF.ConvertTypeForMem(elementType);
3952
3954 return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
3955
3956 return CGF.EmitCheckedInBoundsGEP(
3957 elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
3958 "add.ptr");
3959}
3960
3961// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
3962// Addend. Use negMul and negAdd to negate the first operand of the Mul or
3963// the add operand respectively. This allows fmuladd to represent a*b-c, or
3964// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
3965// efficient operations.
3966static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
3967 const CodeGenFunction &CGF, CGBuilderTy &Builder,
3968 bool negMul, bool negAdd) {
3969 Value *MulOp0 = MulOp->getOperand(0);
3970 Value *MulOp1 = MulOp->getOperand(1);
3971 if (negMul)
3972 MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
3973 if (negAdd)
3974 Addend = Builder.CreateFNeg(Addend, "neg");
3975
3976 Value *FMulAdd = nullptr;
3977 if (Builder.getIsFPConstrained()) {
3978 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
3979 "Only constrained operation should be created when Builder is in FP "
3980 "constrained mode");
3981 FMulAdd = Builder.CreateConstrainedFPCall(
3982 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
3983 Addend->getType()),
3984 {MulOp0, MulOp1, Addend});
3985 } else {
3986 FMulAdd = Builder.CreateCall(
3987 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
3988 {MulOp0, MulOp1, Addend});
3989 }
3990 MulOp->eraseFromParent();
3991
3992 return FMulAdd;
3993}
3994
// Check whether it would be legal to emit an fmuladd intrinsic call to
// represent op and if so, build the fmuladd.
//
// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
// Does NOT check the type of the operation - it's assumed that this function
// will be called from contexts where it's known that the type is contractable.
//
// Returns the fused call, or nullptr when fusion is not possible; in the
// latter case no IR has been modified.  On success the matched fmul (and any
// matched fneg) instruction is erased, so the caller must not reference
// op.LHS / op.RHS afterwards.
static Value* tryEmitFMulAdd(const BinOpInfo &op,
                             const CodeGenFunction &CGF, CGBuilderTy &Builder,
                             bool isSub=false) {

  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
         "Only fadd/fsub can be the root of an fmuladd.");

  // Check whether this op is marked as fusable.
  if (!op.FPFeatures.allowFPContractWithinStatement())
    return nullptr;

  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  // Peek through fneg to look for fmul. Make sure fneg has no users, and that
  // it is the only use of its operand.
  bool NegLHS = false;
  if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
    if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
      LHS = LHSUnOp->getOperand(0);
      NegLHS = true;
    }
  }

  bool NegRHS = false;
  if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
    if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
      RHS = RHSUnOp->getOperand(0);
      NegRHS = true;
    }
  }

  // We have a potentially fusable op. Look for a mul on one of the operands.
  // Also, make sure that the mul result isn't used directly. In that case,
  // there's no point creating a muladd operation.
  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      // (a*b) +/- c: any LHS negation folds into the mul's first operand.
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      // c +/- (a*b): a subtraction or an RHS negation (but not both) negates
      // the mul, hence isSub ^ NegRHS.
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
    }
  }

  // Same matching as above, but for the FP-constrained fmul intrinsic.
  if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
    if (LHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
    if (RHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
    }
  }

  return nullptr;
}
4081
4082Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
4083 if (op.LHS->getType()->isPointerTy() ||
4084 op.RHS->getType()->isPointerTy())
4086
4087 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4088 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4090 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4091 return Builder.CreateAdd(op.LHS, op.RHS, "add");
4092 [[fallthrough]];
4094 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4095 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
4096 [[fallthrough]];
4098 if (CanElideOverflowCheck(CGF.getContext(), op))
4099 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
4100 return EmitOverflowCheckedBinOp(op);
4101 }
4102 }
4103
4104 // For vector and matrix adds, try to fold into a fmuladd.
4105 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4106 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4107 // Try to form an fmuladd.
4108 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
4109 return FMulAdd;
4110 }
4111
4112 if (op.Ty->isConstantMatrixType()) {
4113 llvm::MatrixBuilder MB(Builder);
4114 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4115 return MB.CreateAdd(op.LHS, op.RHS);
4116 }
4117
4118 if (op.Ty->isUnsignedIntegerType() &&
4119 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
4120 !CanElideOverflowCheck(CGF.getContext(), op))
4121 return EmitOverflowCheckedBinOp(op);
4122
4123 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4124 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4125 return Builder.CreateFAdd(op.LHS, op.RHS, "add");
4126 }
4127
4128 if (op.isFixedPointOp())
4129 return EmitFixedPointBinOp(op);
4130
4131 return Builder.CreateAdd(op.LHS, op.RHS, "add");
4132}
4133
4134/// The resulting value must be calculated with exact precision, so the operands
4135/// may not be the same type.
4136Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
4137 using llvm::APSInt;
4138 using llvm::ConstantInt;
4139
4140 // This is either a binary operation where at least one of the operands is
4141 // a fixed-point type, or a unary operation where the operand is a fixed-point
4142 // type. The result type of a binary operation is determined by
4143 // Sema::handleFixedPointConversions().
4144 QualType ResultTy = op.Ty;
4145 QualType LHSTy, RHSTy;
4146 if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
4147 RHSTy = BinOp->getRHS()->getType();
4148 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
4149 // For compound assignment, the effective type of the LHS at this point
4150 // is the computation LHS type, not the actual LHS type, and the final
4151 // result type is not the type of the expression but rather the
4152 // computation result type.
4153 LHSTy = CAO->getComputationLHSType();
4154 ResultTy = CAO->getComputationResultType();
4155 } else
4156 LHSTy = BinOp->getLHS()->getType();
4157 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
4158 LHSTy = UnOp->getSubExpr()->getType();
4159 RHSTy = UnOp->getSubExpr()->getType();
4160 }
4161 ASTContext &Ctx = CGF.getContext();
4162 Value *LHS = op.LHS;
4163 Value *RHS = op.RHS;
4164
4165 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
4166 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
4167 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
4168 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
4169
4170 // Perform the actual operation.
4171 Value *Result;
4172 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
4173 switch (op.Opcode) {
4174 case BO_AddAssign:
4175 case BO_Add:
4176 Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
4177 break;
4178 case BO_SubAssign:
4179 case BO_Sub:
4180 Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
4181 break;
4182 case BO_MulAssign:
4183 case BO_Mul:
4184 Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
4185 break;
4186 case BO_DivAssign:
4187 case BO_Div:
4188 Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
4189 break;
4190 case BO_ShlAssign:
4191 case BO_Shl:
4192 Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
4193 break;
4194 case BO_ShrAssign:
4195 case BO_Shr:
4196 Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
4197 break;
4198 case BO_LT:
4199 return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
4200 case BO_GT:
4201 return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
4202 case BO_LE:
4203 return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4204 case BO_GE:
4205 return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4206 case BO_EQ:
4207 // For equality operations, we assume any padding bits on unsigned types are
4208 // zero'd out. They could be overwritten through non-saturating operations
4209 // that cause overflow, but this leads to undefined behavior.
4210 return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
4211 case BO_NE:
4212 return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4213 case BO_Cmp:
4214 case BO_LAnd:
4215 case BO_LOr:
4216 llvm_unreachable("Found unimplemented fixed point binary operation");
4217 case BO_PtrMemD:
4218 case BO_PtrMemI:
4219 case BO_Rem:
4220 case BO_Xor:
4221 case BO_And:
4222 case BO_Or:
4223 case BO_Assign:
4224 case BO_RemAssign:
4225 case BO_AndAssign:
4226 case BO_XorAssign:
4227 case BO_OrAssign:
4228 case BO_Comma:
4229 llvm_unreachable("Found unsupported binary operation for fixed point types.");
4230 }
4231
4232 bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
4234 // Convert to the result type.
4235 return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
4236 : CommonFixedSema,
4237 ResultFixedSema);
4238}
4239
4240Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
4241 // The LHS is always a pointer if either side is.
4242 if (!op.LHS->getType()->isPointerTy()) {
4243 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4244 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4246 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4247 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4248 [[fallthrough]];
4250 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4251 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
4252 [[fallthrough]];
4254 if (CanElideOverflowCheck(CGF.getContext(), op))
4255 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
4256 return EmitOverflowCheckedBinOp(op);
4257 }
4258 }
4259
4260 // For vector and matrix subs, try to fold into a fmuladd.
4261 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4262 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4263 // Try to form an fmuladd.
4264 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
4265 return FMulAdd;
4266 }
4267
4268 if (op.Ty->isConstantMatrixType()) {
4269 llvm::MatrixBuilder MB(Builder);
4270 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4271 return MB.CreateSub(op.LHS, op.RHS);
4272 }
4273
4274 if (op.Ty->isUnsignedIntegerType() &&
4275 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
4276 !CanElideOverflowCheck(CGF.getContext(), op))
4277 return EmitOverflowCheckedBinOp(op);
4278
4279 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4280 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4281 return Builder.CreateFSub(op.LHS, op.RHS, "sub");
4282 }
4283
4284 if (op.isFixedPointOp())
4285 return EmitFixedPointBinOp(op);
4286
4287 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4288 }
4289
4290 // If the RHS is not a pointer, then we have normal pointer
4291 // arithmetic.
4292 if (!op.RHS->getType()->isPointerTy())
4294
4295 // Otherwise, this is a pointer subtraction.
4296
4297 // Do the raw subtraction part.
4298 llvm::Value *LHS
4299 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
4300 llvm::Value *RHS
4301 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
4302 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
4303
4304 // Okay, figure out the element size.
4305 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
4306 QualType elementType = expr->getLHS()->getType()->getPointeeType();
4307
4308 llvm::Value *divisor = nullptr;
4309
4310 // For a variable-length array, this is going to be non-constant.
4311 if (const VariableArrayType *vla
4312 = CGF.getContext().getAsVariableArrayType(elementType)) {
4313 auto VlaSize = CGF.getVLASize(vla);
4314 elementType = VlaSize.Type;
4315 divisor = VlaSize.NumElts;
4316
4317 // Scale the number of non-VLA elements by the non-VLA element size.
4318 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
4319 if (!eltSize.isOne())
4320 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
4321
4322 // For everything elese, we can just compute it, safe in the
4323 // assumption that Sema won't let anything through that we can't
4324 // safely compute the size of.
4325 } else {
4326 CharUnits elementSize;
4327 // Handle GCC extension for pointer arithmetic on void* and
4328 // function pointer types.
4329 if (elementType->isVoidType() || elementType->isFunctionType())
4330 elementSize = CharUnits::One();
4331 else
4332 elementSize = CGF.getContext().getTypeSizeInChars(elementType);
4333
4334 // Don't even emit the divide for element size of 1.
4335 if (elementSize.isOne())
4336 return diffInChars;
4337
4338 divisor = CGF.CGM.getSize(elementSize);
4339 }
4340
4341 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
4342 // pointer difference in C is only defined in the case where both operands
4343 // are pointing to elements of an array.
4344 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
4345}
4346
4347Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS) {
4348 llvm::IntegerType *Ty;
4349 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4350 Ty = cast<llvm::IntegerType>(VT->getElementType());
4351 else
4352 Ty = cast<llvm::IntegerType>(LHS->getType());
4353 // For a given type of LHS the maximum shift amount is width(LHS)-1, however
4354 // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
4355 // this in ConstantInt::get, this results in the value getting truncated.
4356 // Constrain the return value to be max(RHS) in this case.
4357 llvm::Type *RHSTy = RHS->getType();
4358 llvm::APInt RHSMax = llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
4359 if (RHSMax.ult(Ty->getBitWidth()))
4360 return llvm::ConstantInt::get(RHSTy, RHSMax);
4361 return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
4362}
4363
4364Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
4365 const Twine &Name) {
4366 llvm::IntegerType *Ty;
4367 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4368 Ty = cast<llvm::IntegerType>(VT->getElementType());
4369 else
4370 Ty = cast<llvm::IntegerType>(LHS->getType());
4371
4372 if (llvm::isPowerOf2_64(Ty->getBitWidth()))
4373 return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS), Name);
4374
4375 return Builder.CreateURem(
4376 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
4377}
4378
4379Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
4380 // TODO: This misses out on the sanitizer check below.
4381 if (Ops.isFixedPointOp())
4382 return EmitFixedPointBinOp(Ops);
4383
4384 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4385 // RHS to the same size as the LHS.
4386 Value *RHS = Ops.RHS;
4387 if (Ops.LHS->getType() != RHS->getType())
4388 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4389
4390 bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
4391 Ops.Ty->hasSignedIntegerRepresentation() &&
4393 !CGF.getLangOpts().CPlusPlus20;
4394 bool SanitizeUnsignedBase =
4395 CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
4396 Ops.Ty->hasUnsignedIntegerRepresentation();
4397 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
4398 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
4399 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4400 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4401 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
4402 else if ((SanitizeBase || SanitizeExponent) &&
4403 isa<llvm::IntegerType>(Ops.LHS->getType())) {
4404 CodeGenFunction::SanitizerScope SanScope(&CGF);
4406 llvm::Value *WidthMinusOne = GetMaximumShiftAmount(Ops.LHS, Ops.RHS);
4407 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
4408
4409 if (SanitizeExponent) {
4410 Checks.push_back(
4411 std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
4412 }
4413
4414 if (SanitizeBase) {
4415 // Check whether we are shifting any non-zero bits off the top of the
4416 // integer. We only emit this check if exponent is valid - otherwise
4417 // instructions below will have undefined behavior themselves.
4418 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
4419 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4420 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
4421 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
4422 llvm::Value *PromotedWidthMinusOne =
4423 (RHS == Ops.RHS) ? WidthMinusOne
4424 : GetMaximumShiftAmount(Ops.LHS, RHS);
4425 CGF.EmitBlock(CheckShiftBase);
4426 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
4427 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
4428 /*NUW*/ true, /*NSW*/ true),
4429 "shl.check");
4430 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
4431 // In C99, we are not permitted to shift a 1 bit into the sign bit.
4432 // Under C++11's rules, shifting a 1 bit into the sign bit is
4433 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
4434 // define signed left shifts, so we use the C99 and C++11 rules there).
4435 // Unsigned shifts can always shift into the top bit.
4436 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
4437 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
4438 }
4439 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
4440 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
4441 CGF.EmitBlock(Cont);
4442 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
4443 BaseCheck->addIncoming(Builder.getTrue(), Orig);
4444 BaseCheck->addIncoming(ValidBase, CheckShiftBase);
4445 Checks.push_back(std::make_pair(
4446 BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase
4447 : SanitizerKind::UnsignedShiftBase));
4448 }
4449
4450 assert(!Checks.empty());
4451 EmitBinOpCheck(Checks, Ops);
4452 }
4453
4454 return Builder.CreateShl(Ops.LHS, RHS, "shl");
4455}
4456
4457Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
4458 // TODO: This misses out on the sanitizer check below.
4459 if (Ops.isFixedPointOp())
4460 return EmitFixedPointBinOp(Ops);
4461
4462 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4463 // RHS to the same size as the LHS.
4464 Value *RHS = Ops.RHS;
4465 if (Ops.LHS->getType() != RHS->getType())
4466 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4467
4468 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4469 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4470 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
4471 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
4472 isa<llvm::IntegerType>(Ops.LHS->getType())) {
4473 CodeGenFunction::SanitizerScope SanScope(&CGF);
4474 llvm::Value *Valid =
4475 Builder.CreateICmpULE(Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS));
4476 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
4477 }
4478
4479 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4480 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
4481 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
4482}
4483
4485// return corresponding comparison intrinsic for given vector type
4486static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
4487 BuiltinType::Kind ElemKind) {
4488 switch (ElemKind) {
4489 default: llvm_unreachable("unexpected element type");
4490 case BuiltinType::Char_U:
4491 case BuiltinType::UChar:
4492 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4493 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
4494 case BuiltinType::Char_S:
4495 case BuiltinType::SChar:
4496 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4497 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
4498 case BuiltinType::UShort:
4499 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4500 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
4501 case BuiltinType::Short:
4502 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4503 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
4504 case BuiltinType::UInt:
4505 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4506 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
4507 case BuiltinType::Int:
4508 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4509 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
4510 case BuiltinType::ULong:
4511 case BuiltinType::ULongLong:
4512 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4513 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
4514 case BuiltinType::Long:
4515 case BuiltinType::LongLong:
4516 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4517 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
4518 case BuiltinType::Float:
4519 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
4520 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
4521 case BuiltinType::Double:
4522 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
4523 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
4524 case BuiltinType::UInt128:
4525 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4526 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
4527 case BuiltinType::Int128:
4528 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4529 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
4530 }
4531}
4532
4533Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
4534 llvm::CmpInst::Predicate UICmpOpc,
4535 llvm::CmpInst::Predicate SICmpOpc,
4536 llvm::CmpInst::Predicate FCmpOpc,
4537 bool IsSignaling) {
4538 TestAndClearIgnoreResultAssign();
4539 Value *Result;
4540 QualType LHSTy = E->getLHS()->getType();
4541 QualType RHSTy = E->getRHS()->getType();
4542 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
4543 assert(E->getOpcode() == BO_EQ ||
4544 E->getOpcode() == BO_NE);
4545 Value *LHS = CGF.EmitScalarExpr(E->getLHS());
4546 Value *RHS = CGF.EmitScalarExpr(E->getRHS());
4548 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
4549 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
4550 BinOpInfo BOInfo = EmitBinOps(E);
4551 Value *LHS = BOInfo.LHS;
4552 Value *RHS = BOInfo.RHS;
4553
4554 // If AltiVec, the comparison results in a numeric type, so we use
4555 // intrinsics comparing vectors and giving 0 or 1 as a result
4556 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
4557 // constants for mapping CR6 register bits to predicate result
4558 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
4559
4560 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
4561
4562 // in several cases vector arguments order will be reversed
4563 Value *FirstVecArg = LHS,
4564 *SecondVecArg = RHS;
4565
4566 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
4567 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
4568
4569 switch(E->getOpcode()) {
4570 default: llvm_unreachable("is not a comparison operation");
4571 case BO_EQ:
4572 CR6 = CR6_LT;
4573 ID = GetIntrinsic(VCMPEQ, ElementKind);
4574 break;
4575 case BO_NE:
4576 CR6 = CR6_EQ;
4577 ID = GetIntrinsic(VCMPEQ, ElementKind);
4578 break;
4579 case BO_LT:
4580 CR6 = CR6_LT;
4581 ID = GetIntrinsic(VCMPGT, ElementKind);
4582 std::swap(FirstVecArg, SecondVecArg);
4583 break;
4584 case BO_GT:
4585 CR6 = CR6_LT;
4586 ID = GetIntrinsic(VCMPGT, ElementKind);
4587 break;
4588 case BO_LE:
4589 if (ElementKind == BuiltinType::Float) {
4590 CR6 = CR6_LT;
4591 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4592 std::swap(FirstVecArg, SecondVecArg);
4593 }
4594 else {
4595 CR6 = CR6_EQ;
4596 ID = GetIntrinsic(VCMPGT, ElementKind);
4597 }
4598 break;
4599 case BO_GE:
4600 if (ElementKind == BuiltinType::Float) {
4601 CR6 = CR6_LT;
4602 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4603 }
4604 else {
4605 CR6 = CR6_EQ;
4606 ID = GetIntrinsic(VCMPGT, ElementKind);
4607 std::swap(FirstVecArg, SecondVecArg);
4608 }
4609 break;
4610 }
4611
4612 Value *CR6Param = Builder.getInt32(CR6);
4613 llvm::Function *F = CGF.CGM.getIntrinsic(ID);
4614 Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
4615
4616 // The result type of intrinsic may not be same as E->getType().
4617 // If E->getType() is not BoolTy, EmitScalarConversion will do the
4618 // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
4619 // do nothing, if ResultTy is not i1 at the same time, it will cause
4620 // crash later.
4621 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
4622 if (ResultTy->getBitWidth() > 1 &&
4623 E->getType() == CGF.getContext().BoolTy)
4624 Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
4625 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
4626 E->getExprLoc());
4627 }
4628
4629 if (BOInfo.isFixedPointOp()) {
4630 Result = EmitFixedPointBinOp(BOInfo);
4631 } else if (LHS->getType()->isFPOrFPVectorTy()) {
4632 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
4633 if (!IsSignaling)
4634 Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
4635 else
4636 Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
4637 } else if (LHSTy->hasSignedIntegerRepresentation()) {
4638 Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
4639 } else {
4640 // Unsigned integers and pointers.
4641
4642 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
4643 !isa<llvm::ConstantPointerNull>(LHS) &&
4644 !isa<llvm::ConstantPointerNull>(RHS)) {
4645
4646 // Dynamic information is required to be stripped for comparisons,
4647 // because it could leak the dynamic information. Based on comparisons
4648 // of pointers to dynamic objects, the optimizer can replace one pointer
4649 // with another, which might be incorrect in presence of invariant
4650 // groups. Comparison with null is safe because null does not carry any
4651 // dynamic information.
4652 if (LHSTy.mayBeDynamicClass())
4653 LHS = Builder.CreateStripInvariantGroup(LHS);
4654 if (RHSTy.mayBeDynamicClass())
4655 RHS = Builder.CreateStripInvariantGroup(RHS);
4656 }
4657
4658 Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
4659 }
4660
4661 // If this is a vector comparison, sign extend the result to the appropriate
4662 // vector integer type and return it (don't convert to bool).
4663 if (LHSTy->isVectorType())
4664 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
4665
4666 } else {
4667 // Complex Comparison: can only be an equality comparison.
4669 QualType CETy;
4670 if (auto *CTy = LHSTy->getAs<ComplexType>()) {
4671 LHS = CGF.EmitComplexExpr(E->getLHS());
4672 CETy = CTy->getElementType();
4673 } else {
4674 LHS.first = Visit(E->getLHS());
4675 LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
4676 CETy = LHSTy;
4677 }
4678 if (auto *CTy = RHSTy->getAs<ComplexType>()) {
4679 RHS = CGF.EmitComplexExpr(E->getRHS());
4680 assert(CGF.getContext().hasSameUnqualifiedType(CETy,
4681 CTy->getElementType()) &&
4682 "The element types must always match.");
4683 (void)CTy;
4684 } else {
4685 RHS.first = Visit(E->getRHS());
4686 RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
4687 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
4688 "The element types must always match.");
4689 }
4690
4691 Value *ResultR, *ResultI;
4692 if (CETy->isRealFloatingType()) {
4693 // As complex comparisons can only be equality comparisons, they
4694 // are never signaling comparisons.
4695 ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
4696 ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
4697 } else {
4698 // Complex comparisons can only be equality comparisons. As such, signed
4699 // and unsigned opcodes are the same.
4700 ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
4701 ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
4702 }
4703
4704 if (E->getOpcode() == BO_EQ) {
4705 Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
4706 } else {
4707 assert(E->getOpcode() == BO_NE &&
4708 "Complex comparison other than == or != ?");
4709 Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
4710 }
4711 }
4712
4713 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
4714 E->getExprLoc());
4715}
4716
4718 const BinaryOperator *E, Value **Previous, QualType *SrcType) {
4719 // In case we have the integer or bitfield sanitizer checks enabled
4720 // we want to get the expression before scalar conversion.
4721 if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
4722 CastKind Kind = ICE->getCastKind();
4723 if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
4724 *SrcType = ICE->getSubExpr()->getType();
4725 *Previous = EmitScalarExpr(ICE->getSubExpr());
4726 // Pass default ScalarConversionOpts to avoid emitting
4727 // integer sanitizer checks as E refers to bitfield.
4728 return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
4729 ICE->getExprLoc());
4730 }
4731 }
4732 return EmitScalarExpr(E->getRHS());
4733}
4734
/// Emit a simple assignment 'LHS = RHS' and produce its scalar result value.
/// The store strategy is selected by the ObjC ARC ownership qualifier of the
/// LHS type (EmitARCStoreStrong / Autoreleasing / UnsafeUnretained / Weak,
/// with a plain checked store for unqualified lvalues).
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  // NOTE(review): the 'case Qualifiers::OCL_*:' labels of this switch appear
  // to have been lost in extraction — confirm against upstream before relying
  // on this text.
  switch (E->getLHS()->getType().getObjCLifetime()) {
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

    // Weak stores evaluate the RHS first, then store through the ARC runtime.
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore);
    break;

    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if LHS is a bitfield, if RHS contains an implicit cast expression
    // we want to extract that value and potentially (if the bitfield sanitizer
    // is enabled) use it to check for an implicit conversion.
    if (E->getLHS()->refersToBitField())
      RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
    else
      RHS = Visit(E->getRHS());

    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      Value *Src = Previous ? Previous : RHS;
      QualType DstType = E->getLHS()->getType();
      CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
                                      LHS.getBitFieldInfo(), E->getExprLoc());
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}
4808
/// Emit '&&'. Vectors are compared element-wise against zero and AND-ed;
/// otherwise a constant-foldable LHS short-circuits at compile time, and the
/// general case emits control flow with a PHI merging 'false' edges from the
/// LHS with the evaluated RHS condition. MCDC bookkeeping brackets the whole
/// operator nest.
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    // Sign-extend the i1 AND back to the vector's integer element type.
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      // NOTE(review): the body of this 'if' appears to have been lost in
      // extraction — confirm against upstream.
      if (CGF.MCDCLogOpStack.empty())

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      // NOTE(review): the continuation of this condition appears to have been
      // lost in extraction — confirm against upstream.
      if (InstrumentRegions &&
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
        Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      }

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::Constant::getNullValue(ResTy);
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reaquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  // NOTE(review): the continuation of this condition appears to have been
  // lost in extraction — confirm against upstream.
  if (InstrumentRegions &&
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit line number for unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())

  // Artificial location to preserve the scope information
  {
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}
4948
// Emit scalar code for a logical-OR expression.  Vector operands are lowered
// elementwise: each side is compared against zero and the results are
// combined with a bitwise `or` (no short-circuiting).  Scalar operands get
// short-circuit control flow: the LHS branches directly to "lor.end" when
// true, otherwise falls into "lor.rhs", and a phi in "lor.end" merges the
// incoming boolean values.  Constant-foldable LHS values elide the control
// flow entirely.  When profiling/coverage instrumentation is enabled, extra
// counter blocks and MC/DC bitmap updates are emitted along the way.
4949Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
4950 // Perform vector logical or on comparisons with zero vectors.
4951 if (E->getType()->isVectorType()) {
4953
4954 Value *LHS = Visit(E->getLHS());
4955 Value *RHS = Visit(E->getRHS());
4956 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4957 if (LHS->getType()->isFPOrFPVectorTy()) {
// Floating-point compares must honor the FP environment in effect at
// this expression, hence the scoped FP-options RAII object.
4958 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4959 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4960 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4961 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4962 } else {
4963 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4964 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4965 }
// Sign-extend the i1 vector of compare results back to the result type
// (true lanes become all-ones, matching vector-bool semantics).
4966 Value *Or = Builder.CreateOr(LHS, RHS);
4967 return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
4968 }
4969
4970 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4971 llvm::Type *ResTy = ConvertType(E->getType());
4972
4973 // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
4974 // If we have 0 || X, just emit X without inserting the control flow.
4975 bool LHSCondVal;
4976 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4977 if (!LHSCondVal) { // If we have 0 || X, just emit X.
4979
4980 // If the top of the logical operator nest, reset the MCDC temp to 0.
4981 if (CGF.MCDCLogOpStack.empty())
4983
4984 CGF.MCDCLogOpStack.push_back(E);
4985
4986 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4987
4988 // If we're generating for profiling or coverage, generate a branch to a
4989 // block that increments the RHS counter needed to track branch condition
4990 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4991 // "FalseBlock" after the increment is done.
4992 if (InstrumentRegions &&
4994 CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
4995 llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
4996 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
4997 Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
4998 CGF.EmitBlock(RHSBlockCnt);
5000 CGF.EmitBranch(FBlock);
5001 CGF.EmitBlock(FBlock);
5002 }
5003
5004 CGF.MCDCLogOpStack.pop_back();
5005 // If the top of the logical operator nest, update the MCDC bitmap.
5006 if (CGF.MCDCLogOpStack.empty())
5008
5009 // ZExt result to int or bool.
5010 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
5011 }
5012
5013 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
// Safe only when the RHS contains no label that could be jumped to from
// outside the expression; otherwise the RHS blocks must still be emitted.
5014 if (!CGF.ContainsLabel(E->getRHS()))
5015 return llvm::ConstantInt::get(ResTy, 1);
5016 }
5017
5018 // If the top of the logical operator nest, reset the MCDC temp to 0.
5019 if (CGF.MCDCLogOpStack.empty())
5021
5022 CGF.MCDCLogOpStack.push_back(E);
5023
5024 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
5025 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
5026
5027 CodeGenFunction::ConditionalEvaluation eval(CGF);
5028
5029 // Branch on the LHS first. If it is true, go to the success (cont) block.
5030 CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
5032 CGF.getProfileCount(E->getRHS()));
5033
5034 // Any edges into the ContBlock are now from an (indeterminate number of)
5035 // edges from this first condition. All of these values will be true. Start
5036 // setting up the PHI node in the Cont Block for this.
5037 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
5038 "", ContBlock);
5039 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
5040 PI != PE; ++PI)
5041 PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
5042
5043 eval.begin(CGF);
5044
5045 // Emit the RHS condition as a bool value.
5046 CGF.EmitBlock(RHSBlock);
5048 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5049
5050 eval.end(CGF);
5051
5052 // Reacquire the RHS block, as there may be subblocks inserted.
5053 RHSBlock = Builder.GetInsertBlock();
5054
5055 // If we're generating for profiling or coverage, generate a branch on the
5056 // RHS to a block that increments the RHS true counter needed to track branch
5057 // condition coverage.
5058 if (InstrumentRegions &&
5060 CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5061 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
5062 Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
5063 CGF.EmitBlock(RHSBlockCnt);
5065 CGF.EmitBranch(ContBlock);
5066 PN->addIncoming(RHSCond, RHSBlockCnt);
5067 }
5068
5069 // Emit an unconditional branch from this block to ContBlock. Insert an entry
5070 // into the phi node for the edge with the value of RHSCond.
5071 CGF.EmitBlock(ContBlock);
5072 PN->addIncoming(RHSCond, RHSBlock);
5073
5074 CGF.MCDCLogOpStack.pop_back();
5075 // If the top of the logical operator nest, update the MCDC bitmap.
5076 if (CGF.MCDCLogOpStack.empty())
5078
5079 // ZExt result to int.
5080 return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
5081}
5082
5083Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
5084 CGF.EmitIgnoredExpr(E->getLHS());
5085 CGF.EnsureInsertPoint();
5086 return Visit(E->getRHS());
5087}
5088
5089//===----------------------------------------------------------------------===//
5090// Other Operators
5091//===----------------------------------------------------------------------===//
5092
5093/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
5094/// expression is cheap enough and side-effect-free enough to evaluate
5095/// unconditionally instead of conditionally. This is used to convert control
5096/// flow into selects in some cases.
5098 CodeGenFunction &CGF) {
5099 // Anything that is an integer or floating point constant is fine.
5100 return E->IgnoreParens()->isEvaluatable(CGF.getContext());
5101
// NOTE: the commentary below is deliberately placed after the return; it
// records why this predicate is intentionally restricted to expressions
// that fully constant-fold, rather than allowing "cheap" variable reads.
5102 // Even non-volatile automatic variables can't be evaluated unconditionally.
5103 // Referencing a thread_local may cause non-trivial initialization work to
5104 // occur. If we're inside a lambda and one of the variables is from the scope
5105 // outside the lambda, that function may have returned already. Reading its
5106 // locals is a bad idea. Also, these reads may introduce races that didn't
5107 // exist in the source-level program.
5108}
5109
5110
5111Value *ScalarExprEmitter::
5112VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
5113 TestAndClearIgnoreResultAssign();
5114
5115 // Bind the common expression if necessary.
5116 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
5117
5118 Expr *condExpr = E->getCond();
5119 Expr *lhsExpr = E->getTrueExpr();
5120 Expr *rhsExpr = E->getFalseExpr();
5121
5122 // If the condition constant folds and can be elided, try to avoid emitting
5123 // the condition and the dead arm.
5124 bool CondExprBool;
5125 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5126 Expr *live = lhsExpr, *dead = rhsExpr;
5127 if (!CondExprBool) std::swap(live, dead);
5128
5129 // If the dead side doesn't have labels we need, just emit the Live part.
5130 if (!CGF.ContainsLabel(dead)) {
5131 if (CondExprBool) {
5133 CGF.incrementProfileCounter(lhsExpr);
5134 CGF.incrementProfileCounter(rhsExpr);
5135 }
5137 }
5138 Value *Result = Visit(live);
5139
5140 // If the live part is a throw expression, it acts like it has a void
5141 // type, so evaluating it returns a null Value*. However, a conditional
5142 // with non-void type must return a non-null Value*.
5143 if (!Result && !E->getType()->isVoidType())
5144 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
5145
5146 return Result;
5147 }
5148 }
5149
5150 // OpenCL: If the condition is a vector, we can treat this condition like
5151 // the select function.
5152 if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
5153 condExpr->getType()->isExtVectorType()) {
5155
5156 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5157 llvm::Value *LHS = Visit(lhsExpr);
5158 llvm::Value *RHS = Visit(rhsExpr);
5159
5160 llvm::Type *condType = ConvertType(condExpr->getType());
5161 auto *vecTy = cast<llvm::FixedVectorType>(condType);
5162
5163 unsigned numElem = vecTy->getNumElements();
5164 llvm::Type *elemType = vecTy->getElementType();
5165
5166 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
5167