clang 22.0.0git
CGExprScalar.cpp
Go to the documentation of this file.
1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGHLSLRuntime.h"
17#include "CGObjCRuntime.h"
18#include "CGOpenMPRuntime.h"
19#include "CGRecordLayout.h"
20#include "CodeGenFunction.h"
21#include "CodeGenModule.h"
22#include "ConstantEmitter.h"
23#include "TargetInfo.h"
24#include "TrapReasonBuilder.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/DeclObjC.h"
28#include "clang/AST/Expr.h"
35#include "llvm/ADT/APFixedPoint.h"
36#include "llvm/ADT/ScopeExit.h"
37#include "llvm/IR/Argument.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constants.h"
40#include "llvm/IR/DataLayout.h"
41#include "llvm/IR/DerivedTypes.h"
42#include "llvm/IR/FixedPointBuilder.h"
43#include "llvm/IR/Function.h"
44#include "llvm/IR/GEPNoWrapFlags.h"
45#include "llvm/IR/GetElementPtrTypeIterator.h"
46#include "llvm/IR/GlobalVariable.h"
47#include "llvm/IR/Intrinsics.h"
48#include "llvm/IR/IntrinsicsPowerPC.h"
49#include "llvm/IR/MatrixBuilder.h"
50#include "llvm/IR/Module.h"
51#include "llvm/Support/TypeSize.h"
52#include <cstdarg>
53#include <optional>
54
55using namespace clang;
56using namespace CodeGen;
57using llvm::Value;
58
59//===----------------------------------------------------------------------===//
60// Scalar Expression Emitter
61//===----------------------------------------------------------------------===//
62
63namespace llvm {
64extern cl::opt<bool> EnableSingleByteCoverage;
65} // namespace llvm
66
67namespace {
68
69/// Determine whether the given binary operation may overflow.
70/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
71/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
72/// the returned overflow check is precise. The returned value is 'true' for
73/// all other opcodes, to be conservative.
74bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
75 BinaryOperator::Opcode Opcode, bool Signed,
76 llvm::APInt &Result) {
77 // Assume overflow is possible, unless we can prove otherwise.
78 bool Overflow = true;
79 const auto &LHSAP = LHS->getValue();
80 const auto &RHSAP = RHS->getValue();
81 if (Opcode == BO_Add) {
82 Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
83 : LHSAP.uadd_ov(RHSAP, Overflow);
84 } else if (Opcode == BO_Sub) {
85 Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
86 : LHSAP.usub_ov(RHSAP, Overflow);
87 } else if (Opcode == BO_Mul) {
88 Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
89 : LHSAP.umul_ov(RHSAP, Overflow);
90 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
91 if (Signed && !RHS->isZero())
92 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
93 else
94 return false;
95 }
96 return Overflow;
97}
98
99struct BinOpInfo {
100 Value *LHS;
101 Value *RHS;
102 QualType Ty; // Computation Type.
103 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
104 FPOptions FPFeatures;
105 const Expr *E; // Entire expr, for error unsupported. May not be binop.
106
107 /// Check if the binop can result in integer overflow.
108 bool mayHaveIntegerOverflow() const {
109 // Without constant input, we can't rule out overflow.
110 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
111 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
112 if (!LHSCI || !RHSCI)
113 return true;
114
115 llvm::APInt Result;
116 return ::mayHaveIntegerOverflow(
117 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
118 }
119
120 /// Check if the binop computes a division or a remainder.
121 bool isDivremOp() const {
122 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
123 Opcode == BO_RemAssign;
124 }
125
126 /// Check if the binop can result in an integer division by zero.
127 bool mayHaveIntegerDivisionByZero() const {
128 if (isDivremOp())
129 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
130 return CI->isZero();
131 return true;
132 }
133
134 /// Check if the binop can result in a float division by zero.
135 bool mayHaveFloatDivisionByZero() const {
136 if (isDivremOp())
137 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
138 return CFP->isZero();
139 return true;
140 }
141
142 /// Check if at least one operand is a fixed point type. In such cases, this
143 /// operation did not follow usual arithmetic conversion and both operands
144 /// might not be of the same type.
145 bool isFixedPointOp() const {
146 // We cannot simply check the result type since comparison operations return
147 // an int.
148 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
149 QualType LHSType = BinOp->getLHS()->getType();
150 QualType RHSType = BinOp->getRHS()->getType();
151 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
152 }
153 if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
154 return UnOp->getSubExpr()->getType()->isFixedPointType();
155 return false;
156 }
157
158 /// Check if the RHS has a signed integer representation.
159 bool rhsHasSignedIntegerRepresentation() const {
160 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
161 QualType RHSType = BinOp->getRHS()->getType();
162 return RHSType->hasSignedIntegerRepresentation();
163 }
164 return false;
165 }
166};
167
168static bool MustVisitNullValue(const Expr *E) {
169 // If a null pointer expression's type is the C++0x nullptr_t, then
170 // it's not necessarily a simple constant and it must be evaluated
171 // for its potential side effects.
172 return E->getType()->isNullPtrType();
173}
174
175/// If \p E is a widened promoted integer, get its base (unpromoted) type.
176static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
177 const Expr *E) {
178 const Expr *Base = E->IgnoreImpCasts();
179 if (E == Base)
180 return std::nullopt;
181
182 QualType BaseTy = Base->getType();
183 if (!Ctx.isPromotableIntegerType(BaseTy) ||
184 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
185 return std::nullopt;
186
187 return BaseTy;
188}
189
190/// Check if \p E is a widened promoted integer.
191static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
192 return getUnwidenedIntegerType(Ctx, E).has_value();
193}
194
195/// Check if we can skip the overflow check for \p Op.
196static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
197 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
198 "Expected a unary or binary operator");
199
200 // If the binop has constant inputs and we can prove there is no overflow,
201 // we can elide the overflow check.
202 if (!Op.mayHaveIntegerOverflow())
203 return true;
204
// Types the user explicitly excluded from the relevant sanitizer never need
// a check.
205 if (Op.Ty->isSignedIntegerType() &&
206 Ctx.isTypeIgnoredBySanitizer(SanitizerKind::SignedIntegerOverflow,
207 Op.Ty)) {
208 return true;
209 }
210
211 if (Op.Ty->isUnsignedIntegerType() &&
212 Ctx.isTypeIgnoredBySanitizer(SanitizerKind::UnsignedIntegerOverflow,
213 Op.Ty)) {
214 return true;
215 }
216
217 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Op.E);
218
// NOTE(review): this capture jumps from original line 219 to 222 — the middle
// of this condition (original lines 220-221) is missing from the extraction.
// Restore it from the original CGExprScalar.cpp before relying on this code.
219 if (UO && UO->getOpcode() == UO_Minus &&
222 UO->isIntegerConstantExpr(Ctx))
223 return true;
224
225 // If a unary op has a widened operand, the op cannot overflow.
226 if (UO)
227 return !UO->canOverflow();
228
229 // We usually don't need overflow checks for binops with widened operands.
230 // Multiplication with promoted unsigned operands is a special case.
231 const auto *BO = cast<BinaryOperator>(Op.E);
232 if (BO->hasExcludedOverflowPattern())
233 return true;
234
// A check is only elidable when BOTH operands are known-widened integers.
235 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
236 if (!OptionalLHSTy)
237 return false;
238
239 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
240 if (!OptionalRHSTy)
241 return false;
242
243 QualType LHSTy = *OptionalLHSTy;
244 QualType RHSTy = *OptionalRHSTy;
245
246 // This is the simple case: binops without unsigned multiplication, and with
247 // widened operands. No overflow check is needed here.
248 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
249 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
250 return true;
251
252 // For unsigned multiplication the overflow check can be elided if either one
253 // of the unpromoted types are less than half the size of the promoted type.
254 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
255 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
256 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
257}
258
259class ScalarExprEmitter
260 : public StmtVisitor<ScalarExprEmitter, Value*> {
261 CodeGenFunction &CGF;
262 CGBuilderTy &Builder;
263 bool IgnoreResultAssign;
264 llvm::LLVMContext &VMContext;
265public:
266
// 'ira' seeds IgnoreResultAssign, a one-shot hint consumed (and cleared) via
// TestAndClearIgnoreResultAssign() below.
267 ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
268 : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
269 VMContext(cgf.getLLVMContext()) {
270 }
271
272 //===--------------------------------------------------------------------===//
273 // Utilities
274 //===--------------------------------------------------------------------===//
275
// Returns the current ignore-result-assign flag and resets it, so the hint
// applies to at most one visit.
276 bool TestAndClearIgnoreResultAssign() {
277 bool I = IgnoreResultAssign;
278 IgnoreResultAssign = false;
279 return I;
280 }
281
// Thin forwarding helpers into CodeGenFunction.
282 llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
283 LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
284 LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
285 return CGF.EmitCheckedLValue(E, TCK);
286 }
287
288 void EmitBinOpCheck(
289 ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
290 const BinOpInfo &Info);
291
// Loads the scalar value stored at the given l-value.
292 Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
293 return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
294 }
295
296 void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
297 const AlignValueAttr *AVAttr = nullptr;
298 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
299 const ValueDecl *VD = DRE->getDecl();
300
301 if (VD->getType()->isReferenceType()) {
302 if (const auto *TTy =
303 VD->getType().getNonReferenceType()->getAs<TypedefType>())
304 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
305 } else {
306 // Assumptions for function parameters are emitted at the start of the
307 // function, so there is no need to repeat that here,
308 // unless the alignment-assumption sanitizer is enabled,
309 // then we prefer the assumption over alignment attribute
310 // on IR function param.
311 if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
312 return;
313
314 AVAttr = VD->getAttr<AlignValueAttr>();
315 }
316 }
317
318 if (!AVAttr)
319 if (const auto *TTy = E->getType()->getAs<TypedefType>())
320 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
321
322 if (!AVAttr)
323 return;
324
325 Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
326 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
327 CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
328 }
329
// NOTE(review): "complex type" in the next line looks like a copy/paste slip —
// this emitter produces scalar values; confirm against upstream.
330 /// EmitLoadOfLValue - Given an expression with complex type that represents a
331 /// value l-value, this method emits the address of the l-value, then loads
332 /// and returns the result.
333 Value *EmitLoadOfLValue(const Expr *E) {
334 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
335 E->getExprLoc());
336
337 EmitLValueAlignmentAssumption(E, V);
338 return V;
339 }
340
341 /// EmitConversionToBool - Convert the specified expression value to a
342 /// boolean (i1) truth value. This is equivalent to "Val != 0".
343 Value *EmitConversionToBool(Value *Src, QualType DstTy);
344
345 /// Emit a check that a conversion from a floating-point type does not
346 /// overflow.
347 void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
348 Value *Src, QualType SrcType, QualType DstType,
349 llvm::Type *DstTy, SourceLocation Loc);
350
351 /// Known implicit conversion check kinds.
352 /// This is used for bitfield conversion checks as well.
353 /// Keep in sync with the enum of the same name in ubsan_handlers.h
354 enum ImplicitConversionCheckKind : unsigned char {
355 ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
356 ICCK_UnsignedIntegerTruncation = 1,
357 ICCK_SignedIntegerTruncation = 2,
358 ICCK_IntegerSignChange = 3,
359 ICCK_SignedIntegerTruncationOrSignChange = 4,
360 };
361
362 /// Emit a check that an [implicit] truncation of an integer does not
363 /// discard any bits. It is not UB, so we use the value after truncation.
364 void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
365 QualType DstType, SourceLocation Loc);
366
367 /// Emit a check that an [implicit] conversion of an integer does not change
368 /// the sign of the value. It is not UB, so we use the value after conversion.
369 /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
370 void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
371 QualType DstType, SourceLocation Loc);
372
373 /// Emit a conversion from the specified type to the specified destination
374 /// type, both of which are LLVM scalar types.
// All flags default to off; the SanitizerSet constructor turns on the
// implicit-conversion checks requested by the active sanitizers.
375 struct ScalarConversionOpts {
376 bool TreatBooleanAsSigned;
377 bool EmitImplicitIntegerTruncationChecks;
378 bool EmitImplicitIntegerSignChangeChecks;
379
380 ScalarConversionOpts()
381 : TreatBooleanAsSigned(false),
382 EmitImplicitIntegerTruncationChecks(false),
383 EmitImplicitIntegerSignChangeChecks(false) {}
384
385 ScalarConversionOpts(clang::SanitizerSet SanOpts)
386 : TreatBooleanAsSigned(false),
387 EmitImplicitIntegerTruncationChecks(
388 SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
389 EmitImplicitIntegerSignChangeChecks(
390 SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
391 };
392 Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
393 llvm::Type *SrcTy, llvm::Type *DstTy,
394 ScalarConversionOpts Opts);
395 Value *
396 EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
397 SourceLocation Loc,
398 ScalarConversionOpts Opts = ScalarConversionOpts());
399
400 /// Convert between either a fixed point and other fixed point or fixed point
401 /// and an integer.
402 Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
403 SourceLocation Loc);
404
405 /// Emit a conversion from the specified complex type to the specified
406 /// destination type, where the destination type is an LLVM scalar type.
407 Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
408 QualType SrcTy, QualType DstTy,
409 SourceLocation Loc);
410
411 /// EmitNullValue - Emit a value that corresponds to null for the given type.
412 Value *EmitNullValue(QualType Ty);
413
414 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
415 Value *EmitFloatToBoolConversion(Value *V) {
416 // Compare against 0.0 for fp scalars.
417 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
418 return Builder.CreateFCmpUNE(V, Zero, "tobool");
419 }
420
421 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
422 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
423 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
424
425 return Builder.CreateICmpNE(V, Zero, "tobool");
426 }
427
428 Value *EmitIntToBoolConversion(Value *V) {
429 // Because of the type rules of C, we often end up computing a
430 // logical value, then zero extending it to int, then wanting it
431 // as a logical value again. Optimize this common case.
432 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
433 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
434 Value *Result = ZI->getOperand(0);
435 // If there aren't any more uses, zap the instruction to save space.
436 // Note that there can be more uses, for example if this
437 // is the result of an assignment.
438 if (ZI->use_empty())
439 ZI->eraseFromParent();
440 return Result;
441 }
442 }
443
444 return Builder.CreateIsNotNull(V, "tobool");
445 }
446
447 //===--------------------------------------------------------------------===//
448 // Visitor Methods
449 //===--------------------------------------------------------------------===//
450
// Entry point: attach a debug location for E, then dispatch via StmtVisitor.
451 Value *Visit(Expr *E) {
452 ApplyDebugLocation DL(CGF, E);
453 return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
454 }
455
// Non-expression statements cannot reach the scalar emitter; dump and abort.
456 Value *VisitStmt(Stmt *S) {
457 S->dump(llvm::errs(), CGF.getContext());
458 llvm_unreachable("Stmt can't have complex result type!");
459 }
460 Value *VisitExpr(Expr *S);
461
462 Value *VisitConstantExpr(ConstantExpr *E) {
463 // A constant expression of type 'void' generates no code and produces no
464 // value.
465 if (E->getType()->isVoidType())
466 return nullptr;
467
468 if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
469 if (E->isGLValue()) {
470 // This was already converted to an rvalue when it was constant
471 // evaluated.
472 if (E->hasAPValueResult() && !E->getAPValueResult().isLValue())
473 return Result;
// NOTE(review): the capture jumps from original line 475 to 477 — the Address
// constructor is missing an argument line (original line 476, presumably the
// alignment); restore from the original source.
474 return CGF.EmitLoadOfScalar(
475 Address(Result, CGF.convertTypeForLoadStore(E->getType()),
477 /*Volatile*/ false, E->getType(), E->getExprLoc());
478 }
479 return Result;
480 }
481 return Visit(E->getSubExpr());
482 }
// Transparent wrappers: emit the underlying expression.
483 Value *VisitParenExpr(ParenExpr *PE) {
484 return Visit(PE->getSubExpr());
485 }
486 Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
487 return Visit(E->getReplacement());
488 }
489 Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
490 return Visit(GE->getResultExpr());
491 }
492 Value *VisitCoawaitExpr(CoawaitExpr *S) {
493 return CGF.EmitCoawaitExpr(*S).getScalarVal();
494 }
495 Value *VisitCoyieldExpr(CoyieldExpr *S) {
496 return CGF.EmitCoyieldExpr(*S).getScalarVal();
497 }
498 Value *VisitUnaryCoawait(const UnaryOperator *E) {
499 return Visit(E->getSubExpr());
500 }
501
502 // Leaves.
503 Value *VisitIntegerLiteral(const IntegerLiteral *E) {
504 return Builder.getInt(E->getValue());
505 }
506 Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
507 return Builder.getInt(E->getValue());
508 }
509 Value *VisitFloatingLiteral(const FloatingLiteral *E) {
510 return llvm::ConstantFP::get(VMContext, E->getValue());
511 }
512 Value *VisitCharacterLiteral(const CharacterLiteral *E) {
513 // Character literals are always stored in an unsigned (even for signed
514 // char), so allow implicit truncation here.
515 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue(),
516 /*IsSigned=*/false, /*ImplicitTrunc=*/true);
517 }
518 Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
519 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
520 }
521 Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
522 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
523 }
524 Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
// T() for void produces no value; otherwise value-initialize to null.
525 if (E->getType()->isVoidType())
526 return nullptr;
527
528 return EmitNullValue(E->getType());
529 }
530 Value *VisitGNUNullExpr(const GNUNullExpr *E) {
531 return EmitNullValue(E->getType());
532 }
533 Value *VisitOffsetOfExpr(OffsetOfExpr *E);
534 Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
535 Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
536 llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
537 return Builder.CreateBitCast(V, ConvertType(E->getType()));
538 }
539
540 Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
541 return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
542 }
543
544 Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
545 return CGF.EmitPseudoObjectRValue(E).getScalarVal();
546 }
547
548 Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
549 Value *VisitEmbedExpr(EmbedExpr *E);
550
551 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
552 if (E->isGLValue())
553 return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
554 E->getExprLoc());
555
556 // Otherwise, assume the mapping is the scalar directly.
// NOTE(review): the capture jumps from original line 556 to 558 — the return
// statement for the rvalue case (original line 557) is missing; as captured,
// this function falls off the end. Restore from the original source.
558 }
559
560 Value *VisitOpenACCAsteriskSizeExpr(OpenACCAsteriskSizeExpr *E) {
561 llvm_unreachable("Codegen for this isn't defined/implemented");
562 }
563
564 // l-values.
565 Value *VisitDeclRefExpr(DeclRefExpr *E) {
// Prefer folding the reference to a constant; fall back to a load.
566 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
567 return CGF.emitScalarConstant(Constant, E);
568 return EmitLoadOfLValue(E);
569 }
570
571 Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
572 return CGF.EmitObjCSelectorExpr(E);
573 }
574 Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
575 return CGF.EmitObjCProtocolExpr(E);
576 }
577 Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
578 return EmitLoadOfLValue(E);
579 }
580 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
// NOTE(review): the capture jumps from original line 581 to 583 — the second
// half of this condition (original line 582) is missing, leaving the '&&'
// dangling. Restore from the original source.
581 if (E->getMethodDecl() &&
583 return EmitLoadOfLValue(E);
584 return CGF.EmitObjCMessageExpr(E).getScalarVal();
585 }
586
587 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
588 LValue LV = CGF.EmitObjCIsaExpr(E);
// NOTE(review): original line 589 (which defines 'V' from the l-value) is
// missing from this capture; restore from the original source.
590 return V;
591 }
592
593 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
594 VersionTuple Version = E->getVersion();
595
596 // If we're checking for a platform older than our minimum deployment
597 // target, we can fold the check away.
598 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
599 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
600
601 return CGF.EmitBuiltinAvailable(Version);
602 }
603
604 Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
605 Value *VisitMatrixSingleSubscriptExpr(MatrixSingleSubscriptExpr *E);
606 Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
607 Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
608 Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
609 Value *VisitMemberExpr(MemberExpr *E);
610 Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
611 Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
612 // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
613 // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
614 // literals aren't l-values in C++. We do so simply because that's the
615 // cleanest way to handle compound literals in C++.
616 // See the discussion here: https://reviews.llvm.org/D64464
617 return EmitLoadOfLValue(E);
618 }
619
620 Value *VisitInitListExpr(InitListExpr *E);
621
622 Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
623 assert(CGF.getArrayInitIndex() &&
624 "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
625 return CGF.getArrayInitIndex();
626 }
627
628 Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
629 return EmitNullValue(E->getType());
630 }
631 Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
// Record the cast's type for debug info, then emit like any other cast.
632 CGF.CGM.EmitExplicitCastExprType(E, &CGF);
633 return VisitCastExpr(E);
634 }
635 Value *VisitCastExpr(CastExpr *E);
636
637 Value *VisitCallExpr(const CallExpr *E) {
// NOTE(review): the capture jumps from original line 637 to 639 — the 'if'
// condition guarding this early return (original line 638) is missing; as
// captured, the return below would be unconditional. Restore from the
// original source.
639 return EmitLoadOfLValue(E);
640
641 Value *V = CGF.EmitCallExpr(E).getScalarVal();
642
643 EmitLValueAlignmentAssumption(E, V);
644 return V;
645 }
646
647 Value *VisitStmtExpr(const StmtExpr *E);
648
649 // Unary Operators.
// The four inc/dec visitors differ only in the (isInc, isPre) flags passed
// to EmitScalarPrePostIncDec.
650 Value *VisitUnaryPostDec(const UnaryOperator *E) {
651 LValue LV = EmitLValue(E->getSubExpr());
652 return EmitScalarPrePostIncDec(E, LV, false, false);
653 }
654 Value *VisitUnaryPostInc(const UnaryOperator *E) {
655 LValue LV = EmitLValue(E->getSubExpr());
656 return EmitScalarPrePostIncDec(E, LV, true, false);
657 }
658 Value *VisitUnaryPreDec(const UnaryOperator *E) {
659 LValue LV = EmitLValue(E->getSubExpr());
660 return EmitScalarPrePostIncDec(E, LV, false, true);
661 }
662 Value *VisitUnaryPreInc(const UnaryOperator *E) {
663 LValue LV = EmitLValue(E->getSubExpr());
664 return EmitScalarPrePostIncDec(E, LV, true, true);
665 }
666
667 llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
668 llvm::Value *InVal,
669 bool IsInc);
670
671 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
672 bool isInc, bool isPre);
673
674
675 Value *VisitUnaryAddrOf(const UnaryOperator *E) {
// &member-function / &data-member yields a member pointer constant, not an
// address.
676 if (isa<MemberPointerType>(E->getType())) // never sugared
677 return CGF.CGM.getMemberPointerConstant(E);
678
679 return EmitLValue(E->getSubExpr()).getPointer(CGF);
680 }
681 Value *VisitUnaryDeref(const UnaryOperator *E) {
682 if (E->getType()->isVoidType())
683 return Visit(E->getSubExpr()); // the actual value should be unused
684 return EmitLoadOfLValue(E);
685 }
686
687 Value *VisitUnaryPlus(const UnaryOperator *E,
688 QualType PromotionType = QualType());
689 Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
690 Value *VisitUnaryMinus(const UnaryOperator *E,
691 QualType PromotionType = QualType());
692 Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);
693
694 Value *VisitUnaryNot (const UnaryOperator *E);
695 Value *VisitUnaryLNot (const UnaryOperator *E);
696 Value *VisitUnaryReal(const UnaryOperator *E,
697 QualType PromotionType = QualType());
698 Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
699 Value *VisitUnaryImag(const UnaryOperator *E,
700 QualType PromotionType = QualType());
701 Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
702 Value *VisitUnaryExtension(const UnaryOperator *E) {
703 return Visit(E->getSubExpr());
704 }
705
706 // C++
707 Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
708 return EmitLoadOfLValue(E);
709 }
710 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
711 auto &Ctx = CGF.getContext();
// NOTE(review): the capture jumps from original line 711 to 714 — the lines
// computing 'Evaluated' (original lines 712-713) are missing, so 'Evaluated'
// is undeclared here as captured. Restore from the original source.
714 return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
715 SLE->getType());
716 }
717
// Default-argument/initializer expressions need a scope so nested
// source-location and 'this' lookups resolve against the use site.
718 Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
719 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
720 return Visit(DAE->getExpr());
721 }
722 Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
723 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
724 return Visit(DIE->getExpr());
725 }
726 Value *VisitCXXThisExpr(CXXThisExpr *TE) {
727 return CGF.LoadCXXThis();
728 }
729
730 Value *VisitExprWithCleanups(ExprWithCleanups *E);
731 Value *VisitCXXNewExpr(const CXXNewExpr *E) {
732 return CGF.EmitCXXNewExpr(E);
733 }
734 Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
735 CGF.EmitCXXDeleteExpr(E);
736 return nullptr;
737 }
738
739 Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
// Most traits fold to a bool; some carry a full APValue (integer) result.
740 if (E->isStoredAsBoolean())
741 return llvm::ConstantInt::get(ConvertType(E->getType()),
742 E->getBoolValue());
743 assert(E->getAPValue().isInt() && "APValue type not supported");
744 return llvm::ConstantInt::get(ConvertType(E->getType()),
745 E->getAPValue().getInt());
746 }
747
748 Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
749 return Builder.getInt1(E->isSatisfied());
750 }
751
752 Value *VisitRequiresExpr(const RequiresExpr *E) {
753 return Builder.getInt1(E->isSatisfied());
754 }
755
756 Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
757 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
758 }
759
760 Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
761 return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
762 }
763
764 Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
765 // C++ [expr.pseudo]p1:
766 // The result shall only be used as the operand for the function call
767 // operator (), and the result of such a call has type void. The only
768 // effect is the evaluation of the postfix-expression before the dot or
769 // arrow.
770 CGF.EmitScalarExpr(E->getBase());
771 return nullptr;
772 }
773
774 Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
775 return EmitNullValue(E->getType());
776 }
777
778 Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
779 CGF.EmitCXXThrowExpr(E);
780 return nullptr;
781 }
782
783 Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
784 return Builder.getInt1(E->getValue());
785 }
786
787 // Binary Operators.
788 Value *EmitMul(const BinOpInfo &Ops) {
// Signed integer multiply: the lowering depends on -fwrapv/-ftrapv and on
// whether the signed-overflow sanitizer is active. Note the deliberate
// [[fallthrough]]s: when the sanitizer IS active, each case falls through
// until the checked path is reached.
789 if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
790 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
791 case LangOptions::SOB_Defined:
792 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
793 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
794 [[fallthrough]];
795 case LangOptions::SOB_Undefined:
796 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
797 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
798 [[fallthrough]];
799 case LangOptions::SOB_Trapping:
800 if (CanElideOverflowCheck(CGF.getContext(), Ops))
801 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
802 return EmitOverflowCheckedBinOp(Ops);
803 }
804 }
805
806 if (Ops.Ty->isConstantMatrixType()) {
807 llvm::MatrixBuilder MB(Builder);
808 // We need to check the types of the operands of the operator to get the
809 // correct matrix dimensions.
810 auto *BO = cast<BinaryOperator>(Ops.E);
811 auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
812 BO->getLHS()->getType().getCanonicalType());
813 auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
814 BO->getRHS()->getType().getCanonicalType());
815 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
// Matrix * matrix needs a true matrix multiply; matrix * scalar (either
// operand non-matrix) is an element-wise scale.
816 if (LHSMatTy && RHSMatTy)
817 return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
818 LHSMatTy->getNumColumns(),
819 RHSMatTy->getNumColumns());
820 return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
821 }
822
823 if (Ops.Ty->isUnsignedIntegerType() &&
824 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
825 !CanElideOverflowCheck(CGF.getContext(), Ops))
826 return EmitOverflowCheckedBinOp(Ops);
827
828 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
829 // Preserve the old values
830 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
831 return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
832 }
833 if (Ops.isFixedPointOp())
834 return EmitFixedPointBinOp(Ops);
835 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
836 }
837 /// Create a binary op that checks for overflow.
838 /// Currently only supports +, - and *.
839 Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
840
841 // Check for undefined division and modulus behaviors.
842 void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
843 llvm::Value *Zero,bool isDiv);
844 // Common helper for getting how wide LHS of shift is.
845 static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);
846
847 // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
848 // non powers of two.
849 Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);
850
851 Value *EmitDiv(const BinOpInfo &Ops);
852 Value *EmitRem(const BinOpInfo &Ops);
853 Value *EmitAdd(const BinOpInfo &Ops);
854 Value *EmitSub(const BinOpInfo &Ops);
855 Value *EmitShl(const BinOpInfo &Ops);
856 Value *EmitShr(const BinOpInfo &Ops);
// Bitwise ops need no overflow/sanitizer handling; emit directly.
857 Value *EmitAnd(const BinOpInfo &Ops) {
858 return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
859 }
860 Value *EmitXor(const BinOpInfo &Ops) {
861 return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
862 }
863 Value *EmitOr (const BinOpInfo &Ops) {
864 return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
865 }
866
867 // Helper functions for fixed point binary operations.
868 Value *EmitFixedPointBinOp(const BinOpInfo &Ops);
869
870 BinOpInfo EmitBinOps(const BinaryOperator *E,
871 QualType PromotionTy = QualType());
872
873 Value *EmitPromotedValue(Value *result, QualType PromotionType);
874 Value *EmitUnPromotedValue(Value *result, QualType ExprType);
875 Value *EmitPromoted(const Expr *E, QualType PromotionType);
876
877 LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
878 Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
879 Value *&Result);
880
881 Value *EmitCompoundAssign(const CompoundAssignOperator *E,
882 Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
883
884 QualType getPromotionType(QualType Ty) {
885 const auto &Ctx = CGF.getContext();
886 if (auto *CT = Ty->getAs<ComplexType>()) {
887 QualType ElementType = CT->getElementType();
888 if (ElementType.UseExcessPrecision(Ctx))
889 return Ctx.getComplexType(Ctx.FloatTy);
890 }
891
892 if (Ty.UseExcessPrecision(Ctx)) {
893 if (auto *VT = Ty->getAs<VectorType>()) {
894 unsigned NumElements = VT->getNumElements();
895 return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
896 }
897 return Ctx.FloatTy;
898 }
899
900 return QualType();
901 }
902
903 // Binary operators and binary compound assignment operators.
904#define HANDLEBINOP(OP) \
905 Value *VisitBin##OP(const BinaryOperator *E) { \
906 QualType promotionTy = getPromotionType(E->getType()); \
907 auto result = Emit##OP(EmitBinOps(E, promotionTy)); \
908 if (result && !promotionTy.isNull()) \
909 result = EmitUnPromotedValue(result, E->getType()); \
910 return result; \
911 } \
912 Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) { \
913 ApplyAtomGroup Grp(CGF.getDebugInfo()); \
914 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP); \
915 }
916 HANDLEBINOP(Mul)
917 HANDLEBINOP(Div)
918 HANDLEBINOP(Rem)
919 HANDLEBINOP(Add)
920 HANDLEBINOP(Sub)
921 HANDLEBINOP(Shl)
922 HANDLEBINOP(Shr)
924 HANDLEBINOP(Xor)
926#undef HANDLEBINOP
927
928 // Comparisons.
929 Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
930 llvm::CmpInst::Predicate SICmpOpc,
931 llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
932#define VISITCOMP(CODE, UI, SI, FP, SIG) \
933 Value *VisitBin##CODE(const BinaryOperator *E) { \
934 return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
935 llvm::FCmpInst::FP, SIG); }
936 VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
937 VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
938 VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
939 VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
940 VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
941 VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
942#undef VISITCOMP
943
944 Value *VisitBinAssign (const BinaryOperator *E);
945
946 Value *VisitBinLAnd (const BinaryOperator *E);
947 Value *VisitBinLOr (const BinaryOperator *E);
948 Value *VisitBinComma (const BinaryOperator *E);
949
950 Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
951 Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
952
953 Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
954 return Visit(E->getSemanticForm());
955 }
956
957 // Other Operators.
958 Value *VisitBlockExpr(const BlockExpr *BE);
959 Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
960 Value *VisitChooseExpr(ChooseExpr *CE);
961 Value *VisitVAArgExpr(VAArgExpr *VE);
962 Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
963 return CGF.EmitObjCStringLiteral(E);
964 }
965 Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
966 return CGF.EmitObjCBoxedExpr(E);
967 }
968 Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
969 return CGF.EmitObjCArrayLiteral(E);
970 }
971 Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
972 return CGF.EmitObjCDictionaryLiteral(E);
973 }
974 Value *VisitAsTypeExpr(AsTypeExpr *CE);
975 Value *VisitAtomicExpr(AtomicExpr *AE);
976 Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
977 return Visit(E->getSelectedExpr());
978 }
979};
980} // end anonymous namespace.
981
982//===----------------------------------------------------------------------===//
983// Utilities
984//===----------------------------------------------------------------------===//
985
986/// EmitConversionToBool - Convert the specified expression value to a
987/// boolean (i1) truth value. This is equivalent to "Val != 0".
988Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
989 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
990
991 if (SrcType->isRealFloatingType())
992 return EmitFloatToBoolConversion(Src);
993
994 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
995 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
996
997 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
998 "Unknown scalar type to convert");
999
1000 if (isa<llvm::IntegerType>(Src->getType()))
1001 return EmitIntToBoolConversion(Src);
1002
1003 assert(isa<llvm::PointerType>(Src->getType()));
1004 return EmitPointerToBoolConversion(Src, SrcType);
1005}
1006
1007void ScalarExprEmitter::EmitFloatConversionCheck(
1008 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
1009 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
1010 assert(SrcType->isFloatingType() && "not a conversion from floating point");
1011 if (!isa<llvm::IntegerType>(DstTy))
1012 return;
1013
1014 auto CheckOrdinal = SanitizerKind::SO_FloatCastOverflow;
1015 auto CheckHandler = SanitizerHandler::FloatCastOverflow;
1016 SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
1017 using llvm::APFloat;
1018 using llvm::APSInt;
1019
1020 llvm::Value *Check = nullptr;
1021 const llvm::fltSemantics &SrcSema =
1022 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
1023
1024 // Floating-point to integer. This has undefined behavior if the source is
1025 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
1026 // to an integer).
1027 unsigned Width = CGF.getContext().getIntWidth(DstType);
1029
1030 APSInt Min = APSInt::getMinValue(Width, Unsigned);
1031 APFloat MinSrc(SrcSema, APFloat::uninitialized);
1032 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
1033 APFloat::opOverflow)
1034 // Don't need an overflow check for lower bound. Just check for
1035 // -Inf/NaN.
1036 MinSrc = APFloat::getInf(SrcSema, true);
1037 else
1038 // Find the largest value which is too small to represent (before
1039 // truncation toward zero).
1040 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
1041
1042 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
1043 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
1044 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
1045 APFloat::opOverflow)
1046 // Don't need an overflow check for upper bound. Just check for
1047 // +Inf/NaN.
1048 MaxSrc = APFloat::getInf(SrcSema, false);
1049 else
1050 // Find the smallest value which is too large to represent (before
1051 // truncation toward zero).
1052 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
1053
1054 // If we're converting from __half, convert the range to float to match
1055 // the type of src.
1056 if (OrigSrcType->isHalfType()) {
1057 const llvm::fltSemantics &Sema =
1058 CGF.getContext().getFloatTypeSemantics(SrcType);
1059 bool IsInexact;
1060 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1061 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1062 }
1063
1064 llvm::Value *GE =
1065 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
1066 llvm::Value *LE =
1067 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
1068 Check = Builder.CreateAnd(GE, LE);
1069
1070 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
1071 CGF.EmitCheckTypeDescriptor(OrigSrcType),
1072 CGF.EmitCheckTypeDescriptor(DstType)};
1073 CGF.EmitCheck(std::make_pair(Check, CheckOrdinal), CheckHandler, StaticArgs,
1074 OrigSrc);
1075}
1076
1077// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1078// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1079static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1080 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1082 QualType DstType, CGBuilderTy &Builder) {
1083 llvm::Type *SrcTy = Src->getType();
1084 llvm::Type *DstTy = Dst->getType();
1085 (void)DstTy; // Only used in assert()
1086
1087 // This should be truncation of integral types.
1088 assert(Src != Dst);
1089 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
1090 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1091 "non-integer llvm type");
1092
1093 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1094 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1095
1096 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
1097 // Else, it is a signed truncation.
1098 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1100 if (!SrcSigned && !DstSigned) {
1101 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1102 Ordinal = SanitizerKind::SO_ImplicitUnsignedIntegerTruncation;
1103 } else {
1104 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1105 Ordinal = SanitizerKind::SO_ImplicitSignedIntegerTruncation;
1106 }
1107
1108 llvm::Value *Check = nullptr;
1109 // 1. Extend the truncated value back to the same width as the Src.
1110 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
1111 // 2. Equality-compare with the original source value
1112 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1113 // If the comparison result is 'i1 false', then the truncation was lossy.
1114 return std::make_pair(Kind, std::make_pair(Check, Ordinal));
1115}
1116
1118 QualType SrcType, QualType DstType) {
1119 return SrcType->isIntegerType() && DstType->isIntegerType();
1120}
1121
1122void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1123 Value *Dst, QualType DstType,
1124 SourceLocation Loc) {
1125 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
1126 return;
1127
1128 // We only care about int->int conversions here.
1129 // We ignore conversions to/from pointer and/or bool.
1131 DstType))
1132 return;
1133
1134 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1135 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1136 // This must be truncation. Else we do not care.
1137 if (SrcBits <= DstBits)
1138 return;
1139
1140 assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1141
1142 // If the integer sign change sanitizer is enabled,
1143 // and we are truncating from larger unsigned type to smaller signed type,
1144 // let that next sanitizer deal with it.
1145 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1146 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1147 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1148 (!SrcSigned && DstSigned))
1149 return;
1150
1151 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1152 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1153 Check;
1154
1155 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1156 {
1157 // We don't know the check kind until we call
1158 // EmitIntegerTruncationCheckHelper, but we want to annotate
1159 // EmitIntegerTruncationCheckHelper's instructions too.
1160 SanitizerDebugLocation SanScope(
1161 &CGF,
1162 {SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1163 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1164 CheckHandler);
1165 Check =
1166 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1167 // If the comparison result is 'i1 false', then the truncation was lossy.
1168 }
1169
1170 // Do we care about this type of truncation?
1171 if (!CGF.SanOpts.has(Check.second.second))
1172 return;
1173
1174 SanitizerDebugLocation SanScope(&CGF, {Check.second.second}, CheckHandler);
1175
1176 // Does some SSCL ignore this type?
1178 SanitizerMask::bitPosToMask(Check.second.second), DstType))
1179 return;
1180
1181 llvm::Constant *StaticArgs[] = {
1182 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1183 CGF.EmitCheckTypeDescriptor(DstType),
1184 llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
1185 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1186
1187 CGF.EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
1188}
1189
1190static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
1191 const char *Name,
1192 CGBuilderTy &Builder) {
1193 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1194 llvm::Type *VTy = V->getType();
1195 if (!VSigned) {
1196 // If the value is unsigned, then it is never negative.
1197 return llvm::ConstantInt::getFalse(VTy->getContext());
1198 }
1199 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1200 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1201 llvm::Twine(Name) + "." + V->getName() +
1202 ".negativitycheck");
1203}
1204
1205// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1206// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1207static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1208 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1210 QualType DstType, CGBuilderTy &Builder) {
1211 llvm::Type *SrcTy = Src->getType();
1212 llvm::Type *DstTy = Dst->getType();
1213
1214 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1215 "non-integer llvm type");
1216
1217 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1218 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1219 (void)SrcSigned; // Only used in assert()
1220 (void)DstSigned; // Only used in assert()
1221 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1222 unsigned DstBits = DstTy->getScalarSizeInBits();
1223 (void)SrcBits; // Only used in assert()
1224 (void)DstBits; // Only used in assert()
1225
1226 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1227 "either the widths should be different, or the signednesses.");
1228
1229 // 1. Was the old Value negative?
1230 llvm::Value *SrcIsNegative =
1231 EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
1232 // 2. Is the new Value negative?
1233 llvm::Value *DstIsNegative =
1234 EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
1235 // 3. Now, was the 'negativity status' preserved during the conversion?
1236 // NOTE: conversion from negative to zero is considered to change the sign.
1237 // (We want to get 'false' when the conversion changed the sign)
1238 // So we should just equality-compare the negativity statuses.
1239 llvm::Value *Check = nullptr;
1240 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1241 // If the comparison result is 'false', then the conversion changed the sign.
1242 return std::make_pair(
1243 ScalarExprEmitter::ICCK_IntegerSignChange,
1244 std::make_pair(Check, SanitizerKind::SO_ImplicitIntegerSignChange));
1245}
1246
1247void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1248 Value *Dst, QualType DstType,
1249 SourceLocation Loc) {
1250 if (!CGF.SanOpts.has(SanitizerKind::SO_ImplicitIntegerSignChange))
1251 return;
1252
1253 llvm::Type *SrcTy = Src->getType();
1254 llvm::Type *DstTy = Dst->getType();
1255
1256 // We only care about int->int conversions here.
1257 // We ignore conversions to/from pointer and/or bool.
1259 DstType))
1260 return;
1261
1262 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1263 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1264 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1265 unsigned DstBits = DstTy->getScalarSizeInBits();
1266
1267 // Now, we do not need to emit the check in *all* of the cases.
1268 // We can avoid emitting it in some obvious cases where it would have been
1269 // dropped by the opt passes (instcombine) always anyways.
1270 // If it's a cast between effectively the same type, no check.
1271 // NOTE: this is *not* equivalent to checking the canonical types.
1272 if (SrcSigned == DstSigned && SrcBits == DstBits)
1273 return;
1274 // At least one of the values needs to have signed type.
1275 // If both are unsigned, then obviously, neither of them can be negative.
1276 if (!SrcSigned && !DstSigned)
1277 return;
1278 // If the conversion is to *larger* *signed* type, then no check is needed.
1279 // Because either sign-extension happens (so the sign will remain),
1280 // or zero-extension will happen (the sign bit will be zero.)
1281 if ((DstBits > SrcBits) && DstSigned)
1282 return;
1283 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1284 (SrcBits > DstBits) && SrcSigned) {
1285 // If the signed integer truncation sanitizer is enabled,
1286 // and this is a truncation from signed type, then no check is needed.
1287 // Because here sign change check is interchangeable with truncation check.
1288 return;
1289 }
1290 // Does an SSCL have an entry for the DstType under its respective sanitizer
1291 // section?
1292 if (DstSigned && CGF.getContext().isTypeIgnoredBySanitizer(
1293 SanitizerKind::ImplicitSignedIntegerTruncation, DstType))
1294 return;
1295 if (!DstSigned &&
1297 SanitizerKind::ImplicitUnsignedIntegerTruncation, DstType))
1298 return;
1299 // That's it. We can't rule out any more cases with the data we have.
1300
1301 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1302 SanitizerDebugLocation SanScope(
1303 &CGF,
1304 {SanitizerKind::SO_ImplicitIntegerSignChange,
1305 SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1306 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1307 CheckHandler);
1308
1309 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1310 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1311 Check;
1312
1313 // Each of these checks needs to return 'false' when an issue was detected.
1314 ImplicitConversionCheckKind CheckKind;
1315 llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
1316 2>
1317 Checks;
1318 // So we can 'and' all the checks together, and still get 'false',
1319 // if at least one of the checks detected an issue.
1320
1321 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1322 CheckKind = Check.first;
1323 Checks.emplace_back(Check.second);
1324
1325 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1326 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1327 // If the signed integer truncation sanitizer was enabled,
1328 // and we are truncating from larger unsigned type to smaller signed type,
1329 // let's handle the case we skipped in that check.
1330 Check =
1331 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1332 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1333 Checks.emplace_back(Check.second);
1334 // If the comparison result is 'i1 false', then the truncation was lossy.
1335 }
1336
1337 llvm::Constant *StaticArgs[] = {
1338 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1339 CGF.EmitCheckTypeDescriptor(DstType),
1340 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1341 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1342 // EmitCheck() will 'and' all the checks together.
1343 CGF.EmitCheck(Checks, CheckHandler, StaticArgs, {Src, Dst});
1344}
1345
1346// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1347// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1348static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1349 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1351 QualType DstType, CGBuilderTy &Builder) {
1352 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1353 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1354
1355 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1356 if (!SrcSigned && !DstSigned)
1357 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1358 else
1359 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1360
1361 llvm::Value *Check = nullptr;
1362 // 1. Extend the truncated value back to the same width as the Src.
1363 Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
1364 // 2. Equality-compare with the original source value
1365 Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
1366 // If the comparison result is 'i1 false', then the truncation was lossy.
1367
1368 return std::make_pair(
1369 Kind,
1370 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1371}
1372
1373// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1374// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1375static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1376 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1378 QualType DstType, CGBuilderTy &Builder) {
1379 // 1. Was the old Value negative?
1380 llvm::Value *SrcIsNegative =
1381 EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
1382 // 2. Is the new Value negative?
1383 llvm::Value *DstIsNegative =
1384 EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
1385 // 3. Now, was the 'negativity status' preserved during the conversion?
1386 // NOTE: conversion from negative to zero is considered to change the sign.
1387 // (We want to get 'false' when the conversion changed the sign)
1388 // So we should just equality-compare the negativity statuses.
1389 llvm::Value *Check = nullptr;
1390 Check =
1391 Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
1392 // If the comparison result is 'false', then the conversion changed the sign.
1393 return std::make_pair(
1394 ScalarExprEmitter::ICCK_IntegerSignChange,
1395 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1396}
1397
1399 Value *Dst, QualType DstType,
1400 const CGBitFieldInfo &Info,
1401 SourceLocation Loc) {
1402
1403 if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
1404 return;
1405
1406 // We only care about int->int conversions here.
1407 // We ignore conversions to/from pointer and/or bool.
1409 DstType))
1410 return;
1411
1412 if (DstType->isBooleanType() || SrcType->isBooleanType())
1413 return;
1414
1415 // This should be truncation of integral types.
1416 assert(isa<llvm::IntegerType>(Src->getType()) &&
1417 isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");
1418
1419 // TODO: Calculate src width to avoid emitting code
1420 // for unecessary cases.
1421 unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
1422 unsigned DstBits = Info.Size;
1423
1424 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1425 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1426
1427 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1428 SanitizerDebugLocation SanScope(
1429 this, {SanitizerKind::SO_ImplicitBitfieldConversion}, CheckHandler);
1430
1431 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1432 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1433 Check;
1434
1435 // Truncation
1436 bool EmitTruncation = DstBits < SrcBits;
1437 // If Dst is signed and Src unsigned, we want to be more specific
1438 // about the CheckKind we emit, in this case we want to emit
1439 // ICCK_SignedIntegerTruncationOrSignChange.
1440 bool EmitTruncationFromUnsignedToSigned =
1441 EmitTruncation && DstSigned && !SrcSigned;
1442 // Sign change
1443 bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
1444 bool BothUnsigned = !SrcSigned && !DstSigned;
1445 bool LargerSigned = (DstBits > SrcBits) && DstSigned;
1446 // We can avoid emitting sign change checks in some obvious cases
1447 // 1. If Src and Dst have the same signedness and size
1448 // 2. If both are unsigned sign check is unecessary!
1449 // 3. If Dst is signed and bigger than Src, either
1450 // sign-extension or zero-extension will make sure
1451 // the sign remains.
1452 bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;
1453
1454 if (EmitTruncation)
1455 Check =
1456 EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1457 else if (EmitSignChange) {
1458 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1459 "either the widths should be different, or the signednesses.");
1460 Check =
1461 EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1462 } else
1463 return;
1464
1465 ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
1466 if (EmitTruncationFromUnsignedToSigned)
1467 CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;
1468
1469 llvm::Constant *StaticArgs[] = {
1471 EmitCheckTypeDescriptor(DstType),
1472 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1473 llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};
1474
1475 EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
1476}
1477
1478Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1479 QualType DstType, llvm::Type *SrcTy,
1480 llvm::Type *DstTy,
1481 ScalarConversionOpts Opts) {
1482 // The Element types determine the type of cast to perform.
1483 llvm::Type *SrcElementTy;
1484 llvm::Type *DstElementTy;
1485 QualType SrcElementType;
1486 QualType DstElementType;
1487 if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1488 SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1489 DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1490 SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1491 DstElementType = DstType->castAs<MatrixType>()->getElementType();
1492 } else {
1493 assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
1494 "cannot cast between matrix and non-matrix types");
1495 SrcElementTy = SrcTy;
1496 DstElementTy = DstTy;
1497 SrcElementType = SrcType;
1498 DstElementType = DstType;
1499 }
1500
1501 if (isa<llvm::IntegerType>(SrcElementTy)) {
1502 bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1503 if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1504 InputSigned = true;
1505 }
1506
1507 if (isa<llvm::IntegerType>(DstElementTy))
1508 return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1509 if (InputSigned)
1510 return Builder.CreateSIToFP(Src, DstTy, "conv");
1511 return Builder.CreateUIToFP(Src, DstTy, "conv");
1512 }
1513
1514 if (isa<llvm::IntegerType>(DstElementTy)) {
1515 assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
1516 bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();
1517
1518 // If we can't recognize overflow as undefined behavior, assume that
1519 // overflow saturates. This protects against normal optimizations if we are
1520 // compiling with non-standard FP semantics.
1521 if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
1522 llvm::Intrinsic::ID IID =
1523 IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
1524 return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
1525 }
1526
1527 if (IsSigned)
1528 return Builder.CreateFPToSI(Src, DstTy, "conv");
1529 return Builder.CreateFPToUI(Src, DstTy, "conv");
1530 }
1531
1532 if ((DstElementTy->is16bitFPTy() && SrcElementTy->is16bitFPTy())) {
1533 Value *FloatVal = Builder.CreateFPExt(Src, Builder.getFloatTy(), "fpext");
1534 return Builder.CreateFPTrunc(FloatVal, DstTy, "fptrunc");
1535 }
1536 if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1537 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1538 return Builder.CreateFPExt(Src, DstTy, "conv");
1539}
1540
1541/// Emit a conversion from the specified type to the specified destination type,
1542/// both of which are LLVM scalar types.
1543Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1544 QualType DstType,
1545 SourceLocation Loc,
1546 ScalarConversionOpts Opts) {
1547 // All conversions involving fixed point types should be handled by the
1548 // EmitFixedPoint family functions. This is done to prevent bloating up this
1549 // function more, and although fixed point numbers are represented by
1550 // integers, we do not want to follow any logic that assumes they should be
1551 // treated as integers.
1552 // TODO(leonardchan): When necessary, add another if statement checking for
1553 // conversions to fixed point types from other types.
1554 if (SrcType->isFixedPointType()) {
1555 if (DstType->isBooleanType())
1556 // It is important that we check this before checking if the dest type is
1557 // an integer because booleans are technically integer types.
1558 // We do not need to check the padding bit on unsigned types if unsigned
1559 // padding is enabled because overflow into this bit is undefined
1560 // behavior.
1561 return Builder.CreateIsNotNull(Src, "tobool");
1562 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1563 DstType->isRealFloatingType())
1564 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1565
1566 llvm_unreachable(
1567 "Unhandled scalar conversion from a fixed point type to another type.");
1568 } else if (DstType->isFixedPointType()) {
1569 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1570 // This also includes converting booleans and enums to fixed point types.
1571 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1572
1573 llvm_unreachable(
1574 "Unhandled scalar conversion to a fixed point type from another type.");
1575 }
1576
1577 QualType NoncanonicalSrcType = SrcType;
1578 QualType NoncanonicalDstType = DstType;
1579
1580 SrcType = CGF.getContext().getCanonicalType(SrcType);
1581 DstType = CGF.getContext().getCanonicalType(DstType);
1582 if (SrcType == DstType) return Src;
1583
1584 if (DstType->isVoidType()) return nullptr;
1585
1586 llvm::Value *OrigSrc = Src;
1587 QualType OrigSrcType = SrcType;
1588 llvm::Type *SrcTy = Src->getType();
1589
1590 // Handle conversions to bool first, they are special: comparisons against 0.
1591 if (DstType->isBooleanType())
1592 return EmitConversionToBool(Src, SrcType);
1593
1594 llvm::Type *DstTy = ConvertType(DstType);
1595
1596 // Cast from half through float if half isn't a native type.
1597 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1598 // Cast to FP using the intrinsic if the half type itself isn't supported.
1599 if (DstTy->isFloatingPointTy()) {
1601 return Builder.CreateCall(
1602 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1603 Src);
1604 } else {
1605 // Cast to other types through float, using either the intrinsic or FPExt,
1606 // depending on whether the half type itself is supported
1607 // (as opposed to operations on half, available with NativeHalfType).
1609 Src = Builder.CreateCall(
1610 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1611 CGF.CGM.FloatTy),
1612 Src);
1613 } else {
1614 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1615 }
1616 SrcType = CGF.getContext().FloatTy;
1617 SrcTy = CGF.FloatTy;
1618 }
1619 }
1620
1621 // Ignore conversions like int -> uint.
1622 if (SrcTy == DstTy) {
1623 if (Opts.EmitImplicitIntegerSignChangeChecks)
1624 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1625 NoncanonicalDstType, Loc);
1626
1627 return Src;
1628 }
1629
1630 // Handle pointer conversions next: pointers can only be converted to/from
1631 // other pointers and integers. Check for pointer types in terms of LLVM, as
1632 // some native types (like Obj-C id) may map to a pointer type.
1633 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1634 // The source value may be an integer, or a pointer.
1635 if (isa<llvm::PointerType>(SrcTy))
1636 return Src;
1637
1638 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1639 // First, convert to the correct width so that we control the kind of
1640 // extension.
1641 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1642 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1643 llvm::Value* IntResult =
1644 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1645 // Then, cast to pointer.
1646 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1647 }
1648
1649 if (isa<llvm::PointerType>(SrcTy)) {
1650 // Must be an ptr to int cast.
1651 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1652 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1653 }
1654
1655 // A scalar can be splatted to an extended vector of the same element type
1656 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1657 // Sema should add casts to make sure that the source expression's type is
1658 // the same as the vector's element type (sans qualifiers)
1659 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1660 SrcType.getTypePtr() &&
1661 "Splatted expr doesn't match with vector element type?");
1662
1663 // Splat the element across to all elements
1664 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1665 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1666 }
1667
1668 if (SrcType->isMatrixType() && DstType->isMatrixType())
1669 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1670
1671 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1672 // Allow bitcast from vector to integer/fp of the same size.
1673 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1674 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1675 if (SrcSize == DstSize)
1676 return Builder.CreateBitCast(Src, DstTy, "conv");
1677
1678 // Conversions between vectors of different sizes are not allowed except
1679 // when vectors of half are involved. Operations on storage-only half
1680 // vectors require promoting half vector operands to float vectors and
1681 // truncating the result, which is either an int or float vector, to a
1682 // short or half vector.
1683
1684 // Source and destination are both expected to be vectors.
1685 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1686 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1687 (void)DstElementTy;
1688
1689 assert(((SrcElementTy->isIntegerTy() &&
1690 DstElementTy->isIntegerTy()) ||
1691 (SrcElementTy->isFloatingPointTy() &&
1692 DstElementTy->isFloatingPointTy())) &&
1693 "unexpected conversion between a floating-point vector and an "
1694 "integer vector");
1695
1696 // Truncate an i32 vector to an i16 vector.
1697 if (SrcElementTy->isIntegerTy())
1698 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1699
1700 // Truncate a float vector to a half vector.
1701 if (SrcSize > DstSize)
1702 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1703
1704 // Promote a half vector to a float vector.
1705 return Builder.CreateFPExt(Src, DstTy, "conv");
1706 }
1707
1708 // Finally, we have the arithmetic types: real int/float.
1709 Value *Res = nullptr;
1710 llvm::Type *ResTy = DstTy;
1711
1712 // An overflowing conversion has undefined behavior if either the source type
1713 // or the destination type is a floating-point type. However, we consider the
1714 // range of representable values for all floating-point types to be
1715 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1716 // floating-point type.
1717 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1718 OrigSrcType->isFloatingType())
1719 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1720 Loc);
1721
1722 // Cast to half through float if half isn't a native type.
1723 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1724 // Make sure we cast in a single step if from another FP type.
1725 if (SrcTy->isFloatingPointTy()) {
1726 // Use the intrinsic if the half type itself isn't supported
1727 // (as opposed to operations on half, available with NativeHalfType).
1729 return Builder.CreateCall(
1730 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1731 // If the half type is supported, just use an fptrunc.
1732 return Builder.CreateFPTrunc(Src, DstTy);
1733 }
1734 DstTy = CGF.FloatTy;
1735 }
1736
1737 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1738
1739 if (DstTy != ResTy) {
1741 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1742 Res = Builder.CreateCall(
1743 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1744 Res);
1745 } else {
1746 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1747 }
1748 }
1749
1750 if (Opts.EmitImplicitIntegerTruncationChecks)
1751 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1752 NoncanonicalDstType, Loc);
1753
1754 if (Opts.EmitImplicitIntegerSignChangeChecks)
1755 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1756 NoncanonicalDstType, Loc);
1757
1758 return Res;
1759}
1760
1761Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1762 QualType DstTy,
1763 SourceLocation Loc) {
1764 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1765 llvm::Value *Result;
1766 if (SrcTy->isRealFloatingType())
1767 Result = FPBuilder.CreateFloatingToFixed(Src,
1768 CGF.getContext().getFixedPointSemantics(DstTy));
1769 else if (DstTy->isRealFloatingType())
1770 Result = FPBuilder.CreateFixedToFloating(Src,
1772 ConvertType(DstTy));
1773 else {
1774 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1775 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1776
1777 if (DstTy->isIntegerType())
1778 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1779 DstFPSema.getWidth(),
1780 DstFPSema.isSigned());
1781 else if (SrcTy->isIntegerType())
1782 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1783 DstFPSema);
1784 else
1785 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1786 }
1787 return Result;
1788}
1789
1790/// Emit a conversion from the specified complex type to the specified
1791/// destination type, where the destination type is an LLVM scalar type.
1792Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1793 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1794 SourceLocation Loc) {
1795 // Get the source element type.
1796 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1797
1798 // Handle conversions to bool first, they are special: comparisons against 0.
1799 if (DstTy->isBooleanType()) {
1800 // Complex != 0 -> (Real != 0) | (Imag != 0)
1801 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1802 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1803 return Builder.CreateOr(Src.first, Src.second, "tobool");
1804 }
1805
1806 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1807 // the imaginary part of the complex value is discarded and the value of the
1808 // real part is converted according to the conversion rules for the
1809 // corresponding real type.
1810 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1811}
1812
/// Emit the zero/null value of type \p Ty, converted from its in-memory
/// representation to the form used for scalar SSA values.
Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
  return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
}
1816
/// Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
/// operation). The check passes if all values in \p Checks (which are \c i1),
/// are \c true.
void ScalarExprEmitter::EmitBinOpCheck(
    ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
    const BinOpInfo &Info) {
  assert(CGF.IsSanitizerScope);
  SanitizerHandler Check;
  // StaticData is baked into the binary for the runtime handler; DynamicData
  // is passed as live values at the check site.
  SmallVector<llvm::Constant *, 4> StaticData;
  SmallVector<llvm::Value *, 2> DynamicData;
  TrapReason TR;

  BinaryOperatorKind Opcode = Info.Opcode;

  StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
  if (UO && UO->getOpcode() == UO_Minus) {
    // Unary minus lowered to a binary op: only the operand (RHS) matters.
    Check = SanitizerHandler::NegateOverflow;
    StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
    DynamicData.push_back(Info.RHS);
  } else {
    if (BinaryOperator::isShiftOp(Opcode)) {
      // Shift LHS negative or too large, or RHS out of bounds.
      Check = SanitizerHandler::ShiftOutOfBounds;
      const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
    } else if (Opcode == BO_Div || Opcode == BO_Rem) {
      // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
      Check = SanitizerHandler::DivremOverflow;
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    } else {
      // Arithmetic overflow (+, -, *).
      int ArithOverflowKind = 0;
      switch (Opcode) {
      case BO_Add: {
        Check = SanitizerHandler::AddOverflow;
        ArithOverflowKind = diag::UBSanArithKind::Add;
        break;
      }
      case BO_Sub: {
        Check = SanitizerHandler::SubOverflow;
        ArithOverflowKind = diag::UBSanArithKind::Sub;
        break;
      }
      case BO_Mul: {
        Check = SanitizerHandler::MulOverflow;
        ArithOverflowKind = diag::UBSanArithKind::Mul;
        break;
      }
      default:
        llvm_unreachable("unexpected opcode for bin op check");
      }
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
      // NOTE(review): the opening of the condition below (presumably
      // `if (CGF.SanOpts.has(...)` lines) appears to have been lost in
      // extraction; only its tail is visible here.
          SanitizerKind::UnsignedIntegerOverflow) ||
          SanitizerKind::SignedIntegerOverflow)) {
        // Only pay the cost for constructing the trap diagnostic if they are
        // going to be used.
        CGF.CGM.BuildTrapReason(diag::trap_ubsan_arith_overflow, TR)
            << Info.Ty->isSignedIntegerOrEnumerationType() << ArithOverflowKind
            << Info.E;
      }
    }
    DynamicData.push_back(Info.LHS);
    DynamicData.push_back(Info.RHS);
  }

  CGF.EmitCheck(Checks, Check, StaticData, DynamicData, &TR);
}
1893
1894//===----------------------------------------------------------------------===//
1895// Visitor Methods
1896//===----------------------------------------------------------------------===//
1897
1898Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1899 CGF.ErrorUnsupported(E, "scalar expression");
1900 if (E->getType()->isVoidType())
1901 return nullptr;
1902 return llvm::PoisonValue::get(CGF.ConvertType(E->getType()));
1903}
1904
Value *
ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
  ASTContext &Context = CGF.getContext();
  // NOTE(review): the initializer of AddrSpace (presumably a target address
  // space query) appears to have been lost in extraction.
  unsigned AddrSpace =
  // Materialize the computed unique stable name as a global string.
  llvm::Constant *GlobalConstStr = Builder.CreateGlobalString(
      E->ComputeName(Context), "__usn_str", AddrSpace);

  // Cast the global string to the expression's pointer type, changing address
  // space if necessary.
  llvm::Type *ExprTy = ConvertType(E->getType());
  return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,
                                                     "usn_addr_cast");
}
1917
1918Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {
1919 assert(E->getDataElementCount() == 1);
1920 auto It = E->begin();
1921 return Builder.getInt((*It)->getValue());
1922}
1923
Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
  // Vector Mask Case: two operands means (vector, variable mask); since the
  // mask is not constant we lower to per-element extract/insert rather than a
  // shufflevector instruction.
  if (E->getNumSubExprs() == 2) {
    Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
    Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
    Value *Mask;

    auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
    unsigned LHSElts = LTy->getNumElements();

    Mask = RHS;

    auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());

    // Mask off the high bits of each shuffle index.
    Value *MaskBits =
        llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
    Mask = Builder.CreateAnd(Mask, MaskBits, "mask");

    // newv = undef
    // mask = mask & maskbits
    // for each elt
    //   n = extract mask i
    //   x = extract val n
    //   newv = insert newv, x, i
    auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
                                           MTy->getNumElements());
    Value* NewV = llvm::PoisonValue::get(RTy);
    for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
      Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
      Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");

      Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
      NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
    }
    return NewV;
  }

  // Constant-mask case: the remaining sub-expressions are compile-time mask
  // indices, so emit a single shufflevector instruction.
  Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
  Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));

  SmallVector<int, 32> Indices;
  for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
    llvm::APSInt Idx = E->getShuffleMaskIdx(i - 2);
    // Check for -1 and output it as undef in the IR.
    if (Idx.isSigned() && Idx.isAllOnes())
      Indices.push_back(-1);
    else
      Indices.push_back(Idx.getZExtValue());
  }

  return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
}
1977
1978Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1979 QualType SrcType = E->getSrcExpr()->getType(),
1980 DstType = E->getType();
1981
1982 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1983
1984 SrcType = CGF.getContext().getCanonicalType(SrcType);
1985 DstType = CGF.getContext().getCanonicalType(DstType);
1986 if (SrcType == DstType) return Src;
1987
1988 assert(SrcType->isVectorType() &&
1989 "ConvertVector source type must be a vector");
1990 assert(DstType->isVectorType() &&
1991 "ConvertVector destination type must be a vector");
1992
1993 llvm::Type *SrcTy = Src->getType();
1994 llvm::Type *DstTy = ConvertType(DstType);
1995
1996 // Ignore conversions like int -> uint.
1997 if (SrcTy == DstTy)
1998 return Src;
1999
2000 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
2001 DstEltType = DstType->castAs<VectorType>()->getElementType();
2002
2003 assert(SrcTy->isVectorTy() &&
2004 "ConvertVector source IR type must be a vector");
2005 assert(DstTy->isVectorTy() &&
2006 "ConvertVector destination IR type must be a vector");
2007
2008 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
2009 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
2010
2011 if (DstEltType->isBooleanType()) {
2012 assert((SrcEltTy->isFloatingPointTy() ||
2013 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
2014
2015 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
2016 if (SrcEltTy->isFloatingPointTy()) {
2017 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2018 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
2019 } else {
2020 return Builder.CreateICmpNE(Src, Zero, "tobool");
2021 }
2022 }
2023
2024 // We have the arithmetic types: real int/float.
2025 Value *Res = nullptr;
2026
2027 if (isa<llvm::IntegerType>(SrcEltTy)) {
2028 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
2029 if (isa<llvm::IntegerType>(DstEltTy))
2030 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
2031 else {
2032 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2033 if (InputSigned)
2034 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
2035 else
2036 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
2037 }
2038 } else if (isa<llvm::IntegerType>(DstEltTy)) {
2039 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
2040 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2041 if (DstEltType->isSignedIntegerOrEnumerationType())
2042 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
2043 else
2044 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
2045 } else {
2046 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
2047 "Unknown real conversion");
2048 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2049 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
2050 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
2051 else
2052 Res = Builder.CreateFPExt(Src, DstTy, "conv");
2053 }
2054
2055 return Res;
2056}
2057
Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
  // Try to fold the member access to a constant; even then, the base must
  // still be evaluated for its side effects.
  if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
    CGF.EmitIgnoredExpr(E->getBase());
    return CGF.emitScalarConstant(Constant, E);
  } else {
    Expr::EvalResult Result;
    // NOTE(review): the condition guarding this branch (presumably an
    // E->EvaluateAsInt(Result, ...) call) appears to have been lost in
    // extraction.
      llvm::APSInt Value = Result.Val.getInt();
      CGF.EmitIgnoredExpr(E->getBase());
      return Builder.getInt(Value);
    }
  }

  llvm::Value *Result = EmitLoadOfLValue(E);

  // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
  // debug info for the pointer, even if there is no variable associated with
  // the pointer's expression.
  if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
    if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Result)) {
      if (llvm::GetElementPtrInst *GEP =
              dyn_cast<llvm::GetElementPtrInst>(Load->getPointerOperand())) {
        if (llvm::Instruction *Pointer =
                dyn_cast<llvm::Instruction>(GEP->getPointerOperand())) {
          QualType Ty = E->getBase()->getType();
          if (!E->isArrow())
            Ty = CGF.getContext().getPointerType(Ty);
          CGF.getDebugInfo()->EmitPseudoVariable(Builder, Pointer, Ty);
        }
      }
    }
  }
  return Result;
}
2092
Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Emit subscript expressions in rvalue context's. For most cases, this just
  // loads the lvalue formed by the subscript expr. However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
  // NOTE(review): the second half of this condition appears to have been lost
  // in extraction.
  if (!E->getBase()->getType()->isVectorType() &&
    return EmitLoadOfLValue(E);

  // Handle the vector case. The base must be a vector, the index must be an
  // integer value.
  Value *Base = Visit(E->getBase());
  Value *Idx = Visit(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();

  // Optionally check the index against the vector's bounds.
  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);

  return Builder.CreateExtractElement(Base, Idx, "vecext");
}
2115
2116Value *ScalarExprEmitter::VisitMatrixSingleSubscriptExpr(
2117 MatrixSingleSubscriptExpr *E) {
2118 TestAndClearIgnoreResultAssign();
2119
2120 auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2121 unsigned NumRows = MatrixTy->getNumRows();
2122 unsigned NumColumns = MatrixTy->getNumColumns();
2123
2124 // Row index
2125 Value *RowIdx = CGF.EmitMatrixIndexExpr(E->getRowIdx());
2126 llvm::MatrixBuilder MB(Builder);
2127
2128 // The row index must be in [0, NumRows)
2129 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2130 MB.CreateIndexAssumption(RowIdx, NumRows);
2131
2132 Value *FlatMatrix = Visit(E->getBase());
2133 llvm::Type *ElemTy = CGF.ConvertType(MatrixTy->getElementType());
2134 auto *ResultTy = llvm::FixedVectorType::get(ElemTy, NumColumns);
2135 Value *RowVec = llvm::PoisonValue::get(ResultTy);
2136
2137 for (unsigned Col = 0; Col != NumColumns; ++Col) {
2138 Value *ColVal = llvm::ConstantInt::get(RowIdx->getType(), Col);
2139 bool IsMatrixRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
2140 LangOptions::MatrixMemoryLayout::MatrixRowMajor;
2141 Value *EltIdx = MB.CreateIndex(RowIdx, ColVal, NumRows, NumColumns,
2142 IsMatrixRowMajor, "matrix_row_idx");
2143 Value *Elt =
2144 Builder.CreateExtractElement(FlatMatrix, EltIdx, "matrix_elem");
2145 Value *Lane = llvm::ConstantInt::get(Builder.getInt32Ty(), Col);
2146 RowVec = Builder.CreateInsertElement(RowVec, Elt, Lane, "matrix_row_ins");
2147 }
2148
2149 return RowVec;
2150}
2151
2152Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
2153 TestAndClearIgnoreResultAssign();
2154
2155 // Handle the vector case. The base must be a vector, the index must be an
2156 // integer value.
2157 Value *RowIdx = CGF.EmitMatrixIndexExpr(E->getRowIdx());
2158 Value *ColumnIdx = CGF.EmitMatrixIndexExpr(E->getColumnIdx());
2159
2160 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2161 llvm::MatrixBuilder MB(Builder);
2162
2163 Value *Idx;
2164 unsigned NumCols = MatrixTy->getNumColumns();
2165 unsigned NumRows = MatrixTy->getNumRows();
2166 bool IsMatrixRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
2167 LangOptions::MatrixMemoryLayout::MatrixRowMajor;
2168 Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows, NumCols, IsMatrixRowMajor);
2169
2170 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2171 MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());
2172
2173 Value *Matrix = Visit(E->getBase());
2174
2175 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
2176 return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
2177}
2178
2179static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
2180 unsigned Off) {
2181 int MV = SVI->getMaskValue(Idx);
2182 if (MV == -1)
2183 return -1;
2184 return Off + MV;
2185}
2186
2187static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
2188 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
2189 "Index operand too large for shufflevector mask!");
2190 return C->getZExtValue();
2191}
2192
Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  (void)Ignore;
  unsigned NumInitElements = E->getNumInits();
  assert((Ignore == false ||
          (NumInitElements == 0 && E->getType()->isVoidType())) &&
         "init list ignored");

  // HLSL initialization lists in the AST are an expansion which can contain
  // side-effecting expressions wrapped in opaque value expressions. To properly
  // emit these we need to emit the opaque values before we emit the argument
  // expressions themselves. This is a little hacky, but it prevents us needing
  // to do a bigger AST-level change for a language feature that we need
  // deprecate in the near future. See related HLSL language proposals in the
  // proposals (https://github.com/microsoft/hlsl-specs/blob/main/proposals):
  // * 0005-strict-initializer-lists.md
  // * 0032-constructors.md
  // NOTE(review): the statement guarded by this `if` (presumably the HLSL
  // opaque-value emission call) appears to have been lost in extraction.
  if (CGF.getLangOpts().HLSL)

  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::VectorType *VType =
    dyn_cast<llvm::VectorType>(ConvertType(E->getType()));

  if (!VType) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the scalar.
      return EmitNullValue(E->getType());
    }
    // We have a scalar in braces. Just use the first element.
    return Visit(E->getInit(0));
  }

  if (isa<llvm::ScalableVectorType>(VType)) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the vector.
      return EmitNullValue(E->getType());
    }

    if (NumInitElements == 1) {
      Expr *InitVector = E->getInit(0);

      // Initialize from another scalable vector of the same type.
      // NOTE(review): the right-hand side of this comparison appears to have
      // been lost in extraction.
      if (InitVector->getType().getCanonicalType() ==
        return Visit(InitVector);
    }

    llvm_unreachable("Unexpected initialization of a scalable vector!");
  }

  unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();

  // Loop over initializers collecting the Value for each, and remembering
  // whether the source was swizzle (ExtVectorElementExpr). This will allow
  // us to fold the shuffle for the swizzle into the shuffle for the vector
  // initializer, since LLVM optimizers generally do not want to touch
  // shuffles.
  unsigned CurIdx = 0;
  bool VIsPoisonShuffle = false;
  llvm::Value *V = llvm::PoisonValue::get(VType);
  for (unsigned i = 0; i != NumInitElements; ++i) {
    Expr *IE = E->getInit(i);
    Value *Init = Visit(IE);
    SmallVector<int, 16> Args;

    llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());

    // Handle scalar elements. If the scalar initializer is actually one
    // element of a different vector of the same width, use shuffle instead of
    // extract+insert.
    if (!VVT) {
      if (isa<ExtVectorElementExpr>(IE)) {
        llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);

        if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
                ->getNumElements() == ResElts) {
          llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
          Value *LHS = nullptr, *RHS = nullptr;
          if (CurIdx == 0) {
            // insert into poison -> shuffle (src, poison)
            // shufflemask must use an i32
            Args.push_back(getAsInt32(C, CGF.Int32Ty));
            Args.resize(ResElts, -1);

            LHS = EI->getVectorOperand();
            RHS = V;
            VIsPoisonShuffle = true;
          } else if (VIsPoisonShuffle) {
            // insert into poison shuffle && size match -> shuffle (v, src)
            llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
            for (unsigned j = 0; j != CurIdx; ++j)
              Args.push_back(getMaskElt(SVV, j, 0));
            Args.push_back(ResElts + C->getZExtValue());
            Args.resize(ResElts, -1);

            LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
            RHS = EI->getVectorOperand();
            VIsPoisonShuffle = false;
          }
          if (!Args.empty()) {
            V = Builder.CreateShuffleVector(LHS, RHS, Args);
            ++CurIdx;
            continue;
          }
        }
      }
      // Plain scalar element: insert it at the current lane.
      V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
                                      "vecinit");
      VIsPoisonShuffle = false;
      ++CurIdx;
      continue;
    }

    unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();

    // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
    // input is the same width as the vector being constructed, generate an
    // optimized shuffle of the swizzle input into the result.
    unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
    if (isa<ExtVectorElementExpr>(IE)) {
      llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
      Value *SVOp = SVI->getOperand(0);
      auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());

      if (OpTy->getNumElements() == ResElts) {
        for (unsigned j = 0; j != CurIdx; ++j) {
          // If the current vector initializer is a shuffle with poison, merge
          // this shuffle directly into it.
          if (VIsPoisonShuffle) {
            Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
          } else {
            Args.push_back(j);
          }
        }
        for (unsigned j = 0, je = InitElts; j != je; ++j)
          Args.push_back(getMaskElt(SVI, j, Offset));
        Args.resize(ResElts, -1);

        if (VIsPoisonShuffle)
          V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);

        Init = SVOp;
      }
    }

    // Extend init to result vector length, and then shuffle its contribution
    // to the vector initializer into V.
    if (Args.empty()) {
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(j);
      Args.resize(ResElts, -1);
      Init = Builder.CreateShuffleVector(Init, Args, "vext");

      Args.clear();
      for (unsigned j = 0; j != CurIdx; ++j)
        Args.push_back(j);
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(j + Offset);
      Args.resize(ResElts, -1);
    }

    // If V is poison, make sure it ends up on the RHS of the shuffle to aid
    // merging subsequent shuffles into this one.
    if (CurIdx == 0)
      std::swap(V, Init);
    V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
    VIsPoisonShuffle = isa<llvm::PoisonValue>(Init);
    CurIdx += InitElts;
  }

  // FIXME: evaluate codegen vs. shuffling against constant null vector.
  // Emit remaining default initializers.
  llvm::Type *EltTy = VType->getElementType();

  // Emit remaining default initializers
  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
    Value *Idx = Builder.getInt32(CurIdx);
    llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
  }
  return V;
}
2378
  // NOTE(review): the enclosing function's signature appears to have been
  // lost in extraction. The visible body reports a declaration as known
  // non-null unless it is declared weak — presumably because a weak symbol
  // may resolve to null; confirm against the full source.
  return !D->isWeak();
}
2382
2383static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E) {
2384 E = E->IgnoreParens();
2385
2386 if (const auto *UO = dyn_cast<UnaryOperator>(E))
2387 if (UO->getOpcode() == UO_Deref)
2388 return CGF.isPointerKnownNonNull(UO->getSubExpr());
2389
2390 if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
2391 return isDeclRefKnownNonNull(CGF, DRE->getDecl());
2392
2393 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
2394 if (isa<FieldDecl>(ME->getMemberDecl()))
2395 return true;
2396 return isDeclRefKnownNonNull(CGF, ME->getMemberDecl());
2397 }
2398
2399 // Array subscripts? Anything else?
2400
2401 return false;
2402}
2403
  // NOTE(review): the function header for this definition (by its body,
  // presumably CodeGenFunction::isPointerKnownNonNull) appears to have been
  // lost in extraction; the body decides whether a pointer-typed expression
  // is statically known to be non-null.
  assert(E->getType()->isSignableType(getContext()));

  E = E->IgnoreParens();

  // 'this' is never null.
  if (isa<CXXThisExpr>(E))
    return true;

  // &lvalue is non-null when the lvalue itself is known non-null.
  if (const auto *UO = dyn_cast<UnaryOperator>(E))
    if (UO->getOpcode() == UO_AddrOf)
      return isLValueKnownNonNull(*this, UO->getSubExpr());

  // Decay of a function or array designates real storage.
  if (const auto *CE = dyn_cast<CastExpr>(E))
    if (CE->getCastKind() == CK_FunctionToPointerDecay ||
        CE->getCastKind() == CK_ArrayToPointerDecay)
      return isLValueKnownNonNull(*this, CE->getSubExpr());

  // Maybe honor __nonnull?

  return false;
}
2425
  // NOTE(review): the function header for this definition appears to have
  // been lost in extraction. By its body, it decides whether the operand of a
  // class-hierarchy cast needs a runtime null check; confirm the exact
  // signature against the full source.
  const Expr *E = CE->getSubExpr();

  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(E->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
    // And that glvalue casts are never null.
    if (ICE->isGLValue())
      return false;
  }

  return true;
}
2445
// RHS is an aggregate type
// NOTE(review): the first line of this function's signature appears to have
// been lost in extraction; the visible parameters are an aggregate source
// lvalue (SrcVal), the destination type, and a source location. The function
// flattens the aggregate into scalar loads and rebuilds the destination
// vector, matrix, or builtin scalar from them.
                                          QualType DestTy, SourceLocation Loc) {
  SmallVector<LValue, 16> LoadList;
  CGF.FlattenAccessAndTypeLValue(SrcVal, LoadList);
  // Dest is either a vector or a builtin?
  // if its a vector create a temp alloca to store into and return that
  if (auto *VecTy = DestTy->getAs<VectorType>()) {
    assert(LoadList.size() >= VecTy->getNumElements() &&
           "Flattened type on RHS must have the same number or more elements "
           "than vector on LHS.");
    llvm::Value *V =
        CGF.Builder.CreateLoad(CGF.CreateIRTemp(DestTy, "flatcast.tmp"));
    // write to V.
    for (unsigned I = 0, E = VecTy->getNumElements(); I < E; I++) {
      RValue RVal = CGF.EmitLoadOfLValue(LoadList[I], Loc);
      assert(RVal.isScalar() &&
             "All flattened source values should be scalars.");
      // Each flattened scalar is converted to the vector element type before
      // insertion.
      llvm::Value *Cast =
          CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[I].getType(),
                                   VecTy->getElementType(), Loc);
      V = CGF.Builder.CreateInsertElement(V, Cast, I);
    }
    return V;
  }
  if (auto *MatTy = DestTy->getAs<ConstantMatrixType>()) {
    assert(LoadList.size() >= MatTy->getNumElementsFlattened() &&
           "Flattened type on RHS must have the same number or more elements "
           "than vector on LHS.");

    llvm::Value *V =
        CGF.Builder.CreateLoad(CGF.CreateIRTemp(DestTy, "flatcast.tmp"));
    // V is an allocated temporary to build the truncated matrix into.
    for (unsigned I = 0, E = MatTy->getNumElementsFlattened(); I < E; I++) {
      // Remap the flattened destination index to the source's column-major
      // ordering before loading.
      unsigned ColMajorIndex =
          (I % MatTy->getNumRows()) * MatTy->getNumColumns() +
          (I / MatTy->getNumRows());
      RValue RVal = CGF.EmitLoadOfLValue(LoadList[ColMajorIndex], Loc);
      assert(RVal.isScalar() &&
             "All flattened source values should be scalars.");
      llvm::Value *Cast = CGF.EmitScalarConversion(
          RVal.getScalarVal(), LoadList[ColMajorIndex].getType(),
          MatTy->getElementType(), Loc);
      V = CGF.Builder.CreateInsertElement(V, Cast, I);
    }
    return V;
  }
  // if its a builtin just do an extract element or load.
  assert(DestTy->isBuiltinType() &&
         "Destination type must be a vector, matrix, or builtin type.");
  RValue RVal = CGF.EmitLoadOfLValue(LoadList[0], Loc);
  assert(RVal.isScalar() && "All flattened source values should be scalars.");
  return CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[0].getType(),
                                  DestTy, Loc);
}
2501
2502// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
2503// have to handle a more broad range of conversions than explicit casts, as they
2504// handle things like function to ptr-to-function decay etc.
2505Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
2506 llvm::scope_exit RestoreCurCast(
2507 [this, Prev = CGF.CurCast] { CGF.CurCast = Prev; });
2508 CGF.CurCast = CE;
2509
2510 Expr *E = CE->getSubExpr();
2511 QualType DestTy = CE->getType();
2512 CastKind Kind = CE->getCastKind();
2513 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);
2514
2515 // These cases are generally not written to ignore the result of
2516 // evaluating their sub-expressions, so we clear this now.
2517 bool Ignored = TestAndClearIgnoreResultAssign();
2518
2519 // Since almost all cast kinds apply to scalars, this switch doesn't have
2520 // a default case, so the compiler will warn on a missing case. The cases
2521 // are in the same order as in the CastKind enum.
2522 switch (Kind) {
2523 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2524 case CK_BuiltinFnToFnPtr:
2525 llvm_unreachable("builtin functions are handled elsewhere");
2526
2527 case CK_LValueBitCast:
2528 case CK_ObjCObjectLValueCast: {
2529 Address Addr = EmitLValue(E).getAddress();
2530 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2531 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
2532 return EmitLoadOfLValue(LV, CE->getExprLoc());
2533 }
2534
2535 case CK_LValueToRValueBitCast: {
2536 LValue SourceLVal = CGF.EmitLValue(E);
2537 Address Addr =
2538 SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy));
2539 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2540 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2541 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2542 }
2543
2544 case CK_CPointerToObjCPointerCast:
2545 case CK_BlockPointerToObjCPointerCast:
2546 case CK_AnyPointerToBlockPointerCast:
2547 case CK_BitCast: {
2548 Value *Src = Visit(E);
2549 llvm::Type *SrcTy = Src->getType();
2550 llvm::Type *DstTy = ConvertType(DestTy);
2551
2552 // FIXME: this is a gross but seemingly necessary workaround for an issue
2553 // manifesting when a target uses a non-default AS for indirect sret args,
2554 // but the source HLL is generic, wherein a valid C-cast or reinterpret_cast
2555 // on the address of a local struct that gets returned by value yields an
2556 // invalid bitcast from the a pointer to the IndirectAS to a pointer to the
2557 // DefaultAS. We can only do this subversive thing because sret args are
2558 // manufactured and them residing in the IndirectAS is a target specific
2559 // detail, and doing an AS cast here still retains the semantics the user
2560 // expects. It is desirable to remove this iff a better solution is found.
2561 if (auto A = dyn_cast<llvm::Argument>(Src); A && A->hasStructRetAttr())
2563 CGF, Src, E->getType().getAddressSpace(), DstTy);
2564
2565 assert(
2566 (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
2567 SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
2568 "Address-space cast must be used to convert address spaces");
2569
2570 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2571 if (auto *PT = DestTy->getAs<PointerType>()) {
2573 PT->getPointeeType(),
2574 Address(Src,
2576 E->getType()->castAs<PointerType>()->getPointeeType()),
2577 CGF.getPointerAlign()),
2578 /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
2579 CE->getBeginLoc());
2580 }
2581 }
2582
2583 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2584 const QualType SrcType = E->getType();
2585
2586 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2587 // Casting to pointer that could carry dynamic information (provided by
2588 // invariant.group) requires launder.
2589 Src = Builder.CreateLaunderInvariantGroup(Src);
2590 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2591 // Casting to pointer that does not carry dynamic information (provided
2592 // by invariant.group) requires stripping it. Note that we don't do it
2593 // if the source could not be dynamic type and destination could be
2594 // dynamic because dynamic information is already laundered. It is
2595 // because launder(strip(src)) == launder(src), so there is no need to
2596 // add extra strip before launder.
2597 Src = Builder.CreateStripInvariantGroup(Src);
2598 }
2599 }
2600
2601 // Update heapallocsite metadata when there is an explicit pointer cast.
2602 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
2603 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
2604 !isa<CastExpr>(E)) {
2605 QualType PointeeType = DestTy->getPointeeType();
2606 if (!PointeeType.isNull())
2607 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
2608 CE->getExprLoc());
2609 }
2610 }
2611
2612 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2613 // same element type, use the llvm.vector.insert intrinsic to perform the
2614 // bitcast.
2615 if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2616 if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2617 // If we are casting a fixed i8 vector to a scalable i1 predicate
2618 // vector, use a vector insert and bitcast the result.
2619 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
2620 FixedSrcTy->getElementType()->isIntegerTy(8)) {
2621 ScalableDstTy = llvm::ScalableVectorType::get(
2622 FixedSrcTy->getElementType(),
2623 llvm::divideCeil(
2624 ScalableDstTy->getElementCount().getKnownMinValue(), 8));
2625 }
2626 if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
2627 llvm::Value *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
2628 llvm::Value *Result = Builder.CreateInsertVector(
2629 ScalableDstTy, PoisonVec, Src, uint64_t(0), "cast.scalable");
2630 ScalableDstTy = cast<llvm::ScalableVectorType>(
2631 llvm::VectorType::getWithSizeAndScalar(ScalableDstTy, DstTy));
2632 if (Result->getType() != ScalableDstTy)
2633 Result = Builder.CreateBitCast(Result, ScalableDstTy);
2634 if (Result->getType() != DstTy)
2635 Result = Builder.CreateExtractVector(DstTy, Result, uint64_t(0));
2636 return Result;
2637 }
2638 }
2639 }
2640
2641 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2642 // same element type, use the llvm.vector.extract intrinsic to perform the
2643 // bitcast.
2644 if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
2645 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2646 // If we are casting a scalable i1 predicate vector to a fixed i8
2647 // vector, bitcast the source and use a vector extract.
2648 if (ScalableSrcTy->getElementType()->isIntegerTy(1) &&
2649 FixedDstTy->getElementType()->isIntegerTy(8)) {
2650 if (!ScalableSrcTy->getElementCount().isKnownMultipleOf(8)) {
2651 ScalableSrcTy = llvm::ScalableVectorType::get(
2652 ScalableSrcTy->getElementType(),
2653 llvm::alignTo<8>(
2654 ScalableSrcTy->getElementCount().getKnownMinValue()));
2655 llvm::Value *ZeroVec = llvm::Constant::getNullValue(ScalableSrcTy);
2656 Src = Builder.CreateInsertVector(ScalableSrcTy, ZeroVec, Src,
2657 uint64_t(0));
2658 }
2659
2660 ScalableSrcTy = llvm::ScalableVectorType::get(
2661 FixedDstTy->getElementType(),
2662 ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
2663 Src = Builder.CreateBitCast(Src, ScalableSrcTy);
2664 }
2665 if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType())
2666 return Builder.CreateExtractVector(DstTy, Src, uint64_t(0),
2667 "cast.fixed");
2668 }
2669 }
2670
2671 // Perform VLAT <-> VLST bitcast through memory.
2672 // TODO: since the llvm.vector.{insert,extract} intrinsics
2673 // require the element types of the vectors to be the same, we
2674 // need to keep this around for bitcasts between VLAT <-> VLST where
2675 // the element types of the vectors are not the same, until we figure
2676 // out a better way of doing these casts.
2677 if ((isa<llvm::FixedVectorType>(SrcTy) &&
2681 Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
2682 LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
2683 CGF.EmitStoreOfScalar(Src, LV);
2684 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2685 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2686 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2687 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2688 }
2689
2690 llvm::Value *Result = Builder.CreateBitCast(Src, DstTy);
2691 return CGF.authPointerToPointerCast(Result, E->getType(), DestTy);
2692 }
2693 case CK_AddressSpaceConversion: {
2694 Expr::EvalResult Result;
2695 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2696 Result.Val.isNullPointer()) {
2697 // If E has side effect, it is emitted even if its final result is a
2698 // null pointer. In that case, a DCE pass should be able to
2699 // eliminate the useless instructions emitted during translating E.
2700 if (Result.HasSideEffects)
2701 Visit(E);
2703 ConvertType(DestTy)), DestTy);
2704 }
2705 // Since target may map different address spaces in AST to the same address
2706 // space, an address space conversion may end up as a bitcast.
2708 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2709 ConvertType(DestTy));
2710 }
2711 case CK_AtomicToNonAtomic:
2712 case CK_NonAtomicToAtomic:
2713 case CK_UserDefinedConversion:
2714 return Visit(E);
2715
2716 case CK_NoOp: {
2717 return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE) : Visit(E);
2718 }
2719
2720 case CK_BaseToDerived: {
2721 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2722 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2723
2724 Address Base = CGF.EmitPointerWithAlignment(E);
2725 Address Derived =
2726 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2727 CE->path_begin(), CE->path_end(),
2729
2730 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2731 // performed and the object is not of the derived type.
2732 if (CGF.sanitizePerformTypeCheck())
2734 Derived, DestTy->getPointeeType());
2735
2736 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2737 CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
2738 /*MayBeNull=*/true,
2740 CE->getBeginLoc());
2741
2742 return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType());
2743 }
2744 case CK_UncheckedDerivedToBase:
2745 case CK_DerivedToBase: {
2746 // The EmitPointerWithAlignment path does this fine; just discard
2747 // the alignment.
2749 CE->getType()->getPointeeType());
2750 }
2751
2752 case CK_Dynamic: {
2753 Address V = CGF.EmitPointerWithAlignment(E);
2754 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2755 return CGF.EmitDynamicCast(V, DCE);
2756 }
2757
2758 case CK_ArrayToPointerDecay:
2760 CE->getType()->getPointeeType());
2761 case CK_FunctionToPointerDecay:
2762 return EmitLValue(E).getPointer(CGF);
2763
2764 case CK_NullToPointer:
2765 if (MustVisitNullValue(E))
2766 CGF.EmitIgnoredExpr(E);
2767
2768 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2769 DestTy);
2770
2771 case CK_NullToMemberPointer: {
2772 if (MustVisitNullValue(E))
2773 CGF.EmitIgnoredExpr(E);
2774
2775 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2776 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2777 }
2778
2779 case CK_ReinterpretMemberPointer:
2780 case CK_BaseToDerivedMemberPointer:
2781 case CK_DerivedToBaseMemberPointer: {
2782 Value *Src = Visit(E);
2783
2784 // Note that the AST doesn't distinguish between checked and
2785 // unchecked member pointer conversions, so we always have to
2786 // implement checked conversions here. This is inefficient when
2787 // actual control flow may be required in order to perform the
2788 // check, which it is for data member pointers (but not member
2789 // function pointers on Itanium and ARM).
2790 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2791 }
2792
2793 case CK_ARCProduceObject:
2794 return CGF.EmitARCRetainScalarExpr(E);
2795 case CK_ARCConsumeObject:
2796 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2797 case CK_ARCReclaimReturnedObject:
2798 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2799 case CK_ARCExtendBlockObject:
2800 return CGF.EmitARCExtendBlockObject(E);
2801
2802 case CK_CopyAndAutoreleaseBlockObject:
2803 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2804
2805 case CK_FloatingRealToComplex:
2806 case CK_FloatingComplexCast:
2807 case CK_IntegralRealToComplex:
2808 case CK_IntegralComplexCast:
2809 case CK_IntegralComplexToFloatingComplex:
2810 case CK_FloatingComplexToIntegralComplex:
2811 case CK_ConstructorConversion:
2812 case CK_ToUnion:
2813 case CK_HLSLArrayRValue:
2814 llvm_unreachable("scalar cast to non-scalar value");
2815
2816 case CK_LValueToRValue:
2817 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2818 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2819 return Visit(E);
2820
2821 case CK_IntegralToPointer: {
2822 Value *Src = Visit(E);
2823
2824 // First, convert to the correct width so that we control the kind of
2825 // extension.
2826 auto DestLLVMTy = ConvertType(DestTy);
2827 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2828 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2829 llvm::Value* IntResult =
2830 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2831
2832 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2833
2834 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2835 // Going from integer to pointer that could be dynamic requires reloading
2836 // dynamic information from invariant.group.
2837 if (DestTy.mayBeDynamicClass())
2838 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2839 }
2840
2841 IntToPtr = CGF.authPointerToPointerCast(IntToPtr, E->getType(), DestTy);
2842 return IntToPtr;
2843 }
2844 case CK_PointerToIntegral: {
2845 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2846 auto *PtrExpr = Visit(E);
2847
2848 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2849 const QualType SrcType = E->getType();
2850
2851 // Casting to integer requires stripping dynamic information as it does
2852 // not carries it.
2853 if (SrcType.mayBeDynamicClass())
2854 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2855 }
2856
2857 PtrExpr = CGF.authPointerToPointerCast(PtrExpr, E->getType(), DestTy);
2858 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2859 }
2860 case CK_ToVoid: {
2861 CGF.EmitIgnoredExpr(E);
2862 return nullptr;
2863 }
2864 case CK_MatrixCast: {
2865 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2866 CE->getExprLoc());
2867 }
2868 // CK_HLSLAggregateSplatCast only handles splatting to vectors from a vec1
2869 // Casts were inserted in Sema to Cast the Src Expr to a Scalar and
2870 // To perform any necessary Scalar Cast, so this Cast can be handled
2871 // by the regular Vector Splat cast code.
2872 case CK_HLSLAggregateSplatCast:
2873 case CK_VectorSplat: {
2874 llvm::Type *DstTy = ConvertType(DestTy);
2875 Value *Elt = Visit(E);
2876 // Splat the element across to all elements
2877 llvm::ElementCount NumElements =
2878 cast<llvm::VectorType>(DstTy)->getElementCount();
2879 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2880 }
2881
2882 case CK_FixedPointCast:
2883 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2884 CE->getExprLoc());
2885
2886 case CK_FixedPointToBoolean:
2887 assert(E->getType()->isFixedPointType() &&
2888 "Expected src type to be fixed point type");
2889 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2890 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2891 CE->getExprLoc());
2892
2893 case CK_FixedPointToIntegral:
2894 assert(E->getType()->isFixedPointType() &&
2895 "Expected src type to be fixed point type");
2896 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2897 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2898 CE->getExprLoc());
2899
2900 case CK_IntegralToFixedPoint:
2901 assert(E->getType()->isIntegerType() &&
2902 "Expected src type to be an integer");
2903 assert(DestTy->isFixedPointType() &&
2904 "Expected dest type to be fixed point type");
2905 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2906 CE->getExprLoc());
2907
2908 case CK_IntegralCast: {
2909 if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
2910 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2911 return Builder.CreateIntCast(Visit(E), ConvertType(DestTy),
2913 "conv");
2914 }
2915 ScalarConversionOpts Opts;
2916 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2917 if (!ICE->isPartOfExplicitCast())
2918 Opts = ScalarConversionOpts(CGF.SanOpts);
2919 }
2920 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2921 CE->getExprLoc(), Opts);
2922 }
2923 case CK_IntegralToFloating: {
2924 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2925 // TODO: Support constrained FP intrinsics.
2926 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2927 if (SrcElTy->isSignedIntegerOrEnumerationType())
2928 return Builder.CreateSIToFP(Visit(E), ConvertType(DestTy), "conv");
2929 return Builder.CreateUIToFP(Visit(E), ConvertType(DestTy), "conv");
2930 }
2931 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2932 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2933 CE->getExprLoc());
2934 }
2935 case CK_FloatingToIntegral: {
2936 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2937 // TODO: Support constrained FP intrinsics.
2938 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2939 if (DstElTy->isSignedIntegerOrEnumerationType())
2940 return Builder.CreateFPToSI(Visit(E), ConvertType(DestTy), "conv");
2941 return Builder.CreateFPToUI(Visit(E), ConvertType(DestTy), "conv");
2942 }
2943 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2944 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2945 CE->getExprLoc());
2946 }
2947 case CK_FloatingCast: {
2948 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2949 // TODO: Support constrained FP intrinsics.
2950 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2951 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2952 if (DstElTy->castAs<BuiltinType>()->getKind() <
2953 SrcElTy->castAs<BuiltinType>()->getKind())
2954 return Builder.CreateFPTrunc(Visit(E), ConvertType(DestTy), "conv");
2955 return Builder.CreateFPExt(Visit(E), ConvertType(DestTy), "conv");
2956 }
2957 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2958 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2959 CE->getExprLoc());
2960 }
2961 case CK_FixedPointToFloating:
2962 case CK_FloatingToFixedPoint: {
2963 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2964 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2965 CE->getExprLoc());
2966 }
2967 case CK_BooleanToSignedIntegral: {
2968 ScalarConversionOpts Opts;
2969 Opts.TreatBooleanAsSigned = true;
2970 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2971 CE->getExprLoc(), Opts);
2972 }
2973 case CK_IntegralToBoolean:
2974 return EmitIntToBoolConversion(Visit(E));
2975 case CK_PointerToBoolean:
2976 return EmitPointerToBoolConversion(Visit(E), E->getType());
2977 case CK_FloatingToBoolean: {
2978 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2979 return EmitFloatToBoolConversion(Visit(E));
2980 }
2981 case CK_MemberPointerToBoolean: {
2982 llvm::Value *MemPtr = Visit(E);
2983 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2984 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2985 }
2986
2987 case CK_FloatingComplexToReal:
2988 case CK_IntegralComplexToReal:
2989 return CGF.EmitComplexExpr(E, false, true).first;
2990
2991 case CK_FloatingComplexToBoolean:
2992 case CK_IntegralComplexToBoolean: {
2994
2995 // TODO: kill this function off, inline appropriate case here
2996 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2997 CE->getExprLoc());
2998 }
2999
3000 case CK_ZeroToOCLOpaqueType: {
3001 assert((DestTy->isEventT() || DestTy->isQueueT() ||
3002 DestTy->isOCLIntelSubgroupAVCType()) &&
3003 "CK_ZeroToOCLEvent cast on non-event type");
3004 return llvm::Constant::getNullValue(ConvertType(DestTy));
3005 }
3006
3007 case CK_IntToOCLSampler:
3008 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
3009
3010 case CK_HLSLVectorTruncation: {
3011 assert((DestTy->isVectorType() || DestTy->isBuiltinType()) &&
3012 "Destination type must be a vector or builtin type.");
3013 Value *Vec = Visit(E);
3014 if (auto *VecTy = DestTy->getAs<VectorType>()) {
3015 SmallVector<int> Mask;
3016 unsigned NumElts = VecTy->getNumElements();
3017 for (unsigned I = 0; I != NumElts; ++I)
3018 Mask.push_back(I);
3019
3020 return Builder.CreateShuffleVector(Vec, Mask, "trunc");
3021 }
3022 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy);
3023 return Builder.CreateExtractElement(Vec, Zero, "cast.vtrunc");
3024 }
3025 case CK_HLSLMatrixTruncation: {
3026 assert((DestTy->isMatrixType() || DestTy->isBuiltinType()) &&
3027 "Destination type must be a matrix or builtin type.");
3028 Value *Mat = Visit(E);
3029 if (auto *MatTy = DestTy->getAs<ConstantMatrixType>()) {
3030 SmallVector<int> Mask;
3031 unsigned NumCols = MatTy->getNumColumns();
3032 unsigned NumRows = MatTy->getNumRows();
3033 unsigned ColOffset = NumCols;
3034 if (auto *SrcMatTy = E->getType()->getAs<ConstantMatrixType>())
3035 ColOffset = SrcMatTy->getNumColumns();
3036 for (unsigned R = 0; R < NumRows; R++) {
3037 for (unsigned C = 0; C < NumCols; C++) {
3038 unsigned I = R * ColOffset + C;
3039 Mask.push_back(I);
3040 }
3041 }
3042
3043 return Builder.CreateShuffleVector(Mat, Mask, "trunc");
3044 }
3045 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy);
3046 return Builder.CreateExtractElement(Mat, Zero, "cast.mtrunc");
3047 }
3048 case CK_HLSLElementwiseCast: {
3049 RValue RV = CGF.EmitAnyExpr(E);
3050 SourceLocation Loc = CE->getExprLoc();
3051
3052 Address SrcAddr = Address::invalid();
3053
3054 if (RV.isAggregate()) {
3055 SrcAddr = RV.getAggregateAddress();
3056 } else {
3057 SrcAddr = CGF.CreateMemTemp(E->getType(), "hlsl.ewcast.src");
3058 LValue TmpLV = CGF.MakeAddrLValue(SrcAddr, E->getType());
3059 CGF.EmitStoreThroughLValue(RV, TmpLV);
3060 }
3061
3062 LValue SrcVal = CGF.MakeAddrLValue(SrcAddr, E->getType());
3063 return EmitHLSLElementwiseCast(CGF, SrcVal, DestTy, Loc);
3064 }
3065
3066 } // end of switch
3067
3068 llvm_unreachable("unknown scalar cast");
3069}
3070
3071Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
3072 CodeGenFunction::StmtExprEvaluation eval(CGF);
3073 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
3074 !E->getType()->isVoidType());
3075 if (!RetAlloca.isValid())
3076 return nullptr;
3077 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
3078 E->getExprLoc());
3079}
3080
3081Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
3082 CodeGenFunction::RunCleanupsScope Scope(CGF);
3083 Value *V = Visit(E->getSubExpr());
3084 // Defend against dominance problems caused by jumps out of expression
3085 // evaluation through the shared cleanup block.
3086 Scope.ForceCleanup({&V});
3087 return V;
3088}
3089
3090//===----------------------------------------------------------------------===//
3091// Unary Operators
3092//===----------------------------------------------------------------------===//
3093
3095 llvm::Value *InVal, bool IsInc,
3096 FPOptions FPFeatures) {
3097 BinOpInfo BinOp;
3098 BinOp.LHS = InVal;
3099 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
3100 BinOp.Ty = E->getType();
3101 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
3102 BinOp.FPFeatures = FPFeatures;
3103 BinOp.E = E;
3104 return BinOp;
3105}
3106
3107llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
3108 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
3109 llvm::Value *Amount =
3110 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
3111 StringRef Name = IsInc ? "inc" : "dec";
3112 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3113 case LangOptions::SOB_Defined:
3114 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3115 return Builder.CreateAdd(InVal, Amount, Name);
3116 [[fallthrough]];
3117 case LangOptions::SOB_Undefined:
3118 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3119 return Builder.CreateNSWAdd(InVal, Amount, Name);
3120 [[fallthrough]];
3121 case LangOptions::SOB_Trapping:
3122 BinOpInfo Info = createBinOpInfoFromIncDec(
3123 E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts()));
3124 if (!E->canOverflow() || CanElideOverflowCheck(CGF.getContext(), Info))
3125 return Builder.CreateNSWAdd(InVal, Amount, Name);
3126 return EmitOverflowCheckedBinOp(Info);
3127 }
3128 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
3129}
3130
3131/// For the purposes of overflow pattern exclusion, does this match the
3132/// "while(i--)" pattern?
3133static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc,
3134 bool isPre, ASTContext &Ctx) {
3135 if (isInc || isPre)
3136 return false;
3137
3138 // -fsanitize-undefined-ignore-overflow-pattern=unsigned-post-decr-while
3141 return false;
3142
3143 // all Parents (usually just one) must be a WhileStmt
3144 for (const auto &Parent : Ctx.getParentMapContext().getParents(*UO))
3145 if (!Parent.get<WhileStmt>())
3146 return false;
3147
3148 return true;
3149}
3150
3151namespace {
3152/// Handles check and update for lastprivate conditional variables.
3153class OMPLastprivateConditionalUpdateRAII {
3154private:
3155 CodeGenFunction &CGF;
3156 const UnaryOperator *E;
3157
3158public:
3159 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
3160 const UnaryOperator *E)
3161 : CGF(CGF), E(E) {}
3162 ~OMPLastprivateConditionalUpdateRAII() {
3163 if (CGF.getLangOpts().OpenMP)
3165 CGF, E->getSubExpr());
3166 }
3167};
3168} // namespace
3169
3170llvm::Value *
3171ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
3172 bool isInc, bool isPre) {
3173 ApplyAtomGroup Grp(CGF.getDebugInfo());
3174 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
3175 QualType type = E->getSubExpr()->getType();
3176 llvm::PHINode *atomicPHI = nullptr;
3177 llvm::Value *value;
3178 llvm::Value *input;
3179 llvm::Value *Previous = nullptr;
3180 QualType SrcType = E->getType();
3181
3182 int amount = (isInc ? 1 : -1);
3183 bool isSubtraction = !isInc;
3184
3185 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
3186 type = atomicTy->getValueType();
3187 if (isInc && type->isBooleanType()) {
3188 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
3189 if (isPre) {
3190 Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
3191 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
3192 return Builder.getTrue();
3193 }
3194 // For atomic bool increment, we just store true and return it for
3195 // preincrement, do an atomic swap with true for postincrement
3196 return Builder.CreateAtomicRMW(
3197 llvm::AtomicRMWInst::Xchg, LV.getAddress(), True,
3198 llvm::AtomicOrdering::SequentiallyConsistent);
3199 }
3200 // Special case for atomic increment / decrement on integers, emit
3201 // atomicrmw instructions. We skip this if we want to be doing overflow
3202 // checking, and fall into the slow path with the atomic cmpxchg loop.
3203 if (!type->isBooleanType() && type->isIntegerType() &&
3204 !(type->isUnsignedIntegerType() &&
3205 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3206 CGF.getLangOpts().getSignedOverflowBehavior() !=
3207 LangOptions::SOB_Trapping) {
3208 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
3209 llvm::AtomicRMWInst::Sub;
3210 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
3211 llvm::Instruction::Sub;
3212 llvm::Value *amt = CGF.EmitToMemory(
3213 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
3214 llvm::Value *old =
3215 Builder.CreateAtomicRMW(aop, LV.getAddress(), amt,
3216 llvm::AtomicOrdering::SequentiallyConsistent);
3217 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
3218 }
3219 // Special case for atomic increment/decrement on floats.
3220 // Bail out non-power-of-2-sized floating point types (e.g., x86_fp80).
3221 if (type->isFloatingType()) {
3222 llvm::Type *Ty = ConvertType(type);
3223 if (llvm::has_single_bit(Ty->getScalarSizeInBits())) {
3224 llvm::AtomicRMWInst::BinOp aop =
3225 isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
3226 llvm::Instruction::BinaryOps op =
3227 isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
3228 llvm::Value *amt = llvm::ConstantFP::get(Ty, 1.0);
3229 llvm::AtomicRMWInst *old =
3230 CGF.emitAtomicRMWInst(aop, LV.getAddress(), amt,
3231 llvm::AtomicOrdering::SequentiallyConsistent);
3232
3233 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
3234 }
3235 }
3236 value = EmitLoadOfLValue(LV, E->getExprLoc());
3237 input = value;
3238 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
3239 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3240 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3241 value = CGF.EmitToMemory(value, type);
3242 Builder.CreateBr(opBB);
3243 Builder.SetInsertPoint(opBB);
3244 atomicPHI = Builder.CreatePHI(value->getType(), 2);
3245 atomicPHI->addIncoming(value, startBB);
3246 value = atomicPHI;
3247 } else {
3248 value = EmitLoadOfLValue(LV, E->getExprLoc());
3249 input = value;
3250 }
3251
3252 // Special case of integer increment that we have to check first: bool++.
3253 // Due to promotion rules, we get:
3254 // bool++ -> bool = bool + 1
3255 // -> bool = (int)bool + 1
3256 // -> bool = ((int)bool + 1 != 0)
3257 // An interesting aspect of this is that increment is always true.
3258 // Decrement does not have this property.
3259 if (isInc && type->isBooleanType()) {
3260 value = Builder.getTrue();
3261
3262 // Most common case by far: integer increment.
3263 } else if (type->isIntegerType()) {
3264 QualType promotedType;
3265 bool canPerformLossyDemotionCheck = false;
3266
3267 bool excludeOverflowPattern =
3268 matchesPostDecrInWhile(E, isInc, isPre, CGF.getContext());
3269
3271 promotedType = CGF.getContext().getPromotedIntegerType(type);
3272 assert(promotedType != type && "Shouldn't promote to the same type.");
3273 canPerformLossyDemotionCheck = true;
3274 canPerformLossyDemotionCheck &=
3276 CGF.getContext().getCanonicalType(promotedType);
3277 canPerformLossyDemotionCheck &=
3279 type, promotedType);
3280 assert((!canPerformLossyDemotionCheck ||
3281 type->isSignedIntegerOrEnumerationType() ||
3282 promotedType->isSignedIntegerOrEnumerationType() ||
3283 ConvertType(type)->getScalarSizeInBits() ==
3284 ConvertType(promotedType)->getScalarSizeInBits()) &&
3285 "The following check expects that if we do promotion to different "
3286 "underlying canonical type, at least one of the types (either "
3287 "base or promoted) will be signed, or the bitwidths will match.");
3288 }
3289 if (CGF.SanOpts.hasOneOf(
3290 SanitizerKind::ImplicitIntegerArithmeticValueChange |
3291 SanitizerKind::ImplicitBitfieldConversion) &&
3292 canPerformLossyDemotionCheck) {
3293 // While `x += 1` (for `x` with width less than int) is modeled as
3294 // promotion+arithmetics+demotion, and we can catch lossy demotion with
3295 // ease; inc/dec with width less than int can't overflow because of
3296 // promotion rules, so we omit promotion+demotion, which means that we can
3297 // not catch lossy "demotion". Because we still want to catch these cases
3298 // when the sanitizer is enabled, we perform the promotion, then perform
3299 // the increment/decrement in the wider type, and finally
3300 // perform the demotion. This will catch lossy demotions.
3301
3302 // We have a special case for bitfields defined using all the bits of the
3303 // type. In this case we need to do the same trick as for the integer
3304 // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.
3305
3306 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
3307 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
3308 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3309 // Do pass non-default ScalarConversionOpts so that sanitizer check is
3310 // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
3311 // checks will take care of the conversion.
3312 ScalarConversionOpts Opts;
3313 if (!LV.isBitField())
3314 Opts = ScalarConversionOpts(CGF.SanOpts);
3315 else if (CGF.SanOpts.has(SanitizerKind::ImplicitBitfieldConversion)) {
3316 Previous = value;
3317 SrcType = promotedType;
3318 }
3319
3320 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
3321 Opts);
3322
3323 // Note that signed integer inc/dec with width less than int can't
3324 // overflow because of promotion rules; we're just eliding a few steps
3325 // here.
3326 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
3327 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
3328 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
3329 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3330 !excludeOverflowPattern &&
3332 SanitizerKind::UnsignedIntegerOverflow, E->getType())) {
3333 value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
3334 E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
3335 } else {
3336 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
3337 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3338 }
3339
3340 // Next most common: pointer increment.
3341 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
3342 QualType type = ptr->getPointeeType();
3343
3344 // VLA types don't have constant size.
3345 if (const VariableArrayType *vla
3347 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
3348 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
3349 llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
3350 if (CGF.getLangOpts().PointerOverflowDefined)
3351 value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
3352 else
3353 value = CGF.EmitCheckedInBoundsGEP(
3354 elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
3355 E->getExprLoc(), "vla.inc");
3356
3357 // Arithmetic on function pointers (!) is just +-1.
3358 } else if (type->isFunctionType()) {
3359 llvm::Value *amt = Builder.getInt32(amount);
3360
3361 if (CGF.getLangOpts().PointerOverflowDefined)
3362 value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
3363 else
3364 value =
3365 CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
3366 /*SignedIndices=*/false, isSubtraction,
3367 E->getExprLoc(), "incdec.funcptr");
3368
3369 // For everything else, we can just do a simple increment.
3370 } else {
3371 llvm::Value *amt = Builder.getInt32(amount);
3372 llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
3373 if (CGF.getLangOpts().PointerOverflowDefined)
3374 value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
3375 else
3376 value = CGF.EmitCheckedInBoundsGEP(
3377 elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
3378 E->getExprLoc(), "incdec.ptr");
3379 }
3380
3381 // Vector increment/decrement.
3382 } else if (type->isVectorType()) {
3383 if (type->hasIntegerRepresentation()) {
3384 llvm::Value *amt = llvm::ConstantInt::getSigned(value->getType(), amount);
3385
3386 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3387 } else {
3388 value = Builder.CreateFAdd(
3389 value,
3390 llvm::ConstantFP::get(value->getType(), amount),
3391 isInc ? "inc" : "dec");
3392 }
3393
3394 // Floating point.
3395 } else if (type->isRealFloatingType()) {
3396 // Add the inc/dec to the real part.
3397 llvm::Value *amt;
3398 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
3399
3400 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3401 // Another special case: half FP increment should be done via float
3403 value = Builder.CreateCall(
3404 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
3405 CGF.CGM.FloatTy),
3406 input, "incdec.conv");
3407 } else {
3408 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
3409 }
3410 }
3411
3412 if (value->getType()->isFloatTy())
3413 amt = llvm::ConstantFP::get(VMContext,
3414 llvm::APFloat(static_cast<float>(amount)));
3415 else if (value->getType()->isDoubleTy())
3416 amt = llvm::ConstantFP::get(VMContext,
3417 llvm::APFloat(static_cast<double>(amount)));
3418 else {
3419 // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
3420 // Convert from float.
3421 llvm::APFloat F(static_cast<float>(amount));
3422 bool ignored;
3423 const llvm::fltSemantics *FS;
3424 // Don't use getFloatTypeSemantics because Half isn't
3425 // necessarily represented using the "half" LLVM type.
3426 if (value->getType()->isFP128Ty())
3427 FS = &CGF.getTarget().getFloat128Format();
3428 else if (value->getType()->isHalfTy())
3429 FS = &CGF.getTarget().getHalfFormat();
3430 else if (value->getType()->isBFloatTy())
3431 FS = &CGF.getTarget().getBFloat16Format();
3432 else if (value->getType()->isPPC_FP128Ty())
3433 FS = &CGF.getTarget().getIbm128Format();
3434 else
3435 FS = &CGF.getTarget().getLongDoubleFormat();
3436 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
3437 amt = llvm::ConstantFP::get(VMContext, F);
3438 }
3439 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
3440
3441 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3443 value = Builder.CreateCall(
3444 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
3445 CGF.CGM.FloatTy),
3446 value, "incdec.conv");
3447 } else {
3448 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
3449 }
3450 }
3451
3452 // Fixed-point types.
3453 } else if (type->isFixedPointType()) {
3454 // Fixed-point types are tricky. In some cases, it isn't possible to
3455 // represent a 1 or a -1 in the type at all. Piggyback off of
3456 // EmitFixedPointBinOp to avoid having to reimplement saturation.
3457 BinOpInfo Info;
3458 Info.E = E;
3459 Info.Ty = E->getType();
3460 Info.Opcode = isInc ? BO_Add : BO_Sub;
3461 Info.LHS = value;
3462 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
3463 // If the type is signed, it's better to represent this as +(-1) or -(-1),
3464 // since -1 is guaranteed to be representable.
3465 if (type->isSignedFixedPointType()) {
3466 Info.Opcode = isInc ? BO_Sub : BO_Add;
3467 Info.RHS = Builder.CreateNeg(Info.RHS);
3468 }
3469 // Now, convert from our invented integer literal to the type of the unary
3470 // op. This will upscale and saturate if necessary. This value can become
3471 // undef in some cases.
3472 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3473 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
3474 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
3475 value = EmitFixedPointBinOp(Info);
3476
3477 // Objective-C pointer types.
3478 } else {
3479 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
3480
3481 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
3482 if (!isInc) size = -size;
3483 llvm::Value *sizeValue =
3484 llvm::ConstantInt::getSigned(CGF.SizeTy, size.getQuantity());
3485
3486 if (CGF.getLangOpts().PointerOverflowDefined)
3487 value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
3488 else
3489 value = CGF.EmitCheckedInBoundsGEP(
3490 CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
3491 E->getExprLoc(), "incdec.objptr");
3492 value = Builder.CreateBitCast(value, input->getType());
3493 }
3494
3495 if (atomicPHI) {
3496 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3497 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3498 auto Pair = CGF.EmitAtomicCompareExchange(
3499 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
3500 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
3501 llvm::Value *success = Pair.second;
3502 atomicPHI->addIncoming(old, curBlock);
3503 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3504 Builder.SetInsertPoint(contBB);
3505 return isPre ? value : input;
3506 }
3507
3508 // Store the updated result through the lvalue.
3509 if (LV.isBitField()) {
3510 Value *Src = Previous ? Previous : value;
3511 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
3512 CGF.EmitBitfieldConversionCheck(Src, SrcType, value, E->getType(),
3513 LV.getBitFieldInfo(), E->getExprLoc());
3514 } else
3515 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
3516
3517 // If this is a postinc, return the value read from memory, otherwise use the
3518 // updated value.
3519 return isPre ? value : input;
3520}
3521
3522
3523Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3524 QualType PromotionType) {
3525 QualType promotionTy = PromotionType.isNull()
3526 ? getPromotionType(E->getSubExpr()->getType())
3527 : PromotionType;
3528 Value *result = VisitPlus(E, promotionTy);
3529 if (result && !promotionTy.isNull())
3530 result = EmitUnPromotedValue(result, E->getType());
3531 return result;
3532}
3533
3534Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3535 QualType PromotionType) {
3536 // This differs from gcc, though, most likely due to a bug in gcc.
3537 TestAndClearIgnoreResultAssign();
3538 if (!PromotionType.isNull())
3539 return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3540 return Visit(E->getSubExpr());
3541}
3542
3543Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3544 QualType PromotionType) {
3545 QualType promotionTy = PromotionType.isNull()
3546 ? getPromotionType(E->getSubExpr()->getType())
3547 : PromotionType;
3548 Value *result = VisitMinus(E, promotionTy);
3549 if (result && !promotionTy.isNull())
3550 result = EmitUnPromotedValue(result, E->getType());
3551 return result;
3552}
3553
3554Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3555 QualType PromotionType) {
3556 TestAndClearIgnoreResultAssign();
3557 Value *Op;
3558 if (!PromotionType.isNull())
3559 Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3560 else
3561 Op = Visit(E->getSubExpr());
3562
3563 // Generate a unary FNeg for FP ops.
3564 if (Op->getType()->isFPOrFPVectorTy())
3565 return Builder.CreateFNeg(Op, "fneg");
3566
3567 // Emit unary minus with EmitSub so we handle overflow cases etc.
3568 BinOpInfo BinOp;
3569 BinOp.RHS = Op;
3570 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
3571 BinOp.Ty = E->getType();
3572 BinOp.Opcode = BO_Sub;
3573 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3574 BinOp.E = E;
3575 return EmitSub(BinOp);
3576}
3577
3578Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3579 TestAndClearIgnoreResultAssign();
3580 Value *Op = Visit(E->getSubExpr());
3581 return Builder.CreateNot(Op, "not");
3582}
3583
3584Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
3585 // Perform vector logical not on comparison with zero vector.
3586 if (E->getType()->isVectorType() &&
3587 E->getType()->castAs<VectorType>()->getVectorKind() ==
3588 VectorKind::Generic) {
3589 Value *Oper = Visit(E->getSubExpr());
3590 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
3591 Value *Result;
3592 if (Oper->getType()->isFPOrFPVectorTy()) {
3593 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
3594 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
3595 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
3596 } else
3597 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
3598 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
3599 }
3600
3601 // Compare operand to zero.
3602 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
3603
3604 // Invert value.
3605 // TODO: Could dynamically modify easy computations here. For example, if
3606 // the operand is an icmp ne, turn into icmp eq.
3607 BoolVal = Builder.CreateNot(BoolVal, "lnot");
3608
3609 // ZExt result to the expr type.
3610 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
3611}
3612
3613Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
3614 // Try folding the offsetof to a constant.
3615 Expr::EvalResult EVResult;
3616 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
3617 llvm::APSInt Value = EVResult.Val.getInt();
3618 return Builder.getInt(Value);
3619 }
3620
3621 // Loop over the components of the offsetof to compute the value.
3622 unsigned n = E->getNumComponents();
3623 llvm::Type* ResultType = ConvertType(E->getType());
3624 llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
3625 QualType CurrentType = E->getTypeSourceInfo()->getType();
3626 for (unsigned i = 0; i != n; ++i) {
3627 OffsetOfNode ON = E->getComponent(i);
3628 llvm::Value *Offset = nullptr;
3629 switch (ON.getKind()) {
3630 case OffsetOfNode::Array: {
3631 // Compute the index
3632 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
3633 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
3634 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
3635 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
3636
3637 // Save the element type
3638 CurrentType =
3639 CGF.getContext().getAsArrayType(CurrentType)->getElementType();
3640
3641 // Compute the element size
3642 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
3643 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
3644
3645 // Multiply out to compute the result
3646 Offset = Builder.CreateMul(Idx, ElemSize);
3647 break;
3648 }
3649
3650 case OffsetOfNode::Field: {
3651 FieldDecl *MemberDecl = ON.getField();
3652 auto *RD = CurrentType->castAsRecordDecl();
3653 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
3654
3655 // Compute the index of the field in its parent.
3656 unsigned i = 0;
3657 // FIXME: It would be nice if we didn't have to loop here!
3658 for (RecordDecl::field_iterator Field = RD->field_begin(),
3659 FieldEnd = RD->field_end();
3660 Field != FieldEnd; ++Field, ++i) {
3661 if (*Field == MemberDecl)
3662 break;
3663 }
3664 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
3665
3666 // Compute the offset to the field
3667 int64_t OffsetInt = RL.getFieldOffset(i) /
3668 CGF.getContext().getCharWidth();
3669 Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
3670
3671 // Save the element type.
3672 CurrentType = MemberDecl->getType();
3673 break;
3674 }
3675
3677 llvm_unreachable("dependent __builtin_offsetof");
3678
3679 case OffsetOfNode::Base: {
3680 if (ON.getBase()->isVirtual()) {
3681 CGF.ErrorUnsupported(E, "virtual base in offsetof");
3682 continue;
3683 }
3684
3685 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(
3686 CurrentType->castAsCanonical<RecordType>()->getDecl());
3687
3688 // Save the element type.
3689 CurrentType = ON.getBase()->getType();
3690
3691 // Compute the offset to the base.
3692 auto *BaseRD = CurrentType->castAsCXXRecordDecl();
3693 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
3694 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
3695 break;
3696 }
3697 }
3698 Result = Builder.CreateAdd(Result, Offset);
3699 }
3700 return Result;
3701}
3702
3703/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
3704/// argument of the sizeof expression as an integer.
3705Value *
3706ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
3707 const UnaryExprOrTypeTraitExpr *E) {
3708 QualType TypeToSize = E->getTypeOfArgument();
3709 if (auto Kind = E->getKind();
3710 Kind == UETT_SizeOf || Kind == UETT_DataSizeOf || Kind == UETT_CountOf) {
3711 if (const VariableArrayType *VAT =
3712 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
3713 // For _Countof, we only want to evaluate if the extent is actually
3714 // variable as opposed to a multi-dimensional array whose extent is
3715 // constant but whose element type is variable.
3716 bool EvaluateExtent = true;
3717 if (Kind == UETT_CountOf && VAT->getElementType()->isArrayType()) {
3718 EvaluateExtent =
3719 !VAT->getSizeExpr()->isIntegerConstantExpr(CGF.getContext());
3720 }
3721 if (EvaluateExtent) {
3722 if (E->isArgumentType()) {
3723 // sizeof(type) - make sure to emit the VLA size.
3724 CGF.EmitVariablyModifiedType(TypeToSize);
3725 } else {
3726 // C99 6.5.3.4p2: If the argument is an expression of type
3727 // VLA, it is evaluated.
3729 }
3730
3731 // For _Countof, we just want to return the size of a single dimension.
3732 if (Kind == UETT_CountOf)
3733 return CGF.getVLAElements1D(VAT).NumElts;
3734
3735 // For sizeof and __datasizeof, we need to scale the number of elements
3736 // by the size of the array element type.
3737 auto VlaSize = CGF.getVLASize(VAT);
3738
3739 // Scale the number of non-VLA elements by the non-VLA element size.
3740 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
3741 if (!eltSize.isOne())
3742 return CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize),
3743 VlaSize.NumElts);
3744 return VlaSize.NumElts;
3745 }
3746 }
3747 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
3748 auto Alignment =
3749 CGF.getContext()
3752 .getQuantity();
3753 return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
3754 } else if (E->getKind() == UETT_VectorElements) {
3755 auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));
3756 return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());
3757 }
3758
3759 // If this isn't sizeof(vla), the result must be constant; use the constant
3760 // folding logic so we don't have to duplicate it here.
3761 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
3762}
3763
3764Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3765 QualType PromotionType) {
3766 QualType promotionTy = PromotionType.isNull()
3767 ? getPromotionType(E->getSubExpr()->getType())
3768 : PromotionType;
3769 Value *result = VisitReal(E, promotionTy);
3770 if (result && !promotionTy.isNull())
3771 result = EmitUnPromotedValue(result, E->getType());
3772 return result;
3773}
3774
3775Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
3776 QualType PromotionType) {
3777 Expr *Op = E->getSubExpr();
3778 if (Op->getType()->isAnyComplexType()) {
3779 // If it's an l-value, load through the appropriate subobject l-value.
3780 // Note that we have to ask E because Op might be an l-value that
3781 // this won't work for, e.g. an Obj-C property.
3782 if (E->isGLValue()) {
3783 if (!PromotionType.isNull()) {
3785 Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
3786 PromotionType = PromotionType->isAnyComplexType()
3787 ? PromotionType
3788 : CGF.getContext().getComplexType(PromotionType);
3789 return result.first ? CGF.EmitPromotedValue(result, PromotionType).first
3790 : result.first;
3791 }
3792
3793 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3794 .getScalarVal();
3795 }
3796 // Otherwise, calculate and project.
3797 return CGF.EmitComplexExpr(Op, false, true).first;
3798 }
3799
3800 if (!PromotionType.isNull())
3801 return CGF.EmitPromotedScalarExpr(Op, PromotionType);
3802 return Visit(Op);
3803}
3804
3805Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3806 QualType PromotionType) {
3807 QualType promotionTy = PromotionType.isNull()
3808 ? getPromotionType(E->getSubExpr()->getType())
3809 : PromotionType;
3810 Value *result = VisitImag(E, promotionTy);
3811 if (result && !promotionTy.isNull())
3812 result = EmitUnPromotedValue(result, E->getType());
3813 return result;
3814}
3815
3816Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
3817 QualType PromotionType) {
3818 Expr *Op = E->getSubExpr();
3819 if (Op->getType()->isAnyComplexType()) {
3820 // If it's an l-value, load through the appropriate subobject l-value.
3821 // Note that we have to ask E because Op might be an l-value that
3822 // this won't work for, e.g. an Obj-C property.
3823 if (Op->isGLValue()) {
3824 if (!PromotionType.isNull()) {
3826 Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
3827 PromotionType = PromotionType->isAnyComplexType()
3828 ? PromotionType
3829 : CGF.getContext().getComplexType(PromotionType);
3830 return result.second
3831 ? CGF.EmitPromotedValue(result, PromotionType).second
3832 : result.second;
3833 }
3834
3835 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3836 .getScalarVal();
3837 }
3838 // Otherwise, calculate and project.
3839 return CGF.EmitComplexExpr(Op, true, false).second;
3840 }
3841
3842 // __imag on a scalar returns zero. Emit the subexpr to ensure side
3843 // effects are evaluated, but not the actual value.
3844 if (Op->isGLValue())
3845 CGF.EmitLValue(Op);
3846 else if (!PromotionType.isNull())
3847 CGF.EmitPromotedScalarExpr(Op, PromotionType);
3848 else
3849 CGF.EmitScalarExpr(Op, true);
3850 if (!PromotionType.isNull())
3851 return llvm::Constant::getNullValue(ConvertType(PromotionType));
3852 return llvm::Constant::getNullValue(ConvertType(E->getType()));
3853}
3854
3855//===----------------------------------------------------------------------===//
3856// Binary Operators
3857//===----------------------------------------------------------------------===//
3858
3859Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3860 QualType PromotionType) {
3861 return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");
3862}
3863
3864Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3865 QualType ExprType) {
3866 return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType), "unpromotion");
3867}
3868
3869Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3870 E = E->IgnoreParens();
3871 if (auto BO = dyn_cast<BinaryOperator>(E)) {
3872 switch (BO->getOpcode()) {
3873#define HANDLE_BINOP(OP) \
3874 case BO_##OP: \
3875 return Emit##OP(EmitBinOps(BO, PromotionType));
3876 HANDLE_BINOP(Add)
3877 HANDLE_BINOP(Sub)
3878 HANDLE_BINOP(Mul)
3879 HANDLE_BINOP(Div)
3880#undef HANDLE_BINOP
3881 default:
3882 break;
3883 }
3884 } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
3885 switch (UO->getOpcode()) {
3886 case UO_Imag:
3887 return VisitImag(UO, PromotionType);
3888 case UO_Real:
3889 return VisitReal(UO, PromotionType);
3890 case UO_Minus:
3891 return VisitMinus(UO, PromotionType);
3892 case UO_Plus:
3893 return VisitPlus(UO, PromotionType);
3894 default:
3895 break;
3896 }
3897 }
3898 auto result = Visit(const_cast<Expr *>(E));
3899 if (result) {
3900 if (!PromotionType.isNull())
3901 return EmitPromotedValue(result, PromotionType);
3902 else
3903 return EmitUnPromotedValue(result, E->getType());
3904 }
3905 return result;
3906}
3907
3908BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
3909 QualType PromotionType) {
3910 TestAndClearIgnoreResultAssign();
3911 BinOpInfo Result;
3912 Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);
3913 Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);
3914 if (!PromotionType.isNull())
3915 Result.Ty = PromotionType;
3916 else
3917 Result.Ty = E->getType();
3918 Result.Opcode = E->getOpcode();
3919 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3920 Result.E = E;
3921 return Result;
3922}
3923
3924LValue ScalarExprEmitter::EmitCompoundAssignLValue(
3925 const CompoundAssignOperator *E,
3926 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
3927 Value *&Result) {
3928 QualType LHSTy = E->getLHS()->getType();
3929 BinOpInfo OpInfo;
3930
3933
3934 // Emit the RHS first. __block variables need to have the rhs evaluated
3935 // first, plus this should improve codegen a little.
3936
3937 QualType PromotionTypeCR;
3938 PromotionTypeCR = getPromotionType(E->getComputationResultType());
3939 if (PromotionTypeCR.isNull())
3940 PromotionTypeCR = E->getComputationResultType();
3941 QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
3942 QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
3943 if (!PromotionTypeRHS.isNull())
3944 OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);
3945 else
3946 OpInfo.RHS = Visit(E->getRHS());
3947 OpInfo.Ty = PromotionTypeCR;
3948 OpInfo.Opcode = E->getOpcode();
3949 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3950 OpInfo.E = E;
3951 // Load/convert the LHS.
3952 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3953
3954 llvm::PHINode *atomicPHI = nullptr;
3955 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3956 QualType type = atomicTy->getValueType();
3957 if (!type->isBooleanType() && type->isIntegerType() &&
3958 !(type->isUnsignedIntegerType() &&
3959 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3960 CGF.getLangOpts().getSignedOverflowBehavior() !=
3961 LangOptions::SOB_Trapping) {
3962 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3963 llvm::Instruction::BinaryOps Op;
3964 switch (OpInfo.Opcode) {
3965 // We don't have atomicrmw operands for *, %, /, <<, >>
3966 case BO_MulAssign: case BO_DivAssign:
3967 case BO_RemAssign:
3968 case BO_ShlAssign:
3969 case BO_ShrAssign:
3970 break;
3971 case BO_AddAssign:
3972 AtomicOp = llvm::AtomicRMWInst::Add;
3973 Op = llvm::Instruction::Add;
3974 break;
3975 case BO_SubAssign:
3976 AtomicOp = llvm::AtomicRMWInst::Sub;
3977 Op = llvm::Instruction::Sub;
3978 break;
3979 case BO_AndAssign:
3980 AtomicOp = llvm::AtomicRMWInst::And;
3981 Op = llvm::Instruction::And;
3982 break;
3983 case BO_XorAssign:
3984 AtomicOp = llvm::AtomicRMWInst::Xor;
3985 Op = llvm::Instruction::Xor;
3986 break;
3987 case BO_OrAssign:
3988 AtomicOp = llvm::AtomicRMWInst::Or;
3989 Op = llvm::Instruction::Or;
3990 break;
3991 default:
3992 llvm_unreachable("Invalid compound assignment type");
3993 }
3994 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3995 llvm::Value *Amt = CGF.EmitToMemory(
3996 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
3997 E->getExprLoc()),
3998 LHSTy);
3999
4000 llvm::AtomicRMWInst *OldVal =
4001 CGF.emitAtomicRMWInst(AtomicOp, LHSLV.getAddress(), Amt);
4002
4003 // Since operation is atomic, the result type is guaranteed to be the
4004 // same as the input in LLVM terms.
4005 Result = Builder.CreateBinOp(Op, OldVal, Amt);
4006 return LHSLV;
4007 }
4008 }
4009 // FIXME: For floating point types, we should be saving and restoring the
4010 // floating point environment in the loop.
4011 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
4012 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
4013 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
4014 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
4015 Builder.CreateBr(opBB);
4016 Builder.SetInsertPoint(opBB);
4017 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
4018 atomicPHI->addIncoming(OpInfo.LHS, startBB);
4019 OpInfo.LHS = atomicPHI;
4020 }
4021 else
4022 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
4023
4024 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
4025 SourceLocation Loc = E->getExprLoc();
4026 if (!PromotionTypeLHS.isNull())
4027 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
4028 E->getExprLoc());
4029 else
4030 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
4031 E->getComputationLHSType(), Loc);
4032
4033 // Expand the binary operator.
4034 Result = (this->*Func)(OpInfo);
4035
4036 // Convert the result back to the LHS type,
4037 // potentially with Implicit Conversion sanitizer check.
4038 // If LHSLV is a bitfield, use default ScalarConversionOpts
4039 // to avoid emit any implicit integer checks.
4040 Value *Previous = nullptr;
4041 if (LHSLV.isBitField()) {
4042 Previous = Result;
4043 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc);
4044 } else
4045 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
4046 ScalarConversionOpts(CGF.SanOpts));
4047
4048 if (atomicPHI) {
4049 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
4050 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
4051 auto Pair = CGF.EmitAtomicCompareExchange(
4052 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
4053 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
4054 llvm::Value *success = Pair.second;
4055 atomicPHI->addIncoming(old, curBlock);
4056 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
4057 Builder.SetInsertPoint(contBB);
4058 return LHSLV;
4059 }
4060
4061 // Store the result value into the LHS lvalue. Bit-fields are handled
4062 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
4063 // 'An assignment expression has the value of the left operand after the
4064 // assignment...'.
4065 if (LHSLV.isBitField()) {
4066 Value *Src = Previous ? Previous : Result;
4067 QualType SrcType = E->getRHS()->getType();
4068 QualType DstType = E->getLHS()->getType();
4070 CGF.EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
4071 LHSLV.getBitFieldInfo(), E->getExprLoc());
4072 } else
4074
4075 if (CGF.getLangOpts().OpenMP)
4077 E->getLHS());
4078 return LHSLV;
4079}
4080
4081Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
4082 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
4083 bool Ignore = TestAndClearIgnoreResultAssign();
4084 Value *RHS = nullptr;
4085 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
4086
4087 // If the result is clearly ignored, return now.
4088 if (Ignore)
4089 return nullptr;
4090
4091 // The result of an assignment in C is the assigned r-value.
4092 if (!CGF.getLangOpts().CPlusPlus)
4093 return RHS;
4094
4095 // If the lvalue is non-volatile, return the computed value of the assignment.
4096 if (!LHS.isVolatileQualified())
4097 return RHS;
4098
4099 // Otherwise, reload the value.
4100 return EmitLoadOfLValue(LHS, E->getExprLoc());
4101}
4102
4103void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
4104 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
4105 SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 2>
4106 Checks;
4107
4108 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
4109 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
4110 SanitizerKind::SO_IntegerDivideByZero));
4111 }
4112
4113 const auto *BO = cast<BinaryOperator>(Ops.E);
4114 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
4115 Ops.Ty->hasSignedIntegerRepresentation() &&
4116 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
4117 Ops.mayHaveIntegerOverflow()) {
4118 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
4119
4120 llvm::Value *IntMin =
4121 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
4122 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
4123
4124 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
4125 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
4126 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
4127 Checks.push_back(
4128 std::make_pair(NotOverflow, SanitizerKind::SO_SignedIntegerOverflow));
4129 }
4130
4131 if (Checks.size() > 0)
4132 EmitBinOpCheck(Checks, Ops);
4133}
4134
4135Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
4136 {
4137 SanitizerDebugLocation SanScope(&CGF,
4138 {SanitizerKind::SO_IntegerDivideByZero,
4139 SanitizerKind::SO_SignedIntegerOverflow,
4140 SanitizerKind::SO_FloatDivideByZero},
4141 SanitizerHandler::DivremOverflow);
4142 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
4143 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
4144 Ops.Ty->isIntegerType() &&
4145 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4146 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4147 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
4148 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
4149 Ops.Ty->isRealFloatingType() &&
4150 Ops.mayHaveFloatDivisionByZero()) {
4151 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4152 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
4153 EmitBinOpCheck(
4154 std::make_pair(NonZero, SanitizerKind::SO_FloatDivideByZero), Ops);
4155 }
4156 }
4157
4158 if (Ops.Ty->isConstantMatrixType()) {
4159 llvm::MatrixBuilder MB(Builder);
4160 // We need to check the types of the operands of the operator to get the
4161 // correct matrix dimensions.
4162 auto *BO = cast<BinaryOperator>(Ops.E);
4163 (void)BO;
4164 assert(
4166 "first operand must be a matrix");
4167 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
4168 "second operand must be an arithmetic type");
4169 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
4170 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
4171 Ops.Ty->hasUnsignedIntegerRepresentation());
4172 }
4173
4174 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
4175 llvm::Value *Val;
4176 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
4177 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
4178 CGF.SetDivFPAccuracy(Val);
4179 return Val;
4180 }
4181 else if (Ops.isFixedPointOp())
4182 return EmitFixedPointBinOp(Ops);
4183 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
4184 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
4185 else
4186 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
4187}
4188
4189Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
4190 // Rem in C can't be a floating point type: C99 6.5.5p2.
4191 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
4192 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
4193 Ops.Ty->isIntegerType() &&
4194 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4195 SanitizerDebugLocation SanScope(&CGF,
4196 {SanitizerKind::SO_IntegerDivideByZero,
4197 SanitizerKind::SO_SignedIntegerOverflow},
4198 SanitizerHandler::DivremOverflow);
4199 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4200 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
4201 }
4202
4203 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4204 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
4205
4206 if (CGF.getLangOpts().HLSL && Ops.Ty->hasFloatingRepresentation())
4207 return Builder.CreateFRem(Ops.LHS, Ops.RHS, "rem");
4208
4209 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
4210}
4211
4212Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
4213 unsigned IID;
4214 unsigned OpID = 0;
4215 SanitizerHandler OverflowKind;
4216
4217 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
4218 switch (Ops.Opcode) {
4219 case BO_Add:
4220 case BO_AddAssign:
4221 OpID = 1;
4222 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
4223 llvm::Intrinsic::uadd_with_overflow;
4224 OverflowKind = SanitizerHandler::AddOverflow;
4225 break;
4226 case BO_Sub:
4227 case BO_SubAssign:
4228 OpID = 2;
4229 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
4230 llvm::Intrinsic::usub_with_overflow;
4231 OverflowKind = SanitizerHandler::SubOverflow;
4232 break;
4233 case BO_Mul:
4234 case BO_MulAssign:
4235 OpID = 3;
4236 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
4237 llvm::Intrinsic::umul_with_overflow;
4238 OverflowKind = SanitizerHandler::MulOverflow;
4239 break;
4240 default:
4241 llvm_unreachable("Unsupported operation for overflow detection");
4242 }
4243 OpID <<= 1;
4244 if (isSigned)
4245 OpID |= 1;
4246
4247 SanitizerDebugLocation SanScope(&CGF,
4248 {SanitizerKind::SO_SignedIntegerOverflow,
4249 SanitizerKind::SO_UnsignedIntegerOverflow},
4250 OverflowKind);
4251 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
4252
4253 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
4254
4255 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
4256 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
4257 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
4258
4259 // Handle overflow with llvm.trap if no custom handler has been specified.
4260 const std::string *handlerName =
4262 if (handlerName->empty()) {
4263 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
4264 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
4265 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
4266 llvm::Value *NotOverflow = Builder.CreateNot(overflow);
4268 isSigned ? SanitizerKind::SO_SignedIntegerOverflow
4269 : SanitizerKind::SO_UnsignedIntegerOverflow;
4270 EmitBinOpCheck(std::make_pair(NotOverflow, Ordinal), Ops);
4271 } else
4272 CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
4273 return result;
4274 }
4275
4276 // Branch in case of overflow.
4277 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
4278 llvm::BasicBlock *continueBB =
4279 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
4280 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
4281
4282 Builder.CreateCondBr(overflow, overflowBB, continueBB);
4283
4284 // If an overflow handler is set, then we want to call it and then use its
4285 // result, if it returns.
4286 Builder.SetInsertPoint(overflowBB);
4287
4288 // Get the overflow handler.
4289 llvm::Type *Int8Ty = CGF.Int8Ty;
4290 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
4291 llvm::FunctionType *handlerTy =
4292 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
4293 llvm::FunctionCallee handler =
4294 CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
4295
4296 // Sign extend the args to 64-bit, so that we can use the same handler for
4297 // all types of overflow.
4298 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
4299 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
4300
4301 // Call the handler with the two arguments, the operation, and the size of
4302 // the result.
4303 llvm::Value *handlerArgs[] = {
4304 lhs,
4305 rhs,
4306 Builder.getInt8(OpID),
4307 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
4308 };
4309 llvm::Value *handlerResult =
4310 CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
4311
4312 // Truncate the result back to the desired size.
4313 handlerResult = Builder.CreateTrunc(handlerResult, opTy);
4314 Builder.CreateBr(continueBB);
4315
4316 Builder.SetInsertPoint(continueBB);
4317 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
4318 phi->addIncoming(result, initialBB);
4319 phi->addIncoming(handlerResult, overflowBB);
4320
4321 return phi;
4322}
4323
4324/// BO_Add/BO_Sub are handled by EmitPointerWithAlignment to preserve alignment
4325/// information.
4326/// This function is used for BO_AddAssign/BO_SubAssign.
4327static Value *emitPointerArithmetic(CodeGenFunction &CGF, const BinOpInfo &op,
4328 bool isSubtraction) {
4329 // Must have binary (not unary) expr here. Unary pointer
4330 // increment/decrement doesn't use this path.
4332
4333 Value *pointer = op.LHS;
4334 Expr *pointerOperand = expr->getLHS();
4335 Value *index = op.RHS;
4336 Expr *indexOperand = expr->getRHS();
4337
4338 // In a subtraction, the LHS is always the pointer.
4339 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
4340 std::swap(pointer, index);
4341 std::swap(pointerOperand, indexOperand);
4342 }
4343
4344 return CGF.EmitPointerArithmetic(expr, pointerOperand, pointer, indexOperand,
4345 index, isSubtraction);
4346}
4347
4348/// Emit pointer + index arithmetic.
4350 const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer,
4351 Expr *indexOperand, llvm::Value *index, bool isSubtraction) {
4352 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
4353
4354 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
4355 auto &DL = CGM.getDataLayout();
4356 auto *PtrTy = cast<llvm::PointerType>(pointer->getType());
4357
4358 // Some versions of glibc and gcc use idioms (particularly in their malloc
4359 // routines) that add a pointer-sized integer (known to be a pointer value)
4360 // to a null pointer in order to cast the value back to an integer or as
4361 // part of a pointer alignment algorithm. This is undefined behavior, but
4362 // we'd like to be able to compile programs that use it.
4363 //
4364 // Normally, we'd generate a GEP with a null-pointer base here in response
4365 // to that code, but it's also UB to dereference a pointer created that
4366 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
4367 // generate a direct cast of the integer value to a pointer.
4368 //
4369 // The idiom (p = nullptr + N) is not met if any of the following are true:
4370 //
4371 // The operation is subtraction.
4372 // The index is not pointer-sized.
4373 // The pointer type is not byte-sized.
4374 //
4375 // Note that we do not suppress the pointer overflow check in this case.
4377 getContext(), BO->getOpcode(), pointerOperand, indexOperand)) {
4378 llvm::Value *Ptr = Builder.CreateIntToPtr(index, pointer->getType());
4379 if (getLangOpts().PointerOverflowDefined ||
4380 !SanOpts.has(SanitizerKind::PointerOverflow) ||
4381 NullPointerIsDefined(Builder.GetInsertBlock()->getParent(),
4382 PtrTy->getPointerAddressSpace()))
4383 return Ptr;
4384 // The inbounds GEP of null is valid iff the index is zero.
4385 auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
4386 auto CheckHandler = SanitizerHandler::PointerOverflow;
4387 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
4388 llvm::Value *IsZeroIndex = Builder.CreateIsNull(index);
4389 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(BO->getExprLoc())};
4390 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
4391 llvm::Value *IntPtr = llvm::Constant::getNullValue(IntPtrTy);
4392 llvm::Value *ComputedGEP = Builder.CreateZExtOrTrunc(index, IntPtrTy);
4393 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
4394 EmitCheck({{IsZeroIndex, CheckOrdinal}}, CheckHandler, StaticArgs,
4395 DynamicArgs);
4396 return Ptr;
4397 }
4398
4399 if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
4400 // Zero-extend or sign-extend the pointer value according to
4401 // whether the index is signed or not.
4402 index = Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
4403 "idx.ext");
4404 }
4405
4406 // If this is subtraction, negate the index.
4407 if (isSubtraction)
4408 index = Builder.CreateNeg(index, "idx.neg");
4409
4410 if (SanOpts.has(SanitizerKind::ArrayBounds))
4411 EmitBoundsCheck(BO, pointerOperand, index, indexOperand->getType(),
4412 /*Accessed*/ false);
4413
4414 const PointerType *pointerType =
4415 pointerOperand->getType()->getAs<PointerType>();
4416 if (!pointerType) {
4417 QualType objectType = pointerOperand->getType()
4419 ->getPointeeType();
4420 llvm::Value *objectSize =
4421 CGM.getSize(getContext().getTypeSizeInChars(objectType));
4422
4423 index = Builder.CreateMul(index, objectSize);
4424
4425 llvm::Value *result = Builder.CreateGEP(Int8Ty, pointer, index, "add.ptr");
4426 return Builder.CreateBitCast(result, pointer->getType());
4427 }
4428
4429 QualType elementType = pointerType->getPointeeType();
4430 if (const VariableArrayType *vla =
4431 getContext().getAsVariableArrayType(elementType)) {
4432 // The element count here is the total number of non-VLA elements.
4433 llvm::Value *numElements = getVLASize(vla).NumElts;
4434
4435 // Effectively, the multiply by the VLA size is part of the GEP.
4436 // GEP indexes are signed, and scaling an index isn't permitted to
4437 // signed-overflow, so we use the same semantics for our explicit
4438 // multiply. We suppress this if overflow is not undefined behavior.
4439 llvm::Type *elemTy = ConvertTypeForMem(vla->getElementType());
4440 if (getLangOpts().PointerOverflowDefined) {
4441 index = Builder.CreateMul(index, numElements, "vla.index");
4442 pointer = Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
4443 } else {
4444 index = Builder.CreateNSWMul(index, numElements, "vla.index");
4445 pointer =
4446 EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
4447 isSubtraction, BO->getExprLoc(), "add.ptr");
4448 }
4449 return pointer;
4450 }
4451
4452 // Explicitly handle GNU void* and function pointer arithmetic extensions. The
4453 // GNU void* casts amount to no-ops since our void* type is i8*, but this is
4454 // future proof.
4455 llvm::Type *elemTy;
4456 if (elementType->isVoidType() || elementType->isFunctionType())
4457 elemTy = Int8Ty;
4458 else
4459 elemTy = ConvertTypeForMem(elementType);
4460
4461 if (getLangOpts().PointerOverflowDefined)
4462 return Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
4463
4464 return EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned, isSubtraction,
4465 BO->getExprLoc(), "add.ptr");
4466}
4467
4468// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
4469// Addend. Use negMul and negAdd to negate the first operand of the Mul or
4470// the add operand respectively. This allows fmuladd to represent a*b-c, or
4471// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
4472// efficient operations.
4473static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
4474 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4475 bool negMul, bool negAdd) {
4476 Value *MulOp0 = MulOp->getOperand(0);
4477 Value *MulOp1 = MulOp->getOperand(1);
4478 if (negMul)
4479 MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
4480 if (negAdd)
4481 Addend = Builder.CreateFNeg(Addend, "neg");
4482
4483 Value *FMulAdd = nullptr;
4484 if (Builder.getIsFPConstrained()) {
4485 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
4486 "Only constrained operation should be created when Builder is in FP "
4487 "constrained mode");
4488 FMulAdd = Builder.CreateConstrainedFPCall(
4489 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
4490 Addend->getType()),
4491 {MulOp0, MulOp1, Addend});
4492 } else {
4493 FMulAdd = Builder.CreateCall(
4494 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
4495 {MulOp0, MulOp1, Addend});
4496 }
4497 MulOp->eraseFromParent();
4498
4499 return FMulAdd;
4500}
4501
4502// Check whether it would be legal to emit an fmuladd intrinsic call to
4503// represent op and if so, build the fmuladd.
4504//
4505// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
4506// Does NOT check the type of the operation - it's assumed that this function
4507// will be called from contexts where it's known that the type is contractable.
static Value* tryEmitFMulAdd(const BinOpInfo &op,
                      const CodeGenFunction &CGF, CGBuilderTy &Builder,
                      bool isSub=false) {

  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
         "Only fadd/fsub can be the root of an fmuladd.");

  // Check whether this op is marked as fusable.
  if (!op.FPFeatures.allowFPContractWithinStatement())
    return nullptr;

  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  // Peek through fneg to look for fmul. Make sure fneg has no users, and that
  // it is the only use of its operand.
  // NegLHS/NegRHS remember that the corresponding operand was negated so the
  // negation can be folded into the fmuladd (and the fneg erased) below.
  bool NegLHS = false;
  if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
    if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
      LHS = LHSUnOp->getOperand(0);
      NegLHS = true;
    }
  }

  bool NegRHS = false;
  if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
    if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
      RHS = RHSUnOp->getOperand(0);
      NegRHS = true;
    }
  }

  // We have a potentially fusable op. Look for a mul on one of the operands.
  // Also, make sure that the mul result isn't used directly. In that case,
  // there's no point creating a muladd operation.
  // Case: (a*b) + c  or  (a*b) - c, with the mul on the LHS.
  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      // NOTE: erase the fneg (op.LHS) before building, since it is the mul's
      // only remaining user.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  // Case: c + (a*b)  or  c - (a*b), with the mul on the RHS; subtraction of
  // the mul is expressed by negating the first mul operand (isSub ^ NegRHS).
  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
    }
  }

  // Same two cases again, but for the constrained-FP multiply intrinsic
  // (emitted instead of a plain fmul when FP exceptions/rounding are modeled).
  if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
    if (LHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
    if (RHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
    }
  }

  // No fusable multiply found; caller emits a plain fadd/fsub.
  return nullptr;
}
4588
4589Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
4590 if (op.LHS->getType()->isPointerTy() ||
4591 op.RHS->getType()->isPointerTy())
4593
4594 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4595 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4596 case LangOptions::SOB_Defined:
4597 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4598 return Builder.CreateAdd(op.LHS, op.RHS, "add");
4599 [[fallthrough]];
4600 case LangOptions::SOB_Undefined:
4601 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4602 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
4603 [[fallthrough]];
4604 case LangOptions::SOB_Trapping:
4605 if (CanElideOverflowCheck(CGF.getContext(), op))
4606 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
4607 return EmitOverflowCheckedBinOp(op);
4608 }
4609 }
4610
4611 // For vector and matrix adds, try to fold into a fmuladd.
4612 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4613 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4614 // Try to form an fmuladd.
4615 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
4616 return FMulAdd;
4617 }
4618
4619 if (op.Ty->isConstantMatrixType()) {
4620 llvm::MatrixBuilder MB(Builder);
4621 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4622 return MB.CreateAdd(op.LHS, op.RHS);
4623 }
4624
4625 if (op.Ty->isUnsignedIntegerType() &&
4626 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
4627 !CanElideOverflowCheck(CGF.getContext(), op))
4628 return EmitOverflowCheckedBinOp(op);
4629
4630 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4631 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4632 return Builder.CreateFAdd(op.LHS, op.RHS, "add");
4633 }
4634
4635 if (op.isFixedPointOp())
4636 return EmitFixedPointBinOp(op);
4637
4638 return Builder.CreateAdd(op.LHS, op.RHS, "add");
4639}
4640
4641/// The resulting value must be calculated with exact precision, so the operands
4642/// may not be the same type.
4643Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
4644 using llvm::APSInt;
4645 using llvm::ConstantInt;
4646
4647 // This is either a binary operation where at least one of the operands is
4648 // a fixed-point type, or a unary operation where the operand is a fixed-point
4649 // type. The result type of a binary operation is determined by
4650 // Sema::handleFixedPointConversions().
4651 QualType ResultTy = op.Ty;
4652 QualType LHSTy, RHSTy;
4653 if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
4654 RHSTy = BinOp->getRHS()->getType();
4655 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
4656 // For compound assignment, the effective type of the LHS at this point
4657 // is the computation LHS type, not the actual LHS type, and the final
4658 // result type is not the type of the expression but rather the
4659 // computation result type.
4660 LHSTy = CAO->getComputationLHSType();
4661 ResultTy = CAO->getComputationResultType();
4662 } else
4663 LHSTy = BinOp->getLHS()->getType();
4664 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
4665 LHSTy = UnOp->getSubExpr()->getType();
4666 RHSTy = UnOp->getSubExpr()->getType();
4667 }
4668 ASTContext &Ctx = CGF.getContext();
4669 Value *LHS = op.LHS;
4670 Value *RHS = op.RHS;
4671
4672 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
4673 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
4674 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
4675 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
4676
4677 // Perform the actual operation.
4678 Value *Result;
4679 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
4680 switch (op.Opcode) {
4681 case BO_AddAssign:
4682 case BO_Add:
4683 Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
4684 break;
4685 case BO_SubAssign:
4686 case BO_Sub:
4687 Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
4688 break;
4689 case BO_MulAssign:
4690 case BO_Mul:
4691 Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
4692 break;
4693 case BO_DivAssign:
4694 case BO_Div:
4695 Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
4696 break;
4697 case BO_ShlAssign:
4698 case BO_Shl:
4699 Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
4700 break;
4701 case BO_ShrAssign:
4702 case BO_Shr:
4703 Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
4704 break;
4705 case BO_LT:
4706 return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
4707 case BO_GT:
4708 return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
4709 case BO_LE:
4710 return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4711 case BO_GE:
4712 return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4713 case BO_EQ:
4714 // For equality operations, we assume any padding bits on unsigned types are
4715 // zero'd out. They could be overwritten through non-saturating operations
4716 // that cause overflow, but this leads to undefined behavior.
4717 return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
4718 case BO_NE:
4719 return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4720 case BO_Cmp:
4721 case BO_LAnd:
4722 case BO_LOr:
4723 llvm_unreachable("Found unimplemented fixed point binary operation");
4724 case BO_PtrMemD:
4725 case BO_PtrMemI:
4726 case BO_Rem:
4727 case BO_Xor:
4728 case BO_And:
4729 case BO_Or:
4730 case BO_Assign:
4731 case BO_RemAssign:
4732 case BO_AndAssign:
4733 case BO_XorAssign:
4734 case BO_OrAssign:
4735 case BO_Comma:
4736 llvm_unreachable("Found unsupported binary operation for fixed point types.");
4737 }
4738
4739 bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
4741 // Convert to the result type.
4742 return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
4743 : CommonFixedSema,
4744 ResultFixedSema);
4745}
4746
4747Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
4748 // The LHS is always a pointer if either side is.
4749 if (!op.LHS->getType()->isPointerTy()) {
4750 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4751 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4752 case LangOptions::SOB_Defined:
4753 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4754 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4755 [[fallthrough]];
4756 case LangOptions::SOB_Undefined:
4757 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4758 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
4759 [[fallthrough]];
4760 case LangOptions::SOB_Trapping:
4761 if (CanElideOverflowCheck(CGF.getContext(), op))
4762 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
4763 return EmitOverflowCheckedBinOp(op);
4764 }
4765 }
4766
4767 // For vector and matrix subs, try to fold into a fmuladd.
4768 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4769 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4770 // Try to form an fmuladd.
4771 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
4772 return FMulAdd;
4773 }
4774
4775 if (op.Ty->isConstantMatrixType()) {
4776 llvm::MatrixBuilder MB(Builder);
4777 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4778 return MB.CreateSub(op.LHS, op.RHS);
4779 }
4780
4781 if (op.Ty->isUnsignedIntegerType() &&
4782 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
4783 !CanElideOverflowCheck(CGF.getContext(), op))
4784 return EmitOverflowCheckedBinOp(op);
4785
4786 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4787 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4788 return Builder.CreateFSub(op.LHS, op.RHS, "sub");
4789 }
4790
4791 if (op.isFixedPointOp())
4792 return EmitFixedPointBinOp(op);
4793
4794 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4795 }
4796
4797 // If the RHS is not a pointer, then we have normal pointer
4798 // arithmetic.
4799 if (!op.RHS->getType()->isPointerTy())
4801
4802 // Otherwise, this is a pointer subtraction.
4803
4804 // Do the raw subtraction part.
4805 llvm::Value *LHS
4806 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
4807 llvm::Value *RHS
4808 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
4809 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
4810
4811 // Okay, figure out the element size.
4812 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
4813 QualType elementType = expr->getLHS()->getType()->getPointeeType();
4814
4815 llvm::Value *divisor = nullptr;
4816
4817 // For a variable-length array, this is going to be non-constant.
4818 if (const VariableArrayType *vla
4819 = CGF.getContext().getAsVariableArrayType(elementType)) {
4820 auto VlaSize = CGF.getVLASize(vla);
4821 elementType = VlaSize.Type;
4822 divisor = VlaSize.NumElts;
4823
4824 // Scale the number of non-VLA elements by the non-VLA element size.
4825 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
4826 if (!eltSize.isOne())
4827 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
4828
4829 // For everything elese, we can just compute it, safe in the
4830 // assumption that Sema won't let anything through that we can't
4831 // safely compute the size of.
4832 } else {
4833 CharUnits elementSize;
4834 // Handle GCC extension for pointer arithmetic on void* and
4835 // function pointer types.
4836 if (elementType->isVoidType() || elementType->isFunctionType())
4837 elementSize = CharUnits::One();
4838 else
4839 elementSize = CGF.getContext().getTypeSizeInChars(elementType);
4840
4841 // Don't even emit the divide for element size of 1.
4842 if (elementSize.isOne())
4843 return diffInChars;
4844
4845 divisor = CGF.CGM.getSize(elementSize);
4846 }
4847
4848 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
4849 // pointer difference in C is only defined in the case where both operands
4850 // are pointing to elements of an array.
4851 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
4852}
4853
4854Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
4855 bool RHSIsSigned) {
4856 llvm::IntegerType *Ty;
4857 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4858 Ty = cast<llvm::IntegerType>(VT->getElementType());
4859 else
4860 Ty = cast<llvm::IntegerType>(LHS->getType());
4861 // For a given type of LHS the maximum shift amount is width(LHS)-1, however
4862 // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
4863 // this in ConstantInt::get, this results in the value getting truncated.
4864 // Constrain the return value to be max(RHS) in this case.
4865 llvm::Type *RHSTy = RHS->getType();
4866 llvm::APInt RHSMax =
4867 RHSIsSigned ? llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())
4868 : llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
4869 if (RHSMax.ult(Ty->getBitWidth()))
4870 return llvm::ConstantInt::get(RHSTy, RHSMax);
4871 return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
4872}
4873
4874Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
4875 const Twine &Name) {
4876 llvm::IntegerType *Ty;
4877 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4878 Ty = cast<llvm::IntegerType>(VT->getElementType());
4879 else
4880 Ty = cast<llvm::IntegerType>(LHS->getType());
4881
4882 if (llvm::isPowerOf2_64(Ty->getBitWidth()))
4883 return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);
4884
4885 return Builder.CreateURem(
4886 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
4887}
4888
4889Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
4890 // TODO: This misses out on the sanitizer check below.
4891 if (Ops.isFixedPointOp())
4892 return EmitFixedPointBinOp(Ops);
4893
4894 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4895 // RHS to the same size as the LHS.
4896 Value *RHS = Ops.RHS;
4897 if (Ops.LHS->getType() != RHS->getType())
4898 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4899
4900 bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
4901 Ops.Ty->hasSignedIntegerRepresentation() &&
4903 !CGF.getLangOpts().CPlusPlus20;
4904 bool SanitizeUnsignedBase =
4905 CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
4906 Ops.Ty->hasUnsignedIntegerRepresentation();
4907 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
4908 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
4909 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4910 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4911 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
4912 else if ((SanitizeBase || SanitizeExponent) &&
4913 isa<llvm::IntegerType>(Ops.LHS->getType())) {
4914 SmallVector<SanitizerKind::SanitizerOrdinal, 3> Ordinals;
4915 if (SanitizeSignedBase)
4916 Ordinals.push_back(SanitizerKind::SO_ShiftBase);
4917 if (SanitizeUnsignedBase)
4918 Ordinals.push_back(SanitizerKind::SO_UnsignedShiftBase);
4919 if (SanitizeExponent)
4920 Ordinals.push_back(SanitizerKind::SO_ShiftExponent);
4921
4922 SanitizerDebugLocation SanScope(&CGF, Ordinals,
4923 SanitizerHandler::ShiftOutOfBounds);
4924 SmallVector<std::pair<Value *, SanitizerKind::SanitizerOrdinal>, 2> Checks;
4925 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4926 llvm::Value *WidthMinusOne =
4927 GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);
4928 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
4929
4930 if (SanitizeExponent) {
4931 Checks.push_back(
4932 std::make_pair(ValidExponent, SanitizerKind::SO_ShiftExponent));
4933 }
4934
4935 if (SanitizeBase) {
4936 // Check whether we are shifting any non-zero bits off the top of the
4937 // integer. We only emit this check if exponent is valid - otherwise
4938 // instructions below will have undefined behavior themselves.
4939 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
4940 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4941 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
4942 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
4943 llvm::Value *PromotedWidthMinusOne =
4944 (RHS == Ops.RHS) ? WidthMinusOne
4945 : GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);
4946 CGF.EmitBlock(CheckShiftBase);
4947 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
4948 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
4949 /*NUW*/ true, /*NSW*/ true),
4950 "shl.check");
4951 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
4952 // In C99, we are not permitted to shift a 1 bit into the sign bit.
4953 // Under C++11's rules, shifting a 1 bit into the sign bit is
4954 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
4955 // define signed left shifts, so we use the C99 and C++11 rules there).
4956 // Unsigned shifts can always shift into the top bit.
4957 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
4958 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
4959 }
4960 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
4961 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
4962 CGF.EmitBlock(Cont);
4963 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
4964 BaseCheck->addIncoming(Builder.getTrue(), Orig);
4965 BaseCheck->addIncoming(ValidBase, CheckShiftBase);
4966 Checks.push_back(std::make_pair(
4967 BaseCheck, SanitizeSignedBase ? SanitizerKind::SO_ShiftBase
4968 : SanitizerKind::SO_UnsignedShiftBase));
4969 }
4970
4971 assert(!Checks.empty());
4972 EmitBinOpCheck(Checks, Ops);
4973 }
4974
4975 return Builder.CreateShl(Ops.LHS, RHS, "shl");
4976}
4977
4978Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
4979 // TODO: This misses out on the sanitizer check below.
4980 if (Ops.isFixedPointOp())
4981 return EmitFixedPointBinOp(Ops);
4982
4983 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4984 // RHS to the same size as the LHS.
4985 Value *RHS = Ops.RHS;
4986 if (Ops.LHS->getType() != RHS->getType())
4987 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4988
4989 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4990 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4991 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
4992 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
4993 isa<llvm::IntegerType>(Ops.LHS->getType())) {
4994 SanitizerDebugLocation SanScope(&CGF, {SanitizerKind::SO_ShiftExponent},
4995 SanitizerHandler::ShiftOutOfBounds);
4996 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4997 llvm::Value *Valid = Builder.CreateICmpULE(
4998 Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));
4999 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::SO_ShiftExponent), Ops);
5000 }
5001
5002 if (Ops.Ty->hasUnsignedIntegerRepresentation())
5003 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
5004 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
5005}
5006
5008// return corresponding comparison intrinsic for given vector type
5009static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
5010 BuiltinType::Kind ElemKind) {
5011 switch (ElemKind) {
5012 default: llvm_unreachable("unexpected element type");
5013 case BuiltinType::Char_U:
5014 case BuiltinType::UChar:
5015 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
5016 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
5017 case BuiltinType::Char_S:
5018 case BuiltinType::SChar:
5019 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
5020 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
5021 case BuiltinType::UShort:
5022 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
5023 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
5024 case BuiltinType::Short:
5025 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
5026 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
5027 case BuiltinType::UInt:
5028 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
5029 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
5030 case BuiltinType::Int:
5031 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
5032 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
5033 case BuiltinType::ULong:
5034 case BuiltinType::ULongLong:
5035 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
5036 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
5037 case BuiltinType::Long:
5038 case BuiltinType::LongLong:
5039 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
5040 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
5041 case BuiltinType::Float:
5042 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
5043 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
5044 case BuiltinType::Double:
5045 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
5046 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
5047 case BuiltinType::UInt128:
5048 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
5049 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
5050 case BuiltinType::Int128:
5051 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
5052 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
5053 }
5054}
5055
5056Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
5057 llvm::CmpInst::Predicate UICmpOpc,
5058 llvm::CmpInst::Predicate SICmpOpc,
5059 llvm::CmpInst::Predicate FCmpOpc,
5060 bool IsSignaling) {
5061 TestAndClearIgnoreResultAssign();
5062 Value *Result;
5063 QualType LHSTy = E->getLHS()->getType();
5064 QualType RHSTy = E->getRHS()->getType();
5065 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
5066 assert(E->getOpcode() == BO_EQ ||
5067 E->getOpcode() == BO_NE);
5068 Value *LHS = CGF.EmitScalarExpr(E->getLHS());
5069 Value *RHS = CGF.EmitScalarExpr(E->getRHS());
5071 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
5072 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
5073 BinOpInfo BOInfo = EmitBinOps(E);
5074 Value *LHS = BOInfo.LHS;
5075 Value *RHS = BOInfo.RHS;
5076
5077 // If AltiVec, the comparison results in a numeric type, so we use
5078 // intrinsics comparing vectors and giving 0 or 1 as a result
5079 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
5080 // constants for mapping CR6 register bits to predicate result
5081 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
5082
5083 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
5084
5085 // in several cases vector arguments order will be reversed
5086 Value *FirstVecArg = LHS,
5087 *SecondVecArg = RHS;
5088
5089 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
5090 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
5091
5092 switch(E->getOpcode()) {
5093 default: llvm_unreachable("is not a comparison operation");
5094 case BO_EQ:
5095 CR6 = CR6_LT;
5096 ID = GetIntrinsic(VCMPEQ, ElementKind);
5097 break;
5098 case BO_NE:
5099 CR6 = CR6_EQ;
5100 ID = GetIntrinsic(VCMPEQ, ElementKind);
5101 break;
5102 case BO_LT:
5103 CR6 = CR6_LT;
5104 ID = GetIntrinsic(VCMPGT, ElementKind);
5105 std::swap(FirstVecArg, SecondVecArg);
5106 break;
5107 case BO_GT:
5108 CR6 = CR6_LT;
5109 ID = GetIntrinsic(VCMPGT, ElementKind);
5110 break;
5111 case BO_LE:
5112 if (ElementKind == BuiltinType::Float) {
5113 CR6 = CR6_LT;
5114 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
5115 std::swap(FirstVecArg, SecondVecArg);
5116 }
5117 else {
5118 CR6 = CR6_EQ;
5119 ID = GetIntrinsic(VCMPGT, ElementKind);
5120 }
5121 break;
5122 case BO_GE:
5123 if (ElementKind == BuiltinType::Float) {
5124 CR6 = CR6_LT;
5125 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
5126 }
5127 else {
5128 CR6 = CR6_EQ;
5129 ID = GetIntrinsic(VCMPGT, ElementKind);
5130 std::swap(FirstVecArg, SecondVecArg);
5131 }
5132 break;
5133 }
5134
5135 Value *CR6Param = Builder.getInt32(CR6);
5136 llvm::Function *F = CGF.CGM.getIntrinsic(ID);
5137 Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
5138
5139 // The result type of intrinsic may not be same as E->getType().
5140 // If E->getType() is not BoolTy, EmitScalarConversion will do the
5141 // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
5142 // do nothing, if ResultTy is not i1 at the same time, it will cause
5143 // crash later.
5144 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
5145 if (ResultTy->getBitWidth() > 1 &&
5146 E->getType() == CGF.getContext().BoolTy)
5147 Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
5148 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
5149 E->getExprLoc());
5150 }
5151
5152 if (BOInfo.isFixedPointOp()) {
5153 Result = EmitFixedPointBinOp(BOInfo);
5154 } else if (LHS->getType()->isFPOrFPVectorTy()) {
5155 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
5156 if (!IsSignaling)
5157 Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
5158 else
5159 Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
5160 } else if (LHSTy->hasSignedIntegerRepresentation()) {
5161 Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
5162 } else {
5163 // Unsigned integers and pointers.
5164
5165 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
5168
5169 // Dynamic information is required to be stripped for comparisons,
5170 // because it could leak the dynamic information. Based on comparisons
5171 // of pointers to dynamic objects, the optimizer can replace one pointer
5172 // with another, which might be incorrect in presence of invariant
5173 // groups. Comparison with null is safe because null does not carry any
5174 // dynamic information.
5175 if (LHSTy.mayBeDynamicClass())
5176 LHS = Builder.CreateStripInvariantGroup(LHS);
5177 if (RHSTy.mayBeDynamicClass())
5178 RHS = Builder.CreateStripInvariantGroup(RHS);
5179 }
5180
5181 Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
5182 }
5183
5184 // If this is a vector comparison, sign extend the result to the appropriate
5185 // vector integer type and return it (don't convert to bool).
5186 if (LHSTy->isVectorType())
5187 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
5188
5189 } else {
5190 // Complex Comparison: can only be an equality comparison.
5192 QualType CETy;
5193 if (auto *CTy = LHSTy->getAs<ComplexType>()) {
5194 LHS = CGF.EmitComplexExpr(E->getLHS());
5195 CETy = CTy->getElementType();
5196 } else {
5197 LHS.first = Visit(E->getLHS());
5198 LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
5199 CETy = LHSTy;
5200 }
5201 if (auto *CTy = RHSTy->getAs<ComplexType>()) {
5202 RHS = CGF.EmitComplexExpr(E->getRHS());
5203 assert(CGF.getContext().hasSameUnqualifiedType(CETy,
5204 CTy->getElementType()) &&
5205 "The element types must always match.");
5206 (void)CTy;
5207 } else {
5208 RHS.first = Visit(E->getRHS());
5209 RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
5210 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
5211 "The element types must always match.");
5212 }
5213
5214 Value *ResultR, *ResultI;
5215 if (CETy->isRealFloatingType()) {
5216 // As complex comparisons can only be equality comparisons, they
5217 // are never signaling comparisons.
5218 ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
5219 ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
5220 } else {
5221 // Complex comparisons can only be equality comparisons. As such, signed
5222 // and unsigned opcodes are the same.
5223 ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
5224 ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
5225 }
5226
5227 if (E->getOpcode() == BO_EQ) {
5228 Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
5229 } else {
5230 assert(E->getOpcode() == BO_NE &&
5231 "Complex comparison other than == or != ?");
5232 Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
5233 }
5234 }
5235
5236 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
5237 E->getExprLoc());
5238}
5239
// NOTE(review): the opening line of this definition (presumably
// "Value *CodeGenFunction::EmitWithOriginalRHSBitfieldAssignment(", matching
// the call site in VisitBinAssign below) was lost in this copy of the file —
// verify against upstream. It emits the RHS of a bitfield assignment while
// reporting the pre-conversion value/type through the out-parameters.
    const BinaryOperator *E, Value **Previous, QualType *SrcType) {
  // In case we have the integer or bitfield sanitizer checks enabled
  // we want to get the expression before scalar conversion.
  if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
    CastKind Kind = ICE->getCastKind();
    if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
      // Hand the pre-conversion value and its type back to the caller, then
      // perform the conversion ourselves.
      *SrcType = ICE->getSubExpr()->getType();
      *Previous = EmitScalarExpr(ICE->getSubExpr());
      // Pass default ScalarConversionOpts to avoid emitting
      // integer sanitizer checks as E refers to bitfield.
      return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
                                  ICE->getExprLoc());
    }
  }
  // No implicit cast of interest: emit the RHS normally.
  return EmitScalarExpr(E->getRHS());
}
5257
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // NOTE(review): several lines of this function (the pointer-auth l-value
  // setup, the Qualifiers::OCL_* case labels of the switch, and the OpenMP
  // runtime call) appear to have been lost in this copy of the file — verify
  // against upstream before relying on this text.
  ApplyAtomGroup Grp(CGF.getDebugInfo());
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  // Assignment through a pointer-authenticated l-value: the RHS is signed for
  // the destination's pointer-auth qualifier before being stored.
  if (PointerAuthQualifier PtrAuth = E->getLHS()->getType().getPointerAuth()) {
    // NOTE(review): 'LV' is used below but its declaration is among the lost
    // lines; presumably the checked l-value for E->getLHS().
    llvm::Value *RV =
        CGF.EmitPointerAuthQualify(PtrAuth, E->getRHS(), LV.getAddress());
    CGF.EmitNullabilityCheck(LV, RV, E->getExprLoc());

    if (Ignore)
      return nullptr;
    // The expression result is the plain (unqualified) assigned value.
    RV = CGF.EmitPointerAuthUnqualify(PtrAuth, RV, LV.getType(),
                                      LV.getAddress(), /*nonnull*/ false);
    return RV;
  }

  // Dispatch on the ObjC ownership qualifier of the destination; each ARC
  // qualifier uses its own store helper. (Case labels lost in this copy; the
  // helper names indicate which arm is which.)
  switch (E->getLHS()->getType().getObjCLifetime()) {
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
    break;

    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if LHS is a bitfield, if RHS contains an implicit cast expression
    // we want to extract that value and potentially (if the bitfield sanitizer
    // is enabled) use it to check for an implicit conversion.
    if (E->getLHS()->refersToBitField())
      RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
    else
      RHS = Visit(E->getRHS());

    // Evaluate the destination l-value after the RHS.
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      Value *Src = Previous ? Previous : RHS;
      QualType DstType = E->getLHS()->getType();
      CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
                                      LHS.getBitFieldInfo(), E->getExprLoc());
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
  }
  // OpenMP: Handle lastprivate(condition:) in scalar assignment
  if (CGF.getLangOpts().OpenMP) {
    // NOTE(review): the call through CGF.CGM's OpenMP runtime was lost here;
    // only its final argument line survives.
                                                                  E->getLHS());
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}
5352
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // NOTE(review): several single statements (profile-counter increments and
  // MCDC bitmap resets/updates) appear to have been lost in this copy at the
  // blank spots below — verify against upstream.
  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {

    // Vector && has no short-circuit semantics: compare both operands against
    // zero and AND the resulting masks.
    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
        Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      } else
        CGF.markStmtMaybeUsed(E->getRHS());

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS())) {
      CGF.markStmtMaybeUsed(E->getRHS());
      return llvm::Constant::getNullValue(ResTy);
    }
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit line number for unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())

  // Artificial location to preserve the scope information
  {
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}
5495
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // NOTE(review): several single statements (profile-counter increments and
  // MCDC bitmap resets/updates) appear to have been lost in this copy at the
  // blank spots below — verify against upstream.
  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {

    // Vector || has no short-circuit semantics: compare both operands against
    // zero and OR the resulting masks.
    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
        Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      } else
        CGF.markStmtMaybeUsed(E->getRHS());

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    if (!CGF.ContainsLabel(E->getRHS())) {
      CGF.markStmtMaybeUsed(E->getRHS());
      return llvm::ConstantInt::get(ResTy, 1);
    }
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                               CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be true. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
    Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}
5632
Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  // Evaluate the LHS purely for its side effects; its value is discarded.
  CGF.EmitIgnoredExpr(E->getLHS());
  // Make sure there is a valid insertion point before emitting the RHS (the
  // LHS may have left the builder without one).
  CGF.EnsureInsertPoint();
  // The value of a comma expression is the value of its RHS.
  return Visit(E->getRHS());
}
5638
5639//===----------------------------------------------------------------------===//
5640// Other Operators
5641//===----------------------------------------------------------------------===//
5642
/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
// NOTE(review): the opening line of this definition (the static helper's
// signature introducing the Expr *E parameter) was lost in this copy of the
// file — verify against upstream.
                                           CodeGenFunction &CGF) {
  // Anything that is an integer or floating point constant is fine.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());

  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the scope
  // outside the lambda, that function may have returned already. Reading its
  // locals is a bad idea. Also, these reads may introduce races there didn't
  // exist in the source-level program.
}
5659
5660
5661Value *ScalarExprEmitter::
5662VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
5663 TestAndClearIgnoreResultAssign();
5664
5665 // Bind the common expression if necessary.
5666 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
5667
5668 Expr *condExpr = E->getCond();
5669 Expr *lhsExpr = E->getTrueExpr();
5670 Expr *rhsExpr = E->getFalseExpr();
5671
5672 // If the condition constant folds and can be elided, try to avoid emitting
5673 // the condition and the dead arm.
5674 bool CondExprBool;
5675 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5676 Expr *live = lhsExpr, *dead = rhsExpr;
5677 if (!CondExprBool) std::swap(live, dead);
5678
5679 // If the dead side doesn't have labels we need, just emit the Live part.
5680 if (!CGF.ContainsLabel(dead)) {
5681 if (CondExprBool) {
5683 CGF.incrementProfileCounter(lhsExpr);
5684 CGF.incrementProfileCounter(rhsExpr);
5685 }
5687 }
5688 Value *Result = Visit(live);
5689 CGF.markStmtMaybeUsed(dead);
5690
5691 // If the live part is a throw expression, it acts like it has a void
5692 // type, so evaluating it returns a null Value*. However, a conditional
5693 // with non-void type must return a non-null Value*.
5694 if (!Result && !E->getType()->isVoidType())
5695 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
5696
5697 return Result;
5698 }
5699 }
5700
5701 // OpenCL: If the condition is a vector, we can treat this condition like
5702 // the select function.
5703 if (CGF.getLangOpts().OpenCL && (condExpr->getType()->isVectorType() ||
5704 condExpr->getType()->isExtVectorType())) {
5706
5707 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5708 llvm::Value *LHS = Visit(lhsExpr);
5709 llvm::Value *RHS = Visit(rhsExpr);
5710
5711 llvm::Type *condType = ConvertType(condExpr->getType());
5712 auto *vecTy = cast<llvm::FixedVectorType>(condType);
5713
5714 unsigned numElem = vecTy->getNumElements();
5715 llvm::Type *elemType = vecTy->getElementType();
5716
5717 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
5718 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
5719 llvm::Value *tmp = Builder.CreateSExt(
5720 TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
5721 llvm::Value *tmp2 = Builder.CreateNot(tmp);
5722
5723 // Cast float to int to perform ANDs if necessary.
5724 llvm::Value *RHSTmp = RHS;
5725 llvm::Value *LHSTmp = LHS;
5726 bool wasCast = false;
5727 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
5728 if (rhsVTy->getElementType()->isFloatingPointTy()) {
5729 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
5730 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
5731 wasCast = true;
5732 }
5733
5734 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
5735 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
5736 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
5737 if (wasCast)
5738 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
5739
5740 return tmp5;
5741 }
5742
5743 if (condExpr->getType()->isVectorType() ||
5744 condExpr->getType()->isSveVLSBuiltinType()) {
5746
5747 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5748 llvm::Value *LHS = Visit(lhsExpr);
5749 llvm::Value *RHS = Visit(rhsExpr);
5750
5751 llvm::Type *CondType = ConvertType(condExpr->getType());
5752 auto *VecTy = cast<llvm::VectorType>(CondType);
5753
5754 if (VecTy->getElementType()->isIntegerTy(1))
5755 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
5756
5757 // OpenCL uses the MSB of the mask vector.
5758 llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
5759 if (condExpr->getType()->isExtVectorType())
5760 CondV = Builder.CreateICmpSLT(CondV, ZeroVec, "vector_cond");
5761 else
5762 CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
5763 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
5764 }
5765
5766 // If this is a really simple expression (like x ? 4 : 5), emit this as a
5767 // select instead of as control flow. We can only do this if it is cheap and
5768 // safe to evaluate the LHS and RHS unconditionally.
5769 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
5771 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
5772 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
5773
5775 CGF.incrementProfileCounter(lhsExpr);
5776 CGF.incrementProfileCounter(rhsExpr);
5778 } else
5779 CGF.incrementProfileCounter(E, StepV);
5780
5781 llvm::Value *LHS = Visit(lhsExpr);
5782 llvm::Value *RHS = Visit(rhsExpr);
5783 if (!LHS) {
5784 // If the conditional has void type, make sure we return a null Value*.
5785 assert(!RHS && "LHS and RHS types must match");
5786 return nullptr;
5787 }
5788 return Builder.CreateSelect(CondV, LHS, RHS, "cond");
5789 }
5790
5791 // If the top of the logical operator nest, reset the MCDC temp to 0.
5792 if (CGF.MCDCLogOpStack.empty())
5793 CGF.maybeResetMCDCCondBitmap(condExpr);
5794
5795 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
5796 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
5797 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
5798
5799 CodeGenFunction::ConditionalEvaluation eval(CGF);
5800 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
5801 CGF.getProfileCount(lhsExpr));
5802
5803 CGF.EmitBlock(LHSBlock);
5804
5805 // If the top of the logical operator nest, update the MCDC bitmap for the
5806 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5807 // may also contain a boolean expression.
5808 if (CGF.MCDCLogOpStack.empty())
5809 CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);
5810
5812 CGF.incrementProfileCounter(lhsExpr);
5813 else
5815
5816 eval.begin(CGF);
5817 Value *LHS = Visit(lhsExpr);
5818 eval.end(CGF);
5819
5820 LHSBlock = Builder.GetInsertBlock();
5821 Builder.CreateBr(ContBlock);
5822
5823 CGF.EmitBlock(RHSBlock);
5824
5825 // If the top of the logical operator nest, update the MCDC bitmap for the
5826 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5827 // may also contain a boolean expression.
5828 if (CGF.MCDCLogOpStack.empty())
5829 CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);
5830
5832 CGF.incrementProfileCounter(rhsExpr);
5833
5834 eval.begin(CGF);
5835 Value *RHS = Visit(rhsExpr);
5836 eval.end(CGF);
5837
5838 RHSBlock = Builder.GetInsertBlock();
5839 CGF.EmitBlock(ContBlock);
5840
5841 // If the LHS or RHS is a throw expression, it will be legitimately null.
5842 if (!LHS)
5843 return RHS;
5844 if (!RHS)
5845 return LHS;
5846
5847 // Create a PHI node for the real part.
5848 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
5849 PN->addIncoming(LHS, LHSBlock);
5850 PN->addIncoming(RHS, RHSBlock);
5851
5852 // When single byte coverage mode is enabled, add a counter to continuation
5853 // block.
5856
5857 return PN;
5858}
5859
5860Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
5861 return Visit(E->getChosenSubExpr());
5862}
5863
5864Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
5865 Address ArgValue = Address::invalid();
5866 RValue ArgPtr = CGF.EmitVAArg(VE, ArgValue);
5867
5868 return ArgPtr.getScalarVal();
5869}
5870
5871Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
5872 return CGF.EmitBlockLiteral(block);
5873}
5874
5875// Convert a vec3 to vec4, or vice versa.
5877 Value *Src, unsigned NumElementsDst) {
5878 static constexpr int Mask[] = {0, 1, 2, -1};
5879 return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
5880}
5881
5882// Create cast instructions for converting LLVM value \p Src to LLVM type \p
5883// DstTy. \p Src has the same size as \p DstTy. Both are single value types
5884// but could be scalar or vectors of different lengths, and either can be
5885// pointer.
5886// There are 4 cases:
5887// 1. non-pointer -> non-pointer : needs 1 bitcast
5888// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
5889// 3. pointer -> non-pointer
5890// a) pointer -> intptr_t : needs 1 ptrtoint
5891// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
5892// 4. non-pointer -> pointer
5893// a) intptr_t -> pointer : needs 1 inttoptr
5894// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
5895// Note: for cases 3b and 4b two casts are required since LLVM casts do not
5896// allow casting directly between pointer types and non-integer non-pointer
5897// types.
5899 const llvm::DataLayout &DL,
5900 Value *Src, llvm::Type *DstTy,
5901 StringRef Name = "") {
5902 auto SrcTy = Src->getType();
5903
5904 // Case 1.
5905 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
5906 return Builder.CreateBitCast(Src, DstTy, Name);
5907
5908 // Case 2.
5909 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
5910 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
5911
5912 // Case 3.
5913 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
5914 // Case 3b.
5915 if (!DstTy->isIntegerTy())
5916 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
5917 // Cases 3a and 3b.
5918 return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
5919 }
5920
5921 // Case 4b.
5922 if (!SrcTy->isIntegerTy())
5923 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
5924 // Cases 4a and 4b.
5925 return Builder.CreateIntToPtr(Src, DstTy, Name);
5926}
5927
5928Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
5929 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
5930 llvm::Type *DstTy = ConvertType(E->getType());
5931
5932 llvm::Type *SrcTy = Src->getType();
5933 unsigned NumElementsSrc =
5935 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
5936 : 0;
5937 unsigned NumElementsDst =
5939 ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
5940 : 0;
5941
5942 // Use bit vector expansion for ext_vector_type boolean vectors.
5943 if (E->getType()->isExtVectorBoolType())
5944 return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");
5945
5946 // Going from vec3 to non-vec3 is a special case and requires a shuffle
5947 // vector to get a vec4, then a bitcast if the target type is different.
5948 if (NumElementsSrc == 3 && NumElementsDst != 3) {
5949 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
5950 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5951 DstTy);
5952
5953 Src->setName("astype");
5954 return Src;
5955 }
5956
5957 // Going from non-vec3 to vec3 is a special case and requires a bitcast
5958 // to vec4 if the original type is not vec4, then a shuffle vector to
5959 // get a vec3.
5960 if (NumElementsSrc != 3 && NumElementsDst == 3) {
5961 auto *Vec4Ty = llvm::FixedVectorType::get(
5962 cast<llvm::VectorType>(DstTy)->getElementType(), 4);
5963 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5964 Vec4Ty);
5965
5966 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
5967 Src->setName("astype");
5968 return Src;
5969 }
5970
5971 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
5972 Src, DstTy, "astype");
5973}
5974
5975Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
5976 return CGF.EmitAtomicExpr(E).getScalarVal();
5977}
5978
5979//===----------------------------------------------------------------------===//
5980// Entry Point into this File
5981//===----------------------------------------------------------------------===//
5982
5983/// Emit the computation of the specified expression of scalar type, ignoring
5984/// the result.
5985Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
5986 assert(E && hasScalarEvaluationKind(E->getType()) &&
5987 "Invalid scalar expression to emit");
5988
5989 return ScalarExprEmitter(*this, IgnoreResultAssign)
5990 .Visit(const_cast<Expr *>(E));
5991}
5992
5993/// Emit a conversion from the specified type to the specified destination type,
5994/// both of which are LLVM scalar types.
5996 QualType DstTy,
5997 SourceLocation Loc) {
5998 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
5999 "Invalid scalar expression to emit");
6000 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
6001}
6002
6003/// Emit a conversion from the specified complex type to the specified
6004/// destination type, where the destination type is an LLVM scalar type.
6006 QualType SrcTy,
6007 QualType DstTy,
6008 SourceLocation Loc) {
6009 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
6010 "Invalid complex -> scalar conversion");
6011 return ScalarExprEmitter(*this)
6012 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
6013}
6014
6015
6016Value *
6018 QualType PromotionType) {
6019 if (!PromotionType.isNull())
6020 return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
6021 else
6022 return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
6023}
6024
6025
6028 bool isInc, bool isPre) {
6029 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
6030}
6031
6033 // object->isa or (*object).isa
6034 // Generate code as for: *(Class*)object
6035
6036 Expr *BaseExpr = E->getBase();
6038 if (BaseExpr->isPRValue()) {
6039 llvm::Type *BaseTy =
6041 Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
6042 } else {
6043 Addr = EmitLValue(BaseExpr).getAddress();
6044 }
6045
6046 // Cast the address to Class*.
6047 Addr = Addr.withElementType(ConvertType(E->getType()));
6048 return MakeAddrLValue(Addr, E->getType());
6049}
6050
6051
6053 const CompoundAssignOperator *E) {
6055 ScalarExprEmitter Scalar(*this);
6056 Value *Result = nullptr;
6057 switch (E->getOpcode()) {
6058#define COMPOUND_OP(Op) \
6059 case BO_##Op##Assign: \
6060 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
6061 Result)
6062 COMPOUND_OP(Mul);
6063 COMPOUND_OP(Div);
6064 COMPOUND_OP(Rem);
6065 COMPOUND_OP(Add);
6066 COMPOUND_OP(Sub);
6067 COMPOUND_OP(Shl);
6068 COMPOUND_OP(Shr);
6070 COMPOUND_OP(Xor);
6071 COMPOUND_OP(Or);
6072#undef COMPOUND_OP
6073
6074 case BO_PtrMemD:
6075 case BO_PtrMemI:
6076 case BO_Mul:
6077 case BO_Div:
6078 case BO_Rem:
6079 case BO_Add:
6080 case BO_Sub:
6081 case BO_Shl:
6082 case BO_Shr:
6083 case BO_LT:
6084 case BO_GT:
6085 case BO_LE:
6086 case BO_GE:
6087 case BO_EQ:
6088 case BO_NE:
6089 case BO_Cmp:
6090 case BO_And:
6091 case BO_Xor:
6092 case BO_Or:
6093 case BO_LAnd:
6094 case BO_LOr:
6095 case BO_Assign:
6096 case BO_Comma:
6097 llvm_unreachable("Not valid compound assignment operators");
6098 }
6099
6100 llvm_unreachable("Unhandled compound assignment operator");
6101}
6102
6104 // The total (signed) byte offset for the GEP.
6105 llvm::Value *TotalOffset;
6106 // The offset overflow flag - true if the total offset overflows.
6107 llvm::Value *OffsetOverflows;
6108};
6109
6110/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
6111/// and compute the total offset it applies from it's base pointer BasePtr.
6112/// Returns offset in bytes and a boolean flag whether an overflow happened
6113/// during evaluation.
6115 llvm::LLVMContext &VMContext,
6116 CodeGenModule &CGM,
6117 CGBuilderTy &Builder) {
6118 const auto &DL = CGM.getDataLayout();
6119
6120 // The total (signed) byte offset for the GEP.
6121 llvm::Value *TotalOffset = nullptr;
6122
6123 // Was the GEP already reduced to a constant?
6124 if (isa<llvm::Constant>(GEPVal)) {
6125 // Compute the offset by casting both pointers to integers and subtracting:
6126 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
6127 Value *BasePtr_int =
6128 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
6129 Value *GEPVal_int =
6130 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
6131 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
6132 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
6133 }
6134
6135 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
6136 assert(GEP->getPointerOperand() == BasePtr &&
6137 "BasePtr must be the base of the GEP.");
6138 assert(GEP->isInBounds() && "Expected inbounds GEP");
6139
6140 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
6141
6142 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
6143 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
6144 auto *SAddIntrinsic =
6145 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
6146 auto *SMulIntrinsic =
6147 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
6148
6149 // The offset overflow flag - true if the total offset overflows.
6150 llvm::Value *OffsetOverflows = Builder.getFalse();
6151
6152 /// Return the result of the given binary operation.
6153 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
6154 llvm::Value *RHS) -> llvm::Value * {
6155 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
6156
6157 // If the operands are constants, return a constant result.
6158 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
6159 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
6160 llvm::APInt N;
6161 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
6162 /*Signed=*/true, N);
6163 if (HasOverflow)
6164 OffsetOverflows = Builder.getTrue();
6165 return llvm::ConstantInt::get(VMContext, N);
6166 }
6167 }
6168
6169 // Otherwise, compute the result with checked arithmetic.
6170 auto *ResultAndOverflow = Builder.CreateCall(
6171 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
6172 OffsetOverflows = Builder.CreateOr(
6173 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
6174 return Builder.CreateExtractValue(ResultAndOverflow, 0);
6175 };
6176
6177 // Determine the total byte offset by looking at each GEP operand.
6178 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
6179 GTI != GTE; ++GTI) {
6180 llvm::Value *LocalOffset;
6181 auto *Index = GTI.getOperand();
6182 // Compute the local offset contributed by this indexing step:
6183 if (auto *STy = GTI.getStructTypeOrNull()) {
6184 // For struct indexing, the local offset is the byte position of the
6185 // specified field.
6186 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
6187 LocalOffset = llvm::ConstantInt::get(
6188 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
6189 } else {
6190 // Otherwise this is array-like indexing. The local offset is the index
6191 // multiplied by the element size.
6192 auto *ElementSize =
6193 llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
6194 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
6195 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
6196 }
6197
6198 // If this is the first offset, set it as the total offset. Otherwise, add
6199 // the local offset into the running total.
6200 if (!TotalOffset || TotalOffset == Zero)
6201 TotalOffset = LocalOffset;
6202 else
6203 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
6204 }
6205
6206 return {TotalOffset, OffsetOverflows};
6207}
6208
/// Emit an inbounds GEP over \p Ptr and, when -fsanitize=pointer-overflow is
/// enabled, instrument it with null-pointer and offset-overflow checks.
Value *
CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
                                        ArrayRef<Value *> IdxList,
                                        bool SignedIndices, bool IsSubtraction,
                                        SourceLocation Loc, const Twine &Name) {
  llvm::Type *PtrTy = Ptr->getType();

  // Unsigned, non-subtracting indices additionally cannot wrap in the
  // unsigned sense, so record that on the GEP.
  llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
  if (!SignedIndices && !IsSubtraction)
    NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();

  Value *GEPVal = Builder.CreateGEP(ElemTy, Ptr, IdxList, Name, NWFlags);

  // If the pointer overflow sanitizer isn't enabled, do nothing.
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return GEPVal;

  // Perform nullptr-and-offset check unless the nullptr is defined.
  bool PerformNullCheck = !NullPointerIsDefined(
      Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
  // Check for overflows unless the GEP got constant-folded,
  // and only in the default address space
  bool PerformOverflowCheck =
      !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;

  if (!(PerformNullCheck || PerformOverflowCheck))
    return GEPVal;

  const auto &DL = CGM.getDataLayout();

  auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
  auto CheckHandler = SanitizerHandler::PointerOverflow;
  SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
  llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);

  GEPOffsetAndOverflow EvaluatedGEP =
      EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);

  assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
          EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
         "If the offset got constant-folded, we don't expect that there was an "
         "overflow.");

  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);

  // Common case: if the total offset is zero, don't emit a check.
  if (EvaluatedGEP.TotalOffset == Zero)
    return GEPVal;

  // Now that we've computed the total offset, add it to the base pointer (with
  // wrapping semantics).
  auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
  auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);

  llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
                    2>
      Checks;

  if (PerformNullCheck) {
    // If the base pointer evaluates to a null pointer value,
    // the only valid pointer this inbounds GEP can produce is also
    // a null pointer, so the offset must also evaluate to zero.
    // Likewise, if we have non-zero base pointer, we can not get null pointer
    // as a result, so the offset can not be -intptr_t(BasePtr).
    // In other words, both pointers are either null, or both are non-null,
    // or the behaviour is undefined.
    auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
    auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
    auto *Valid = Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr);
    Checks.emplace_back(Valid, CheckOrdinal);
  }

  if (PerformOverflowCheck) {
    // The GEP is valid if:
    // 1) The total offset doesn't overflow, and
    // 2) The sign of the difference between the computed address and the base
    // pointer matches the sign of the total offset.
    llvm::Value *ValidGEP;
    auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
    if (SignedIndices) {
      // GEP is computed as `unsigned base + signed offset`, therefore:
      // * If offset was positive, then the computed pointer can not be
      // [unsigned] less than the base pointer, unless it overflowed.
      // * If offset was negative, then the computed pointer can not be
      // [unsigned] greater than the base pointer, unless it overflowed.
      auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
      auto *PosOrZeroOffset =
          Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
      llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
      ValidGEP =
          Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
    } else if (!IsSubtraction) {
      // GEP is computed as `unsigned base + unsigned offset`, therefore the
      // computed pointer can not be [unsigned] less than base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
      ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
    } else {
      // GEP is computed as `unsigned base - unsigned offset`, therefore the
      // computed pointer can not be [unsigned] greater than base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
      ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
    }
    ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
    Checks.emplace_back(ValidGEP, CheckOrdinal);
  }

  assert(!Checks.empty() && "Should have produced some checks.");

  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
  EmitCheck(Checks, CheckHandler, StaticArgs, DynamicArgs);

  return GEPVal;
}
6326
6328 Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
6329 bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
6330 const Twine &Name) {
6331 if (!SanOpts.has(SanitizerKind::PointerOverflow)) {
6332 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6333 if (!SignedIndices && !IsSubtraction)
6334 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6335
6336 return Builder.CreateGEP(Addr, IdxList, elementType, Align, Name, NWFlags);
6337 }
6338
6339 return RawAddress(
6340 EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
6341 IdxList, SignedIndices, IsSubtraction, Loc, Name),
6342 elementType, Align);
6343}
Defines the clang::ASTContext interface.
#define V(N, I)
static llvm::Value * EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, const BinaryOperator *E, llvm::Value *LHS, llvm::Value *RHS, CompareKind Kind, const char *NameSuffix="")
static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue DestVal, LValue SrcVal, SourceLocation Loc)
static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty)
static llvm::Value * EmitIsNegativeTestHelper(Value *V, QualType VType, const char *Name, CGBuilderTy &Builder)
static Value * createCastsForTypeOfSameSize(CGBuilderTy &Builder, const llvm::DataLayout &DL, Value *Src, llvm::Type *DstTy, StringRef Name="")
static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E)
static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc, bool isPre, ASTContext &Ctx)
For the purposes of overflow pattern exclusion, does this match the "while(i--)" pattern?
IntrinsicType
@ VCMPGT
@ VCMPEQ
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, BuiltinType::Kind ElemKind)
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal, llvm::LLVMContext &VMContext, CodeGenModule &CGM, CGBuilderTy &Builder)
Evaluate given GEPVal, which is either an inbounds GEP, or a constant, and compute the total offset i...
static bool isDeclRefKnownNonNull(CodeGenFunction &CGF, const ValueDecl *D)
static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(QualType SrcType, QualType DstType)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool negMul, bool negAdd)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, unsigned Off)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF, Value *Src, unsigned NumElementsDst)
static Value * tryEmitFMulAdd(const BinOpInfo &op, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool isSub=false)
static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E, llvm::Value *InVal, bool IsInc, FPOptions FPFeatures)
#define HANDLE_BINOP(OP)
#define COMPOUND_OP(Op)
#define HANDLEBINOP(OP)
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e, CIRGenFunction &cgf)
Return true if the specified expression is cheap enough and side-effect-free enough to evaluate uncon...
static std::optional< QualType > getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e)
If e is a widened promoted integer, get its base (unpromoted) type.
#define VISITCOMP(CODE)
static uint32_t getBitWidth(const Expr *E)
llvm::APSInt APSInt
Definition Compiler.cpp:24
static Decl::Kind getKind(const Decl *D)
FormatToken * Previous
The previous token in the unwrapped line.
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
This file contains the declaration of TrapReasonBuilder and related classes.
llvm::APInt getValue() const
APSInt & getInt()
Definition APValue.h:489
bool isLValue() const
Definition APValue.h:472
bool isInt() const
Definition APValue.h:467
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
ParentMapContext & getParentMapContext()
Returns the dynamic AST node parent map context.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
CanQualType FloatTy
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
const LangOptions & getLangOpts() const
Definition ASTContext.h:944
bool isTypeIgnoredBySanitizer(const SanitizerMask &Mask, const QualType &Ty) const
Check if a type can have its sanitizer instrumentation elided based on its presence within an ignorel...
CanQualType BoolTy
unsigned getOpenMPDefaultSimdAlign(QualType T) const
Get default simd alignment of the specified complete type in bits.
llvm::FixedPointSemantics getFixedPointSemantics(QualType Ty) const
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getPromotedIntegerType(QualType PromotableType) const
Return the type that PromotableType will promote to: C99 6.3.1.1p2, assuming that PromotableType is a...
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getComplexType(QualType T) const
Return the uniqued reference to the type for a complex number with the specified element type.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:909
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
bool isPromotableIntegerType(QualType T) const
More type predicates useful for type checking/promotion.
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4531
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4537
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4543
LabelDecl * getLabel() const
Definition Expr.h:4573
uint64_t getValue() const
Definition ExprCXX.h:3044
QualType getElementType() const
Definition TypeBase.h:3735
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:6704
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4038
Expr * getLHS() const
Definition Expr.h:4088
static Opcode getOpForCompoundAssignment(Opcode Opc)
Definition Expr.h:4185
bool isCompoundAssignmentOp() const
Definition Expr.h:4182
SourceLocation getExprLoc() const
Definition Expr.h:4079
bool isShiftOp() const
Definition Expr.h:4127
Expr * getRHS() const
Definition Expr.h:4090
bool isShiftAssignOp() const
Definition Expr.h:4196
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:4251
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, const Expr *LHS, const Expr *RHS)
Return true if a binary operator using the specified opcode and operands would match the 'p = (i8*)nu...
Definition Expr.cpp:2204
Opcode getOpcode() const
Definition Expr.h:4083
BinaryOperatorKind Opcode
Definition Expr.h:4043
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition DeclCXX.h:249
bool getValue() const
Definition ExprCXX.h:740
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
bool getValue() const
Definition ExprCXX.h:4332
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition ExprCXX.h:304
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1602
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3676
path_iterator path_begin()
Definition Expr.h:3746
CastKind getCastKind() const
Definition Expr.h:3720
bool changesVolatileQualification() const
Return.
Definition Expr.h:3810
path_iterator path_end()
Definition Expr.h:3747
Expr * getSubExpr()
Definition Expr.h:3726
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
bool isOne() const
isOne - Test whether the quantity equals one.
Definition CharUnits.h:125
unsigned getValue() const
Definition Expr.h:1629
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4884
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
SanitizerSet SanitizeTrap
Set of sanitizer checks that trap rather than diagnose.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
static Address invalid()
Definition Address.h:176
bool isValid() const
Definition Address.h:177
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:112
virtual llvm::Constant * EmitNullMemberPointer(const MemberPointerType *MPT)
Create a null member pointer of the given type.
Definition CGCXXABI.cpp:102
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition CGCXXABI.cpp:94
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition CGCXXABI.cpp:84
virtual llvm::Value * EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *Src)
Perform a derived-to-base, base-to-derived, or bitcast member pointer conversion.
Definition CGCXXABI.cpp:71
void EmitPseudoVariable(CGBuilderTy &Builder, llvm::Instruction *Value, QualType Ty)
Emit a pseudo variable and debug info for an intermediate value if it does not correspond to a variab...
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
void emitInitListOpaqueValues(CodeGenFunction &CGF, InitListExpr *E)
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
Produce the code for a CK_ARCConsumeObject.
Definition CGObjC.cpp:2152
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
Definition CGObjC.cpp:573
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
CurrentSourceLocExprScope CurSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
llvm::Value * EmitARCReclaimReturnedObject(const Expr *e, bool allowUnsafeClaim)
Definition CGObjC.cpp:3089
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
Definition CGObjC.cpp:3679
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:6951
llvm::Value * EmitObjCSelectorExpr(const ObjCSelectorExpr *E)
Emit a selector.
Definition CGObjC.cpp:257
SanitizerSet SanOpts
Sanitizers enabled for this function.
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
Definition CGExpr.cpp:184
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
llvm::Value * EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E)
Definition CGObjC.cpp:251
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
const CastExpr * CurCast
If a cast expression is being visited, this holds the current cast's expression.
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitObjCProtocolExpr(const ObjCProtocolExpr *E)
Definition CGObjC.cpp:265
llvm::Value * EmitPointerAuthQualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType ValueType, Address StorageAddress, bool IsKnownNonNull)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
Definition CGExpr.cpp:2857
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3855
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition CGCall.cpp:6329
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
Definition CGExpr.cpp:7051
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void maybeUpdateMCDCTestVectorBitmap(const Expr *E)
Increment the profiler's counter for the given expression by StepV.
void EmitCXXDeleteExpr(const CXXDeleteExpr *E)
llvm::Value * EmitObjCArrayLiteral(const ObjCArrayLiteral *E)
Definition CGObjC.cpp:247
llvm::Value * EmitPromotedScalarExpr(const Expr *E, QualType PromotionType)
const LangOptions & getLangOpts() const
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
Store into a strong object.
Definition CGObjC.cpp:2545
bool isPointerKnownNonNull(const Expr *E)
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
Definition CGClass.cpp:394
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
Definition CGDecl.cpp:765
llvm::Value * EmitPointerAuthUnqualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType PointerType, Address StorageAddress, bool IsKnownNonNull)
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
Emit a compare-and-exchange op for atomic type.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
Definition CGClass.cpp:2999
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitCXXNewExpr(const CXXNewExpr *E)
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
std::pair< LValue, llvm::Value * > EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored)
Definition CGObjC.cpp:3629
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3745
LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E, llvm::Value *&Result)
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition CGExpr.cpp:177
const TargetInfo & getTarget() const
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
llvm::Value * EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty)
Definition CGObjC.cpp:3953
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:245
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:6198
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2402
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
llvm::Value * EmitObjCBoxedExpr(const ObjCBoxedExpr *E)
EmitObjCBoxedExpr - This routine generates code to call the appropriate expression boxing method.
Definition CGObjC.cpp:64
void EmitBoundsCheck(const Expr *ArrayExpr, const Expr *ArrayExprBase, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
Definition CGExpr.cpp:1239
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:226
void maybeResetMCDCCondBitmap(const Expr *E)
Zero-init the MCDC temp value.
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:4003
SmallVector< const BinaryOperator *, 16 > MCDCLogOpStack
Stack to track the Logical Operator recursion nest for MC/DC.
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6151
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
Definition CGExpr.cpp:2029
llvm::Value * EmitARCRetainScalarExpr(const Expr *expr)
EmitARCRetainScalarExpr - Semantically equivalent to EmitARCRetainObject(e->getType(),...
Definition CGObjC.cpp:3493
llvm::Value * EmitBlockLiteral(const BlockExpr *)
Emit block literal.
Definition CGBlocks.cpp:764
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2215
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6137
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2638
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4433
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition CGStmt.cpp:573
llvm::AtomicRMWInst * emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Order=llvm::AtomicOrdering::SequentiallyConsistent, llvm::SyncScope::ID SSID=llvm::SyncScope::System, const AtomicExpr *AE=nullptr)
Emit an atomicrmw instruction, and applying relevant metadata when applicable.
llvm::Value * EmitPointerArithmetic(const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer, Expr *indexOperand, llvm::Value *index, bool isSubtraction)
Emit pointer + index arithmetic.
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:267
uint64_t getCurrentProfileCount()
Get the profiler's current count.
llvm::Type * ConvertTypeForMem(QualType T)
RValue EmitAtomicExpr(AtomicExpr *E)
Definition CGAtomic.cpp:899
void markStmtMaybeUsed(const Stmt *S)
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
void FlattenAccessAndTypeLValue(LValue LVal, SmallVectorImpl< LValue > &AccessList)
Definition CGExpr.cpp:7060
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1575
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition CGStmt.cpp:676
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition CGExpr.cpp:1656
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition CGExpr.cpp:189
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
Definition CGExpr.cpp:737
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitBuiltinAvailable(const VersionTuple &Version)
Definition CGObjC.cpp:4033
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
Definition CGExpr.cpp:4982
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4345
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of 'this'.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2245
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
llvm::Value * EmitObjCStringLiteral(const ObjCStringLiteral *E)
Emits an instance of NSConstantString representing the object.
Definition CGObjC.cpp:51
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr)
Try to emit a reference to the given value without producing it as an l-value.
Definition CGExpr.cpp:1926
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1691
llvm::Value * EmitARCExtendBlockObject(const Expr *expr)
Definition CGObjC.cpp:3524
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
i8* @objc_storeWeak(i8** addr, i8* value) Returns value.
Definition CGObjC.cpp:2651
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType)
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:656
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition CGExpr.cpp:1371
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
TrapReasonBuilder BuildTrapReason(unsigned DiagID, TrapReason &TR)
Helper function to construct a TrapReasonBuilder.
llvm::Constant * getNullPointer(llvm::PointerType *T, QualType QT)
Get target specific null pointer.
const TargetInfo & getTarget() const
llvm::Constant * getMemberPointerConstant(const UnaryOperator *e)
const llvm::DataLayout & getDataLayout() const
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Value * createOpenCLIntToSamplerConversion(const Expr *E, CodeGenFunction &CGF)
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
LValue - This represents an lvalue references.
Definition CGValue.h:183
bool isBitField() const
Definition CGValue.h:288
bool isVolatileQualified() const
Definition CGValue.h:297
const Qualifiers & getQuals() const
Definition CGValue.h:350
Address getAddress() const
Definition CGValue.h:373
QualType getType() const
Definition CGValue.h:303
const CGBitFieldInfo & getBitFieldInfo() const
Definition CGValue.h:446
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:99
bool isAggregate() const
Definition CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:84
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:72
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, llvm::Type *DestTy, bool IsNonNull=false) const
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4300
QualType getComputationLHSType() const
Definition Expr.h:4334
QualType getComputationResultType() const
Definition Expr.h:4337
bool isSatisfied() const
Whether or not the concept with the given arguments was satisfied when the expression was created.
APValue getAPValueResult() const
Definition Expr.cpp:412
bool hasAPValueResult() const
Definition Expr.h:1157
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4388
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:4809
T * getAttr() const
Definition DeclBase.h:573
ChildElementIter< false > begin()
Definition Expr.h:5232
size_t getDataElementCount() const
Definition Expr.h:5148
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isIntegerConstantExpr(const ASTContext &Ctx) const
bool isGLValue() const
Definition Expr.h:287
@ SE_AllowSideEffects
Allow any unmodeled side effect.
Definition Expr.h:674
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3085
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3069
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:276
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition Expr.h:476
QualType getType() const
Definition Expr.h:144
llvm::APInt getValue() const
Returns an internal integer representation of the literal.
Definition Expr.h:1575
llvm::APFloat getValue() const
Definition Expr.h:1666
const Expr * getSubExpr() const
Definition Expr.h:1062
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition Expr.h:3853
unsigned getNumInits() const
Definition Expr.h:5329
bool hadArrayRangeDesignator() const
Definition Expr.h:5483
const Expr * getInit(unsigned Init) const
Definition Expr.h:5353
@ PostDecrInWhile
while (count--)
bool isSignedOverflowDefined() const
bool isOverflowPatternExcluded(OverflowPatternExclusionKind Kind) const
std::string OverflowHandler
The name of the handler function to be called when -ftrapv is specified.
Represents a matrix type, as defined in the Matrix Types clang extensions.
Definition TypeBase.h:4338
Expr * getBase() const
Definition Expr.h:3441
bool isArrow() const
Definition Expr.h:3548
VersionTuple getVersion() const
Definition ExprObjC.h:1723
ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC 'id' type.
Definition ExprObjC.h:1495
Expr * getBase() const
Definition ExprObjC.h:1520
SourceLocation getExprLoc() const LLVM_READONLY
Definition ExprObjC.h:1543
const ObjCMethodDecl * getMethodDecl() const
Definition ExprObjC.h:1361
QualType getReturnType() const
Definition DeclObjC.h:329
Represents a pointer to an Objective C object.
Definition TypeBase.h:7910
const ObjCObjectType * getObjectType() const
Gets the type pointed to by this ObjC pointer.
Definition TypeBase.h:7947
Expr * getIndexExpr(unsigned Idx)
Definition Expr.h:2586
const OffsetOfNode & getComponent(unsigned Idx) const
Definition Expr.h:2574
TypeSourceInfo * getTypeSourceInfo() const
Definition Expr.h:2567
unsigned getNumComponents() const
Definition Expr.h:2582
unsigned getArrayExprIndex() const
For an array element node, returns the index into the array of expressions.
Definition Expr.h:2479
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition Expr.h:2485
@ Array
An index into an array.
Definition Expr.h:2426
@ Identifier
A field in a dependent type, known only by its name.
Definition Expr.h:2430
@ Field
A field.
Definition Expr.h:2428
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition Expr.h:2433
Kind getKind() const
Determine what kind of offsetof node this is.
Definition Expr.h:2475
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition Expr.h:2495
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:1208
Expr * getSelectedExpr() const
Definition ExprCXX.h:4639
const Expr * getSubExpr() const
Definition Expr.h:2199
DynTypedNodeList getParents(const NodeT &Node)
Returns the parents of the given node (within the traversal scope).
Pointer-authentication qualifiers.
Definition TypeBase.h:152
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3329
A (possibly-)qualified type.
Definition TypeBase.h:937
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:1453
bool mayBeDynamicClass() const
Returns true if it is a class and it might be dynamic.
Definition Type.cpp:131
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8292
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8418
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1438
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8477
QualType getCanonicalType() const
Definition TypeBase.h:8344
bool UseExcessPrecision(const ASTContext &Ctx)
Definition Type.cpp:1613
bool mayBeNotDynamicClass() const
Returns true if it is not a class or if the class might not be dynamic.
Definition Type.cpp:136
bool isCanonical() const
Definition TypeBase.h:8349
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
void removePointerAuth()
Definition TypeBase.h:610
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4524
bool isSatisfied() const
Whether or not the requires clause is satisfied.
std::string ComputeName(ASTContext &Context) const
Definition Expr.cpp:586
static constexpr SanitizerMask bitPosToMask(const unsigned Pos)
Create a mask with a bit enabled at position Pos.
Definition Sanitizers.h:59
llvm::APSInt getShuffleMaskIdx(unsigned N) const
Definition Expr.h:4695
unsigned getNumSubExprs() const
getNumSubExprs - Return the size of the SubExprs array.
Definition Expr.h:4676
Expr * getExpr(unsigned Index)
getExpr - Return the Expr at the specified index.
Definition Expr.h:4682
unsigned getPackLength() const
Retrieve the length of the parameter pack.
Definition ExprCXX.h:4515
APValue EvaluateInContext(const ASTContext &Ctx, const Expr *DefaultExpr) const
Return the result of evaluating this SourceLocExpr in the specified (and possibly null) default argum...
Definition Expr.cpp:2281
SourceLocation getLocation() const
Definition Expr.h:5061
Encodes a location in the source.
CompoundStmt * getSubStmt()
Definition Expr.h:4612
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
void dump() const
Dumps the specified AST fragment and all subtrees to llvm::errs().
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:350
virtual bool useFP16ConversionIntrinsics() const
Check whether llvm intrinsics such as llvm.convert.to.fp16 should be used to convert to and from __fp...
VersionTuple getPlatformMinVersion() const
Retrieve the minimum desired version of the platform, to which the program should be compiled.
const llvm::fltSemantics & getHalfFormat() const
Definition TargetInfo.h:788
const llvm::fltSemantics & getBFloat16Format() const
Definition TargetInfo.h:798
const llvm::fltSemantics & getLongDoubleFormat() const
Definition TargetInfo.h:809
const llvm::fltSemantics & getFloat128Format() const
Definition TargetInfo.h:817
const llvm::fltSemantics & getIbm128Format() const
Definition TargetInfo.h:825
QualType getType() const
Return the type wrapped by this type source info.
Definition TypeBase.h:8274
bool getBoolValue() const
Definition ExprCXX.h:2947
const APValue & getAPValue() const
Definition ExprCXX.h:2952
bool isStoredAsBoolean() const
Definition ExprCXX.h:2943
bool isVoidType() const
Definition TypeBase.h:8891
bool isBooleanType() const
Definition TypeBase.h:9021
bool isSignableType(const ASTContext &Ctx) const
Definition TypeBase.h:8541
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2226
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition Type.cpp:2274
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2338
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8935
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9178
bool isReferenceType() const
Definition TypeBase.h:8553
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1910
bool isSveVLSBuiltinType() const
Determines if this is a sizeless type supported by the 'arm_sve_vector_bits' type attribute,...
Definition Type.cpp:2608
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
bool isExtVectorType() const
Definition TypeBase.h:8672
bool isExtVectorBoolType() const
Definition TypeBase.h:8676
bool isOCLIntelSubgroupAVCType() const
Definition TypeBase.h:8810
bool isBuiltinType() const
Helper methods to distinguish type categories.
Definition TypeBase.h:8652
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8664
bool isFixedPointType() const
Return true if this is a fixed point type according to ISO/IEC JTC1 SC22 WG14 N1169.
Definition TypeBase.h:8947
bool isHalfType() const
Definition TypeBase.h:8895
bool hasSignedIntegerRepresentation() const
Determine whether this type has an signed integer representation of some sort, e.g....
Definition Type.cpp:2244
bool isQueueT() const
Definition TypeBase.h:8781
bool isMatrixType() const
Definition TypeBase.h:8692
bool isEventT() const
Definition TypeBase.h:8773
bool isFunctionType() const
Definition TypeBase.h:8525
bool isVectorType() const
Definition TypeBase.h:8668
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2321
bool isFloatingType() const
Definition Type.cpp:2305
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2254
const T * castAsCanonical() const
Return this type's canonical type cast to the specified type.
Definition TypeBase.h:2929
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9111
bool isNullPtrType() const
Definition TypeBase.h:8928
QualType getTypeOfArgument() const
Gets the argument type, or the type of the argument expression, whichever is appropriate.
Definition Expr.h:2694
UnaryExprOrTypeTrait getKind() const
Definition Expr.h:2657
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
SourceLocation getExprLoc() const
Definition Expr.h:2368
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:2400
bool canOverflow() const
Returns true if the unary operator can cause an overflow.
Definition Expr.h:2298
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
bool isWeak() const
Determine whether this symbol is weakly-imported, or declared with the weak or weak-ref attr.
Definition Decl.cpp:5576
QualType getType() const
Definition Value.cpp:237
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3967
Represents a GCC generic vector type.
Definition TypeBase.h:4176
WhileStmt - This represents a 'while' stmt.
Definition Stmt.h:2688
Defines the clang::TargetInfo interface.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::ArgumentAdaptingMatcherFunc< internal::HasMatcher > has
Matches AST nodes that have child AST nodes that match the provided matcher.
const AstTypeMatcher< PointerType > pointerType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
llvm::APFloat APFloat
Definition Floating.h:27
llvm::APInt APInt
Definition FixedPoint.h:19
bool LE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1282
bool Load(InterpState &S, CodePtr OpPC)
Definition Interp.h:1945
bool GE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1297
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
if(T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T -> getSizeExpr())))
@ Result
The result type of a method or function.
Definition TypeBase.h:905
const FunctionProtoType * T
CastKind
CastKind - The kind of operation required for a conversion.
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
#define false
Definition stdbool.h:26
#define true
Definition stdbool.h:25
llvm::Value * TotalOffset
llvm::Value * OffsetOverflows
Structure with information about how a bitfield should be accessed.
unsigned Size
The total size of the bit-field, in bits.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
static TBAAAccessInfo getMayAliasInfo()
Definition CodeGenTBAA.h:63
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition Sanitizers.h:184