clang 22.0.0git
CGExprScalar.cpp
Go to the documentation of this file.
1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGHLSLRuntime.h"
17#include "CGObjCRuntime.h"
18#include "CGOpenMPRuntime.h"
19#include "CGRecordLayout.h"
20#include "CodeGenFunction.h"
21#include "CodeGenModule.h"
22#include "ConstantEmitter.h"
23#include "TargetInfo.h"
24#include "TrapReasonBuilder.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/DeclObjC.h"
28#include "clang/AST/Expr.h"
35#include "llvm/ADT/APFixedPoint.h"
36#include "llvm/ADT/ScopeExit.h"
37#include "llvm/IR/Argument.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constants.h"
40#include "llvm/IR/DataLayout.h"
41#include "llvm/IR/DerivedTypes.h"
42#include "llvm/IR/FixedPointBuilder.h"
43#include "llvm/IR/Function.h"
44#include "llvm/IR/GEPNoWrapFlags.h"
45#include "llvm/IR/GetElementPtrTypeIterator.h"
46#include "llvm/IR/GlobalVariable.h"
47#include "llvm/IR/Intrinsics.h"
48#include "llvm/IR/IntrinsicsPowerPC.h"
49#include "llvm/IR/MatrixBuilder.h"
50#include "llvm/IR/Module.h"
51#include "llvm/Support/TypeSize.h"
52#include <cstdarg>
53#include <optional>
54
55using namespace clang;
56using namespace CodeGen;
57using llvm::Value;
58
59//===----------------------------------------------------------------------===//
60// Scalar Expression Emitter
61//===----------------------------------------------------------------------===//
62
63namespace llvm {
64extern cl::opt<bool> EnableSingleByteCoverage;
65} // namespace llvm
66
67namespace {
68
69/// Determine whether the given binary operation may overflow.
70/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
71/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
72/// the returned overflow check is precise. The returned value is 'true' for
73/// all other opcodes, to be conservative.
74bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
75 BinaryOperator::Opcode Opcode, bool Signed,
76 llvm::APInt &Result) {
77 // Assume overflow is possible, unless we can prove otherwise.
78 bool Overflow = true;
79 const auto &LHSAP = LHS->getValue();
80 const auto &RHSAP = RHS->getValue();
81 if (Opcode == BO_Add) {
82 Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
83 : LHSAP.uadd_ov(RHSAP, Overflow);
84 } else if (Opcode == BO_Sub) {
85 Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
86 : LHSAP.usub_ov(RHSAP, Overflow);
87 } else if (Opcode == BO_Mul) {
88 Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
89 : LHSAP.umul_ov(RHSAP, Overflow);
90 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
91 if (Signed && !RHS->isZero())
92 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
93 else
94 return false;
95 }
96 return Overflow;
97}
98
99struct BinOpInfo {
100 Value *LHS;
101 Value *RHS;
102 QualType Ty; // Computation Type.
103 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
104 FPOptions FPFeatures;
105 const Expr *E; // Entire expr, for error unsupported. May not be binop.
106
107 /// Check if the binop can result in integer overflow.
108 bool mayHaveIntegerOverflow() const {
109 // Without constant input, we can't rule out overflow.
110 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
111 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
112 if (!LHSCI || !RHSCI)
113 return true;
114
115 llvm::APInt Result;
116 return ::mayHaveIntegerOverflow(
117 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
118 }
119
120 /// Check if the binop computes a division or a remainder.
121 bool isDivremOp() const {
122 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
123 Opcode == BO_RemAssign;
124 }
125
126 /// Check if the binop can result in an integer division by zero.
127 bool mayHaveIntegerDivisionByZero() const {
128 if (isDivremOp())
129 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
130 return CI->isZero();
131 return true;
132 }
133
134 /// Check if the binop can result in a float division by zero.
135 bool mayHaveFloatDivisionByZero() const {
136 if (isDivremOp())
137 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
138 return CFP->isZero();
139 return true;
140 }
141
142 /// Check if at least one operand is a fixed point type. In such cases, this
143 /// operation did not follow usual arithmetic conversion and both operands
144 /// might not be of the same type.
145 bool isFixedPointOp() const {
146 // We cannot simply check the result type since comparison operations return
147 // an int.
148 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
149 QualType LHSType = BinOp->getLHS()->getType();
150 QualType RHSType = BinOp->getRHS()->getType();
151 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
152 }
153 if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
154 return UnOp->getSubExpr()->getType()->isFixedPointType();
155 return false;
156 }
157
158 /// Check if the RHS has a signed integer representation.
159 bool rhsHasSignedIntegerRepresentation() const {
160 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
161 QualType RHSType = BinOp->getRHS()->getType();
162 return RHSType->hasSignedIntegerRepresentation();
163 }
164 return false;
165 }
166};
167
168static bool MustVisitNullValue(const Expr *E) {
169 // If a null pointer expression's type is the C++0x nullptr_t, then
170 // it's not necessarily a simple constant and it must be evaluated
171 // for its potential side effects.
172 return E->getType()->isNullPtrType();
173}
174
175/// If \p E is a widened promoted integer, get its base (unpromoted) type.
176static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
177 const Expr *E) {
178 const Expr *Base = E->IgnoreImpCasts();
179 if (E == Base)
180 return std::nullopt;
181
182 QualType BaseTy = Base->getType();
183 if (!Ctx.isPromotableIntegerType(BaseTy) ||
184 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
185 return std::nullopt;
186
187 return BaseTy;
188}
189
190/// Check if \p E is a widened promoted integer.
191static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
192 return getUnwidenedIntegerType(Ctx, E).has_value();
193}
194
195/// Check if we can skip the overflow check for \p Op.
196static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
197 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
198 "Expected a unary or binary operator");
199
200 // If the binop has constant inputs and we can prove there is no overflow,
201 // we can elide the overflow check.
202 if (!Op.mayHaveIntegerOverflow())
203 return true;
204
// Types the user explicitly excluded from the respective sanitizer never
// need a check.
205 if (Op.Ty->isSignedIntegerType() &&
206 Ctx.isTypeIgnoredBySanitizer(SanitizerKind::SignedIntegerOverflow,
207 Op.Ty)) {
208 return true;
209 }
210
211 if (Op.Ty->isUnsignedIntegerType() &&
212 Ctx.isTypeIgnoredBySanitizer(SanitizerKind::UnsignedIntegerOverflow,
213 Op.Ty)) {
214 return true;
215 }
216
217 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Op.E);
218
219 if (UO && UO->getOpcode() == UO_Minus &&
// NOTE(review): doxygen source lines 220-221 — the middle of this condition —
// were lost during extraction; restore them from upstream
// clang/lib/CodeGen/CGExprScalar.cpp before relying on this predicate.
222 UO->isIntegerConstantExpr(Ctx))
223 return true;
224
225 // If a unary op has a widened operand, the op cannot overflow.
226 if (UO)
227 return !UO->canOverflow();
228
229 // We usually don't need overflow checks for binops with widened operands.
230 // Multiplication with promoted unsigned operands is a special case.
231 const auto *BO = cast<BinaryOperator>(Op.E);
232 if (BO->hasExcludedOverflowPattern())
233 return true;
234
// Both operands must be widened promoted integers for the remaining
// elisions to apply.
235 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
236 if (!OptionalLHSTy)
237 return false;
238
239 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
240 if (!OptionalRHSTy)
241 return false;
242
243 QualType LHSTy = *OptionalLHSTy;
244 QualType RHSTy = *OptionalRHSTy;
245
246 // This is the simple case: binops without unsigned multiplication, and with
247 // widened operands. No overflow check is needed here.
248 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
249 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
250 return true;
251
252 // For unsigned multiplication the overflow check can be elided if either one
253 // of the unpromoted types are less than half the size of the promoted type.
254 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
255 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
256 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
257}
258
259class ScalarExprEmitter
260 : public StmtVisitor<ScalarExprEmitter, Value*> {
261 CodeGenFunction &CGF;
262 CGBuilderTy &Builder;
263 bool IgnoreResultAssign;
264 llvm::LLVMContext &VMContext;
265public:
266
267 ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
268 : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
269 VMContext(cgf.getLLVMContext()) {
270 }
271
272 //===--------------------------------------------------------------------===//
273 // Utilities
274 //===--------------------------------------------------------------------===//
275
276 bool TestAndClearIgnoreResultAssign() {
277 bool I = IgnoreResultAssign;
278 IgnoreResultAssign = false;
279 return I;
280 }
281
282 llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
283 LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
284 LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
285 return CGF.EmitCheckedLValue(E, TCK);
286 }
287
288 void EmitBinOpCheck(
289 ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
290 const BinOpInfo &Info);
291
292 Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
293 return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
294 }
295
296 void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
297 const AlignValueAttr *AVAttr = nullptr;
298 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
299 const ValueDecl *VD = DRE->getDecl();
300
301 if (VD->getType()->isReferenceType()) {
302 if (const auto *TTy =
303 VD->getType().getNonReferenceType()->getAs<TypedefType>())
304 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
305 } else {
306 // Assumptions for function parameters are emitted at the start of the
307 // function, so there is no need to repeat that here,
308 // unless the alignment-assumption sanitizer is enabled,
309 // then we prefer the assumption over alignment attribute
310 // on IR function param.
311 if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
312 return;
313
314 AVAttr = VD->getAttr<AlignValueAttr>();
315 }
316 }
317
318 if (!AVAttr)
319 if (const auto *TTy = E->getType()->getAs<TypedefType>())
320 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
321
322 if (!AVAttr)
323 return;
324
325 Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
326 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
327 CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
328 }
329
330 /// EmitLoadOfLValue - Given an expression with complex type that represents a
331 /// value l-value, this method emits the address of the l-value, then loads
332 /// and returns the result.
333 Value *EmitLoadOfLValue(const Expr *E) {
334 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
335 E->getExprLoc());
336
337 EmitLValueAlignmentAssumption(E, V);
338 return V;
339 }
340
341 /// EmitConversionToBool - Convert the specified expression value to a
342 /// boolean (i1) truth value. This is equivalent to "Val != 0".
343 Value *EmitConversionToBool(Value *Src, QualType DstTy);
344
345 /// Emit a check that a conversion from a floating-point type does not
346 /// overflow.
347 void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
348 Value *Src, QualType SrcType, QualType DstType,
349 llvm::Type *DstTy, SourceLocation Loc);
350
351 /// Known implicit conversion check kinds.
352 /// This is used for bitfield conversion checks as well.
353 /// Keep in sync with the enum of the same name in ubsan_handlers.h
354 enum ImplicitConversionCheckKind : unsigned char {
355 ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
356 ICCK_UnsignedIntegerTruncation = 1,
357 ICCK_SignedIntegerTruncation = 2,
358 ICCK_IntegerSignChange = 3,
359 ICCK_SignedIntegerTruncationOrSignChange = 4,
360 };
361
362 /// Emit a check that an [implicit] truncation of an integer does not
363 /// discard any bits. It is not UB, so we use the value after truncation.
364 void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
365 QualType DstType, SourceLocation Loc);
366
367 /// Emit a check that an [implicit] conversion of an integer does not change
368 /// the sign of the value. It is not UB, so we use the value after conversion.
369 /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
370 void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
371 QualType DstType, SourceLocation Loc);
372
373 /// Emit a conversion from the specified type to the specified destination
374 /// type, both of which are LLVM scalar types.
375 struct ScalarConversionOpts {
376 bool TreatBooleanAsSigned;
377 bool EmitImplicitIntegerTruncationChecks;
378 bool EmitImplicitIntegerSignChangeChecks;
379
380 ScalarConversionOpts()
381 : TreatBooleanAsSigned(false),
382 EmitImplicitIntegerTruncationChecks(false),
383 EmitImplicitIntegerSignChangeChecks(false) {}
384
385 ScalarConversionOpts(clang::SanitizerSet SanOpts)
386 : TreatBooleanAsSigned(false),
387 EmitImplicitIntegerTruncationChecks(
388 SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
389 EmitImplicitIntegerSignChangeChecks(
390 SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
391 };
392 Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
393 llvm::Type *SrcTy, llvm::Type *DstTy,
394 ScalarConversionOpts Opts);
395 Value *
396 EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
397 SourceLocation Loc,
398 ScalarConversionOpts Opts = ScalarConversionOpts());
399
400 /// Convert between either a fixed point and other fixed point or fixed point
401 /// and an integer.
402 Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
403 SourceLocation Loc);
404
405 /// Emit a conversion from the specified complex type to the specified
406 /// destination type, where the destination type is an LLVM scalar type.
407 Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
408 QualType SrcTy, QualType DstTy,
409 SourceLocation Loc);
410
411 /// EmitNullValue - Emit a value that corresponds to null for the given type.
412 Value *EmitNullValue(QualType Ty);
413
414 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
415 Value *EmitFloatToBoolConversion(Value *V) {
416 // Compare against 0.0 for fp scalars.
417 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
418 return Builder.CreateFCmpUNE(V, Zero, "tobool");
419 }
420
421 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
422 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
423 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
424
425 return Builder.CreateICmpNE(V, Zero, "tobool");
426 }
427
428 Value *EmitIntToBoolConversion(Value *V) {
429 // Because of the type rules of C, we often end up computing a
430 // logical value, then zero extending it to int, then wanting it
431 // as a logical value again. Optimize this common case.
432 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
433 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
434 Value *Result = ZI->getOperand(0);
435 // If there aren't any more uses, zap the instruction to save space.
436 // Note that there can be more uses, for example if this
437 // is the result of an assignment.
438 if (ZI->use_empty())
439 ZI->eraseFromParent();
440 return Result;
441 }
442 }
443
444 return Builder.CreateIsNotNull(V, "tobool");
445 }
446
447 //===--------------------------------------------------------------------===//
448 // Visitor Methods
449 //===--------------------------------------------------------------------===//
450
451 Value *Visit(Expr *E) {
452 ApplyDebugLocation DL(CGF, E);
453 return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
454 }
455
456 Value *VisitStmt(Stmt *S) {
457 S->dump(llvm::errs(), CGF.getContext());
458 llvm_unreachable("Stmt can't have complex result type!");
459 }
460 Value *VisitExpr(Expr *S);
461
462 Value *VisitConstantExpr(ConstantExpr *E) {
463 // A constant expression of type 'void' generates no code and produces no
464 // value.
465 if (E->getType()->isVoidType())
466 return nullptr;
467
// Prefer the pre-evaluated constant when the constant emitter can produce
// one; only glvalue results still need a load.
468 if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
469 if (E->isGLValue()) {
470 // This was already converted to an rvalue when it was constant
471 // evaluated.
472 if (E->hasAPValueResult() && !E->getAPValueResult().isLValue())
473 return Result;
474 return CGF.EmitLoadOfScalar(
475 Address(Result, CGF.convertTypeForLoadStore(E->getType()),
// NOTE(review): doxygen source line 476 — the Address alignment argument and
// its closing paren — was lost in extraction; restore from upstream
// clang/lib/CodeGen/CGExprScalar.cpp.
477 /*Volatile*/ false, E->getType(), E->getExprLoc());
478 }
479 return Result;
480 }
// No constant could be emitted; evaluate the wrapped expression normally.
481 return Visit(E->getSubExpr());
482 }
483 Value *VisitParenExpr(ParenExpr *PE) {
484 return Visit(PE->getSubExpr());
485 }
486 Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
487 return Visit(E->getReplacement());
488 }
489 Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
490 return Visit(GE->getResultExpr());
491 }
492 Value *VisitCoawaitExpr(CoawaitExpr *S) {
493 return CGF.EmitCoawaitExpr(*S).getScalarVal();
494 }
495 Value *VisitCoyieldExpr(CoyieldExpr *S) {
496 return CGF.EmitCoyieldExpr(*S).getScalarVal();
497 }
498 Value *VisitUnaryCoawait(const UnaryOperator *E) {
499 return Visit(E->getSubExpr());
500 }
501
502 // Leaves.
503 Value *VisitIntegerLiteral(const IntegerLiteral *E) {
504 return Builder.getInt(E->getValue());
505 }
506 Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
507 return Builder.getInt(E->getValue());
508 }
509 Value *VisitFloatingLiteral(const FloatingLiteral *E) {
510 return llvm::ConstantFP::get(VMContext, E->getValue());
511 }
512 Value *VisitCharacterLiteral(const CharacterLiteral *E) {
513 // Character literals are always stored in an unsigned (even for signed
514 // char), so allow implicit truncation here.
515 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue(),
516 /*IsSigned=*/false, /*ImplicitTrunc=*/true);
517 }
518 Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
519 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
520 }
521 Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
522 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
523 }
524 Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
525 if (E->getType()->isVoidType())
526 return nullptr;
527
528 return EmitNullValue(E->getType());
529 }
530 Value *VisitGNUNullExpr(const GNUNullExpr *E) {
531 return EmitNullValue(E->getType());
532 }
533 Value *VisitOffsetOfExpr(OffsetOfExpr *E);
534 Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
535 Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
536 llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
537 return Builder.CreateBitCast(V, ConvertType(E->getType()));
538 }
539
540 Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
541 return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
542 }
543
544 Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
545 return CGF.EmitPseudoObjectRValue(E).getScalarVal();
546 }
547
548 Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
549 Value *VisitEmbedExpr(EmbedExpr *E);
550
551 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
552 if (E->isGLValue())
553 return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
554 E->getExprLoc());
555
556 // Otherwise, assume the mapping is the scalar directly.
558 }
559
560 Value *VisitOpenACCAsteriskSizeExpr(OpenACCAsteriskSizeExpr *E) {
561 llvm_unreachable("Codegen for this isn't defined/implemented");
562 }
563
564 // l-values.
565 Value *VisitDeclRefExpr(DeclRefExpr *E) {
566 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
567 return CGF.emitScalarConstant(Constant, E);
568 return EmitLoadOfLValue(E);
569 }
570
571 Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
572 return CGF.EmitObjCSelectorExpr(E);
573 }
574 Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
575 return CGF.EmitObjCProtocolExpr(E);
576 }
577 Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
578 return EmitLoadOfLValue(E);
579 }
580 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
581 if (E->getMethodDecl() &&
583 return EmitLoadOfLValue(E);
584 return CGF.EmitObjCMessageExpr(E).getScalarVal();
585 }
586
587 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
588 LValue LV = CGF.EmitObjCIsaExpr(E);
590 return V;
591 }
592
593 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
594 VersionTuple Version = E->getVersion();
595
596 // If we're checking for a platform older than our minimum deployment
597 // target, we can fold the check away.
598 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
599 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
600
601 return CGF.EmitBuiltinAvailable(Version);
602 }
603
604 Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
605 Value *VisitMatrixSingleSubscriptExpr(MatrixSingleSubscriptExpr *E);
606 Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
607 Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
608 Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
609 Value *VisitMemberExpr(MemberExpr *E);
610 Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
611 Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
612 // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
613 // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
614 // literals aren't l-values in C++. We do so simply because that's the
615 // cleanest way to handle compound literals in C++.
616 // See the discussion here: https://reviews.llvm.org/D64464
617 return EmitLoadOfLValue(E);
618 }
619
620 Value *VisitInitListExpr(InitListExpr *E);
621
622 Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
623 assert(CGF.getArrayInitIndex() &&
624 "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
625 return CGF.getArrayInitIndex();
626 }
627
628 Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
629 return EmitNullValue(E->getType());
630 }
631 Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
632 CGF.CGM.EmitExplicitCastExprType(E, &CGF);
633 return VisitCastExpr(E);
634 }
635 Value *VisitCastExpr(CastExpr *E);
636
637 Value *VisitCallExpr(const CallExpr *E) {
639 return EmitLoadOfLValue(E);
640
641 Value *V = CGF.EmitCallExpr(E).getScalarVal();
642
643 EmitLValueAlignmentAssumption(E, V);
644 return V;
645 }
646
647 Value *VisitStmtExpr(const StmtExpr *E);
648
649 // Unary Operators.
650 Value *VisitUnaryPostDec(const UnaryOperator *E) {
651 LValue LV = EmitLValue(E->getSubExpr());
652 return EmitScalarPrePostIncDec(E, LV, false, false);
653 }
654 Value *VisitUnaryPostInc(const UnaryOperator *E) {
655 LValue LV = EmitLValue(E->getSubExpr());
656 return EmitScalarPrePostIncDec(E, LV, true, false);
657 }
658 Value *VisitUnaryPreDec(const UnaryOperator *E) {
659 LValue LV = EmitLValue(E->getSubExpr());
660 return EmitScalarPrePostIncDec(E, LV, false, true);
661 }
662 Value *VisitUnaryPreInc(const UnaryOperator *E) {
663 LValue LV = EmitLValue(E->getSubExpr());
664 return EmitScalarPrePostIncDec(E, LV, true, true);
665 }
666
667 llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
668 llvm::Value *InVal,
669 bool IsInc);
670
671 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
672 bool isInc, bool isPre);
673
674
675 Value *VisitUnaryAddrOf(const UnaryOperator *E) {
676 if (isa<MemberPointerType>(E->getType())) // never sugared
677 return CGF.CGM.getMemberPointerConstant(E);
678
679 return EmitLValue(E->getSubExpr()).getPointer(CGF);
680 }
681 Value *VisitUnaryDeref(const UnaryOperator *E) {
682 if (E->getType()->isVoidType())
683 return Visit(E->getSubExpr()); // the actual value should be unused
684 return EmitLoadOfLValue(E);
685 }
686
687 Value *VisitUnaryPlus(const UnaryOperator *E,
688 QualType PromotionType = QualType());
689 Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
690 Value *VisitUnaryMinus(const UnaryOperator *E,
691 QualType PromotionType = QualType());
692 Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);
693
694 Value *VisitUnaryNot (const UnaryOperator *E);
695 Value *VisitUnaryLNot (const UnaryOperator *E);
696 Value *VisitUnaryReal(const UnaryOperator *E,
697 QualType PromotionType = QualType());
698 Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
699 Value *VisitUnaryImag(const UnaryOperator *E,
700 QualType PromotionType = QualType());
701 Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
702 Value *VisitUnaryExtension(const UnaryOperator *E) {
703 return Visit(E->getSubExpr());
704 }
705
706 // C++
707 Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
708 return EmitLoadOfLValue(E);
709 }
710 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
711 auto &Ctx = CGF.getContext();
714 return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
715 SLE->getType());
716 }
717
718 Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
719 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
720 return Visit(DAE->getExpr());
721 }
722 Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
723 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
724 return Visit(DIE->getExpr());
725 }
726 Value *VisitCXXThisExpr(CXXThisExpr *TE) {
727 return CGF.LoadCXXThis();
728 }
729
730 Value *VisitExprWithCleanups(ExprWithCleanups *E);
731 Value *VisitCXXNewExpr(const CXXNewExpr *E) {
732 return CGF.EmitCXXNewExpr(E);
733 }
734 Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
735 CGF.EmitCXXDeleteExpr(E);
736 return nullptr;
737 }
738
739 Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
740 if (E->isStoredAsBoolean())
741 return llvm::ConstantInt::get(ConvertType(E->getType()),
742 E->getBoolValue());
743 assert(E->getAPValue().isInt() && "APValue type not supported");
744 return llvm::ConstantInt::get(ConvertType(E->getType()),
745 E->getAPValue().getInt());
746 }
747
748 Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
749 return Builder.getInt1(E->isSatisfied());
750 }
751
752 Value *VisitRequiresExpr(const RequiresExpr *E) {
753 return Builder.getInt1(E->isSatisfied());
754 }
755
756 Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
757 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
758 }
759
760 Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
761 return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
762 }
763
764 Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
765 // C++ [expr.pseudo]p1:
766 // The result shall only be used as the operand for the function call
767 // operator (), and the result of such a call has type void. The only
768 // effect is the evaluation of the postfix-expression before the dot or
769 // arrow.
770 CGF.EmitScalarExpr(E->getBase());
771 return nullptr;
772 }
773
774 Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
775 return EmitNullValue(E->getType());
776 }
777
778 Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
779 CGF.EmitCXXThrowExpr(E);
780 return nullptr;
781 }
782
783 Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
784 return Builder.getInt1(E->getValue());
785 }
786
787 // Binary Operators.
// Emit a multiplication, choosing the lowering by operand category, in
// order: signed integer (overflow-behavior cascade), constant matrix,
// sanitized unsigned integer, floating point, fixed point, and finally a
// plain integer 'mul'.
788 Value *EmitMul(const BinOpInfo &Ops) {
789 if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
// The SOB cases deliberately cascade: with the signed-overflow sanitizer
// enabled, Defined falls through to Undefined and Undefined to Trapping,
// so a checked multiply is emitted unless the check can be elided.
790 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
791 case LangOptions::SOB_Defined:
792 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
793 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
794 [[fallthrough]];
795 case LangOptions::SOB_Undefined:
796 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
797 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
798 [[fallthrough]];
799 case LangOptions::SOB_Trapping:
800 if (CanElideOverflowCheck(CGF.getContext(), Ops))
801 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
802 return EmitOverflowCheckedBinOp(Ops);
803 }
804 }
805
806 if (Ops.Ty->isConstantMatrixType()) {
807 llvm::MatrixBuilder MB(Builder);
808 // We need to check the types of the operands of the operator to get the
809 // correct matrix dimensions.
810 auto *BO = cast<BinaryOperator>(Ops.E);
811 auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
812 BO->getLHS()->getType().getCanonicalType());
813 auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
814 BO->getRHS()->getType().getCanonicalType());
815 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
// Matrix-by-matrix multiply needs explicit dimensions; a null LHSMatTy or
// RHSMatTy means one side is a scalar.
816 if (LHSMatTy && RHSMatTy)
817 return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
818 LHSMatTy->getNumColumns(),
819 RHSMatTy->getNumColumns());
820 return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
821 }
822
823 if (Ops.Ty->isUnsignedIntegerType() &&
824 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
825 !CanElideOverflowCheck(CGF.getContext(), Ops))
826 return EmitOverflowCheckedBinOp(Ops);
827
828 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
829 // Preserve the old values
830 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
831 return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
832 }
833 if (Ops.isFixedPointOp())
834 return EmitFixedPointBinOp(Ops);
835 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
836 }
837 /// Create a binary op that checks for overflow.
838 /// Currently only supports +, - and *.
839 Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
840
841 // Check for undefined division and modulus behaviors.
842 void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
843 llvm::Value *Zero,bool isDiv);
844 // Common helper for getting how wide LHS of shift is.
845 static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);
846
847 // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
848 // non powers of two.
849 Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);
850
851 Value *EmitDiv(const BinOpInfo &Ops);
852 Value *EmitRem(const BinOpInfo &Ops);
853 Value *EmitAdd(const BinOpInfo &Ops);
854 Value *EmitSub(const BinOpInfo &Ops);
855 Value *EmitShl(const BinOpInfo &Ops);
856 Value *EmitShr(const BinOpInfo &Ops);
857 Value *EmitAnd(const BinOpInfo &Ops) {
858 return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
859 }
860 Value *EmitXor(const BinOpInfo &Ops) {
861 return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
862 }
863 Value *EmitOr (const BinOpInfo &Ops) {
864 return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
865 }
866
  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  // Evaluate both operands of E into a BinOpInfo; when PromotionTy is
  // non-null the operands are emitted at that (excess-precision) type.
  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  // Excess-precision support: widen a value to / narrow it back from the
  // promotion type chosen by getPromotionType().
  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  // Emit a compound assignment (e.g. +=) through the member emitter F,
  // returning the updated lvalue and the stored scalar in Result.
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                      Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                      Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
884 QualType getPromotionType(QualType Ty) {
885 const auto &Ctx = CGF.getContext();
886 if (auto *CT = Ty->getAs<ComplexType>()) {
887 QualType ElementType = CT->getElementType();
888 if (ElementType.UseExcessPrecision(Ctx))
889 return Ctx.getComplexType(Ctx.FloatTy);
890 }
891
892 if (Ty.UseExcessPrecision(Ctx)) {
893 if (auto *VT = Ty->getAs<VectorType>()) {
894 unsigned NumElements = VT->getNumElements();
895 return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
896 }
897 return Ctx.FloatTy;
898 }
899
900 return QualType();
901 }
902
903 // Binary operators and binary compound assignment operators.
904#define HANDLEBINOP(OP) \
905 Value *VisitBin##OP(const BinaryOperator *E) { \
906 QualType promotionTy = getPromotionType(E->getType()); \
907 auto result = Emit##OP(EmitBinOps(E, promotionTy)); \
908 if (result && !promotionTy.isNull()) \
909 result = EmitUnPromotedValue(result, E->getType()); \
910 return result; \
911 } \
912 Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) { \
913 ApplyAtomGroup Grp(CGF.getDebugInfo()); \
914 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP); \
915 }
916 HANDLEBINOP(Mul)
917 HANDLEBINOP(Div)
918 HANDLEBINOP(Rem)
919 HANDLEBINOP(Add)
920 HANDLEBINOP(Sub)
921 HANDLEBINOP(Shl)
922 HANDLEBINOP(Shr)
924 HANDLEBINOP(Xor)
926#undef HANDLEBINOP
927
  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
  // Each relational visitor supplies the unsigned-integer, signed-integer,
  // and floating-point predicates; SIG requests a signaling FP compare for
  // the ordering operators (<, >, <=, >=) but not for (in)equality.
#define VISITCOMP(CODE, UI, SI, FP, SIG) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP
943
  Value *VisitBinAssign (const BinaryOperator *E);

  // Short-circuiting logical operators and the comma operator.
  Value *VisitBinLAnd (const BinaryOperator *E);
  Value *VisitBinLOr (const BinaryOperator *E);
  Value *VisitBinComma (const BinaryOperator *E);

  // Pointer-to-member access produces a loadable scalar lvalue.
  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  // C++20 rewritten operators (e.g. != rewritten via ==) are emitted
  // through their semantic form.
  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  // Objective-C literal expressions delegate to CodeGenFunction's emitters.
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
  // Pack indexing selects a single expression out of a pack; just emit it.
  Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
    return Visit(E->getSelectedExpr());
  }
979};
980} // end anonymous namespace.
981
982//===----------------------------------------------------------------------===//
983// Utilities
984//===----------------------------------------------------------------------===//
985
986/// EmitConversionToBool - Convert the specified expression value to a
987/// boolean (i1) truth value. This is equivalent to "Val != 0".
988Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
989 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
990
991 if (SrcType->isRealFloatingType())
992 return EmitFloatToBoolConversion(Src);
993
994 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
995 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
996
997 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
998 "Unknown scalar type to convert");
999
1000 if (isa<llvm::IntegerType>(Src->getType()))
1001 return EmitIntToBoolConversion(Src);
1002
1003 assert(isa<llvm::PointerType>(Src->getType()));
1004 return EmitPointerToBoolConversion(Src, SrcType);
1005}
1006
1007void ScalarExprEmitter::EmitFloatConversionCheck(
1008 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
1009 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
1010 assert(SrcType->isFloatingType() && "not a conversion from floating point");
1011 if (!isa<llvm::IntegerType>(DstTy))
1012 return;
1013
1014 auto CheckOrdinal = SanitizerKind::SO_FloatCastOverflow;
1015 auto CheckHandler = SanitizerHandler::FloatCastOverflow;
1016 SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
1017 using llvm::APFloat;
1018 using llvm::APSInt;
1019
1020 llvm::Value *Check = nullptr;
1021 const llvm::fltSemantics &SrcSema =
1022 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
1023
1024 // Floating-point to integer. This has undefined behavior if the source is
1025 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
1026 // to an integer).
1027 unsigned Width = CGF.getContext().getIntWidth(DstType);
1029
1030 APSInt Min = APSInt::getMinValue(Width, Unsigned);
1031 APFloat MinSrc(SrcSema, APFloat::uninitialized);
1032 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
1033 APFloat::opOverflow)
1034 // Don't need an overflow check for lower bound. Just check for
1035 // -Inf/NaN.
1036 MinSrc = APFloat::getInf(SrcSema, true);
1037 else
1038 // Find the largest value which is too small to represent (before
1039 // truncation toward zero).
1040 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
1041
1042 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
1043 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
1044 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
1045 APFloat::opOverflow)
1046 // Don't need an overflow check for upper bound. Just check for
1047 // +Inf/NaN.
1048 MaxSrc = APFloat::getInf(SrcSema, false);
1049 else
1050 // Find the smallest value which is too large to represent (before
1051 // truncation toward zero).
1052 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
1053
1054 // If we're converting from __half, convert the range to float to match
1055 // the type of src.
1056 if (OrigSrcType->isHalfType()) {
1057 const llvm::fltSemantics &Sema =
1058 CGF.getContext().getFloatTypeSemantics(SrcType);
1059 bool IsInexact;
1060 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1061 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1062 }
1063
1064 llvm::Value *GE =
1065 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
1066 llvm::Value *LE =
1067 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
1068 Check = Builder.CreateAnd(GE, LE);
1069
1070 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
1071 CGF.EmitCheckTypeDescriptor(OrigSrcType),
1072 CGF.EmitCheckTypeDescriptor(DstType)};
1073 CGF.EmitCheck(std::make_pair(Check, CheckOrdinal), CheckHandler, StaticArgs,
1074 OrigSrc);
1075}
1076
1077// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1078// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1079static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1080 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1082 QualType DstType, CGBuilderTy &Builder) {
1083 llvm::Type *SrcTy = Src->getType();
1084 llvm::Type *DstTy = Dst->getType();
1085 (void)DstTy; // Only used in assert()
1086
1087 // This should be truncation of integral types.
1088 assert(Src != Dst);
1089 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
1090 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1091 "non-integer llvm type");
1092
1093 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1094 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1095
1096 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
1097 // Else, it is a signed truncation.
1098 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1100 if (!SrcSigned && !DstSigned) {
1101 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1102 Ordinal = SanitizerKind::SO_ImplicitUnsignedIntegerTruncation;
1103 } else {
1104 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1105 Ordinal = SanitizerKind::SO_ImplicitSignedIntegerTruncation;
1106 }
1107
1108 llvm::Value *Check = nullptr;
1109 // 1. Extend the truncated value back to the same width as the Src.
1110 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
1111 // 2. Equality-compare with the original source value
1112 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1113 // If the comparison result is 'i1 false', then the truncation was lossy.
1114 return std::make_pair(Kind, std::make_pair(Check, Ordinal));
1115}
1116
1118 QualType SrcType, QualType DstType) {
1119 return SrcType->isIntegerType() && DstType->isIntegerType();
1120}
1121
1122void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1123 Value *Dst, QualType DstType,
1124 SourceLocation Loc) {
1125 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
1126 return;
1127
1128 // We only care about int->int conversions here.
1129 // We ignore conversions to/from pointer and/or bool.
1131 DstType))
1132 return;
1133
1134 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1135 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1136 // This must be truncation. Else we do not care.
1137 if (SrcBits <= DstBits)
1138 return;
1139
1140 assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1141
1142 // If the integer sign change sanitizer is enabled,
1143 // and we are truncating from larger unsigned type to smaller signed type,
1144 // let that next sanitizer deal with it.
1145 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1146 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1147 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1148 (!SrcSigned && DstSigned))
1149 return;
1150
1151 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1152 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1153 Check;
1154
1155 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1156 {
1157 // We don't know the check kind until we call
1158 // EmitIntegerTruncationCheckHelper, but we want to annotate
1159 // EmitIntegerTruncationCheckHelper's instructions too.
1160 SanitizerDebugLocation SanScope(
1161 &CGF,
1162 {SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1163 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1164 CheckHandler);
1165 Check =
1166 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1167 // If the comparison result is 'i1 false', then the truncation was lossy.
1168 }
1169
1170 // Do we care about this type of truncation?
1171 if (!CGF.SanOpts.has(Check.second.second))
1172 return;
1173
1174 SanitizerDebugLocation SanScope(&CGF, {Check.second.second}, CheckHandler);
1175
1176 // Does some SSCL ignore this type?
1178 SanitizerMask::bitPosToMask(Check.second.second), DstType))
1179 return;
1180
1181 llvm::Constant *StaticArgs[] = {
1182 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1183 CGF.EmitCheckTypeDescriptor(DstType),
1184 llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
1185 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1186
1187 CGF.EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
1188}
1189
1190static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
1191 const char *Name,
1192 CGBuilderTy &Builder) {
1193 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1194 llvm::Type *VTy = V->getType();
1195 if (!VSigned) {
1196 // If the value is unsigned, then it is never negative.
1197 return llvm::ConstantInt::getFalse(VTy->getContext());
1198 }
1199 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1200 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1201 llvm::Twine(Name) + "." + V->getName() +
1202 ".negativitycheck");
1203}
1204
1205// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1206// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1207static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1208 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1210 QualType DstType, CGBuilderTy &Builder) {
1211 llvm::Type *SrcTy = Src->getType();
1212 llvm::Type *DstTy = Dst->getType();
1213
1214 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1215 "non-integer llvm type");
1216
1217 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1218 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1219 (void)SrcSigned; // Only used in assert()
1220 (void)DstSigned; // Only used in assert()
1221 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1222 unsigned DstBits = DstTy->getScalarSizeInBits();
1223 (void)SrcBits; // Only used in assert()
1224 (void)DstBits; // Only used in assert()
1225
1226 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1227 "either the widths should be different, or the signednesses.");
1228
1229 // 1. Was the old Value negative?
1230 llvm::Value *SrcIsNegative =
1231 EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
1232 // 2. Is the new Value negative?
1233 llvm::Value *DstIsNegative =
1234 EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
1235 // 3. Now, was the 'negativity status' preserved during the conversion?
1236 // NOTE: conversion from negative to zero is considered to change the sign.
1237 // (We want to get 'false' when the conversion changed the sign)
1238 // So we should just equality-compare the negativity statuses.
1239 llvm::Value *Check = nullptr;
1240 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1241 // If the comparison result is 'false', then the conversion changed the sign.
1242 return std::make_pair(
1243 ScalarExprEmitter::ICCK_IntegerSignChange,
1244 std::make_pair(Check, SanitizerKind::SO_ImplicitIntegerSignChange));
1245}
1246
1247void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1248 Value *Dst, QualType DstType,
1249 SourceLocation Loc) {
1250 if (!CGF.SanOpts.has(SanitizerKind::SO_ImplicitIntegerSignChange))
1251 return;
1252
1253 llvm::Type *SrcTy = Src->getType();
1254 llvm::Type *DstTy = Dst->getType();
1255
1256 // We only care about int->int conversions here.
1257 // We ignore conversions to/from pointer and/or bool.
1259 DstType))
1260 return;
1261
1262 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1263 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1264 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1265 unsigned DstBits = DstTy->getScalarSizeInBits();
1266
1267 // Now, we do not need to emit the check in *all* of the cases.
1268 // We can avoid emitting it in some obvious cases where it would have been
1269 // dropped by the opt passes (instcombine) always anyways.
1270 // If it's a cast between effectively the same type, no check.
1271 // NOTE: this is *not* equivalent to checking the canonical types.
1272 if (SrcSigned == DstSigned && SrcBits == DstBits)
1273 return;
1274 // At least one of the values needs to have signed type.
1275 // If both are unsigned, then obviously, neither of them can be negative.
1276 if (!SrcSigned && !DstSigned)
1277 return;
1278 // If the conversion is to *larger* *signed* type, then no check is needed.
1279 // Because either sign-extension happens (so the sign will remain),
1280 // or zero-extension will happen (the sign bit will be zero.)
1281 if ((DstBits > SrcBits) && DstSigned)
1282 return;
1283 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1284 (SrcBits > DstBits) && SrcSigned) {
1285 // If the signed integer truncation sanitizer is enabled,
1286 // and this is a truncation from signed type, then no check is needed.
1287 // Because here sign change check is interchangeable with truncation check.
1288 return;
1289 }
1290 // Does an SSCL have an entry for the DstType under its respective sanitizer
1291 // section?
1292 if (DstSigned && CGF.getContext().isTypeIgnoredBySanitizer(
1293 SanitizerKind::ImplicitSignedIntegerTruncation, DstType))
1294 return;
1295 if (!DstSigned &&
1297 SanitizerKind::ImplicitUnsignedIntegerTruncation, DstType))
1298 return;
1299 // That's it. We can't rule out any more cases with the data we have.
1300
1301 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1302 SanitizerDebugLocation SanScope(
1303 &CGF,
1304 {SanitizerKind::SO_ImplicitIntegerSignChange,
1305 SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1306 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1307 CheckHandler);
1308
1309 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1310 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1311 Check;
1312
1313 // Each of these checks needs to return 'false' when an issue was detected.
1314 ImplicitConversionCheckKind CheckKind;
1315 llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
1316 2>
1317 Checks;
1318 // So we can 'and' all the checks together, and still get 'false',
1319 // if at least one of the checks detected an issue.
1320
1321 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1322 CheckKind = Check.first;
1323 Checks.emplace_back(Check.second);
1324
1325 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1326 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1327 // If the signed integer truncation sanitizer was enabled,
1328 // and we are truncating from larger unsigned type to smaller signed type,
1329 // let's handle the case we skipped in that check.
1330 Check =
1331 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1332 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1333 Checks.emplace_back(Check.second);
1334 // If the comparison result is 'i1 false', then the truncation was lossy.
1335 }
1336
1337 llvm::Constant *StaticArgs[] = {
1338 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1339 CGF.EmitCheckTypeDescriptor(DstType),
1340 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1341 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1342 // EmitCheck() will 'and' all the checks together.
1343 CGF.EmitCheck(Checks, CheckHandler, StaticArgs, {Src, Dst});
1344}
1345
1346// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1347// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1348static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1349 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1351 QualType DstType, CGBuilderTy &Builder) {
1352 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1353 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1354
1355 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1356 if (!SrcSigned && !DstSigned)
1357 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1358 else
1359 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1360
1361 llvm::Value *Check = nullptr;
1362 // 1. Extend the truncated value back to the same width as the Src.
1363 Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
1364 // 2. Equality-compare with the original source value
1365 Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
1366 // If the comparison result is 'i1 false', then the truncation was lossy.
1367
1368 return std::make_pair(
1369 Kind,
1370 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1371}
1372
1373// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1374// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1375static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1376 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1378 QualType DstType, CGBuilderTy &Builder) {
1379 // 1. Was the old Value negative?
1380 llvm::Value *SrcIsNegative =
1381 EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
1382 // 2. Is the new Value negative?
1383 llvm::Value *DstIsNegative =
1384 EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
1385 // 3. Now, was the 'negativity status' preserved during the conversion?
1386 // NOTE: conversion from negative to zero is considered to change the sign.
1387 // (We want to get 'false' when the conversion changed the sign)
1388 // So we should just equality-compare the negativity statuses.
1389 llvm::Value *Check = nullptr;
1390 Check =
1391 Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
1392 // If the comparison result is 'false', then the conversion changed the sign.
1393 return std::make_pair(
1394 ScalarExprEmitter::ICCK_IntegerSignChange,
1395 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1396}
1397
1399 Value *Dst, QualType DstType,
1400 const CGBitFieldInfo &Info,
1401 SourceLocation Loc) {
1402
1403 if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
1404 return;
1405
1406 // We only care about int->int conversions here.
1407 // We ignore conversions to/from pointer and/or bool.
1409 DstType))
1410 return;
1411
1412 if (DstType->isBooleanType() || SrcType->isBooleanType())
1413 return;
1414
1415 // This should be truncation of integral types.
1416 assert(isa<llvm::IntegerType>(Src->getType()) &&
1417 isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");
1418
1419 // TODO: Calculate src width to avoid emitting code
1420 // for unecessary cases.
1421 unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
1422 unsigned DstBits = Info.Size;
1423
1424 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1425 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1426
1427 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1428 SanitizerDebugLocation SanScope(
1429 this, {SanitizerKind::SO_ImplicitBitfieldConversion}, CheckHandler);
1430
1431 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1432 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1433 Check;
1434
1435 // Truncation
1436 bool EmitTruncation = DstBits < SrcBits;
1437 // If Dst is signed and Src unsigned, we want to be more specific
1438 // about the CheckKind we emit, in this case we want to emit
1439 // ICCK_SignedIntegerTruncationOrSignChange.
1440 bool EmitTruncationFromUnsignedToSigned =
1441 EmitTruncation && DstSigned && !SrcSigned;
1442 // Sign change
1443 bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
1444 bool BothUnsigned = !SrcSigned && !DstSigned;
1445 bool LargerSigned = (DstBits > SrcBits) && DstSigned;
1446 // We can avoid emitting sign change checks in some obvious cases
1447 // 1. If Src and Dst have the same signedness and size
1448 // 2. If both are unsigned sign check is unecessary!
1449 // 3. If Dst is signed and bigger than Src, either
1450 // sign-extension or zero-extension will make sure
1451 // the sign remains.
1452 bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;
1453
1454 if (EmitTruncation)
1455 Check =
1456 EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1457 else if (EmitSignChange) {
1458 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1459 "either the widths should be different, or the signednesses.");
1460 Check =
1461 EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1462 } else
1463 return;
1464
1465 ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
1466 if (EmitTruncationFromUnsignedToSigned)
1467 CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;
1468
1469 llvm::Constant *StaticArgs[] = {
1471 EmitCheckTypeDescriptor(DstType),
1472 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1473 llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};
1474
1475 EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
1476}
1477
/// Emit a cast between two scalar (or matrix) values, dispatching on the
/// element types to pick the right int<->int, int<->fp, or fp<->fp cast
/// instruction. SrcTy/DstTy are the corresponding LLVM types.
Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
                                         QualType DstType, llvm::Type *SrcTy,
                                         llvm::Type *DstTy,
                                         ScalarConversionOpts Opts) {
  // The Element types determine the type of cast to perform.
  llvm::Type *SrcElementTy;
  llvm::Type *DstElementTy;
  QualType SrcElementType;
  QualType DstElementType;
  if (SrcType->isMatrixType() && DstType->isMatrixType()) {
    // Matrices are lowered to LLVM vectors; cast on their element types.
    SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
    DstElementType = DstType->castAs<MatrixType>()->getElementType();
  } else {
    assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
           "cannot cast between matrix and non-matrix types");
    SrcElementTy = SrcTy;
    DstElementTy = DstTy;
    SrcElementType = SrcType;
    DstElementType = DstType;
  }

  if (isa<llvm::IntegerType>(SrcElementTy)) {
    bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
    // Booleans can optionally be treated as signed (i1 true -> -1).
    if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }

    if (isa<llvm::IntegerType>(DstElementTy))
      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    if (InputSigned)
      return Builder.CreateSIToFP(Src, DstTy, "conv");
    return Builder.CreateUIToFP(Src, DstTy, "conv");
  }

  if (isa<llvm::IntegerType>(DstElementTy)) {
    assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
    bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();

    // If we can't recognize overflow as undefined behavior, assume that
    // overflow saturates. This protects against normal optimizations if we are
    // compiling with non-standard FP semantics.
    if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
      llvm::Intrinsic::ID IID =
          IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
      return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
    }

    if (IsSigned)
      return Builder.CreateFPToSI(Src, DstTy, "conv");
    return Builder.CreateFPToUI(Src, DstTy, "conv");
  }

  // Between two distinct 16-bit FP formats there is no direct cast; go
  // through float (extend then truncate).
  if ((DstElementTy->is16bitFPTy() && SrcElementTy->is16bitFPTy())) {
    Value *FloatVal = Builder.CreateFPExt(Src, Builder.getFloatTy(), "fpext");
    return Builder.CreateFPTrunc(FloatVal, DstTy, "fptrunc");
  }
  // Remaining case is FP -> FP: a lower TypeID is used here to select
  // truncation vs. extension between the two floating-point formats.
  if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
    return Builder.CreateFPTrunc(Src, DstTy, "conv");
  return Builder.CreateFPExt(Src, DstTy, "conv");
}
1540
1541/// Emit a conversion from the specified type to the specified destination type,
1542/// both of which are LLVM scalar types.
1543Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1544 QualType DstType,
1545 SourceLocation Loc,
1546 ScalarConversionOpts Opts) {
1547 // All conversions involving fixed point types should be handled by the
1548 // EmitFixedPoint family functions. This is done to prevent bloating up this
1549 // function more, and although fixed point numbers are represented by
1550 // integers, we do not want to follow any logic that assumes they should be
1551 // treated as integers.
1552 // TODO(leonardchan): When necessary, add another if statement checking for
1553 // conversions to fixed point types from other types.
1554 if (SrcType->isFixedPointType()) {
1555 if (DstType->isBooleanType())
1556 // It is important that we check this before checking if the dest type is
1557 // an integer because booleans are technically integer types.
1558 // We do not need to check the padding bit on unsigned types if unsigned
1559 // padding is enabled because overflow into this bit is undefined
1560 // behavior.
1561 return Builder.CreateIsNotNull(Src, "tobool");
1562 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1563 DstType->isRealFloatingType())
1564 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1565
1566 llvm_unreachable(
1567 "Unhandled scalar conversion from a fixed point type to another type.");
1568 } else if (DstType->isFixedPointType()) {
1569 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1570 // This also includes converting booleans and enums to fixed point types.
1571 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1572
1573 llvm_unreachable(
1574 "Unhandled scalar conversion to a fixed point type from another type.");
1575 }
1576
1577 QualType NoncanonicalSrcType = SrcType;
1578 QualType NoncanonicalDstType = DstType;
1579
1580 SrcType = CGF.getContext().getCanonicalType(SrcType);
1581 DstType = CGF.getContext().getCanonicalType(DstType);
1582 if (SrcType == DstType) return Src;
1583
1584 if (DstType->isVoidType()) return nullptr;
1585
1586 llvm::Value *OrigSrc = Src;
1587 QualType OrigSrcType = SrcType;
1588 llvm::Type *SrcTy = Src->getType();
1589
1590 // Handle conversions to bool first, they are special: comparisons against 0.
1591 if (DstType->isBooleanType())
1592 return EmitConversionToBool(Src, SrcType);
1593
1594 llvm::Type *DstTy = ConvertType(DstType);
1595
1596 // Cast from half through float if half isn't a native type.
1597 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1598 // Cast to FP using the intrinsic if the half type itself isn't supported.
1599 if (DstTy->isFloatingPointTy()) {
1601 return Builder.CreateCall(
1602 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1603 Src);
1604 } else {
1605 // Cast to other types through float, using either the intrinsic or FPExt,
1606 // depending on whether the half type itself is supported
1607 // (as opposed to operations on half, available with NativeHalfType).
1609 Src = Builder.CreateCall(
1610 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1611 CGF.CGM.FloatTy),
1612 Src);
1613 } else {
1614 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1615 }
1616 SrcType = CGF.getContext().FloatTy;
1617 SrcTy = CGF.FloatTy;
1618 }
1619 }
1620
1621 // Ignore conversions like int -> uint.
1622 if (SrcTy == DstTy) {
1623 if (Opts.EmitImplicitIntegerSignChangeChecks)
1624 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1625 NoncanonicalDstType, Loc);
1626
1627 return Src;
1628 }
1629
1630 // Handle pointer conversions next: pointers can only be converted to/from
1631 // other pointers and integers. Check for pointer types in terms of LLVM, as
1632 // some native types (like Obj-C id) may map to a pointer type.
1633 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1634 // The source value may be an integer, or a pointer.
1635 if (isa<llvm::PointerType>(SrcTy))
1636 return Src;
1637
1638 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1639 // First, convert to the correct width so that we control the kind of
1640 // extension.
1641 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1642 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1643 llvm::Value* IntResult =
1644 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1645 // Then, cast to pointer.
1646 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1647 }
1648
1649 if (isa<llvm::PointerType>(SrcTy)) {
1650 // Must be an ptr to int cast.
1651 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1652 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1653 }
1654
1655 // A scalar can be splatted to an extended vector of the same element type
1656 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1657 // Sema should add casts to make sure that the source expression's type is
1658 // the same as the vector's element type (sans qualifiers)
1659 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1660 SrcType.getTypePtr() &&
1661 "Splatted expr doesn't match with vector element type?");
1662
1663 // Splat the element across to all elements
1664 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1665 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1666 }
1667
1668 if (SrcType->isMatrixType() && DstType->isMatrixType())
1669 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1670
1671 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1672 // Allow bitcast from vector to integer/fp of the same size.
1673 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1674 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1675 if (SrcSize == DstSize)
1676 return Builder.CreateBitCast(Src, DstTy, "conv");
1677
1678 // Conversions between vectors of different sizes are not allowed except
1679 // when vectors of half are involved. Operations on storage-only half
1680 // vectors require promoting half vector operands to float vectors and
1681 // truncating the result, which is either an int or float vector, to a
1682 // short or half vector.
1683
1684 // Source and destination are both expected to be vectors.
1685 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1686 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1687 (void)DstElementTy;
1688
1689 assert(((SrcElementTy->isIntegerTy() &&
1690 DstElementTy->isIntegerTy()) ||
1691 (SrcElementTy->isFloatingPointTy() &&
1692 DstElementTy->isFloatingPointTy())) &&
1693 "unexpected conversion between a floating-point vector and an "
1694 "integer vector");
1695
1696 // Truncate an i32 vector to an i16 vector.
1697 if (SrcElementTy->isIntegerTy())
1698 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1699
1700 // Truncate a float vector to a half vector.
1701 if (SrcSize > DstSize)
1702 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1703
1704 // Promote a half vector to a float vector.
1705 return Builder.CreateFPExt(Src, DstTy, "conv");
1706 }
1707
1708 // Finally, we have the arithmetic types: real int/float.
1709 Value *Res = nullptr;
1710 llvm::Type *ResTy = DstTy;
1711
1712 // An overflowing conversion has undefined behavior if either the source type
1713 // or the destination type is a floating-point type. However, we consider the
1714 // range of representable values for all floating-point types to be
1715 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1716 // floating-point type.
1717 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1718 OrigSrcType->isFloatingType())
1719 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1720 Loc);
1721
1722 // Cast to half through float if half isn't a native type.
1723 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1724 // Make sure we cast in a single step if from another FP type.
1725 if (SrcTy->isFloatingPointTy()) {
1726 // Use the intrinsic if the half type itself isn't supported
1727 // (as opposed to operations on half, available with NativeHalfType).
1729 return Builder.CreateCall(
1730 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1731 // If the half type is supported, just use an fptrunc.
1732 return Builder.CreateFPTrunc(Src, DstTy);
1733 }
1734 DstTy = CGF.FloatTy;
1735 }
1736
1737 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1738
1739 if (DstTy != ResTy) {
1741 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1742 Res = Builder.CreateCall(
1743 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1744 Res);
1745 } else {
1746 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1747 }
1748 }
1749
1750 if (Opts.EmitImplicitIntegerTruncationChecks)
1751 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1752 NoncanonicalDstType, Loc);
1753
1754 if (Opts.EmitImplicitIntegerSignChangeChecks)
1755 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1756 NoncanonicalDstType, Loc);
1757
1758 return Res;
1759}
1760
1761Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1762 QualType DstTy,
1763 SourceLocation Loc) {
1764 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1765 llvm::Value *Result;
1766 if (SrcTy->isRealFloatingType())
1767 Result = FPBuilder.CreateFloatingToFixed(Src,
1768 CGF.getContext().getFixedPointSemantics(DstTy));
1769 else if (DstTy->isRealFloatingType())
1770 Result = FPBuilder.CreateFixedToFloating(Src,
1772 ConvertType(DstTy));
1773 else {
1774 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1775 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1776
1777 if (DstTy->isIntegerType())
1778 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1779 DstFPSema.getWidth(),
1780 DstFPSema.isSigned());
1781 else if (SrcTy->isIntegerType())
1782 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1783 DstFPSema);
1784 else
1785 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1786 }
1787 return Result;
1788}
1789
1790/// Emit a conversion from the specified complex type to the specified
1791/// destination type, where the destination type is an LLVM scalar type.
1792Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1793 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1794 SourceLocation Loc) {
1795 // Get the source element type.
1796 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1797
1798 // Handle conversions to bool first, they are special: comparisons against 0.
1799 if (DstTy->isBooleanType()) {
1800 // Complex != 0 -> (Real != 0) | (Imag != 0)
1801 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1802 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1803 return Builder.CreateOr(Src.first, Src.second, "tobool");
1804 }
1805
1806 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1807 // the imaginary part of the complex value is discarded and the value of the
1808 // real part is converted according to the conversion rules for the
1809 // corresponding real type.
1810 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1811}
1812
1813Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1814 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1815}
1816
1817/// Emit a sanitization check for the given "binary" operation (which
1818/// might actually be a unary increment which has been lowered to a binary
1819/// operation). The check passes if all values in \p Checks (which are \c i1),
1820/// are \c true.
1821void ScalarExprEmitter::EmitBinOpCheck(
1822 ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
1823 const BinOpInfo &Info) {
1824 assert(CGF.IsSanitizerScope);
1825 SanitizerHandler Check;
1826 SmallVector<llvm::Constant *, 4> StaticData;
1827 SmallVector<llvm::Value *, 2> DynamicData;
1828 TrapReason TR;
1829
1830 BinaryOperatorKind Opcode = Info.Opcode;
1833
1834 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1835 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1836 if (UO && UO->getOpcode() == UO_Minus) {
1837 Check = SanitizerHandler::NegateOverflow;
1838 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1839 DynamicData.push_back(Info.RHS);
1840 } else {
1841 if (BinaryOperator::isShiftOp(Opcode)) {
1842 // Shift LHS negative or too large, or RHS out of bounds.
1843 Check = SanitizerHandler::ShiftOutOfBounds;
1844 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1845 StaticData.push_back(
1846 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1847 StaticData.push_back(
1848 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1849 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1850 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1851 Check = SanitizerHandler::DivremOverflow;
1852 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1853 } else {
1854 // Arithmetic overflow (+, -, *).
1855 int ArithOverflowKind = 0;
1856 switch (Opcode) {
1857 case BO_Add: {
1858 Check = SanitizerHandler::AddOverflow;
1859 ArithOverflowKind = diag::UBSanArithKind::Add;
1860 break;
1861 }
1862 case BO_Sub: {
1863 Check = SanitizerHandler::SubOverflow;
1864 ArithOverflowKind = diag::UBSanArithKind::Sub;
1865 break;
1866 }
1867 case BO_Mul: {
1868 Check = SanitizerHandler::MulOverflow;
1869 ArithOverflowKind = diag::UBSanArithKind::Mul;
1870 break;
1871 }
1872 default:
1873 llvm_unreachable("unexpected opcode for bin op check");
1874 }
1875 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1877 SanitizerKind::UnsignedIntegerOverflow) ||
1879 SanitizerKind::SignedIntegerOverflow)) {
1880 // Only pay the cost for constructing the trap diagnostic if they are
1881 // going to be used.
1882 CGF.CGM.BuildTrapReason(diag::trap_ubsan_arith_overflow, TR)
1883 << Info.Ty->isSignedIntegerOrEnumerationType() << ArithOverflowKind
1884 << Info.E;
1885 }
1886 }
1887 DynamicData.push_back(Info.LHS);
1888 DynamicData.push_back(Info.RHS);
1889 }
1890
1891 CGF.EmitCheck(Checks, Check, StaticData, DynamicData, &TR);
1892}
1893
1894//===----------------------------------------------------------------------===//
1895// Visitor Methods
1896//===----------------------------------------------------------------------===//
1897
1898Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1899 CGF.ErrorUnsupported(E, "scalar expression");
1900 if (E->getType()->isVoidType())
1901 return nullptr;
1902 return llvm::PoisonValue::get(CGF.ConvertType(E->getType()));
1903}
1904
1905Value *
1906ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1907 ASTContext &Context = CGF.getContext();
1908 unsigned AddrSpace =
1910 llvm::Constant *GlobalConstStr = Builder.CreateGlobalString(
1911 E->ComputeName(Context), "__usn_str", AddrSpace);
1912
1913 llvm::Type *ExprTy = ConvertType(E->getType());
1914 return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,
1915 "usn_addr_cast");
1916}
1917
1918Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {
1919 assert(E->getDataElementCount() == 1);
1920 auto It = E->begin();
1921 return Builder.getInt((*It)->getValue());
1922}
1923
1924Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1925 // Vector Mask Case
1926 if (E->getNumSubExprs() == 2) {
1927 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1928 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1929 Value *Mask;
1930
1931 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
1932 unsigned LHSElts = LTy->getNumElements();
1933
1934 Mask = RHS;
1935
1936 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
1937
1938 // Mask off the high bits of each shuffle index.
1939 Value *MaskBits =
1940 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1941 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1942
1943 // newv = undef
1944 // mask = mask & maskbits
1945 // for each elt
1946 // n = extract mask i
1947 // x = extract val n
1948 // newv = insert newv, x, i
1949 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
1950 MTy->getNumElements());
1951 Value* NewV = llvm::PoisonValue::get(RTy);
1952 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1953 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1954 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1955
1956 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1957 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1958 }
1959 return NewV;
1960 }
1961
1962 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1963 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1964
1965 SmallVector<int, 32> Indices;
1966 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1967 llvm::APSInt Idx = E->getShuffleMaskIdx(i - 2);
1968 // Check for -1 and output it as undef in the IR.
1969 if (Idx.isSigned() && Idx.isAllOnes())
1970 Indices.push_back(-1);
1971 else
1972 Indices.push_back(Idx.getZExtValue());
1973 }
1974
1975 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
1976}
1977
1978Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1979 QualType SrcType = E->getSrcExpr()->getType(),
1980 DstType = E->getType();
1981
1982 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1983
1984 SrcType = CGF.getContext().getCanonicalType(SrcType);
1985 DstType = CGF.getContext().getCanonicalType(DstType);
1986 if (SrcType == DstType) return Src;
1987
1988 assert(SrcType->isVectorType() &&
1989 "ConvertVector source type must be a vector");
1990 assert(DstType->isVectorType() &&
1991 "ConvertVector destination type must be a vector");
1992
1993 llvm::Type *SrcTy = Src->getType();
1994 llvm::Type *DstTy = ConvertType(DstType);
1995
1996 // Ignore conversions like int -> uint.
1997 if (SrcTy == DstTy)
1998 return Src;
1999
2000 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
2001 DstEltType = DstType->castAs<VectorType>()->getElementType();
2002
2003 assert(SrcTy->isVectorTy() &&
2004 "ConvertVector source IR type must be a vector");
2005 assert(DstTy->isVectorTy() &&
2006 "ConvertVector destination IR type must be a vector");
2007
2008 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
2009 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
2010
2011 if (DstEltType->isBooleanType()) {
2012 assert((SrcEltTy->isFloatingPointTy() ||
2013 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
2014
2015 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
2016 if (SrcEltTy->isFloatingPointTy()) {
2017 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2018 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
2019 } else {
2020 return Builder.CreateICmpNE(Src, Zero, "tobool");
2021 }
2022 }
2023
2024 // We have the arithmetic types: real int/float.
2025 Value *Res = nullptr;
2026
2027 if (isa<llvm::IntegerType>(SrcEltTy)) {
2028 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
2029 if (isa<llvm::IntegerType>(DstEltTy))
2030 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
2031 else {
2032 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2033 if (InputSigned)
2034 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
2035 else
2036 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
2037 }
2038 } else if (isa<llvm::IntegerType>(DstEltTy)) {
2039 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
2040 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2041 if (DstEltType->isSignedIntegerOrEnumerationType())
2042 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
2043 else
2044 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
2045 } else {
2046 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
2047 "Unknown real conversion");
2048 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2049 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
2050 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
2051 else
2052 Res = Builder.CreateFPExt(Src, DstTy, "conv");
2053 }
2054
2055 return Res;
2056}
2057
2058Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
2059 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
2060 CGF.EmitIgnoredExpr(E->getBase());
2061 return CGF.emitScalarConstant(Constant, E);
2062 } else {
2063 Expr::EvalResult Result;
2065 llvm::APSInt Value = Result.Val.getInt();
2066 CGF.EmitIgnoredExpr(E->getBase());
2067 return Builder.getInt(Value);
2068 }
2069 }
2070
2071 llvm::Value *Result = EmitLoadOfLValue(E);
2072
2073 // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
2074 // debug info for the pointer, even if there is no variable associated with
2075 // the pointer's expression.
2076 if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
2077 if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Result)) {
2078 if (llvm::GetElementPtrInst *GEP =
2079 dyn_cast<llvm::GetElementPtrInst>(Load->getPointerOperand())) {
2080 if (llvm::Instruction *Pointer =
2081 dyn_cast<llvm::Instruction>(GEP->getPointerOperand())) {
2082 QualType Ty = E->getBase()->getType();
2083 if (!E->isArrow())
2084 Ty = CGF.getContext().getPointerType(Ty);
2085 CGF.getDebugInfo()->EmitPseudoVariable(Builder, Pointer, Ty);
2086 }
2087 }
2088 }
2089 }
2090 return Result;
2091}
2092
2093Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
2094 TestAndClearIgnoreResultAssign();
2095
2096 // Emit subscript expressions in rvalue context's. For most cases, this just
2097 // loads the lvalue formed by the subscript expr. However, we have to be
2098 // careful, because the base of a vector subscript is occasionally an rvalue,
2099 // so we can't get it as an lvalue.
2100 if (!E->getBase()->getType()->isVectorType() &&
2102 return EmitLoadOfLValue(E);
2103
2104 // Handle the vector case. The base must be a vector, the index must be an
2105 // integer value.
2106 Value *Base = Visit(E->getBase());
2107 Value *Idx = Visit(E->getIdx());
2108 QualType IdxTy = E->getIdx()->getType();
2109
2110 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
2111 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
2112
2113 return Builder.CreateExtractElement(Base, Idx, "vecext");
2114}
2115
2116Value *ScalarExprEmitter::VisitMatrixSingleSubscriptExpr(
2117 MatrixSingleSubscriptExpr *E) {
2118 TestAndClearIgnoreResultAssign();
2119
2120 auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2121 unsigned NumRows = MatrixTy->getNumRows();
2122 unsigned NumColumns = MatrixTy->getNumColumns();
2123
2124 // Row index
2125 Value *RowIdx = CGF.EmitMatrixIndexExpr(E->getRowIdx());
2126 llvm::MatrixBuilder MB(Builder);
2127
2128 // The row index must be in [0, NumRows)
2129 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2130 MB.CreateIndexAssumption(RowIdx, NumRows);
2131
2132 Value *FlatMatrix = Visit(E->getBase());
2133 llvm::Type *ElemTy = CGF.ConvertType(MatrixTy->getElementType());
2134 auto *ResultTy = llvm::FixedVectorType::get(ElemTy, NumColumns);
2135 Value *RowVec = llvm::PoisonValue::get(ResultTy);
2136
2137 for (unsigned Col = 0; Col != NumColumns; ++Col) {
2138 Value *ColVal = llvm::ConstantInt::get(RowIdx->getType(), Col);
2139 Value *EltIdx = MB.CreateIndex(RowIdx, ColVal, NumRows, "matrix_row_idx");
2140 Value *Elt =
2141 Builder.CreateExtractElement(FlatMatrix, EltIdx, "matrix_elem");
2142 Value *Lane = llvm::ConstantInt::get(Builder.getInt32Ty(), Col);
2143 RowVec = Builder.CreateInsertElement(RowVec, Elt, Lane, "matrix_row_ins");
2144 }
2145
2146 return RowVec;
2147}
2148
2149Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
2150 TestAndClearIgnoreResultAssign();
2151
2152 // Handle the vector case. The base must be a vector, the index must be an
2153 // integer value.
2154 Value *RowIdx = CGF.EmitMatrixIndexExpr(E->getRowIdx());
2155 Value *ColumnIdx = CGF.EmitMatrixIndexExpr(E->getColumnIdx());
2156
2157 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2158 unsigned NumRows = MatrixTy->getNumRows();
2159 llvm::MatrixBuilder MB(Builder);
2160 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
2161 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2162 MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());
2163
2164 Value *Matrix = Visit(E->getBase());
2165
2166 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
2167 return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
2168}
2169
2170static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
2171 unsigned Off) {
2172 int MV = SVI->getMaskValue(Idx);
2173 if (MV == -1)
2174 return -1;
2175 return Off + MV;
2176}
2177
2178static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
2179 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
2180 "Index operand too large for shufflevector mask!");
2181 return C->getZExtValue();
2182}
2183
2184Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
2185 bool Ignore = TestAndClearIgnoreResultAssign();
2186 (void)Ignore;
2187 unsigned NumInitElements = E->getNumInits();
2188 assert((Ignore == false ||
2189 (NumInitElements == 0 && E->getType()->isVoidType())) &&
2190 "init list ignored");
2191
2192 // HLSL initialization lists in the AST are an expansion which can contain
2193 // side-effecting expressions wrapped in opaque value expressions. To properly
2194 // emit these we need to emit the opaque values before we emit the argument
2195 // expressions themselves. This is a little hacky, but it prevents us needing
2196 // to do a bigger AST-level change for a language feature that we need
2197 // deprecate in the near future. See related HLSL language proposals in the
2198 // proposals (https://github.com/microsoft/hlsl-specs/blob/main/proposals):
2199 // * 0005-strict-initializer-lists.md
2200 // * 0032-constructors.md
2201 if (CGF.getLangOpts().HLSL)
2203
2204 if (E->hadArrayRangeDesignator())
2205 CGF.ErrorUnsupported(E, "GNU array range designator extension");
2206
2207 llvm::VectorType *VType =
2208 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
2209
2210 if (!VType) {
2211 if (NumInitElements == 0) {
2212 // C++11 value-initialization for the scalar.
2213 return EmitNullValue(E->getType());
2214 }
2215 // We have a scalar in braces. Just use the first element.
2216 return Visit(E->getInit(0));
2217 }
2218
2219 if (isa<llvm::ScalableVectorType>(VType)) {
2220 if (NumInitElements == 0) {
2221 // C++11 value-initialization for the vector.
2222 return EmitNullValue(E->getType());
2223 }
2224
2225 if (NumInitElements == 1) {
2226 Expr *InitVector = E->getInit(0);
2227
2228 // Initialize from another scalable vector of the same type.
2229 if (InitVector->getType().getCanonicalType() ==
2231 return Visit(InitVector);
2232 }
2233
2234 llvm_unreachable("Unexpected initialization of a scalable vector!");
2235 }
2236
2237 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
2238
2239 // Loop over initializers collecting the Value for each, and remembering
2240 // whether the source was swizzle (ExtVectorElementExpr). This will allow
2241 // us to fold the shuffle for the swizzle into the shuffle for the vector
2242 // initializer, since LLVM optimizers generally do not want to touch
2243 // shuffles.
2244 unsigned CurIdx = 0;
2245 bool VIsPoisonShuffle = false;
2246 llvm::Value *V = llvm::PoisonValue::get(VType);
2247 for (unsigned i = 0; i != NumInitElements; ++i) {
2248 Expr *IE = E->getInit(i);
2249 Value *Init = Visit(IE);
2250 SmallVector<int, 16> Args;
2251
2252 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
2253
2254 // Handle scalar elements. If the scalar initializer is actually one
2255 // element of a different vector of the same width, use shuffle instead of
2256 // extract+insert.
2257 if (!VVT) {
2258 if (isa<ExtVectorElementExpr>(IE)) {
2259 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
2260
2261 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
2262 ->getNumElements() == ResElts) {
2263 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
2264 Value *LHS = nullptr, *RHS = nullptr;
2265 if (CurIdx == 0) {
2266 // insert into poison -> shuffle (src, poison)
2267 // shufflemask must use an i32
2268 Args.push_back(getAsInt32(C, CGF.Int32Ty));
2269 Args.resize(ResElts, -1);
2270
2271 LHS = EI->getVectorOperand();
2272 RHS = V;
2273 VIsPoisonShuffle = true;
2274 } else if (VIsPoisonShuffle) {
2275 // insert into poison shuffle && size match -> shuffle (v, src)
2276 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
2277 for (unsigned j = 0; j != CurIdx; ++j)
2278 Args.push_back(getMaskElt(SVV, j, 0));
2279 Args.push_back(ResElts + C->getZExtValue());
2280 Args.resize(ResElts, -1);
2281
2282 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
2283 RHS = EI->getVectorOperand();
2284 VIsPoisonShuffle = false;
2285 }
2286 if (!Args.empty()) {
2287 V = Builder.CreateShuffleVector(LHS, RHS, Args);
2288 ++CurIdx;
2289 continue;
2290 }
2291 }
2292 }
2293 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
2294 "vecinit");
2295 VIsPoisonShuffle = false;
2296 ++CurIdx;
2297 continue;
2298 }
2299
2300 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
2301
2302 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
2303 // input is the same width as the vector being constructed, generate an
2304 // optimized shuffle of the swizzle input into the result.
2305 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
2306 if (isa<ExtVectorElementExpr>(IE)) {
2307 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
2308 Value *SVOp = SVI->getOperand(0);
2309 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
2310
2311 if (OpTy->getNumElements() == ResElts) {
2312 for (unsigned j = 0; j != CurIdx; ++j) {
2313 // If the current vector initializer is a shuffle with poison, merge
2314 // this shuffle directly into it.
2315 if (VIsPoisonShuffle) {
2316 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
2317 } else {
2318 Args.push_back(j);
2319 }
2320 }
2321 for (unsigned j = 0, je = InitElts; j != je; ++j)
2322 Args.push_back(getMaskElt(SVI, j, Offset));
2323 Args.resize(ResElts, -1);
2324
2325 if (VIsPoisonShuffle)
2326 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
2327
2328 Init = SVOp;
2329 }
2330 }
2331
2332 // Extend init to result vector length, and then shuffle its contribution
2333 // to the vector initializer into V.
2334 if (Args.empty()) {
2335 for (unsigned j = 0; j != InitElts; ++j)
2336 Args.push_back(j);
2337 Args.resize(ResElts, -1);
2338 Init = Builder.CreateShuffleVector(Init, Args, "vext");
2339
2340 Args.clear();
2341 for (unsigned j = 0; j != CurIdx; ++j)
2342 Args.push_back(j);
2343 for (unsigned j = 0; j != InitElts; ++j)
2344 Args.push_back(j + Offset);
2345 Args.resize(ResElts, -1);
2346 }
2347
2348 // If V is poison, make sure it ends up on the RHS of the shuffle to aid
2349 // merging subsequent shuffles into this one.
2350 if (CurIdx == 0)
2351 std::swap(V, Init);
2352 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
2353 VIsPoisonShuffle = isa<llvm::PoisonValue>(Init);
2354 CurIdx += InitElts;
2355 }
2356
2357 // FIXME: evaluate codegen vs. shuffling against constant null vector.
2358 // Emit remaining default initializers.
2359 llvm::Type *EltTy = VType->getElementType();
2360
2361 // Emit remaining default initializers
2362 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
2363 Value *Idx = Builder.getInt32(CurIdx);
2364 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
2365 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
2366 }
2367 return V;
2368}
2369
2371 return !D->isWeak();
2372}
2373
2374static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E) {
2375 E = E->IgnoreParens();
2376
2377 if (const auto *UO = dyn_cast<UnaryOperator>(E))
2378 if (UO->getOpcode() == UO_Deref)
2379 return CGF.isPointerKnownNonNull(UO->getSubExpr());
2380
2381 if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
2382 return isDeclRefKnownNonNull(CGF, DRE->getDecl());
2383
2384 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
2385 if (isa<FieldDecl>(ME->getMemberDecl()))
2386 return true;
2387 return isDeclRefKnownNonNull(CGF, ME->getMemberDecl());
2388 }
2389
2390 // Array subscripts? Anything else?
2391
2392 return false;
2393}
2394
2396 assert(E->getType()->isSignableType(getContext()));
2397
2398 E = E->IgnoreParens();
2399
2400 if (isa<CXXThisExpr>(E))
2401 return true;
2402
2403 if (const auto *UO = dyn_cast<UnaryOperator>(E))
2404 if (UO->getOpcode() == UO_AddrOf)
2405 return isLValueKnownNonNull(*this, UO->getSubExpr());
2406
2407 if (const auto *CE = dyn_cast<CastExpr>(E))
2408 if (CE->getCastKind() == CK_FunctionToPointerDecay ||
2409 CE->getCastKind() == CK_ArrayToPointerDecay)
2410 return isLValueKnownNonNull(*this, CE->getSubExpr());
2411
2412 // Maybe honor __nonnull?
2413
2414 return false;
2415}
2416
2418 const Expr *E = CE->getSubExpr();
2419
2420 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
2421 return false;
2422
2423 if (isa<CXXThisExpr>(E->IgnoreParens())) {
2424 // We always assume that 'this' is never null.
2425 return false;
2426 }
2427
2428 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2429 // And that glvalue casts are never null.
2430 if (ICE->isGLValue())
2431 return false;
2432 }
2433
2434 return true;
2435}
2436
2437// RHS is an aggregate type
2439 QualType DestTy, SourceLocation Loc) {
2440 SmallVector<LValue, 16> LoadList;
2441 CGF.FlattenAccessAndTypeLValue(SrcVal, LoadList);
2442 // Dest is either a vector or a builtin?
2443 // if its a vector create a temp alloca to store into and return that
2444 if (auto *VecTy = DestTy->getAs<VectorType>()) {
2445 assert(LoadList.size() >= VecTy->getNumElements() &&
2446 "Flattened type on RHS must have the same number or more elements "
2447 "than vector on LHS.");
2448 llvm::Value *V =
2449 CGF.Builder.CreateLoad(CGF.CreateIRTemp(DestTy, "flatcast.tmp"));
2450 // write to V.
2451 for (unsigned I = 0, E = VecTy->getNumElements(); I < E; I++) {
2452 RValue RVal = CGF.EmitLoadOfLValue(LoadList[I], Loc);
2453 assert(RVal.isScalar() &&
2454 "All flattened source values should be scalars.");
2455 llvm::Value *Cast =
2456 CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[I].getType(),
2457 VecTy->getElementType(), Loc);
2458 V = CGF.Builder.CreateInsertElement(V, Cast, I);
2459 }
2460 return V;
2461 }
2462 if (auto *MatTy = DestTy->getAs<ConstantMatrixType>()) {
2463 assert(LoadList.size() >= MatTy->getNumElementsFlattened() &&
2464 "Flattened type on RHS must have the same number or more elements "
2465 "than vector on LHS.");
2466
2467 llvm::Value *V =
2468 CGF.Builder.CreateLoad(CGF.CreateIRTemp(DestTy, "flatcast.tmp"));
2469 // V is an allocated temporary to build the truncated matrix into.
2470 for (unsigned I = 0, E = MatTy->getNumElementsFlattened(); I < E; I++) {
2471 unsigned ColMajorIndex =
2472 (I % MatTy->getNumRows()) * MatTy->getNumColumns() +
2473 (I / MatTy->getNumRows());
2474 RValue RVal = CGF.EmitLoadOfLValue(LoadList[ColMajorIndex], Loc);
2475 assert(RVal.isScalar() &&
2476 "All flattened source values should be scalars.");
2477 llvm::Value *Cast = CGF.EmitScalarConversion(
2478 RVal.getScalarVal(), LoadList[ColMajorIndex].getType(),
2479 MatTy->getElementType(), Loc);
2480 V = CGF.Builder.CreateInsertElement(V, Cast, I);
2481 }
2482 return V;
2483 }
2484 // if its a builtin just do an extract element or load.
2485 assert(DestTy->isBuiltinType() &&
2486 "Destination type must be a vector, matrix, or builtin type.");
2487 RValue RVal = CGF.EmitLoadOfLValue(LoadList[0], Loc);
2488 assert(RVal.isScalar() && "All flattened source values should be scalars.");
2489 return CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[0].getType(),
2490 DestTy, Loc);
2491}
2492
// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
// have to handle a more broad range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
//
// NOTE(review): this listing appears to have lost several continuation lines
// (a number of call expressions below are missing the line that names the
// callee). Each such spot is marked with a NOTE(review); verify any edit
// against the original file.
Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
  // Record the cast currently being emitted; the scope-exit restores the
  // previous value on every return path out of this function.
  auto RestoreCurCast =
      llvm::make_scope_exit([this, Prev = CGF.CurCast] { CGF.CurCast = Prev; });
  CGF.CurCast = CE;

  Expr *E = CE->getSubExpr();
  QualType DestTy = CE->getType();
  CastKind Kind = CE->getCastKind();
  CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);

  // These cases are generally not written to ignore the result of
  // evaluating their sub-expressions, so we clear this now.
  bool Ignored = TestAndClearIgnoreResultAssign();

  // Since almost all cast kinds apply to scalars, this switch doesn't have
  // a default case, so the compiler will warn on a missing case. The cases
  // are in the same order as in the CastKind enum.
  switch (Kind) {
  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  case CK_LValueBitCast:
  case CK_ObjCObjectLValueCast: {
    // Reinterpret the storage of the operand lvalue as the destination type
    // and load from it.
    Address Addr = EmitLValue(E).getAddress();
    Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
    LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
    return EmitLoadOfLValue(LV, CE->getExprLoc());
  }

  case CK_LValueToRValueBitCast: {
    // Like CK_LValueBitCast, but the reload is marked may-alias for TBAA.
    LValue SourceLVal = CGF.EmitLValue(E);
    Address Addr =
        SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy));
    LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
    DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
    return EmitLoadOfLValue(DestLV, CE->getExprLoc());
  }

  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    Value *Src = Visit(E);
    llvm::Type *SrcTy = Src->getType();
    llvm::Type *DstTy = ConvertType(DestTy);

    // FIXME: this is a gross but seemingly necessary workaround for an issue
    // manifesting when a target uses a non-default AS for indirect sret args,
    // but the source HLL is generic, wherein a valid C-cast or reinterpret_cast
    // on the address of a local struct that gets returned by value yields an
    // invalid bitcast from the a pointer to the IndirectAS to a pointer to the
    // DefaultAS. We can only do this subversive thing because sret args are
    // manufactured and them residing in the IndirectAS is a target specific
    // detail, and doing an AS cast here still retains the semantics the user
    // expects. It is desirable to remove this iff a better solution is found.
    if (auto A = dyn_cast<llvm::Argument>(Src); A && A->hasStructRetAttr())
      // NOTE(review): callee line missing from this listing.
          CGF, Src, E->getType().getAddressSpace(), DstTy);

    assert(
        (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
         SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
        "Address-space cast must be used to convert address spaces");

    if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
      if (auto *PT = DestTy->getAs<PointerType>()) {
        // NOTE(review): callee line missing from this listing, and a further
        // line inside the Address(...) argument is also elided.
            PT->getPointeeType(),
            Address(Src,
                        E->getType()->castAs<PointerType>()->getPointeeType()),
                    CGF.getPointerAlign()),
            /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
            CE->getBeginLoc());
      }
    }

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      const QualType SrcType = E->getType();

      if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
        // Casting to pointer that could carry dynamic information (provided by
        // invariant.group) requires launder.
        Src = Builder.CreateLaunderInvariantGroup(Src);
      } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
        // Casting to pointer that does not carry dynamic information (provided
        // by invariant.group) requires stripping it. Note that we don't do it
        // if the source could not be dynamic type and destination could be
        // dynamic because dynamic information is already laundered. It is
        // because launder(strip(src)) == launder(src), so there is no need to
        // add extra strip before launder.
        Src = Builder.CreateStripInvariantGroup(Src);
      }
    }

    // Update heapallocsite metadata when there is an explicit pointer cast.
    if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
      if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
          !isa<CastExpr>(E)) {
        QualType PointeeType = DestTy->getPointeeType();
        if (!PointeeType.isNull())
          CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
                                                       CE->getExprLoc());
      }
    }

    // If Src is a fixed vector and Dst is a scalable vector, and both have the
    // same element type, use the llvm.vector.insert intrinsic to perform the
    // bitcast.
    if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
        // If we are casting a fixed i8 vector to a scalable i1 predicate
        // vector, use a vector insert and bitcast the result.
        if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
            FixedSrcTy->getElementType()->isIntegerTy(8)) {
          ScalableDstTy = llvm::ScalableVectorType::get(
              FixedSrcTy->getElementType(),
              llvm::divideCeil(
                  ScalableDstTy->getElementCount().getKnownMinValue(), 8));
        }
        if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
          llvm::Value *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
          llvm::Value *Result = Builder.CreateInsertVector(
              ScalableDstTy, PoisonVec, Src, uint64_t(0), "cast.scalable");
          ScalableDstTy = cast<llvm::ScalableVectorType>(
              llvm::VectorType::getWithSizeAndScalar(ScalableDstTy, DstTy));
          if (Result->getType() != ScalableDstTy)
            Result = Builder.CreateBitCast(Result, ScalableDstTy);
          if (Result->getType() != DstTy)
            Result = Builder.CreateExtractVector(DstTy, Result, uint64_t(0));
          return Result;
        }
      }
    }

    // If Src is a scalable vector and Dst is a fixed vector, and both have the
    // same element type, use the llvm.vector.extract intrinsic to perform the
    // bitcast.
    if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
      if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
        // If we are casting a scalable i1 predicate vector to a fixed i8
        // vector, bitcast the source and use a vector extract.
        if (ScalableSrcTy->getElementType()->isIntegerTy(1) &&
            FixedDstTy->getElementType()->isIntegerTy(8)) {
          // Pad the i1 source out to a multiple of 8 lanes with zeroes before
          // reinterpreting it as i8 lanes.
          if (!ScalableSrcTy->getElementCount().isKnownMultipleOf(8)) {
            ScalableSrcTy = llvm::ScalableVectorType::get(
                ScalableSrcTy->getElementType(),
                llvm::alignTo<8>(
                    ScalableSrcTy->getElementCount().getKnownMinValue()));
            llvm::Value *ZeroVec = llvm::Constant::getNullValue(ScalableSrcTy);
            Src = Builder.CreateInsertVector(ScalableSrcTy, ZeroVec, Src,
                                             uint64_t(0));
          }

          ScalableSrcTy = llvm::ScalableVectorType::get(
              FixedDstTy->getElementType(),
              ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
          Src = Builder.CreateBitCast(Src, ScalableSrcTy);
        }
        if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType())
          return Builder.CreateExtractVector(DstTy, Src, uint64_t(0),
                                             "cast.fixed");
      }
    }

    // Perform VLAT <-> VLST bitcast through memory.
    // TODO: since the llvm.vector.{insert,extract} intrinsics
    // require the element types of the vectors to be the same, we
    // need to keep this around for bitcasts between VLAT <-> VLST where
    // the element types of the vectors are not the same, until we figure
    // out a better way of doing these casts.
    if ((isa<llvm::FixedVectorType>(SrcTy) &&
         // NOTE(review): the remainder of this condition is on lines missing
         // from this listing.
      Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
      LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
      CGF.EmitStoreOfScalar(Src, LV);
      Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
      LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
      DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
      return EmitLoadOfLValue(DestLV, CE->getExprLoc());
    }

    llvm::Value *Result = Builder.CreateBitCast(Src, DstTy);
    return CGF.authPointerToPointerCast(Result, E->getType(), DestTy);
  }
  case CK_AddressSpaceConversion: {
    Expr::EvalResult Result;
    if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
        Result.Val.isNullPointer()) {
      // If E has side effect, it is emitted even if its final result is a
      // null pointer. In that case, a DCE pass should be able to
      // eliminate the useless instructions emitted during translating E.
      if (Result.HasSideEffects)
        Visit(E);
      // NOTE(review): callee line missing from this listing.
          ConvertType(DestTy)), DestTy);
    }
    // Since target may map different address spaces in AST to the same address
    // space, an address space conversion may end up as a bitcast.
    // NOTE(review): callee line missing from this listing.
        CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
        ConvertType(DestTy));
  }
  case CK_AtomicToNonAtomic:
  case CK_NonAtomicToAtomic:
  case CK_UserDefinedConversion:
    return Visit(E);

  case CK_NoOp: {
    return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE) : Visit(E);
  }

  case CK_BaseToDerived: {
    const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
    assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");

    Address Base = CGF.EmitPointerWithAlignment(E);
    Address Derived =
        CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
                                     CE->path_begin(), CE->path_end(),
                                     // NOTE(review): final argument line
                                     // missing from this listing.

    // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    if (CGF.sanitizePerformTypeCheck())
      // NOTE(review): callee line missing from this listing.
          Derived, DestTy->getPointeeType());

    if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
      CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
                                    /*MayBeNull=*/true,
                                    // NOTE(review): check-kind argument line
                                    // missing from this listing.
                                    CE->getBeginLoc());

    return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType());
  }
  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    // The EmitPointerWithAlignment path does this fine; just discard
    // the alignment.
    // NOTE(review): the return statement's callee line is missing from this
    // listing.
        CE->getType()->getPointeeType());
  }

  case CK_Dynamic: {
    Address V = CGF.EmitPointerWithAlignment(E);
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
    return CGF.EmitDynamicCast(V, DCE);
  }

  case CK_ArrayToPointerDecay:
    // NOTE(review): the return statement's callee line is missing from this
    // listing.
        CE->getType()->getPointeeType());
  case CK_FunctionToPointerDecay:
    return EmitLValue(E).getPointer(CGF);

  case CK_NullToPointer:
    // Evaluate the operand for side effects when required, then produce the
    // target's null pointer value.
    if (MustVisitNullValue(E))
      CGF.EmitIgnoredExpr(E);

    return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
                                  DestTy);

  case CK_NullToMemberPointer: {
    if (MustVisitNullValue(E))
      CGF.EmitIgnoredExpr(E);

    const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
  }

  case CK_ReinterpretMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer: {
    Value *Src = Visit(E);

    // Note that the AST doesn't distinguish between checked and
    // unchecked member pointer conversions, so we always have to
    // implement checked conversions here. This is inefficient when
    // actual control flow may be required in order to perform the
    // check, which it is for data member pointers (but not member
    // function pointers on Itanium and ARM).
    return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
  }

  case CK_ARCProduceObject:
    return CGF.EmitARCRetainScalarExpr(E);
  case CK_ARCConsumeObject:
    return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
  case CK_ARCReclaimReturnedObject:
    return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
  case CK_ARCExtendBlockObject:
    return CGF.EmitARCExtendBlockObject(E);

  case CK_CopyAndAutoreleaseBlockObject:
    return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());

  case CK_FloatingRealToComplex:
  case CK_FloatingComplexCast:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_FloatingComplexToIntegralComplex:
  case CK_ConstructorConversion:
  case CK_ToUnion:
  case CK_HLSLArrayRValue:
    llvm_unreachable("scalar cast to non-scalar value");

  case CK_LValueToRValue:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
    assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
    return Visit(E);

  case CK_IntegralToPointer: {
    Value *Src = Visit(E);

    // First, convert to the correct width so that we control the kind of
    // extension.
    auto DestLLVMTy = ConvertType(DestTy);
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
    bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
    llvm::Value* IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");

    auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      // Going from integer to pointer that could be dynamic requires reloading
      // dynamic information from invariant.group.
      if (DestTy.mayBeDynamicClass())
        IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
    }

    IntToPtr = CGF.authPointerToPointerCast(IntToPtr, E->getType(), DestTy);
    return IntToPtr;
  }
  case CK_PointerToIntegral: {
    assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
    auto *PtrExpr = Visit(E);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      const QualType SrcType = E->getType();

      // Casting to integer requires stripping dynamic information as it does
      // not carries it.
      if (SrcType.mayBeDynamicClass())
        PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
    }

    PtrExpr = CGF.authPointerToPointerCast(PtrExpr, E->getType(), DestTy);
    return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
  }
  case CK_ToVoid: {
    // Result is discarded; emit the operand for its side effects only.
    CGF.EmitIgnoredExpr(E);
    return nullptr;
  }
  case CK_MatrixCast: {
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  // CK_HLSLAggregateSplatCast only handles splatting to vectors from a vec1
  // Casts were inserted in Sema to Cast the Src Expr to a Scalar and
  // To perform any necessary Scalar Cast, so this Cast can be handled
  // by the regular Vector Splat cast code.
  case CK_HLSLAggregateSplatCast:
  case CK_VectorSplat: {
    llvm::Type *DstTy = ConvertType(DestTy);
    Value *Elt = Visit(E);
    // Splat the element across to all elements
    llvm::ElementCount NumElements =
        cast<llvm::VectorType>(DstTy)->getElementCount();
    return Builder.CreateVectorSplat(NumElements, Elt, "splat");
  }

  case CK_FixedPointCast:
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_FixedPointToBoolean:
    assert(E->getType()->isFixedPointType() &&
           "Expected src type to be fixed point type");
    assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_FixedPointToIntegral:
    assert(E->getType()->isFixedPointType() &&
           "Expected src type to be fixed point type");
    assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_IntegralToFixedPoint:
    assert(E->getType()->isIntegerType() &&
           "Expected src type to be an integer");
    assert(DestTy->isFixedPointType() &&
           "Expected dest type to be fixed point type");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_IntegralCast: {
    if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
      QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
      return Builder.CreateIntCast(Visit(E), ConvertType(DestTy),
                                   // NOTE(review): the signedness argument
                                   // line is missing from this listing.
                                   "conv");
    }
    // Implicit integral casts get sanitizer-instrumented conversions unless
    // they are part of an explicit cast.
    ScalarConversionOpts Opts;
    if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
      if (!ICE->isPartOfExplicitCast())
        Opts = ScalarConversionOpts(CGF.SanOpts);
    }
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc(), Opts);
  }
  case CK_IntegralToFloating: {
    if (E->getType()->isVectorType() && DestTy->isVectorType()) {
      // TODO: Support constrained FP intrinsics.
      QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
      if (SrcElTy->isSignedIntegerOrEnumerationType())
        return Builder.CreateSIToFP(Visit(E), ConvertType(DestTy), "conv");
      return Builder.CreateUIToFP(Visit(E), ConvertType(DestTy), "conv");
    }
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_FloatingToIntegral: {
    if (E->getType()->isVectorType() && DestTy->isVectorType()) {
      // TODO: Support constrained FP intrinsics.
      QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
      if (DstElTy->isSignedIntegerOrEnumerationType())
        return Builder.CreateFPToSI(Visit(E), ConvertType(DestTy), "conv");
      return Builder.CreateFPToUI(Visit(E), ConvertType(DestTy), "conv");
    }
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_FloatingCast: {
    if (E->getType()->isVectorType() && DestTy->isVectorType()) {
      // TODO: Support constrained FP intrinsics.
      // Choose trunc vs. ext by comparing BuiltinType kinds of the element
      // types.
      QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
      QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
      if (DstElTy->castAs<BuiltinType>()->getKind() <
          SrcElTy->castAs<BuiltinType>()->getKind())
        return Builder.CreateFPTrunc(Visit(E), ConvertType(DestTy), "conv");
      return Builder.CreateFPExt(Visit(E), ConvertType(DestTy), "conv");
    }
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_FixedPointToFloating:
  case CK_FloatingToFixedPoint: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_BooleanToSignedIntegral: {
    ScalarConversionOpts Opts;
    Opts.TreatBooleanAsSigned = true;
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc(), Opts);
  }
  case CK_IntegralToBoolean:
    return EmitIntToBoolConversion(Visit(E));
  case CK_PointerToBoolean:
    return EmitPointerToBoolConversion(Visit(E), E->getType());
  case CK_FloatingToBoolean: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitFloatToBoolConversion(Visit(E));
  }
  case CK_MemberPointerToBoolean: {
    llvm::Value *MemPtr = Visit(E);
    const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
  }

  case CK_FloatingComplexToReal:
  case CK_IntegralComplexToReal:
    return CGF.EmitComplexExpr(E, false, true).first;

  case CK_FloatingComplexToBoolean:
  case CK_IntegralComplexToBoolean: {
    // NOTE(review): the line materializing the complex value `V` is missing
    // from this listing.

    // TODO: kill this function off, inline appropriate case here
    return EmitComplexToScalarConversion(V, E->getType(), DestTy,
                                         CE->getExprLoc());
  }

  case CK_ZeroToOCLOpaqueType: {
    assert((DestTy->isEventT() || DestTy->isQueueT() ||
            DestTy->isOCLIntelSubgroupAVCType()) &&
           "CK_ZeroToOCLEvent cast on non-event type");
    return llvm::Constant::getNullValue(ConvertType(DestTy));
  }

  case CK_IntToOCLSampler:
    return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);

  case CK_HLSLVectorTruncation: {
    assert((DestTy->isVectorType() || DestTy->isBuiltinType()) &&
           "Destination type must be a vector or builtin type.");
    Value *Vec = Visit(E);
    if (auto *VecTy = DestTy->getAs<VectorType>()) {
      // Keep the leading NumElts lanes of the source vector.
      SmallVector<int> Mask;
      unsigned NumElts = VecTy->getNumElements();
      for (unsigned I = 0; I != NumElts; ++I)
        Mask.push_back(I);

      return Builder.CreateShuffleVector(Vec, Mask, "trunc");
    }
    // Truncation to a scalar: take element 0.
    llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy);
    return Builder.CreateExtractElement(Vec, Zero, "cast.vtrunc");
  }
  case CK_HLSLMatrixTruncation: {
    assert((DestTy->isMatrixType() || DestTy->isBuiltinType()) &&
           "Destination type must be a matrix or builtin type.");
    Value *Mat = Visit(E);
    if (auto *MatTy = DestTy->getAs<ConstantMatrixType>()) {
      // Select the destination's RxC sub-block out of the (possibly wider)
      // source matrix; ColOffset is the source's column count when known.
      SmallVector<int> Mask;
      unsigned NumCols = MatTy->getNumColumns();
      unsigned NumRows = MatTy->getNumRows();
      unsigned ColOffset = NumCols;
      if (auto *SrcMatTy = E->getType()->getAs<ConstantMatrixType>())
        ColOffset = SrcMatTy->getNumColumns();
      for (unsigned R = 0; R < NumRows; R++) {
        for (unsigned C = 0; C < NumCols; C++) {
          unsigned I = R * ColOffset + C;
          Mask.push_back(I);
        }
      }

      return Builder.CreateShuffleVector(Mat, Mask, "trunc");
    }
    llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy);
    return Builder.CreateExtractElement(Mat, Zero, "cast.mtrunc");
  }
  case CK_HLSLElementwiseCast: {
    RValue RV = CGF.EmitAnyExpr(E);
    SourceLocation Loc = CE->getExprLoc();

    Address SrcAddr = Address::invalid();

    if (RV.isAggregate()) {
      SrcAddr = RV.getAggregateAddress();
    } else {
      // Non-aggregate source: spill it to a temporary so the elementwise
      // cast can read it through an lvalue.
      SrcAddr = CGF.CreateMemTemp(E->getType(), "hlsl.ewcast.src");
      LValue TmpLV = CGF.MakeAddrLValue(SrcAddr, E->getType());
      CGF.EmitStoreThroughLValue(RV, TmpLV);
    }

    LValue SrcVal = CGF.MakeAddrLValue(SrcAddr, E->getType());
    return EmitHLSLElementwiseCast(CGF, SrcVal, DestTy, Loc);
  }

  } // end of switch

  llvm_unreachable("unknown scalar cast");
}
3061
3062Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
3063 CodeGenFunction::StmtExprEvaluation eval(CGF);
3064 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
3065 !E->getType()->isVoidType());
3066 if (!RetAlloca.isValid())
3067 return nullptr;
3068 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
3069 E->getExprLoc());
3070}
3071
3072Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
3073 CodeGenFunction::RunCleanupsScope Scope(CGF);
3074 Value *V = Visit(E->getSubExpr());
3075 // Defend against dominance problems caused by jumps out of expression
3076 // evaluation through the shared cleanup block.
3077 Scope.ForceCleanup({&V});
3078 return V;
3079}
3080
3081//===----------------------------------------------------------------------===//
3082// Unary Operators
3083//===----------------------------------------------------------------------===//
3084
3086 llvm::Value *InVal, bool IsInc,
3087 FPOptions FPFeatures) {
3088 BinOpInfo BinOp;
3089 BinOp.LHS = InVal;
3090 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
3091 BinOp.Ty = E->getType();
3092 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
3093 BinOp.FPFeatures = FPFeatures;
3094 BinOp.E = E;
3095 return BinOp;
3096}
3097
/// Emit ++/-- of \p InVal as an add of +1/-1, choosing wrapping, nsw, or a
/// checked operation based on the signed-overflow behavior mode and whether
/// the signed-integer-overflow sanitizer is enabled. The cases below
/// deliberately fall through from most- to least-permissive mode.
llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
    const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
  // Both directions are emitted as an add; decrement adds -1.
  llvm::Value *Amount =
      llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
  StringRef Name = IsInc ? "inc" : "dec";
  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
  case LangOptions::SOB_Defined:
    // Overflow is defined (wraps): plain add — unless the sanitizer is on,
    // in which case fall through so the operation gets instrumented.
    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
      return Builder.CreateAdd(InVal, Amount, Name);
    [[fallthrough]];
  case LangOptions::SOB_Undefined:
    // Overflow is UB: emit an nsw add — again unless the sanitizer wants to
    // check it, in which case fall through to the trapping path.
    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
      return Builder.CreateNSWAdd(InVal, Amount, Name);
    [[fallthrough]];
  case LangOptions::SOB_Trapping:
    // Emit the checked form unless the expression provably cannot overflow.
    BinOpInfo Info = createBinOpInfoFromIncDec(
        E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts()));
    if (!E->canOverflow() || CanElideOverflowCheck(CGF.getContext(), Info))
      return Builder.CreateNSWAdd(InVal, Amount, Name);
    return EmitOverflowCheckedBinOp(Info);
  }
  llvm_unreachable("Unknown SignedOverflowBehaviorTy");
}
3121
/// For the purposes of overflow pattern exclusion, does this match the
/// "while(i--)" pattern?
static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc,
                                   bool isPre, ASTContext &Ctx) {
  // Only a post-decrement can match the pattern.
  if (isInc || isPre)
    return false;

  // -fsanitize-undefined-ignore-overflow-pattern=unsigned-post-decr-while
  // NOTE(review): the condition guarding this early return is on lines
  // missing from this listing (presumably a lang-options check for the
  // pattern-exclusion flag); verify against the original file.
    return false;

  // all Parents (usually just one) must be a WhileStmt
  for (const auto &Parent : Ctx.getParentMapContext().getParents(*UO))
    if (!Parent.get<WhileStmt>())
      return false;

  return true;
}
3141
namespace {
/// Handles check and update for lastprivate conditional variables.
///
/// Constructed around the emission of an inc/dec; on destruction, when
/// compiling under OpenMP, it runs the lastprivate-conditional check for the
/// operator's subexpression.
class OMPLastprivateConditionalUpdateRAII {
private:
  CodeGenFunction &CGF;   // CodeGen state the check is emitted into.
  const UnaryOperator *E; // The inc/dec whose operand may be lastprivate.

public:
  OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
                                      const UnaryOperator *E)
      : CGF(CGF), E(E) {}
  ~OMPLastprivateConditionalUpdateRAII() {
    // NOTE(review): the callee of this call is on a line missing from this
    // listing (an OpenMP-runtime lastprivate-conditional hook); verify
    // against the original file.
    if (CGF.getLangOpts().OpenMP)
          CGF, E->getSubExpr());
  }
};
} // namespace
3160
3161llvm::Value *
3162ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
3163 bool isInc, bool isPre) {
3164 ApplyAtomGroup Grp(CGF.getDebugInfo());
3165 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
3166 QualType type = E->getSubExpr()->getType();
3167 llvm::PHINode *atomicPHI = nullptr;
3168 llvm::Value *value;
3169 llvm::Value *input;
3170 llvm::Value *Previous = nullptr;
3171 QualType SrcType = E->getType();
3172
3173 int amount = (isInc ? 1 : -1);
3174 bool isSubtraction = !isInc;
3175
3176 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
3177 type = atomicTy->getValueType();
3178 if (isInc && type->isBooleanType()) {
3179 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
3180 if (isPre) {
3181 Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
3182 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
3183 return Builder.getTrue();
3184 }
3185 // For atomic bool increment, we just store true and return it for
3186 // preincrement, do an atomic swap with true for postincrement
3187 return Builder.CreateAtomicRMW(
3188 llvm::AtomicRMWInst::Xchg, LV.getAddress(), True,
3189 llvm::AtomicOrdering::SequentiallyConsistent);
3190 }
3191 // Special case for atomic increment / decrement on integers, emit
3192 // atomicrmw instructions. We skip this if we want to be doing overflow
3193 // checking, and fall into the slow path with the atomic cmpxchg loop.
3194 if (!type->isBooleanType() && type->isIntegerType() &&
3195 !(type->isUnsignedIntegerType() &&
3196 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3197 CGF.getLangOpts().getSignedOverflowBehavior() !=
3198 LangOptions::SOB_Trapping) {
3199 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
3200 llvm::AtomicRMWInst::Sub;
3201 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
3202 llvm::Instruction::Sub;
3203 llvm::Value *amt = CGF.EmitToMemory(
3204 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
3205 llvm::Value *old =
3206 Builder.CreateAtomicRMW(aop, LV.getAddress(), amt,
3207 llvm::AtomicOrdering::SequentiallyConsistent);
3208 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
3209 }
3210 // Special case for atomic increment/decrement on floats.
3211 // Bail out non-power-of-2-sized floating point types (e.g., x86_fp80).
3212 if (type->isFloatingType()) {
3213 llvm::Type *Ty = ConvertType(type);
3214 if (llvm::has_single_bit(Ty->getScalarSizeInBits())) {
3215 llvm::AtomicRMWInst::BinOp aop =
3216 isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
3217 llvm::Instruction::BinaryOps op =
3218 isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
3219 llvm::Value *amt = llvm::ConstantFP::get(Ty, 1.0);
3220 llvm::AtomicRMWInst *old =
3221 CGF.emitAtomicRMWInst(aop, LV.getAddress(), amt,
3222 llvm::AtomicOrdering::SequentiallyConsistent);
3223
3224 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
3225 }
3226 }
3227 value = EmitLoadOfLValue(LV, E->getExprLoc());
3228 input = value;
3229 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
3230 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3231 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3232 value = CGF.EmitToMemory(value, type);
3233 Builder.CreateBr(opBB);
3234 Builder.SetInsertPoint(opBB);
3235 atomicPHI = Builder.CreatePHI(value->getType(), 2);
3236 atomicPHI->addIncoming(value, startBB);
3237 value = atomicPHI;
3238 } else {
3239 value = EmitLoadOfLValue(LV, E->getExprLoc());
3240 input = value;
3241 }
3242
3243 // Special case of integer increment that we have to check first: bool++.
3244 // Due to promotion rules, we get:
3245 // bool++ -> bool = bool + 1
3246 // -> bool = (int)bool + 1
3247 // -> bool = ((int)bool + 1 != 0)
3248 // An interesting aspect of this is that increment is always true.
3249 // Decrement does not have this property.
3250 if (isInc && type->isBooleanType()) {
3251 value = Builder.getTrue();
3252
3253 // Most common case by far: integer increment.
3254 } else if (type->isIntegerType()) {
3255 QualType promotedType;
3256 bool canPerformLossyDemotionCheck = false;
3257
3258 bool excludeOverflowPattern =
3259 matchesPostDecrInWhile(E, isInc, isPre, CGF.getContext());
3260
3262 promotedType = CGF.getContext().getPromotedIntegerType(type);
3263 assert(promotedType != type && "Shouldn't promote to the same type.");
3264 canPerformLossyDemotionCheck = true;
3265 canPerformLossyDemotionCheck &=
3267 CGF.getContext().getCanonicalType(promotedType);
3268 canPerformLossyDemotionCheck &=
3270 type, promotedType);
3271 assert((!canPerformLossyDemotionCheck ||
3272 type->isSignedIntegerOrEnumerationType() ||
3273 promotedType->isSignedIntegerOrEnumerationType() ||
3274 ConvertType(type)->getScalarSizeInBits() ==
3275 ConvertType(promotedType)->getScalarSizeInBits()) &&
3276 "The following check expects that if we do promotion to different "
3277 "underlying canonical type, at least one of the types (either "
3278 "base or promoted) will be signed, or the bitwidths will match.");
3279 }
3280 if (CGF.SanOpts.hasOneOf(
3281 SanitizerKind::ImplicitIntegerArithmeticValueChange |
3282 SanitizerKind::ImplicitBitfieldConversion) &&
3283 canPerformLossyDemotionCheck) {
3284 // While `x += 1` (for `x` with width less than int) is modeled as
3285 // promotion+arithmetics+demotion, and we can catch lossy demotion with
3286 // ease; inc/dec with width less than int can't overflow because of
3287 // promotion rules, so we omit promotion+demotion, which means that we can
3288 // not catch lossy "demotion". Because we still want to catch these cases
3289 // when the sanitizer is enabled, we perform the promotion, then perform
3290 // the increment/decrement in the wider type, and finally
3291 // perform the demotion. This will catch lossy demotions.
3292
3293 // We have a special case for bitfields defined using all the bits of the
3294 // type. In this case we need to do the same trick as for the integer
3295 // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.
3296
3297 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
3298 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
3299 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3300 // Do pass non-default ScalarConversionOpts so that sanitizer check is
3301 // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
3302 // checks will take care of the conversion.
3303 ScalarConversionOpts Opts;
3304 if (!LV.isBitField())
3305 Opts = ScalarConversionOpts(CGF.SanOpts);
3306 else if (CGF.SanOpts.has(SanitizerKind::ImplicitBitfieldConversion)) {
3307 Previous = value;
3308 SrcType = promotedType;
3309 }
3310
3311 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
3312 Opts);
3313
3314 // Note that signed integer inc/dec with width less than int can't
3315 // overflow because of promotion rules; we're just eliding a few steps
3316 // here.
3317 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
3318 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
3319 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
3320 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3321 !excludeOverflowPattern &&
3323 SanitizerKind::UnsignedIntegerOverflow, E->getType())) {
3324 value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
3325 E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
3326 } else {
3327 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
3328 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3329 }
3330
3331 // Next most common: pointer increment.
3332 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
3333 QualType type = ptr->getPointeeType();
3334
3335 // VLA types don't have constant size.
3336 if (const VariableArrayType *vla
3338 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
3339 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
3340 llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
3341 if (CGF.getLangOpts().PointerOverflowDefined)
3342 value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
3343 else
3344 value = CGF.EmitCheckedInBoundsGEP(
3345 elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
3346 E->getExprLoc(), "vla.inc");
3347
3348 // Arithmetic on function pointers (!) is just +-1.
3349 } else if (type->isFunctionType()) {
3350 llvm::Value *amt = Builder.getInt32(amount);
3351
3352 if (CGF.getLangOpts().PointerOverflowDefined)
3353 value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
3354 else
3355 value =
3356 CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
3357 /*SignedIndices=*/false, isSubtraction,
3358 E->getExprLoc(), "incdec.funcptr");
3359
3360 // For everything else, we can just do a simple increment.
3361 } else {
3362 llvm::Value *amt = Builder.getInt32(amount);
3363 llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
3364 if (CGF.getLangOpts().PointerOverflowDefined)
3365 value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
3366 else
3367 value = CGF.EmitCheckedInBoundsGEP(
3368 elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
3369 E->getExprLoc(), "incdec.ptr");
3370 }
3371
3372 // Vector increment/decrement.
3373 } else if (type->isVectorType()) {
3374 if (type->hasIntegerRepresentation()) {
3375 llvm::Value *amt = llvm::ConstantInt::getSigned(value->getType(), amount);
3376
3377 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3378 } else {
3379 value = Builder.CreateFAdd(
3380 value,
3381 llvm::ConstantFP::get(value->getType(), amount),
3382 isInc ? "inc" : "dec");
3383 }
3384
3385 // Floating point.
3386 } else if (type->isRealFloatingType()) {
3387 // Add the inc/dec to the real part.
3388 llvm::Value *amt;
3389 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
3390
3391 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3392 // Another special case: half FP increment should be done via float
3394 value = Builder.CreateCall(
3395 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
3396 CGF.CGM.FloatTy),
3397 input, "incdec.conv");
3398 } else {
3399 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
3400 }
3401 }
3402
3403 if (value->getType()->isFloatTy())
3404 amt = llvm::ConstantFP::get(VMContext,
3405 llvm::APFloat(static_cast<float>(amount)));
3406 else if (value->getType()->isDoubleTy())
3407 amt = llvm::ConstantFP::get(VMContext,
3408 llvm::APFloat(static_cast<double>(amount)));
3409 else {
3410 // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
3411 // Convert from float.
3412 llvm::APFloat F(static_cast<float>(amount));
3413 bool ignored;
3414 const llvm::fltSemantics *FS;
3415 // Don't use getFloatTypeSemantics because Half isn't
3416 // necessarily represented using the "half" LLVM type.
3417 if (value->getType()->isFP128Ty())
3418 FS = &CGF.getTarget().getFloat128Format();
3419 else if (value->getType()->isHalfTy())
3420 FS = &CGF.getTarget().getHalfFormat();
3421 else if (value->getType()->isBFloatTy())
3422 FS = &CGF.getTarget().getBFloat16Format();
3423 else if (value->getType()->isPPC_FP128Ty())
3424 FS = &CGF.getTarget().getIbm128Format();
3425 else
3426 FS = &CGF.getTarget().getLongDoubleFormat();
3427 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
3428 amt = llvm::ConstantFP::get(VMContext, F);
3429 }
3430 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
3431
3432 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3434 value = Builder.CreateCall(
3435 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
3436 CGF.CGM.FloatTy),
3437 value, "incdec.conv");
3438 } else {
3439 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
3440 }
3441 }
3442
3443 // Fixed-point types.
3444 } else if (type->isFixedPointType()) {
3445 // Fixed-point types are tricky. In some cases, it isn't possible to
3446 // represent a 1 or a -1 in the type at all. Piggyback off of
3447 // EmitFixedPointBinOp to avoid having to reimplement saturation.
3448 BinOpInfo Info;
3449 Info.E = E;
3450 Info.Ty = E->getType();
3451 Info.Opcode = isInc ? BO_Add : BO_Sub;
3452 Info.LHS = value;
3453 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
3454 // If the type is signed, it's better to represent this as +(-1) or -(-1),
3455 // since -1 is guaranteed to be representable.
3456 if (type->isSignedFixedPointType()) {
3457 Info.Opcode = isInc ? BO_Sub : BO_Add;
3458 Info.RHS = Builder.CreateNeg(Info.RHS);
3459 }
3460 // Now, convert from our invented integer literal to the type of the unary
3461 // op. This will upscale and saturate if necessary. This value can become
3462 // undef in some cases.
3463 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3464 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
3465 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
3466 value = EmitFixedPointBinOp(Info);
3467
3468 // Objective-C pointer types.
3469 } else {
3470 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
3471
3472 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
3473 if (!isInc) size = -size;
3474 llvm::Value *sizeValue =
3475 llvm::ConstantInt::getSigned(CGF.SizeTy, size.getQuantity());
3476
3477 if (CGF.getLangOpts().PointerOverflowDefined)
3478 value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
3479 else
3480 value = CGF.EmitCheckedInBoundsGEP(
3481 CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
3482 E->getExprLoc(), "incdec.objptr");
3483 value = Builder.CreateBitCast(value, input->getType());
3484 }
3485
3486 if (atomicPHI) {
3487 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3488 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3489 auto Pair = CGF.EmitAtomicCompareExchange(
3490 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
3491 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
3492 llvm::Value *success = Pair.second;
3493 atomicPHI->addIncoming(old, curBlock);
3494 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3495 Builder.SetInsertPoint(contBB);
3496 return isPre ? value : input;
3497 }
3498
3499 // Store the updated result through the lvalue.
3500 if (LV.isBitField()) {
3501 Value *Src = Previous ? Previous : value;
3502 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
3503 CGF.EmitBitfieldConversionCheck(Src, SrcType, value, E->getType(),
3504 LV.getBitFieldInfo(), E->getExprLoc());
3505 } else
3506 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
3507
3508 // If this is a postinc, return the value read from memory, otherwise use the
3509 // updated value.
3510 return isPre ? value : input;
3511}
3512
3513
// Emit unary '+'. The operand may be computed in a wider "promotion" type
// (FP promotion, e.g. half/bfloat promoted to float — see
// EmitUnPromotedValue, which truncates with fptrunc); the result is then
// narrowed back to the expression's own type.
3514Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3515 QualType PromotionType) {
// If the caller did not supply a promotion type, derive the default one
// from the operand's type; a null promotionTy means "compute in place".
3516 QualType promotionTy = PromotionType.isNull()
3517 ? getPromotionType(E->getSubExpr()->getType())
3518 : PromotionType;
3519 Value *result = VisitPlus(E, promotionTy);
// When the value was computed in the promoted type, narrow it back down.
3520 if (result && !promotionTy.isNull())
3521 result = EmitUnPromotedValue(result, E->getType());
3522 return result;
3523}
3524
// Unary '+' is a value no-op: just emit the operand, in the promoted type
// when one was requested, otherwise as-is.
3525Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3526 QualType PromotionType) {
3527 // This differs from gcc, though, most likely due to a bug in gcc.
3528 TestAndClearIgnoreResultAssign();
3529 if (!PromotionType.isNull())
3530 return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3531 return Visit(E->getSubExpr());
3532}
3533
// Emit unary '-'. Mirrors VisitUnaryPlus: optionally compute in a promoted
// type, then narrow the result back to the expression's own type.
3534Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3535 QualType PromotionType) {
// Null promotionTy means no promotion is needed for this operand type.
3536 QualType promotionTy = PromotionType.isNull()
3537 ? getPromotionType(E->getSubExpr()->getType())
3538 : PromotionType;
3539 Value *result = VisitMinus(E, promotionTy);
// Truncate back to the source type when we computed in the wider type.
3540 if (result && !promotionTy.isNull())
3541 result = EmitUnPromotedValue(result, E->getType());
3542 return result;
3543}
3544
// Emit the negation itself. Floating-point values get a real 'fneg'
// instruction; integers are lowered as (0 - x) through EmitSub so that all
// of the overflow/sanitizer machinery for subtraction is reused.
3545Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3546 QualType PromotionType) {
3547 TestAndClearIgnoreResultAssign();
3548 Value *Op;
3549 if (!PromotionType.isNull())
3550 Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3551 else
3552 Op = Visit(E->getSubExpr());
3553
3554 // Generate a unary FNeg for FP ops.
3555 if (Op->getType()->isFPOrFPVectorTy())
3556 return Builder.CreateFNeg(Op, "fneg");
3557
3558 // Emit unary minus with EmitSub so we handle overflow cases etc.
// Build a synthetic BinOpInfo for "0 - Op" carrying the expression's type,
// FP features and source expression for diagnostics/sanitizers.
3559 BinOpInfo BinOp;
3560 BinOp.RHS = Op;
3561 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
3562 BinOp.Ty = E->getType();
3563 BinOp.Opcode = BO_Sub;
3564 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3565 BinOp.E = E;
3566 return EmitSub(BinOp);
3567}
3568
// Emit bitwise complement '~' as a single LLVM 'not' (xor with all-ones).
3569Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3570 TestAndClearIgnoreResultAssign();
3571 Value *Op = Visit(E->getSubExpr());
3572 return Builder.CreateNot(Op, "not");
3573}
3574
// Emit logical '!'. For generic vectors the result is element-wise
// (compare-equal against zero, then sign-extend so each lane is all-ones or
// all-zeros). For scalars, evaluate the operand as an i1 boolean, invert it,
// and zero-extend to the expression's result type (0 or 1).
3575Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
3576 // Perform vector logical not on comparison with zero vector.
3577 if (E->getType()->isVectorType() &&
3578 E->getType()->castAs<VectorType>()->getVectorKind() ==
3579 VectorKind::Generic) {
3580 Value *Oper = Visit(E->getSubExpr());
3581 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
3582 Value *Result;
3583 if (Oper->getType()->isFPOrFPVectorTy()) {
// Scope the pragma-FP / fast-math state to this comparison only.
3584 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
3585 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
// Ordered-equal: NaN compares false, so !NaN yields false per lane.
3586 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
3587 } else
3588 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
// Sign-extend the i1 lanes to the full element width (-1 / 0).
3589 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
3590 }
3591
3592 // Compare operand to zero.
3593 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
3594
3595 // Invert value.
3596 // TODO: Could dynamically modify easy computations here. For example, if
3597 // the operand is an icmp ne, turn into icmp eq.
3598 BoolVal = Builder.CreateNot(BoolVal, "lnot");
3599
3600 // ZExt result to the expr type.
3601 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
3602}
3603
// Emit __builtin_offsetof. Fast path: if the whole expression folds to an
// integer constant, return that. Otherwise walk the component list (array
// subscripts, field designators, base-class hops), accumulating a byte
// offset; array indices may be runtime values, which is why this cannot
// always constant-fold.
3604Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
3605 // Try folding the offsetof to a constant.
3606 Expr::EvalResult EVResult;
3607 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
3608 llvm::APSInt Value = EVResult.Val.getInt();
3609 return Builder.getInt(Value);
3610 }
3611
3612 // Loop over the components of the offsetof to compute the value.
3613 unsigned n = E->getNumComponents();
3614 llvm::Type* ResultType = ConvertType(E->getType());
3615 llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
// CurrentType tracks the type we are "inside" as components are applied.
3616 QualType CurrentType = E->getTypeSourceInfo()->getType();
3617 for (unsigned i = 0; i != n; ++i) {
3618 OffsetOfNode ON = E->getComponent(i);
3619 llvm::Value *Offset = nullptr;
3620 switch (ON.getKind()) {
3621 case OffsetOfNode::Array: {
3622 // Compute the index
3623 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
3624 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
// Widen/narrow the index to the offsetof result width, respecting
// the index expression's signedness.
3625 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
3626 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
3627
3628 // Save the element type
3629 CurrentType =
3630 CGF.getContext().getAsArrayType(CurrentType)->getElementType();
3631
3632 // Compute the element size
3633 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
3634 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
3635
3636 // Multiply out to compute the result
3637 Offset = Builder.CreateMul(Idx, ElemSize);
3638 break;
3639 }
3640
3641 case OffsetOfNode::Field: {
3642 FieldDecl *MemberDecl = ON.getField();
3643 auto *RD = CurrentType->castAsRecordDecl();
3644 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
3645
3646 // Compute the index of the field in its parent.
3647 unsigned i = 0;
3648 // FIXME: It would be nice if we didn't have to loop here!
3649 for (RecordDecl::field_iterator Field = RD->field_begin(),
3650 FieldEnd = RD->field_end();
3651 Field != FieldEnd; ++Field, ++i) {
3652 if (*Field == MemberDecl)
3653 break;
3654 }
3655 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
3656
3657 // Compute the offset to the field
// Field offsets in the record layout are in bits; divide by the char
// width to get a byte offset.
3658 int64_t OffsetInt = RL.getFieldOffset(i) /
3659 CGF.getContext().getCharWidth();
3660 Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
3661
3662 // Save the element type.
3663 CurrentType = MemberDecl->getType();
3664 break;
3665 }
3666
// NOTE(review): the doxygen listing dropped original line 3667 here —
// presumably the case label preceding this unreachable (a dependent
// offsetof component); confirm against the upstream source.
3668 llvm_unreachable("dependent __builtin_offsetof");
3669
3670 case OffsetOfNode::Base: {
// Virtual bases have no statically known offset; report and skip.
3671 if (ON.getBase()->isVirtual()) {
3672 CGF.ErrorUnsupported(E, "virtual base in offsetof");
3673 continue;
3674 }
3675
3676 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(
3677 CurrentType->castAsCanonical<RecordType>()->getDecl());
3678
3679 // Save the element type.
3680 CurrentType = ON.getBase()->getType();
3681
3682 // Compute the offset to the base.
3683 auto *BaseRD = CurrentType->castAsCXXRecordDecl();
3684 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
3685 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
3686 break;
3687 }
3688 }
3689 Result = Builder.CreateAdd(Result, Offset);
3690 }
3691 return Result;
3692}
3693
3694/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
3695/// argument of the sizeof expression as an integer.
3696Value *
3697ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
3698 const UnaryExprOrTypeTraitExpr *E) {
3699 QualType TypeToSize = E->getTypeOfArgument();
// sizeof / __datasizeof / _Countof of a variable-length array is the only
// case with a runtime (non-constant) result; everything else falls through
// to the constant-folding path at the bottom.
3700 if (auto Kind = E->getKind();
3701 Kind == UETT_SizeOf || Kind == UETT_DataSizeOf || Kind == UETT_CountOf) {
3702 if (const VariableArrayType *VAT =
3703 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
3704 // For _Countof, we only want to evaluate if the extent is actually
3705 // variable as opposed to a multi-dimensional array whose extent is
3706 // constant but whose element type is variable.
3707 bool EvaluateExtent = true;
3708 if (Kind == UETT_CountOf && VAT->getElementType()->isArrayType()) {
3709 EvaluateExtent =
3710 !VAT->getSizeExpr()->isIntegerConstantExpr(CGF.getContext());
3711 }
3712 if (EvaluateExtent) {
3713 if (E->isArgumentType()) {
3714 // sizeof(type) - make sure to emit the VLA size.
3715 CGF.EmitVariablyModifiedType(TypeToSize);
3716 } else {
3717 // C99 6.5.3.4p2: If the argument is an expression of type
3718 // VLA, it is evaluated.
// NOTE(review): the listing dropped original line 3719 here — the
// statement that actually evaluates the argument expression; confirm
// against the upstream source.
3720 }
3721
3722 // For _Countof, we just want to return the size of a single dimension.
3723 if (Kind == UETT_CountOf)
3724 return CGF.getVLAElements1D(VAT).NumElts;
3725
3726 // For sizeof and __datasizeof, we need to scale the number of elements
3727 // by the size of the array element type.
3728 auto VlaSize = CGF.getVLASize(VAT);
3729
3730 // Scale the number of non-VLA elements by the non-VLA element size.
3731 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
// NUW: a VLA's total size cannot wrap an unsigned size_t product.
3732 if (!eltSize.isOne())
3733 return CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize),
3734 VlaSize.NumElts);
3735 return VlaSize.NumElts;
3736 }
3737 }
3738 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
3739 auto Alignment =
3740 CGF.getContext()
// NOTE(review): the listing dropped original lines 3741-3742 of this
// call chain (the part that queries the OpenMP SIMD alignment before
// .getQuantity()); confirm against the upstream source.
3743 .getQuantity();
3744 return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
3745 } else if (E->getKind() == UETT_VectorElements) {
// __builtin_vectorelements: element count of the vector type, which for
// scalable vectors is a runtime value (vscale-based).
3746 auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));
3747 return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());
3748 }
3749
3750 // If this isn't sizeof(vla), the result must be constant; use the constant
3751 // folding logic so we don't have to duplicate it here.
3752 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
3753}
3754
// Emit __real. Wrapper that selects the promotion type, delegates to
// VisitReal, then narrows a promoted result back to the expression type.
3755Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3756 QualType PromotionType) {
3757 QualType promotionTy = PromotionType.isNull()
3758 ? getPromotionType(E->getSubExpr()->getType())
3759 : PromotionType;
3760 Value *result = VisitReal(E, promotionTy);
// Only un-promote when a promotion was actually performed.
3761 if (result && !promotionTy.isNull())
3762 result = EmitUnPromotedValue(result, E->getType());
3763 return result;
3764}
3765
// Emit __real: project the real component of a complex operand, or pass a
// non-complex scalar through unchanged (optionally promoted).
3766Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
3767 QualType PromotionType) {
3768 Expr *Op = E->getSubExpr();
3769 if (Op->getType()->isAnyComplexType()) {
3770 // If it's an l-value, load through the appropriate subobject l-value.
3771 // Note that we have to ask E because Op might be an l-value that
3772 // this won't work for, e.g. an Obj-C property.
3773 if (E->isGLValue()) {
3774 if (!PromotionType.isNull()) {
// NOTE(review): the listing dropped original line 3775 — the
// declaration of 'result' (the complex-expression emission whose
// arguments follow); confirm against the upstream source.
3776 Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
// The promotion type for a complex operand must itself be complex.
3777 PromotionType = PromotionType->isAnyComplexType()
3778 ? PromotionType
3779 : CGF.getContext().getComplexType(PromotionType);
3780 return result.first ? CGF.EmitPromotedValue(result, PromotionType).first
3781 : result.first;
3782 }
3783
3784 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3785 .getScalarVal();
3786 }
3787 // Otherwise, calculate and project.
3788 return CGF.EmitComplexExpr(Op, false, true).first;
3789 }
3790
// __real on a non-complex value is the value itself.
3791 if (!PromotionType.isNull())
3792 return CGF.EmitPromotedScalarExpr(Op, PromotionType);
3793 return Visit(Op);
3794}
3795
// Emit __imag. Wrapper that selects the promotion type, delegates to
// VisitImag, then narrows a promoted result back to the expression type.
3796Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3797 QualType PromotionType) {
3798 QualType promotionTy = PromotionType.isNull()
3799 ? getPromotionType(E->getSubExpr()->getType())
3800 : PromotionType;
3801 Value *result = VisitImag(E, promotionTy);
// Only un-promote when a promotion was actually performed.
3802 if (result && !promotionTy.isNull())
3803 result = EmitUnPromotedValue(result, E->getType());
3804 return result;
3805}
3806
// Emit __imag: project the imaginary component of a complex operand. For a
// non-complex operand the result is zero, but the operand must still be
// evaluated for its side effects.
3807Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
3808 QualType PromotionType) {
3809 Expr *Op = E->getSubExpr();
3810 if (Op->getType()->isAnyComplexType()) {
3811 // If it's an l-value, load through the appropriate subobject l-value.
3812 // Note that we have to ask E because Op might be an l-value that
3813 // this won't work for, e.g. an Obj-C property.
3814 if (Op->isGLValue()) {
3815 if (!PromotionType.isNull()) {
// NOTE(review): the listing dropped original line 3816 — the
// declaration of 'result' (the complex-expression emission whose
// arguments follow); confirm against the upstream source.
3817 Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
// The promotion type for a complex operand must itself be complex.
3818 PromotionType = PromotionType->isAnyComplexType()
3819 ? PromotionType
3820 : CGF.getContext().getComplexType(PromotionType);
3821 return result.second
3822 ? CGF.EmitPromotedValue(result, PromotionType).second
3823 : result.second;
3824 }
3825
3826 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3827 .getScalarVal();
3828 }
3829 // Otherwise, calculate and project.
3830 return CGF.EmitComplexExpr(Op, true, false).second;
3831 }
3832
3833 // __imag on a scalar returns zero. Emit the subexpr to ensure side
3834 // effects are evaluated, but not the actual value.
3835 if (Op->isGLValue())
3836 CGF.EmitLValue(Op);
3837 else if (!PromotionType.isNull())
3838 CGF.EmitPromotedScalarExpr(Op, PromotionType);
3839 else
3840 CGF.EmitScalarExpr(Op, true);
// The zero must be produced in the promoted type when one was requested,
// so the caller's un-promotion step sees a consistently typed value.
3841 if (!PromotionType.isNull())
3842 return llvm::Constant::getNullValue(ConvertType(PromotionType));
3843 return llvm::Constant::getNullValue(ConvertType(E->getType()));
3844}
3845
3846//===----------------------------------------------------------------------===//
3847// Binary Operators
3848//===----------------------------------------------------------------------===//
3849
// Widen a floating-point value to its promoted computation type via fpext
// (used for types like _Float16/__bf16 that are computed in float).
3850Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3851 QualType PromotionType) {
3852 return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");
3853}
3854
// Inverse of EmitPromotedValue: truncate a value computed in the promoted
// floating-point type back down to the expression's own type via fptrunc.
3855Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3856 QualType ExprType) {
3857 return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType), "unpromotion");
3858}
3859
// Emit an expression computed in a promoted type. Arithmetic binary
// operators (+,-,*,/) and the sign/projection unary operators are pushed
// the promotion down into, so whole subtrees are evaluated wide; anything
// else is emitted normally and then promoted (or un-promoted) at the edge.
3860Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3861 E = E->IgnoreParens();
3862 if (auto BO = dyn_cast<BinaryOperator>(E)) {
3863 switch (BO->getOpcode()) {
3864#define HANDLE_BINOP(OP) \
3865 case BO_##OP: \
3866 return Emit##OP(EmitBinOps(BO, PromotionType));
3867 HANDLE_BINOP(Add)
3868 HANDLE_BINOP(Sub)
3869 HANDLE_BINOP(Mul)
3870 HANDLE_BINOP(Div)
3871#undef HANDLE_BINOP
3872 default:
3873 break;
3874 }
3875 } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
3876 switch (UO->getOpcode()) {
3877 case UO_Imag:
3878 return VisitImag(UO, PromotionType);
3879 case UO_Real:
3880 return VisitReal(UO, PromotionType);
3881 case UO_Minus:
3882 return VisitMinus(UO, PromotionType);
3883 case UO_Plus:
3884 return VisitPlus(UO, PromotionType);
3885 default:
3886 break;
3887 }
3888 }
// Fallback: emit normally, then adjust the width at this boundary. A null
// PromotionType here means the caller wants the unpromoted type back.
3889 auto result = Visit(const_cast<Expr *>(E));
3890 if (result) {
3891 if (!PromotionType.isNull())
3892 return EmitPromotedValue(result, PromotionType);
3893 else
3894 return EmitUnPromotedValue(result, E->getType());
3895 }
3896 return result;
3897}
3898
// Collect both operands of a binary operator (each emitted in the promoted
// type when one is given) plus the metadata the Emit* helpers need:
// result type, opcode, FP features, and the originating expression.
3899BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
3900 QualType PromotionType) {
3901 TestAndClearIgnoreResultAssign();
3902 BinOpInfo Result;
// LHS is emitted before RHS, preserving the usual emission order.
3903 Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);
3904 Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);
// The computation type is the promoted type when promotion is in effect.
3905 if (!PromotionType.isNull())
3906 Result.Ty = PromotionType;
3907 else
3908 Result.Ty = E->getType();
3909 Result.Opcode = E->getOpcode();
3910 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3911 Result.E = E;
3912 return Result;
3913}
3914
// Emit a compound assignment (+=, -=, etc.): evaluate RHS, load and convert
// the LHS to the computation type, run the operation via *Func, convert
// back, and store. Atomic LHS gets either a single atomicrmw (when the
// opcode maps to one) or a compare-exchange retry loop. Returns the LHS
// lvalue; the operation's value is passed out through 'Result'.
3915LValue ScalarExprEmitter::EmitCompoundAssignLValue(
3916 const CompoundAssignOperator *E,
3917 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
3918 Value *&Result) {
3919 QualType LHSTy = E->getLHS()->getType();
3920 BinOpInfo OpInfo;
3921
// NOTE(review): the listing dropped original lines 3922-3923 here —
// upstream has an early-out for complex computation result types; confirm
// against the upstream source.
3924
3925 // Emit the RHS first. __block variables need to have the rhs evaluated
3926 // first, plus this should improve codegen a little.
3927
// The computation-result type (possibly promoted) drives the operation;
// fall back to the unpromoted computation result type when no promotion.
3928 QualType PromotionTypeCR;
3929 PromotionTypeCR = getPromotionType(E->getComputationResultType());
3930 if (PromotionTypeCR.isNull())
3931 PromotionTypeCR = E->getComputationResultType();
3932 QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
3933 QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
3934 if (!PromotionTypeRHS.isNull())
3935 OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);
3936 else
3937 OpInfo.RHS = Visit(E->getRHS());
3938 OpInfo.Ty = PromotionTypeCR;
3939 OpInfo.Opcode = E->getOpcode();
3940 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3941 OpInfo.E = E;
3942 // Load/convert the LHS.
3943 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3944
3945 llvm::PHINode *atomicPHI = nullptr;
3946 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3947 QualType type = atomicTy->getValueType();
// Fast path: use a single atomicrmw when the value is a plain integer
// and no sanitizer/trapping-overflow handling is required. The excluded
// cases need the result checked, which atomicrmw cannot provide.
3948 if (!type->isBooleanType() && type->isIntegerType() &&
3949 !(type->isUnsignedIntegerType() &&
3950 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3951 CGF.getLangOpts().getSignedOverflowBehavior() !=
3952 LangOptions::SOB_Trapping) {
3953 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3954 llvm::Instruction::BinaryOps Op;
3955 switch (OpInfo.Opcode) {
3956 // We don't have atomicrmw operands for *, %, /, <<, >>
3957 case BO_MulAssign: case BO_DivAssign:
3958 case BO_RemAssign:
3959 case BO_ShlAssign:
3960 case BO_ShrAssign:
3961 break;
3962 case BO_AddAssign:
3963 AtomicOp = llvm::AtomicRMWInst::Add;
3964 Op = llvm::Instruction::Add;
3965 break;
3966 case BO_SubAssign:
3967 AtomicOp = llvm::AtomicRMWInst::Sub;
3968 Op = llvm::Instruction::Sub;
3969 break;
3970 case BO_AndAssign:
3971 AtomicOp = llvm::AtomicRMWInst::And;
3972 Op = llvm::Instruction::And;
3973 break;
3974 case BO_XorAssign:
3975 AtomicOp = llvm::AtomicRMWInst::Xor;
3976 Op = llvm::Instruction::Xor;
3977 break;
3978 case BO_OrAssign:
3979 AtomicOp = llvm::AtomicRMWInst::Or;
3980 Op = llvm::Instruction::Or;
3981 break;
3982 default:
3983 llvm_unreachable("Invalid compound assignment type");
3984 }
3985 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
// Convert the RHS to the LHS's memory representation for atomicrmw.
3986 llvm::Value *Amt = CGF.EmitToMemory(
3987 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
3988 E->getExprLoc()),
3989 LHSTy);
3990
3991 llvm::AtomicRMWInst *OldVal =
3992 CGF.emitAtomicRMWInst(AtomicOp, LHSLV.getAddress(), Amt);
3993
3994 // Since operation is atomic, the result type is guaranteed to be the
3995 // same as the input in LLVM terms.
// atomicrmw returns the OLD value; recompute the new value for the
// expression result.
3996 Result = Builder.CreateBinOp(Op, OldVal, Amt);
3997 return LHSLV;
3998 }
3999 }
4000 // FIXME: For floating point types, we should be saving and restoring the
4001 // floating point environment in the loop.
// Slow path: compare-exchange loop. Load the initial value, branch into
// the op block, and let a PHI carry the latest observed value.
4002 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
4003 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
4004 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
4005 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
4006 Builder.CreateBr(opBB);
4007 Builder.SetInsertPoint(opBB);
4008 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
4009 atomicPHI->addIncoming(OpInfo.LHS, startBB);
4010 OpInfo.LHS = atomicPHI;
4011 }
4012 else
4013 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
4014
// From here on, FP operations honor the expression's FP pragma state.
4015 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
4016 SourceLocation Loc = E->getExprLoc();
4017 if (!PromotionTypeLHS.isNull())
4018 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
4019 E->getExprLoc())
4020 else
4021 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
4022 E->getComputationLHSType(), Loc);
4023
4024 // Expand the binary operator.
4025 Result = (this->*Func)(OpInfo);
4026
4027 // Convert the result back to the LHS type,
4028 // potentially with Implicit Conversion sanitizer check.
4029 // If LHSLV is a bitfield, use default ScalarConversionOpts
4030 // to avoid emit any implicit integer checks.
4031 Value *Previous = nullptr;
4032 if (LHSLV.isBitField()) {
// Keep the pre-conversion value so the bitfield conversion check can
// compare against it after the store.
4033 Previous = Result;
4034 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc);
4035 } else
4036 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
4037 ScalarConversionOpts(CGF.SanOpts));
4038
4039 if (atomicPHI) {
// Close the CAS loop: try to publish 'Result'; on failure feed the
// freshly observed value back into the PHI and retry.
4040 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
4041 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
4042 auto Pair = CGF.EmitAtomicCompareExchange(
4043 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
4044 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
4045 llvm::Value *success = Pair.second;
4046 atomicPHI->addIncoming(old, curBlock);
4047 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
4048 Builder.SetInsertPoint(contBB);
4049 return LHSLV;
4050 }
4051
4052 // Store the result value into the LHS lvalue. Bit-fields are handled
4053 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
4054 // 'An assignment expression has the value of the left operand after the
4055 // assignment...'.
4056 if (LHSLV.isBitField()) {
4057 Value *Src = Previous ? Previous : Result;
4058 QualType SrcType = E->getRHS()->getType();
4059 QualType DstType = E->getLHS()->getType();
// NOTE(review): the listing dropped original line 4060 here — the
// bitfield store that precedes this conversion check; confirm upstream.
4061 CGF.EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
4062 LHSLV.getBitFieldInfo(), E->getExprLoc());
4063 } else
// NOTE(review): the listing dropped original line 4064 here — the
// plain store through the LHS lvalue; confirm upstream.
4065
4066 if (CGF.getLangOpts().OpenMP)
// NOTE(review): the listing dropped original line 4067 here — the OpenMP
// runtime call taking E->getLHS() as its trailing argument; confirm
// upstream.
4068 E->getLHS());
4069 return LHSLV;
4070}
4071
// Emit a compound assignment and produce its value. The lvalue work is
// delegated to EmitCompoundAssignLValue; this wrapper implements the
// language-dependent result rules (C: the assigned rvalue; C++: reload
// through the lvalue only when it is volatile).
4072Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
4073 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
4074 bool Ignore = TestAndClearIgnoreResultAssign();
4075 Value *RHS = nullptr;
4076 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
4077
4078 // If the result is clearly ignored, return now.
4079 if (Ignore)
4080 return nullptr;
4081
4082 // The result of an assignment in C is the assigned r-value.
4083 if (!CGF.getLangOpts().CPlusPlus)
4084 return RHS;
4085
4086 // If the lvalue is non-volatile, return the computed value of the assignment.
4087 if (!LHS.isVolatileQualified())
4088 return RHS;
4089
4090 // Otherwise, reload the value.
4091 return EmitLoadOfLValue(LHS, E->getExprLoc());
4092}
4093
// Emit the UBSan checks for integer '/' and '%': divisor != 0, and for
// signed types the INT_MIN / -1 overflow case. The collected predicates
// are handed to EmitBinOpCheck, which emits the sanitizer branch/handler.
4094void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
4095 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
4096 SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 2>
4097 Checks;
4098
// Check 1: the divisor must be non-zero.
4099 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
4100 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
4101 SanitizerKind::SO_IntegerDivideByZero));
4102 }
4103
// Check 2: signed INT_MIN / -1 overflows. Skipped when the operation was
// widened (cannot overflow in the wider type) or statically cannot
// overflow.
4104 const auto *BO = cast<BinaryOperator>(Ops.E);
4105 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
4106 Ops.Ty->hasSignedIntegerRepresentation() &&
4107 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
4108 Ops.mayHaveIntegerOverflow()) {
4109 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
4110
4111 llvm::Value *IntMin =
4112 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
4113 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
4114
// Safe iff NOT (LHS == INT_MIN && RHS == -1).
4115 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
4116 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
4117 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
4118 Checks.push_back(
4119 std::make_pair(NotOverflow, SanitizerKind::SO_SignedIntegerOverflow));
4120 }
4121
4122 if (Checks.size() > 0)
4123 EmitBinOpCheck(Checks, Ops);
4124}
4125
4126Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
4127 {
4128 SanitizerDebugLocation SanScope(&CGF,
4129 {SanitizerKind::SO_IntegerDivideByZero,
4130 SanitizerKind::SO_SignedIntegerOverflow,
4131 SanitizerKind::SO_FloatDivideByZero},
4132 SanitizerHandler::DivremOverflow);
4133 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
4134 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
4135 Ops.Ty->isIntegerType() &&
4136 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4137 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4138 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
4139 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
4140 Ops.Ty->isRealFloatingType() &&
4141 Ops.mayHaveFloatDivisionByZero()) {
4142 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4143 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
4144 EmitBinOpCheck(
4145 std::make_pair(NonZero, SanitizerKind::SO_FloatDivideByZero), Ops);
4146 }
4147 }
4148
4149 if (Ops.Ty->isConstantMatrixType()) {
4150 llvm::MatrixBuilder MB(Builder);
4151 // We need to check the types of the operands of the operator to get the
4152 // correct matrix dimensions.
4153 auto *BO = cast<BinaryOperator>(Ops.E);
4154 (void)BO;
4155 assert(
4157 "first operand must be a matrix");
4158 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
4159 "second operand must be an arithmetic type");
4160 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
4161 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
4162 Ops.Ty->hasUnsignedIntegerRepresentation());
4163 }
4164
4165 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
4166 llvm::Value *Val;
4167 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
4168 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
4169 CGF.SetDivFPAccuracy(Val);
4170 return Val;
4171 }
4172 else if (Ops.isFixedPointOp())
4173 return EmitFixedPointBinOp(Ops);
4174 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
4175 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
4176 else
4177 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
4178}
4179
4180Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
4181 // Rem in C can't be a floating point type: C99 6.5.5p2.
4182 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
4183 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
4184 Ops.Ty->isIntegerType() &&
4185 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4186 SanitizerDebugLocation SanScope(&CGF,
4187 {SanitizerKind::SO_IntegerDivideByZero,
4188 SanitizerKind::SO_SignedIntegerOverflow},
4189 SanitizerHandler::DivremOverflow);
4190 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4191 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
4192 }
4193
4194 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4195 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
4196
4197 if (CGF.getLangOpts().HLSL && Ops.Ty->hasFloatingRepresentation())
4198 return Builder.CreateFRem(Ops.LHS, Ops.RHS, "rem");
4199
4200 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
4201}
4202
4203Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
4204 unsigned IID;
4205 unsigned OpID = 0;
4206 SanitizerHandler OverflowKind;
4207
4208 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
4209 switch (Ops.Opcode) {
4210 case BO_Add:
4211 case BO_AddAssign:
4212 OpID = 1;
4213 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
4214 llvm::Intrinsic::uadd_with_overflow;
4215 OverflowKind = SanitizerHandler::AddOverflow;
4216 break;
4217 case BO_Sub:
4218 case BO_SubAssign:
4219 OpID = 2;
4220 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
4221 llvm::Intrinsic::usub_with_overflow;
4222 OverflowKind = SanitizerHandler::SubOverflow;
4223 break;
4224 case BO_Mul:
4225 case BO_MulAssign:
4226 OpID = 3;
4227 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
4228 llvm::Intrinsic::umul_with_overflow;
4229 OverflowKind = SanitizerHandler::MulOverflow;
4230 break;
4231 default:
4232 llvm_unreachable("Unsupported operation for overflow detection");
4233 }
4234 OpID <<= 1;
4235 if (isSigned)
4236 OpID |= 1;
4237
4238 SanitizerDebugLocation SanScope(&CGF,
4239 {SanitizerKind::SO_SignedIntegerOverflow,
4240 SanitizerKind::SO_UnsignedIntegerOverflow},
4241 OverflowKind);
4242 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
4243
4244 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
4245
4246 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
4247 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
4248 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
4249
4250 // Handle overflow with llvm.trap if no custom handler has been specified.
4251 const std::string *handlerName =
4253 if (handlerName->empty()) {
4254 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
4255 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
4256 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
4257 llvm::Value *NotOverflow = Builder.CreateNot(overflow);
4259 isSigned ? SanitizerKind::SO_SignedIntegerOverflow
4260 : SanitizerKind::SO_UnsignedIntegerOverflow;
4261 EmitBinOpCheck(std::make_pair(NotOverflow, Ordinal), Ops);
4262 } else
4263 CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
4264 return result;
4265 }
4266
4267 // Branch in case of overflow.
4268 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
4269 llvm::BasicBlock *continueBB =
4270 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
4271 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
4272
4273 Builder.CreateCondBr(overflow, overflowBB, continueBB);
4274
4275 // If an overflow handler is set, then we want to call it and then use its
4276 // result, if it returns.
4277 Builder.SetInsertPoint(overflowBB);
4278
4279 // Get the overflow handler.
4280 llvm::Type *Int8Ty = CGF.Int8Ty;
4281 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
4282 llvm::FunctionType *handlerTy =
4283 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
4284 llvm::FunctionCallee handler =
4285 CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
4286
4287 // Sign extend the args to 64-bit, so that we can use the same handler for
4288 // all types of overflow.
4289 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
4290 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
4291
4292 // Call the handler with the two arguments, the operation, and the size of
4293 // the result.
4294 llvm::Value *handlerArgs[] = {
4295 lhs,
4296 rhs,
4297 Builder.getInt8(OpID),
4298 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
4299 };
4300 llvm::Value *handlerResult =
4301 CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
4302
4303 // Truncate the result back to the desired size.
4304 handlerResult = Builder.CreateTrunc(handlerResult, opTy);
4305 Builder.CreateBr(continueBB);
4306
4307 Builder.SetInsertPoint(continueBB);
4308 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
4309 phi->addIncoming(result, initialBB);
4310 phi->addIncoming(handlerResult, overflowBB);
4311
4312 return phi;
4313}
4314
4315/// BO_Add/BO_Sub are handled by EmitPointerWithAlignment to preserve alignment
4316/// information.
4317/// This function is used for BO_AddAssign/BO_SubAssign.
4318static Value *emitPointerArithmetic(CodeGenFunction &CGF, const BinOpInfo &op,
4319 bool isSubtraction) {
4320 // Must have binary (not unary) expr here. Unary pointer
4321 // increment/decrement doesn't use this path.
4323
4324 Value *pointer = op.LHS;
4325 Expr *pointerOperand = expr->getLHS();
4326 Value *index = op.RHS;
4327 Expr *indexOperand = expr->getRHS();
4328
4329 // In a subtraction, the LHS is always the pointer.
4330 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
4331 std::swap(pointer, index);
4332 std::swap(pointerOperand, indexOperand);
4333 }
4334
4335 return CGF.EmitPointerArithmetic(expr, pointerOperand, pointer, indexOperand,
4336 index, isSubtraction);
4337}
4338
4339/// Emit pointer + index arithmetic.
4341 const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer,
4342 Expr *indexOperand, llvm::Value *index, bool isSubtraction) {
4343 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
4344
4345 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
4346 auto &DL = CGM.getDataLayout();
4347 auto *PtrTy = cast<llvm::PointerType>(pointer->getType());
4348
4349 // Some versions of glibc and gcc use idioms (particularly in their malloc
4350 // routines) that add a pointer-sized integer (known to be a pointer value)
4351 // to a null pointer in order to cast the value back to an integer or as
4352 // part of a pointer alignment algorithm. This is undefined behavior, but
4353 // we'd like to be able to compile programs that use it.
4354 //
4355 // Normally, we'd generate a GEP with a null-pointer base here in response
4356 // to that code, but it's also UB to dereference a pointer created that
4357 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
4358 // generate a direct cast of the integer value to a pointer.
4359 //
4360 // The idiom (p = nullptr + N) is not met if any of the following are true:
4361 //
4362 // The operation is subtraction.
4363 // The index is not pointer-sized.
4364 // The pointer type is not byte-sized.
4365 //
4366 // Note that we do not suppress the pointer overflow check in this case.
4368 getContext(), BO->getOpcode(), pointerOperand, indexOperand)) {
4369 llvm::Value *Ptr = Builder.CreateIntToPtr(index, pointer->getType());
4370 if (getLangOpts().PointerOverflowDefined ||
4371 !SanOpts.has(SanitizerKind::PointerOverflow) ||
4372 NullPointerIsDefined(Builder.GetInsertBlock()->getParent(),
4373 PtrTy->getPointerAddressSpace()))
4374 return Ptr;
4375 // The inbounds GEP of null is valid iff the index is zero.
4376 auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
4377 auto CheckHandler = SanitizerHandler::PointerOverflow;
4378 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
4379 llvm::Value *IsZeroIndex = Builder.CreateIsNull(index);
4380 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(BO->getExprLoc())};
4381 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
4382 llvm::Value *IntPtr = llvm::Constant::getNullValue(IntPtrTy);
4383 llvm::Value *ComputedGEP = Builder.CreateZExtOrTrunc(index, IntPtrTy);
4384 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
4385 EmitCheck({{IsZeroIndex, CheckOrdinal}}, CheckHandler, StaticArgs,
4386 DynamicArgs);
4387 return Ptr;
4388 }
4389
4390 if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
4391 // Zero-extend or sign-extend the pointer value according to
4392 // whether the index is signed or not.
4393 index = Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
4394 "idx.ext");
4395 }
4396
4397 // If this is subtraction, negate the index.
4398 if (isSubtraction)
4399 index = Builder.CreateNeg(index, "idx.neg");
4400
4401 if (SanOpts.has(SanitizerKind::ArrayBounds))
4402 EmitBoundsCheck(BO, pointerOperand, index, indexOperand->getType(),
4403 /*Accessed*/ false);
4404
4405 const PointerType *pointerType =
4406 pointerOperand->getType()->getAs<PointerType>();
4407 if (!pointerType) {
4408 QualType objectType = pointerOperand->getType()
4410 ->getPointeeType();
4411 llvm::Value *objectSize =
4412 CGM.getSize(getContext().getTypeSizeInChars(objectType));
4413
4414 index = Builder.CreateMul(index, objectSize);
4415
4416 llvm::Value *result = Builder.CreateGEP(Int8Ty, pointer, index, "add.ptr");
4417 return Builder.CreateBitCast(result, pointer->getType());
4418 }
4419
4420 QualType elementType = pointerType->getPointeeType();
4421 if (const VariableArrayType *vla =
4422 getContext().getAsVariableArrayType(elementType)) {
4423 // The element count here is the total number of non-VLA elements.
4424 llvm::Value *numElements = getVLASize(vla).NumElts;
4425
4426 // Effectively, the multiply by the VLA size is part of the GEP.
4427 // GEP indexes are signed, and scaling an index isn't permitted to
4428 // signed-overflow, so we use the same semantics for our explicit
4429 // multiply. We suppress this if overflow is not undefined behavior.
4430 llvm::Type *elemTy = ConvertTypeForMem(vla->getElementType());
4431 if (getLangOpts().PointerOverflowDefined) {
4432 index = Builder.CreateMul(index, numElements, "vla.index");
4433 pointer = Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
4434 } else {
4435 index = Builder.CreateNSWMul(index, numElements, "vla.index");
4436 pointer =
4437 EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
4438 isSubtraction, BO->getExprLoc(), "add.ptr");
4439 }
4440 return pointer;
4441 }
4442
4443 // Explicitly handle GNU void* and function pointer arithmetic extensions. The
4444 // GNU void* casts amount to no-ops since our void* type is i8*, but this is
4445 // future proof.
4446 llvm::Type *elemTy;
4447 if (elementType->isVoidType() || elementType->isFunctionType())
4448 elemTy = Int8Ty;
4449 else
4450 elemTy = ConvertTypeForMem(elementType);
4451
4452 if (getLangOpts().PointerOverflowDefined)
4453 return Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
4454
4455 return EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned, isSubtraction,
4456 BO->getExprLoc(), "add.ptr");
4457}
4458
4459// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
4460// Addend. Use negMul and negAdd to negate the first operand of the Mul or
4461// the add operand respectively. This allows fmuladd to represent a*b-c, or
4462// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
4463// efficient operations.
4464static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
4465 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4466 bool negMul, bool negAdd) {
4467 Value *MulOp0 = MulOp->getOperand(0);
4468 Value *MulOp1 = MulOp->getOperand(1);
4469 if (negMul)
4470 MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
4471 if (negAdd)
4472 Addend = Builder.CreateFNeg(Addend, "neg");
4473
4474 Value *FMulAdd = nullptr;
4475 if (Builder.getIsFPConstrained()) {
4476 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
4477 "Only constrained operation should be created when Builder is in FP "
4478 "constrained mode");
4479 FMulAdd = Builder.CreateConstrainedFPCall(
4480 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
4481 Addend->getType()),
4482 {MulOp0, MulOp1, Addend});
4483 } else {
4484 FMulAdd = Builder.CreateCall(
4485 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
4486 {MulOp0, MulOp1, Addend});
4487 }
4488 MulOp->eraseFromParent();
4489
4490 return FMulAdd;
4491}
4492
// Check whether it would be legal to emit an fmuladd intrinsic call to
// represent op and if so, build the fmuladd.
//
// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
// Does NOT check the type of the operation - it's assumed that this function
// will be called from contexts where it's known that the type is contractable.
//
// Returns the fmuladd call, or nullptr if no fusion opportunity was found.
static Value* tryEmitFMulAdd(const BinOpInfo &op,
                      const CodeGenFunction &CGF, CGBuilderTy &Builder,
                             bool isSub=false) {

  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
         "Only fadd/fsub can be the root of an fmuladd.");

  // Check whether this op is marked as fusable.
  if (!op.FPFeatures.allowFPContractWithinStatement())
    return nullptr;

  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  // Peek through fneg to look for fmul. Make sure fneg has no users, and that
  // it is the only use of its operand.
  bool NegLHS = false;
  if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
    if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
      LHS = LHSUnOp->getOperand(0);
      NegLHS = true;
    }
  }

  bool NegRHS = false;
  if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
    if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
      RHS = RHSUnOp->getOperand(0);
      NegRHS = true;
    }
  }

  // We have a potentially fusable op. Look for a mul on one of the operands.
  // Also, make sure that the mul result isn't used directly. In that case,
  // there's no point creating a muladd operation.
  // The mul on the LHS: a*b + c or (with isSub) a*b - c.
  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  // The mul on the RHS: c + a*b or (with isSub) c - a*b; the addend is never
  // negated here, the subtraction is folded into negMul instead.
  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
    }
  }

  // Same two cases again, but for strict-FP mode where the multiply was
  // emitted as llvm.experimental.constrained.fmul rather than a plain fmul.
  if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
    if (LHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
    if (RHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
    }
  }

  return nullptr;
}
4579
4580Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
4581 if (op.LHS->getType()->isPointerTy() ||
4582 op.RHS->getType()->isPointerTy())
4584
4585 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4586 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4587 case LangOptions::SOB_Defined:
4588 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4589 return Builder.CreateAdd(op.LHS, op.RHS, "add");
4590 [[fallthrough]];
4591 case LangOptions::SOB_Undefined:
4592 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4593 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
4594 [[fallthrough]];
4595 case LangOptions::SOB_Trapping:
4596 if (CanElideOverflowCheck(CGF.getContext(), op))
4597 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
4598 return EmitOverflowCheckedBinOp(op);
4599 }
4600 }
4601
4602 // For vector and matrix adds, try to fold into a fmuladd.
4603 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4604 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4605 // Try to form an fmuladd.
4606 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
4607 return FMulAdd;
4608 }
4609
4610 if (op.Ty->isConstantMatrixType()) {
4611 llvm::MatrixBuilder MB(Builder);
4612 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4613 return MB.CreateAdd(op.LHS, op.RHS);
4614 }
4615
4616 if (op.Ty->isUnsignedIntegerType() &&
4617 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
4618 !CanElideOverflowCheck(CGF.getContext(), op))
4619 return EmitOverflowCheckedBinOp(op);
4620
4621 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4622 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4623 return Builder.CreateFAdd(op.LHS, op.RHS, "add");
4624 }
4625
4626 if (op.isFixedPointOp())
4627 return EmitFixedPointBinOp(op);
4628
4629 return Builder.CreateAdd(op.LHS, op.RHS, "add");
4630}
4631
4632/// The resulting value must be calculated with exact precision, so the operands
4633/// may not be the same type.
4634Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
4635 using llvm::APSInt;
4636 using llvm::ConstantInt;
4637
4638 // This is either a binary operation where at least one of the operands is
4639 // a fixed-point type, or a unary operation where the operand is a fixed-point
4640 // type. The result type of a binary operation is determined by
4641 // Sema::handleFixedPointConversions().
4642 QualType ResultTy = op.Ty;
4643 QualType LHSTy, RHSTy;
4644 if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
4645 RHSTy = BinOp->getRHS()->getType();
4646 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
4647 // For compound assignment, the effective type of the LHS at this point
4648 // is the computation LHS type, not the actual LHS type, and the final
4649 // result type is not the type of the expression but rather the
4650 // computation result type.
4651 LHSTy = CAO->getComputationLHSType();
4652 ResultTy = CAO->getComputationResultType();
4653 } else
4654 LHSTy = BinOp->getLHS()->getType();
4655 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
4656 LHSTy = UnOp->getSubExpr()->getType();
4657 RHSTy = UnOp->getSubExpr()->getType();
4658 }
4659 ASTContext &Ctx = CGF.getContext();
4660 Value *LHS = op.LHS;
4661 Value *RHS = op.RHS;
4662
4663 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
4664 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
4665 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
4666 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
4667
4668 // Perform the actual operation.
4669 Value *Result;
4670 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
4671 switch (op.Opcode) {
4672 case BO_AddAssign:
4673 case BO_Add:
4674 Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
4675 break;
4676 case BO_SubAssign:
4677 case BO_Sub:
4678 Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
4679 break;
4680 case BO_MulAssign:
4681 case BO_Mul:
4682 Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
4683 break;
4684 case BO_DivAssign:
4685 case BO_Div:
4686 Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
4687 break;
4688 case BO_ShlAssign:
4689 case BO_Shl:
4690 Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
4691 break;
4692 case BO_ShrAssign:
4693 case BO_Shr:
4694 Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
4695 break;
4696 case BO_LT:
4697 return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
4698 case BO_GT:
4699 return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
4700 case BO_LE:
4701 return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4702 case BO_GE:
4703 return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4704 case BO_EQ:
4705 // For equality operations, we assume any padding bits on unsigned types are
4706 // zero'd out. They could be overwritten through non-saturating operations
4707 // that cause overflow, but this leads to undefined behavior.
4708 return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
4709 case BO_NE:
4710 return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4711 case BO_Cmp:
4712 case BO_LAnd:
4713 case BO_LOr:
4714 llvm_unreachable("Found unimplemented fixed point binary operation");
4715 case BO_PtrMemD:
4716 case BO_PtrMemI:
4717 case BO_Rem:
4718 case BO_Xor:
4719 case BO_And:
4720 case BO_Or:
4721 case BO_Assign:
4722 case BO_RemAssign:
4723 case BO_AndAssign:
4724 case BO_XorAssign:
4725 case BO_OrAssign:
4726 case BO_Comma:
4727 llvm_unreachable("Found unsupported binary operation for fixed point types.");
4728 }
4729
4730 bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
4732 // Convert to the result type.
4733 return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
4734 : CommonFixedSema,
4735 ResultFixedSema);
4736}
4737
4738Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
4739 // The LHS is always a pointer if either side is.
4740 if (!op.LHS->getType()->isPointerTy()) {
4741 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4742 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4743 case LangOptions::SOB_Defined:
4744 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4745 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4746 [[fallthrough]];
4747 case LangOptions::SOB_Undefined:
4748 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4749 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
4750 [[fallthrough]];
4751 case LangOptions::SOB_Trapping:
4752 if (CanElideOverflowCheck(CGF.getContext(), op))
4753 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
4754 return EmitOverflowCheckedBinOp(op);
4755 }
4756 }
4757
4758 // For vector and matrix subs, try to fold into a fmuladd.
4759 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4760 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4761 // Try to form an fmuladd.
4762 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
4763 return FMulAdd;
4764 }
4765
4766 if (op.Ty->isConstantMatrixType()) {
4767 llvm::MatrixBuilder MB(Builder);
4768 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4769 return MB.CreateSub(op.LHS, op.RHS);
4770 }
4771
4772 if (op.Ty->isUnsignedIntegerType() &&
4773 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
4774 !CanElideOverflowCheck(CGF.getContext(), op))
4775 return EmitOverflowCheckedBinOp(op);
4776
4777 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4778 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4779 return Builder.CreateFSub(op.LHS, op.RHS, "sub");
4780 }
4781
4782 if (op.isFixedPointOp())
4783 return EmitFixedPointBinOp(op);
4784
4785 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4786 }
4787
4788 // If the RHS is not a pointer, then we have normal pointer
4789 // arithmetic.
4790 if (!op.RHS->getType()->isPointerTy())
4792
4793 // Otherwise, this is a pointer subtraction.
4794
4795 // Do the raw subtraction part.
4796 llvm::Value *LHS
4797 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
4798 llvm::Value *RHS
4799 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
4800 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
4801
4802 // Okay, figure out the element size.
4803 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
4804 QualType elementType = expr->getLHS()->getType()->getPointeeType();
4805
4806 llvm::Value *divisor = nullptr;
4807
4808 // For a variable-length array, this is going to be non-constant.
4809 if (const VariableArrayType *vla
4810 = CGF.getContext().getAsVariableArrayType(elementType)) {
4811 auto VlaSize = CGF.getVLASize(vla);
4812 elementType = VlaSize.Type;
4813 divisor = VlaSize.NumElts;
4814
4815 // Scale the number of non-VLA elements by the non-VLA element size.
4816 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
4817 if (!eltSize.isOne())
4818 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
4819
4820 // For everything elese, we can just compute it, safe in the
4821 // assumption that Sema won't let anything through that we can't
4822 // safely compute the size of.
4823 } else {
4824 CharUnits elementSize;
4825 // Handle GCC extension for pointer arithmetic on void* and
4826 // function pointer types.
4827 if (elementType->isVoidType() || elementType->isFunctionType())
4828 elementSize = CharUnits::One();
4829 else
4830 elementSize = CGF.getContext().getTypeSizeInChars(elementType);
4831
4832 // Don't even emit the divide for element size of 1.
4833 if (elementSize.isOne())
4834 return diffInChars;
4835
4836 divisor = CGF.CGM.getSize(elementSize);
4837 }
4838
4839 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
4840 // pointer difference in C is only defined in the case where both operands
4841 // are pointing to elements of an array.
4842 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
4843}
4844
4845Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
4846 bool RHSIsSigned) {
4847 llvm::IntegerType *Ty;
4848 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4849 Ty = cast<llvm::IntegerType>(VT->getElementType());
4850 else
4851 Ty = cast<llvm::IntegerType>(LHS->getType());
4852 // For a given type of LHS the maximum shift amount is width(LHS)-1, however
4853 // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
4854 // this in ConstantInt::get, this results in the value getting truncated.
4855 // Constrain the return value to be max(RHS) in this case.
4856 llvm::Type *RHSTy = RHS->getType();
4857 llvm::APInt RHSMax =
4858 RHSIsSigned ? llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())
4859 : llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
4860 if (RHSMax.ult(Ty->getBitWidth()))
4861 return llvm::ConstantInt::get(RHSTy, RHSMax);
4862 return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
4863}
4864
4865Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
4866 const Twine &Name) {
4867 llvm::IntegerType *Ty;
4868 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4869 Ty = cast<llvm::IntegerType>(VT->getElementType());
4870 else
4871 Ty = cast<llvm::IntegerType>(LHS->getType());
4872
4873 if (llvm::isPowerOf2_64(Ty->getBitWidth()))
4874 return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);
4875
4876 return Builder.CreateURem(
4877 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
4878}
4879
4880Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
4881 // TODO: This misses out on the sanitizer check below.
4882 if (Ops.isFixedPointOp())
4883 return EmitFixedPointBinOp(Ops);
4884
4885 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4886 // RHS to the same size as the LHS.
4887 Value *RHS = Ops.RHS;
4888 if (Ops.LHS->getType() != RHS->getType())
4889 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4890
4891 bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
4892 Ops.Ty->hasSignedIntegerRepresentation() &&
4894 !CGF.getLangOpts().CPlusPlus20;
4895 bool SanitizeUnsignedBase =
4896 CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
4897 Ops.Ty->hasUnsignedIntegerRepresentation();
4898 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
4899 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
4900 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4901 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4902 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
4903 else if ((SanitizeBase || SanitizeExponent) &&
4904 isa<llvm::IntegerType>(Ops.LHS->getType())) {
4905 SmallVector<SanitizerKind::SanitizerOrdinal, 3> Ordinals;
4906 if (SanitizeSignedBase)
4907 Ordinals.push_back(SanitizerKind::SO_ShiftBase);
4908 if (SanitizeUnsignedBase)
4909 Ordinals.push_back(SanitizerKind::SO_UnsignedShiftBase);
4910 if (SanitizeExponent)
4911 Ordinals.push_back(SanitizerKind::SO_ShiftExponent);
4912
4913 SanitizerDebugLocation SanScope(&CGF, Ordinals,
4914 SanitizerHandler::ShiftOutOfBounds);
4915 SmallVector<std::pair<Value *, SanitizerKind::SanitizerOrdinal>, 2> Checks;
4916 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4917 llvm::Value *WidthMinusOne =
4918 GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);
4919 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
4920
4921 if (SanitizeExponent) {
4922 Checks.push_back(
4923 std::make_pair(ValidExponent, SanitizerKind::SO_ShiftExponent));
4924 }
4925
4926 if (SanitizeBase) {
4927 // Check whether we are shifting any non-zero bits off the top of the
4928 // integer. We only emit this check if exponent is valid - otherwise
4929 // instructions below will have undefined behavior themselves.
4930 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
4931 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4932 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
4933 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
4934 llvm::Value *PromotedWidthMinusOne =
4935 (RHS == Ops.RHS) ? WidthMinusOne
4936 : GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);
4937 CGF.EmitBlock(CheckShiftBase);
4938 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
4939 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
4940 /*NUW*/ true, /*NSW*/ true),
4941 "shl.check");
4942 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
4943 // In C99, we are not permitted to shift a 1 bit into the sign bit.
4944 // Under C++11's rules, shifting a 1 bit into the sign bit is
4945 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
4946 // define signed left shifts, so we use the C99 and C++11 rules there).
4947 // Unsigned shifts can always shift into the top bit.
4948 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
4949 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
4950 }
4951 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
4952 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
4953 CGF.EmitBlock(Cont);
4954 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
4955 BaseCheck->addIncoming(Builder.getTrue(), Orig);
4956 BaseCheck->addIncoming(ValidBase, CheckShiftBase);
4957 Checks.push_back(std::make_pair(
4958 BaseCheck, SanitizeSignedBase ? SanitizerKind::SO_ShiftBase
4959 : SanitizerKind::SO_UnsignedShiftBase));
4960 }
4961
4962 assert(!Checks.empty());
4963 EmitBinOpCheck(Checks, Ops);
4964 }
4965
4966 return Builder.CreateShl(Ops.LHS, RHS, "shl");
4967}
4968
4969Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
4970 // TODO: This misses out on the sanitizer check below.
4971 if (Ops.isFixedPointOp())
4972 return EmitFixedPointBinOp(Ops);
4973
4974 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4975 // RHS to the same size as the LHS.
4976 Value *RHS = Ops.RHS;
4977 if (Ops.LHS->getType() != RHS->getType())
4978 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4979
4980 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4981 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4982 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
4983 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
4984 isa<llvm::IntegerType>(Ops.LHS->getType())) {
4985 SanitizerDebugLocation SanScope(&CGF, {SanitizerKind::SO_ShiftExponent},
4986 SanitizerHandler::ShiftOutOfBounds);
4987 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4988 llvm::Value *Valid = Builder.CreateICmpULE(
4989 Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));
4990 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::SO_ShiftExponent), Ops);
4991 }
4992
4993 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4994 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
4995 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
4996}
4997
4999// return corresponding comparison intrinsic for given vector type
5000static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
5001 BuiltinType::Kind ElemKind) {
5002 switch (ElemKind) {
5003 default: llvm_unreachable("unexpected element type");
5004 case BuiltinType::Char_U:
5005 case BuiltinType::UChar:
5006 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
5007 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
5008 case BuiltinType::Char_S:
5009 case BuiltinType::SChar:
5010 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
5011 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
5012 case BuiltinType::UShort:
5013 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
5014 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
5015 case BuiltinType::Short:
5016 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
5017 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
5018 case BuiltinType::UInt:
5019 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
5020 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
5021 case BuiltinType::Int:
5022 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
5023 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
5024 case BuiltinType::ULong:
5025 case BuiltinType::ULongLong:
5026 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
5027 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
5028 case BuiltinType::Long:
5029 case BuiltinType::LongLong:
5030 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
5031 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
5032 case BuiltinType::Float:
5033 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
5034 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
5035 case BuiltinType::Double:
5036 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
5037 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
5038 case BuiltinType::UInt128:
5039 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
5040 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
5041 case BuiltinType::Int128:
5042 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
5043 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
5044 }
5045}
5046
// Emit a comparison between E's operands, dispatching on operand class:
// UICmpOpc is used for unsigned integers and pointers, SICmpOpc for signed
// integers, FCmpOpc for floating point; IsSignaling selects a signaling FP
// comparison. Handles member pointers, AltiVec predicate comparisons,
// fixed-point, and complex operands as special cases.
Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                      llvm::CmpInst::Predicate UICmpOpc,
                                      llvm::CmpInst::Predicate SICmpOpc,
                                      llvm::CmpInst::Predicate FCmpOpc,
                                      bool IsSignaling) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();
  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
    // Member pointers only support equality comparisons; the C++ ABI decides
    // how to lower them.
    assert(E->getOpcode() == BO_EQ ||
           E->getOpcode() == BO_NE);
    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
    // NOTE(review): the line assigning Result (the ABI member-pointer
    // comparison call head) appears to have been dropped by the extraction;
    // only the argument continuation below remains — verify against upstream.
        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
    BinOpInfo BOInfo = EmitBinOps(E);
    Value *LHS = BOInfo.LHS;
    Value *RHS = BOInfo.RHS;

    // If AltiVec, the comparison results in a numeric type, so we use
    // intrinsics comparing vectors and giving 0 or 1 as a result
    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // constants for mapping CR6 register bits to predicate result
      enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;

      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;

      // in several cases vector arguments order will be reversed
      Value *FirstVecArg = LHS,
            *SecondVecArg = RHS;

      QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
      BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();

      // Map the C-level comparison onto a predicate intrinsic plus a CR6 bit;
      // <, <=, >= are synthesized from "greater-than" by swapping operands.
      switch(E->getOpcode()) {
      default: llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          // Floats have a dedicated greater-or-equal intrinsic.
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        }
        else {
          // a <= b  <=>  !(a > b) for integers.
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        }
        else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
          std::swap(FirstVecArg, SecondVecArg);
        }
        break;
      }

      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});

      // The result type of intrinsic may not be same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
      // do nothing, if ResultTy is not i1 at the same time, it will cause
      // crash later.
      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
      if (ResultTy->getBitWidth() > 1 &&
          E->getType() == CGF.getContext().BoolTy)
        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                                  E->getExprLoc());
    }

    if (BOInfo.isFixedPointOp()) {
      Result = EmitFixedPointBinOp(BOInfo);
    } else if (LHS->getType()->isFPOrFPVectorTy()) {
      // Scope the FP environment (exceptions/rounding) to this comparison.
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
      if (!IsSignaling)
        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
      else
        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.

      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
      // NOTE(review): the remainder of this condition (and its opening brace)
      // appears to have been dropped by the extraction — verify upstream.

        // Dynamic information is required to be stripped for comparisons,
        // because it could leak the dynamic information. Based on comparisons
        // of pointers to dynamic objects, the optimizer can replace one pointer
        // with another, which might be incorrect in presence of invariant
        // groups. Comparison with null is safe because null does not carry any
        // dynamic information.
        if (LHSTy.mayBeDynamicClass())
          LHS = Builder.CreateStripInvariantGroup(LHS);
        if (RHSTy.mayBeDynamicClass())
          RHS = Builder.CreateStripInvariantGroup(RHS);
      }

      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the appropriate
    // vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    // NOTE(review): the declaration of the complex pairs LHS/RHS used below
    // appears to have been dropped by the extraction — verify upstream.
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      // Mixed complex/real comparison: treat the real operand as having a
      // zero imaginary part.
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
      (void)CTy;
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      // As complex comparisons can only be equality comparisons, they
      // are never signaling comparisons.
      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
    }

    if (E->getOpcode() == BO_EQ) {
      // (a+bi) == (c+di) iff both the real and imaginary parts are equal.
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                              E->getExprLoc());
}
5230
    const BinaryOperator *E, Value **Previous, QualType *SrcType) {
  // NOTE(review): the signature head of this function (called below as
  // CGF.EmitWithOriginalRHSBitfieldAssignment) appears to have been dropped
  // by the extraction; only the trailing parameters remain — verify upstream.
  //
  // Emits the RHS of a bitfield assignment, additionally reporting (via
  // *Previous / *SrcType) the value and type *before* the final scalar
  // conversion so sanitizers can check the implicit conversion.
  // In case we have the integer or bitfield sanitizer checks enabled
  // we want to get the expression before scalar conversion.
  if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
    CastKind Kind = ICE->getCastKind();
    if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
      // Evaluate the sub-expression once and convert it ourselves, so the
      // pre-conversion value is available to the caller.
      *SrcType = ICE->getSubExpr()->getType();
      *Previous = EmitScalarExpr(ICE->getSubExpr());
      // Pass default ScalarConversionOpts to avoid emitting
      // integer sanitizer checks as E refers to bitfield.
      return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
                                  ICE->getExprLoc());
    }
  }
  // No interesting implicit cast: emit the RHS normally.
  return EmitScalarExpr(E->getRHS());
}
5248
// Emit a simple assignment, handling pointer-auth qualified LHS, ObjC ARC
// ownership qualifiers, and bitfield LHS specially. Returns the assigned
// r-value (C), the stored value (non-volatile C++), or a reload (volatile
// C++), or null when the result is ignored.
//
// NOTE(review): this extraction has dropped several lines in this function
// (the pointer-auth LValue setup/store, the 'case Qualifiers::OCL_*' labels
// of the switch, and the OpenMP runtime call head) — verify against upstream
// before relying on this listing.
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  ApplyAtomGroup Grp(CGF.getDebugInfo());
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  if (PointerAuthQualifier PtrAuth = E->getLHS()->getType().getPointerAuth()) {
    // NOTE(review): the definition of LV (the checked LHS lvalue) appears to
    // have been dropped here by the extraction.
    llvm::Value *RV =
        CGF.EmitPointerAuthQualify(PtrAuth, E->getRHS(), LV.getAddress());
    CGF.EmitNullabilityCheck(LV, RV, E->getExprLoc());

    if (Ignore)
      return nullptr;
    // Re-sign for the unqualified result type before handing back the value.
    RV = CGF.EmitPointerAuthUnqualify(PtrAuth, RV, LV.getType(),
                                      LV.getAddress(), /*nonnull*/ false);
    return RV;
  }

  // NOTE(review): the OCL_* case labels of this switch were dropped by the
  // extraction; each group below corresponds to one ObjC ownership qualifier.
  switch (E->getLHS()->getType().getObjCLifetime()) {
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
    break;

    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if LHS is a bitfield, if RHS contains an implicit cast expression
    // we want to extract that value and potentially (if the bitfield sanitizer
    // is enabled) use it to check for an implicit conversion.
    if (E->getLHS()->refersToBitField())
      RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
    else
      RHS = Visit(E->getRHS());

    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      Value *Src = Previous ? Previous : RHS;
      QualType DstType = E->getLHS()->getType();
      CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
                                      LHS.getBitFieldInfo(), E->getExprLoc());
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
  }
  // OpenMP: Handle lastprivate(condition:) in scalar assignment
  if (CGF.getLangOpts().OpenMP) {
    // NOTE(review): the OpenMP runtime call head for this argument was
    // dropped by the extraction — verify against upstream.
        E->getLHS());
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}
5343
// Emit logical AND. Vector operands become an element-wise compare-with-zero
// and bitwise AND; scalar operands get short-circuit control flow (or are
// folded away entirely when the LHS is a known constant).
//
// NOTE(review): this extraction has dropped several single-statement lines
// (profile-counter increments, MCDC bitmap reset/update calls, and some 'if'
// condition continuations) — verify against upstream before relying on the
// exact statement sequence shown here.
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    // Both sides are reduced to "is nonzero" masks before ANDing, so the
    // result is element-wise, with no short-circuiting.
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())
      // NOTE(review): the body of the 'if' above was dropped by the
      // extraction.

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
      // NOTE(review): the continuation of this condition was dropped by the
      // extraction.
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
        Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      } else
        CGF.markStmtMaybeUsed(E->getRHS());

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS())) {
      CGF.markStmtMaybeUsed(E->getRHS());
      return llvm::Constant::getNullValue(ResTy);
    }
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reaquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
  // NOTE(review): the continuation of this condition was dropped by the
  // extraction.
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit line number for unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())

  // Artificial location to preserve the scope information
  {
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}
5486
// Emit logical OR. Mirrors VisitBinLAnd: vector operands become element-wise
// compare-with-zero plus bitwise OR; scalar operands get short-circuit
// control flow, with constant-folded LHS handled without branches.
//
// NOTE(review): this extraction has dropped several single-statement lines
// (profile-counter increments, MCDC bitmap reset/update calls, and some 'if'
// condition continuations) — verify against upstream before relying on the
// exact statement sequence shown here.
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    // Both sides are reduced to "is nonzero" masks before ORing, so the
    // result is element-wise, with no short-circuiting.
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())
      // NOTE(review): the body of the 'if' above was dropped by the
      // extraction.

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter need to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
      // NOTE(review): the continuation of this condition was dropped by the
      // extraction.
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
        Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      } else
        CGF.markStmtMaybeUsed(E->getRHS());

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    if (!CGF.ContainsLabel(E->getRHS())) {
      CGF.markStmtMaybeUsed(E->getRHS());
      return llvm::ConstantInt::get(ResTy, 1);
    }
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be true. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reaquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
  // NOTE(review): the continuation of this condition was dropped by the
  // extraction.
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
    Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}
5623
5624Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
5625 CGF.EmitIgnoredExpr(E->getLHS());
5626 CGF.EnsureInsertPoint();
5627 return Visit(E->getRHS());
5628}
5629
5630//===----------------------------------------------------------------------===//
5631// Other Operators
5632//===----------------------------------------------------------------------===//
5633
/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
                                             CodeGenFunction &CGF) {
  // NOTE(review): the signature line introducing this static helper appears
  // to have been dropped by the extraction; only the trailing parameter
  // remains — verify against upstream.
  // Anything that is an integer or floating point constant is fine.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());

  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the scope
  // outside the lambda, that function may have returned already. Reading its
  // locals is a bad idea. Also, these reads may introduce races there didn't
  // exist in the source-level program.
}
5650
5651
5652Value *ScalarExprEmitter::
5653VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
5654 TestAndClearIgnoreResultAssign();
5655
5656 // Bind the common expression if necessary.
5657 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
5658
5659 Expr *condExpr = E->getCond();
5660 Expr *lhsExpr = E->getTrueExpr();
5661 Expr *rhsExpr = E->getFalseExpr();
5662
5663 // If the condition constant folds and can be elided, try to avoid emitting
5664 // the condition and the dead arm.
5665 bool CondExprBool;
5666 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5667 Expr *live = lhsExpr, *dead = rhsExpr;
5668 if (!CondExprBool) std::swap(live, dead);
5669
5670 // If the dead side doesn't have labels we need, just emit the Live part.
5671 if (!CGF.ContainsLabel(dead)) {
5672 if (CondExprBool) {
5674 CGF.incrementProfileCounter(lhsExpr);
5675 CGF.incrementProfileCounter(rhsExpr);
5676 }
5678 }
5679 Value *Result = Visit(live);
5680 CGF.markStmtMaybeUsed(dead);
5681
5682 // If the live part is a throw expression, it acts like it has a void
5683 // type, so evaluating it returns a null Value*. However, a conditional
5684 // with non-void type must return a non-null Value*.
5685 if (!Result && !E->getType()->isVoidType())
5686 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
5687
5688 return Result;
5689 }
5690 }
5691
5692 // OpenCL: If the condition is a vector, we can treat this condition like
5693 // the select function.
5694 if (CGF.getLangOpts().OpenCL && (condExpr->getType()->isVectorType() ||
5695 condExpr->getType()->isExtVectorType())) {
5697
5698 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5699 llvm::Value *LHS = Visit(lhsExpr);
5700 llvm::Value *RHS = Visit(rhsExpr);
5701
5702 llvm::Type *condType = ConvertType(condExpr->getType());
5703 auto *vecTy = cast<llvm::FixedVectorType>(condType);
5704
5705 unsigned numElem = vecTy->getNumElements();
5706 llvm::Type *elemType = vecTy->getElementType();
5707
5708 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
5709 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
5710 llvm::Value *tmp = Builder.CreateSExt(
5711 TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
5712 llvm::Value *tmp2 = Builder.CreateNot(tmp);
5713
5714 // Cast float to int to perform ANDs if necessary.
5715 llvm::Value *RHSTmp = RHS;
5716 llvm::Value *LHSTmp = LHS;
5717 bool wasCast = false;
5718 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
5719 if (rhsVTy->getElementType()->isFloatingPointTy()) {
5720 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
5721 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
5722 wasCast = true;
5723 }
5724
5725 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
5726 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
5727 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
5728 if (wasCast)
5729 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
5730
5731 return tmp5;
5732 }
5733
5734 if (condExpr->getType()->isVectorType() ||
5735 condExpr->getType()->isSveVLSBuiltinType()) {
5737
5738 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5739 llvm::Value *LHS = Visit(lhsExpr);
5740 llvm::Value *RHS = Visit(rhsExpr);
5741
5742 llvm::Type *CondType = ConvertType(condExpr->getType());
5743 auto *VecTy = cast<llvm::VectorType>(CondType);
5744
5745 if (VecTy->getElementType()->isIntegerTy(1))
5746 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
5747
5748 // OpenCL uses the MSB of the mask vector.
5749 llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
5750 if (condExpr->getType()->isExtVectorType())
5751 CondV = Builder.CreateICmpSLT(CondV, ZeroVec, "vector_cond");
5752 else
5753 CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
5754 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
5755 }
5756
5757 // If this is a really simple expression (like x ? 4 : 5), emit this as a
5758 // select instead of as control flow. We can only do this if it is cheap and
5759 // safe to evaluate the LHS and RHS unconditionally.
5760 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
5762 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
5763 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
5764
5766 CGF.incrementProfileCounter(lhsExpr);
5767 CGF.incrementProfileCounter(rhsExpr);
5769 } else
5770 CGF.incrementProfileCounter(E, StepV);
5771
5772 llvm::Value *LHS = Visit(lhsExpr);
5773 llvm::Value *RHS = Visit(rhsExpr);
5774 if (!LHS) {
5775 // If the conditional has void type, make sure we return a null Value*.
5776 assert(!RHS && "LHS and RHS types must match");
5777 return nullptr;
5778 }
5779 return Builder.CreateSelect(CondV, LHS, RHS, "cond");
5780 }
5781
5782 // If the top of the logical operator nest, reset the MCDC temp to 0.
5783 if (CGF.MCDCLogOpStack.empty())
5784 CGF.maybeResetMCDCCondBitmap(condExpr);
5785
5786 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
5787 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
5788 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
5789
5790 CodeGenFunction::ConditionalEvaluation eval(CGF);
5791 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
5792 CGF.getProfileCount(lhsExpr));
5793
5794 CGF.EmitBlock(LHSBlock);
5795
5796 // If the top of the logical operator nest, update the MCDC bitmap for the
5797 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5798 // may also contain a boolean expression.
5799 if (CGF.MCDCLogOpStack.empty())
5800 CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);
5801
5803 CGF.incrementProfileCounter(lhsExpr);
5804 else
5806
5807 eval.begin(CGF);
5808 Value *LHS = Visit(lhsExpr);
5809 eval.end(CGF);
5810
5811 LHSBlock = Builder.GetInsertBlock();
5812 Builder.CreateBr(ContBlock);
5813
5814 CGF.EmitBlock(RHSBlock);
5815
5816 // If the top of the logical operator nest, update the MCDC bitmap for the
5817 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5818 // may also contain a boolean expression.
5819 if (CGF.MCDCLogOpStack.empty())
5820 CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);
5821
5823 CGF.incrementProfileCounter(rhsExpr);
5824
5825 eval.begin(CGF);
5826 Value *RHS = Visit(rhsExpr);
5827 eval.end(CGF);
5828
5829 RHSBlock = Builder.GetInsertBlock();
5830 CGF.EmitBlock(ContBlock);
5831
5832 // If the LHS or RHS is a throw expression, it will be legitimately null.
5833 if (!LHS)
5834 return RHS;
5835 if (!RHS)
5836 return LHS;
5837
5838 // Create a PHI node for the real part.
5839 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
5840 PN->addIncoming(LHS, LHSBlock);
5841 PN->addIncoming(RHS, RHSBlock);
5842
5843 // When single byte coverage mode is enabled, add a counter to continuation
5844 // block.
5847
5848 return PN;
5849}
5850
5851Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
5852 return Visit(E->getChosenSubExpr());
5853}
5854
5855Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
5856 Address ArgValue = Address::invalid();
5857 RValue ArgPtr = CGF.EmitVAArg(VE, ArgValue);
5858
5859 return ArgPtr.getScalarVal();
5860}
5861
5862Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
5863 return CGF.EmitBlockLiteral(block);
5864}
5865
5866// Convert a vec3 to vec4, or vice versa.
5868 Value *Src, unsigned NumElementsDst) {
5869 static constexpr int Mask[] = {0, 1, 2, -1};
5870 return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
5871}
5872
5873// Create cast instructions for converting LLVM value \p Src to LLVM type \p
5874// DstTy. \p Src has the same size as \p DstTy. Both are single value types
5875// but could be scalar or vectors of different lengths, and either can be
5876// pointer.
5877// There are 4 cases:
5878// 1. non-pointer -> non-pointer : needs 1 bitcast
5879// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
5880// 3. pointer -> non-pointer
5881// a) pointer -> intptr_t : needs 1 ptrtoint
5882// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
5883// 4. non-pointer -> pointer
5884// a) intptr_t -> pointer : needs 1 inttoptr
5885// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
5886// Note: for cases 3b and 4b two casts are required since LLVM casts do not
5887// allow casting directly between pointer types and non-integer non-pointer
5888// types.
5890 const llvm::DataLayout &DL,
5891 Value *Src, llvm::Type *DstTy,
5892 StringRef Name = "") {
5893 auto SrcTy = Src->getType();
5894
5895 // Case 1.
5896 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
5897 return Builder.CreateBitCast(Src, DstTy, Name);
5898
5899 // Case 2.
5900 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
5901 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
5902
5903 // Case 3.
5904 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
5905 // Case 3b.
5906 if (!DstTy->isIntegerTy())
5907 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
5908 // Cases 3a and 3b.
5909 return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
5910 }
5911
5912 // Case 4b.
5913 if (!SrcTy->isIntegerTy())
5914 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
5915 // Cases 4a and 4b.
5916 return Builder.CreateIntToPtr(Src, DstTy, Name);
5917}
5918
5919Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
5920 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
5921 llvm::Type *DstTy = ConvertType(E->getType());
5922
5923 llvm::Type *SrcTy = Src->getType();
5924 unsigned NumElementsSrc =
5926 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
5927 : 0;
5928 unsigned NumElementsDst =
5930 ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
5931 : 0;
5932
5933 // Use bit vector expansion for ext_vector_type boolean vectors.
5934 if (E->getType()->isExtVectorBoolType())
5935 return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");
5936
5937 // Going from vec3 to non-vec3 is a special case and requires a shuffle
5938 // vector to get a vec4, then a bitcast if the target type is different.
5939 if (NumElementsSrc == 3 && NumElementsDst != 3) {
5940 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
5941 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5942 DstTy);
5943
5944 Src->setName("astype");
5945 return Src;
5946 }
5947
5948 // Going from non-vec3 to vec3 is a special case and requires a bitcast
5949 // to vec4 if the original type is not vec4, then a shuffle vector to
5950 // get a vec3.
5951 if (NumElementsSrc != 3 && NumElementsDst == 3) {
5952 auto *Vec4Ty = llvm::FixedVectorType::get(
5953 cast<llvm::VectorType>(DstTy)->getElementType(), 4);
5954 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5955 Vec4Ty);
5956
5957 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
5958 Src->setName("astype");
5959 return Src;
5960 }
5961
5962 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
5963 Src, DstTy, "astype");
5964}
5965
5966Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
5967 return CGF.EmitAtomicExpr(E).getScalarVal();
5968}
5969
5970//===----------------------------------------------------------------------===//
5971// Entry Point into this File
5972//===----------------------------------------------------------------------===//
5973
5974/// Emit the computation of the specified expression of scalar type, ignoring
5975/// the result.
5976Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
5977 assert(E && hasScalarEvaluationKind(E->getType()) &&
5978 "Invalid scalar expression to emit");
5979
5980 return ScalarExprEmitter(*this, IgnoreResultAssign)
5981 .Visit(const_cast<Expr *>(E));
5982}
5983
5984/// Emit a conversion from the specified type to the specified destination type,
5985/// both of which are LLVM scalar types.
5987 QualType DstTy,
5988 SourceLocation Loc) {
5989 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
5990 "Invalid scalar expression to emit");
5991 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
5992}
5993
5994/// Emit a conversion from the specified complex type to the specified
5995/// destination type, where the destination type is an LLVM scalar type.
5997 QualType SrcTy,
5998 QualType DstTy,
5999 SourceLocation Loc) {
6000 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
6001 "Invalid complex -> scalar conversion");
6002 return ScalarExprEmitter(*this)
6003 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
6004}
6005
6006
6007Value *
6009 QualType PromotionType) {
6010 if (!PromotionType.isNull())
6011 return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
6012 else
6013 return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
6014}
6015
6016
6019 bool isInc, bool isPre) {
6020 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
6021}
6022
6024 // object->isa or (*object).isa
6025 // Generate code as for: *(Class*)object
6026
6027 Expr *BaseExpr = E->getBase();
6029 if (BaseExpr->isPRValue()) {
6030 llvm::Type *BaseTy =
6032 Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
6033 } else {
6034 Addr = EmitLValue(BaseExpr).getAddress();
6035 }
6036
6037 // Cast the address to Class*.
6038 Addr = Addr.withElementType(ConvertType(E->getType()));
6039 return MakeAddrLValue(Addr, E->getType());
6040}
6041
6042
6044 const CompoundAssignOperator *E) {
6046 ScalarExprEmitter Scalar(*this);
6047 Value *Result = nullptr;
6048 switch (E->getOpcode()) {
6049#define COMPOUND_OP(Op) \
6050 case BO_##Op##Assign: \
6051 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
6052 Result)
6053 COMPOUND_OP(Mul);
6054 COMPOUND_OP(Div);
6055 COMPOUND_OP(Rem);
6056 COMPOUND_OP(Add);
6057 COMPOUND_OP(Sub);
6058 COMPOUND_OP(Shl);
6059 COMPOUND_OP(Shr);
6061 COMPOUND_OP(Xor);
6062 COMPOUND_OP(Or);
6063#undef COMPOUND_OP
6064
6065 case BO_PtrMemD:
6066 case BO_PtrMemI:
6067 case BO_Mul:
6068 case BO_Div:
6069 case BO_Rem:
6070 case BO_Add:
6071 case BO_Sub:
6072 case BO_Shl:
6073 case BO_Shr:
6074 case BO_LT:
6075 case BO_GT:
6076 case BO_LE:
6077 case BO_GE:
6078 case BO_EQ:
6079 case BO_NE:
6080 case BO_Cmp:
6081 case BO_And:
6082 case BO_Xor:
6083 case BO_Or:
6084 case BO_LAnd:
6085 case BO_LOr:
6086 case BO_Assign:
6087 case BO_Comma:
6088 llvm_unreachable("Not valid compound assignment operators");
6089 }
6090
6091 llvm_unreachable("Unhandled compound assignment operator");
6092}
6093
6095 // The total (signed) byte offset for the GEP.
6096 llvm::Value *TotalOffset;
6097 // The offset overflow flag - true if the total offset overflows.
6098 llvm::Value *OffsetOverflows;
6099};
6100
6101/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
6102/// and compute the total offset it applies from it's base pointer BasePtr.
6103/// Returns offset in bytes and a boolean flag whether an overflow happened
6104/// during evaluation.
6106 llvm::LLVMContext &VMContext,
6107 CodeGenModule &CGM,
6108 CGBuilderTy &Builder) {
6109 const auto &DL = CGM.getDataLayout();
6110
6111 // The total (signed) byte offset for the GEP.
6112 llvm::Value *TotalOffset = nullptr;
6113
6114 // Was the GEP already reduced to a constant?
6115 if (isa<llvm::Constant>(GEPVal)) {
6116 // Compute the offset by casting both pointers to integers and subtracting:
6117 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
6118 Value *BasePtr_int =
6119 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
6120 Value *GEPVal_int =
6121 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
6122 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
6123 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
6124 }
6125
6126 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
6127 assert(GEP->getPointerOperand() == BasePtr &&
6128 "BasePtr must be the base of the GEP.");
6129 assert(GEP->isInBounds() && "Expected inbounds GEP");
6130
6131 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
6132
6133 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
6134 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
6135 auto *SAddIntrinsic =
6136 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
6137 auto *SMulIntrinsic =
6138 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
6139
6140 // The offset overflow flag - true if the total offset overflows.
6141 llvm::Value *OffsetOverflows = Builder.getFalse();
6142
6143 /// Return the result of the given binary operation.
6144 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
6145 llvm::Value *RHS) -> llvm::Value * {
6146 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
6147
6148 // If the operands are constants, return a constant result.
6149 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
6150 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
6151 llvm::APInt N;
6152 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
6153 /*Signed=*/true, N);
6154 if (HasOverflow)
6155 OffsetOverflows = Builder.getTrue();
6156 return llvm::ConstantInt::get(VMContext, N);
6157 }
6158 }
6159
6160 // Otherwise, compute the result with checked arithmetic.
6161 auto *ResultAndOverflow = Builder.CreateCall(
6162 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
6163 OffsetOverflows = Builder.CreateOr(
6164 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
6165 return Builder.CreateExtractValue(ResultAndOverflow, 0);
6166 };
6167
6168 // Determine the total byte offset by looking at each GEP operand.
6169 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
6170 GTI != GTE; ++GTI) {
6171 llvm::Value *LocalOffset;
6172 auto *Index = GTI.getOperand();
6173 // Compute the local offset contributed by this indexing step:
6174 if (auto *STy = GTI.getStructTypeOrNull()) {
6175 // For struct indexing, the local offset is the byte position of the
6176 // specified field.
6177 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
6178 LocalOffset = llvm::ConstantInt::get(
6179 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
6180 } else {
6181 // Otherwise this is array-like indexing. The local offset is the index
6182 // multiplied by the element size.
6183 auto *ElementSize =
6184 llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
6185 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
6186 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
6187 }
6188
6189 // If this is the first offset, set it as the total offset. Otherwise, add
6190 // the local offset into the running total.
6191 if (!TotalOffset || TotalOffset == Zero)
6192 TotalOffset = LocalOffset;
6193 else
6194 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
6195 }
6196
6197 return {TotalOffset, OffsetOverflows};
6198}
6199
6200Value *
6201CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
6202 ArrayRef<Value *> IdxList,
6203 bool SignedIndices, bool IsSubtraction,
6204 SourceLocation Loc, const Twine &Name) {
6205 llvm::Type *PtrTy = Ptr->getType();
6206
6207 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6208 if (!SignedIndices && !IsSubtraction)
6209 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6210
6211 Value *GEPVal = Builder.CreateGEP(ElemTy, Ptr, IdxList, Name, NWFlags);
6212
6213 // If the pointer overflow sanitizer isn't enabled, do nothing.
6214 if (!SanOpts.has(SanitizerKind::PointerOverflow))
6215 return GEPVal;
6216
6217 // Perform nullptr-and-offset check unless the nullptr is defined.
6218 bool PerformNullCheck = !NullPointerIsDefined(
6219 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
6220 // Check for overflows unless the GEP got constant-folded,
6221 // and only in the default address space
6222 bool PerformOverflowCheck =
6223 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
6224
6225 if (!(PerformNullCheck || PerformOverflowCheck))
6226 return GEPVal;
6227
6228 const auto &DL = CGM.getDataLayout();
6229
6230 auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
6231 auto CheckHandler = SanitizerHandler::PointerOverflow;
6232 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6233 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
6234
6235 GEPOffsetAndOverflow EvaluatedGEP =
6236 EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
6237
6238 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
6239 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
6240 "If the offset got constant-folded, we don't expect that there was an "
6241 "overflow.");
6242
6243 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
6244
6245 // Common case: if the total offset is zero, don't emit a check.
6246 if (EvaluatedGEP.TotalOffset == Zero)
6247 return GEPVal;
6248
6249 // Now that we've computed the total offset, add it to the base pointer (with
6250 // wrapping semantics).
6251 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
6252 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
6253
6254 llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
6255 2>
6256 Checks;
6257
6258 if (PerformNullCheck) {
6259 // If the base pointer evaluates to a null pointer value,
6260 // the only valid pointer this inbounds GEP can produce is also
6261 // a null pointer, so the offset must also evaluate to zero.
6262 // Likewise, if we have non-zero base pointer, we can not get null pointer
6263 // as a result, so the offset can not be -intptr_t(BasePtr).
6264 // In other words, both pointers are either null, or both are non-null,
6265 // or the behaviour is undefined.
6266 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
6267 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
6268 auto *Valid = Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr);
6269 Checks.emplace_back(Valid, CheckOrdinal);
6270 }
6271
6272 if (PerformOverflowCheck) {
6273 // The GEP is valid if:
6274 // 1) The total offset doesn't overflow, and
6275 // 2) The sign of the difference between the computed address and the base
6276 // pointer matches the sign of the total offset.
6277 llvm::Value *ValidGEP;
6278 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
6279 if (SignedIndices) {
6280 // GEP is computed as `unsigned base + signed offset`, therefore:
6281 // * If offset was positive, then the computed pointer can not be
6282 // [unsigned] less than the base pointer, unless it overflowed.
6283 // * If offset was negative, then the computed pointer can not be
6284 // [unsigned] greater than the bas pointere, unless it overflowed.
6285 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
6286 auto *PosOrZeroOffset =
6287 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
6288 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
6289 ValidGEP =
6290 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
6291 } else if (!IsSubtraction) {
6292 // GEP is computed as `unsigned base + unsigned offset`, therefore the
6293 // computed pointer can not be [unsigned] less than base pointer,
6294 // unless there was an overflow.
6295 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
6296 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
6297 } else {
6298 // GEP is computed as `unsigned base - unsigned offset`, therefore the
6299 // computed pointer can not be [unsigned] greater than base pointer,
6300 // unless there was an overflow.
6301 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
6302 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
6303 }
6304 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
6305 Checks.emplace_back(ValidGEP, CheckOrdinal);
6306 }
6307
6308 assert(!Checks.empty() && "Should have produced some checks.");
6309
6310 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
6311 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
6312 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
6313 EmitCheck(Checks, CheckHandler, StaticArgs, DynamicArgs);
6314
6315 return GEPVal;
6316}
6317
6319 Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
6320 bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
6321 const Twine &Name) {
6322 if (!SanOpts.has(SanitizerKind::PointerOverflow)) {
6323 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6324 if (!SignedIndices && !IsSubtraction)
6325 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6326
6327 return Builder.CreateGEP(Addr, IdxList, elementType, Align, Name, NWFlags);
6328 }
6329
6330 return RawAddress(
6331 EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
6332 IdxList, SignedIndices, IsSubtraction, Loc, Name),
6333 elementType, Align);
6334}
Defines the clang::ASTContext interface.
#define V(N, I)
static llvm::Value * EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, const BinaryOperator *E, llvm::Value *LHS, llvm::Value *RHS, CompareKind Kind, const char *NameSuffix="")
static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue DestVal, LValue SrcVal, SourceLocation Loc)
static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty)
static llvm::Value * EmitIsNegativeTestHelper(Value *V, QualType VType, const char *Name, CGBuilderTy &Builder)
static Value * createCastsForTypeOfSameSize(CGBuilderTy &Builder, const llvm::DataLayout &DL, Value *Src, llvm::Type *DstTy, StringRef Name="")
static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E)
static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc, bool isPre, ASTContext &Ctx)
For the purposes of overflow pattern exclusion, does this match the "while(i--)" pattern?
IntrinsicType
@ VCMPGT
@ VCMPEQ
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, BuiltinType::Kind ElemKind)
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal, llvm::LLVMContext &VMContext, CodeGenModule &CGM, CGBuilderTy &Builder)
Evaluate given GEPVal, which is either an inbounds GEP, or a constant, and compute the total offset i...
static bool isDeclRefKnownNonNull(CodeGenFunction &CGF, const ValueDecl *D)
static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(QualType SrcType, QualType DstType)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool negMul, bool negAdd)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, unsigned Off)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF, Value *Src, unsigned NumElementsDst)
static Value * tryEmitFMulAdd(const BinOpInfo &op, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool isSub=false)
static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E, llvm::Value *InVal, bool IsInc, FPOptions FPFeatures)
#define HANDLE_BINOP(OP)
#define COMPOUND_OP(Op)
#define HANDLEBINOP(OP)
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e, CIRGenFunction &cgf)
Return true if the specified expression is cheap enough and side-effect-free enough to evaluate uncon...
static std::optional< QualType > getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e)
If e is a widened promoted integer, get its base (unpromoted) type.
#define VISITCOMP(CODE)
static uint32_t getBitWidth(const Expr *E)
llvm::APSInt APSInt
Definition Compiler.cpp:24
static Decl::Kind getKind(const Decl *D)
FormatToken * Previous
The previous token in the unwrapped line.
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
This file contains the declaration of TrapReasonBuilder and related classes.
llvm::APInt getValue() const
APSInt & getInt()
Definition APValue.h:489
bool isLValue() const
Definition APValue.h:472
bool isInt() const
Definition APValue.h:467
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
ParentMapContext & getParentMapContext()
Returns the dynamic AST node parent map context.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
CanQualType FloatTy
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
const LangOptions & getLangOpts() const
Definition ASTContext.h:944
bool isTypeIgnoredBySanitizer(const SanitizerMask &Mask, const QualType &Ty) const
Check if a type can have its sanitizer instrumentation elided based on its presence within an ignorel...
CanQualType BoolTy
unsigned getOpenMPDefaultSimdAlign(QualType T) const
Get default simd alignment of the specified complete type in bits.
llvm::FixedPointSemantics getFixedPointSemantics(QualType Ty) const
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getPromotedIntegerType(QualType PromotableType) const
Return the type that PromotableType will promote to: C99 6.3.1.1p2, assuming that PromotableType is a...
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getComplexType(QualType T) const
Return the uniqued reference to the type for a complex number with the specified element type.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:909
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
bool isPromotableIntegerType(QualType T) const
More type predicates useful for type checking/promotion.
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4531
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4537
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4543
LabelDecl * getLabel() const
Definition Expr.h:4573
uint64_t getValue() const
Definition ExprCXX.h:3044
QualType getElementType() const
Definition TypeBase.h:3735
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:6704
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4038
Expr * getLHS() const
Definition Expr.h:4088
static Opcode getOpForCompoundAssignment(Opcode Opc)
Definition Expr.h:4185
bool isCompoundAssignmentOp() const
Definition Expr.h:4182
SourceLocation getExprLoc() const
Definition Expr.h:4079
bool isShiftOp() const
Definition Expr.h:4127
Expr * getRHS() const
Definition Expr.h:4090
bool isShiftAssignOp() const
Definition Expr.h:4196
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:4251
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, const Expr *LHS, const Expr *RHS)
Return true if a binary operator using the specified opcode and operands would match the 'p = (i8*)nu...
Definition Expr.cpp:2204
Opcode getOpcode() const
Definition Expr.h:4083
BinaryOperatorKind Opcode
Definition Expr.h:4043
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition DeclCXX.h:249
bool getValue() const
Definition ExprCXX.h:740
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
bool getValue() const
Definition ExprCXX.h:4332
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition ExprCXX.h:304
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1602
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3676
path_iterator path_begin()
Definition Expr.h:3746
CastKind getCastKind() const
Definition Expr.h:3720
bool changesVolatileQualification() const
Return.
Definition Expr.h:3810
path_iterator path_end()
Definition Expr.h:3747
Expr * getSubExpr()
Definition Expr.h:3726
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
bool isOne() const
isOne - Test whether the quantity equals one.
Definition CharUnits.h:125
unsigned getValue() const
Definition Expr.h:1629
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4884
bool hasProfileClangInstr() const
Check if Clang profile instrumenation is on.
SanitizerSet SanitizeTrap
Set of sanitizer checks that trap rather than diagnose.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
static Address invalid()
Definition Address.h:176
bool isValid() const
Definition Address.h:177
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:112
virtual llvm::Constant * EmitNullMemberPointer(const MemberPointerType *MPT)
Create a null member pointer of the given type.
Definition CGCXXABI.cpp:103
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition CGCXXABI.cpp:95
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition CGCXXABI.cpp:85
virtual llvm::Value * EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *Src)
Perform a derived-to-base, base-to-derived, or bitcast member pointer conversion.
Definition CGCXXABI.cpp:72
void EmitPseudoVariable(CGBuilderTy &Builder, llvm::Instruction *Value, QualType Ty)
Emit a pseudo variable and debug info for an intermediate value if it does not correspond to a variab...
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
void emitInitListOpaqueValues(CodeGenFunction &CGF, InitListExpr *E)
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
Produce the code for a CK_ARCConsumeObject.
Definition CGObjC.cpp:2152
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
Definition CGObjC.cpp:573
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
CurrentSourceLocExprScope CurSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
llvm::Value * EmitARCReclaimReturnedObject(const Expr *e, bool allowUnsafeClaim)
Definition CGObjC.cpp:3089
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
Definition CGObjC.cpp:3679
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given division operation based on CodeGenOpts.
Definition CGExpr.cpp:6908
llvm::Value * EmitObjCSelectorExpr(const ObjCSelectorExpr *E)
Emit a selector.
Definition CGObjC.cpp:257
SanitizerSet SanOpts
Sanitizers enabled for this function.
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
Definition CGExpr.cpp:184
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
llvm::Value * EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E)
Definition CGObjC.cpp:251
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
const CastExpr * CurCast
If a cast expression is being visited, this holds the current cast's expression.
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitObjCProtocolExpr(const ObjCProtocolExpr *E)
Definition CGObjC.cpp:265
llvm::Value * EmitPointerAuthQualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType ValueType, Address StorageAddress, bool IsKnownNonNull)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
Definition CGExpr.cpp:2827
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3825
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition CGCall.cpp:6329
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
Definition CGExpr.cpp:7008
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void maybeUpdateMCDCTestVectorBitmap(const Expr *E)
Update the MCDC test vector bitmap for the given expression.
void EmitCXXDeleteExpr(const CXXDeleteExpr *E)
llvm::Value * EmitObjCArrayLiteral(const ObjCArrayLiteral *E)
Definition CGObjC.cpp:247
llvm::Value * EmitPromotedScalarExpr(const Expr *E, QualType PromotionType)
const LangOptions & getLangOpts() const
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
Store into a strong object.
Definition CGObjC.cpp:2545
bool isPointerKnownNonNull(const Expr *E)
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
Definition CGClass.cpp:394
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
Definition CGDecl.cpp:765
llvm::Value * EmitPointerAuthUnqualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType PointerType, Address StorageAddress, bool IsKnownNonNull)
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
Emit a compare-and-exchange op for atomic type.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
Definition CGClass.cpp:2999
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitCXXNewExpr(const CXXNewExpr *E)
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
std::pair< LValue, llvm::Value * > EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored)
Definition CGObjC.cpp:3629
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3715
LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E, llvm::Value *&Result)
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition CGExpr.cpp:177
const TargetInfo & getTarget() const
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
llvm::Value * EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty)
Definition CGObjC.cpp:3953
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:245
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:6155
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2402
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
llvm::Value * EmitObjCBoxedExpr(const ObjCBoxedExpr *E)
EmitObjCBoxedExpr - This routine generates code to call the appropriate expression boxing method.
Definition CGObjC.cpp:64
void EmitBoundsCheck(const Expr *ArrayExpr, const Expr *ArrayExprBase, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
Definition CGExpr.cpp:1239
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:226
void maybeResetMCDCCondBitmap(const Expr *E)
Zero-init the MCDC temp value.
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:3973
SmallVector< const BinaryOperator *, 16 > MCDCLogOpStack
Stack to track the Logical Operator recursion nest for MC/DC.
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6108
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
Definition CGExpr.cpp:2029
llvm::Value * EmitARCRetainScalarExpr(const Expr *expr)
EmitARCRetainScalarExpr - Semantically equivalent to EmitARCRetainObject(e->getType(),...
Definition CGObjC.cpp:3493
llvm::Value * EmitBlockLiteral(const BlockExpr *)
Emit block literal.
Definition CGBlocks.cpp:764
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2215
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6094
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2624
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4403
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition CGStmt.cpp:573
llvm::AtomicRMWInst * emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Order=llvm::AtomicOrdering::SequentiallyConsistent, llvm::SyncScope::ID SSID=llvm::SyncScope::System, const AtomicExpr *AE=nullptr)
Emit an atomicrmw instruction, and applying relevant metadata when applicable.
llvm::Value * EmitPointerArithmetic(const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer, Expr *indexOperand, llvm::Value *index, bool isSubtraction)
Emit pointer + index arithmetic.
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:267
uint64_t getCurrentProfileCount()
Get the profiler's current count.
llvm::Type * ConvertTypeForMem(QualType T)
RValue EmitAtomicExpr(AtomicExpr *E)
Definition CGAtomic.cpp:899
void markStmtMaybeUsed(const Stmt *S)
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
void FlattenAccessAndTypeLValue(LValue LVal, SmallVectorImpl< LValue > &AccessList)
Definition CGExpr.cpp:7017
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1575
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition CGStmt.cpp:676
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition CGExpr.cpp:1656
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition CGExpr.cpp:189
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
Definition CGExpr.cpp:737
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitBuiltinAvailable(const VersionTuple &Version)
Definition CGObjC.cpp:4033
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
Definition CGExpr.cpp:4952
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4315
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of 'this'.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2245
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
llvm::Value * EmitObjCStringLiteral(const ObjCStringLiteral *E)
Emits an instance of NSConstantString representing the object.
Definition CGObjC.cpp:51
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr)
Try to emit a reference to the given value without producing it as an l-value.
Definition CGExpr.cpp:1926
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1691
llvm::Value * EmitARCExtendBlockObject(const Expr *expr)
Definition CGObjC.cpp:3524
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
i8* @objc_storeWeak(i8** addr, i8* value) Returns value.
Definition CGObjC.cpp:2651
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType)
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:656
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition CGExpr.cpp:1371
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
TrapReasonBuilder BuildTrapReason(unsigned DiagID, TrapReason &TR)
Helper function to construct a TrapReasonBuilder.
llvm::Constant * getNullPointer(llvm::PointerType *T, QualType QT)
Get target specific null pointer.
const TargetInfo & getTarget() const
llvm::Constant * getMemberPointerConstant(const UnaryOperator *e)
const llvm::DataLayout & getDataLayout() const
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Value * createOpenCLIntToSamplerConversion(const Expr *E, CodeGenFunction &CGF)
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
LValue - This represents an lvalue references.
Definition CGValue.h:183
bool isBitField() const
Definition CGValue.h:285
bool isVolatileQualified() const
Definition CGValue.h:291
const Qualifiers & getQuals() const
Definition CGValue.h:344
Address getAddress() const
Definition CGValue.h:367
QualType getType() const
Definition CGValue.h:297
const CGBitFieldInfo & getBitFieldInfo() const
Definition CGValue.h:435
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:99
bool isAggregate() const
Definition CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:84
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:72
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, llvm::Type *DestTy, bool IsNonNull=false) const
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4300
QualType getComputationLHSType() const
Definition Expr.h:4334
QualType getComputationResultType() const
Definition Expr.h:4337
bool isSatisfied() const
Whether or not the concept with the given arguments was satisfied when the expression was created.
APValue getAPValueResult() const
Definition Expr.cpp:412
bool hasAPValueResult() const
Definition Expr.h:1157
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4388
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:4809
T * getAttr() const
Definition DeclBase.h:573
ChildElementIter< false > begin()
Definition Expr.h:5232
size_t getDataElementCount() const
Definition Expr.h:5148
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isIntegerConstantExpr(const ASTContext &Ctx) const
bool isGLValue() const
Definition Expr.h:287
@ SE_AllowSideEffects
Allow any unmodeled side effect.
Definition Expr.h:674
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3085
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3069
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:276
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition Expr.h:476
QualType getType() const
Definition Expr.h:144
llvm::APInt getValue() const
Returns an internal integer representation of the literal.
Definition Expr.h:1575
llvm::APFloat getValue() const
Definition Expr.h:1666
const Expr * getSubExpr() const
Definition Expr.h:1062
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition Expr.h:3853
unsigned getNumInits() const
Definition Expr.h:5329
bool hadArrayRangeDesignator() const
Definition Expr.h:5483
const Expr * getInit(unsigned Init) const
Definition Expr.h:5353
@ PostDecrInWhile
while (count--)
bool isSignedOverflowDefined() const
bool isOverflowPatternExcluded(OverflowPatternExclusionKind Kind) const
std::string OverflowHandler
The name of the handler function to be called when -ftrapv is specified.
Represents a matrix type, as defined in the Matrix Types clang extensions.
Definition TypeBase.h:4338
Expr * getBase() const
Definition Expr.h:3441
bool isArrow() const
Definition Expr.h:3548
VersionTuple getVersion() const
Definition ExprObjC.h:1723
ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC 'id' type.
Definition ExprObjC.h:1495
Expr * getBase() const
Definition ExprObjC.h:1520
SourceLocation getExprLoc() const LLVM_READONLY
Definition ExprObjC.h:1543
const ObjCMethodDecl * getMethodDecl() const
Definition ExprObjC.h:1361
QualType getReturnType() const
Definition DeclObjC.h:329
Represents a pointer to an Objective C object.
Definition TypeBase.h:7911
const ObjCObjectType * getObjectType() const
Gets the type pointed to by this ObjC pointer.
Definition TypeBase.h:7948
Expr * getIndexExpr(unsigned Idx)
Definition Expr.h:2586
const OffsetOfNode & getComponent(unsigned Idx) const
Definition Expr.h:2574
TypeSourceInfo * getTypeSourceInfo() const
Definition Expr.h:2567
unsigned getNumComponents() const
Definition Expr.h:2582
unsigned getArrayExprIndex() const
For an array element node, returns the index into the array of expressions.
Definition Expr.h:2479
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition Expr.h:2485
@ Array
An index into an array.
Definition Expr.h:2426
@ Identifier
A field in a dependent type, known only by its name.
Definition Expr.h:2430
@ Field
A field.
Definition Expr.h:2428
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition Expr.h:2433
Kind getKind() const
Determine what kind of offsetof node this is.
Definition Expr.h:2475
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition Expr.h:2495
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:1208
Expr * getSelectedExpr() const
Definition ExprCXX.h:4639
const Expr * getSubExpr() const
Definition Expr.h:2199
DynTypedNodeList getParents(const NodeT &Node)
Returns the parents of the given node (within the traversal scope).
Pointer-authentication qualifiers.
Definition TypeBase.h:152
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3329
A (possibly-)qualified type.
Definition TypeBase.h:937
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:1453
bool mayBeDynamicClass() const
Returns true if it is a class and it might be dynamic.
Definition Type.cpp:131
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8293
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8419
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1438
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8478
QualType getCanonicalType() const
Definition TypeBase.h:8345
bool UseExcessPrecision(const ASTContext &Ctx)
Definition Type.cpp:1613
bool mayBeNotDynamicClass() const
Returns true if it is not a class or if the class might not be dynamic.
Definition Type.cpp:136
bool isCanonical() const
Definition TypeBase.h:8350
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
void removePointerAuth()
Definition TypeBase.h:610
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4521
bool isSatisfied() const
Whether or not the requires clause is satisfied.
std::string ComputeName(ASTContext &Context) const
Definition Expr.cpp:586
static constexpr SanitizerMask bitPosToMask(const unsigned Pos)
Create a mask with a bit enabled at position Pos.
Definition Sanitizers.h:59
llvm::APSInt getShuffleMaskIdx(unsigned N) const
Definition Expr.h:4695
unsigned getNumSubExprs() const
getNumSubExprs - Return the size of the SubExprs array.
Definition Expr.h:4676
Expr * getExpr(unsigned Index)
getExpr - Return the Expr at the specified index.
Definition Expr.h:4682
unsigned getPackLength() const
Retrieve the length of the parameter pack.
Definition ExprCXX.h:4515
APValue EvaluateInContext(const ASTContext &Ctx, const Expr *DefaultExpr) const
Return the result of evaluating this SourceLocExpr in the specified (and possibly null) default argum...
Definition Expr.cpp:2281
SourceLocation getLocation() const
Definition Expr.h:5061
Encodes a location in the source.
CompoundStmt * getSubStmt()
Definition Expr.h:4612
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
void dump() const
Dumps the specified AST fragment and all subtrees to llvm::errs().
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:350
virtual bool useFP16ConversionIntrinsics() const
Check whether llvm intrinsics such as llvm.convert.to.fp16 should be used to convert to and from __fp...
VersionTuple getPlatformMinVersion() const
Retrieve the minimum desired version of the platform, to which the program should be compiled.
const llvm::fltSemantics & getHalfFormat() const
Definition TargetInfo.h:788
const llvm::fltSemantics & getBFloat16Format() const
Definition TargetInfo.h:798
const llvm::fltSemantics & getLongDoubleFormat() const
Definition TargetInfo.h:809
const llvm::fltSemantics & getFloat128Format() const
Definition TargetInfo.h:817
const llvm::fltSemantics & getIbm128Format() const
Definition TargetInfo.h:825
QualType getType() const
Return the type wrapped by this type source info.
Definition TypeBase.h:8275
bool getBoolValue() const
Definition ExprCXX.h:2947
const APValue & getAPValue() const
Definition ExprCXX.h:2952
bool isStoredAsBoolean() const
Definition ExprCXX.h:2943
bool isVoidType() const
Definition TypeBase.h:8892
bool isBooleanType() const
Definition TypeBase.h:9022
bool isSignableType(const ASTContext &Ctx) const
Definition TypeBase.h:8542
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2226
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition Type.cpp:2274
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2338
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8936
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9179
bool isReferenceType() const
Definition TypeBase.h:8554
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1910
bool isSveVLSBuiltinType() const
Determines if this is a sizeless type supported by the 'arm_sve_vector_bits' type attribute,...
Definition Type.cpp:2608
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
bool isExtVectorType() const
Definition TypeBase.h:8673
bool isExtVectorBoolType() const
Definition TypeBase.h:8677
bool isOCLIntelSubgroupAVCType() const
Definition TypeBase.h:8811
bool isBuiltinType() const
Helper methods to distinguish type categories.
Definition TypeBase.h:8653
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8665
bool isFixedPointType() const
Return true if this is a fixed point type according to ISO/IEC JTC1 SC22 WG14 N1169.
Definition TypeBase.h:8948
bool isHalfType() const
Definition TypeBase.h:8896
bool hasSignedIntegerRepresentation() const
Determine whether this type has an signed integer representation of some sort, e.g....
Definition Type.cpp:2244
bool isQueueT() const
Definition TypeBase.h:8782
bool isMatrixType() const
Definition TypeBase.h:8693
bool isEventT() const
Definition TypeBase.h:8774
bool isFunctionType() const
Definition TypeBase.h:8526
bool isVectorType() const
Definition TypeBase.h:8669
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2321
bool isFloatingType() const
Definition Type.cpp:2305
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2254
const T * castAsCanonical() const
Return this type's canonical type cast to the specified type.
Definition TypeBase.h:2929
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9112
bool isNullPtrType() const
Definition TypeBase.h:8929
QualType getTypeOfArgument() const
Gets the argument type, or the type of the argument expression, whichever is appropriate.
Definition Expr.h:2694
UnaryExprOrTypeTrait getKind() const
Definition Expr.h:2657
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
SourceLocation getExprLoc() const
Definition Expr.h:2368
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:2400
bool canOverflow() const
Returns true if the unary operator can cause an overflow.
Definition Expr.h:2298
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
bool isWeak() const
Determine whether this symbol is weakly-imported, or declared with the weak or weak-ref attr.
Definition Decl.cpp:5515
QualType getType() const
Definition Value.cpp:237
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3967
Represents a GCC generic vector type.
Definition TypeBase.h:4176
WhileStmt - This represents a 'while' stmt.
Definition Stmt.h:2688
Defines the clang::TargetInfo interface.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::ArgumentAdaptingMatcherFunc< internal::HasMatcher > has
Matches AST nodes that have child AST nodes that match the provided matcher.
const AstTypeMatcher< PointerType > pointerType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
llvm::APFloat APFloat
Definition Floating.h:27
llvm::APInt APInt
Definition FixedPoint.h:19
bool LE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1249
bool Load(InterpState &S, CodePtr OpPC)
Definition Interp.h:1912
bool GE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1264
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
if(T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T -> getSizeExpr())))
@ Result
The result type of a method or function.
Definition TypeBase.h:905
const FunctionProtoType * T
CastKind
CastKind - The kind of operation required for a conversion.
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
#define false
Definition stdbool.h:26
#define true
Definition stdbool.h:25
llvm::Value * TotalOffset
llvm::Value * OffsetOverflows
Structure with information about how a bitfield should be accessed.
unsigned Size
The total size of the bit-field, in bits.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
static TBAAAccessInfo getMayAliasInfo()
Definition CodeGenTBAA.h:63
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition Sanitizers.h:184