clang 23.0.0git
CGExprScalar.cpp
Go to the documentation of this file.
1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGHLSLRuntime.h"
17#include "CGObjCRuntime.h"
18#include "CGOpenMPRuntime.h"
19#include "CGRecordLayout.h"
20#include "CodeGenFunction.h"
21#include "CodeGenModule.h"
22#include "ConstantEmitter.h"
23#include "TargetInfo.h"
24#include "TrapReasonBuilder.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/DeclObjC.h"
28#include "clang/AST/Expr.h"
35#include "llvm/ADT/APFixedPoint.h"
36#include "llvm/ADT/ScopeExit.h"
37#include "llvm/IR/Argument.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constants.h"
40#include "llvm/IR/DataLayout.h"
41#include "llvm/IR/DerivedTypes.h"
42#include "llvm/IR/FixedPointBuilder.h"
43#include "llvm/IR/Function.h"
44#include "llvm/IR/GEPNoWrapFlags.h"
45#include "llvm/IR/GetElementPtrTypeIterator.h"
46#include "llvm/IR/GlobalVariable.h"
47#include "llvm/IR/Intrinsics.h"
48#include "llvm/IR/IntrinsicsPowerPC.h"
49#include "llvm/IR/MatrixBuilder.h"
50#include "llvm/IR/Module.h"
51#include "llvm/Support/TypeSize.h"
52#include <cstdarg>
53#include <optional>
54
55using namespace clang;
56using namespace CodeGen;
57using llvm::Value;
58
59//===----------------------------------------------------------------------===//
60// Scalar Expression Emitter
61//===----------------------------------------------------------------------===//
62
63namespace llvm {
64extern cl::opt<bool> EnableSingleByteCoverage;
65} // namespace llvm
66
67namespace {
68
69/// Determine whether the given binary operation may overflow.
70/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
71/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
72/// the returned overflow check is precise. The returned value is 'true' for
73/// all other opcodes, to be conservative.
74bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
75 BinaryOperator::Opcode Opcode, bool Signed,
76 llvm::APInt &Result) {
77 // Assume overflow is possible, unless we can prove otherwise.
78 bool Overflow = true;
79 const auto &LHSAP = LHS->getValue();
80 const auto &RHSAP = RHS->getValue();
81 if (Opcode == BO_Add) {
82 Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
83 : LHSAP.uadd_ov(RHSAP, Overflow);
84 } else if (Opcode == BO_Sub) {
85 Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
86 : LHSAP.usub_ov(RHSAP, Overflow);
87 } else if (Opcode == BO_Mul) {
88 Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
89 : LHSAP.umul_ov(RHSAP, Overflow);
90 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
91 if (Signed && !RHS->isZero())
92 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
93 else
94 return false;
95 }
96 return Overflow;
97}
98
99struct BinOpInfo {
100 Value *LHS;
101 Value *RHS;
102 QualType Ty; // Computation Type.
103 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
104 FPOptions FPFeatures;
105 const Expr *E; // Entire expr, for error unsupported. May not be binop.
106
107 /// Check if the binop can result in integer overflow.
108 bool mayHaveIntegerOverflow() const {
109 // Without constant input, we can't rule out overflow.
110 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
111 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
112 if (!LHSCI || !RHSCI)
113 return true;
114
115 llvm::APInt Result;
116 return ::mayHaveIntegerOverflow(
117 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
118 }
119
120 /// Check if the binop computes a division or a remainder.
121 bool isDivremOp() const {
122 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
123 Opcode == BO_RemAssign;
124 }
125
126 /// Check if the binop can result in an integer division by zero.
127 bool mayHaveIntegerDivisionByZero() const {
128 if (isDivremOp())
129 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
130 return CI->isZero();
131 return true;
132 }
133
134 /// Check if the binop can result in a float division by zero.
135 bool mayHaveFloatDivisionByZero() const {
136 if (isDivremOp())
137 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
138 return CFP->isZero();
139 return true;
140 }
141
142 /// Check if at least one operand is a fixed point type. In such cases, this
143 /// operation did not follow usual arithmetic conversion and both operands
144 /// might not be of the same type.
145 bool isFixedPointOp() const {
146 // We cannot simply check the result type since comparison operations return
147 // an int.
148 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
149 QualType LHSType = BinOp->getLHS()->getType();
150 QualType RHSType = BinOp->getRHS()->getType();
151 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
152 }
153 if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
154 return UnOp->getSubExpr()->getType()->isFixedPointType();
155 return false;
156 }
157
158 /// Check if the RHS has a signed integer representation.
159 bool rhsHasSignedIntegerRepresentation() const {
160 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
161 QualType RHSType = BinOp->getRHS()->getType();
162 return RHSType->hasSignedIntegerRepresentation();
163 }
164 return false;
165 }
166};
167
168static bool MustVisitNullValue(const Expr *E) {
169 // If a null pointer expression's type is the C++0x nullptr_t, then
170 // it's not necessarily a simple constant and it must be evaluated
171 // for its potential side effects.
172 return E->getType()->isNullPtrType();
173}
174
175/// If \p E is a widened promoted integer, get its base (unpromoted) type.
176static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
177 const Expr *E) {
178 const Expr *Base = E->IgnoreImpCasts();
179 if (E == Base)
180 return std::nullopt;
181
182 QualType BaseTy = Base->getType();
183 if (!Ctx.isPromotableIntegerType(BaseTy) ||
184 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
185 return std::nullopt;
186
187 return BaseTy;
188}
189
190/// Check if \p E is a widened promoted integer.
191static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
192 return getUnwidenedIntegerType(Ctx, E).has_value();
193}
194
195/// Check if we can skip the overflow check for \p Op.
196static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
197 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
198 "Expected a unary or binary operator");
199
200 // If the binop has constant inputs and we can prove there is no overflow,
201 // we can elide the overflow check.
202 if (!Op.mayHaveIntegerOverflow())
203 return true;
204
205 if (Op.Ty->isSignedIntegerType() &&
206 Ctx.isTypeIgnoredBySanitizer(SanitizerKind::SignedIntegerOverflow,
207 Op.Ty)) {
208 return true;
209 }
210
211 if (Op.Ty->isUnsignedIntegerType() &&
212 Ctx.isTypeIgnoredBySanitizer(SanitizerKind::UnsignedIntegerOverflow,
213 Op.Ty)) {
214 return true;
215 }
216
217 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Op.E);
218
219 if (UO && UO->getOpcode() == UO_Minus &&
222 UO->isIntegerConstantExpr(Ctx))
223 return true;
224
225 // If a unary op has a widened operand, the op cannot overflow.
226 if (UO)
227 return !UO->canOverflow();
228
229 // We usually don't need overflow checks for binops with widened operands.
230 // Multiplication with promoted unsigned operands is a special case.
231 const auto *BO = cast<BinaryOperator>(Op.E);
232 if (BO->hasExcludedOverflowPattern())
233 return true;
234
235 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
236 if (!OptionalLHSTy)
237 return false;
238
239 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
240 if (!OptionalRHSTy)
241 return false;
242
243 QualType LHSTy = *OptionalLHSTy;
244 QualType RHSTy = *OptionalRHSTy;
245
246 // This is the simple case: binops without unsigned multiplication, and with
247 // widened operands. No overflow check is needed here.
248 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
249 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
250 return true;
251
252 // For unsigned multiplication the overflow check can be elided if either one
253 // of the unpromoted types are less than half the size of the promoted type.
254 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
255 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
256 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
257}
258
259class ScalarExprEmitter
260 : public StmtVisitor<ScalarExprEmitter, Value*> {
261 CodeGenFunction &CGF;
262 CGBuilderTy &Builder;
263 bool IgnoreResultAssign;
264 llvm::LLVMContext &VMContext;
265public:
266
267 ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
268 : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
269 VMContext(cgf.getLLVMContext()) {
270 }
271
272 //===--------------------------------------------------------------------===//
273 // Utilities
274 //===--------------------------------------------------------------------===//
275
276 bool TestAndClearIgnoreResultAssign() {
277 bool I = IgnoreResultAssign;
278 IgnoreResultAssign = false;
279 return I;
280 }
281
  // Thin forwarders into CodeGenFunction.
  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  // Emit the sanitizer diagnostic + branch for the given set of checks
  // against a binary operation; defined later in this file.
  void EmitBinOpCheck(
      ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
      const BinOpInfo &Info);

  // Load the scalar value of an already-formed l-value.
  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }
295
  /// If \p E (its declaration or its typedef type) carries an align_value
  /// attribute, emit an alignment assumption for the loaded value \p V.
  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        // For references, look for the attribute on the referenced typedef.
        if (const auto *TTy =
                VD->getType().getNonReferenceType()->getAs<TypedefType>())
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here,
        // unless the alignment-assumption sanitizer is enabled,
        // then we prefer the assumption over alignment attribute
        // on IR function param.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    // Fall back to an attribute on the expression's typedef type, if any.
    if (!AVAttr)
      if (const auto *TTy = E->getType()->getAs<TypedefType>())
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }
329
330 /// EmitLoadOfLValue - Given an expression with complex type that represents a
331 /// value l-value, this method emits the address of the l-value, then loads
332 /// and returns the result.
333 Value *EmitLoadOfLValue(const Expr *E) {
334 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
335 E->getExprLoc());
336
337 EmitLValueAlignmentAssumption(E, V);
338 return V;
339 }
340
  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value.  This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// This is used for bitfield conversion checks as well.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    // Default: no implicit-conversion sanitizer checks.
    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    // Derive the check flags from the active sanitizer set.
    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and other fixed point or fixed point
  /// and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);
413
414 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
415 Value *EmitFloatToBoolConversion(Value *V) {
416 // Compare against 0.0 for fp scalars.
417 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
418 return Builder.CreateFCmpUNE(V, Zero, "tobool");
419 }
420
421 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
422 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
423 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
424
425 return Builder.CreateICmpNE(V, Zero, "tobool");
426 }
427
428 Value *EmitIntToBoolConversion(Value *V) {
429 // Because of the type rules of C, we often end up computing a
430 // logical value, then zero extending it to int, then wanting it
431 // as a logical value again. Optimize this common case.
432 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
433 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
434 Value *Result = ZI->getOperand(0);
435 // If there aren't any more uses, zap the instruction to save space.
436 // Note that there can be more uses, for example if this
437 // is the result of an assignment.
438 if (ZI->use_empty())
439 ZI->eraseFromParent();
440 return Result;
441 }
442 }
443
444 return Builder.CreateIsNotNull(V, "tobool");
445 }
446
  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  // Entry point: attach E's source location to everything emitted for it,
  // then dispatch through StmtVisitor.
  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  // Non-expression statements cannot yield a scalar value; dump the node for
  // diagnosis and abort.
  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);
461
462 Value *VisitConstantExpr(ConstantExpr *E) {
463 // A constant expression of type 'void' generates no code and produces no
464 // value.
465 if (E->getType()->isVoidType())
466 return nullptr;
467
468 if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
469 if (E->isGLValue()) {
470 // This was already converted to an rvalue when it was constant
471 // evaluated.
472 if (E->hasAPValueResult() && !E->getAPValueResult().isLValue())
473 return Result;
474 return CGF.EmitLoadOfScalar(
475 Address(Result, CGF.convertTypeForLoadStore(E->getType()),
477 /*Volatile*/ false, E->getType(), E->getExprLoc());
478 }
479 return Result;
480 }
481 return Visit(E->getSubExpr());
482 }
  // The following visitors are transparent wrappers: they emit no code of
  // their own and simply delegate to the contained expression.
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
501
  // Leaves: literals and other expressions that fold directly to constants.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    // Character literals are always stored in an unsigned (even for signed
    // char), so allow implicit truncation here.
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue(),
                                  /*IsSigned=*/false, /*ImplicitTrunc=*/true);
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    // T() for void T produces no value at all.
    if (E->getType()->isVoidType())
      return nullptr;

    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
  Value *VisitEmbedExpr(EmbedExpr *E);
550
551 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
552 if (E->isGLValue())
553 return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
554 E->getExprLoc());
555
556 // Otherwise, assume the mapping is the scalar directly.
558 }
559
  Value *VisitOpenACCAsteriskSizeExpr(OpenACCAsteriskSizeExpr *E) {
    llvm_unreachable("Codegen for this isn't defined/implemented");
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    // Prefer emitting a constant directly when the referenced declaration
    // folds to one; otherwise load it through an l-value.
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
580 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
581 if (E->getMethodDecl() &&
583 return EmitLoadOfLValue(E);
584 return CGF.EmitObjCMessageExpr(E).getScalarVal();
585 }
586
587 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
588 LValue LV = CGF.EmitObjCIsaExpr(E);
590 return V;
591 }
592
593 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
594 VersionTuple Version = E->getVersion();
595
596 // If we're checking for a platform older than our minimum deployment
597 // target, we can fold the check away.
598 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
599 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
600
601 return CGF.EmitBuiltinAvailable(Version);
602 }
603
  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSingleSubscriptExpr(MatrixSingleSubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    // Only valid while an enclosing ArrayInitLoopExpr is being emitted.
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    // Record the cast's written type (for debug info) before emitting it as
    // an ordinary cast.
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);
636
637 Value *VisitCallExpr(const CallExpr *E) {
639 return EmitLoadOfLValue(E);
640
641 Value *V = CGF.EmitCallExpr(E).getScalarVal();
642
643 EmitLValueAlignmentAssumption(E, V);
644 return V;
645 }
646
  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  // All four inc/dec forms funnel into EmitScalarPrePostIncDec; the two bools
  // encode (isInc, isPre).
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);
673
674
  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }

  // Unary +/-, real/imag: each has an optional promotion type (see
  // getPromotionType) and a helper that does the actual emission.
  Value *VisitUnaryPlus(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryMinus(const UnaryOperator *E,
                         QualType PromotionType = QualType());
  Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);

  Value *VisitUnaryNot (const UnaryOperator *E);
  Value *VisitUnaryLNot (const UnaryOperator *E);
  Value *VisitUnaryReal(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryImag(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    // __extension__ has no codegen effect; emit the operand directly.
    return Visit(E->getSubExpr());
  }
705
  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    // A materialized temporary is an l-value; load its scalar value.
    return EmitLoadOfLValue(E);
  }
710 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
711 auto &Ctx = CGF.getContext();
714 return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
715 SLE->getType());
716 }
717
  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    // Enter a scope so source locations inside the default argument are
    // attributed to the call site.
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    // delete-expressions have type void; they yield no value.
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }
738
739 Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
740 if (E->isStoredAsBoolean())
741 return llvm::ConstantInt::get(ConvertType(E->getType()),
742 E->getBoolValue());
743 assert(E->getAPValue().isInt() && "APValue type not supported");
744 return llvm::ConstantInt::get(ConvertType(E->getType()),
745 E->getAPValue().getInt());
746 }
747
  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    // Satisfaction was already determined by Sema; emit it as an i1 constant.
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    // throw has type void; it produces no value.
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }
786
  // Binary Operators.
  /// Emit Ops.LHS * Ops.RHS, selecting the IR form from the computation type
  /// and the configured signed-overflow behavior / active sanitizers.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      // Note the deliberate fallthroughs: when the overflow sanitizer is on,
      // each mode degrades to the stricter handling below it.
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      // Matrix*matrix needs the multiply intrinsic; matrix*scalar (either
      // side) is an elementwise scalar multiply.
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero,bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);

  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
  // non powers of two.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  // The bitwise operators have no overflow or UB concerns; emit the IR
  // instruction directly.
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                                  Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
883
884 QualType getPromotionType(QualType Ty) {
885 const auto &Ctx = CGF.getContext();
886 if (auto *CT = Ty->getAs<ComplexType>()) {
887 QualType ElementType = CT->getElementType();
888 if (ElementType.UseExcessPrecision(Ctx))
889 return Ctx.getComplexType(Ctx.FloatTy);
890 }
891
892 if (Ty.UseExcessPrecision(Ctx)) {
893 if (auto *VT = Ty->getAs<VectorType>()) {
894 unsigned NumElements = VT->getNumElements();
895 return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
896 }
897 return Ctx.FloatTy;
898 }
899
900 return QualType();
901 }
902
 903 // Binary operators and binary compound assignment operators.
 // HANDLEBINOP(OP) instantiates two visitors:
 //  - VisitBinOP: evaluates the operands via EmitBinOps (promoting for
 //    excess precision when getPromotionType yields a non-null type), calls
 //    EmitOP, then un-promotes the result back to the expression type.
 //  - VisitBinOPAssign: routes through EmitCompoundAssign with the same
 //    EmitOP member as the arithmetic callback.
 904#define HANDLEBINOP(OP) \
 905 Value *VisitBin##OP(const BinaryOperator *E) { \
 906 QualType promotionTy = getPromotionType(E->getType()); \
 907 auto result = Emit##OP(EmitBinOps(E, promotionTy)); \
 908 if (result && !promotionTy.isNull()) \
 909 result = EmitUnPromotedValue(result, E->getType()); \
 910 return result; \
 911 } \
 912 Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) { \
 913 ApplyAtomGroup Grp(CGF.getDebugInfo()); \
 914 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP); \
 915 }
 916 HANDLEBINOP(Mul)
 917 HANDLEBINOP(Div)
 918 HANDLEBINOP(Rem)
 919 HANDLEBINOP(Add)
 920 HANDLEBINOP(Sub)
 921 HANDLEBINOP(Shl)
 922 HANDLEBINOP(Shr)
 // NOTE(review): HANDLEBINOP(And) and HANDLEBINOP(Or) (original lines
 // 923/925) appear to have been dropped by the listing extraction — verify
 // against the upstream source.
 924 HANDLEBINOP(Xor)
 926#undef HANDLEBINOP
927
 928 // Comparisons.
 // EmitCompare selects among the three predicates based on the operand
 // types: unsigned-integer, signed-integer, or floating-point; IsSignaling
 // picks a signaling FP comparison.
 929 Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
 930 llvm::CmpInst::Predicate SICmpOpc,
 931 llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
 932#define VISITCOMP(CODE, UI, SI, FP, SIG) \
 933 Value *VisitBin##CODE(const BinaryOperator *E) { \
 934 return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
 935 llvm::FCmpInst::FP, SIG); }
 // Relational operators (<, >, <=, >=) use ordered FP predicates and are
 // signaling; (in)equality uses quiet OEQ/UNE.
 936 VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
 937 VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
 938 VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
 939 VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
 940 VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
 941 VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
 942#undef VISITCOMP
943
 944 Value *VisitBinAssign (const BinaryOperator *E);
 945
 // Logical &&, || and the comma operator need custom control flow, so they
 // are defined out of line.
 946 Value *VisitBinLAnd (const BinaryOperator *E);
 947 Value *VisitBinLOr (const BinaryOperator *E);
 948 Value *VisitBinComma (const BinaryOperator *E);
 949
 // Pointer-to-member access (.* and ->*) is emitted as an lvalue load.
 950 Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
 951 Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
 952
 // Rewritten binary operators are emitted through their semantic form.
 953 Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
 954 return Visit(E->getSemanticForm());
 955 }
 956
 957 // Other Operators.
 958 Value *VisitBlockExpr(const BlockExpr *BE);
 959 Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
 960 Value *VisitChooseExpr(ChooseExpr *CE);
 961 Value *VisitVAArgExpr(VAArgExpr *VE);
 // Objective-C literal expressions delegate to CodeGenFunction.
 962 Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
 963 return CGF.EmitObjCStringLiteral(E);
 964 }
 965 Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
 966 return CGF.EmitObjCBoxedExpr(E);
 967 }
 968 Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
 969 return CGF.EmitObjCArrayLiteral(E);
 970 }
 971 Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
 972 return CGF.EmitObjCDictionaryLiteral(E);
 973 }
 974 Value *VisitAsTypeExpr(AsTypeExpr *CE);
 975 Value *VisitAtomicExpr(AtomicExpr *AE);
 // A pack indexing expression forwards to the selected pack element.
 976 Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
 977 return Visit(E->getSelectedExpr());
 978 }
979};
980} // end anonymous namespace.
981
982//===----------------------------------------------------------------------===//
983// Utilities
984//===----------------------------------------------------------------------===//
985
986/// EmitConversionToBool - Convert the specified expression value to a
987/// boolean (i1) truth value. This is equivalent to "Val != 0".
988Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
989 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
990
991 if (SrcType->isRealFloatingType())
992 return EmitFloatToBoolConversion(Src);
993
994 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
995 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
996
997 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
998 "Unknown scalar type to convert");
999
1000 if (isa<llvm::IntegerType>(Src->getType()))
1001 return EmitIntToBoolConversion(Src);
1002
1003 assert(isa<llvm::PointerType>(Src->getType()));
1004 return EmitPointerToBoolConversion(Src, SrcType);
1005}
1006
// Emit the float-cast-overflow sanitizer check for a floating-point ->
// integer conversion: compute the open interval (MinSrc, MaxSrc) of source
// values whose truncation fits DstType, and check that Src lies strictly
// inside it. The ordered compares are false for NaN, so NaN (and any
// out-of-range value, including +/-Inf) fails the check. No-op when the
// destination is not an integer type.
1007void ScalarExprEmitter::EmitFloatConversionCheck(
1008 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
1009 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
1010 assert(SrcType->isFloatingType() && "not a conversion from floating point");
1011 if (!isa<llvm::IntegerType>(DstTy))
1012 return;
1013
1014 auto CheckOrdinal = SanitizerKind::SO_FloatCastOverflow;
1015 auto CheckHandler = SanitizerHandler::FloatCastOverflow;
1016 SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
1017 using llvm::APFloat;
1018 using llvm::APSInt;
1019
1020 llvm::Value *Check = nullptr;
1021 const llvm::fltSemantics &SrcSema =
1022 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
1023
1024 // Floating-point to integer. This has undefined behavior if the source is
1025 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
1026 // to an integer).
1027 unsigned Width = CGF.getContext().getIntWidth(DstType);
// NOTE(review): the declaration of 'Unsigned' (original line 1028,
// presumably derived from DstType's unsignedness) was dropped by the
// listing extraction — verify against the upstream source.
1029
1030 APSInt Min = APSInt::getMinValue(Width, Unsigned);
1031 APFloat MinSrc(SrcSema, APFloat::uninitialized);
1032 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
1033 APFloat::opOverflow)
1034 // Don't need an overflow check for lower bound. Just check for
1035 // -Inf/NaN.
1036 MinSrc = APFloat::getInf(SrcSema, true);
1037 else
1038 // Find the largest value which is too small to represent (before
1039 // truncation toward zero).
1040 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
1041
1042 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
1043 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
1044 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
1045 APFloat::opOverflow)
1046 // Don't need an overflow check for upper bound. Just check for
1047 // +Inf/NaN.
1048 MaxSrc = APFloat::getInf(SrcSema, false);
1049 else
1050 // Find the smallest value which is too large to represent (before
1051 // truncation toward zero).
1052 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
1053
1054 // If we're converting from __half, convert the range to float to match
1055 // the type of src.
1056 if (OrigSrcType->isHalfType()) {
1057 const llvm::fltSemantics &Sema =
1058 CGF.getContext().getFloatTypeSemantics(SrcType);
1059 bool IsInexact;
1060 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1061 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1062 }
1063
// Strictly-greater / strictly-less: the bounds themselves were adjusted
// above to be the nearest unrepresentable values.
1064 llvm::Value *GE =
1065 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
1066 llvm::Value *LE =
1067 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
1068 Check = Builder.CreateAnd(GE, LE);
1069
1070 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
1071 CGF.EmitCheckTypeDescriptor(OrigSrcType),
1072 CGF.EmitCheckTypeDescriptor(DstType)};
1073 CGF.EmitCheck(std::make_pair(Check, CheckOrdinal), CheckHandler, StaticArgs,
1074 OrigSrc);
1075}
1076
1077// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1078// Returns 'i1 false' when the truncation Src -> Dst was lossy.
// The check re-extends Dst to Src's width (using Dst's signedness) and
// compares it with the original Src; equality means the truncation was
// lossless. Also classifies the check as signed vs unsigned truncation.
1079static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1080 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
// NOTE(review): the signature line (original line 1081, naming this helper
// — presumably EmitIntegerTruncationCheckHelper — and its first parameters)
// was dropped by the listing extraction — verify upstream.
1082 QualType DstType, CGBuilderTy &Builder) {
1083 llvm::Type *SrcTy = Src->getType();
1084 llvm::Type *DstTy = Dst->getType();
1085 (void)DstTy; // Only used in assert()
1086
1087 // This should be truncation of integral types.
1088 assert(Src != Dst);
1089 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
1090 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1091 "non-integer llvm type");
1092
1093 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1094 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1095
1096 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
1097 // Else, it is a signed truncation.
1098 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
// NOTE(review): the declaration of 'Ordinal' (original line 1099, a
// SanitizerKind::SanitizerOrdinal) was dropped by the listing extraction.
1100 if (!SrcSigned && !DstSigned) {
1101 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1102 Ordinal = SanitizerKind::SO_ImplicitUnsignedIntegerTruncation;
1103 } else {
1104 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1105 Ordinal = SanitizerKind::SO_ImplicitSignedIntegerTruncation;
1106 }
1107
1108 llvm::Value *Check = nullptr;
1109 // 1. Extend the truncated value back to the same width as the Src.
1110 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
1111 // 2. Equality-compare with the original source value
1112 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1113 // If the comparison result is 'i1 false', then the truncation was lossy.
1114 return std::make_pair(Kind, std::make_pair(Check, Ordinal));
1115}
1116
// NOTE(review): the signature line (original line 1117) was dropped by the
// listing extraction; this is the tail of a static predicate that is true
// only for integer -> integer conversions (used below to skip pointer/bool
// conversions) — verify the function name against the upstream source.
1118 QualType SrcType, QualType DstType) {
1119 return SrcType->isIntegerType() && DstType->isIntegerType();
1120 }
1121
// Emit the implicit-integer-truncation sanitizer check for the conversion
// Src (of SrcType) -> Dst (of DstType). Bails out early when the sanitizer
// is disabled, when the conversion is not int->int, when it is not actually
// a truncation, or when a different sanitizer will cover the case.
1122void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1123 Value *Dst, QualType DstType,
1124 SourceLocation Loc) {
1125 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
1126 return;
1127
1128 // We only care about int->int conversions here.
1129 // We ignore conversions to/from pointer and/or bool.
// NOTE(review): the line opening this condition (original line 1130, a call
// to the int->int eligibility predicate with SrcType) was dropped by the
// listing extraction — verify upstream.
1131 DstType))
1132 return;
1133
1134 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1135 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1136 // This must be truncation. Else we do not care.
1137 if (SrcBits <= DstBits)
1138 return;
1139
1140 assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1141
1142 // If the integer sign change sanitizer is enabled,
1143 // and we are truncating from larger unsigned type to smaller signed type,
1144 // let that next sanitizer deal with it.
1145 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1146 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1147 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1148 (!SrcSigned && DstSigned))
1149 return;
1150
1151 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1152 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1153 Check;
1154
1155 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1156 {
1157 // We don't know the check kind until we call
1158 // EmitIntegerTruncationCheckHelper, but we want to annotate
1159 // EmitIntegerTruncationCheckHelper's instructions too.
1160 SanitizerDebugLocation SanScope(
1161 &CGF,
1162 {SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1163 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1164 CheckHandler);
1165 Check =
1166 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1167 // If the comparison result is 'i1 false', then the truncation was lossy.
1168 }
1169
1170 // Do we care about this type of truncation?
1171 if (!CGF.SanOpts.has(Check.second.second))
1172 return;
1173
1174 SanitizerDebugLocation SanScope(&CGF, {Check.second.second}, CheckHandler);
1175
1176 // Does some SSCL ignore this type?
// NOTE(review): the line opening this condition (original line 1177,
// presumably a call to ASTContext::isTypeIgnoredBySanitizer) was dropped by
// the listing extraction — verify upstream.
1178 SanitizerMask::bitPosToMask(Check.second.second), DstType))
1179 return;
1180
1181 llvm::Constant *StaticArgs[] = {
1182 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1183 CGF.EmitCheckTypeDescriptor(DstType),
1184 llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
1185 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1186
1187 CGF.EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
1188}
1189
1190static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
1191 const char *Name,
1192 CGBuilderTy &Builder) {
1193 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1194 llvm::Type *VTy = V->getType();
1195 if (!VSigned) {
1196 // If the value is unsigned, then it is never negative.
1197 return llvm::ConstantInt::getFalse(VTy->getContext());
1198 }
1199 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1200 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1201 llvm::Twine(Name) + "." + V->getName() +
1202 ".negativitycheck");
1203}
1204
1205// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1206// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1207static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1208 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
// NOTE(review): the signature line (original line 1209, naming this helper
// — presumably EmitIntegerSignChangeCheckHelper — and its first parameters)
// was dropped by the listing extraction — verify upstream.
1210 QualType DstType, CGBuilderTy &Builder) {
1211 llvm::Type *SrcTy = Src->getType();
1212 llvm::Type *DstTy = Dst->getType();
1213
1214 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1215 "non-integer llvm type");
1216
1217 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1218 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1219 (void)SrcSigned; // Only used in assert()
1220 (void)DstSigned; // Only used in assert()
1221 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1222 unsigned DstBits = DstTy->getScalarSizeInBits();
1223 (void)SrcBits; // Only used in assert()
1224 (void)DstBits; // Only used in assert()
1225
1226 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1227 "either the widths should be different, or the signednesses.");
1228
1229 // 1. Was the old Value negative?
1230 llvm::Value *SrcIsNegative =
1231 EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
1232 // 2. Is the new Value negative?
1233 llvm::Value *DstIsNegative =
1234 EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
1235 // 3. Now, was the 'negativity status' preserved during the conversion?
1236 // NOTE: conversion from negative to zero is considered to change the sign.
1237 // (We want to get 'false' when the conversion changed the sign)
1238 // So we should just equality-compare the negativity statuses.
1239 llvm::Value *Check = nullptr;
1240 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1241 // If the comparison result is 'false', then the conversion changed the sign.
1242 return std::make_pair(
1243 ScalarExprEmitter::ICCK_IntegerSignChange,
1244 std::make_pair(Check, SanitizerKind::SO_ImplicitIntegerSignChange));
1245}
1246
// Emit the implicit-integer-sign-change sanitizer check for the conversion
// Src (of SrcType) -> Dst (of DstType), skipping every case that provably
// cannot change the sign or that another enabled sanitizer already covers.
1247void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1248 Value *Dst, QualType DstType,
1249 SourceLocation Loc) {
1250 if (!CGF.SanOpts.has(SanitizerKind::SO_ImplicitIntegerSignChange))
1251 return;
1252
1253 llvm::Type *SrcTy = Src->getType();
1254 llvm::Type *DstTy = Dst->getType();
1255
1256 // We only care about int->int conversions here.
1257 // We ignore conversions to/from pointer and/or bool.
// NOTE(review): the line opening this condition (original line 1258, a call
// to the int->int eligibility predicate with SrcType) was dropped by the
// listing extraction — verify upstream.
1259 DstType))
1260 return;
1261
1262 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1263 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1264 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1265 unsigned DstBits = DstTy->getScalarSizeInBits();
1266
1267 // Now, we do not need to emit the check in *all* of the cases.
1268 // We can avoid emitting it in some obvious cases where it would have been
1269 // dropped by the opt passes (instcombine) always anyways.
1270 // If it's a cast between effectively the same type, no check.
1271 // NOTE: this is *not* equivalent to checking the canonical types.
1272 if (SrcSigned == DstSigned && SrcBits == DstBits)
1273 return;
1274 // At least one of the values needs to have signed type.
1275 // If both are unsigned, then obviously, neither of them can be negative.
1276 if (!SrcSigned && !DstSigned)
1277 return;
1278 // If the conversion is to *larger* *signed* type, then no check is needed.
1279 // Because either sign-extension happens (so the sign will remain),
1280 // or zero-extension will happen (the sign bit will be zero.)
1281 if ((DstBits > SrcBits) && DstSigned)
1282 return;
1283 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1284 (SrcBits > DstBits) && SrcSigned) {
1285 // If the signed integer truncation sanitizer is enabled,
1286 // and this is a truncation from signed type, then no check is needed.
1287 // Because here sign change check is interchangeable with truncation check.
1288 return;
1289 }
1290 // Does an SSCL have an entry for the DstType under its respective sanitizer
1291 // section?
1292 if (DstSigned && CGF.getContext().isTypeIgnoredBySanitizer(
1293 SanitizerKind::ImplicitSignedIntegerTruncation, DstType))
1294 return;
1295 if (!DstSigned &&
// NOTE(review): the continuation line (original line 1296, presumably the
// matching isTypeIgnoredBySanitizer call) was dropped by the listing
// extraction — verify upstream.
1297 SanitizerKind::ImplicitUnsignedIntegerTruncation, DstType))
1298 return;
1299 // That's it. We can't rule out any more cases with the data we have.
1300
1301 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1302 SanitizerDebugLocation SanScope(
1303 &CGF,
1304 {SanitizerKind::SO_ImplicitIntegerSignChange,
1305 SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1306 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1307 CheckHandler);
1308
1309 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1310 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1311 Check;
1312
1313 // Each of these checks needs to return 'false' when an issue was detected.
1314 ImplicitConversionCheckKind CheckKind;
1315 llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
1316 2>
1317 Checks;
1318 // So we can 'and' all the checks together, and still get 'false',
1319 // if at least one of the checks detected an issue.
1320
1321 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1322 CheckKind = Check.first;
1323 Checks.emplace_back(Check.second);
1324
1325 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1326 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1327 // If the signed integer truncation sanitizer was enabled,
1328 // and we are truncating from larger unsigned type to smaller signed type,
1329 // let's handle the case we skipped in that check.
1330 Check =
1331 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1332 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1333 Checks.emplace_back(Check.second);
1334 // If the comparison result is 'i1 false', then the truncation was lossy.
1335 }
1336
1337 llvm::Constant *StaticArgs[] = {
1338 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1339 CGF.EmitCheckTypeDescriptor(DstType),
1340 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1341 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1342 // EmitCheck() will 'and' all the checks together.
1343 CGF.EmitCheck(Checks, CheckHandler, StaticArgs, {Src, Dst});
1344}
1345
1346// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1347// Returns 'i1 false' when the truncation Src -> Dst was lossy.
// Bitfield variant of the truncation helper: same re-extend-and-compare
// strategy, but always reports under SO_ImplicitBitfieldConversion.
1348static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1349 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
// NOTE(review): the signature line (original line 1350, naming this helper
// — presumably EmitBitfieldTruncationCheckHelper — and its first
// parameters) was dropped by the listing extraction — verify upstream.
1351 QualType DstType, CGBuilderTy &Builder) {
1352 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1353 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1354
1355 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1356 if (!SrcSigned && !DstSigned)
1357 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1358 else
1359 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1360
1361 llvm::Value *Check = nullptr;
1362 // 1. Extend the truncated value back to the same width as the Src.
1363 Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
1364 // 2. Equality-compare with the original source value
1365 Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
1366 // If the comparison result is 'i1 false', then the truncation was lossy.
1367
1368 return std::make_pair(
1369 Kind,
1370 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1371}
1372
1373// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1374// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
// Bitfield variant of the sign-change helper: compares the negativity of
// Src and Dst, reporting under SO_ImplicitBitfieldConversion.
1375static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1376 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
// NOTE(review): the signature line (original line 1377, naming this helper
// — presumably EmitBitfieldSignChangeCheckHelper — and its first
// parameters) was dropped by the listing extraction — verify upstream.
1378 QualType DstType, CGBuilderTy &Builder) {
1379 // 1. Was the old Value negative?
1380 llvm::Value *SrcIsNegative =
1381 EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
1382 // 2. Is the new Value negative?
1383 llvm::Value *DstIsNegative =
1384 EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
1385 // 3. Now, was the 'negativity status' preserved during the conversion?
1386 // NOTE: conversion from negative to zero is considered to change the sign.
1387 // (We want to get 'false' when the conversion changed the sign)
1388 // So we should just equality-compare the negativity statuses.
1389 llvm::Value *Check = nullptr;
1390 Check =
1391 Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
1392 // If the comparison result is 'false', then the conversion changed the sign.
1393 return std::make_pair(
1394 ScalarExprEmitter::ICCK_IntegerSignChange,
1395 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1396}
1397
// NOTE(review): the opening signature line (original line 1398 — presumably
// 'void CodeGenFunction::EmitBitfieldConversionCheck(Value *Src, QualType
// SrcType,') was dropped by the listing extraction — verify upstream.
// Emits the implicit-bitfield-conversion sanitizer check for a value stored
// into (or loaded out of) a bitfield described by Info: a truncation check
// when the bitfield is narrower, otherwise a sign-change check when one
// cannot be ruled out statically.
1399 Value *Dst, QualType DstType,
1400 const CGBitFieldInfo &Info,
1401 SourceLocation Loc) {
1402
1403 if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
1404 return;
1405
1406 // We only care about int->int conversions here.
1407 // We ignore conversions to/from pointer and/or bool.
// NOTE(review): the line opening this condition (original line 1408, a call
// to the int->int eligibility predicate with SrcType) was dropped by the
// listing extraction — verify upstream.
1409 DstType))
1410 return;
1411
1412 if (DstType->isBooleanType() || SrcType->isBooleanType())
1413 return;
1414
1415 // This should be truncation of integral types.
1416 assert(isa<llvm::IntegerType>(Src->getType()) &&
1417 isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");
1418
1419 // TODO: Calculate src width to avoid emitting code
1420 // for unecessary cases.
1421 unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
1422 unsigned DstBits = Info.Size;
1423
1424 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1425 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1426
1427 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1428 SanitizerDebugLocation SanScope(
1429 this, {SanitizerKind::SO_ImplicitBitfieldConversion}, CheckHandler);
1430
1431 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1432 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1433 Check;
1434
1435 // Truncation
1436 bool EmitTruncation = DstBits < SrcBits;
1437 // If Dst is signed and Src unsigned, we want to be more specific
1438 // about the CheckKind we emit, in this case we want to emit
1439 // ICCK_SignedIntegerTruncationOrSignChange.
1440 bool EmitTruncationFromUnsignedToSigned =
1441 EmitTruncation && DstSigned && !SrcSigned;
1442 // Sign change
1443 bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
1444 bool BothUnsigned = !SrcSigned && !DstSigned;
1445 bool LargerSigned = (DstBits > SrcBits) && DstSigned;
1446 // We can avoid emitting sign change checks in some obvious cases
1447 // 1. If Src and Dst have the same signedness and size
1448 // 2. If both are unsigned sign check is unecessary!
1449 // 3. If Dst is signed and bigger than Src, either
1450 // sign-extension or zero-extension will make sure
1451 // the sign remains.
1452 bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;
1453
1454 if (EmitTruncation)
1455 Check =
1456 EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1457 else if (EmitSignChange) {
1458 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1459 "either the widths should be different, or the signednesses.");
1460 Check =
1461 EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1462 } else
1463 return;
1464
1465 ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
1466 if (EmitTruncationFromUnsignedToSigned)
1467 CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;
1468
1469 llvm::Constant *StaticArgs[] = {
// NOTE(review): the first initializer line (original line 1470, presumably
// 'EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(SrcType),') was
// dropped by the listing extraction — verify upstream.
1471 EmitCheckTypeDescriptor(DstType),
1472 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1473 llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};
1474
1475 EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
1476}
1477
1478Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1479 QualType DstType, llvm::Type *SrcTy,
1480 llvm::Type *DstTy,
1481 ScalarConversionOpts Opts) {
1482 // The Element types determine the type of cast to perform.
1483 llvm::Type *SrcElementTy;
1484 llvm::Type *DstElementTy;
1485 QualType SrcElementType;
1486 QualType DstElementType;
1487 if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1488 SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1489 DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1490 SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1491 DstElementType = DstType->castAs<MatrixType>()->getElementType();
1492 } else {
1493 assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
1494 "cannot cast between matrix and non-matrix types");
1495 SrcElementTy = SrcTy;
1496 DstElementTy = DstTy;
1497 SrcElementType = SrcType;
1498 DstElementType = DstType;
1499 }
1500
1501 if (isa<llvm::IntegerType>(SrcElementTy)) {
1502 bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1503 if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1504 InputSigned = true;
1505 }
1506
1507 if (isa<llvm::IntegerType>(DstElementTy))
1508 return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1509 if (InputSigned)
1510 return Builder.CreateSIToFP(Src, DstTy, "conv");
1511 return Builder.CreateUIToFP(Src, DstTy, "conv");
1512 }
1513
1514 if (isa<llvm::IntegerType>(DstElementTy)) {
1515 assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
1516 bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();
1517
1518 // If we can't recognize overflow as undefined behavior, assume that
1519 // overflow saturates. This protects against normal optimizations if we are
1520 // compiling with non-standard FP semantics.
1521 if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
1522 llvm::Intrinsic::ID IID =
1523 IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
1524 return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
1525 }
1526
1527 if (IsSigned)
1528 return Builder.CreateFPToSI(Src, DstTy, "conv");
1529 return Builder.CreateFPToUI(Src, DstTy, "conv");
1530 }
1531
1532 if ((DstElementTy->is16bitFPTy() && SrcElementTy->is16bitFPTy())) {
1533 Value *FloatVal = Builder.CreateFPExt(Src, Builder.getFloatTy(), "fpext");
1534 return Builder.CreateFPTrunc(FloatVal, DstTy, "fptrunc");
1535 }
1536 if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1537 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1538 return Builder.CreateFPExt(Src, DstTy, "conv");
1539}
1540
1541/// Emit a conversion from the specified type to the specified destination type,
1542/// both of which are LLVM scalar types.
1543Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1544 QualType DstType,
1545 SourceLocation Loc,
1546 ScalarConversionOpts Opts) {
1547 // All conversions involving fixed point types should be handled by the
1548 // EmitFixedPoint family functions. This is done to prevent bloating up this
1549 // function more, and although fixed point numbers are represented by
1550 // integers, we do not want to follow any logic that assumes they should be
1551 // treated as integers.
1552 // TODO(leonardchan): When necessary, add another if statement checking for
1553 // conversions to fixed point types from other types.
1554 if (SrcType->isFixedPointType()) {
1555 if (DstType->isBooleanType())
1556 // It is important that we check this before checking if the dest type is
1557 // an integer because booleans are technically integer types.
1558 // We do not need to check the padding bit on unsigned types if unsigned
1559 // padding is enabled because overflow into this bit is undefined
1560 // behavior.
1561 return Builder.CreateIsNotNull(Src, "tobool");
1562 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1563 DstType->isRealFloatingType())
1564 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1565
1566 llvm_unreachable(
1567 "Unhandled scalar conversion from a fixed point type to another type.");
1568 } else if (DstType->isFixedPointType()) {
1569 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1570 // This also includes converting booleans and enums to fixed point types.
1571 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1572
1573 llvm_unreachable(
1574 "Unhandled scalar conversion to a fixed point type from another type.");
1575 }
1576
1577 QualType NoncanonicalSrcType = SrcType;
1578 QualType NoncanonicalDstType = DstType;
1579
1580 SrcType = CGF.getContext().getCanonicalType(SrcType);
1581 DstType = CGF.getContext().getCanonicalType(DstType);
1582 if (SrcType == DstType) return Src;
1583
1584 if (DstType->isVoidType()) return nullptr;
1585
1586 llvm::Value *OrigSrc = Src;
1587 QualType OrigSrcType = SrcType;
1588 llvm::Type *SrcTy = Src->getType();
1589
1590 // Handle conversions to bool first, they are special: comparisons against 0.
1591 if (DstType->isBooleanType())
1592 return EmitConversionToBool(Src, SrcType);
1593
1594 llvm::Type *DstTy = ConvertType(DstType);
1595
1596 // Cast from half through float if half isn't a native type.
1597 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1598 // Cast to FP using the intrinsic if the half type itself isn't supported.
1599 if (DstTy->isFloatingPointTy()) {
1601 Value *BitCast = Builder.CreateBitCast(Src, CGF.CGM.HalfTy);
1602 return Builder.CreateFPExt(BitCast, DstTy, "conv");
1603 }
1604 } else {
1605 // Cast to other types through float, using either the intrinsic or FPExt,
1606 // depending on whether the half type itself is supported
1607 // (as opposed to operations on half, available with NativeHalfType).
1608
1609 if (Src->getType() != CGF.CGM.HalfTy) {
1611 Src = Builder.CreateBitCast(Src, CGF.CGM.HalfTy);
1612 }
1613
1614 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1615 SrcType = CGF.getContext().FloatTy;
1616 SrcTy = CGF.FloatTy;
1617 }
1618 }
1619
1620 // Ignore conversions like int -> uint.
1621 if (SrcTy == DstTy) {
1622 if (Opts.EmitImplicitIntegerSignChangeChecks)
1623 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1624 NoncanonicalDstType, Loc);
1625
1626 return Src;
1627 }
1628
1629 // Handle pointer conversions next: pointers can only be converted to/from
1630 // other pointers and integers. Check for pointer types in terms of LLVM, as
1631 // some native types (like Obj-C id) may map to a pointer type.
1632 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1633 // The source value may be an integer, or a pointer.
1634 if (isa<llvm::PointerType>(SrcTy))
1635 return Src;
1636
1637 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1638 // First, convert to the correct width so that we control the kind of
1639 // extension.
1640 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1641 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1642 llvm::Value* IntResult =
1643 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1644 // Then, cast to pointer.
1645 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1646 }
1647
1648 if (isa<llvm::PointerType>(SrcTy)) {
1649 // Must be an ptr to int cast.
1650 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1651 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1652 }
1653
1654 // A scalar can be splatted to an extended vector of the same element type
1655 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1656 // Sema should add casts to make sure that the source expression's type is
1657 // the same as the vector's element type (sans qualifiers)
1658 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1659 SrcType.getTypePtr() &&
1660 "Splatted expr doesn't match with vector element type?");
1661
1662 // Splat the element across to all elements
1663 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1664 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1665 }
1666
1667 if (SrcType->isMatrixType() && DstType->isMatrixType())
1668 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1669
1670 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1671 // Allow bitcast from vector to integer/fp of the same size.
1672 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1673 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1674 if (SrcSize == DstSize)
1675 return Builder.CreateBitCast(Src, DstTy, "conv");
1676
1677 // Conversions between vectors of different sizes are not allowed except
1678 // when vectors of half are involved. Operations on storage-only half
1679 // vectors require promoting half vector operands to float vectors and
1680 // truncating the result, which is either an int or float vector, to a
1681 // short or half vector.
1682
1683 // Source and destination are both expected to be vectors.
1684 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1685 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1686 (void)DstElementTy;
1687
1688 assert(((SrcElementTy->isIntegerTy() &&
1689 DstElementTy->isIntegerTy()) ||
1690 (SrcElementTy->isFloatingPointTy() &&
1691 DstElementTy->isFloatingPointTy())) &&
1692 "unexpected conversion between a floating-point vector and an "
1693 "integer vector");
1694
1695 // Truncate an i32 vector to an i16 vector.
1696 if (SrcElementTy->isIntegerTy())
1697 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1698
1699 // Truncate a float vector to a half vector.
1700 if (SrcSize > DstSize)
1701 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1702
1703 // Promote a half vector to a float vector.
1704 return Builder.CreateFPExt(Src, DstTy, "conv");
1705 }
1706
1707 // Finally, we have the arithmetic types: real int/float.
1708 Value *Res = nullptr;
1709 llvm::Type *ResTy = DstTy;
1710
1711 // An overflowing conversion has undefined behavior if either the source type
1712 // or the destination type is a floating-point type. However, we consider the
1713 // range of representable values for all floating-point types to be
1714 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1715 // floating-point type.
1716 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1717 OrigSrcType->isFloatingType())
1718 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1719 Loc);
1720
1721 // Cast to half through float if half isn't a native type.
1722 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1723 // Make sure we cast in a single step if from another FP type.
1724 if (SrcTy->isFloatingPointTy()) {
1725 // Handle the case where the half type is represented as an integer (as
1726 // opposed to operations on half, available with NativeHalfType).
1727
1728 // If the half type is supported, just use an fptrunc.
1729 Value *Res = Builder.CreateFPTrunc(Src, CGF.CGM.HalfTy, "conv");
1730 if (DstTy == CGF.CGM.HalfTy)
1731 return Res;
1732
1733 assert(DstTy->isIntegerTy(16) &&
1735 "Only half FP requires extra conversion");
1736 return Builder.CreateBitCast(Res, DstTy);
1737 }
1738
1739 DstTy = CGF.FloatTy;
1740 }
1741
1742 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1743
1744 if (DstTy != ResTy) {
1745 Res = Builder.CreateFPTrunc(Res, CGF.CGM.HalfTy, "conv");
1746
1747 if (ResTy != CGF.CGM.HalfTy) {
1748 assert(ResTy->isIntegerTy(16) &&
1750 "Only half FP requires extra conversion");
1751 Res = Builder.CreateBitCast(Res, ResTy);
1752 }
1753 }
1754
1755 if (Opts.EmitImplicitIntegerTruncationChecks)
1756 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1757 NoncanonicalDstType, Loc);
1758
1759 if (Opts.EmitImplicitIntegerSignChangeChecks)
1760 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1761 NoncanonicalDstType, Loc);
1762
1763 return Res;
1764}
1765
1766Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1767 QualType DstTy,
1768 SourceLocation Loc) {
1769 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1770 llvm::Value *Result;
1771 if (SrcTy->isRealFloatingType())
1772 Result = FPBuilder.CreateFloatingToFixed(Src,
1773 CGF.getContext().getFixedPointSemantics(DstTy));
1774 else if (DstTy->isRealFloatingType())
1775 Result = FPBuilder.CreateFixedToFloating(Src,
1777 ConvertType(DstTy));
1778 else {
1779 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1780 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1781
1782 if (DstTy->isIntegerType())
1783 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1784 DstFPSema.getWidth(),
1785 DstFPSema.isSigned());
1786 else if (SrcTy->isIntegerType())
1787 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1788 DstFPSema);
1789 else
1790 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1791 }
1792 return Result;
1793}
1794
1795/// Emit a conversion from the specified complex type to the specified
1796/// destination type, where the destination type is an LLVM scalar type.
1797Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1798 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1799 SourceLocation Loc) {
1800 // Get the source element type.
1801 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1802
1803 // Handle conversions to bool first, they are special: comparisons against 0.
1804 if (DstTy->isBooleanType()) {
1805 // Complex != 0 -> (Real != 0) | (Imag != 0)
1806 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1807 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1808 return Builder.CreateOr(Src.first, Src.second, "tobool");
1809 }
1810
1811 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1812 // the imaginary part of the complex value is discarded and the value of the
1813 // real part is converted according to the conversion rules for the
1814 // corresponding real type.
1815 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1816}
1817
1818Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1819 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1820}
1821
1822/// Emit a sanitization check for the given "binary" operation (which
1823/// might actually be a unary increment which has been lowered to a binary
1824/// operation). The check passes if all values in \p Checks (which are \c i1),
1825/// are \c true.
1826void ScalarExprEmitter::EmitBinOpCheck(
1827 ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
1828 const BinOpInfo &Info) {
1829 assert(CGF.IsSanitizerScope);
1830 SanitizerHandler Check;
1831 SmallVector<llvm::Constant *, 4> StaticData;
1832 SmallVector<llvm::Value *, 2> DynamicData;
1833 TrapReason TR;
1834
1835 BinaryOperatorKind Opcode = Info.Opcode;
1838
1839 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1840 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1841 if (UO && UO->getOpcode() == UO_Minus) {
1842 Check = SanitizerHandler::NegateOverflow;
1843 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1844 DynamicData.push_back(Info.RHS);
1845 } else {
1846 if (BinaryOperator::isShiftOp(Opcode)) {
1847 // Shift LHS negative or too large, or RHS out of bounds.
1848 Check = SanitizerHandler::ShiftOutOfBounds;
1849 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1850 StaticData.push_back(
1851 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1852 StaticData.push_back(
1853 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1854 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1855 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1856 Check = SanitizerHandler::DivremOverflow;
1857 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1858 } else {
1859 // Arithmetic overflow (+, -, *).
1860 int ArithOverflowKind = 0;
1861 switch (Opcode) {
1862 case BO_Add: {
1863 Check = SanitizerHandler::AddOverflow;
1864 ArithOverflowKind = diag::UBSanArithKind::Add;
1865 break;
1866 }
1867 case BO_Sub: {
1868 Check = SanitizerHandler::SubOverflow;
1869 ArithOverflowKind = diag::UBSanArithKind::Sub;
1870 break;
1871 }
1872 case BO_Mul: {
1873 Check = SanitizerHandler::MulOverflow;
1874 ArithOverflowKind = diag::UBSanArithKind::Mul;
1875 break;
1876 }
1877 default:
1878 llvm_unreachable("unexpected opcode for bin op check");
1879 }
1880 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1882 SanitizerKind::UnsignedIntegerOverflow) ||
1884 SanitizerKind::SignedIntegerOverflow)) {
1885 // Only pay the cost for constructing the trap diagnostic if they are
1886 // going to be used.
1887 CGF.CGM.BuildTrapReason(diag::trap_ubsan_arith_overflow, TR)
1888 << Info.Ty->isSignedIntegerOrEnumerationType() << ArithOverflowKind
1889 << Info.E;
1890 }
1891 }
1892 DynamicData.push_back(Info.LHS);
1893 DynamicData.push_back(Info.RHS);
1894 }
1895
1896 CGF.EmitCheck(Checks, Check, StaticData, DynamicData, &TR);
1897}
1898
1899//===----------------------------------------------------------------------===//
1900// Visitor Methods
1901//===----------------------------------------------------------------------===//
1902
1903Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1904 CGF.ErrorUnsupported(E, "scalar expression");
1905 if (E->getType()->isVoidType())
1906 return nullptr;
1907 return llvm::PoisonValue::get(CGF.ConvertType(E->getType()));
1908}
1909
1910Value *
1911ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1912 ASTContext &Context = CGF.getContext();
1913 unsigned AddrSpace =
1915 llvm::Constant *GlobalConstStr = Builder.CreateGlobalString(
1916 E->ComputeName(Context), "__usn_str", AddrSpace);
1917
1918 llvm::Type *ExprTy = ConvertType(E->getType());
1919 return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,
1920 "usn_addr_cast");
1921}
1922
1923Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {
1924 assert(E->getDataElementCount() == 1);
1925 auto It = E->begin();
1926 return Builder.getInt((*It)->getValue());
1927}
1928
1929Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1930 // Vector Mask Case
1931 if (E->getNumSubExprs() == 2) {
1932 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1933 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1934 Value *Mask;
1935
1936 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
1937 unsigned LHSElts = LTy->getNumElements();
1938
1939 Mask = RHS;
1940
1941 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
1942
1943 // Mask off the high bits of each shuffle index.
1944 Value *MaskBits =
1945 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1946 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1947
1948 // newv = undef
1949 // mask = mask & maskbits
1950 // for each elt
1951 // n = extract mask i
1952 // x = extract val n
1953 // newv = insert newv, x, i
1954 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
1955 MTy->getNumElements());
1956 Value* NewV = llvm::PoisonValue::get(RTy);
1957 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1958 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1959 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1960
1961 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1962 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1963 }
1964 return NewV;
1965 }
1966
1967 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1968 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1969
1970 SmallVector<int, 32> Indices;
1971 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1972 llvm::APSInt Idx = E->getShuffleMaskIdx(i - 2);
1973 // Check for -1 and output it as undef in the IR.
1974 if (Idx.isSigned() && Idx.isAllOnes())
1975 Indices.push_back(-1);
1976 else
1977 Indices.push_back(Idx.getZExtValue());
1978 }
1979
1980 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
1981}
1982
1983Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1984 QualType SrcType = E->getSrcExpr()->getType(),
1985 DstType = E->getType();
1986
1987 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1988
1989 SrcType = CGF.getContext().getCanonicalType(SrcType);
1990 DstType = CGF.getContext().getCanonicalType(DstType);
1991 if (SrcType == DstType) return Src;
1992
1993 assert(SrcType->isVectorType() &&
1994 "ConvertVector source type must be a vector");
1995 assert(DstType->isVectorType() &&
1996 "ConvertVector destination type must be a vector");
1997
1998 llvm::Type *SrcTy = Src->getType();
1999 llvm::Type *DstTy = ConvertType(DstType);
2000
2001 // Ignore conversions like int -> uint.
2002 if (SrcTy == DstTy)
2003 return Src;
2004
2005 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
2006 DstEltType = DstType->castAs<VectorType>()->getElementType();
2007
2008 assert(SrcTy->isVectorTy() &&
2009 "ConvertVector source IR type must be a vector");
2010 assert(DstTy->isVectorTy() &&
2011 "ConvertVector destination IR type must be a vector");
2012
2013 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
2014 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
2015
2016 if (DstEltType->isBooleanType()) {
2017 assert((SrcEltTy->isFloatingPointTy() ||
2018 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
2019
2020 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
2021 if (SrcEltTy->isFloatingPointTy()) {
2022 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2023 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
2024 } else {
2025 return Builder.CreateICmpNE(Src, Zero, "tobool");
2026 }
2027 }
2028
2029 // We have the arithmetic types: real int/float.
2030 Value *Res = nullptr;
2031
2032 if (isa<llvm::IntegerType>(SrcEltTy)) {
2033 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
2034 if (isa<llvm::IntegerType>(DstEltTy))
2035 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
2036 else {
2037 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2038 if (InputSigned)
2039 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
2040 else
2041 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
2042 }
2043 } else if (isa<llvm::IntegerType>(DstEltTy)) {
2044 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
2045 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2046 if (DstEltType->isSignedIntegerOrEnumerationType())
2047 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
2048 else
2049 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
2050 } else {
2051 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
2052 "Unknown real conversion");
2053 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2054 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
2055 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
2056 else
2057 Res = Builder.CreateFPExt(Src, DstTy, "conv");
2058 }
2059
2060 return Res;
2061}
2062
2063Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
2064 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
2065 CGF.EmitIgnoredExpr(E->getBase());
2066 return CGF.emitScalarConstant(Constant, E);
2067 } else {
2068 Expr::EvalResult Result;
2070 llvm::APSInt Value = Result.Val.getInt();
2071 CGF.EmitIgnoredExpr(E->getBase());
2072 return Builder.getInt(Value);
2073 }
2074 }
2075
2076 llvm::Value *Result = EmitLoadOfLValue(E);
2077
2078 // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
2079 // debug info for the pointer, even if there is no variable associated with
2080 // the pointer's expression.
2081 if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
2082 if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Result)) {
2083 if (llvm::GetElementPtrInst *GEP =
2084 dyn_cast<llvm::GetElementPtrInst>(Load->getPointerOperand())) {
2085 if (llvm::Instruction *Pointer =
2086 dyn_cast<llvm::Instruction>(GEP->getPointerOperand())) {
2087 QualType Ty = E->getBase()->getType();
2088 if (!E->isArrow())
2089 Ty = CGF.getContext().getPointerType(Ty);
2090 CGF.getDebugInfo()->EmitPseudoVariable(Builder, Pointer, Ty);
2091 }
2092 }
2093 }
2094 }
2095 return Result;
2096}
2097
2098Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
2099 TestAndClearIgnoreResultAssign();
2100
2101 // Emit subscript expressions in rvalue context's. For most cases, this just
2102 // loads the lvalue formed by the subscript expr. However, we have to be
2103 // careful, because the base of a vector subscript is occasionally an rvalue,
2104 // so we can't get it as an lvalue.
2105 if (!E->getBase()->getType()->isVectorType() &&
2107 return EmitLoadOfLValue(E);
2108
2109 // Handle the vector case. The base must be a vector, the index must be an
2110 // integer value.
2111 Value *Base = Visit(E->getBase());
2112 Value *Idx = Visit(E->getIdx());
2113 QualType IdxTy = E->getIdx()->getType();
2114
2115 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
2116 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
2117
2118 return Builder.CreateExtractElement(Base, Idx, "vecext");
2119}
2120
2121Value *ScalarExprEmitter::VisitMatrixSingleSubscriptExpr(
2122 MatrixSingleSubscriptExpr *E) {
2123 TestAndClearIgnoreResultAssign();
2124
2125 auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2126 unsigned NumRows = MatrixTy->getNumRows();
2127 unsigned NumColumns = MatrixTy->getNumColumns();
2128
2129 // Row index
2130 Value *RowIdx = CGF.EmitMatrixIndexExpr(E->getRowIdx());
2131 llvm::MatrixBuilder MB(Builder);
2132
2133 // The row index must be in [0, NumRows)
2134 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2135 MB.CreateIndexAssumption(RowIdx, NumRows);
2136
2137 Value *FlatMatrix = Visit(E->getBase());
2138 llvm::Type *ElemTy = CGF.ConvertTypeForMem(MatrixTy->getElementType());
2139 auto *ResultTy = llvm::FixedVectorType::get(ElemTy, NumColumns);
2140 Value *RowVec = llvm::PoisonValue::get(ResultTy);
2141
2142 for (unsigned Col = 0; Col != NumColumns; ++Col) {
2143 Value *ColVal = llvm::ConstantInt::get(RowIdx->getType(), Col);
2144 bool IsMatrixRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
2145 LangOptions::MatrixMemoryLayout::MatrixRowMajor;
2146 Value *EltIdx = MB.CreateIndex(RowIdx, ColVal, NumRows, NumColumns,
2147 IsMatrixRowMajor, "matrix_row_idx");
2148 Value *Elt =
2149 Builder.CreateExtractElement(FlatMatrix, EltIdx, "matrix_elem");
2150 Value *Lane = llvm::ConstantInt::get(Builder.getInt32Ty(), Col);
2151 RowVec = Builder.CreateInsertElement(RowVec, Elt, Lane, "matrix_row_ins");
2152 }
2153
2154 return CGF.EmitFromMemory(RowVec, E->getType());
2155}
2156
2157Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
2158 TestAndClearIgnoreResultAssign();
2159
2160 // Handle the vector case. The base must be a vector, the index must be an
2161 // integer value.
2162 Value *RowIdx = CGF.EmitMatrixIndexExpr(E->getRowIdx());
2163 Value *ColumnIdx = CGF.EmitMatrixIndexExpr(E->getColumnIdx());
2164
2165 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2166 llvm::MatrixBuilder MB(Builder);
2167
2168 Value *Idx;
2169 unsigned NumCols = MatrixTy->getNumColumns();
2170 unsigned NumRows = MatrixTy->getNumRows();
2171 bool IsMatrixRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
2172 LangOptions::MatrixMemoryLayout::MatrixRowMajor;
2173 Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows, NumCols, IsMatrixRowMajor);
2174
2175 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2176 MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());
2177
2178 Value *Matrix = Visit(E->getBase());
2179
2180 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
2181 return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
2182}
2183
2184static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
2185 unsigned Off) {
2186 int MV = SVI->getMaskValue(Idx);
2187 if (MV == -1)
2188 return -1;
2189 return Off + MV;
2190}
2191
2192static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
2193 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
2194 "Index operand too large for shufflevector mask!");
2195 return C->getZExtValue();
2196}
2197
2198Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
2199 bool Ignore = TestAndClearIgnoreResultAssign();
2200 (void)Ignore;
2201 unsigned NumInitElements = E->getNumInits();
2202 assert((Ignore == false ||
2203 (NumInitElements == 0 && E->getType()->isVoidType())) &&
2204 "init list ignored");
2205
2206 // HLSL initialization lists in the AST are an expansion which can contain
2207 // side-effecting expressions wrapped in opaque value expressions. To properly
2208 // emit these we need to emit the opaque values before we emit the argument
2209 // expressions themselves. This is a little hacky, but it prevents us needing
2210 // to do a bigger AST-level change for a language feature that we need
2211 // deprecate in the near future. See related HLSL language proposals in the
2212 // proposals (https://github.com/microsoft/hlsl-specs/blob/main/proposals):
2213 // * 0005-strict-initializer-lists.md
2214 // * 0032-constructors.md
2215 if (CGF.getLangOpts().HLSL)
2217
2218 if (E->hadArrayRangeDesignator())
2219 CGF.ErrorUnsupported(E, "GNU array range designator extension");
2220
2221 llvm::VectorType *VType =
2222 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
2223
2224 if (!VType) {
2225 if (NumInitElements == 0) {
2226 // C++11 value-initialization for the scalar.
2227 return EmitNullValue(E->getType());
2228 }
2229 // We have a scalar in braces. Just use the first element.
2230 return Visit(E->getInit(0));
2231 }
2232
2233 if (isa<llvm::ScalableVectorType>(VType)) {
2234 if (NumInitElements == 0) {
2235 // C++11 value-initialization for the vector.
2236 return EmitNullValue(E->getType());
2237 }
2238
2239 if (NumInitElements == 1) {
2240 Expr *InitVector = E->getInit(0);
2241
2242 // Initialize from another scalable vector of the same type.
2243 if (InitVector->getType().getCanonicalType() ==
2245 return Visit(InitVector);
2246 }
2247
2248 llvm_unreachable("Unexpected initialization of a scalable vector!");
2249 }
2250
2251 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
2252
2253 // Loop over initializers collecting the Value for each, and remembering
2254 // whether the source was swizzle (ExtVectorElementExpr). This will allow
2255 // us to fold the shuffle for the swizzle into the shuffle for the vector
2256 // initializer, since LLVM optimizers generally do not want to touch
2257 // shuffles.
2258 unsigned CurIdx = 0;
2259 bool VIsPoisonShuffle = false;
2260 llvm::Value *V = llvm::PoisonValue::get(VType);
2261 for (unsigned i = 0; i != NumInitElements; ++i) {
2262 Expr *IE = E->getInit(i);
2263 Value *Init = Visit(IE);
2264 SmallVector<int, 16> Args;
2265
2266 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
2267
2268 // Handle scalar elements. If the scalar initializer is actually one
2269 // element of a different vector of the same width, use shuffle instead of
2270 // extract+insert.
2271 if (!VVT) {
2272 if (isa<ExtVectorElementExpr>(IE)) {
2273 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
2274
2275 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
2276 ->getNumElements() == ResElts) {
2277 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
2278 Value *LHS = nullptr, *RHS = nullptr;
2279 if (CurIdx == 0) {
2280 // insert into poison -> shuffle (src, poison)
2281 // shufflemask must use an i32
2282 Args.push_back(getAsInt32(C, CGF.Int32Ty));
2283 Args.resize(ResElts, -1);
2284
2285 LHS = EI->getVectorOperand();
2286 RHS = V;
2287 VIsPoisonShuffle = true;
2288 } else if (VIsPoisonShuffle) {
2289 // insert into poison shuffle && size match -> shuffle (v, src)
2290 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
2291 for (unsigned j = 0; j != CurIdx; ++j)
2292 Args.push_back(getMaskElt(SVV, j, 0));
2293 Args.push_back(ResElts + C->getZExtValue());
2294 Args.resize(ResElts, -1);
2295
2296 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
2297 RHS = EI->getVectorOperand();
2298 VIsPoisonShuffle = false;
2299 }
2300 if (!Args.empty()) {
2301 V = Builder.CreateShuffleVector(LHS, RHS, Args);
2302 ++CurIdx;
2303 continue;
2304 }
2305 }
2306 }
2307 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
2308 "vecinit");
2309 VIsPoisonShuffle = false;
2310 ++CurIdx;
2311 continue;
2312 }
2313
2314 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
2315
2316 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
2317 // input is the same width as the vector being constructed, generate an
2318 // optimized shuffle of the swizzle input into the result.
2319 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
2320 if (isa<ExtVectorElementExpr>(IE)) {
2321 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
2322 Value *SVOp = SVI->getOperand(0);
2323 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
2324
2325 if (OpTy->getNumElements() == ResElts) {
2326 for (unsigned j = 0; j != CurIdx; ++j) {
2327 // If the current vector initializer is a shuffle with poison, merge
2328 // this shuffle directly into it.
2329 if (VIsPoisonShuffle) {
2330 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
2331 } else {
2332 Args.push_back(j);
2333 }
2334 }
2335 for (unsigned j = 0, je = InitElts; j != je; ++j)
2336 Args.push_back(getMaskElt(SVI, j, Offset));
2337 Args.resize(ResElts, -1);
2338
2339 if (VIsPoisonShuffle)
2340 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
2341
2342 Init = SVOp;
2343 }
2344 }
2345
2346 // Extend init to result vector length, and then shuffle its contribution
2347 // to the vector initializer into V.
2348 if (Args.empty()) {
2349 for (unsigned j = 0; j != InitElts; ++j)
2350 Args.push_back(j);
2351 Args.resize(ResElts, -1);
2352 Init = Builder.CreateShuffleVector(Init, Args, "vext");
2353
2354 Args.clear();
2355 for (unsigned j = 0; j != CurIdx; ++j)
2356 Args.push_back(j);
2357 for (unsigned j = 0; j != InitElts; ++j)
2358 Args.push_back(j + Offset);
2359 Args.resize(ResElts, -1);
2360 }
2361
2362 // If V is poison, make sure it ends up on the RHS of the shuffle to aid
2363 // merging subsequent shuffles into this one.
2364 if (CurIdx == 0)
2365 std::swap(V, Init);
2366 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
2367 VIsPoisonShuffle = isa<llvm::PoisonValue>(Init);
2368 CurIdx += InitElts;
2369 }
2370
2371 // FIXME: evaluate codegen vs. shuffling against constant null vector.
2372 // Emit remaining default initializers.
2373 llvm::Type *EltTy = VType->getElementType();
2374
2375 // Emit remaining default initializers
2376 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
2377 Value *Idx = Builder.getInt32(CurIdx);
2378 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
2379 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
2380 }
2381 return V;
2382}
2383
2385 return !D->isWeak();
2386}
2387
2388static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E) {
2389 E = E->IgnoreParens();
2390
2391 if (const auto *UO = dyn_cast<UnaryOperator>(E))
2392 if (UO->getOpcode() == UO_Deref)
2393 return CGF.isPointerKnownNonNull(UO->getSubExpr());
2394
2395 if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
2396 return isDeclRefKnownNonNull(CGF, DRE->getDecl());
2397
2398 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
2399 if (isa<FieldDecl>(ME->getMemberDecl()))
2400 return true;
2401 return isDeclRefKnownNonNull(CGF, ME->getMemberDecl());
2402 }
2403
2404 // Array subscripts? Anything else?
2405
2406 return false;
2407}
2408
2410 assert(E->getType()->isSignableType(getContext()));
2411
2412 E = E->IgnoreParens();
2413
2414 if (isa<CXXThisExpr>(E))
2415 return true;
2416
2417 if (const auto *UO = dyn_cast<UnaryOperator>(E))
2418 if (UO->getOpcode() == UO_AddrOf)
2419 return isLValueKnownNonNull(*this, UO->getSubExpr());
2420
2421 if (const auto *CE = dyn_cast<CastExpr>(E))
2422 if (CE->getCastKind() == CK_FunctionToPointerDecay ||
2423 CE->getCastKind() == CK_ArrayToPointerDecay)
2424 return isLValueKnownNonNull(*this, CE->getSubExpr());
2425
2426 // Maybe honor __nonnull?
2427
2428 return false;
2429}
2430
2432 const Expr *E = CE->getSubExpr();
2433
2434 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
2435 return false;
2436
2437 if (isa<CXXThisExpr>(E->IgnoreParens())) {
2438 // We always assume that 'this' is never null.
2439 return false;
2440 }
2441
2442 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2443 // And that glvalue casts are never null.
2444 if (ICE->isGLValue())
2445 return false;
2446 }
2447
2448 return true;
2449}
2450
2451 // RHS is an aggregate type
// Emits an HLSL elementwise cast: flattens the aggregate SrcVal into a list
// of scalar lvalues, then rebuilds the destination (vector, matrix, or
// single builtin scalar) element by element, converting each scalar.
// NOTE(review): the first line of this function's signature (doxygen line
// 2452, a hyperlinked declaration line, presumably
// "static Value *EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue SrcVal,")
// was dropped by this HTML capture — restore from upstream before compiling.
 2453 QualType DestTy, SourceLocation Loc) {
 2454 SmallVector<LValue, 16> LoadList;
 2455 CGF.FlattenAccessAndTypeLValue(SrcVal, LoadList);
 2456 // Dest is either a vector or a builtin?
 2457 // if its a vector create a temp alloca to store into and return that
 2458 if (auto *VecTy = DestTy->getAs<VectorType>()) {
 2459 assert(LoadList.size() >= VecTy->getNumElements() &&
 2460 "Flattened type on RHS must have the same number or more elements "
 2461 "than vector on LHS.");
// Load from a fresh temporary of the destination type; the loaded (undef)
// vector is then filled in lane by lane with insertelement below.
 2462 llvm::Value *V =
 2463 CGF.Builder.CreateLoad(CGF.CreateIRTemp(DestTy, "flatcast.tmp"));
 2464 // write to V.
 2465 for (unsigned I = 0, E = VecTy->getNumElements(); I < E; I++) {
 2466 RValue RVal = CGF.EmitLoadOfLValue(LoadList[I], Loc);
 2467 assert(RVal.isScalar() &&
 2468 "All flattened source values should be scalars.");
 2469 llvm::Value *Cast =
 2470 CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[I].getType(),
 2471 VecTy->getElementType(), Loc);
 2472 V = CGF.Builder.CreateInsertElement(V, Cast, I);
 2473 }
 2474 return V;
 2475 }
 2476 if (auto *MatTy = DestTy->getAs<ConstantMatrixType>()) {
 2477 assert(LoadList.size() >= MatTy->getNumElementsFlattened() &&
 2478 "Flattened type on RHS must have the same number or more elements "
 2479 "than vector on LHS.");
 2480
 2481 llvm::Value *V =
 2482 CGF.Builder.CreateLoad(CGF.CreateIRTemp(DestTy, "flatcast.tmp"));
 2483 // V is an allocated temporary to build the truncated matrix into.
 2484 for (unsigned I = 0, E = MatTy->getNumElementsFlattened(); I < E; I++) {
// Remap the flat insert index I (row-major walk) to the column-major index
// of the flattened source list.
 2485 unsigned ColMajorIndex =
 2486 (I % MatTy->getNumRows()) * MatTy->getNumColumns() +
 2487 (I / MatTy->getNumRows());
 2488 RValue RVal = CGF.EmitLoadOfLValue(LoadList[ColMajorIndex], Loc);
 2489 assert(RVal.isScalar() &&
 2490 "All flattened source values should be scalars.");
 2491 llvm::Value *Cast = CGF.EmitScalarConversion(
 2492 RVal.getScalarVal(), LoadList[ColMajorIndex].getType(),
 2493 MatTy->getElementType(), Loc);
 2494 V = CGF.Builder.CreateInsertElement(V, Cast, I);
 2495 }
 2496 return V;
 2497 }
 2498 // if its a builtin just do an extract element or load.
 2499 assert(DestTy->isBuiltinType() &&
 2500 "Destination type must be a vector, matrix, or builtin type.");
// Scalar destination: only the first flattened source element is used.
 2501 RValue RVal = CGF.EmitLoadOfLValue(LoadList[0], Loc);
 2502 assert(RVal.isScalar() && "All flattened source values should be scalars.");
 2503 return CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[0].getType(),
 2504 DestTy, Loc);
 2505}
2506
2507// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
2508// have to handle a more broad range of conversions than explicit casts, as they
2509// handle things like function to ptr-to-function decay etc.
2510Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// NOTE(review): this function was captured from a doxygen HTML page. The
// leading numerals on each line are doxygen line numbers, and several
// hyperlinked lines were dropped by the capture; each gap is flagged below
// with a NOTE(review) comment. Restore the dropped lines from upstream.
// Track the cast currently being emitted; restored on every exit path.
2511 llvm::scope_exit RestoreCurCast(
2512 [this, Prev = CGF.CurCast] { CGF.CurCast = Prev; });
2513 CGF.CurCast = CE;
2514
2515 Expr *E = CE->getSubExpr();
2516 QualType DestTy = CE->getType();
2517 CastKind Kind = CE->getCastKind();
2518 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);
2519
2520 // These cases are generally not written to ignore the result of
2521 // evaluating their sub-expressions, so we clear this now.
2522 bool Ignored = TestAndClearIgnoreResultAssign();
2523
2524 // Since almost all cast kinds apply to scalars, this switch doesn't have
2525 // a default case, so the compiler will warn on a missing case. The cases
2526 // are in the same order as in the CastKind enum.
2527 switch (Kind) {
2528 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2529 case CK_BuiltinFnToFnPtr:
2530 llvm_unreachable("builtin functions are handled elsewhere");
2531
2532 case CK_LValueBitCast:
2533 case CK_ObjCObjectLValueCast: {
2534 Address Addr = EmitLValue(E).getAddress();
2535 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2536 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
2537 return EmitLoadOfLValue(LV, CE->getExprLoc());
2538 }
2539
2540 case CK_LValueToRValueBitCast: {
2541 LValue SourceLVal = CGF.EmitLValue(E);
2542 Address Addr =
2543 SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy))
2544 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
// The reinterpreting load may alias anything; use may-alias TBAA.
2545 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2546 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2547 }
2548
2549 case CK_CPointerToObjCPointerCast:
2550 case CK_BlockPointerToObjCPointerCast:
2551 case CK_AnyPointerToBlockPointerCast:
2552 case CK_BitCast: {
2553 Value *Src = Visit(E);
2554 llvm::Type *SrcTy = Src->getType();
2555 llvm::Type *DstTy = ConvertType(DestTy);
2556
2557 // FIXME: this is a gross but seemingly necessary workaround for an issue
2558 // manifesting when a target uses a non-default AS for indirect sret args,
2559 // but the source HLL is generic, wherein a valid C-cast or reinterpret_cast
2560 // on the address of a local struct that gets returned by value yields an
2561 // invalid bitcast from the a pointer to the IndirectAS to a pointer to the
2562 // DefaultAS. We can only do this subversive thing because sret args are
2563 // manufactured and them residing in the IndirectAS is a target specific
2564 // detail, and doing an AS cast here still retains the semantics the user
2565 // expects. It is desirable to remove this iff a better solution is found.
2566 if (auto A = dyn_cast<llvm::Argument>(Src); A && A->hasStructRetAttr())
// NOTE(review): doxygen line 2567 (the hyperlinked call line — presumably
// the "return ...performAddrSpaceCast(" line) was dropped by this capture.
2568 CGF, Src, E->getType().getAddressSpace(), DstTy);
2569
2570 assert(
2571 (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
2572 SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
2573 "Address-space cast must be used to convert address spaces");
2574
2575 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2576 if (auto *PT = DestTy->getAs<PointerType>()) {
// NOTE(review): doxygen line 2577 (the hyperlinked
// "CGF.EmitVTablePtrCheckForCast(" call line) was dropped by this capture.
2578 PT->getPointeeType(),
2579 Address(Src,
// NOTE(review): doxygen line 2580 (a hyperlinked "CGF.ConvertTypeForMem("
// line) was dropped by this capture.
2581 E->getType()->castAs<PointerType>()->getPointeeType()),
2582 CGF.getPointerAlign()),
2583 /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
2584 CE->getBeginLoc());
2585 }
2586 }
2587
2588 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2589 const QualType SrcType = E->getType();
2590
2591 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2592 // Casting to pointer that could carry dynamic information (provided by
2593 // invariant.group) requires launder.
2594 Src = Builder.CreateLaunderInvariantGroup(Src);
2595 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2596 // Casting to pointer that does not carry dynamic information (provided
2597 // by invariant.group) requires stripping it. Note that we don't do it
2598 // if the source could not be dynamic type and destination could be
2599 // dynamic because dynamic information is already laundered. It is
2600 // because launder(strip(src)) == launder(src), so there is no need to
2601 // add extra strip before launder.
2602 Src = Builder.CreateStripInvariantGroup(Src);
2603 }
2604 }
2605
2606 // Update heapallocsite metadata when there is an explicit pointer cast.
2607 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
2608 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
2609 !isa<CastExpr>(E)) {
2610 QualType PointeeType = DestTy->getPointeeType();
2611 if (!PointeeType.isNull())
2612 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
2613 CE->getExprLoc());
2614 }
2615 }
2616
2617 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2618 // same element type, use the llvm.vector.insert intrinsic to perform the
2619 // bitcast.
2620 if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2621 if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2622 // If we are casting a fixed i8 vector to a scalable i1 predicate
2623 // vector, use a vector insert and bitcast the result.
2624 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
2625 FixedSrcTy->getElementType()->isIntegerTy(8)) {
2626 ScalableDstTy = llvm::ScalableVectorType::get(
2627 FixedSrcTy->getElementType(),
2628 llvm::divideCeil(
2629 ScalableDstTy->getElementCount().getKnownMinValue(), 8));
2630 }
2631 if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
2632 llvm::Value *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
2633 llvm::Value *Result = Builder.CreateInsertVector(
2634 ScalableDstTy, PoisonVec, Src, uint64_t(0), "cast.scalable");
2635 ScalableDstTy = cast<llvm::ScalableVectorType>(
2636 llvm::VectorType::getWithSizeAndScalar(ScalableDstTy, DstTy));
2637 if (Result->getType() != ScalableDstTy)
2638 Result = Builder.CreateBitCast(Result, ScalableDstTy);
2639 if (Result->getType() != DstTy)
2640 Result = Builder.CreateExtractVector(DstTy, Result, uint64_t(0));
2641 return Result;
2642 }
2643 }
2644 }
2645
2646 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2647 // same element type, use the llvm.vector.extract intrinsic to perform the
2648 // bitcast.
2649 if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
2650 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2651 // If we are casting a scalable i1 predicate vector to a fixed i8
2652 // vector, bitcast the source and use a vector extract.
2653 if (ScalableSrcTy->getElementType()->isIntegerTy(1) &&
2654 FixedDstTy->getElementType()->isIntegerTy(8)) {
// Pad the i1 predicate out to a multiple of 8 lanes with zeros so the
// i1 -> i8 bitcast below is well-formed.
2655 if (!ScalableSrcTy->getElementCount().isKnownMultipleOf(8)) {
2656 ScalableSrcTy = llvm::ScalableVectorType::get(
2657 ScalableSrcTy->getElementType(),
2658 llvm::alignTo<8>(
2659 ScalableSrcTy->getElementCount().getKnownMinValue()));
2660 llvm::Value *ZeroVec = llvm::Constant::getNullValue(ScalableSrcTy);
2661 Src = Builder.CreateInsertVector(ScalableSrcTy, ZeroVec, Src,
2662 uint64_t(0));
2663 }
2664
2665 ScalableSrcTy = llvm::ScalableVectorType::get(
2666 FixedDstTy->getElementType(),
2667 ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
2668 Src = Builder.CreateBitCast(Src, ScalableSrcTy);
2669 }
2670 if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType())
2671 return Builder.CreateExtractVector(DstTy, Src, uint64_t(0),
2672 "cast.fixed");
2673 }
2674 }
2675
2676 // Perform VLAT <-> VLST bitcast through memory.
2677 // TODO: since the llvm.vector.{insert,extract} intrinsics
2678 // require the element types of the vectors to be the same, we
2679 // need to keep this around for bitcasts between VLAT <-> VLST where
2680 // the element types of the vectors are not the same, until we figure
2681 // out a better way of doing these casts.
2682 if ((isa<llvm::FixedVectorType>(SrcTy) &&
// NOTE(review): doxygen lines 2683-2685 (the remainder of this condition —
// the scalable/fixed cross checks and opening brace) were dropped by this
// capture.
2686 Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
2687 LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
2688 CGF.EmitStoreOfScalar(Src, LV);
2689 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2690 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2691 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2692 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2693 }
2694
2695 llvm::Value *Result = Builder.CreateBitCast(Src, DstTy);
2696 return CGF.authPointerToPointerCast(Result, E->getType(), DestTy);
2697 }
2698 case CK_AddressSpaceConversion: {
2699 Expr::EvalResult Result;
2700 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2701 Result.Val.isNullPointer()) {
2702 // If E has side effect, it is emitted even if its final result is a
2703 // null pointer. In that case, a DCE pass should be able to
2704 // eliminate the useless instructions emitted during translating E.
2705 if (Result.HasSideEffects)
2706 Visit(E);
// NOTE(review): doxygen line 2707 (the hyperlinked
// "return CGF.CGM.getNullPointer(cast<llvm::PointerType>(" line) was
// dropped by this capture.
2708 ConvertType(DestTy)), DestTy);
2709 }
2710 // Since target may map different address spaces in AST to the same address
2711 // space, an address space conversion may end up as a bitcast.
// NOTE(review): doxygen line 2712 (the hyperlinked call line — presumably
// the "return ...performAddrSpaceCast(" line) was dropped by this capture.
2713 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2714 ConvertType(DestTy));
2715 }
2716 case CK_AtomicToNonAtomic:
2717 case CK_NonAtomicToAtomic:
2718 case CK_UserDefinedConversion:
2719 return Visit(E);
2720
2721 case CK_NoOp: {
2722 return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE) : Visit(E);
2723 }
2724
2725 case CK_BaseToDerived: {
2726 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2727 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2728
2729 Address Base = CGF.EmitPointerWithAlignment(E);
2730 Address Derived =
2731 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2732 CE->path_begin(), CE->path_end(),
// NOTE(review): doxygen line 2733 (the hyperlinked final argument line —
// presumably the null-check flag) was dropped by this capture.
2734
2735 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2736 // performed and the object is not of the derived type.
2737 if (CGF.sanitizePerformTypeCheck())
// NOTE(review): doxygen line 2738 (the hyperlinked "CGF.EmitTypeCheck("
// line) was dropped by this capture.
2739 Derived, DestTy->getPointeeType());
2740
2741 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2742 CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
2743 /*MayBeNull=*/true,
// NOTE(review): doxygen line 2744 (the hyperlinked
// "CodeGenFunction::CFITCK_DerivedCast," argument line) was dropped by this
// capture.
2745 CE->getBeginLoc());
2746
2747 return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType());
2748 }
2749 case CK_UncheckedDerivedToBase:
2750 case CK_DerivedToBase: {
2751 // The EmitPointerWithAlignment path does this fine; just discard
2752 // the alignment.
// NOTE(review): doxygen line 2753 (the hyperlinked return line — presumably
// "return CGF.getAsNaturalPointerTo(CGF.EmitPointerWithAlignment(CE),") was
// dropped by this capture.
2754 CE->getType()->getPointeeType());
2755 }
2756
2757 case CK_Dynamic: {
2758 Address V = CGF.EmitPointerWithAlignment(E);
2759 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2760 return CGF.EmitDynamicCast(V, DCE);
2761 }
2762
2763 case CK_ArrayToPointerDecay:
// NOTE(review): doxygen line 2764 (the hyperlinked return line — presumably
// "return CGF.getAsNaturalPointerTo(CGF.EmitArrayToPointerDecay(E),") was
// dropped by this capture.
2765 CE->getType()->getPointeeType());
2766 case CK_FunctionToPointerDecay:
2767 return EmitLValue(E).getPointer(CGF);
2768
2769 case CK_NullToPointer:
2770 if (MustVisitNullValue(E))
2771 CGF.EmitIgnoredExpr(E);
2772
2773 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2774 DestTy);
2775
2776 case CK_NullToMemberPointer: {
2777 if (MustVisitNullValue(E))
2778 CGF.EmitIgnoredExpr(E);
2779
2780 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2781 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2782 }
2783
2784 case CK_ReinterpretMemberPointer:
2785 case CK_BaseToDerivedMemberPointer:
2786 case CK_DerivedToBaseMemberPointer: {
2787 Value *Src = Visit(E);
2788
2789 // Note that the AST doesn't distinguish between checked and
2790 // unchecked member pointer conversions, so we always have to
2791 // implement checked conversions here. This is inefficient when
2792 // actual control flow may be required in order to perform the
2793 // check, which it is for data member pointers (but not member
2794 // function pointers on Itanium and ARM).
2795 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2796 }
2797
2798 case CK_ARCProduceObject:
2799 return CGF.EmitARCRetainScalarExpr(E);
2800 case CK_ARCConsumeObject:
2801 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2802 case CK_ARCReclaimReturnedObject:
2803 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2804 case CK_ARCExtendBlockObject:
2805 return CGF.EmitARCExtendBlockObject(E);
2806
2807 case CK_CopyAndAutoreleaseBlockObject:
2808 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2809
2810 case CK_FloatingRealToComplex:
2811 case CK_FloatingComplexCast:
2812 case CK_IntegralRealToComplex:
2813 case CK_IntegralComplexCast:
2814 case CK_IntegralComplexToFloatingComplex:
2815 case CK_FloatingComplexToIntegralComplex:
2816 case CK_ConstructorConversion:
2817 case CK_ToUnion:
2818 case CK_HLSLArrayRValue:
2819 llvm_unreachable("scalar cast to non-scalar value");
2820
2821 case CK_LValueToRValue:
2822 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2823 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2824 return Visit(E);
2825
2826 case CK_IntegralToPointer: {
2827 Value *Src = Visit(E);
2828
2829 // First, convert to the correct width so that we control the kind of
2830 // extension.
2831 auto DestLLVMTy = ConvertType(DestTy);
2832 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2833 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2834 llvm::Value* IntResult =
2835 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2836
2837 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2838
2839 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2840 // Going from integer to pointer that could be dynamic requires reloading
2841 // dynamic information from invariant.group.
2842 if (DestTy.mayBeDynamicClass())
2843 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2844 }
2845
2846 IntToPtr = CGF.authPointerToPointerCast(IntToPtr, E->getType(), DestTy);
2847 return IntToPtr;
2848 }
2849 case CK_PointerToIntegral: {
2850 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2851 auto *PtrExpr = Visit(E);
2852
2853 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2854 const QualType SrcType = E->getType();
2855
2856 // Casting to integer requires stripping dynamic information as it does
2857 // not carries it.
2858 if (SrcType.mayBeDynamicClass())
2859 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2860 }
2861
2862 PtrExpr = CGF.authPointerToPointerCast(PtrExpr, E->getType(), DestTy);
2863 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2864 }
2865 case CK_ToVoid: {
2866 CGF.EmitIgnoredExpr(E);
2867 return nullptr;
2868 }
2869 case CK_MatrixCast: {
2870 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2871 CE->getExprLoc());
2872 }
2873 // CK_HLSLAggregateSplatCast only handles splatting to vectors from a vec1
2874 // Casts were inserted in Sema to Cast the Src Expr to a Scalar and
2875 // To perform any necessary Scalar Cast, so this Cast can be handled
2876 // by the regular Vector Splat cast code.
2877 case CK_HLSLAggregateSplatCast:
2878 case CK_VectorSplat: {
2879 llvm::Type *DstTy = ConvertType(DestTy);
2880 Value *Elt = Visit(E);
2881 // Splat the element across to all elements
2882 llvm::ElementCount NumElements =
2883 cast<llvm::VectorType>(DstTy)->getElementCount();
2884 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2885 }
2886
2887 case CK_FixedPointCast:
2888 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2889 CE->getExprLoc());
2890
2891 case CK_FixedPointToBoolean:
2892 assert(E->getType()->isFixedPointType() &&
2893 "Expected src type to be fixed point type");
2894 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2895 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2896 CE->getExprLoc());
2897
2898 case CK_FixedPointToIntegral:
2899 assert(E->getType()->isFixedPointType() &&
2900 "Expected src type to be fixed point type");
2901 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2902 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2903 CE->getExprLoc());
2904
2905 case CK_IntegralToFixedPoint:
2906 assert(E->getType()->isIntegerType() &&
2907 "Expected src type to be an integer");
2908 assert(DestTy->isFixedPointType() &&
2909 "Expected dest type to be fixed point type");
2910 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2911 CE->getExprLoc());
2912
2913 case CK_IntegralCast: {
2914 if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
2915 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2916 return Builder.CreateIntCast(Visit(E), ConvertType(DestTy),
// NOTE(review): doxygen line 2917 (the hyperlinked signedness argument —
// presumably "SrcElTy->isSignedIntegerOrEnumerationType(),") was dropped by
// this capture.
2918 "conv");
2919 }
2920 ScalarConversionOpts Opts;
2921 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2922 if (!ICE->isPartOfExplicitCast())
2923 Opts = ScalarConversionOpts(CGF.SanOpts);
2924 }
2925 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2926 CE->getExprLoc(), Opts);
2927 }
2928 case CK_IntegralToFloating: {
2929 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2930 // TODO: Support constrained FP intrinsics.
2931 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2932 if (SrcElTy->isSignedIntegerOrEnumerationType())
2933 return Builder.CreateSIToFP(Visit(E), ConvertType(DestTy), "conv");
2934 return Builder.CreateUIToFP(Visit(E), ConvertType(DestTy), "conv");
2935 }
2936 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2937 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2938 CE->getExprLoc());
2939 }
2940 case CK_FloatingToIntegral: {
2941 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2942 // TODO: Support constrained FP intrinsics.
2943 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2944 if (DstElTy->isSignedIntegerOrEnumerationType())
2945 return Builder.CreateFPToSI(Visit(E), ConvertType(DestTy), "conv");
2946 return Builder.CreateFPToUI(Visit(E), ConvertType(DestTy), "conv");
2947 }
2948 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2949 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2950 CE->getExprLoc());
2951 }
2952 case CK_FloatingCast: {
2953 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2954 // TODO: Support constrained FP intrinsics.
2955 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2956 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2957 if (DstElTy->castAs<BuiltinType>()->getKind() <
2958 SrcElTy->castAs<BuiltinType>()->getKind())
2959 return Builder.CreateFPTrunc(Visit(E), ConvertType(DestTy), "conv");
2960 return Builder.CreateFPExt(Visit(E), ConvertType(DestTy), "conv");
2961 }
2962 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2963 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2964 CE->getExprLoc());
2965 }
2966 case CK_FixedPointToFloating:
2967 case CK_FloatingToFixedPoint: {
2968 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2969 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2970 CE->getExprLoc());
2971 }
2972 case CK_BooleanToSignedIntegral: {
2973 ScalarConversionOpts Opts;
2974 Opts.TreatBooleanAsSigned = true;
2975 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2976 CE->getExprLoc(), Opts);
2977 }
2978 case CK_IntegralToBoolean:
2979 return EmitIntToBoolConversion(Visit(E));
2980 case CK_PointerToBoolean:
2981 return EmitPointerToBoolConversion(Visit(E), E->getType());
2982 case CK_FloatingToBoolean: {
2983 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2984 return EmitFloatToBoolConversion(Visit(E));
2985 }
2986 case CK_MemberPointerToBoolean: {
2987 llvm::Value *MemPtr = Visit(E);
2988 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2989 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2990 }
2991
2992 case CK_FloatingComplexToReal:
2993 case CK_IntegralComplexToReal:
2994 return CGF.EmitComplexExpr(E, false, true).first;
2995
2996 case CK_FloatingComplexToBoolean:
2997 case CK_IntegralComplexToBoolean: {
// NOTE(review): doxygen line 2998 (the hyperlinked declaration of the
// complex pair "V" consumed below — presumably
// "CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);") was dropped
// by this capture.
2999
3000 // TODO: kill this function off, inline appropriate case here
3001 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
3002 CE->getExprLoc());
3003 }
3004
3005 case CK_ZeroToOCLOpaqueType: {
3006 assert((DestTy->isEventT() || DestTy->isQueueT() ||
3007 DestTy->isOCLIntelSubgroupAVCType()) &&
3008 "CK_ZeroToOCLEvent cast on non-event type");
3009 return llvm::Constant::getNullValue(ConvertType(DestTy));
3010 }
3011
3012 case CK_IntToOCLSampler:
3013 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
3014
3015 case CK_HLSLVectorTruncation: {
3016 assert((DestTy->isVectorType() || DestTy->isBuiltinType()) &&
3017 "Destination type must be a vector or builtin type.");
3018 Value *Vec = Visit(E);
3019 if (auto *VecTy = DestTy->getAs<VectorType>()) {
// Keep only the leading NumElts lanes via an identity shuffle mask.
3020 SmallVector<int> Mask;
3021 unsigned NumElts = VecTy->getNumElements();
3022 for (unsigned I = 0; I != NumElts; ++I)
3023 Mask.push_back(I);
3024
3025 return Builder.CreateShuffleVector(Vec, Mask, "trunc");
3026 }
3027 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy);
3028 return Builder.CreateExtractElement(Vec, Zero, "cast.vtrunc");
3029 }
3030 case CK_HLSLMatrixTruncation: {
3031 assert((DestTy->isMatrixType() || DestTy->isBuiltinType()) &&
3032 "Destination type must be a matrix or builtin type.");
3033 Value *Mat = Visit(E);
3034 if (auto *MatTy = DestTy->getAs<ConstantMatrixType>()) {
3035 SmallVector<int> Mask;
3036 unsigned NumCols = MatTy->getNumColumns();
3037 unsigned NumRows = MatTy->getNumRows();
// Stride rows by the *source* column count so the mask picks the
// top-left NumRows x NumCols submatrix of the source.
3038 unsigned ColOffset = NumCols;
3039 if (auto *SrcMatTy = E->getType()->getAs<ConstantMatrixType>())
3040 ColOffset = SrcMatTy->getNumColumns();
3041 for (unsigned R = 0; R < NumRows; R++) {
3042 for (unsigned C = 0; C < NumCols; C++) {
3043 unsigned I = R * ColOffset + C;
3044 Mask.push_back(I);
3045 }
3046 }
3047
3048 return Builder.CreateShuffleVector(Mat, Mask, "trunc");
3049 }
3050 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy);
3051 return Builder.CreateExtractElement(Mat, Zero, "cast.mtrunc");
3052 }
3053 case CK_HLSLElementwiseCast: {
3054 RValue RV = CGF.EmitAnyExpr(E);
3055 SourceLocation Loc = CE->getExprLoc();
3056
3057 Address SrcAddr = Address::invalid();
3058
3059 if (RV.isAggregate()) {
3060 SrcAddr = RV.getAggregateAddress();
3061 } else {
// Scalar/complex source: spill to a temporary so the flattening helper
// can work through an lvalue.
3062 SrcAddr = CGF.CreateMemTemp(E->getType(), "hlsl.ewcast.src");
3063 LValue TmpLV = CGF.MakeAddrLValue(SrcAddr, E->getType());
3064 CGF.EmitStoreThroughLValue(RV, TmpLV);
3065 }
3066
3067 LValue SrcVal = CGF.MakeAddrLValue(SrcAddr, E->getType());
3068 return EmitHLSLElementwiseCast(CGF, SrcVal, DestTy, Loc);
3069 }
3070
3071 } // end of switch
3072
3073 llvm_unreachable("unknown scalar cast");
3074}
3075
3076Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
3077 CodeGenFunction::StmtExprEvaluation eval(CGF);
3078 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
3079 !E->getType()->isVoidType());
3080 if (!RetAlloca.isValid())
3081 return nullptr;
3082 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
3083 E->getExprLoc());
3084}
3085
3086Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
3087 CodeGenFunction::RunCleanupsScope Scope(CGF);
3088 Value *V = Visit(E->getSubExpr());
3089 // Defend against dominance problems caused by jumps out of expression
3090 // evaluation through the shared cleanup block.
3091 Scope.ForceCleanup({&V});
3092 return V;
3093}
3094
3095//===----------------------------------------------------------------------===//
3096// Unary Operators
3097//===----------------------------------------------------------------------===//
3098
3100 llvm::Value *InVal, bool IsInc,
3101 FPOptions FPFeatures) {
3102 BinOpInfo BinOp;
3103 BinOp.LHS = InVal;
3104 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
3105 BinOp.Ty = E->getType();
3106 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
3107 BinOp.FPFeatures = FPFeatures;
3108 BinOp.E = E;
3109 return BinOp;
3110}
3111
3112llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
3113 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
3114 llvm::Value *Amount =
3115 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
3116 StringRef Name = IsInc ? "inc" : "dec";
3117 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3118 case LangOptions::SOB_Defined:
3119 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3120 return Builder.CreateAdd(InVal, Amount, Name);
3121 [[fallthrough]];
3122 case LangOptions::SOB_Undefined:
3123 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3124 return Builder.CreateNSWAdd(InVal, Amount, Name);
3125 [[fallthrough]];
3126 case LangOptions::SOB_Trapping:
3127 BinOpInfo Info = createBinOpInfoFromIncDec(
3128 E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts()));
3129 if (!E->canOverflow() || CanElideOverflowCheck(CGF.getContext(), Info))
3130 return Builder.CreateNSWAdd(InVal, Amount, Name);
3131 return EmitOverflowCheckedBinOp(Info);
3132 }
3133 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
3134}
3135
3136/// For the purposes of overflow pattern exclusion, does this match the
3137/// "while(i--)" pattern?
3138static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc,
3139 bool isPre, ASTContext &Ctx) {
3140 if (isInc || isPre)
3141 return false;
3142
3143 // -fsanitize-undefined-ignore-overflow-pattern=unsigned-post-decr-while
3146 return false;
3147
3148 // all Parents (usually just one) must be a WhileStmt
3149 for (const auto &Parent : Ctx.getParentMapContext().getParents(*UO))
3150 if (!Parent.get<WhileStmt>())
3151 return false;
3152
3153 return true;
3154}
3155
3156namespace {
3157/// Handles check and update for lastprivate conditional variables.
3158class OMPLastprivateConditionalUpdateRAII {
3159private:
3160 CodeGenFunction &CGF;
3161 const UnaryOperator *E;
3162
3163public:
3164 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
3165 const UnaryOperator *E)
3166 : CGF(CGF), E(E) {}
3167 ~OMPLastprivateConditionalUpdateRAII() {
3168 if (CGF.getLangOpts().OpenMP)
3170 CGF, E->getSubExpr());
3171 }
3172};
3173} // namespace
3174
3175llvm::Value *
3176ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
3177 bool isInc, bool isPre) {
3178 ApplyAtomGroup Grp(CGF.getDebugInfo());
3179 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
3180 QualType type = E->getSubExpr()->getType();
3181 llvm::PHINode *atomicPHI = nullptr;
3182 llvm::Value *value;
3183 llvm::Value *input;
3184 llvm::Value *Previous = nullptr;
3185 QualType SrcType = E->getType();
3186
3187 int amount = (isInc ? 1 : -1);
3188 bool isSubtraction = !isInc;
3189
3190 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
3191 type = atomicTy->getValueType();
3192 if (isInc && type->isBooleanType()) {
3193 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
3194 if (isPre) {
3195 Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
3196 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
3197 return Builder.getTrue();
3198 }
3199 // For atomic bool increment, we just store true and return it for
3200 // preincrement, do an atomic swap with true for postincrement
3201 return Builder.CreateAtomicRMW(
3202 llvm::AtomicRMWInst::Xchg, LV.getAddress(), True,
3203 llvm::AtomicOrdering::SequentiallyConsistent);
3204 }
3205 // Special case for atomic increment / decrement on integers, emit
3206 // atomicrmw instructions. We skip this if we want to be doing overflow
3207 // checking, and fall into the slow path with the atomic cmpxchg loop.
3208 if (!type->isBooleanType() && type->isIntegerType() &&
3209 !(type->isUnsignedIntegerType() &&
3210 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3211 CGF.getLangOpts().getSignedOverflowBehavior() !=
3212 LangOptions::SOB_Trapping) {
3213 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
3214 llvm::AtomicRMWInst::Sub;
3215 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
3216 llvm::Instruction::Sub;
3217 llvm::Value *amt = CGF.EmitToMemory(
3218 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
3219 llvm::Value *old =
3220 Builder.CreateAtomicRMW(aop, LV.getAddress(), amt,
3221 llvm::AtomicOrdering::SequentiallyConsistent);
3222 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
3223 }
3224 // Special case for atomic increment/decrement on floats.
3225 // Bail out non-power-of-2-sized floating point types (e.g., x86_fp80).
3226 if (type->isFloatingType()) {
3227 llvm::Type *Ty = ConvertType(type);
3228 if (llvm::has_single_bit(Ty->getScalarSizeInBits())) {
3229 llvm::AtomicRMWInst::BinOp aop =
3230 isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
3231 llvm::Instruction::BinaryOps op =
3232 isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
3233 llvm::Value *amt = llvm::ConstantFP::get(Ty, 1.0);
3234 llvm::AtomicRMWInst *old =
3235 CGF.emitAtomicRMWInst(aop, LV.getAddress(), amt,
3236 llvm::AtomicOrdering::SequentiallyConsistent);
3237
3238 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
3239 }
3240 }
3241 value = EmitLoadOfLValue(LV, E->getExprLoc());
3242 input = value;
3243 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
3244 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3245 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3246 value = CGF.EmitToMemory(value, type);
3247 Builder.CreateBr(opBB);
3248 Builder.SetInsertPoint(opBB);
3249 atomicPHI = Builder.CreatePHI(value->getType(), 2);
3250 atomicPHI->addIncoming(value, startBB);
3251 value = atomicPHI;
3252 } else {
3253 value = EmitLoadOfLValue(LV, E->getExprLoc());
3254 input = value;
3255 }
3256
3257 // Special case of integer increment that we have to check first: bool++.
3258 // Due to promotion rules, we get:
3259 // bool++ -> bool = bool + 1
3260 // -> bool = (int)bool + 1
3261 // -> bool = ((int)bool + 1 != 0)
3262 // An interesting aspect of this is that increment is always true.
3263 // Decrement does not have this property.
3264 if (isInc && type->isBooleanType()) {
3265 value = Builder.getTrue();
3266
3267 // Most common case by far: integer increment.
3268 } else if (type->isIntegerType()) {
3269 QualType promotedType;
3270 bool canPerformLossyDemotionCheck = false;
3271
3272 bool excludeOverflowPattern =
3273 matchesPostDecrInWhile(E, isInc, isPre, CGF.getContext());
3274
3276 promotedType = CGF.getContext().getPromotedIntegerType(type);
3277 assert(promotedType != type && "Shouldn't promote to the same type.");
3278 canPerformLossyDemotionCheck = true;
3279 canPerformLossyDemotionCheck &=
3281 CGF.getContext().getCanonicalType(promotedType);
3282 canPerformLossyDemotionCheck &=
3284 type, promotedType);
3285 assert((!canPerformLossyDemotionCheck ||
3286 type->isSignedIntegerOrEnumerationType() ||
3287 promotedType->isSignedIntegerOrEnumerationType() ||
3288 ConvertType(type)->getScalarSizeInBits() ==
3289 ConvertType(promotedType)->getScalarSizeInBits()) &&
3290 "The following check expects that if we do promotion to different "
3291 "underlying canonical type, at least one of the types (either "
3292 "base or promoted) will be signed, or the bitwidths will match.");
3293 }
3294 if (CGF.SanOpts.hasOneOf(
3295 SanitizerKind::ImplicitIntegerArithmeticValueChange |
3296 SanitizerKind::ImplicitBitfieldConversion) &&
3297 canPerformLossyDemotionCheck) {
3298 // While `x += 1` (for `x` with width less than int) is modeled as
3299 // promotion+arithmetics+demotion, and we can catch lossy demotion with
3300 // ease; inc/dec with width less than int can't overflow because of
3301 // promotion rules, so we omit promotion+demotion, which means that we can
3302 // not catch lossy "demotion". Because we still want to catch these cases
3303 // when the sanitizer is enabled, we perform the promotion, then perform
3304 // the increment/decrement in the wider type, and finally
3305 // perform the demotion. This will catch lossy demotions.
3306
3307 // We have a special case for bitfields defined using all the bits of the
3308 // type. In this case we need to do the same trick as for the integer
3309 // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.
3310
3311 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
3312 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
3313 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3314 // Do pass non-default ScalarConversionOpts so that sanitizer check is
3315 // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
3316 // checks will take care of the conversion.
3317 ScalarConversionOpts Opts;
3318 if (!LV.isBitField())
3319 Opts = ScalarConversionOpts(CGF.SanOpts);
3320 else if (CGF.SanOpts.has(SanitizerKind::ImplicitBitfieldConversion)) {
3321 Previous = value;
3322 SrcType = promotedType;
3323 }
3324
3325 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
3326 Opts);
3327
3328 // Note that signed integer inc/dec with width less than int can't
3329 // overflow because of promotion rules; we're just eliding a few steps
3330 // here.
3331 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
3332 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
3333 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
3334 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3335 !excludeOverflowPattern &&
3337 SanitizerKind::UnsignedIntegerOverflow, E->getType())) {
3338 value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
3339 E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
3340 } else {
3341 // Treat positive amount as unsigned to support inc of i1 (needed for
3342 // unsigned _BitInt(1)).
3343 llvm::Value *amt =
3344 llvm::ConstantInt::get(value->getType(), amount, !isInc);
3345 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3346 }
3347
3348 // Next most common: pointer increment.
3349 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
3350 QualType type = ptr->getPointeeType();
3351
3352 // VLA types don't have constant size.
3353 if (const VariableArrayType *vla
3355 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
3356 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
3357 llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
3358 if (CGF.getLangOpts().PointerOverflowDefined)
3359 value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
3360 else
3361 value = CGF.EmitCheckedInBoundsGEP(
3362 elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
3363 E->getExprLoc(), "vla.inc");
3364
3365 // Arithmetic on function pointers (!) is just +-1.
3366 } else if (type->isFunctionType()) {
3367 llvm::Value *amt = Builder.getInt32(amount);
3368
3369 if (CGF.getLangOpts().PointerOverflowDefined)
3370 value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
3371 else
3372 value =
3373 CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
3374 /*SignedIndices=*/false, isSubtraction,
3375 E->getExprLoc(), "incdec.funcptr");
3376
3377 // For everything else, we can just do a simple increment.
3378 } else {
3379 llvm::Value *amt = Builder.getInt32(amount);
3380 llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
3381 if (CGF.getLangOpts().PointerOverflowDefined)
3382 value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
3383 else
3384 value = CGF.EmitCheckedInBoundsGEP(
3385 elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
3386 E->getExprLoc(), "incdec.ptr");
3387 }
3388
3389 // Vector increment/decrement.
3390 } else if (type->isVectorType()) {
3391 if (type->hasIntegerRepresentation()) {
3392 llvm::Value *amt = llvm::ConstantInt::getSigned(value->getType(), amount);
3393
3394 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3395 } else {
3396 value = Builder.CreateFAdd(
3397 value,
3398 llvm::ConstantFP::get(value->getType(), amount),
3399 isInc ? "inc" : "dec");
3400 }
3401
3402 // Floating point.
3403 } else if (type->isRealFloatingType()) {
3404 // Add the inc/dec to the real part.
3405 llvm::Value *amt;
3406 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
3407
3408 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3409 // Another special case: half FP increment should be done via float. If
3410 // the input isn't already half, it may be i16.
3411 Value *bitcast = Builder.CreateBitCast(input, CGF.CGM.HalfTy);
3412 value = Builder.CreateFPExt(bitcast, CGF.CGM.FloatTy, "incdec.conv");
3413 }
3414
3415 if (value->getType()->isFloatTy())
3416 amt = llvm::ConstantFP::get(VMContext,
3417 llvm::APFloat(static_cast<float>(amount)));
3418 else if (value->getType()->isDoubleTy())
3419 amt = llvm::ConstantFP::get(VMContext,
3420 llvm::APFloat(static_cast<double>(amount)));
3421 else {
3422 // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
3423 // Convert from float.
3424 llvm::APFloat F(static_cast<float>(amount));
3425 bool ignored;
3426 const llvm::fltSemantics *FS;
3427 // Don't use getFloatTypeSemantics because Half isn't
3428 // necessarily represented using the "half" LLVM type.
3429 if (value->getType()->isFP128Ty())
3430 FS = &CGF.getTarget().getFloat128Format();
3431 else if (value->getType()->isHalfTy())
3432 FS = &CGF.getTarget().getHalfFormat();
3433 else if (value->getType()->isBFloatTy())
3434 FS = &CGF.getTarget().getBFloat16Format();
3435 else if (value->getType()->isPPC_FP128Ty())
3436 FS = &CGF.getTarget().getIbm128Format();
3437 else
3438 FS = &CGF.getTarget().getLongDoubleFormat();
3439 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
3440 amt = llvm::ConstantFP::get(VMContext, F);
3441 }
3442 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
3443
3444 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3445 value = Builder.CreateFPTrunc(value, CGF.CGM.HalfTy, "incdec.conv");
3446 value = Builder.CreateBitCast(value, input->getType());
3447 }
3448
3449 // Fixed-point types.
3450 } else if (type->isFixedPointType()) {
3451 // Fixed-point types are tricky. In some cases, it isn't possible to
3452 // represent a 1 or a -1 in the type at all. Piggyback off of
3453 // EmitFixedPointBinOp to avoid having to reimplement saturation.
3454 BinOpInfo Info;
3455 Info.E = E;
3456 Info.Ty = E->getType();
3457 Info.Opcode = isInc ? BO_Add : BO_Sub;
3458 Info.LHS = value;
3459 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
3460 // If the type is signed, it's better to represent this as +(-1) or -(-1),
3461 // since -1 is guaranteed to be representable.
3462 if (type->isSignedFixedPointType()) {
3463 Info.Opcode = isInc ? BO_Sub : BO_Add;
3464 Info.RHS = Builder.CreateNeg(Info.RHS);
3465 }
3466 // Now, convert from our invented integer literal to the type of the unary
3467 // op. This will upscale and saturate if necessary. This value can become
3468 // undef in some cases.
3469 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3470 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
3471 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
3472 value = EmitFixedPointBinOp(Info);
3473
3474 // Objective-C pointer types.
3475 } else {
3476 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
3477
3478 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
3479 if (!isInc) size = -size;
3480 llvm::Value *sizeValue =
3481 llvm::ConstantInt::getSigned(CGF.SizeTy, size.getQuantity());
3482
3483 if (CGF.getLangOpts().PointerOverflowDefined)
3484 value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
3485 else
3486 value = CGF.EmitCheckedInBoundsGEP(
3487 CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
3488 E->getExprLoc(), "incdec.objptr");
3489 value = Builder.CreateBitCast(value, input->getType());
3490 }
3491
3492 if (atomicPHI) {
3493 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3494 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3495 auto Pair = CGF.EmitAtomicCompareExchange(
3496 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
3497 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
3498 llvm::Value *success = Pair.second;
3499 atomicPHI->addIncoming(old, curBlock);
3500 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3501 Builder.SetInsertPoint(contBB);
3502 return isPre ? value : input;
3503 }
3504
3505 // Store the updated result through the lvalue.
3506 if (LV.isBitField()) {
3507 Value *Src = Previous ? Previous : value;
3508 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
3509 CGF.EmitBitfieldConversionCheck(Src, SrcType, value, E->getType(),
3510 LV.getBitFieldInfo(), E->getExprLoc());
3511 } else
3512 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
3513
3514 // If this is a postinc, return the value read from memory, otherwise use the
3515 // updated value.
3516 return isPre ? value : input;
3517}
3518
3519
3520Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3521 QualType PromotionType) {
3522 QualType promotionTy = PromotionType.isNull()
3523 ? getPromotionType(E->getSubExpr()->getType())
3524 : PromotionType;
3525 Value *result = VisitPlus(E, promotionTy);
3526 if (result && !promotionTy.isNull())
3527 result = EmitUnPromotedValue(result, E->getType());
3528 return result;
3529}
3530
3531Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3532 QualType PromotionType) {
3533 // This differs from gcc, though, most likely due to a bug in gcc.
3534 TestAndClearIgnoreResultAssign();
3535 if (!PromotionType.isNull())
3536 return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3537 return Visit(E->getSubExpr());
3538}
3539
3540Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3541 QualType PromotionType) {
3542 QualType promotionTy = PromotionType.isNull()
3543 ? getPromotionType(E->getSubExpr()->getType())
3544 : PromotionType;
3545 Value *result = VisitMinus(E, promotionTy);
3546 if (result && !promotionTy.isNull())
3547 result = EmitUnPromotedValue(result, E->getType());
3548 return result;
3549}
3550
3551Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3552 QualType PromotionType) {
3553 TestAndClearIgnoreResultAssign();
3554 Value *Op;
3555 if (!PromotionType.isNull())
3556 Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3557 else
3558 Op = Visit(E->getSubExpr());
3559
3560 // Generate a unary FNeg for FP ops.
3561 if (Op->getType()->isFPOrFPVectorTy())
3562 return Builder.CreateFNeg(Op, "fneg");
3563
3564 // Emit unary minus with EmitSub so we handle overflow cases etc.
3565 BinOpInfo BinOp;
3566 BinOp.RHS = Op;
3567 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
3568 BinOp.Ty = E->getType();
3569 BinOp.Opcode = BO_Sub;
3570 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3571 BinOp.E = E;
3572 return EmitSub(BinOp);
3573}
3574
3575Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3576 TestAndClearIgnoreResultAssign();
3577 Value *Op = Visit(E->getSubExpr());
3578 return Builder.CreateNot(Op, "not");
3579}
3580
3581Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
3582 // Perform vector logical not on comparison with zero vector.
3583 if (E->getType()->isVectorType() &&
3584 E->getType()->castAs<VectorType>()->getVectorKind() ==
3585 VectorKind::Generic) {
3586 Value *Oper = Visit(E->getSubExpr());
3587 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
3588 Value *Result;
3589 if (Oper->getType()->isFPOrFPVectorTy()) {
3590 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
3591 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
3592 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
3593 } else
3594 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
3595 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
3596 }
3597
3598 // Compare operand to zero.
3599 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
3600
3601 // Invert value.
3602 // TODO: Could dynamically modify easy computations here. For example, if
3603 // the operand is an icmp ne, turn into icmp eq.
3604 BoolVal = Builder.CreateNot(BoolVal, "lnot");
3605
3606 // ZExt result to the expr type.
3607 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
3608}
3609
3610Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
3611 // Try folding the offsetof to a constant.
3612 Expr::EvalResult EVResult;
3613 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
3614 llvm::APSInt Value = EVResult.Val.getInt();
3615 return Builder.getInt(Value);
3616 }
3617
3618 // Loop over the components of the offsetof to compute the value.
3619 unsigned n = E->getNumComponents();
3620 llvm::Type* ResultType = ConvertType(E->getType());
3621 llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
3622 QualType CurrentType = E->getTypeSourceInfo()->getType();
3623 for (unsigned i = 0; i != n; ++i) {
3624 OffsetOfNode ON = E->getComponent(i);
3625 llvm::Value *Offset = nullptr;
3626 switch (ON.getKind()) {
3627 case OffsetOfNode::Array: {
3628 // Compute the index
3629 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
3630 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
3631 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
3632 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
3633
3634 // Save the element type
3635 CurrentType =
3636 CGF.getContext().getAsArrayType(CurrentType)->getElementType();
3637
3638 // Compute the element size
3639 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
3640 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
3641
3642 // Multiply out to compute the result
3643 Offset = Builder.CreateMul(Idx, ElemSize);
3644 break;
3645 }
3646
3647 case OffsetOfNode::Field: {
3648 FieldDecl *MemberDecl = ON.getField();
3649 auto *RD = CurrentType->castAsRecordDecl();
3650 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
3651
3652 // Compute the index of the field in its parent.
3653 unsigned i = 0;
3654 // FIXME: It would be nice if we didn't have to loop here!
3655 for (RecordDecl::field_iterator Field = RD->field_begin(),
3656 FieldEnd = RD->field_end();
3657 Field != FieldEnd; ++Field, ++i) {
3658 if (*Field == MemberDecl)
3659 break;
3660 }
3661 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
3662
3663 // Compute the offset to the field
3664 int64_t OffsetInt = RL.getFieldOffset(i) /
3665 CGF.getContext().getCharWidth();
3666 Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
3667
3668 // Save the element type.
3669 CurrentType = MemberDecl->getType();
3670 break;
3671 }
3672
3674 llvm_unreachable("dependent __builtin_offsetof");
3675
3676 case OffsetOfNode::Base: {
3677 if (ON.getBase()->isVirtual()) {
3678 CGF.ErrorUnsupported(E, "virtual base in offsetof");
3679 continue;
3680 }
3681
3682 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(
3683 CurrentType->castAsCanonical<RecordType>()->getDecl());
3684
3685 // Save the element type.
3686 CurrentType = ON.getBase()->getType();
3687
3688 // Compute the offset to the base.
3689 auto *BaseRD = CurrentType->castAsCXXRecordDecl();
3690 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
3691 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
3692 break;
3693 }
3694 }
3695 Result = Builder.CreateAdd(Result, Offset);
3696 }
3697 return Result;
3698}
3699
3700/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
3701/// argument of the sizeof expression as an integer.
3702Value *
3703ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
3704 const UnaryExprOrTypeTraitExpr *E) {
3705 QualType TypeToSize = E->getTypeOfArgument();
3706 if (auto Kind = E->getKind();
3707 Kind == UETT_SizeOf || Kind == UETT_DataSizeOf || Kind == UETT_CountOf) {
3708 if (const VariableArrayType *VAT =
3709 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
3710 // For _Countof, we only want to evaluate if the extent is actually
3711 // variable as opposed to a multi-dimensional array whose extent is
3712 // constant but whose element type is variable.
3713 bool EvaluateExtent = true;
3714 if (Kind == UETT_CountOf && VAT->getElementType()->isArrayType()) {
3715 EvaluateExtent =
3716 !VAT->getSizeExpr()->isIntegerConstantExpr(CGF.getContext());
3717 }
3718 if (EvaluateExtent) {
3719 if (E->isArgumentType()) {
3720 // sizeof(type) - make sure to emit the VLA size.
3721 CGF.EmitVariablyModifiedType(TypeToSize);
3722 } else {
3723 // C99 6.5.3.4p2: If the argument is an expression of type
3724 // VLA, it is evaluated.
3726 }
3727
3728 // For _Countof, we just want to return the size of a single dimension.
3729 if (Kind == UETT_CountOf)
3730 return CGF.getVLAElements1D(VAT).NumElts;
3731
3732 // For sizeof and __datasizeof, we need to scale the number of elements
3733 // by the size of the array element type.
3734 auto VlaSize = CGF.getVLASize(VAT);
3735
3736 // Scale the number of non-VLA elements by the non-VLA element size.
3737 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
3738 if (!eltSize.isOne())
3739 return CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize),
3740 VlaSize.NumElts);
3741 return VlaSize.NumElts;
3742 }
3743 }
3744 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
3745 auto Alignment =
3746 CGF.getContext()
3749 .getQuantity();
3750 return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
3751 } else if (E->getKind() == UETT_VectorElements) {
3752 auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));
3753 return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());
3754 }
3755
3756 // If this isn't sizeof(vla), the result must be constant; use the constant
3757 // folding logic so we don't have to duplicate it here.
3758 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
3759}
3760
3761Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3762 QualType PromotionType) {
3763 QualType promotionTy = PromotionType.isNull()
3764 ? getPromotionType(E->getSubExpr()->getType())
3765 : PromotionType;
3766 Value *result = VisitReal(E, promotionTy);
3767 if (result && !promotionTy.isNull())
3768 result = EmitUnPromotedValue(result, E->getType());
3769 return result;
3770}
3771
3772Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
3773 QualType PromotionType) {
3774 Expr *Op = E->getSubExpr();
3775 if (Op->getType()->isAnyComplexType()) {
3776 // If it's an l-value, load through the appropriate subobject l-value.
3777 // Note that we have to ask E because Op might be an l-value that
3778 // this won't work for, e.g. an Obj-C property.
3779 if (E->isGLValue()) {
3780 if (!PromotionType.isNull()) {
3782 Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
3783 PromotionType = PromotionType->isAnyComplexType()
3784 ? PromotionType
3785 : CGF.getContext().getComplexType(PromotionType);
3786 return result.first ? CGF.EmitPromotedValue(result, PromotionType).first
3787 : result.first;
3788 }
3789
3790 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3791 .getScalarVal();
3792 }
3793 // Otherwise, calculate and project.
3794 return CGF.EmitComplexExpr(Op, false, true).first;
3795 }
3796
3797 if (!PromotionType.isNull())
3798 return CGF.EmitPromotedScalarExpr(Op, PromotionType);
3799 return Visit(Op);
3800}
3801
3802Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3803 QualType PromotionType) {
3804 QualType promotionTy = PromotionType.isNull()
3805 ? getPromotionType(E->getSubExpr()->getType())
3806 : PromotionType;
3807 Value *result = VisitImag(E, promotionTy);
3808 if (result && !promotionTy.isNull())
3809 result = EmitUnPromotedValue(result, E->getType());
3810 return result;
3811}
3812
3813Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
3814 QualType PromotionType) {
3815 Expr *Op = E->getSubExpr();
3816 if (Op->getType()->isAnyComplexType()) {
3817 // If it's an l-value, load through the appropriate subobject l-value.
3818 // Note that we have to ask E because Op might be an l-value that
3819 // this won't work for, e.g. an Obj-C property.
3820 if (Op->isGLValue()) {
3821 if (!PromotionType.isNull()) {
3823 Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
3824 PromotionType = PromotionType->isAnyComplexType()
3825 ? PromotionType
3826 : CGF.getContext().getComplexType(PromotionType);
3827 return result.second
3828 ? CGF.EmitPromotedValue(result, PromotionType).second
3829 : result.second;
3830 }
3831
3832 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3833 .getScalarVal();
3834 }
3835 // Otherwise, calculate and project.
3836 return CGF.EmitComplexExpr(Op, true, false).second;
3837 }
3838
3839 // __imag on a scalar returns zero. Emit the subexpr to ensure side
3840 // effects are evaluated, but not the actual value.
3841 if (Op->isGLValue())
3842 CGF.EmitLValue(Op);
3843 else if (!PromotionType.isNull())
3844 CGF.EmitPromotedScalarExpr(Op, PromotionType);
3845 else
3846 CGF.EmitScalarExpr(Op, true);
3847 if (!PromotionType.isNull())
3848 return llvm::Constant::getNullValue(ConvertType(PromotionType));
3849 return llvm::Constant::getNullValue(ConvertType(E->getType()));
3850}
3851
3852//===----------------------------------------------------------------------===//
3853// Binary Operators
3854//===----------------------------------------------------------------------===//
3855
3856Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3857 QualType PromotionType) {
3858 return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");
3859}
3860
3861Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3862 QualType ExprType) {
3863 return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType), "unpromotion");
3864}
3865
3866Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3867 E = E->IgnoreParens();
3868 if (auto BO = dyn_cast<BinaryOperator>(E)) {
3869 switch (BO->getOpcode()) {
3870#define HANDLE_BINOP(OP) \
3871 case BO_##OP: \
3872 return Emit##OP(EmitBinOps(BO, PromotionType));
3873 HANDLE_BINOP(Add)
3874 HANDLE_BINOP(Sub)
3875 HANDLE_BINOP(Mul)
3876 HANDLE_BINOP(Div)
3877#undef HANDLE_BINOP
3878 default:
3879 break;
3880 }
3881 } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
3882 switch (UO->getOpcode()) {
3883 case UO_Imag:
3884 return VisitImag(UO, PromotionType);
3885 case UO_Real:
3886 return VisitReal(UO, PromotionType);
3887 case UO_Minus:
3888 return VisitMinus(UO, PromotionType);
3889 case UO_Plus:
3890 return VisitPlus(UO, PromotionType);
3891 default:
3892 break;
3893 }
3894 }
3895 auto result = Visit(const_cast<Expr *>(E));
3896 if (result) {
3897 if (!PromotionType.isNull())
3898 return EmitPromotedValue(result, PromotionType);
3899 else
3900 return EmitUnPromotedValue(result, E->getType());
3901 }
3902 return result;
3903}
3904
3905BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
3906 QualType PromotionType) {
3907 TestAndClearIgnoreResultAssign();
3908 BinOpInfo Result;
3909 Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);
3910 Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);
3911 if (!PromotionType.isNull())
3912 Result.Ty = PromotionType;
3913 else
3914 Result.Ty = E->getType();
3915 Result.Opcode = E->getOpcode();
3916 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3917 Result.E = E;
3918 return Result;
3919}
3920
3921LValue ScalarExprEmitter::EmitCompoundAssignLValue(
3922 const CompoundAssignOperator *E,
3923 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
3924 Value *&Result) {
3925 QualType LHSTy = E->getLHS()->getType();
3926 BinOpInfo OpInfo;
3927
3930
3931 // Emit the RHS first. __block variables need to have the rhs evaluated
3932 // first, plus this should improve codegen a little.
3933
3934 QualType PromotionTypeCR;
3935 PromotionTypeCR = getPromotionType(E->getComputationResultType());
3936 if (PromotionTypeCR.isNull())
3937 PromotionTypeCR = E->getComputationResultType();
3938 QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
3939 QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
3940 if (!PromotionTypeRHS.isNull())
3941 OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);
3942 else
3943 OpInfo.RHS = Visit(E->getRHS());
3944 OpInfo.Ty = PromotionTypeCR;
3945 OpInfo.Opcode = E->getOpcode();
3946 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3947 OpInfo.E = E;
3948 // Load/convert the LHS.
3949 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3950
3951 llvm::PHINode *atomicPHI = nullptr;
3952 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3953 QualType type = atomicTy->getValueType();
3954 if (!type->isBooleanType() && type->isIntegerType() &&
3955 !(type->isUnsignedIntegerType() &&
3956 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3957 CGF.getLangOpts().getSignedOverflowBehavior() !=
3958 LangOptions::SOB_Trapping) {
3959 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3960 llvm::Instruction::BinaryOps Op;
3961 switch (OpInfo.Opcode) {
3962 // We don't have atomicrmw operands for *, %, /, <<, >>
3963 case BO_MulAssign: case BO_DivAssign:
3964 case BO_RemAssign:
3965 case BO_ShlAssign:
3966 case BO_ShrAssign:
3967 break;
3968 case BO_AddAssign:
3969 AtomicOp = llvm::AtomicRMWInst::Add;
3970 Op = llvm::Instruction::Add;
3971 break;
3972 case BO_SubAssign:
3973 AtomicOp = llvm::AtomicRMWInst::Sub;
3974 Op = llvm::Instruction::Sub;
3975 break;
3976 case BO_AndAssign:
3977 AtomicOp = llvm::AtomicRMWInst::And;
3978 Op = llvm::Instruction::And;
3979 break;
3980 case BO_XorAssign:
3981 AtomicOp = llvm::AtomicRMWInst::Xor;
3982 Op = llvm::Instruction::Xor;
3983 break;
3984 case BO_OrAssign:
3985 AtomicOp = llvm::AtomicRMWInst::Or;
3986 Op = llvm::Instruction::Or;
3987 break;
3988 default:
3989 llvm_unreachable("Invalid compound assignment type");
3990 }
3991 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3992 llvm::Value *Amt = CGF.EmitToMemory(
3993 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
3994 E->getExprLoc()),
3995 LHSTy);
3996
3997 llvm::AtomicRMWInst *OldVal =
3998 CGF.emitAtomicRMWInst(AtomicOp, LHSLV.getAddress(), Amt);
3999
4000 // Since operation is atomic, the result type is guaranteed to be the
4001 // same as the input in LLVM terms.
4002 Result = Builder.CreateBinOp(Op, OldVal, Amt);
4003 return LHSLV;
4004 }
4005 }
4006 // FIXME: For floating point types, we should be saving and restoring the
4007 // floating point environment in the loop.
4008 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
4009 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
4010 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
4011 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
4012 Builder.CreateBr(opBB);
4013 Builder.SetInsertPoint(opBB);
4014 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
4015 atomicPHI->addIncoming(OpInfo.LHS, startBB);
4016 OpInfo.LHS = atomicPHI;
4017 }
4018 else
4019 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
4020
4021 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
4022 SourceLocation Loc = E->getExprLoc();
4023 if (!PromotionTypeLHS.isNull())
4024 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
4025 E->getExprLoc());
4026 else
4027 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
4028 E->getComputationLHSType(), Loc);
4029
4030 // Expand the binary operator.
4031 Result = (this->*Func)(OpInfo);
4032
4033 // Convert the result back to the LHS type,
4034 // potentially with Implicit Conversion sanitizer check.
4035 // If LHSLV is a bitfield, use default ScalarConversionOpts
4036 // to avoid emit any implicit integer checks.
4037 Value *Previous = nullptr;
4038 if (LHSLV.isBitField()) {
4039 Previous = Result;
4040 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc);
4041 } else
4042 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
4043 ScalarConversionOpts(CGF.SanOpts));
4044
4045 if (atomicPHI) {
4046 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
4047 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
4048 auto Pair = CGF.EmitAtomicCompareExchange(
4049 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
4050 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
4051 llvm::Value *success = Pair.second;
4052 atomicPHI->addIncoming(old, curBlock);
4053 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
4054 Builder.SetInsertPoint(contBB);
4055 return LHSLV;
4056 }
4057
4058 // Store the result value into the LHS lvalue. Bit-fields are handled
4059 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
4060 // 'An assignment expression has the value of the left operand after the
4061 // assignment...'.
4062 if (LHSLV.isBitField()) {
4063 Value *Src = Previous ? Previous : Result;
4064 QualType SrcType = E->getRHS()->getType();
4065 QualType DstType = E->getLHS()->getType();
4067 CGF.EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
4068 LHSLV.getBitFieldInfo(), E->getExprLoc());
4069 } else
4071
4072 if (CGF.getLangOpts().OpenMP)
4074 E->getLHS());
4075 return LHSLV;
4076}
4077
4078Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
4079 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
4080 bool Ignore = TestAndClearIgnoreResultAssign();
4081 Value *RHS = nullptr;
4082 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
4083
4084 // If the result is clearly ignored, return now.
4085 if (Ignore)
4086 return nullptr;
4087
4088 // The result of an assignment in C is the assigned r-value.
4089 if (!CGF.getLangOpts().CPlusPlus)
4090 return RHS;
4091
4092 // If the lvalue is non-volatile, return the computed value of the assignment.
4093 if (!LHS.isVolatileQualified())
4094 return RHS;
4095
4096 // Otherwise, reload the value.
4097 return EmitLoadOfLValue(LHS, E->getExprLoc());
4098}
4099
4100void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
4101 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
4102 SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 2>
4103 Checks;
4104
4105 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
4106 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
4107 SanitizerKind::SO_IntegerDivideByZero));
4108 }
4109
4110 const auto *BO = cast<BinaryOperator>(Ops.E);
4111 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
4112 Ops.Ty->hasSignedIntegerRepresentation() &&
4113 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
4114 Ops.mayHaveIntegerOverflow()) {
4115 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
4116
4117 llvm::Value *IntMin =
4118 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
4119 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
4120
4121 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
4122 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
4123 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
4124 Checks.push_back(
4125 std::make_pair(NotOverflow, SanitizerKind::SO_SignedIntegerOverflow));
4126 }
4127
4128 if (Checks.size() > 0)
4129 EmitBinOpCheck(Checks, Ops);
4130}
4131
// Emit a scalar '/' (or the '/=' computation). Order of work:
//  (1) optional sanitizer checks (integer divide-by-zero / INT_MIN÷-1
//      overflow, or float divide-by-zero);
//  (2) matrix-by-scalar division;
//  (3) floating-point division;
//  (4) fixed-point division;
//  (5) unsigned vs. signed integer division.
4132Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
 4133 {
  // Scope limits the sanitizer debug location to the checks themselves.
 4134 SanitizerDebugLocation SanScope(&CGF,
 4135 {SanitizerKind::SO_IntegerDivideByZero,
 4136 SanitizerKind::SO_SignedIntegerOverflow,
 4137 SanitizerKind::SO_FloatDivideByZero},
 4138 SanitizerHandler::DivremOverflow);
  // Integer checks are only worth emitting when the operands may actually
  // divide by zero / overflow (constant-folded cases are skipped).
 4139 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
 4140 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
 4141 Ops.Ty->isIntegerType() &&
 4142 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
 4143 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
 4144 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
 4145 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
 4146 Ops.Ty->isRealFloatingType() &&
 4147 Ops.mayHaveFloatDivisionByZero()) {
  // For FP, an unordered != 0 comparison also lets NaN divisors pass.
 4148 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
 4149 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
 4150 EmitBinOpCheck(
 4151 std::make_pair(NonZero, SanitizerKind::SO_FloatDivideByZero), Ops);
 4152 }
 4153 }
 4154
 4155 if (Ops.Ty->isConstantMatrixType()) {
 4156 llvm::MatrixBuilder MB(Builder);
 4157 // We need to check the types of the operands of the operator to get the
 4158 // correct matrix dimensions.
 4159 auto *BO = cast<BinaryOperator>(Ops.E);
 4160 (void)BO;
 4161 assert(
 4163 "first operand must be a matrix");
 4164 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
 4165 "second operand must be an arithmetic type");
 4166 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
 4167 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
 4168 Ops.Ty->hasUnsignedIntegerRepresentation());
 4169 }
 4170
 4171 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
 4172 llvm::Value *Val;
 4173 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
 4174 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
 4175 CGF.SetDivFPAccuracy(Val);
 4176 return Val;
 4177 }
 4178 else if (Ops.isFixedPointOp())
 4179 return EmitFixedPointBinOp(Ops);
 4180 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
 4181 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
 4182 else
 4183 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
 4184}
4185
4186Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
4187 // Rem in C can't be a floating point type: C99 6.5.5p2.
4188 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
4189 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
4190 Ops.Ty->isIntegerType() &&
4191 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4192 SanitizerDebugLocation SanScope(&CGF,
4193 {SanitizerKind::SO_IntegerDivideByZero,
4194 SanitizerKind::SO_SignedIntegerOverflow},
4195 SanitizerHandler::DivremOverflow);
4196 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4197 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
4198 }
4199
4200 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4201 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
4202
4203 if (CGF.getLangOpts().HLSL && Ops.Ty->hasFloatingRepresentation())
4204 return Builder.CreateFRem(Ops.LHS, Ops.RHS, "rem");
4205
4206 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
4207}
4208
// Emit an add/sub/mul (including the compound-assign forms) through the
// llvm.{s,u}{add,sub,mul}.with.overflow intrinsics. On overflow, either a
// sanitizer check / trap is emitted, or — when a user overflow handler has
// been named — control branches to a call of that handler, whose (truncated)
// return value replaces the result via a phi.
4209Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
 4210 unsigned IID;
 4211 unsigned OpID = 0;
 4212 SanitizerHandler OverflowKind;
 4213
  // Select the intrinsic and the sanitizer handler by opcode + signedness.
 4214 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
 4215 switch (Ops.Opcode) {
 4216 case BO_Add:
 4217 case BO_AddAssign:
 4218 OpID = 1;
 4219 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
 4220 llvm::Intrinsic::uadd_with_overflow;
 4221 OverflowKind = SanitizerHandler::AddOverflow;
 4222 break;
 4223 case BO_Sub:
 4224 case BO_SubAssign:
 4225 OpID = 2;
 4226 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
 4227 llvm::Intrinsic::usub_with_overflow;
 4228 OverflowKind = SanitizerHandler::SubOverflow;
 4229 break;
 4230 case BO_Mul:
 4231 case BO_MulAssign:
 4232 OpID = 3;
 4233 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
 4234 llvm::Intrinsic::umul_with_overflow;
 4235 OverflowKind = SanitizerHandler::MulOverflow;
 4236 break;
 4237 default:
 4238 llvm_unreachable("Unsupported operation for overflow detection");
 4239 }
  // Encode (operation, signedness) as OpID = op-index << 1 | isSigned; this
  // byte is passed to the runtime overflow handler below.
 4240 OpID <<= 1;
 4241 if (isSigned)
 4242 OpID |= 1;
 4243
 4244 SanitizerDebugLocation SanScope(&CGF,
 4245 {SanitizerKind::SO_SignedIntegerOverflow,
 4246 SanitizerKind::SO_UnsignedIntegerOverflow},
 4247 OverflowKind);
 4248 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
 4249
 4250 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
 4251
  // The intrinsic yields {result, i1 overflowed}.
 4252 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
 4253 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
 4254 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
 4255
 4256 // Handle overflow with llvm.trap if no custom handler has been specified.
  // handlerName is the user-configured overflow handler; empty means none.
 4257 const std::string *handlerName =
 4259 if (handlerName->empty()) {
 4260 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
 4261 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
 4262 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
 4263 llvm::Value *NotOverflow = Builder.CreateNot(overflow);
 4265 isSigned ? SanitizerKind::SO_SignedIntegerOverflow
 4266 : SanitizerKind::SO_UnsignedIntegerOverflow;
 4267 EmitBinOpCheck(std::make_pair(NotOverflow, Ordinal), Ops);
 4268 } else
 4269 CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
 4270 return result;
 4271 }
 4272
 4273 // Branch in case of overflow.
 4274 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
 4275 llvm::BasicBlock *continueBB =
 4276 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
 4277 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
 4278
 4279 Builder.CreateCondBr(overflow, overflowBB, continueBB);
 4280
 4281 // If an overflow handler is set, then we want to call it and then use its
 4282 // result, if it returns.
 4283 Builder.SetInsertPoint(overflowBB);
 4284
 4285 // Get the overflow handler.
  // The handler is variadic: (i64 lhs, i64 rhs, i8 opID, i8 width, ...) -> i64.
 4286 llvm::Type *Int8Ty = CGF.Int8Ty;
 4287 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
 4288 llvm::FunctionType *handlerTy =
 4289 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
 4290 llvm::FunctionCallee handler =
 4291 CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
 4292
 4293 // Sign extend the args to 64-bit, so that we can use the same handler for
 4294 // all types of overflow.
 4295 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
 4296 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
 4297
 4298 // Call the handler with the two arguments, the operation, and the size of
 4299 // the result.
 4300 llvm::Value *handlerArgs[] = {
 4301 lhs,
 4302 rhs,
 4303 Builder.getInt8(OpID),
 4304 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
 4305 };
 4306 llvm::Value *handlerResult =
 4307 CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
 4308
 4309 // Truncate the result back to the desired size.
 4310 handlerResult = Builder.CreateTrunc(handlerResult, opTy);
 4311 Builder.CreateBr(continueBB);
 4312
  // Merge: intrinsic result on the fast path, handler result on overflow.
 4313 Builder.SetInsertPoint(continueBB);
 4314 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
 4315 phi->addIncoming(result, initialBB);
 4316 phi->addIncoming(handlerResult, overflowBB);
 4317
 4318 return phi;
 4319}
4320
4321/// BO_Add/BO_Sub are handled by EmitPointerWithAlignment to preserve alignment
4322/// information.
4323/// This function is used for BO_AddAssign/BO_SubAssign.
///
/// Normalizes the operands so that 'pointer'/'pointerOperand' name the
/// pointer-typed side (for addition the pointer may appear on either side),
/// then delegates to CodeGenFunction::EmitPointerArithmetic.
4324static Value *emitPointerArithmetic(CodeGenFunction &CGF, const BinOpInfo &op,
4325 bool isSubtraction) {
 4326 // Must have binary (not unary) expr here. Unary pointer
 4327 // increment/decrement doesn't use this path.
 4329
 4330 Value *pointer = op.LHS;
 4331 Expr *pointerOperand = expr->getLHS();
 4332 Value *index = op.RHS;
 4333 Expr *indexOperand = expr->getRHS();
 4334
 4335 // In a subtraction, the LHS is always the pointer.
 4336 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
 4337 std::swap(pointer, index);
 4338 std::swap(pointerOperand, indexOperand);
 4339 }
 4340
 4341 return CGF.EmitPointerArithmetic(expr, pointerOperand, pointer, indexOperand,
 4342 index, isSubtraction);
}
4344
4345/// Emit pointer + index arithmetic.
///
/// Computes 'pointer ± index': the index is sign/zero-extended to the
/// pointer's index width, negated for subtraction, scaled for VLAs, and fed
/// to a (checked, inbounds) GEP. Also handles the GNU void*/function-pointer
/// extensions, the (nullptr + N) idiom, and the pointer-overflow /
/// array-bounds sanitizers.
 4347 const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer,
 4348 Expr *indexOperand, llvm::Value *index, bool isSubtraction) {
 4349 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
 4350
 4351 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
 4352 auto &DL = CGM.getDataLayout();
 4353 auto *PtrTy = cast<llvm::PointerType>(pointer->getType());
 4354
 4355 // Some versions of glibc and gcc use idioms (particularly in their malloc
 4356 // routines) that add a pointer-sized integer (known to be a pointer value)
 4357 // to a null pointer in order to cast the value back to an integer or as
 4358 // part of a pointer alignment algorithm. This is undefined behavior, but
 4359 // we'd like to be able to compile programs that use it.
 4360 //
 4361 // Normally, we'd generate a GEP with a null-pointer base here in response
 4362 // to that code, but it's also UB to dereference a pointer created that
 4363 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
 4364 // generate a direct cast of the integer value to a pointer.
 4365 //
 4366 // The idiom (p = nullptr + N) is not met if any of the following are true:
 4367 //
 4368 // The operation is subtraction.
 4369 // The index is not pointer-sized.
 4370 // The pointer type is not byte-sized.
 4371 //
 4372 // Note that we do not suppress the pointer overflow check in this case.
 4374 getContext(), BO->getOpcode(), pointerOperand, indexOperand)) {
 4375 llvm::Value *Ptr = Builder.CreateIntToPtr(index, pointer->getType());
  // Skip the overflow check entirely when it is disabled or when null is a
  // valid pointer in this address space / function.
 4376 if (getLangOpts().PointerOverflowDefined ||
 4377 !SanOpts.has(SanitizerKind::PointerOverflow) ||
 4378 NullPointerIsDefined(Builder.GetInsertBlock()->getParent(),
 4379 PtrTy->getPointerAddressSpace()))
 4380 return Ptr;
 4381 // The inbounds GEP of null is valid iff the index is zero.
 4382 auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
 4383 auto CheckHandler = SanitizerHandler::PointerOverflow;
 4384 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
 4385 llvm::Value *IsZeroIndex = Builder.CreateIsNull(index);
 4386 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(BO->getExprLoc())};
 4387 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
 4388 llvm::Value *IntPtr = llvm::Constant::getNullValue(IntPtrTy);
 4389 llvm::Value *ComputedGEP = Builder.CreateZExtOrTrunc(index, IntPtrTy);
 4390 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
 4391 EmitCheck({{IsZeroIndex, CheckOrdinal}}, CheckHandler, StaticArgs,
 4392 DynamicArgs);
 4393 return Ptr;
 4394 }
 4395
 4396 if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
 4397 // Zero-extend or sign-extend the pointer value according to
 4398 // whether the index is signed or not.
 4399 index = Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
 4400 "idx.ext");
 4401 }
 4402
 4403 // If this is subtraction, negate the index.
 4404 if (isSubtraction)
 4405 index = Builder.CreateNeg(index, "idx.neg");
 4406
 4407 if (SanOpts.has(SanitizerKind::ArrayBounds))
 4408 EmitBoundsCheck(BO, pointerOperand, index, indexOperand->getType(),
 4409 /*Accessed*/ false);
 4410
  // Operand is not a C PointerType: scale by the object size by hand and
  // GEP over i8 instead of using the pointee type.
 4411 const PointerType *pointerType =
 4412 pointerOperand->getType()->getAs<PointerType>();
 4413 if (!pointerType) {
 4414 QualType objectType = pointerOperand->getType()
 4416 ->getPointeeType();
 4417 llvm::Value *objectSize =
 4418 CGM.getSize(getContext().getTypeSizeInChars(objectType));
 4419
 4420 index = Builder.CreateMul(index, objectSize);
 4421
 4422 llvm::Value *result = Builder.CreateGEP(Int8Ty, pointer, index, "add.ptr");
 4423 return Builder.CreateBitCast(result, pointer->getType());
 4424 }
 4425
 4426 QualType elementType = pointerType->getPointeeType();
 4427 if (const VariableArrayType *vla =
 4428 getContext().getAsVariableArrayType(elementType)) {
 4429 // The element count here is the total number of non-VLA elements.
 4430 llvm::Value *numElements = getVLASize(vla).NumElts;
 4431
 4432 // Effectively, the multiply by the VLA size is part of the GEP.
 4433 // GEP indexes are signed, and scaling an index isn't permitted to
 4434 // signed-overflow, so we use the same semantics for our explicit
 4435 // multiply. We suppress this if overflow is not undefined behavior.
 4436 llvm::Type *elemTy = ConvertTypeForMem(vla->getElementType());
 4437 if (getLangOpts().PointerOverflowDefined) {
 4438 index = Builder.CreateMul(index, numElements, "vla.index");
 4439 pointer = Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
 4440 } else {
 4441 index = Builder.CreateNSWMul(index, numElements, "vla.index");
 4442 pointer =
 4443 EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
 4444 isSubtraction, BO->getExprLoc(), "add.ptr");
 4445 }
 4446 return pointer;
 4447 }
 4448
 4449 // Explicitly handle GNU void* and function pointer arithmetic extensions. The
 4450 // GNU void* casts amount to no-ops since our void* type is i8*, but this is
 4451 // future proof.
 4452 llvm::Type *elemTy;
 4453 if (elementType->isVoidType() || elementType->isFunctionType())
 4454 elemTy = Int8Ty;
 4455 else
 4456 elemTy = ConvertTypeForMem(elementType);
 4457
 4458 if (getLangOpts().PointerOverflowDefined)
 4459 return Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
 4460
 4461 return EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned, isSubtraction,
 4462 BO->getExprLoc(), "add.ptr");
}
4464
4465// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
4466// Addend. Use negMul and negAdd to negate the first operand of the Mul or
4467// the add operand respectively. This allows fmuladd to represent a*b-c, or
4468// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
4469// efficient operations.
4470static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
4471 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4472 bool negMul, bool negAdd) {
4473 Value *MulOp0 = MulOp->getOperand(0);
4474 Value *MulOp1 = MulOp->getOperand(1);
4475 if (negMul)
4476 MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
4477 if (negAdd)
4478 Addend = Builder.CreateFNeg(Addend, "neg");
4479
4480 Value *FMulAdd = nullptr;
4481 if (Builder.getIsFPConstrained()) {
4482 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
4483 "Only constrained operation should be created when Builder is in FP "
4484 "constrained mode");
4485 FMulAdd = Builder.CreateConstrainedFPCall(
4486 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
4487 Addend->getType()),
4488 {MulOp0, MulOp1, Addend});
4489 } else {
4490 FMulAdd = Builder.CreateCall(
4491 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
4492 {MulOp0, MulOp1, Addend});
4493 }
4494 MulOp->eraseFromParent();
4495
4496 return FMulAdd;
4497}
4498
4499// Check whether it would be legal to emit an fmuladd intrinsic call to
4500// represent op and if so, build the fmuladd.
4501//
4502// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
4503// Does NOT check the type of the operation - it's assumed that this function
4504// will be called from contexts where it's known that the type is contractable.
4505static Value* tryEmitFMulAdd(const BinOpInfo &op,
4506 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4507 bool isSub=false) {
4508
4509 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
4510 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
4511 "Only fadd/fsub can be the root of an fmuladd.");
4512
4513 // Check whether this op is marked as fusable.
4514 if (!op.FPFeatures.allowFPContractWithinStatement())
4515 return nullptr;
4516
4517 Value *LHS = op.LHS;
4518 Value *RHS = op.RHS;
4519
4520 // Peek through fneg to look for fmul. Make sure fneg has no users, and that
4521 // it is the only use of its operand.
4522 bool NegLHS = false;
4523 if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
4524 if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
4525 LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
4526 LHS = LHSUnOp->getOperand(0);
4527 NegLHS = true;
4528 }
4529 }
4530
4531 bool NegRHS = false;
4532 if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
4533 if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
4534 RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
4535 RHS = RHSUnOp->getOperand(0);
4536 NegRHS = true;
4537 }
4538 }
4539
4540 // We have a potentially fusable op. Look for a mul on one of the operands.
4541 // Also, make sure that the mul result isn't used directly. In that case,
4542 // there's no point creating a muladd operation.
4543 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
4544 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
4545 (LHSBinOp->use_empty() || NegLHS)) {
4546 // If we looked through fneg, erase it.
4547 if (NegLHS)
4548 cast<llvm::Instruction>(op.LHS)->eraseFromParent();
4549 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
4550 }
4551 }
4552 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
4553 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
4554 (RHSBinOp->use_empty() || NegRHS)) {
4555 // If we looked through fneg, erase it.
4556 if (NegRHS)
4557 cast<llvm::Instruction>(op.RHS)->eraseFromParent();
4558 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
4559 }
4560 }
4561
4562 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
4563 if (LHSBinOp->getIntrinsicID() ==
4564 llvm::Intrinsic::experimental_constrained_fmul &&
4565 (LHSBinOp->use_empty() || NegLHS)) {
4566 // If we looked through fneg, erase it.
4567 if (NegLHS)
4568 cast<llvm::Instruction>(op.LHS)->eraseFromParent();
4569 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
4570 }
4571 }
4572 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
4573 if (RHSBinOp->getIntrinsicID() ==
4574 llvm::Intrinsic::experimental_constrained_fmul &&
4575 (RHSBinOp->use_empty() || NegRHS)) {
4576 // If we looked through fneg, erase it.
4577 if (NegRHS)
4578 cast<llvm::Instruction>(op.RHS)->eraseFromParent();
4579 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
4580 }
4581 }
4582
4583 return nullptr;
4584}
4585
// Emit a scalar '+': pointer arithmetic if either operand is a pointer;
// otherwise a signed-overflow-aware integer add, fmuladd fusion for FP,
// matrix add, unsigned-overflow checking, plain FP add, fixed-point add,
// or a plain integer add, in that order.
4586Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
 4587 if (op.LHS->getType()->isPointerTy() ||
 4588 op.RHS->getType()->isPointerTy())
 4590
  // -fwrapv / default / -ftrapv select the add flavor; an enabled
  // signed-overflow sanitizer forces the checked path in each case.
 4591 if (op.Ty->isSignedIntegerOrEnumerationType()) {
 4592 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
 4593 case LangOptions::SOB_Defined:
 4594 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
 4595 return Builder.CreateAdd(op.LHS, op.RHS, "add");
 4596 [[fallthrough]];
 4597 case LangOptions::SOB_Undefined:
 4598 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
 4599 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
 4600 [[fallthrough]];
 4601 case LangOptions::SOB_Trapping:
 4602 if (CanElideOverflowCheck(CGF.getContext(), op))
 4603 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
 4604 return EmitOverflowCheckedBinOp(op);
 4605 }
 4606 }
 4607
 4608 // For vector and matrix adds, try to fold into a fmuladd.
 4609 if (op.LHS->getType()->isFPOrFPVectorTy()) {
 4610 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
 4611 // Try to form an fmuladd.
 4612 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
 4613 return FMulAdd;
 4614 }
 4615
 4616 if (op.Ty->isConstantMatrixType()) {
 4617 llvm::MatrixBuilder MB(Builder);
 4618 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
 4619 return MB.CreateAdd(op.LHS, op.RHS);
 4620 }
 4621
 4622 if (op.Ty->isUnsignedIntegerType() &&
 4623 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
 4624 !CanElideOverflowCheck(CGF.getContext(), op))
 4625 return EmitOverflowCheckedBinOp(op);
 4626
 4627 if (op.LHS->getType()->isFPOrFPVectorTy()) {
 4628 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
 4629 return Builder.CreateFAdd(op.LHS, op.RHS, "add");
 4630 }
 4631
 4632 if (op.isFixedPointOp())
 4633 return EmitFixedPointBinOp(op);
 4634
 4635 return Builder.CreateAdd(op.LHS, op.RHS, "add");
}
4637
4638/// The resulting value must be calculated with exact precision, so the operands
4639/// may not be the same type.
///
/// Emits a fixed-point binary (or unary-derived) operation via
/// llvm::FixedPointBuilder, computing in the common semantics of both
/// operands and converting back to the result semantics at the end.
4640Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
 4641 using llvm::APSInt;
 4642 using llvm::ConstantInt;
 4643
 4644 // This is either a binary operation where at least one of the operands is
 4645 // a fixed-point type, or a unary operation where the operand is a fixed-point
 4646 // type. The result type of a binary operation is determined by
 4647 // Sema::handleFixedPointConversions().
 4648 QualType ResultTy = op.Ty;
 4649 QualType LHSTy, RHSTy;
 4650 if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
 4651 RHSTy = BinOp->getRHS()->getType();
 4652 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
 4653 // For compound assignment, the effective type of the LHS at this point
 4654 // is the computation LHS type, not the actual LHS type, and the final
 4655 // result type is not the type of the expression but rather the
 4656 // computation result type.
 4657 LHSTy = CAO->getComputationLHSType();
 4658 ResultTy = CAO->getComputationResultType();
 4659 } else
 4660 LHSTy = BinOp->getLHS()->getType();
 4661 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
 4662 LHSTy = UnOp->getSubExpr()->getType();
 4663 RHSTy = UnOp->getSubExpr()->getType();
 4664 }
 4665 ASTContext &Ctx = CGF.getContext();
 4666 Value *LHS = op.LHS;
 4667 Value *RHS = op.RHS;
 4668
  // Fixed-point semantics (width, scale, signedness, saturation) per side,
  // plus the common semantics both operands can be represented in exactly.
 4669 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
 4670 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
 4671 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
 4672 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
 4673
 4674 // Perform the actual operation.
 4675 Value *Result;
 4676 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
 4677 switch (op.Opcode) {
 4678 case BO_AddAssign:
 4679 case BO_Add:
 4680 Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
 4681 break;
 4682 case BO_SubAssign:
 4683 case BO_Sub:
 4684 Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
 4685 break;
 4686 case BO_MulAssign:
 4687 case BO_Mul:
 4688 Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
 4689 break;
 4690 case BO_DivAssign:
 4691 case BO_Div:
 4692 Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
 4693 break;
 4694 case BO_ShlAssign:
 4695 case BO_Shl:
 4696 Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
 4697 break;
 4698 case BO_ShrAssign:
 4699 case BO_Shr:
 4700 Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
 4701 break;
  // Comparisons produce their boolean result directly and need no
  // conversion to the result semantics, so they return here.
 4702 case BO_LT:
 4703 return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
 4704 case BO_GT:
 4705 return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
 4706 case BO_LE:
 4707 return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
 4708 case BO_GE:
 4709 return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
 4710 case BO_EQ:
 4711 // For equality operations, we assume any padding bits on unsigned types are
 4712 // zero'd out. They could be overwritten through non-saturating operations
 4713 // that cause overflow, but this leads to undefined behavior.
 4714 return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
 4715 case BO_NE:
 4716 return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
 4717 case BO_Cmp:
 4718 case BO_LAnd:
 4719 case BO_LOr:
 4720 llvm_unreachable("Found unimplemented fixed point binary operation");
 4721 case BO_PtrMemD:
 4722 case BO_PtrMemI:
 4723 case BO_Rem:
 4724 case BO_Xor:
 4725 case BO_And:
 4726 case BO_Or:
 4727 case BO_Assign:
 4728 case BO_RemAssign:
 4729 case BO_AndAssign:
 4730 case BO_XorAssign:
 4731 case BO_OrAssign:
 4732 case BO_Comma:
 4733 llvm_unreachable("Found unsupported binary operation for fixed point types.");
 4734 }
 4735
  // Shifts compute in the LHS semantics (only the LHS is fixed-point);
  // everything else computes in the common semantics.
 4736 bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
 4738 // Convert to the result type.
 4739 return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
 4740 : CommonFixedSema,
 4741 ResultFixedSema);
}
4743
// Emit a scalar '-': ordinary arithmetic subtraction when the LHS is not a
// pointer; pointer - integer via pointer arithmetic; pointer - pointer as a
// byte difference divided (exact sdiv) by the element size.
4744Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
 4745 // The LHS is always a pointer if either side is.
 4746 if (!op.LHS->getType()->isPointerTy()) {
  // -fwrapv / default / -ftrapv select the sub flavor; an enabled
  // signed-overflow sanitizer forces the checked path in each case.
 4747 if (op.Ty->isSignedIntegerOrEnumerationType()) {
 4748 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
 4749 case LangOptions::SOB_Defined:
 4750 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
 4751 return Builder.CreateSub(op.LHS, op.RHS, "sub");
 4752 [[fallthrough]];
 4753 case LangOptions::SOB_Undefined:
 4754 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
 4755 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
 4756 [[fallthrough]];
 4757 case LangOptions::SOB_Trapping:
 4758 if (CanElideOverflowCheck(CGF.getContext(), op))
 4759 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
 4760 return EmitOverflowCheckedBinOp(op);
 4761 }
 4762 }
 4763
 4764 // For vector and matrix subs, try to fold into a fmuladd.
 4765 if (op.LHS->getType()->isFPOrFPVectorTy()) {
 4766 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
 4767 // Try to form an fmuladd.
 4768 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
 4769 return FMulAdd;
 4770 }
 4771
 4772 if (op.Ty->isConstantMatrixType()) {
 4773 llvm::MatrixBuilder MB(Builder);
 4774 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
 4775 return MB.CreateSub(op.LHS, op.RHS);
 4776 }
 4777
 4778 if (op.Ty->isUnsignedIntegerType() &&
 4779 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
 4780 !CanElideOverflowCheck(CGF.getContext(), op))
 4781 return EmitOverflowCheckedBinOp(op);
 4782
 4783 if (op.LHS->getType()->isFPOrFPVectorTy()) {
 4784 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
 4785 return Builder.CreateFSub(op.LHS, op.RHS, "sub");
 4786 }
 4787
 4788 if (op.isFixedPointOp())
 4789 return EmitFixedPointBinOp(op);
 4790
 4791 return Builder.CreateSub(op.LHS, op.RHS, "sub");
 4792 }
 4793
 4794 // If the RHS is not a pointer, then we have normal pointer
 4795 // arithmetic.
 4796 if (!op.RHS->getType()->isPointerTy())
 4798
 4799 // Otherwise, this is a pointer subtraction.
 4800
 4801 // Do the raw subtraction part.
  // Both pointers are converted to ptrdiff_t-sized integers and subtracted,
  // yielding the distance in bytes (chars).
 4802 llvm::Value *LHS
 4803 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
 4804 llvm::Value *RHS
 4805 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
 4806 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
 4807
 4808 // Okay, figure out the element size.
 4809 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
 4810 QualType elementType = expr->getLHS()->getType()->getPointeeType();
 4811
 4812 llvm::Value *divisor = nullptr;
 4813
 4814 // For a variable-length array, this is going to be non-constant.
 4815 if (const VariableArrayType *vla
 4816 = CGF.getContext().getAsVariableArrayType(elementType)) {
 4817 auto VlaSize = CGF.getVLASize(vla);
 4818 elementType = VlaSize.Type;
 4819 divisor = VlaSize.NumElts;
 4820
 4821 // Scale the number of non-VLA elements by the non-VLA element size.
 4822 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
 4823 if (!eltSize.isOne())
 4824 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
 4825
 4826 // For everything elese, we can just compute it, safe in the
 4827 // assumption that Sema won't let anything through that we can't
 4828 // safely compute the size of.
 4829 } else {
 4830 CharUnits elementSize;
 4831 // Handle GCC extension for pointer arithmetic on void* and
 4832 // function pointer types.
 4833 if (elementType->isVoidType() || elementType->isFunctionType())
 4834 elementSize = CharUnits::One();
 4835 else
 4836 elementSize = CGF.getContext().getTypeSizeInChars(elementType);
 4837
 4838 // Don't even emit the divide for element size of 1.
 4839 if (elementSize.isOne())
 4840 return diffInChars;
 4841
 4842 divisor = CGF.CGM.getSize(elementSize);
 4843 }
 4844
 4845 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
 4846 // pointer difference in C is only defined in the case where both operands
 4847 // are pointing to elements of an array.
 4848 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}
4850
4851Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
4852 bool RHSIsSigned) {
4853 llvm::IntegerType *Ty;
4854 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4855 Ty = cast<llvm::IntegerType>(VT->getElementType());
4856 else
4857 Ty = cast<llvm::IntegerType>(LHS->getType());
4858 // For a given type of LHS the maximum shift amount is width(LHS)-1, however
4859 // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
4860 // this in ConstantInt::get, this results in the value getting truncated.
4861 // Constrain the return value to be max(RHS) in this case.
4862 llvm::Type *RHSTy = RHS->getType();
4863 llvm::APInt RHSMax =
4864 RHSIsSigned ? llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())
4865 : llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
4866 if (RHSMax.ult(Ty->getBitWidth()))
4867 return llvm::ConstantInt::get(RHSTy, RHSMax);
4868 return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
4869}
4870
4871Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
4872 const Twine &Name) {
4873 llvm::IntegerType *Ty;
4874 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4875 Ty = cast<llvm::IntegerType>(VT->getElementType());
4876 else
4877 Ty = cast<llvm::IntegerType>(LHS->getType());
4878
4879 if (llvm::isPowerOf2_64(Ty->getBitWidth()))
4880 return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);
4881
4882 return Builder.CreateURem(
4883 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
4884}
4885
/// Emit a left shift (<<).  Under OpenCL/HLSL the shift amount is masked to
/// the bit width of the LHS; otherwise, when the relevant UBSan checks are
/// enabled, emit the shift-exponent check and the shift-base check (signed
/// base checking is disabled for C++20, unsigned base checking is opt-in).
Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  // C++20 made shifting ones into/out of the sign bit well-defined, so the
  // signed base check is suppressed there.
  bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
                            Ops.Ty->hasSignedIntegerRepresentation() &&
                            !CGF.getLangOpts().CPlusPlus20;
  bool SanitizeUnsignedBase =
      CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
      Ops.Ty->hasUnsignedIntegerRepresentation();
  bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
  else if ((SanitizeBase || SanitizeExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    // Collect the ordinals of all checks we will emit so the sanitizer debug
    // location covers all of them.
    SmallVector<SanitizerKind::SanitizerOrdinal, 3> Ordinals;
    if (SanitizeSignedBase)
      Ordinals.push_back(SanitizerKind::SO_ShiftBase);
    if (SanitizeUnsignedBase)
      Ordinals.push_back(SanitizerKind::SO_UnsignedShiftBase);
    if (SanitizeExponent)
      Ordinals.push_back(SanitizerKind::SO_ShiftExponent);

    SanitizerDebugLocation SanScope(&CGF, Ordinals,
                                    SanitizerHandler::ShiftOutOfBounds);
    SmallVector<std::pair<Value *, SanitizerKind::SanitizerOrdinal>, 2> Checks;
    bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
    // A shift amount is valid if it is <= bitwidth(LHS) - 1; the exponent
    // check is done on the original (unpromoted) RHS.
    llvm::Value *WidthMinusOne =
        GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);
    llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);

    if (SanitizeExponent) {
      Checks.push_back(
          std::make_pair(ValidExponent, SanitizerKind::SO_ShiftExponent));
    }

    if (SanitizeBase) {
      // Check whether we are shifting any non-zero bits off the top of the
      // integer. We only emit this check if exponent is valid - otherwise
      // instructions below will have undefined behavior themselves.
      llvm::BasicBlock *Orig = Builder.GetInsertBlock();
      llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
      llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
      Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
      // If the RHS was promoted above, recompute the limit in the promoted
      // type so the subtraction below is performed on matching types.
      llvm::Value *PromotedWidthMinusOne =
          (RHS == Ops.RHS) ? WidthMinusOne
                           : GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);
      CGF.EmitBlock(CheckShiftBase);
      llvm::Value *BitsShiftedOff = Builder.CreateLShr(
          Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
                                     /*NUW*/ true, /*NSW*/ true),
          "shl.check");
      if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
        // In C99, we are not permitted to shift a 1 bit into the sign bit.
        // Under C++11's rules, shifting a 1 bit into the sign bit is
        // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
        // define signed left shifts, so we use the C99 and C++11 rules there).
        // Unsigned shifts can always shift into the top bit.
        llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
        BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
      }
      llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
      llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
      CGF.EmitBlock(Cont);
      // On the edge where the exponent was invalid, the base computation was
      // skipped, so the base check passes vacuously (incoming 'true').
      llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
      BaseCheck->addIncoming(Builder.getTrue(), Orig);
      BaseCheck->addIncoming(ValidBase, CheckShiftBase);
      Checks.push_back(std::make_pair(
          BaseCheck, SanitizeSignedBase ? SanitizerKind::SO_ShiftBase
                                        : SanitizerKind::SO_UnsignedShiftBase));
    }

    assert(!Checks.empty());
    EmitBinOpCheck(Checks, Ops);
  }

  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}
4974
4975Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
4976 // TODO: This misses out on the sanitizer check below.
4977 if (Ops.isFixedPointOp())
4978 return EmitFixedPointBinOp(Ops);
4979
4980 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4981 // RHS to the same size as the LHS.
4982 Value *RHS = Ops.RHS;
4983 if (Ops.LHS->getType() != RHS->getType())
4984 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4985
4986 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4987 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4988 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
4989 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
4990 isa<llvm::IntegerType>(Ops.LHS->getType())) {
4991 SanitizerDebugLocation SanScope(&CGF, {SanitizerKind::SO_ShiftExponent},
4992 SanitizerHandler::ShiftOutOfBounds);
4993 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4994 llvm::Value *Valid = Builder.CreateICmpULE(
4995 Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));
4996 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::SO_ShiftExponent), Ops);
4997 }
4998
4999 if (Ops.Ty->hasUnsignedIntegerRepresentation())
5000 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
5001 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
5002}
5003
5005// return corresponding comparison intrinsic for given vector type
5006static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
5007 BuiltinType::Kind ElemKind) {
5008 switch (ElemKind) {
5009 default: llvm_unreachable("unexpected element type");
5010 case BuiltinType::Char_U:
5011 case BuiltinType::UChar:
5012 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
5013 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
5014 case BuiltinType::Char_S:
5015 case BuiltinType::SChar:
5016 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
5017 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
5018 case BuiltinType::UShort:
5019 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
5020 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
5021 case BuiltinType::Short:
5022 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
5023 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
5024 case BuiltinType::UInt:
5025 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
5026 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
5027 case BuiltinType::Int:
5028 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
5029 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
5030 case BuiltinType::ULong:
5031 case BuiltinType::ULongLong:
5032 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
5033 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
5034 case BuiltinType::Long:
5035 case BuiltinType::LongLong:
5036 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
5037 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
5038 case BuiltinType::Float:
5039 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
5040 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
5041 case BuiltinType::Double:
5042 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
5043 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
5044 case BuiltinType::UInt128:
5045 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
5046 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
5047 case BuiltinType::Int128:
5048 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
5049 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
5050 }
5051}
5052
/// Emit a scalar comparison (==, !=, <, <=, >, >=).  The caller supplies the
/// LLVM predicate for each operand class: UICmpOpc for unsigned integers and
/// pointers, SICmpOpc for signed integers, FCmpOpc for floating point (made
/// signaling when IsSignaling is set).  The result is converted to the
/// expression's type, except vector comparisons, which are sign-extended.
Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                      llvm::CmpInst::Predicate UICmpOpc,
                                      llvm::CmpInst::Predicate SICmpOpc,
                                      llvm::CmpInst::Predicate FCmpOpc,
                                      bool IsSignaling) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();
  // Member pointers only support equality comparisons.
  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
    assert(E->getOpcode() == BO_EQ ||
           E->getOpcode() == BO_NE);
    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
    BinOpInfo BOInfo = EmitBinOps(E);
    Value *LHS = BOInfo.LHS;
    Value *RHS = BOInfo.RHS;

    // If AltiVec, the comparison results in a numeric type, so we use
    // intrinsics comparing vectors and giving 0 or 1 as a result
    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // constants for mapping CR6 register bits to predicate result
      enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;

      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;

      // in several cases vector arguments order will be reversed
      Value *FirstVecArg = LHS,
            *SecondVecArg = RHS;

      QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
      BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();

      // Pick the predicate intrinsic and the CR6 bit to test; <, and the
      // non-float <= / >= cases, are expressed via the > intrinsic with the
      // operands swapped and/or the result sense inverted through CR6.
      switch(E->getOpcode()) {
      default: llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        }
        else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        }
        else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
          std::swap(FirstVecArg, SecondVecArg);
        }
        break;
      }

      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});

      // The result type of intrinsic may not be same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
      // do nothing, if ResultTy is not i1 at the same time, it will cause
      // crash later.
      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
      if (ResultTy->getBitWidth() > 1 &&
          E->getType() == CGF.getContext().BoolTy)
        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                                  E->getExprLoc());
    }

    // Non-vector (or vector-result) comparisons: pick the compare instruction
    // by operand class.
    if (BOInfo.isFixedPointOp()) {
      Result = EmitFixedPointBinOp(BOInfo);
    } else if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
      if (!IsSignaling)
        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
      else
        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.

      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&

        // Dynamic information is required to be stripped for comparisons,
        // because it could leak the dynamic information. Based on comparisons
        // of pointers to dynamic objects, the optimizer can replace one pointer
        // with another, which might be incorrect in presence of invariant
        // groups. Comparison with null is safe because null does not carry any
        // dynamic information.
        if (LHSTy.mayBeDynamicClass())
          LHS = Builder.CreateStripInvariantGroup(LHS);
        if (RHSTy.mayBeDynamicClass())
          RHS = Builder.CreateStripInvariantGroup(RHS);
      }

      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the appropriate
    // vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      // Mixed complex/real comparison: treat the real operand as a complex
      // value with a zero imaginary component.
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
      (void)CTy;
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      // As complex comparisons can only be equality comparisons, they
      // are never signaling comparisons.
      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
    }

    // == requires both components equal; != requires either to differ.
    if (E->getOpcode() == BO_EQ) {
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                              E->getExprLoc());
}
5236
    const BinaryOperator *E, Value **Previous, QualType *SrcType) {
  // Emit the RHS of a bitfield assignment.  When the RHS is wrapped in an
  // implicit integral / lvalue-to-rvalue cast, also hand back the
  // pre-conversion value (*Previous) and its type (*SrcType) so the caller
  // can run the bitfield-conversion sanitizer check on the original value.
  // In case we have the integer or bitfield sanitizer checks enabled
  // we want to get the expression before scalar conversion.
  if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
    CastKind Kind = ICE->getCastKind();
    if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
      *SrcType = ICE->getSubExpr()->getType();
      *Previous = EmitScalarExpr(ICE->getSubExpr());
      // Pass default ScalarConversionOpts to avoid emitting
      // integer sanitizer checks as E refers to bitfield.
      return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
                                  ICE->getExprLoc());
    }
  }
  // No interesting implicit cast: emit the RHS normally.
  return EmitScalarExpr(E->getRHS());
}
5254
/// Emit a simple assignment (BO_Assign), handling pointer-auth qualified
/// destinations, ObjC ARC ownership qualifiers, and bit-field stores.
/// Returns the assigned r-value (C), the stored value (C++ non-volatile),
/// a reload of the lvalue (C++ volatile), or null when the result is ignored.
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  ApplyAtomGroup Grp(CGF.getDebugInfo());
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  // Pointer-auth qualified assignments sign the RHS for the destination's
  // qualifier, store it, then strip the qualifier again for the result value.
  if (PointerAuthQualifier PtrAuth = E->getLHS()->getType().getPointerAuth()) {
    llvm::Value *RV =
        CGF.EmitPointerAuthQualify(PtrAuth, E->getRHS(), LV.getAddress());
    CGF.EmitNullabilityCheck(LV, RV, E->getExprLoc());

    if (Ignore)
      return nullptr;
    RV = CGF.EmitPointerAuthUnqualify(PtrAuth, RV, LV.getType(),
                                      LV.getAddress(), /*nonnull*/ false);
    return RV;
  }

  // ARC ownership-qualified stores are routed through the matching
  // EmitARCStore* helper; ordinary assignments use the generic path below.
  switch (E->getLHS()->getType().getObjCLifetime()) {
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
    break;

    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if LHS is a bitfield, if RHS contains an implicit cast expression
    // we want to extract that value and potentially (if the bitfield sanitizer
    // is enabled) use it to check for an implicit conversion.
    if (E->getLHS()->refersToBitField())
      RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
    else
      RHS = Visit(E->getRHS());

    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      Value *Src = Previous ? Previous : RHS;
      QualType DstType = E->getLHS()->getType();
      CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
                                      LHS.getBitFieldInfo(), E->getExprLoc());
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
  }
  // OpenMP: Handle lastprivate(condition:) in scalar assignment
  if (CGF.getLangOpts().OpenMP) {
                                                                  E->getLHS());
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}
5349
/// Emit "&&".  Vector operands are compared against zero element-wise and
/// combined with a bitwise AND (no short circuit).  Scalar operands get the
/// usual short-circuit control flow — a constant-foldable LHS elides the
/// branch entirely — producing an i1 PHI that is zero-extended to the result
/// type.  Profile/coverage counters and MC/DC bitmap updates are interleaved
/// when instrumentation is enabled.
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.isMCDCDecisionExpr(E))

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
        Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      } else
        CGF.markStmtMaybeUsed(E->getRHS());

      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.isMCDCDecisionExpr(E))

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS())) {
      CGF.markStmtMaybeUsed(E->getRHS());
      return llvm::Constant::getNullValue(ResTy);
    }
  }

  // General case: emit real short-circuit control flow.
  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.isMCDCDecisionExpr(E))

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reaquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit line number for unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.isMCDCDecisionExpr(E))

  // Artificial location to preserve the scope information
  {
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}
5486
/// Emit "||".  Mirrors VisitBinLAnd: vectors use an element-wise bitwise OR
/// of zero-comparisons (no short circuit); scalars get short-circuit control
/// flow with a constant-foldable LHS eliding the branch, and the i1 PHI
/// result is zero-extended to the result type.
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.isMCDCDecisionExpr(E))

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter need to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
        Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      } else
        CGF.markStmtMaybeUsed(E->getRHS());

      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.isMCDCDecisionExpr(E))

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    if (!CGF.ContainsLabel(E->getRHS())) {
      CGF.markStmtMaybeUsed(E->getRHS());
      return llvm::ConstantInt::get(ResTy, 1);
    }
  }

  // General case: emit real short-circuit control flow.
  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.isMCDCDecisionExpr(E))

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                               CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be true. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reaquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
    Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.isMCDCDecisionExpr(E))

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}
5617
5618Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
5619 CGF.EmitIgnoredExpr(E->getLHS());
5620 CGF.EnsureInsertPoint();
5621 return Visit(E->getRHS());
5622}
5623
5624//===----------------------------------------------------------------------===//
5625// Other Operators
5626//===----------------------------------------------------------------------===//
5627
/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
///
/// Only constant-foldable expressions qualify; the comment after the return
/// explains why even "cheap" variable reads are deliberately excluded.
                                                   CodeGenFunction &CGF) {
  // Anything that is an integer or floating point constant is fine.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());

  // (Intentionally after the return — kept as rationale, not executed.)
  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the scope
  // outside the lambda, that function may have returned already. Reading its
  // locals is a bad idea. Also, these reads may introduce races there didn't
  // exist in the source-level program.
}
5644
5645
5646Value *ScalarExprEmitter::
5647VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
5648 TestAndClearIgnoreResultAssign();
5649
5650 // Bind the common expression if necessary.
5651 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
5652
5653 Expr *condExpr = E->getCond();
5654 Expr *lhsExpr = E->getTrueExpr();
5655 Expr *rhsExpr = E->getFalseExpr();
5656
5657 // If the condition constant folds and can be elided, try to avoid emitting
5658 // the condition and the dead arm.
5659 bool CondExprBool;
5660 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5661 Expr *live = lhsExpr, *dead = rhsExpr;
5662 if (!CondExprBool) std::swap(live, dead);
5663
5664 // If the dead side doesn't have labels we need, just emit the Live part.
5665 if (!CGF.ContainsLabel(dead)) {
5666 CGF.incrementProfileCounter(CondExprBool ? CGF.UseExecPath
5667 : CGF.UseSkipPath,
5668 E, /*UseBoth=*/true);
5669 Value *Result = Visit(live);
5670 CGF.markStmtMaybeUsed(dead);
5671
5672 // If the live part is a throw expression, it acts like it has a void
5673 // type, so evaluating it returns a null Value*. However, a conditional
5674 // with non-void type must return a non-null Value*.
5675 if (!Result && !E->getType()->isVoidType())
5676 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
5677
5678 return Result;
5679 }
5680 }
5681
5682 // OpenCL: If the condition is a vector, we can treat this condition like
5683 // the select function.
5684 if (CGF.getLangOpts().OpenCL && (condExpr->getType()->isVectorType() ||
5685 condExpr->getType()->isExtVectorType())) {
5687
5688 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5689 llvm::Value *LHS = Visit(lhsExpr);
5690 llvm::Value *RHS = Visit(rhsExpr);
5691
5692 llvm::Type *condType = ConvertType(condExpr->getType());
5693 auto *vecTy = cast<llvm::FixedVectorType>(condType);
5694
5695 unsigned numElem = vecTy->getNumElements();
5696 llvm::Type *elemType = vecTy->getElementType();
5697
5698 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
5699 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
5700 llvm::Value *tmp = Builder.CreateSExt(
5701 TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
5702 llvm::Value *tmp2 = Builder.CreateNot(tmp);
5703
5704 // Cast float to int to perform ANDs if necessary.
5705 llvm::Value *RHSTmp = RHS;
5706 llvm::Value *LHSTmp = LHS;
5707 bool wasCast = false;
5708 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
5709 if (rhsVTy->getElementType()->isFloatingPointTy()) {
5710 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
5711 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
5712 wasCast = true;
5713 }
5714
5715 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
5716 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
5717 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
5718 if (wasCast)
5719 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
5720
5721 return tmp5;
5722 }
5723
5724 if (condExpr->getType()->isVectorType() ||
5725 condExpr->getType()->isSveVLSBuiltinType()) {
5727
5728 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5729 llvm::Value *LHS = Visit(lhsExpr);
5730 llvm::Value *RHS = Visit(rhsExpr);
5731
5732 llvm::Type *CondType = ConvertType(condExpr->getType());
5733 auto *VecTy = cast<llvm::VectorType>(CondType);
5734
5735 if (VecTy->getElementType()->isIntegerTy(1))
5736 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
5737
5738 // OpenCL uses the MSB of the mask vector.
5739 llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
5740 if (condExpr->getType()->isExtVectorType())
5741 CondV = Builder.CreateICmpSLT(CondV, ZeroVec, "vector_cond");
5742 else
5743 CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
5744 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
5745 }
5746
5747 // If this is a really simple expression (like x ? 4 : 5), emit this as a
5748 // select instead of as control flow. We can only do this if it is cheap and
5749 // safe to evaluate the LHS and RHS unconditionally.
5753 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
5754 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
5755
5756 CGF.incrementProfileCounter(E, StepV);
5757
5758 llvm::Value *LHS = Visit(lhsExpr);
5759 llvm::Value *RHS = Visit(rhsExpr);
5760 if (!LHS) {
5761 // If the conditional has void type, make sure we return a null Value*.
5762 assert(!RHS && "LHS and RHS types must match");
5763 return nullptr;
5764 }
5765 return Builder.CreateSelect(CondV, LHS, RHS, "cond");
5766 }
5767
5768 // If the top of the logical operator nest, reset the MCDC temp to 0.
5769 if (auto E = CGF.stripCond(condExpr); CGF.isMCDCDecisionExpr(E))
5771
5772 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
5773 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
5774 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
5775
5776 CodeGenFunction::ConditionalEvaluation eval(CGF);
5777 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
5778 CGF.getProfileCount(lhsExpr));
5779
5780 CGF.EmitBlock(LHSBlock);
5781
5782 // If the top of the logical operator nest, update the MCDC bitmap for the
5783 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5784 // may also contain a boolean expression.
5785 if (auto E = CGF.stripCond(condExpr); CGF.isMCDCDecisionExpr(E))
5787
5789 eval.begin(CGF);
5790 Value *LHS = Visit(lhsExpr);
5791 eval.end(CGF);
5792
5793 LHSBlock = Builder.GetInsertBlock();
5794 Builder.CreateBr(ContBlock);
5795
5796 CGF.EmitBlock(RHSBlock);
5797
5798 // If the top of the logical operator nest, update the MCDC bitmap for the
5799 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5800 // may also contain a boolean expression.
5801 if (auto E = CGF.stripCond(condExpr); CGF.isMCDCDecisionExpr(E))
5803
5805 eval.begin(CGF);
5806 Value *RHS = Visit(rhsExpr);
5807 eval.end(CGF);
5808
5809 RHSBlock = Builder.GetInsertBlock();
5810 CGF.EmitBlock(ContBlock);
5811
5812 // If the LHS or RHS is a throw expression, it will be legitimately null.
5813 if (!LHS)
5814 return RHS;
5815 if (!RHS)
5816 return LHS;
5817
5818 // Create a PHI node for the real part.
5819 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
5820 PN->addIncoming(LHS, LHSBlock);
5821 PN->addIncoming(RHS, RHSBlock);
5822
5823 return PN;
5824}
5825
5826Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
5827 return Visit(E->getChosenSubExpr());
5828}
5829
5830Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
5831 Address ArgValue = Address::invalid();
5832 RValue ArgPtr = CGF.EmitVAArg(VE, ArgValue);
5833
5834 return ArgPtr.getScalarVal();
5835}
5836
5837Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
5838 return CGF.EmitBlockLiteral(block);
5839}
5840
5841// Convert a vec3 to vec4, or vice versa.
5843 Value *Src, unsigned NumElementsDst) {
5844 static constexpr int Mask[] = {0, 1, 2, -1};
5845 return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
5846}
5847
5848// Create cast instructions for converting LLVM value \p Src to LLVM type \p
5849// DstTy. \p Src has the same size as \p DstTy. Both are single value types
5850// but could be scalar or vectors of different lengths, and either can be
5851// pointer.
5852// There are 4 cases:
5853// 1. non-pointer -> non-pointer : needs 1 bitcast
5854// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
5855// 3. pointer -> non-pointer
5856// a) pointer -> intptr_t : needs 1 ptrtoint
5857// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
5858// 4. non-pointer -> pointer
5859// a) intptr_t -> pointer : needs 1 inttoptr
5860// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
5861// Note: for cases 3b and 4b two casts are required since LLVM casts do not
5862// allow casting directly between pointer types and non-integer non-pointer
5863// types.
5865 const llvm::DataLayout &DL,
5866 Value *Src, llvm::Type *DstTy,
5867 StringRef Name = "") {
5868 auto SrcTy = Src->getType();
5869
5870 // Case 1.
5871 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
5872 return Builder.CreateBitCast(Src, DstTy, Name);
5873
5874 // Case 2.
5875 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
5876 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
5877
5878 // Case 3.
5879 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
5880 // Case 3b.
5881 if (!DstTy->isIntegerTy())
5882 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
5883 // Cases 3a and 3b.
5884 return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
5885 }
5886
5887 // Case 4b.
5888 if (!SrcTy->isIntegerTy())
5889 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
5890 // Cases 4a and 4b.
5891 return Builder.CreateIntToPtr(Src, DstTy, Name);
5892}
5893
5894Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
5895 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
5896 llvm::Type *DstTy = ConvertType(E->getType());
5897
5898 llvm::Type *SrcTy = Src->getType();
5899 unsigned NumElementsSrc =
5901 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
5902 : 0;
5903 unsigned NumElementsDst =
5905 ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
5906 : 0;
5907
5908 // Use bit vector expansion for ext_vector_type boolean vectors.
5909 if (E->getType()->isExtVectorBoolType())
5910 return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");
5911
5912 // Going from vec3 to non-vec3 is a special case and requires a shuffle
5913 // vector to get a vec4, then a bitcast if the target type is different.
5914 if (NumElementsSrc == 3 && NumElementsDst != 3) {
5915 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
5916 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5917 DstTy);
5918
5919 Src->setName("astype");
5920 return Src;
5921 }
5922
5923 // Going from non-vec3 to vec3 is a special case and requires a bitcast
5924 // to vec4 if the original type is not vec4, then a shuffle vector to
5925 // get a vec3.
5926 if (NumElementsSrc != 3 && NumElementsDst == 3) {
5927 auto *Vec4Ty = llvm::FixedVectorType::get(
5928 cast<llvm::VectorType>(DstTy)->getElementType(), 4);
5929 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5930 Vec4Ty);
5931
5932 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
5933 Src->setName("astype");
5934 return Src;
5935 }
5936
5937 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
5938 Src, DstTy, "astype");
5939}
5940
5941Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
5942 return CGF.EmitAtomicExpr(E).getScalarVal();
5943}
5944
5945//===----------------------------------------------------------------------===//
5946// Entry Point into this File
5947//===----------------------------------------------------------------------===//
5948
5949/// Emit the computation of the specified expression of scalar type, ignoring
5950/// the result.
5951Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
5952 assert(E && hasScalarEvaluationKind(E->getType()) &&
5953 "Invalid scalar expression to emit");
5954
5955 return ScalarExprEmitter(*this, IgnoreResultAssign)
5956 .Visit(const_cast<Expr *>(E));
5957}
5958
5959/// Emit a conversion from the specified type to the specified destination type,
5960/// both of which are LLVM scalar types.
5962 QualType DstTy,
5963 SourceLocation Loc) {
5964 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
5965 "Invalid scalar expression to emit");
5966 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
5967}
5968
5969/// Emit a conversion from the specified complex type to the specified
5970/// destination type, where the destination type is an LLVM scalar type.
5972 QualType SrcTy,
5973 QualType DstTy,
5974 SourceLocation Loc) {
5975 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
5976 "Invalid complex -> scalar conversion");
5977 return ScalarExprEmitter(*this)
5978 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
5979}
5980
5981
5982Value *
5984 QualType PromotionType) {
5985 if (!PromotionType.isNull())
5986 return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
5987 else
5988 return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
5989}
5990
5991
5994 bool isInc, bool isPre) {
5995 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
5996}
5997
5999 // object->isa or (*object).isa
6000 // Generate code as for: *(Class*)object
6001
6002 Expr *BaseExpr = E->getBase();
6004 if (BaseExpr->isPRValue()) {
6005 llvm::Type *BaseTy =
6007 Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
6008 } else {
6009 Addr = EmitLValue(BaseExpr).getAddress();
6010 }
6011
6012 // Cast the address to Class*.
6013 Addr = Addr.withElementType(ConvertType(E->getType()));
6014 return MakeAddrLValue(Addr, E->getType());
6015}
6016
6017
6019 const CompoundAssignOperator *E) {
6021 ScalarExprEmitter Scalar(*this);
6022 Value *Result = nullptr;
6023 switch (E->getOpcode()) {
6024#define COMPOUND_OP(Op) \
6025 case BO_##Op##Assign: \
6026 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
6027 Result)
6028 COMPOUND_OP(Mul);
6029 COMPOUND_OP(Div);
6030 COMPOUND_OP(Rem);
6031 COMPOUND_OP(Add);
6032 COMPOUND_OP(Sub);
6033 COMPOUND_OP(Shl);
6034 COMPOUND_OP(Shr);
6036 COMPOUND_OP(Xor);
6037 COMPOUND_OP(Or);
6038#undef COMPOUND_OP
6039
6040 case BO_PtrMemD:
6041 case BO_PtrMemI:
6042 case BO_Mul:
6043 case BO_Div:
6044 case BO_Rem:
6045 case BO_Add:
6046 case BO_Sub:
6047 case BO_Shl:
6048 case BO_Shr:
6049 case BO_LT:
6050 case BO_GT:
6051 case BO_LE:
6052 case BO_GE:
6053 case BO_EQ:
6054 case BO_NE:
6055 case BO_Cmp:
6056 case BO_And:
6057 case BO_Xor:
6058 case BO_Or:
6059 case BO_LAnd:
6060 case BO_LOr:
6061 case BO_Assign:
6062 case BO_Comma:
6063 llvm_unreachable("Not valid compound assignment operators");
6064 }
6065
6066 llvm_unreachable("Unhandled compound assignment operator");
6067}
6068
6070 // The total (signed) byte offset for the GEP.
6071 llvm::Value *TotalOffset;
6072 // The offset overflow flag - true if the total offset overflows.
6073 llvm::Value *OffsetOverflows;
6074};
6075
6076/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
6077/// and compute the total offset it applies from it's base pointer BasePtr.
6078/// Returns offset in bytes and a boolean flag whether an overflow happened
6079/// during evaluation.
6081 llvm::LLVMContext &VMContext,
6082 CodeGenModule &CGM,
6083 CGBuilderTy &Builder) {
6084 const auto &DL = CGM.getDataLayout();
6085
6086 // The total (signed) byte offset for the GEP.
6087 llvm::Value *TotalOffset = nullptr;
6088
6089 // Was the GEP already reduced to a constant?
6090 if (isa<llvm::Constant>(GEPVal)) {
6091 // Compute the offset by casting both pointers to integers and subtracting:
6092 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
6093 Value *BasePtr_int =
6094 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
6095 Value *GEPVal_int =
6096 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
6097 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
6098 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
6099 }
6100
6101 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
6102 assert(GEP->getPointerOperand() == BasePtr &&
6103 "BasePtr must be the base of the GEP.");
6104 assert(GEP->isInBounds() && "Expected inbounds GEP");
6105
6106 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
6107
6108 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
6109 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
6110 auto *SAddIntrinsic =
6111 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
6112 auto *SMulIntrinsic =
6113 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
6114
6115 // The offset overflow flag - true if the total offset overflows.
6116 llvm::Value *OffsetOverflows = Builder.getFalse();
6117
6118 /// Return the result of the given binary operation.
6119 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
6120 llvm::Value *RHS) -> llvm::Value * {
6121 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
6122
6123 // If the operands are constants, return a constant result.
6124 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
6125 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
6126 llvm::APInt N;
6127 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
6128 /*Signed=*/true, N);
6129 if (HasOverflow)
6130 OffsetOverflows = Builder.getTrue();
6131 return llvm::ConstantInt::get(VMContext, N);
6132 }
6133 }
6134
6135 // Otherwise, compute the result with checked arithmetic.
6136 auto *ResultAndOverflow = Builder.CreateCall(
6137 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
6138 OffsetOverflows = Builder.CreateOr(
6139 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
6140 return Builder.CreateExtractValue(ResultAndOverflow, 0);
6141 };
6142
6143 // Determine the total byte offset by looking at each GEP operand.
6144 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
6145 GTI != GTE; ++GTI) {
6146 llvm::Value *LocalOffset;
6147 auto *Index = GTI.getOperand();
6148 // Compute the local offset contributed by this indexing step:
6149 if (auto *STy = GTI.getStructTypeOrNull()) {
6150 // For struct indexing, the local offset is the byte position of the
6151 // specified field.
6152 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
6153 LocalOffset = llvm::ConstantInt::get(
6154 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
6155 } else {
6156 // Otherwise this is array-like indexing. The local offset is the index
6157 // multiplied by the element size.
6158 auto *ElementSize =
6159 llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
6160 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
6161 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
6162 }
6163
6164 // If this is the first offset, set it as the total offset. Otherwise, add
6165 // the local offset into the running total.
6166 if (!TotalOffset || TotalOffset == Zero)
6167 TotalOffset = LocalOffset;
6168 else
6169 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
6170 }
6171
6172 return {TotalOffset, OffsetOverflows};
6173}
6174
6175Value *
6176CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
6177 ArrayRef<Value *> IdxList,
6178 bool SignedIndices, bool IsSubtraction,
6179 SourceLocation Loc, const Twine &Name) {
6180 llvm::Type *PtrTy = Ptr->getType();
6181
6182 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6183 if (!SignedIndices && !IsSubtraction)
6184 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6185
6186 Value *GEPVal = Builder.CreateGEP(ElemTy, Ptr, IdxList, Name, NWFlags);
6187
6188 // If the pointer overflow sanitizer isn't enabled, do nothing.
6189 if (!SanOpts.has(SanitizerKind::PointerOverflow))
6190 return GEPVal;
6191
6192 // Perform nullptr-and-offset check unless the nullptr is defined.
6193 bool PerformNullCheck = !NullPointerIsDefined(
6194 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
6195 // Check for overflows unless the GEP got constant-folded,
6196 // and only in the default address space
6197 bool PerformOverflowCheck =
6198 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
6199
6200 if (!(PerformNullCheck || PerformOverflowCheck))
6201 return GEPVal;
6202
6203 const auto &DL = CGM.getDataLayout();
6204
6205 auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
6206 auto CheckHandler = SanitizerHandler::PointerOverflow;
6207 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6208 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
6209
6210 GEPOffsetAndOverflow EvaluatedGEP =
6211 EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
6212
6213 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
6214 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
6215 "If the offset got constant-folded, we don't expect that there was an "
6216 "overflow.");
6217
6218 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
6219
6220 // Common case: if the total offset is zero, don't emit a check.
6221 if (EvaluatedGEP.TotalOffset == Zero)
6222 return GEPVal;
6223
6224 // Now that we've computed the total offset, add it to the base pointer (with
6225 // wrapping semantics).
6226 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
6227 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
6228
6229 llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
6230 2>
6231 Checks;
6232
6233 if (PerformNullCheck) {
6234 // If the base pointer evaluates to a null pointer value,
6235 // the only valid pointer this inbounds GEP can produce is also
6236 // a null pointer, so the offset must also evaluate to zero.
6237 // Likewise, if we have non-zero base pointer, we can not get null pointer
6238 // as a result, so the offset can not be -intptr_t(BasePtr).
6239 // In other words, both pointers are either null, or both are non-null,
6240 // or the behaviour is undefined.
6241 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
6242 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
6243 auto *Valid = Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr);
6244 Checks.emplace_back(Valid, CheckOrdinal);
6245 }
6246
6247 if (PerformOverflowCheck) {
6248 // The GEP is valid if:
6249 // 1) The total offset doesn't overflow, and
6250 // 2) The sign of the difference between the computed address and the base
6251 // pointer matches the sign of the total offset.
6252 llvm::Value *ValidGEP;
6253 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
6254 if (SignedIndices) {
6255 // GEP is computed as `unsigned base + signed offset`, therefore:
6256 // * If offset was positive, then the computed pointer can not be
6257 // [unsigned] less than the base pointer, unless it overflowed.
6258 // * If offset was negative, then the computed pointer can not be
6259 // [unsigned] greater than the bas pointere, unless it overflowed.
6260 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
6261 auto *PosOrZeroOffset =
6262 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
6263 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
6264 ValidGEP =
6265 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
6266 } else if (!IsSubtraction) {
6267 // GEP is computed as `unsigned base + unsigned offset`, therefore the
6268 // computed pointer can not be [unsigned] less than base pointer,
6269 // unless there was an overflow.
6270 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
6271 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
6272 } else {
6273 // GEP is computed as `unsigned base - unsigned offset`, therefore the
6274 // computed pointer can not be [unsigned] greater than base pointer,
6275 // unless there was an overflow.
6276 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
6277 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
6278 }
6279 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
6280 Checks.emplace_back(ValidGEP, CheckOrdinal);
6281 }
6282
6283 assert(!Checks.empty() && "Should have produced some checks.");
6284
6285 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
6286 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
6287 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
6288 EmitCheck(Checks, CheckHandler, StaticArgs, DynamicArgs);
6289
6290 return GEPVal;
6291}
6292
6294 Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
6295 bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
6296 const Twine &Name) {
6297 if (!SanOpts.has(SanitizerKind::PointerOverflow)) {
6298 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6299 if (!SignedIndices && !IsSubtraction)
6300 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6301
6302 return Builder.CreateGEP(Addr, IdxList, elementType, Align, Name, NWFlags);
6303 }
6304
6305 return RawAddress(
6306 EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
6307 IdxList, SignedIndices, IsSubtraction, Loc, Name),
6308 elementType, Align);
6309}
Defines the clang::ASTContext interface.
#define V(N, I)
static llvm::Value * EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, const BinaryOperator *E, llvm::Value *LHS, llvm::Value *RHS, CompareKind Kind, const char *NameSuffix="")
static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue DestVal, LValue SrcVal, SourceLocation Loc)
static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty)
static llvm::Value * EmitIsNegativeTestHelper(Value *V, QualType VType, const char *Name, CGBuilderTy &Builder)
static Value * createCastsForTypeOfSameSize(CGBuilderTy &Builder, const llvm::DataLayout &DL, Value *Src, llvm::Type *DstTy, StringRef Name="")
static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E)
static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc, bool isPre, ASTContext &Ctx)
For the purposes of overflow pattern exclusion, does this match the "while(i--)" pattern?
IntrinsicType
@ VCMPGT
@ VCMPEQ
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, BuiltinType::Kind ElemKind)
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal, llvm::LLVMContext &VMContext, CodeGenModule &CGM, CGBuilderTy &Builder)
Evaluate given GEPVal, which is either an inbounds GEP, or a constant, and compute the total offset i...
static bool isDeclRefKnownNonNull(CodeGenFunction &CGF, const ValueDecl *D)
static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(QualType SrcType, QualType DstType)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool negMul, bool negAdd)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, unsigned Off)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF, Value *Src, unsigned NumElementsDst)
static Value * tryEmitFMulAdd(const BinOpInfo &op, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool isSub=false)
static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E, llvm::Value *InVal, bool IsInc, FPOptions FPFeatures)
#define HANDLE_BINOP(OP)
#define COMPOUND_OP(Op)
#define HANDLEBINOP(OP)
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e, CIRGenFunction &cgf)
Return true if the specified expression is cheap enough and side-effect-free enough to evaluate uncon...
static std::optional< QualType > getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e)
If e is a widened promoted integer, get its base (unpromoted) type.
#define VISITCOMP(CODE)
static uint32_t getBitWidth(const Expr *E)
llvm::APSInt APSInt
Definition Compiler.cpp:24
static Decl::Kind getKind(const Decl *D)
FormatToken * Previous
The previous token in the unwrapped line.
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
This file contains the declaration of TrapReasonBuilder and related classes.
llvm::APInt getValue() const
APSInt & getInt()
Definition APValue.h:489
bool isLValue() const
Definition APValue.h:472
bool isInt() const
Definition APValue.h:467
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
ParentMapContext & getParentMapContext()
Returns the dynamic AST node parent map context.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
CanQualType FloatTy
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
const LangOptions & getLangOpts() const
Definition ASTContext.h:944
bool isTypeIgnoredBySanitizer(const SanitizerMask &Mask, const QualType &Ty) const
Check if a type can have its sanitizer instrumentation elided based on its presence within an ignorel...
CanQualType BoolTy
unsigned getOpenMPDefaultSimdAlign(QualType T) const
Get default simd alignment of the specified complete type in bits.
llvm::FixedPointSemantics getFixedPointSemantics(QualType Ty) const
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getPromotedIntegerType(QualType PromotableType) const
Return the type that PromotableType will promote to: C99 6.3.1.1p2, assuming that PromotableType is a...
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getComplexType(QualType T) const
Return the uniqued reference to the type for a complex number with the specified element type.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:909
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
bool isPromotableIntegerType(QualType T) const
More type predicates useful for type checking/promotion.
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4531
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4537
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4543
LabelDecl * getLabel() const
Definition Expr.h:4573
uint64_t getValue() const
Definition ExprCXX.h:3044
QualType getElementType() const
Definition TypeBase.h:3735
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:6704
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4038
Expr * getLHS() const
Definition Expr.h:4088
static Opcode getOpForCompoundAssignment(Opcode Opc)
Definition Expr.h:4185
bool isCompoundAssignmentOp() const
Definition Expr.h:4182
SourceLocation getExprLoc() const
Definition Expr.h:4079
bool isShiftOp() const
Definition Expr.h:4127
Expr * getRHS() const
Definition Expr.h:4090
bool isShiftAssignOp() const
Definition Expr.h:4196
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:4251
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, const Expr *LHS, const Expr *RHS)
Return true if a binary operator using the specified opcode and operands would match the 'p = (i8*)nu...
Definition Expr.cpp:2204
Opcode getOpcode() const
Definition Expr.h:4083
BinaryOperatorKind Opcode
Definition Expr.h:4043
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition DeclCXX.h:249
bool getValue() const
Definition ExprCXX.h:740
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
bool getValue() const
Definition ExprCXX.h:4332
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition ExprCXX.h:304
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1602
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3676
path_iterator path_begin()
Definition Expr.h:3746
CastKind getCastKind() const
Definition Expr.h:3720
bool changesVolatileQualification() const
Return.
Definition Expr.h:3810
path_iterator path_end()
Definition Expr.h:3747
Expr * getSubExpr()
Definition Expr.h:3726
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
bool isOne() const
isOne - Test whether the quantity equals one.
Definition CharUnits.h:125
unsigned getValue() const
Definition Expr.h:1629
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4884
bool hasProfileClangInstr() const
Check if Clang profile instrumenation is on.
SanitizerSet SanitizeTrap
Set of sanitizer checks that trap rather than diagnose.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
static Address invalid()
Definition Address.h:176
bool isValid() const
Definition Address.h:177
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:112
virtual llvm::Constant * EmitNullMemberPointer(const MemberPointerType *MPT)
Create a null member pointer of the given type.
Definition CGCXXABI.cpp:102
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition CGCXXABI.cpp:94
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition CGCXXABI.cpp:84
virtual llvm::Value * EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *Src)
Perform a derived-to-base, base-to-derived, or bitcast member pointer conversion.
Definition CGCXXABI.cpp:71
void EmitPseudoVariable(CGBuilderTy &Builder, llvm::Instruction *Value, QualType Ty)
Emit a pseudo variable and debug info for an intermediate value if it does not correspond to a variab...
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
void emitInitListOpaqueValues(CodeGenFunction &CGF, InitListExpr *E)
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
Produce the code for a CK_ARCConsumeObject.
Definition CGObjC.cpp:2152
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
Definition CGObjC.cpp:573
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
CurrentSourceLocExprScope CurSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
llvm::Value * EmitARCReclaimReturnedObject(const Expr *e, bool allowUnsafeClaim)
Definition CGObjC.cpp:3089
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
Definition CGObjC.cpp:3679
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:7001
llvm::Value * EmitObjCSelectorExpr(const ObjCSelectorExpr *E)
Emit a selector.
Definition CGObjC.cpp:257
SanitizerSet SanOpts
Sanitizers enabled for this function.
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
Definition CGExpr.cpp:184
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
llvm::Value * EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E)
Definition CGObjC.cpp:251
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
const CastExpr * CurCast
If a cast expression is being visited, this holds the current cast's expression.
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitObjCProtocolExpr(const ObjCProtocolExpr *E)
Definition CGObjC.cpp:265
llvm::Value * EmitPointerAuthQualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType ValueType, Address StorageAddress, bool IsKnownNonNull)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
Definition CGExpr.cpp:2905
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3903
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition CGCall.cpp:6345
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
Definition CGExpr.cpp:7101
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void maybeUpdateMCDCTestVectorBitmap(const Expr *E)
Increment the profiler's counter for the given expression by StepV.
void EmitCXXDeleteExpr(const CXXDeleteExpr *E)
llvm::Value * EmitObjCArrayLiteral(const ObjCArrayLiteral *E)
Definition CGObjC.cpp:247
llvm::Value * EmitPromotedScalarExpr(const Expr *E, QualType PromotionType)
const LangOptions & getLangOpts() const
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
Store into a strong object.
Definition CGObjC.cpp:2545
bool isPointerKnownNonNull(const Expr *E)
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
Definition CGClass.cpp:388
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
Definition CGDecl.cpp:765
llvm::Value * EmitPointerAuthUnqualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType PointerType, Address StorageAddress, bool IsKnownNonNull)
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
Emit a compare-and-exchange op for atomic type.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
Definition CGClass.cpp:2963
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitCXXNewExpr(const CXXNewExpr *E)
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
std::pair< LValue, llvm::Value * > EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored)
Definition CGObjC.cpp:3629
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3793
LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E, llvm::Value *&Result)
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition CGExpr.cpp:177
const TargetInfo & getTarget() const
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
llvm::Value * EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty)
Definition CGObjC.cpp:3953
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:245
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:6248
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2407
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
llvm::Value * EmitObjCBoxedExpr(const ObjCBoxedExpr *E)
EmitObjCBoxedExpr - This routine generates code to call the appropriate expression boxing method.
Definition CGObjC.cpp:64
void EmitBoundsCheck(const Expr *ArrayExpr, const Expr *ArrayExprBase, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
Definition CGExpr.cpp:1240
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:226
void maybeResetMCDCCondBitmap(const Expr *E)
Zero-init the MCDC temp value.
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:4051
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6201
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
Definition CGExpr.cpp:2030
llvm::Value * EmitARCRetainScalarExpr(const Expr *expr)
EmitARCRetainScalarExpr - Semantically equivalent to EmitARCRetainObject(e->getType(),...
Definition CGObjC.cpp:3493
llvm::Value * EmitBlockLiteral(const BlockExpr *)
Emit block literal.
Definition CGBlocks.cpp:764
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2216
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6187
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
static const Expr * stripCond(const Expr *C)
Ignore parentheses and logical-NOT to track conditions consistently.
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2643
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4481
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition CGStmt.cpp:573
llvm::AtomicRMWInst * emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Order=llvm::AtomicOrdering::SequentiallyConsistent, llvm::SyncScope::ID SSID=llvm::SyncScope::System, const AtomicExpr *AE=nullptr)
Emit an atomicrmw instruction, and applying relevant metadata when applicable.
llvm::Value * EmitPointerArithmetic(const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer, Expr *indexOperand, llvm::Value *index, bool isSubtraction)
Emit pointer + index arithmetic.
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:267
uint64_t getCurrentProfileCount()
Get the profiler's current count.
llvm::Type * ConvertTypeForMem(QualType T)
RValue EmitAtomicExpr(AtomicExpr *E)
Definition CGAtomic.cpp:901
void markStmtMaybeUsed(const Stmt *S)
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
void FlattenAccessAndTypeLValue(LValue LVal, SmallVectorImpl< LValue > &AccessList)
Definition CGExpr.cpp:7110
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1576
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition CGStmt.cpp:676
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition CGExpr.cpp:1657
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition CGExpr.cpp:189
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
Definition CGExpr.cpp:738
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitBuiltinAvailable(const VersionTuple &Version)
Definition CGObjC.cpp:4033
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
Definition CGExpr.cpp:5030
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4393
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of 'this'.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2250
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
llvm::Value * EmitObjCStringLiteral(const ObjCStringLiteral *E)
Emits an instance of NSConstantString representing the object.
Definition CGObjC.cpp:51
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr)
Try to emit a reference to the given value without producing it as an l-value.
Definition CGExpr.cpp:1927
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1692
llvm::Value * EmitARCExtendBlockObject(const Expr *expr)
Definition CGObjC.cpp:3524
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
i8* @objc_storeWeak(i8** addr, i8* value) Returns value.
Definition CGObjC.cpp:2651
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
bool isMCDCDecisionExpr(const Expr *E) const
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:656
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition CGExpr.cpp:1372
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
TrapReasonBuilder BuildTrapReason(unsigned DiagID, TrapReason &TR)
Helper function to construct a TrapReasonBuilder.
llvm::Constant * getNullPointer(llvm::PointerType *T, QualType QT)
Get target specific null pointer.
const TargetInfo & getTarget() const
llvm::Constant * getMemberPointerConstant(const UnaryOperator *e)
const llvm::DataLayout & getDataLayout() const
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Value * createOpenCLIntToSamplerConversion(const Expr *E, CodeGenFunction &CGF)
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
LValue - This represents an lvalue reference.
Definition CGValue.h:183
bool isBitField() const
Definition CGValue.h:288
bool isVolatileQualified() const
Definition CGValue.h:297
const Qualifiers & getQuals() const
Definition CGValue.h:350
Address getAddress() const
Definition CGValue.h:373
QualType getType() const
Definition CGValue.h:303
const CGBitFieldInfo & getBitFieldInfo() const
Definition CGValue.h:446
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:99
bool isAggregate() const
Definition CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:84
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:72
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, llvm::Type *DestTy, bool IsNonNull=false) const
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4300
QualType getComputationLHSType() const
Definition Expr.h:4334
QualType getComputationResultType() const
Definition Expr.h:4337
bool isSatisfied() const
Whether or not the concept with the given arguments was satisfied when the expression was created.
APValue getAPValueResult() const
Definition Expr.cpp:412
bool hasAPValueResult() const
Definition Expr.h:1157
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4388
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:4809
T * getAttr() const
Definition DeclBase.h:573
ChildElementIter< false > begin()
Definition Expr.h:5232
size_t getDataElementCount() const
Definition Expr.h:5148
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isIntegerConstantExpr(const ASTContext &Ctx) const
bool isGLValue() const
Definition Expr.h:287
@ SE_AllowSideEffects
Allow any unmodeled side effect.
Definition Expr.h:674
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3085
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3069
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:276
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition Expr.h:476
QualType getType() const
Definition Expr.h:144
llvm::APInt getValue() const
Returns an internal integer representation of the literal.
Definition Expr.h:1575
llvm::APFloat getValue() const
Definition Expr.h:1666
const Expr * getSubExpr() const
Definition Expr.h:1062
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition Expr.h:3853
unsigned getNumInits() const
Definition Expr.h:5329
bool hadArrayRangeDesignator() const
Definition Expr.h:5483
const Expr * getInit(unsigned Init) const
Definition Expr.h:5353
@ PostDecrInWhile
while (count--)
bool isSignedOverflowDefined() const
bool isOverflowPatternExcluded(OverflowPatternExclusionKind Kind) const
std::string OverflowHandler
The name of the handler function to be called when -ftrapv is specified.
Represents a matrix type, as defined in the Matrix Types clang extensions.
Definition TypeBase.h:4338
Expr * getBase() const
Definition Expr.h:3441
bool isArrow() const
Definition Expr.h:3548
VersionTuple getVersion() const
Definition ExprObjC.h:1723
ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC 'id' type.
Definition ExprObjC.h:1495
Expr * getBase() const
Definition ExprObjC.h:1520
SourceLocation getExprLoc() const LLVM_READONLY
Definition ExprObjC.h:1543
const ObjCMethodDecl * getMethodDecl() const
Definition ExprObjC.h:1361
QualType getReturnType() const
Definition DeclObjC.h:329
Represents a pointer to an Objective C object.
Definition TypeBase.h:7910
const ObjCObjectType * getObjectType() const
Gets the type pointed to by this ObjC pointer.
Definition TypeBase.h:7947
Expr * getIndexExpr(unsigned Idx)
Definition Expr.h:2586
const OffsetOfNode & getComponent(unsigned Idx) const
Definition Expr.h:2574
TypeSourceInfo * getTypeSourceInfo() const
Definition Expr.h:2567
unsigned getNumComponents() const
Definition Expr.h:2582
unsigned getArrayExprIndex() const
For an array element node, returns the index into the array of expressions.
Definition Expr.h:2479
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition Expr.h:2485
@ Array
An index into an array.
Definition Expr.h:2426
@ Identifier
A field in a dependent type, known only by its name.
Definition Expr.h:2430
@ Field
A field.
Definition Expr.h:2428
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition Expr.h:2433
Kind getKind() const
Determine what kind of offsetof node this is.
Definition Expr.h:2475
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition Expr.h:2495
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:1208
Expr * getSelectedExpr() const
Definition ExprCXX.h:4639
const Expr * getSubExpr() const
Definition Expr.h:2199
DynTypedNodeList getParents(const NodeT &Node)
Returns the parents of the given node (within the traversal scope).
Pointer-authentication qualifiers.
Definition TypeBase.h:152
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3329
A (possibly-)qualified type.
Definition TypeBase.h:937
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:1453
bool mayBeDynamicClass() const
Returns true if it is a class and it might be dynamic.
Definition Type.cpp:131
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8292
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8418
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1438
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8477
QualType getCanonicalType() const
Definition TypeBase.h:8344
bool UseExcessPrecision(const ASTContext &Ctx)
Definition Type.cpp:1613
bool mayBeNotDynamicClass() const
Returns true if it is not a class or if the class might not be dynamic.
Definition Type.cpp:136
bool isCanonical() const
Definition TypeBase.h:8349
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
void removePointerAuth()
Definition TypeBase.h:610
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4524
bool isSatisfied() const
Whether or not the requires clause is satisfied.
std::string ComputeName(ASTContext &Context) const
Definition Expr.cpp:586
static constexpr SanitizerMask bitPosToMask(const unsigned Pos)
Create a mask with a bit enabled at position Pos.
Definition Sanitizers.h:59
llvm::APSInt getShuffleMaskIdx(unsigned N) const
Definition Expr.h:4695
unsigned getNumSubExprs() const
getNumSubExprs - Return the size of the SubExprs array.
Definition Expr.h:4676
Expr * getExpr(unsigned Index)
getExpr - Return the Expr at the specified index.
Definition Expr.h:4682
unsigned getPackLength() const
Retrieve the length of the parameter pack.
Definition ExprCXX.h:4515
APValue EvaluateInContext(const ASTContext &Ctx, const Expr *DefaultExpr) const
Return the result of evaluating this SourceLocExpr in the specified (and possibly null) default argum...
Definition Expr.cpp:2281
SourceLocation getLocation() const
Definition Expr.h:5061
Encodes a location in the source.
CompoundStmt * getSubStmt()
Definition Expr.h:4612
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
void dump() const
Dumps the specified AST fragment and all subtrees to llvm::errs().
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:350
virtual bool useFP16ConversionIntrinsics() const
Check whether conversions to and from __fp16 should go through an integer bitcast with i16.
VersionTuple getPlatformMinVersion() const
Retrieve the minimum desired version of the platform, to which the program should be compiled.
const llvm::fltSemantics & getHalfFormat() const
Definition TargetInfo.h:788
const llvm::fltSemantics & getBFloat16Format() const
Definition TargetInfo.h:798
const llvm::fltSemantics & getLongDoubleFormat() const
Definition TargetInfo.h:809
const llvm::fltSemantics & getFloat128Format() const
Definition TargetInfo.h:817
const llvm::fltSemantics & getIbm128Format() const
Definition TargetInfo.h:825
QualType getType() const
Return the type wrapped by this type source info.
Definition TypeBase.h:8274
bool getBoolValue() const
Definition ExprCXX.h:2947
const APValue & getAPValue() const
Definition ExprCXX.h:2952
bool isStoredAsBoolean() const
Definition ExprCXX.h:2943
bool isVoidType() const
Definition TypeBase.h:8891
bool isBooleanType() const
Definition TypeBase.h:9021
bool isSignableType(const ASTContext &Ctx) const
Definition TypeBase.h:8541
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2226
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition Type.cpp:2274
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2338
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8935
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9178
bool isReferenceType() const
Definition TypeBase.h:8553
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1910
bool isSveVLSBuiltinType() const
Determines if this is a sizeless type supported by the 'arm_sve_vector_bits' type attribute,...
Definition Type.cpp:2608
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
bool isExtVectorType() const
Definition TypeBase.h:8672
bool isExtVectorBoolType() const
Definition TypeBase.h:8676
bool isOCLIntelSubgroupAVCType() const
Definition TypeBase.h:8810
bool isBuiltinType() const
Helper methods to distinguish type categories.
Definition TypeBase.h:8652
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8664
bool isFixedPointType() const
Return true if this is a fixed point type according to ISO/IEC JTC1 SC22 WG14 N1169.
Definition TypeBase.h:8947
bool isHalfType() const
Definition TypeBase.h:8895
bool hasSignedIntegerRepresentation() const
Determine whether this type has an signed integer representation of some sort, e.g....
Definition Type.cpp:2244
bool isQueueT() const
Definition TypeBase.h:8781
bool isMatrixType() const
Definition TypeBase.h:8692
bool isEventT() const
Definition TypeBase.h:8773
bool isFunctionType() const
Definition TypeBase.h:8525
bool isVectorType() const
Definition TypeBase.h:8668
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2321
bool isFloatingType() const
Definition Type.cpp:2305
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2254
const T * castAsCanonical() const
Return this type's canonical type cast to the specified type.
Definition TypeBase.h:2929
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9111
bool isNullPtrType() const
Definition TypeBase.h:8928
QualType getTypeOfArgument() const
Gets the argument type, or the type of the argument expression, whichever is appropriate.
Definition Expr.h:2694
UnaryExprOrTypeTrait getKind() const
Definition Expr.h:2657
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
SourceLocation getExprLoc() const
Definition Expr.h:2368
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:2400
bool canOverflow() const
Returns true if the unary operator can cause an overflow.
Definition Expr.h:2298
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
bool isWeak() const
Determine whether this symbol is weakly-imported, or declared with the weak or weak-ref attr.
Definition Decl.cpp:5576
QualType getType() const
Definition Value.cpp:237
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3967
Represents a GCC generic vector type.
Definition TypeBase.h:4176
WhileStmt - This represents a 'while' stmt.
Definition Stmt.h:2689
Defines the clang::TargetInfo interface.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::ArgumentAdaptingMatcherFunc< internal::HasMatcher > has
Matches AST nodes that have child AST nodes that match the provided matcher.
const AstTypeMatcher< PointerType > pointerType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
bool BitCast(InterpState &S, CodePtr OpPC)
Definition Interp.h:3669
llvm::APFloat APFloat
Definition Floating.h:27
llvm::APInt APInt
Definition FixedPoint.h:19
bool LE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1310
bool Load(InterpState &S, CodePtr OpPC)
Definition Interp.h:1970
bool GE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1325
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
if(T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T -> getSizeExpr())))
@ Result
The result type of a method or function.
Definition TypeBase.h:905
const FunctionProtoType * T
CastKind
CastKind - The kind of operation required for a conversion.
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
Definition CGStmt.cpp:48
#define false
Definition stdbool.h:26
#define true
Definition stdbool.h:25
llvm::Value * TotalOffset
llvm::Value * OffsetOverflows
Structure with information about how a bitfield should be accessed.
unsigned Size
The total size of the bit-field, in bits.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::Type * HalfTy
half, bfloat, float, double
static TBAAAccessInfo getMayAliasInfo()
Definition CodeGenTBAA.h:63
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition Sanitizers.h:184