clang 23.0.0git
CGExprScalar.cpp
Go to the documentation of this file.
1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGHLSLRuntime.h"
17#include "CGObjCRuntime.h"
18#include "CGOpenMPRuntime.h"
19#include "CGRecordLayout.h"
20#include "CodeGenFunction.h"
21#include "CodeGenModule.h"
22#include "ConstantEmitter.h"
23#include "TargetInfo.h"
24#include "TrapReasonBuilder.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/DeclObjC.h"
28#include "clang/AST/Expr.h"
35#include "llvm/ADT/APFixedPoint.h"
36#include "llvm/ADT/ScopeExit.h"
37#include "llvm/IR/Argument.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constants.h"
40#include "llvm/IR/DataLayout.h"
41#include "llvm/IR/DerivedTypes.h"
42#include "llvm/IR/FixedPointBuilder.h"
43#include "llvm/IR/Function.h"
44#include "llvm/IR/GEPNoWrapFlags.h"
45#include "llvm/IR/GetElementPtrTypeIterator.h"
46#include "llvm/IR/GlobalVariable.h"
47#include "llvm/IR/Intrinsics.h"
48#include "llvm/IR/IntrinsicsPowerPC.h"
49#include "llvm/IR/MatrixBuilder.h"
50#include "llvm/IR/Module.h"
51#include "llvm/Support/TypeSize.h"
52#include <cstdarg>
53#include <optional>
54
55using namespace clang;
56using namespace CodeGen;
57using llvm::Value;
58
59//===----------------------------------------------------------------------===//
60// Scalar Expression Emitter
61//===----------------------------------------------------------------------===//
62
63namespace llvm {
64extern cl::opt<bool> EnableSingleByteCoverage;
65} // namespace llvm
66
67namespace {
68
69/// Determine whether the given binary operation may overflow.
70/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
71/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
72/// the returned overflow check is precise. The returned value is 'true' for
73/// all other opcodes, to be conservative.
74bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
75 BinaryOperator::Opcode Opcode, bool Signed,
76 llvm::APInt &Result) {
77 // Assume overflow is possible, unless we can prove otherwise.
78 bool Overflow = true;
79 const auto &LHSAP = LHS->getValue();
80 const auto &RHSAP = RHS->getValue();
81 if (Opcode == BO_Add) {
82 Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
83 : LHSAP.uadd_ov(RHSAP, Overflow);
84 } else if (Opcode == BO_Sub) {
85 Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
86 : LHSAP.usub_ov(RHSAP, Overflow);
87 } else if (Opcode == BO_Mul) {
88 Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
89 : LHSAP.umul_ov(RHSAP, Overflow);
90 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
91 if (Signed && !RHS->isZero())
92 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
93 else
94 return false;
95 }
96 return Overflow;
97}
98
99struct BinOpInfo {
100 Value *LHS;
101 Value *RHS;
102 QualType Ty; // Computation Type.
103 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
104 FPOptions FPFeatures;
105 const Expr *E; // Entire expr, for error unsupported. May not be binop.
106
107 /// Check if the binop can result in integer overflow.
108 bool mayHaveIntegerOverflow() const {
109 // Without constant input, we can't rule out overflow.
110 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
111 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
112 if (!LHSCI || !RHSCI)
113 return true;
114
115 llvm::APInt Result;
116 return ::mayHaveIntegerOverflow(
117 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
118 }
119
120 /// Check if the binop computes a division or a remainder.
121 bool isDivremOp() const {
122 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
123 Opcode == BO_RemAssign;
124 }
125
126 /// Check if the binop can result in an integer division by zero.
127 bool mayHaveIntegerDivisionByZero() const {
128 if (isDivremOp())
129 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
130 return CI->isZero();
131 return true;
132 }
133
134 /// Check if the binop can result in a float division by zero.
135 bool mayHaveFloatDivisionByZero() const {
136 if (isDivremOp())
137 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
138 return CFP->isZero();
139 return true;
140 }
141
142 /// Check if at least one operand is a fixed point type. In such cases, this
143 /// operation did not follow usual arithmetic conversion and both operands
144 /// might not be of the same type.
145 bool isFixedPointOp() const {
146 // We cannot simply check the result type since comparison operations return
147 // an int.
148 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
149 QualType LHSType = BinOp->getLHS()->getType();
150 QualType RHSType = BinOp->getRHS()->getType();
151 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
152 }
153 if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
154 return UnOp->getSubExpr()->getType()->isFixedPointType();
155 return false;
156 }
157
158 /// Check if the RHS has a signed integer representation.
159 bool rhsHasSignedIntegerRepresentation() const {
160 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
161 QualType RHSType = BinOp->getRHS()->getType();
162 return RHSType->hasSignedIntegerRepresentation();
163 }
164 return false;
165 }
166};
167
168static bool MustVisitNullValue(const Expr *E) {
169 // If a null pointer expression's type is the C++0x nullptr_t, then
170 // it's not necessarily a simple constant and it must be evaluated
171 // for its potential side effects.
172 return E->getType()->isNullPtrType();
173}
174
175/// If \p E is a widened promoted integer, get its base (unpromoted) type.
176static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
177 const Expr *E) {
178 const Expr *Base = E->IgnoreImpCasts();
179 if (E == Base)
180 return std::nullopt;
181
182 QualType BaseTy = Base->getType();
183 if (!Ctx.isPromotableIntegerType(BaseTy) ||
184 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
185 return std::nullopt;
186
187 return BaseTy;
188}
189
190/// Check if \p E is a widened promoted integer.
191static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
192 return getUnwidenedIntegerType(Ctx, E).has_value();
193}
194
195/// Consider OverflowBehaviorType and language options to calculate the final
196/// overflow behavior for an expression. There are no language options for
197/// unsigned overflow semantics so there is nothing to consider there.
// NOTE(review): the doxygen extraction that produced this file dropped
// several original source lines from this function (198, 208, 210, 216,
// 220-225): the function's return type and every `return` statement in the
// two switches below are missing. Confirm against upstream
// clang/lib/CodeGen/CGExprScalar.cpp before relying on this text.
199getOverflowBehaviorConsideringType(const CodeGenFunction &CGF,
200 const QualType Ty) {
// A per-type overflow-behavior attribute, when present, takes precedence
// over the global language options consulted further down.
201 const OverflowBehaviorType *OBT = Ty->getAs<OverflowBehaviorType>();
202 /// FIXME: Having two enums named `OverflowBehaviorKind` is not ideal, these
203 /// should be unified into one coherent enum that supports both unsigned and
204 /// signed overflow behavior semantics.
205 if (OBT) {
206 switch (OBT->getBehaviorKind()) {
207 case OverflowBehaviorType::OverflowBehaviorKind::Wrap:
// (original line 208 -- the return for the Wrap case -- lost in extraction)
209 case OverflowBehaviorType::OverflowBehaviorKind::Trap:
// (original line 210 -- the return for the Trap case -- lost in extraction)
211 }
212 llvm_unreachable("Unknown OverflowBehaviorKind");
213 }
214
// Unsigned types have no signed-overflow language option to consult.
215 if (Ty->isUnsignedIntegerType()) {
// (original line 216 -- the return for unsigned types -- lost in extraction)
217 }
218
// Otherwise the signed-overflow language option (-fwrapv/-ftrapv family)
// decides the behavior.
219 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
// (original lines 220-225 -- case labels and returns -- lost in extraction)
226 }
227 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
228}
229
230/// Check if we can skip the overflow check for \p Op.
231static bool CanElideOverflowCheck(ASTContext &Ctx, const BinOpInfo &Op) {
232 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
233 "Expected a unary or binary operator");
234
235 // If the binop has constant inputs and we can prove there is no overflow,
236 // we can elide the overflow check.
237 if (!Op.mayHaveIntegerOverflow())
238 return true;
239
240 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Op.E);
241 if (UO && Ctx.isUnaryOverflowPatternExcluded(UO))
242 return true;
243
244 const auto *BO = dyn_cast<BinaryOperator>(Op.E);
245 if (BO && BO->hasExcludedOverflowPattern())
246 return true;
247
248 if (Op.Ty.isWrapType())
249 return true;
250 if (Op.Ty.isTrapType())
251 return false;
252
253 if (Op.Ty->isSignedIntegerType() &&
254 Ctx.isTypeIgnoredBySanitizer(SanitizerKind::SignedIntegerOverflow,
255 Op.Ty)) {
256 return true;
257 }
258
259 if (Op.Ty->isUnsignedIntegerType() &&
260 Ctx.isTypeIgnoredBySanitizer(SanitizerKind::UnsignedIntegerOverflow,
261 Op.Ty)) {
262 return true;
263 }
264
265 // If a unary op has a widened operand, the op cannot overflow.
266 if (UO)
267 return !UO->canOverflow();
268
269 // We usually don't need overflow checks for binops with widened operands.
270 // Multiplication with promoted unsigned operands is a special case.
271 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
272 if (!OptionalLHSTy)
273 return false;
274
275 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
276 if (!OptionalRHSTy)
277 return false;
278
279 QualType LHSTy = *OptionalLHSTy;
280 QualType RHSTy = *OptionalRHSTy;
281
282 // This is the simple case: binops without unsigned multiplication, and with
283 // widened operands. No overflow check is needed here.
284 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
285 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
286 return true;
287
288 // For unsigned multiplication the overflow check can be elided if either one
289 // of the unpromoted types are less than half the size of the promoted type.
290 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
291 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
292 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
293}
294
295class ScalarExprEmitter
296 : public StmtVisitor<ScalarExprEmitter, Value*> {
297 CodeGenFunction &CGF;
298 CGBuilderTy &Builder;
299 bool IgnoreResultAssign;
300 llvm::LLVMContext &VMContext;
301public:
302
303 ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
304 : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
305 VMContext(cgf.getLLVMContext()) {
306 }
307
308 //===--------------------------------------------------------------------===//
309 // Utilities
310 //===--------------------------------------------------------------------===//
311
312 bool TestAndClearIgnoreResultAssign() {
313 bool I = IgnoreResultAssign;
314 IgnoreResultAssign = false;
315 return I;
316 }
317
318 llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
319 LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
320 LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
321 return CGF.EmitCheckedLValue(E, TCK);
322 }
323
324 void EmitBinOpCheck(
325 ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
326 const BinOpInfo &Info);
327
328 Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
329 return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
330 }
331
332 void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
333 const AlignValueAttr *AVAttr = nullptr;
334 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
335 const ValueDecl *VD = DRE->getDecl();
336
337 if (VD->getType()->isReferenceType()) {
338 if (const auto *TTy =
339 VD->getType().getNonReferenceType()->getAs<TypedefType>())
340 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
341 } else {
342 // Assumptions for function parameters are emitted at the start of the
343 // function, so there is no need to repeat that here,
344 // unless the alignment-assumption sanitizer is enabled,
345 // then we prefer the assumption over alignment attribute
346 // on IR function param.
347 if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
348 return;
349
350 AVAttr = VD->getAttr<AlignValueAttr>();
351 }
352 }
353
354 if (!AVAttr)
355 if (const auto *TTy = E->getType()->getAs<TypedefType>())
356 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
357
358 if (!AVAttr)
359 return;
360
361 Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
362 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
363 CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
364 }
365
366 /// EmitLoadOfLValue - Given an expression with complex type that represents a
367 /// value l-value, this method emits the address of the l-value, then loads
368 /// and returns the result.
369 Value *EmitLoadOfLValue(const Expr *E) {
370 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
371 E->getExprLoc());
372
373 EmitLValueAlignmentAssumption(E, V);
374 return V;
375 }
376
377 /// EmitConversionToBool - Convert the specified expression value to a
378 /// boolean (i1) truth value. This is equivalent to "Val != 0".
379 Value *EmitConversionToBool(Value *Src, QualType DstTy);
380
381 /// Emit a check that a conversion from a floating-point type does not
382 /// overflow.
383 void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
384 Value *Src, QualType SrcType, QualType DstType,
385 llvm::Type *DstTy, SourceLocation Loc);
386
387 /// Known implicit conversion check kinds.
388 /// This is used for bitfield conversion checks as well.
389 /// Keep in sync with the enum of the same name in ubsan_handlers.h
390 enum ImplicitConversionCheckKind : unsigned char {
391 ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
392 ICCK_UnsignedIntegerTruncation = 1,
393 ICCK_SignedIntegerTruncation = 2,
394 ICCK_IntegerSignChange = 3,
395 ICCK_SignedIntegerTruncationOrSignChange = 4,
396 };
397
398 /// Emit a check that an [implicit] truncation of an integer does not
399 /// discard any bits. It is not UB, so we use the value after truncation.
400 void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
401 QualType DstType, SourceLocation Loc,
402 bool OBTrapInvolved = false);
403
404 /// Emit a check that an [implicit] conversion of an integer does not change
405 /// the sign of the value. It is not UB, so we use the value after conversion.
406 /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
407 void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
408 QualType DstType, SourceLocation Loc,
409 bool OBTrapInvolved = false);
410
411 /// Emit a conversion from the specified type to the specified destination
412 /// type, both of which are LLVM scalar types.
413 struct ScalarConversionOpts {
414 bool TreatBooleanAsSigned;
415 bool EmitImplicitIntegerTruncationChecks;
416 bool EmitImplicitIntegerSignChangeChecks;
417 /* Potential -fsanitize-undefined-ignore-overflow-pattern= */
418 bool PatternExcluded;
419
420 ScalarConversionOpts()
421 : TreatBooleanAsSigned(false),
422 EmitImplicitIntegerTruncationChecks(false),
423 EmitImplicitIntegerSignChangeChecks(false), PatternExcluded(false) {}
424
425 ScalarConversionOpts(clang::SanitizerSet SanOpts)
426 : TreatBooleanAsSigned(false),
427 EmitImplicitIntegerTruncationChecks(
428 SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
429 EmitImplicitIntegerSignChangeChecks(
430 SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)),
431 PatternExcluded(false) {}
432 };
433 Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
434 llvm::Type *SrcTy, llvm::Type *DstTy,
435 ScalarConversionOpts Opts);
436 Value *
437 EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
438 SourceLocation Loc,
439 ScalarConversionOpts Opts = ScalarConversionOpts());
440
441 /// Convert between either a fixed point and other fixed point or fixed point
442 /// and an integer.
443 Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
444 SourceLocation Loc);
445
446 /// Emit a conversion from the specified complex type to the specified
447 /// destination type, where the destination type is an LLVM scalar type.
448 Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
449 QualType SrcTy, QualType DstTy,
450 SourceLocation Loc);
451
452 /// EmitNullValue - Emit a value that corresponds to null for the given type.
453 Value *EmitNullValue(QualType Ty);
454
455 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
456 Value *EmitFloatToBoolConversion(Value *V) {
457 // Compare against 0.0 for fp scalars.
458 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
459 return Builder.CreateFCmpUNE(V, Zero, "tobool");
460 }
461
462 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
463 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
464 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
465
466 return Builder.CreateICmpNE(V, Zero, "tobool");
467 }
468
469 Value *EmitIntToBoolConversion(Value *V) {
470 // Because of the type rules of C, we often end up computing a
471 // logical value, then zero extending it to int, then wanting it
472 // as a logical value again. Optimize this common case.
473 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
474 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
475 Value *Result = ZI->getOperand(0);
476 // If there aren't any more uses, zap the instruction to save space.
477 // Note that there can be more uses, for example if this
478 // is the result of an assignment.
479 if (ZI->use_empty())
480 ZI->eraseFromParent();
481 return Result;
482 }
483 }
484
485 return Builder.CreateIsNotNull(V, "tobool");
486 }
487
488 //===--------------------------------------------------------------------===//
489 // Visitor Methods
490 //===--------------------------------------------------------------------===//
491
492 Value *Visit(Expr *E) {
493 ApplyDebugLocation DL(CGF, E);
494 return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
495 }
496
497 Value *VisitStmt(Stmt *S) {
498 S->dump(llvm::errs(), CGF.getContext());
499 llvm_unreachable("Stmt can't have complex result type!");
500 }
501 Value *VisitExpr(Expr *S);
502
503 Value *VisitConstantExpr(ConstantExpr *E) {
504 // A constant expression of type 'void' generates no code and produces no
505 // value.
506 if (E->getType()->isVoidType())
507 return nullptr;
508
// Prefer the pre-evaluated constant when one is available.
509 if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
510 if (E->isGLValue()) {
511 // This was already converted to an rvalue when it was constant
512 // evaluated.
513 if (E->hasAPValueResult() && !E->getAPValueResult().isLValue())
514 return Result;
// Otherwise the constant is an address; load the scalar stored there.
515 return CGF.EmitLoadOfScalar(
516 Address(Result, CGF.convertTypeForLoadStore(E->getType()),
// NOTE(review): original line 517 (the Address alignment argument) was lost
// in the extraction that produced this file; confirm against upstream
// clang/lib/CodeGen/CGExprScalar.cpp.
518 /*Volatile*/ false, E->getType(), E->getExprLoc());
519 }
520 return Result;
521 }
// No constant available: emit the subexpression normally.
522 return Visit(E->getSubExpr());
523 }
524 Value *VisitParenExpr(ParenExpr *PE) {
525 return Visit(PE->getSubExpr());
526 }
527 Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
528 return Visit(E->getReplacement());
529 }
530 Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
531 return Visit(GE->getResultExpr());
532 }
533 Value *VisitCoawaitExpr(CoawaitExpr *S) {
534 return CGF.EmitCoawaitExpr(*S).getScalarVal();
535 }
536 Value *VisitCoyieldExpr(CoyieldExpr *S) {
537 return CGF.EmitCoyieldExpr(*S).getScalarVal();
538 }
539 Value *VisitUnaryCoawait(const UnaryOperator *E) {
540 return Visit(E->getSubExpr());
541 }
542
543 // Leaves.
544 Value *VisitIntegerLiteral(const IntegerLiteral *E) {
545 return Builder.getInt(E->getValue());
546 }
547 Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
548 return Builder.getInt(E->getValue());
549 }
550 Value *VisitFloatingLiteral(const FloatingLiteral *E) {
551 return llvm::ConstantFP::get(VMContext, E->getValue());
552 }
553 Value *VisitCharacterLiteral(const CharacterLiteral *E) {
554 // Character literals are always stored in an unsigned (even for signed
555 // char), so allow implicit truncation here.
556 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue(),
557 /*IsSigned=*/false, /*ImplicitTrunc=*/true);
558 }
559 Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
560 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
561 }
562 Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
563 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
564 }
565 Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
566 if (E->getType()->isVoidType())
567 return nullptr;
568
569 return EmitNullValue(E->getType());
570 }
571 Value *VisitGNUNullExpr(const GNUNullExpr *E) {
572 return EmitNullValue(E->getType());
573 }
574 Value *VisitOffsetOfExpr(OffsetOfExpr *E);
575 Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
576 Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
577 llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
578 return Builder.CreateBitCast(V, ConvertType(E->getType()));
579 }
580
581 Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
582 return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
583 }
584
585 Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
586 return CGF.EmitPseudoObjectRValue(E).getScalarVal();
587 }
588
589 Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
590 Value *VisitEmbedExpr(EmbedExpr *E);
591
592 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
593 if (E->isGLValue())
594 return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
595 E->getExprLoc());
596
597 // Otherwise, assume the mapping is the scalar directly.
599 }
600
601 Value *VisitOpenACCAsteriskSizeExpr(OpenACCAsteriskSizeExpr *E) {
602 llvm_unreachable("Codegen for this isn't defined/implemented");
603 }
604
605 // l-values.
606 Value *VisitDeclRefExpr(DeclRefExpr *E) {
607 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
608 return CGF.emitScalarConstant(Constant, E);
609 return EmitLoadOfLValue(E);
610 }
611
612 Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
613 return CGF.EmitObjCSelectorExpr(E);
614 }
615 Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
616 return CGF.EmitObjCProtocolExpr(E);
617 }
618 Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
619 return EmitLoadOfLValue(E);
620 }
621 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
622 if (E->getMethodDecl() &&
624 return EmitLoadOfLValue(E);
625 return CGF.EmitObjCMessageExpr(E).getScalarVal();
626 }
627
628 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
629 LValue LV = CGF.EmitObjCIsaExpr(E);
631 return V;
632 }
633
634 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
635 VersionTuple Version = E->getVersion();
636
637 // If we're checking for a platform older than our minimum deployment
638 // target, we can fold the check away.
639 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
640 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
641
642 return CGF.EmitBuiltinAvailable(Version);
643 }
644
645 Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
646 Value *VisitMatrixSingleSubscriptExpr(MatrixSingleSubscriptExpr *E);
647 Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
648 Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
649 Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
650 Value *VisitMemberExpr(MemberExpr *E);
651 Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
652 Value *VisitMatrixElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
653 Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
654 // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
655 // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
656 // literals aren't l-values in C++. We do so simply because that's the
657 // cleanest way to handle compound literals in C++.
658 // See the discussion here: https://reviews.llvm.org/D64464
659 return EmitLoadOfLValue(E);
660 }
661
662 Value *VisitInitListExpr(InitListExpr *E);
663
664 Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
665 assert(CGF.getArrayInitIndex() &&
666 "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
667 return CGF.getArrayInitIndex();
668 }
669
670 Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
671 return EmitNullValue(E->getType());
672 }
673 Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
674 CGF.CGM.EmitExplicitCastExprType(E, &CGF);
675 return VisitCastExpr(E);
676 }
677 Value *VisitCastExpr(CastExpr *E);
678
679 Value *VisitCallExpr(const CallExpr *E) {
681 return EmitLoadOfLValue(E);
682
683 Value *V = CGF.EmitCallExpr(E).getScalarVal();
684
685 EmitLValueAlignmentAssumption(E, V);
686 return V;
687 }
688
689 Value *VisitStmtExpr(const StmtExpr *E);
690
691 // Unary Operators.
692 Value *VisitUnaryPostDec(const UnaryOperator *E) {
693 LValue LV = EmitLValue(E->getSubExpr());
694 return EmitScalarPrePostIncDec(E, LV, false, false);
695 }
696 Value *VisitUnaryPostInc(const UnaryOperator *E) {
697 LValue LV = EmitLValue(E->getSubExpr());
698 return EmitScalarPrePostIncDec(E, LV, true, false);
699 }
700 Value *VisitUnaryPreDec(const UnaryOperator *E) {
701 LValue LV = EmitLValue(E->getSubExpr());
702 return EmitScalarPrePostIncDec(E, LV, false, true);
703 }
704 Value *VisitUnaryPreInc(const UnaryOperator *E) {
705 LValue LV = EmitLValue(E->getSubExpr());
706 return EmitScalarPrePostIncDec(E, LV, true, true);
707 }
708
709 llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
710 llvm::Value *InVal,
711 bool IsInc);
712
713 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
714 bool isInc, bool isPre);
715
716
717 Value *VisitUnaryAddrOf(const UnaryOperator *E) {
718 if (isa<MemberPointerType>(E->getType())) // never sugared
719 return CGF.CGM.getMemberPointerConstant(E);
720
721 return EmitLValue(E->getSubExpr()).getPointer(CGF);
722 }
723 Value *VisitUnaryDeref(const UnaryOperator *E) {
724 if (E->getType()->isVoidType())
725 return Visit(E->getSubExpr()); // the actual value should be unused
726 return EmitLoadOfLValue(E);
727 }
728
729 Value *VisitUnaryPlus(const UnaryOperator *E,
730 QualType PromotionType = QualType());
731 Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
732 Value *VisitUnaryMinus(const UnaryOperator *E,
733 QualType PromotionType = QualType());
734 Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);
735
736 Value *VisitUnaryNot (const UnaryOperator *E);
737 Value *VisitUnaryLNot (const UnaryOperator *E);
738 Value *VisitUnaryReal(const UnaryOperator *E,
739 QualType PromotionType = QualType());
740 Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
741 Value *VisitUnaryImag(const UnaryOperator *E,
742 QualType PromotionType = QualType());
743 Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
744 Value *VisitUnaryExtension(const UnaryOperator *E) {
745 return Visit(E->getSubExpr());
746 }
747
748 // C++
749 Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
750 return EmitLoadOfLValue(E);
751 }
752 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
753 auto &Ctx = CGF.getContext();
756 return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
757 SLE->getType());
758 }
759
760 Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
761 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
762 return Visit(DAE->getExpr());
763 }
764 Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
765 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
766 return Visit(DIE->getExpr());
767 }
768 Value *VisitCXXThisExpr(CXXThisExpr *TE) {
769 return CGF.LoadCXXThis();
770 }
771
772 Value *VisitExprWithCleanups(ExprWithCleanups *E);
773 Value *VisitCXXNewExpr(const CXXNewExpr *E) {
774 return CGF.EmitCXXNewExpr(E);
775 }
776 Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
777 CGF.EmitCXXDeleteExpr(E);
778 return nullptr;
779 }
780
781 Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
782 if (E->isStoredAsBoolean())
783 return llvm::ConstantInt::get(ConvertType(E->getType()),
784 E->getBoolValue());
785 assert(E->getAPValue().isInt() && "APValue type not supported");
786 return llvm::ConstantInt::get(ConvertType(E->getType()),
787 E->getAPValue().getInt());
788 }
789
790 Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
791 return Builder.getInt1(E->isSatisfied());
792 }
793
794 Value *VisitRequiresExpr(const RequiresExpr *E) {
795 return Builder.getInt1(E->isSatisfied());
796 }
797
798 Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
799 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
800 }
801
802 Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
803 return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
804 }
805
806 Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
807 // C++ [expr.pseudo]p1:
808 // The result shall only be used as the operand for the function call
809 // operator (), and the result of such a call has type void. The only
810 // effect is the evaluation of the postfix-expression before the dot or
811 // arrow.
812 CGF.EmitScalarExpr(E->getBase());
813 return nullptr;
814 }
815
816 Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
817 return EmitNullValue(E->getType());
818 }
819
820 Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
821 CGF.EmitCXXThrowExpr(E);
822 return nullptr;
823 }
824
825 Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
826 return Builder.getInt1(E->getValue());
827 }
828
829 // Binary Operators.
830 Value *EmitMul(const BinOpInfo &Ops) {
831 if (Ops.Ty->isSignedIntegerOrEnumerationType() ||
832 Ops.Ty->isUnsignedIntegerType()) {
833 const bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
834 const bool hasSan =
835 isSigned ? CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)
836 : CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow);
837 switch (getOverflowBehaviorConsideringType(CGF, Ops.Ty)) {
838 case LangOptions::OB_Wrap:
839 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
840 case LangOptions::OB_SignedAndDefined:
841 if (!hasSan)
842 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
843 [[fallthrough]];
844 case LangOptions::OB_Unset:
845 if (!hasSan)
846 return isSigned ? Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul")
847 : Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
848 [[fallthrough]];
849 case LangOptions::OB_Trap:
850 if (CanElideOverflowCheck(CGF.getContext(), Ops))
851 return isSigned ? Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul")
852 : Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
853 return EmitOverflowCheckedBinOp(Ops);
854 }
855 }
856
857 if (Ops.Ty->isConstantMatrixType()) {
858 llvm::MatrixBuilder MB(Builder);
859 // We need to check the types of the operands of the operator to get the
860 // correct matrix dimensions.
861 auto *BO = cast<BinaryOperator>(Ops.E);
862 auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
863 BO->getLHS()->getType().getCanonicalType());
864 auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
865 BO->getRHS()->getType().getCanonicalType());
866 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
867 if (LHSMatTy && RHSMatTy)
868 return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
869 LHSMatTy->getNumColumns(),
870 RHSMatTy->getNumColumns());
871 return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
872 }
873
874 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
875 // Preserve the old values
876 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
877 return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
878 }
879 if (Ops.isFixedPointOp())
880 return EmitFixedPointBinOp(Ops);
881 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
882 }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero,bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);

  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
  // non powers of two.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  // Per-operator emitters consumed by the HANDLEBINOP visitors below; each
  // takes the evaluated operand bundle and returns the scalar result value.
  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  // Bitwise operators: these cannot overflow, so they lower directly to a
  // single LLVM instruction with no sanitizer instrumentation.
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }
912
  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  // Evaluate both operands of E (optionally promoting them to PromotionTy)
  // and bundle them, together with the expression and its FP features, for
  // the Emit* helpers above.
  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  // Excess-precision support: convert a value to / back from the promotion
  // type computed by getPromotionType().
  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  // Emit a compound assignment; F is the member emitter for the arithmetic
  // part (e.g. &ScalarExprEmitter::EmitAdd). The LValue form also yields the
  // updated lvalue via Result; the Value form returns the stored result.
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
930 QualType getPromotionType(QualType Ty) {
931 const auto &Ctx = CGF.getContext();
932 if (auto *CT = Ty->getAs<ComplexType>()) {
933 QualType ElementType = CT->getElementType();
934 if (ElementType.UseExcessPrecision(Ctx))
935 return Ctx.getComplexType(Ctx.FloatTy);
936 }
937
938 if (Ty.UseExcessPrecision(Ctx)) {
939 if (auto *VT = Ty->getAs<VectorType>()) {
940 unsigned NumElements = VT->getNumElements();
941 return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
942 }
943 return Ctx.FloatTy;
944 }
945
946 return QualType();
947 }
948
949 // Binary operators and binary compound assignment operators.
950#define HANDLEBINOP(OP) \
951 Value *VisitBin##OP(const BinaryOperator *E) { \
952 QualType promotionTy = getPromotionType(E->getType()); \
953 auto result = Emit##OP(EmitBinOps(E, promotionTy)); \
954 if (result && !promotionTy.isNull()) \
955 result = EmitUnPromotedValue(result, E->getType()); \
956 return result; \
957 } \
958 Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) { \
959 ApplyAtomGroup Grp(CGF.getDebugInfo()); \
960 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP); \
961 }
962 HANDLEBINOP(Mul)
963 HANDLEBINOP(Div)
964 HANDLEBINOP(Rem)
965 HANDLEBINOP(Add)
966 HANDLEBINOP(Sub)
967 HANDLEBINOP(Shl)
968 HANDLEBINOP(Shr)
970 HANDLEBINOP(Xor)
972#undef HANDLEBINOP
973
  // Comparisons.
  // Shared lowering for the six relational/equality operators: callers pass
  // the unsigned-int, signed-int, and floating-point predicates plus whether
  // the FP comparison is signaling.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
#define VISITCOMP(CODE, UI, SI, FP, SIG) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP, SIG); }
  // Relational operators use ordered FP predicates and are signaling (SIG
  // true); equality operators are quiet. '!=' uses FCMP_UNE so that a NaN
  // operand makes the comparison true.
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP
989
  // Assignment and the sequencing/short-circuit operators need bespoke
  // emission; declared here, defined out of line.
  Value *VisitBinAssign     (const BinaryOperator *E);

  Value *VisitBinLAnd       (const BinaryOperator *E);
  Value *VisitBinLOr        (const BinaryOperator *E);
  Value *VisitBinComma      (const BinaryOperator *E);

  // Pointer-to-member access ('.*' and '->*') yields an lvalue; load it to
  // produce the scalar result.
  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  // Rewritten comparisons (e.g. those derived from operator<=>) are emitted
  // via their semantic form.
  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }
1002
  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  // Objective-C literal expressions are delegated wholesale to
  // CodeGenFunction.
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
  // A pack indexing expression evaluates to its selected pack element.
  Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
    return Visit(E->getSelectedExpr());
  }
1025};
1026} // end anonymous namespace.
1027
1028//===----------------------------------------------------------------------===//
1029// Utilities
1030//===----------------------------------------------------------------------===//
1031
1032/// EmitConversionToBool - Convert the specified expression value to a
1033/// boolean (i1) truth value. This is equivalent to "Val != 0".
1034Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
1035 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
1036
1037 if (SrcType->isRealFloatingType())
1038 return EmitFloatToBoolConversion(Src);
1039
1040 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
1041 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
1042
1043 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
1044 "Unknown scalar type to convert");
1045
1046 if (isa<llvm::IntegerType>(Src->getType()))
1047 return EmitIntToBoolConversion(Src);
1048
1049 assert(isa<llvm::PointerType>(Src->getType()));
1050 return EmitPointerToBoolConversion(Src, SrcType);
1051}
1052
1053void ScalarExprEmitter::EmitFloatConversionCheck(
1054 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
1055 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
1056 assert(SrcType->isFloatingType() && "not a conversion from floating point");
1057 if (!isa<llvm::IntegerType>(DstTy))
1058 return;
1059
1060 auto CheckOrdinal = SanitizerKind::SO_FloatCastOverflow;
1061 auto CheckHandler = SanitizerHandler::FloatCastOverflow;
1062 SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
1063 using llvm::APFloat;
1064 using llvm::APSInt;
1065
1066 llvm::Value *Check = nullptr;
1067 const llvm::fltSemantics &SrcSema =
1068 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
1069
1070 // Floating-point to integer. This has undefined behavior if the source is
1071 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
1072 // to an integer).
1073 unsigned Width = CGF.getContext().getIntWidth(DstType);
1075
1076 APSInt Min = APSInt::getMinValue(Width, Unsigned);
1077 APFloat MinSrc(SrcSema, APFloat::uninitialized);
1078 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
1079 APFloat::opOverflow)
1080 // Don't need an overflow check for lower bound. Just check for
1081 // -Inf/NaN.
1082 MinSrc = APFloat::getInf(SrcSema, true);
1083 else
1084 // Find the largest value which is too small to represent (before
1085 // truncation toward zero).
1086 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
1087
1088 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
1089 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
1090 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
1091 APFloat::opOverflow)
1092 // Don't need an overflow check for upper bound. Just check for
1093 // +Inf/NaN.
1094 MaxSrc = APFloat::getInf(SrcSema, false);
1095 else
1096 // Find the smallest value which is too large to represent (before
1097 // truncation toward zero).
1098 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
1099
1100 // If we're converting from __half, convert the range to float to match
1101 // the type of src.
1102 if (OrigSrcType->isHalfType()) {
1103 const llvm::fltSemantics &Sema =
1104 CGF.getContext().getFloatTypeSemantics(SrcType);
1105 bool IsInexact;
1106 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1107 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1108 }
1109
1110 llvm::Value *GE =
1111 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
1112 llvm::Value *LE =
1113 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
1114 Check = Builder.CreateAnd(GE, LE);
1115
1116 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
1117 CGF.EmitCheckTypeDescriptor(OrigSrcType),
1118 CGF.EmitCheckTypeDescriptor(DstType)};
1119 CGF.EmitCheck(std::make_pair(Check, CheckOrdinal), CheckHandler, StaticArgs,
1120 OrigSrc);
1121}
1122
1123// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1124// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1125static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1126 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1128 QualType DstType, CGBuilderTy &Builder) {
1129 llvm::Type *SrcTy = Src->getType();
1130 llvm::Type *DstTy = Dst->getType();
1131 (void)DstTy; // Only used in assert()
1132
1133 // This should be truncation of integral types.
1134 assert(Src != Dst);
1135 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
1136 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1137 "non-integer llvm type");
1138
1139 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1140 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1141
1142 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
1143 // Else, it is a signed truncation.
1144 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1146 if (!SrcSigned && !DstSigned) {
1147 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1148 Ordinal = SanitizerKind::SO_ImplicitUnsignedIntegerTruncation;
1149 } else {
1150 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1151 Ordinal = SanitizerKind::SO_ImplicitSignedIntegerTruncation;
1152 }
1153
1154 llvm::Value *Check = nullptr;
1155 // 1. Extend the truncated value back to the same width as the Src.
1156 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
1157 // 2. Equality-compare with the original source value
1158 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1159 // If the comparison result is 'i1 false', then the truncation was lossy.
1160 return std::make_pair(Kind, std::make_pair(Check, Ordinal));
1161}
1162
// NOTE(review): the declarator line of this helper (its return type, name,
// and any leading parameters) is missing from this span of the file —
// restore it from the original source before building. The body is a simple
// eligibility predicate: it answers true only when both sides of the
// conversion are integer types.
                                 QualType SrcType, QualType DstType) {
  return SrcType->isIntegerType() && DstType->isIntegerType();
}
1167
// Emit the implicit-integer-truncation sanitizer check (and/or an
// overflow-behavior trap, when OBTrapInvolved) verifying that the
// conversion Src -> Dst did not lose value bits. Bails out early whenever
// no truncation occurs or another sanitizer covers the case.
void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc,
                                                   bool OBTrapInvolved) {
  // Nothing to do unless the sanitizer is enabled or a trap was requested.
  if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation) &&
      !OBTrapInvolved)
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  // NOTE(review): the line opening this if-condition (an eligibility
  // predicate call taking SrcType as its first argument) is missing from
  // this span of the file; confirm against the original source.
                                                                      DstType))
    return;

  unsigned SrcBits = Src->getType()->getScalarSizeInBits();
  unsigned DstBits = Dst->getType()->getScalarSizeInBits();
  // This must be truncation. Else we do not care.
  if (SrcBits <= DstBits)
    return;

  assert(!DstType->isBooleanType() && "we should not get here with booleans.");

  // If the integer sign change sanitizer is enabled,
  // and we are truncating from larger unsigned type to smaller signed type,
  // let that next sanitizer deal with it.
  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
      (!SrcSigned && DstSigned))
    return;

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
      Check;

  auto CheckHandler = SanitizerHandler::ImplicitConversion;
  {
    // We don't know the check kind until we call
    // EmitIntegerTruncationCheckHelper, but we want to annotate
    // EmitIntegerTruncationCheckHelper's instructions too.
    SanitizerDebugLocation SanScope(
        &CGF,
        {SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
         SanitizerKind::SO_ImplicitSignedIntegerTruncation},
        CheckHandler);
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  // Do we care about this type of truncation?
  if (!CGF.SanOpts.has(Check.second.second)) {
    // Just emit a trap check if an __ob_trap was involved but appropriate
    // sanitizer isn't enabled.
    if (OBTrapInvolved)
      CGF.EmitTrapCheck(Check.second.first, CheckHandler);
    return;
  }

  SanitizerDebugLocation SanScope(&CGF, {Check.second.second}, CheckHandler);

  // Does some SSCL ignore this type?
  const bool ignoredBySanitizer = CGF.getContext().isTypeIgnoredBySanitizer(
      SanitizerMask::bitPosToMask(Check.second.second), DstType);

  // Consider OverflowBehaviorTypes which override SSCL type entries for
  // truncation sanitizers.
  if (const auto *OBT = DstType->getAs<OverflowBehaviorType>()) {
    // Wrapping overflow-behavior types opt out of the check entirely.
    if (OBT->isWrapKind())
      return;
  }
  if (ignoredBySanitizer && !OBTrapInvolved)
    return;

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
      llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};

  CGF.EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
}
1250
1251static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
1252 const char *Name,
1253 CGBuilderTy &Builder) {
1254 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1255 llvm::Type *VTy = V->getType();
1256 if (!VSigned) {
1257 // If the value is unsigned, then it is never negative.
1258 return llvm::ConstantInt::getFalse(VTy->getContext());
1259 }
1260 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1261 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1262 llvm::Twine(Name) + "." + V->getName() +
1263 ".negativitycheck");
1264}
1265
1266// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1267// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1268static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1269 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1271 QualType DstType, CGBuilderTy &Builder) {
1272 llvm::Type *SrcTy = Src->getType();
1273 llvm::Type *DstTy = Dst->getType();
1274
1275 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1276 "non-integer llvm type");
1277
1278 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1279 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1280 (void)SrcSigned; // Only used in assert()
1281 (void)DstSigned; // Only used in assert()
1282 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1283 unsigned DstBits = DstTy->getScalarSizeInBits();
1284 (void)SrcBits; // Only used in assert()
1285 (void)DstBits; // Only used in assert()
1286
1287 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1288 "either the widths should be different, or the signednesses.");
1289
1290 // 1. Was the old Value negative?
1291 llvm::Value *SrcIsNegative =
1292 EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
1293 // 2. Is the new Value negative?
1294 llvm::Value *DstIsNegative =
1295 EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
1296 // 3. Now, was the 'negativity status' preserved during the conversion?
1297 // NOTE: conversion from negative to zero is considered to change the sign.
1298 // (We want to get 'false' when the conversion changed the sign)
1299 // So we should just equality-compare the negativity statuses.
1300 llvm::Value *Check = nullptr;
1301 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1302 // If the comparison result is 'false', then the conversion changed the sign.
1303 return std::make_pair(
1304 ScalarExprEmitter::ICCK_IntegerSignChange,
1305 std::make_pair(Check, SanitizerKind::SO_ImplicitIntegerSignChange));
1306}
1307
// Emit the implicit-integer-sign-change sanitizer check (and/or an
// overflow-behavior trap, when OBTrapInvolved) for the conversion
// Src -> Dst, after ruling out every case where the sign provably cannot
// change or another sanitizer already covers it.
void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
                                                   Value *Dst, QualType DstType,
                                                   SourceLocation Loc,
                                                   bool OBTrapInvolved) {
  if (!CGF.SanOpts.has(SanitizerKind::SO_ImplicitIntegerSignChange) &&
      !OBTrapInvolved)
    return;

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst->getType();

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  // NOTE(review): the line opening this if-condition (an eligibility
  // predicate call taking SrcType as its first argument) is missing from
  // this span of the file; confirm against the original source.
                                                                      DstType))
    return;

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
  unsigned SrcBits = SrcTy->getScalarSizeInBits();
  unsigned DstBits = DstTy->getScalarSizeInBits();

  // Now, we do not need to emit the check in *all* of the cases.
  // We can avoid emitting it in some obvious cases where it would have been
  // dropped by the opt passes (instcombine) always anyways.
  // If it's a cast between effectively the same type, no check.
  // NOTE: this is *not* equivalent to checking the canonical types.
  if (SrcSigned == DstSigned && SrcBits == DstBits)
    return;
  // At least one of the values needs to have signed type.
  // If both are unsigned, then obviously, neither of them can be negative.
  if (!SrcSigned && !DstSigned)
    return;
  // If the conversion is to *larger* *signed* type, then no check is needed.
  // Because either sign-extension happens (so the sign will remain),
  // or zero-extension will happen (the sign bit will be zero.)
  if ((DstBits > SrcBits) && DstSigned)
    return;
  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && SrcSigned) {
    // If the signed integer truncation sanitizer is enabled,
    // and this is a truncation from signed type, then no check is needed.
    // Because here sign change check is interchangeable with truncation check.
    return;
  }
  // Does an SSCL have an entry for the DstType under its respective sanitizer
  // section?
  if (DstSigned && CGF.getContext().isTypeIgnoredBySanitizer(
                       SanitizerKind::ImplicitSignedIntegerTruncation, DstType))
    return;
  if (!DstSigned &&
      // NOTE(review): a line is missing from this span here — by symmetry
      // with the signed case just above it should open the matching
      // 'CGF.getContext().isTypeIgnoredBySanitizer(' call; confirm against
      // the original source.
          SanitizerKind::ImplicitUnsignedIntegerTruncation, DstType))
    return;
  // That's it. We can't rule out any more cases with the data we have.

  auto CheckHandler = SanitizerHandler::ImplicitConversion;
  SanitizerDebugLocation SanScope(
      &CGF,
      {SanitizerKind::SO_ImplicitIntegerSignChange,
       SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
       SanitizerKind::SO_ImplicitSignedIntegerTruncation},
      CheckHandler);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
      Check;

  // Each of these checks needs to return 'false' when an issue was detected.
  ImplicitConversionCheckKind CheckKind;
  llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
                    2>
      Checks;
  // So we can 'and' all the checks together, and still get 'false',
  // if at least one of the checks detected an issue.

  Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  CheckKind = Check.first;
  Checks.emplace_back(Check.second);

  if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
      (SrcBits > DstBits) && !SrcSigned && DstSigned) {
    // If the signed integer truncation sanitizer was enabled,
    // and we are truncating from larger unsigned type to smaller signed type,
    // let's handle the case we skipped in that check.
    Check =
        EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
    CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
    Checks.emplace_back(Check.second);
    // If the comparison result is 'i1 false', then the truncation was lossy.
  }

  if (!CGF.SanOpts.has(SanitizerKind::SO_ImplicitIntegerSignChange)) {
    // Trap-only path: the sanitizer itself is off, so 'and' the individual
    // checks together and emit a plain trap instead of a diagnostic.
    if (OBTrapInvolved) {
      llvm::Value *Combined = Check.second.first;
      for (const auto &C : Checks)
        Combined = Builder.CreateAnd(Combined, C.first);
      CGF.EmitTrapCheck(Combined, CheckHandler);
    }
    return;
  }

  llvm::Constant *StaticArgs[] = {
      CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
      CGF.EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
      llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
  // EmitCheck() will 'and' all the checks together.
  CGF.EmitCheck(Checks, CheckHandler, StaticArgs, {Src, Dst});
}
1418
1419// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1420// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1421static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1422 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1424 QualType DstType, CGBuilderTy &Builder) {
1425 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1426 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1427
1428 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1429 if (!SrcSigned && !DstSigned)
1430 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1431 else
1432 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1433
1434 llvm::Value *Check = nullptr;
1435 // 1. Extend the truncated value back to the same width as the Src.
1436 Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
1437 // 2. Equality-compare with the original source value
1438 Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
1439 // If the comparison result is 'i1 false', then the truncation was lossy.
1440
1441 return std::make_pair(
1442 Kind,
1443 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1444}
1445
1446// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1447// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1448static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1449 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1451 QualType DstType, CGBuilderTy &Builder) {
1452 // 1. Was the old Value negative?
1453 llvm::Value *SrcIsNegative =
1454 EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
1455 // 2. Is the new Value negative?
1456 llvm::Value *DstIsNegative =
1457 EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
1458 // 3. Now, was the 'negativity status' preserved during the conversion?
1459 // NOTE: conversion from negative to zero is considered to change the sign.
1460 // (We want to get 'false' when the conversion changed the sign)
1461 // So we should just equality-compare the negativity statuses.
1462 llvm::Value *Check = nullptr;
1463 Check =
1464 Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
1465 // If the comparison result is 'false', then the conversion changed the sign.
1466 return std::make_pair(
1467 ScalarExprEmitter::ICCK_IntegerSignChange,
1468 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1469}
1470
// Emit the implicit-bitfield-conversion sanitizer check: after a value is
// converted for a bitfield of Info.Size bits, verify the conversion did not
// truncate value bits or flip the sign (see the EmitBitfield*CheckHelper
// functions above).
// NOTE(review): the opening declarator line of this function (its return
// type, qualified name, and first parameters — the body's unqualified uses
// of SanOpts/EmitCheck suggest a CodeGenFunction member taking Value *Src
// and QualType SrcType) is missing from this span of the file; confirm
// against the original source before building.
                                                 Value *Dst, QualType DstType,
                                                 const CGBitFieldInfo &Info,
                                                 SourceLocation Loc) {

  if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
    return;

  // We only care about int->int conversions here.
  // We ignore conversions to/from pointer and/or bool.
  // NOTE(review): the line opening this if-condition (an eligibility
  // predicate call taking SrcType as its first argument) is missing from
  // this span; confirm against the original source.
                                                                      DstType))
    return;

  if (DstType->isBooleanType() || SrcType->isBooleanType())
    return;

  // This should be truncation of integral types.
  assert(isa<llvm::IntegerType>(Src->getType()) &&
         isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");

  // TODO: Calculate src width to avoid emitting code
  // for unecessary cases.
  unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
  unsigned DstBits = Info.Size; // Width of the bitfield, in bits.

  bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
  bool DstSigned = DstType->isSignedIntegerOrEnumerationType();

  auto CheckHandler = SanitizerHandler::ImplicitConversion;
  SanitizerDebugLocation SanScope(
      this, {SanitizerKind::SO_ImplicitBitfieldConversion}, CheckHandler);

  std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
            std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
      Check;

  // Truncation
  bool EmitTruncation = DstBits < SrcBits;
  // If Dst is signed and Src unsigned, we want to be more specific
  // about the CheckKind we emit, in this case we want to emit
  // ICCK_SignedIntegerTruncationOrSignChange.
  bool EmitTruncationFromUnsignedToSigned =
      EmitTruncation && DstSigned && !SrcSigned;
  // Sign change
  bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
  bool BothUnsigned = !SrcSigned && !DstSigned;
  bool LargerSigned = (DstBits > SrcBits) && DstSigned;
  // We can avoid emitting sign change checks in some obvious cases
  // 1. If Src and Dst have the same signedness and size
  // 2. If both are unsigned sign check is unecessary!
  // 3. If Dst is signed and bigger than Src, either
  //    sign-extension or zero-extension will make sure
  //    the sign remains.
  bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;

  if (EmitTruncation)
    Check =
        EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
  else if (EmitSignChange) {
    assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
           "either the widths should be different, or the signednesses.");
    Check =
        EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
  } else
    return;

  ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
  if (EmitTruncationFromUnsignedToSigned)
    CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;

  llvm::Constant *StaticArgs[] = {
      // NOTE(review): the first initializer line of this array (source
      // location and the SrcType descriptor) is missing from this span;
      // confirm against the original source.
      EmitCheckTypeDescriptor(DstType),
      llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
      llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};

  EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
}
1550
1551Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1552 QualType DstType, llvm::Type *SrcTy,
1553 llvm::Type *DstTy,
1554 ScalarConversionOpts Opts) {
1555 // The Element types determine the type of cast to perform.
1556 llvm::Type *SrcElementTy;
1557 llvm::Type *DstElementTy;
1558 QualType SrcElementType;
1559 QualType DstElementType;
1560 if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1561 SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1562 DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1563 SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1564 DstElementType = DstType->castAs<MatrixType>()->getElementType();
1565 } else {
1566 assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
1567 "cannot cast between matrix and non-matrix types");
1568 SrcElementTy = SrcTy;
1569 DstElementTy = DstTy;
1570 SrcElementType = SrcType;
1571 DstElementType = DstType;
1572 }
1573
1574 if (isa<llvm::IntegerType>(SrcElementTy)) {
1575 bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1576 if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1577 InputSigned = true;
1578 }
1579
1580 if (isa<llvm::IntegerType>(DstElementTy))
1581 return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1582 if (InputSigned)
1583 return Builder.CreateSIToFP(Src, DstTy, "conv");
1584 return Builder.CreateUIToFP(Src, DstTy, "conv");
1585 }
1586
1587 if (isa<llvm::IntegerType>(DstElementTy)) {
1588 assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
1589 bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();
1590
1591 // If we can't recognize overflow as undefined behavior, assume that
1592 // overflow saturates. This protects against normal optimizations if we are
1593 // compiling with non-standard FP semantics.
1594 if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
1595 llvm::Intrinsic::ID IID =
1596 IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
1597 return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
1598 }
1599
1600 if (IsSigned)
1601 return Builder.CreateFPToSI(Src, DstTy, "conv");
1602 return Builder.CreateFPToUI(Src, DstTy, "conv");
1603 }
1604
1605 if ((DstElementTy->is16bitFPTy() && SrcElementTy->is16bitFPTy())) {
1606 Value *FloatVal = Builder.CreateFPExt(Src, Builder.getFloatTy(), "fpext");
1607 return Builder.CreateFPTrunc(FloatVal, DstTy, "fptrunc");
1608 }
1609 if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1610 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1611 return Builder.CreateFPExt(Src, DstTy, "conv");
1612}
1613
1614/// Emit a conversion from the specified type to the specified destination type,
1615/// both of which are LLVM scalar types.
///
/// \param Src the already-emitted LLVM value of the source expression.
/// \param SrcType the Clang-level source type; may be non-canonical so that
///        overflow-behavior annotations survive for the checks at the end.
/// \param DstType the Clang-level destination type; same caveat as SrcType.
/// \param Loc the location attached to any sanitizer check emitted here.
/// \param Opts selects which implicit-conversion sanitizer checks to emit.
1616Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1617 QualType DstType,
1618 SourceLocation Loc,
1619 ScalarConversionOpts Opts) {
1620 // All conversions involving fixed point types should be handled by the
1621 // EmitFixedPoint family functions. This is done to prevent bloating up this
1622 // function more, and although fixed point numbers are represented by
1623 // integers, we do not want to follow any logic that assumes they should be
1624 // treated as integers.
1625 // TODO(leonardchan): When necessary, add another if statement checking for
1626 // conversions to fixed point types from other types.
1627 if (SrcType->isFixedPointType()) {
1628 if (DstType->isBooleanType())
1629 // It is important that we check this before checking if the dest type is
1630 // an integer because booleans are technically integer types.
1631 // We do not need to check the padding bit on unsigned types if unsigned
1632 // padding is enabled because overflow into this bit is undefined
1633 // behavior.
1634 return Builder.CreateIsNotNull(Src, "tobool");
1635 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1636 DstType->isRealFloatingType())
1637 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1638
1639 llvm_unreachable(
1640 "Unhandled scalar conversion from a fixed point type to another type.");
1641 } else if (DstType->isFixedPointType()) {
1642 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1643 // This also includes converting booleans and enums to fixed point types.
1644 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1645
1646 llvm_unreachable(
1647 "Unhandled scalar conversion to a fixed point type from another type.");
1648 }
1649
 // Keep the spelled (non-canonical) types around: the integer sanitizer
 // checks and the OverflowBehaviorType queries near the end of this function
 // need the annotated forms, while the conversion logic wants canonical ones.
1650 QualType NoncanonicalSrcType = SrcType;
1651 QualType NoncanonicalDstType = DstType;
1652
1653 SrcType = CGF.getContext().getCanonicalType(SrcType);
1654 DstType = CGF.getContext().getCanonicalType(DstType);
1655 if (SrcType == DstType) return Src;
1656
1657 if (DstType->isVoidType()) return nullptr;
1658
 // Remember the original value/type so the float-cast-overflow check below
 // can inspect the pre-promotion source.
1659 llvm::Value *OrigSrc = Src;
1660 QualType OrigSrcType = SrcType;
1661 llvm::Type *SrcTy = Src->getType();
1662
1663 // Handle conversions to bool first, they are special: comparisons against 0.
1664 if (DstType->isBooleanType())
1665 return EmitConversionToBool(Src, SrcType);
1666
1667 llvm::Type *DstTy = ConvertType(DstType);
1668
1669 // Cast from half through float if half isn't a native type.
1670 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1671 // Cast to FP using the intrinsic if the half type itself isn't supported.
1672 if (DstTy->isFloatingPointTy()) {
 // NOTE(review): this listing elides a line here (likely the guard choosing
 // the fp16 conversion intrinsic path) — confirm against upstream.
1674 Value *BitCast = Builder.CreateBitCast(Src, CGF.CGM.HalfTy);
1675 return Builder.CreateFPExt(BitCast, DstTy, "conv");
1676 }
1677 } else {
1678 // Cast to other types through float, using either the intrinsic or FPExt,
1679 // depending on whether the half type itself is supported
1680 // (as opposed to operations on half, available with NativeHalfType).
1681
1682 if (Src->getType() != CGF.CGM.HalfTy) {
 // NOTE(review): a line is elided here in this listing — confirm upstream.
1684 Src = Builder.CreateBitCast(Src, CGF.CGM.HalfTy);
1685 }
1686
 // From here on, treat the source as a float: widen and retarget SrcType.
1687 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1688 SrcType = CGF.getContext().FloatTy;
1689 SrcTy = CGF.FloatTy;
1690 }
1691 }
1692
1693 // Ignore conversions like int -> uint.
1694 if (SrcTy == DstTy) {
1695 if (Opts.EmitImplicitIntegerSignChangeChecks)
1696 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1697 NoncanonicalDstType, Loc);
1698
1699 return Src;
1700 }
1701
1702 // Handle pointer conversions next: pointers can only be converted to/from
1703 // other pointers and integers. Check for pointer types in terms of LLVM, as
1704 // some native types (like Obj-C id) may map to a pointer type.
1705 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1706 // The source value may be an integer, or a pointer.
1707 if (isa<llvm::PointerType>(SrcTy))
1708 return Src;
1709
1710 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1711 // First, convert to the correct width so that we control the kind of
1712 // extension.
1713 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1714 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1715 llvm::Value* IntResult =
1716 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1717 // Then, cast to pointer.
1718 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1719 }
1720
1721 if (isa<llvm::PointerType>(SrcTy)) {
1722 // Must be an ptr to int cast.
1723 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1724 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1725 }
1726
1727 // A scalar can be splatted to an extended vector of the same element type
1728 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1729 // Sema should add casts to make sure that the source expression's type is
1730 // the same as the vector's element type (sans qualifiers)
1731 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1732 SrcType.getTypePtr() &&
1733 "Splatted expr doesn't match with vector element type?");
1734
1735 // Splat the element across to all elements
1736 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1737 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1738 }
1739
1740 if (SrcType->isMatrixType() && DstType->isMatrixType())
1741 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1742
1743 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1744 // Allow bitcast from vector to integer/fp of the same size.
1745 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1746 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1747 if (SrcSize == DstSize)
1748 return Builder.CreateBitCast(Src, DstTy, "conv");
1749
1750 // Conversions between vectors of different sizes are not allowed except
1751 // when vectors of half are involved. Operations on storage-only half
1752 // vectors require promoting half vector operands to float vectors and
1753 // truncating the result, which is either an int or float vector, to a
1754 // short or half vector.
1755
1756 // Source and destination are both expected to be vectors.
1757 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1758 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1759 (void)DstElementTy;
1760
1761 assert(((SrcElementTy->isIntegerTy() &&
1762 DstElementTy->isIntegerTy()) ||
1763 (SrcElementTy->isFloatingPointTy() &&
1764 DstElementTy->isFloatingPointTy())) &&
1765 "unexpected conversion between a floating-point vector and an "
1766 "integer vector");
1767
1768 // Truncate an i32 vector to an i16 vector.
1769 if (SrcElementTy->isIntegerTy())
1770 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1771
1772 // Truncate a float vector to a half vector.
1773 if (SrcSize > DstSize)
1774 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1775
1776 // Promote a half vector to a float vector.
1777 return Builder.CreateFPExt(Src, DstTy, "conv");
1778 }
1779
1780 // Finally, we have the arithmetic types: real int/float.
1781 Value *Res = nullptr;
 // ResTy records the type the caller actually asked for; DstTy itself may be
 // redirected to float below when a non-native half must go through float.
1782 llvm::Type *ResTy = DstTy;
1783
1784 // An overflowing conversion has undefined behavior if either the source type
1785 // or the destination type is a floating-point type. However, we consider the
1786 // range of representable values for all floating-point types to be
1787 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1788 // floating-point type.
1789 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1790 OrigSrcType->isFloatingType())
1791 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1792 Loc);
1793
1794 // Cast to half through float if half isn't a native type.
1795 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1796 // Make sure we cast in a single step if from another FP type.
1797 if (SrcTy->isFloatingPointTy()) {
1798 // Handle the case where the half type is represented as an integer (as
1799 // opposed to operations on half, available with NativeHalfType).
1800
1801 // If the half type is supported, just use an fptrunc.
1802 Value *Res = Builder.CreateFPTrunc(Src, CGF.CGM.HalfTy, "conv");
1803 if (DstTy == CGF.CGM.HalfTy)
1804 return Res;
1805
 // NOTE(review): this listing elides part of this assert's condition —
 // confirm against upstream.
1806 assert(DstTy->isIntegerTy(16) &&
1808 "Only half FP requires extra conversion");
1809 return Builder.CreateBitCast(Res, DstTy);
1810 }
1811
 // Non-FP source: convert to float first, then fix up to half afterwards.
1812 DstTy = CGF.FloatTy;
1813 }
1814
1815 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1816
 // If DstTy was retargeted to float above, narrow the result back down to
 // the originally requested half (or i16-represented half) type.
1817 if (DstTy != ResTy) {
1818 Res = Builder.CreateFPTrunc(Res, CGF.CGM.HalfTy, "conv");
1819
1820 if (ResTy != CGF.CGM.HalfTy) {
1821 assert(ResTy->isIntegerTy(16) &&
1823 "Only half FP requires extra conversion");
1824 Res = Builder.CreateBitCast(Res, ResTy);
1825 }
1826 }
1827
1828 // Determine whether an overflow behavior of 'trap' has been specified for
1829 // either the destination or the source types. If so, we can elide sanitizer
1830 // capability checks as this overflow behavior kind is also capable of
1831 // emitting traps without runtime sanitizer support.
1832 // Also skip instrumentation if either source or destination has 'wrap'
1833 // behavior - the user has explicitly indicated they accept wrapping
1834 // semantics. Use non-canonical types to preserve OBT annotations.
1835 const auto *DstOBT = NoncanonicalDstType->getAs<OverflowBehaviorType>();
1836 const auto *SrcOBT = NoncanonicalSrcType->getAs<OverflowBehaviorType>();
1837 bool OBTrapInvolved =
1838 (DstOBT && DstOBT->isTrapKind()) || (SrcOBT && SrcOBT->isTrapKind());
1839 bool OBWrapInvolved =
1840 (DstOBT && DstOBT->isWrapKind()) || (SrcOBT && SrcOBT->isWrapKind());
1841
1842 if ((Opts.EmitImplicitIntegerTruncationChecks || OBTrapInvolved) &&
1843 !OBWrapInvolved && !Opts.PatternExcluded)
1844 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1845 NoncanonicalDstType, Loc, OBTrapInvolved)
1846
1847 if (Opts.EmitImplicitIntegerSignChangeChecks ||
1848 (OBTrapInvolved && !OBWrapInvolved))
1849 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1850 NoncanonicalDstType, Loc, OBTrapInvolved);
1851
1852 return Res;
1853}
1854
/// Lower a conversion in which at least one side is a fixed-point type,
/// delegating the arithmetic to llvm::FixedPointBuilder. Handles
/// fixed<->float, fixed<->integer and fixed<->fixed conversions.
1855Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1856 QualType DstTy,
1857 SourceLocation Loc) {
1858 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1859 llvm::Value *Result;
1860 if (SrcTy->isRealFloatingType())
 // float -> fixed: the destination's fixed-point semantics drive rounding.
1861 Result = FPBuilder.CreateFloatingToFixed(Src,
1862 CGF.getContext().getFixedPointSemantics(DstTy));
1863 else if (DstTy->isRealFloatingType())
 // fixed -> float.
 // NOTE(review): this listing elides a line here (likely the source
 // fixed-point semantics argument) — confirm against upstream.
1864 Result = FPBuilder.CreateFixedToFloating(Src,
1866 ConvertType(DstTy));
1867 else {
1868 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1869 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1870
1871 if (DstTy->isIntegerType())
1872 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1873 DstFPSema.getWidth(),
1874 DstFPSema.isSigned());
1875 else if (SrcTy->isIntegerType())
1876 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1877 DstFPSema);
1878 else
1879 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1880 }
1881 return Result;
1882}
1883
1884/// Emit a conversion from the specified complex type to the specified
1885/// destination type, where the destination type is an LLVM scalar type.
1886Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1887 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1888 SourceLocation Loc) {
1889 // Get the source element type.
1890 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1891
1892 // Handle conversions to bool first, they are special: comparisons against 0.
1893 if (DstTy->isBooleanType()) {
1894 // Complex != 0 -> (Real != 0) | (Imag != 0)
1895 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1896 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1897 return Builder.CreateOr(Src.first, Src.second, "tobool");
1898 }
1899
1900 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1901 // the imaginary part of the complex value is discarded and the value of the
1902 // real part is converted according to the conversion rules for the
1903 // corresponding real type.
1904 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1905}
1906
1907Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1908 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1909}
1910
1911/// Emit a sanitization check for the given "binary" operation (which
1912/// might actually be a unary increment which has been lowered to a binary
1913/// operation). The check passes if all values in \p Checks (which are \c i1),
1914/// are \c true.
///
/// Selects the SanitizerHandler and assembles the static (source location,
/// type descriptors) and dynamic (operand values) data for it, then emits
/// everything through CGF.EmitCheck.
1915void ScalarExprEmitter::EmitBinOpCheck(
1916 ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
1917 const BinOpInfo &Info) {
1918 assert(CGF.IsSanitizerScope);
1919 SanitizerHandler Check;
1920 SmallVector<llvm::Constant *, 4> StaticData;
1921 SmallVector<llvm::Value *, 2> DynamicData;
1922 TrapReason TR;
1923
1924 BinaryOperatorKind Opcode = Info.Opcode;
 // NOTE(review): this listing elides lines here — confirm against upstream.
1927
1928 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
 // A unary minus lowered to "0 - x" carries only one interesting operand.
1929 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1930 if (UO && UO->getOpcode() == UO_Minus) {
1931 Check = SanitizerHandler::NegateOverflow;
1932 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1933 DynamicData.push_back(Info.RHS);
1934 } else {
1935 if (BinaryOperator::isShiftOp(Opcode)) {
1936 // Shift LHS negative or too large, or RHS out of bounds.
1937 Check = SanitizerHandler::ShiftOutOfBounds;
1938 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1939 StaticData.push_back(
1940 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1941 StaticData.push_back(
1942 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1943 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1944 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1945 Check = SanitizerHandler::DivremOverflow;
1946 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1947 } else {
1948 // Arithmetic overflow (+, -, *).
1949 int ArithOverflowKind = 0;
1950 switch (Opcode) {
1951 case BO_Add: {
1952 Check = SanitizerHandler::AddOverflow;
1953 ArithOverflowKind = diag::UBSanArithKind::Add;
1954 break;
1955 }
1956 case BO_Sub: {
1957 Check = SanitizerHandler::SubOverflow;
1958 ArithOverflowKind = diag::UBSanArithKind::Sub;
1959 break;
1960 }
1961 case BO_Mul: {
1962 Check = SanitizerHandler::MulOverflow;
1963 ArithOverflowKind = diag::UBSanArithKind::Mul;
1964 break;
1965 }
1966 default:
1967 llvm_unreachable("unexpected opcode for bin op check");
1968 }
1969 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
 // NOTE(review): the head of this condition (the sanitizer-enabled query)
 // is elided in this listing — confirm against upstream.
1971 SanitizerKind::UnsignedIntegerOverflow) ||
1973 SanitizerKind::SignedIntegerOverflow)) {
1974 // Only pay the cost for constructing the trap diagnostic if they are
1975 // going to be used.
1976 CGF.CGM.BuildTrapReason(diag::trap_ubsan_arith_overflow, TR)
1977 << Info.Ty->isSignedIntegerOrEnumerationType() << ArithOverflowKind
1978 << Info.E;
1979 }
1980 }
1981 DynamicData.push_back(Info.LHS);
1982 DynamicData.push_back(Info.RHS);
1983 }
1984
1985 CGF.EmitCheck(Checks, Check, StaticData, DynamicData, &TR);
1986}
1987
1988//===----------------------------------------------------------------------===//
1989// Visitor Methods
1990//===----------------------------------------------------------------------===//
1991
1992Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1993 CGF.ErrorUnsupported(E, "scalar expression");
1994 if (E->getType()->isVoidType())
1995 return nullptr;
1996 return llvm::PoisonValue::get(CGF.ConvertType(E->getType()));
1997}
1998
/// Materialize the SYCL unique stable name as a global constant string and
/// return its address, cast to the expression's pointer type.
1999Value *
2000ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
2001 ASTContext &Context = CGF.getContext();
 // NOTE(review): the initializer of AddrSpace (presumably the target's
 // constant/global address space) is elided in this listing — confirm
 // against upstream.
2002 unsigned AddrSpace =
2004 llvm::Constant *GlobalConstStr = Builder.CreateGlobalString(
2005 E->ComputeName(Context), "__usn_str", AddrSpace);
2006
 // The string may live in a non-default address space; use the combined
 // bitcast-or-addrspacecast helper to reach the expression's type.
2007 llvm::Type *ExprTy = ConvertType(E->getType());
2008 return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,
2009 "usn_addr_cast");
2010}
2011
2012Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {
2013 assert(E->getDataElementCount() == 1);
2014 auto It = E->begin();
2015 return Builder.getInt((*It)->getValue());
2016}
2017
2018Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
2019 // Vector Mask Case
2020 if (E->getNumSubExprs() == 2) {
2021 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
2022 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
2023 Value *Mask;
2024
2025 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
2026 unsigned LHSElts = LTy->getNumElements();
2027
2028 Mask = RHS;
2029
2030 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
2031
2032 // Mask off the high bits of each shuffle index.
2033 Value *MaskBits =
2034 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
2035 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
2036
2037 // newv = undef
2038 // mask = mask & maskbits
2039 // for each elt
2040 // n = extract mask i
2041 // x = extract val n
2042 // newv = insert newv, x, i
2043 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
2044 MTy->getNumElements());
2045 Value* NewV = llvm::PoisonValue::get(RTy);
2046 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
2047 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
2048 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
2049
2050 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
2051 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
2052 }
2053 return NewV;
2054 }
2055
2056 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
2057 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
2058
2059 SmallVector<int, 32> Indices;
2060 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
2061 llvm::APSInt Idx = E->getShuffleMaskIdx(i - 2);
2062 // Check for -1 and output it as undef in the IR.
2063 if (Idx.isSigned() && Idx.isAllOnes())
2064 Indices.push_back(-1);
2065 else
2066 Indices.push_back(Idx.getZExtValue());
2067 }
2068
2069 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
2070}
2071
2072Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
2073 QualType SrcType = E->getSrcExpr()->getType(),
2074 DstType = E->getType();
2075
2076 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
2077
2078 SrcType = CGF.getContext().getCanonicalType(SrcType);
2079 DstType = CGF.getContext().getCanonicalType(DstType);
2080 if (SrcType == DstType) return Src;
2081
2082 assert(SrcType->isVectorType() &&
2083 "ConvertVector source type must be a vector");
2084 assert(DstType->isVectorType() &&
2085 "ConvertVector destination type must be a vector");
2086
2087 llvm::Type *SrcTy = Src->getType();
2088 llvm::Type *DstTy = ConvertType(DstType);
2089
2090 // Ignore conversions like int -> uint.
2091 if (SrcTy == DstTy)
2092 return Src;
2093
2094 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
2095 DstEltType = DstType->castAs<VectorType>()->getElementType();
2096
2097 assert(SrcTy->isVectorTy() &&
2098 "ConvertVector source IR type must be a vector");
2099 assert(DstTy->isVectorTy() &&
2100 "ConvertVector destination IR type must be a vector");
2101
2102 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
2103 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
2104
2105 if (DstEltType->isBooleanType()) {
2106 assert((SrcEltTy->isFloatingPointTy() ||
2107 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
2108
2109 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
2110 if (SrcEltTy->isFloatingPointTy()) {
2111 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2112 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
2113 } else {
2114 return Builder.CreateICmpNE(Src, Zero, "tobool");
2115 }
2116 }
2117
2118 // We have the arithmetic types: real int/float.
2119 Value *Res = nullptr;
2120
2121 if (isa<llvm::IntegerType>(SrcEltTy)) {
2122 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
2123 if (isa<llvm::IntegerType>(DstEltTy))
2124 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
2125 else {
2126 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2127 if (InputSigned)
2128 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
2129 else
2130 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
2131 }
2132 } else if (isa<llvm::IntegerType>(DstEltTy)) {
2133 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
2134 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2135 if (DstEltType->isSignedIntegerOrEnumerationType())
2136 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
2137 else
2138 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
2139 } else {
2140 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
2141 "Unknown real conversion");
2142 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2143 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
2144 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
2145 else
2146 Res = Builder.CreateFPExt(Src, DstTy, "conv");
2147 }
2148
2149 return Res;
2150}
2151
/// Emit a member access in scalar rvalue position. Constant members are
/// folded (while still evaluating the base for side effects); everything
/// else is an ordinary lvalue load.
2152Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
2153 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
2154 CGF.EmitIgnoredExpr(E->getBase());
2155 return CGF.emitScalarConstant(Constant, E);
2156 } else {
2157 Expr::EvalResult Result;
 // NOTE(review): the guard for this branch (a constant-evaluation of E into
 // Result) is elided in this listing; Result is only valid under it —
 // confirm against upstream.
2159 llvm::APSInt Value = Result.Val.getInt();
2160 CGF.EmitIgnoredExpr(E->getBase());
2161 return Builder.getInt(Value);
2162 }
2163 }
2164
2165 llvm::Value *Result = EmitLoadOfLValue(E);
2166
2167 // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
2168 // debug info for the pointer, even if there is no variable associated with
2169 // the pointer's expression.
2170 if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
2171 if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Result)) {
2172 if (llvm::GetElementPtrInst *GEP =
2173 dyn_cast<llvm::GetElementPtrInst>(Load->getPointerOperand())) {
2174 if (llvm::Instruction *Pointer =
2175 dyn_cast<llvm::Instruction>(GEP->getPointerOperand())) {
2176 QualType Ty = E->getBase()->getType();
 // For dot access the "pointer" is the address of the base object, so
 // describe it with a pointer-to-base type.
2177 if (!E->isArrow())
2178 Ty = CGF.getContext().getPointerType(Ty);
2179 CGF.getDebugInfo()->EmitPseudoVariable(Builder, Pointer, Ty);
2180 }
2181 }
2182 }
2183 }
2184 return Result;
2185}
2186
2187Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
2188 TestAndClearIgnoreResultAssign();
2189
2190 // Emit subscript expressions in rvalue context's. For most cases, this just
2191 // loads the lvalue formed by the subscript expr. However, we have to be
2192 // careful, because the base of a vector subscript is occasionally an rvalue,
2193 // so we can't get it as an lvalue.
 // NOTE(review): the second half of this condition is elided in this
 // listing — confirm against upstream.
2194 if (!E->getBase()->getType()->isVectorType() &&
2196 return EmitLoadOfLValue(E);
2197
2198 // Handle the vector case. The base must be a vector, the index must be an
2199 // integer value.
2200 Value *Base = Visit(E->getBase());
2201 Value *Idx = Visit(E->getIdx());
2202 QualType IdxTy = E->getIdx()->getType();
2203
 // Bounds-check the lane index before extracting when -fsanitize=array-bounds
 // is enabled.
2204 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
2205 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
2206
2207 return Builder.CreateExtractElement(Base, Idx, "vecext");
2208}
2209
2210Value *ScalarExprEmitter::VisitMatrixSingleSubscriptExpr(
2211 MatrixSingleSubscriptExpr *E) {
2212 TestAndClearIgnoreResultAssign();
2213
2214 auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2215 unsigned NumRows = MatrixTy->getNumRows();
2216 unsigned NumColumns = MatrixTy->getNumColumns();
2217
2218 // Row index
2219 Value *RowIdx = CGF.EmitMatrixIndexExpr(E->getRowIdx());
2220 llvm::MatrixBuilder MB(Builder);
2221
2222 // The row index must be in [0, NumRows)
2223 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2224 MB.CreateIndexAssumption(RowIdx, NumRows);
2225
2226 Value *FlatMatrix = Visit(E->getBase());
2227 llvm::Type *ElemTy = CGF.ConvertTypeForMem(MatrixTy->getElementType());
2228 auto *ResultTy = llvm::FixedVectorType::get(ElemTy, NumColumns);
2229 Value *RowVec = llvm::PoisonValue::get(ResultTy);
2230
2231 for (unsigned Col = 0; Col != NumColumns; ++Col) {
2232 Value *ColVal = llvm::ConstantInt::get(RowIdx->getType(), Col);
2233 bool IsMatrixRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
2234 LangOptions::MatrixMemoryLayout::MatrixRowMajor;
2235 Value *EltIdx = MB.CreateIndex(RowIdx, ColVal, NumRows, NumColumns,
2236 IsMatrixRowMajor, "matrix_row_idx");
2237 Value *Elt =
2238 Builder.CreateExtractElement(FlatMatrix, EltIdx, "matrix_elem");
2239 Value *Lane = llvm::ConstantInt::get(Builder.getInt32Ty(), Col);
2240 RowVec = Builder.CreateInsertElement(RowVec, Elt, Lane, "matrix_row_ins");
2241 }
2242
2243 return CGF.EmitFromMemory(RowVec, E->getType());
2244}
2245
2246Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
2247 TestAndClearIgnoreResultAssign();
2248
2249 // Handle the vector case. The base must be a vector, the index must be an
2250 // integer value.
2251 Value *RowIdx = CGF.EmitMatrixIndexExpr(E->getRowIdx());
2252 Value *ColumnIdx = CGF.EmitMatrixIndexExpr(E->getColumnIdx());
2253
2254 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2255 llvm::MatrixBuilder MB(Builder);
2256
2257 Value *Idx;
2258 unsigned NumCols = MatrixTy->getNumColumns();
2259 unsigned NumRows = MatrixTy->getNumRows();
2260 bool IsMatrixRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
2261 LangOptions::MatrixMemoryLayout::MatrixRowMajor;
2262 Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows, NumCols, IsMatrixRowMajor);
2263
2264 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2265 MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());
2266
2267 Value *Matrix = Visit(E->getBase());
2268
2269 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
2270 return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
2271}
2272
2273static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
2274 unsigned Off) {
2275 int MV = SVI->getMaskValue(Idx);
2276 if (MV == -1)
2277 return -1;
2278 return Off + MV;
2279}
2280
2281static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
2282 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
2283 "Index operand too large for shufflevector mask!");
2284 return C->getZExtValue();
2285}
2286
/// Emit a braced initializer list in scalar/vector rvalue position. Scalars
/// use the first (only) initializer; fixed vectors are assembled with
/// insert/shuffle sequences, folding swizzle sources into shuffles where
/// possible; trailing elements are zero-filled.
2287Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
2288 bool Ignore = TestAndClearIgnoreResultAssign();
2289 (void)Ignore;
2290 unsigned NumInitElements = E->getNumInits();
2291 assert((Ignore == false ||
2292 (NumInitElements == 0 && E->getType()->isVoidType())) &&
2293 "init list ignored");
2294
2295 // HLSL initialization lists in the AST are an expansion which can contain
2296 // side-effecting expressions wrapped in opaque value expressions. To properly
2297 // emit these we need to emit the opaque values before we emit the argument
2298 // expressions themselves. This is a little hacky, but it prevents us needing
2299 // to do a bigger AST-level change for a language feature that we need
2300 // deprecate in the near future. See related HLSL language proposals in the
2301 // proposals (https://github.com/microsoft/hlsl-specs/blob/main/proposals):
2302 // * 0005-strict-initializer-lists.md
2303 // * 0032-constructors.md
 // NOTE(review): the body of this guard (presumably the HLSL runtime call
 // emitting the opaque values) is elided in this listing — confirm upstream.
2304 if (CGF.getLangOpts().HLSL)
2306
2307 if (E->hadArrayRangeDesignator())
2308 CGF.ErrorUnsupported(E, "GNU array range designator extension");
2309
2310 llvm::VectorType *VType =
2311 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
2312
2313 if (!VType) {
2314 if (NumInitElements == 0) {
2315 // C++11 value-initialization for the scalar.
2316 return EmitNullValue(E->getType());
2317 }
2318 // We have a scalar in braces. Just use the first element.
2319 return Visit(E->getInit(0));
2320 }
2321
2322 if (isa<llvm::ScalableVectorType>(VType)) {
2323 if (NumInitElements == 0) {
2324 // C++11 value-initialization for the vector.
2325 return EmitNullValue(E->getType());
2326 }
2327
2328 if (NumInitElements == 1) {
2329 Expr *InitVector = E->getInit(0);
2330
2331 // Initialize from another scalable vector of the same type.
 // NOTE(review): the right-hand side of this comparison is elided in this
 // listing — confirm against upstream.
2332 if (InitVector->getType().getCanonicalType() ==
2334 return Visit(InitVector);
2335 }
2336
2337 llvm_unreachable("Unexpected initialization of a scalable vector!");
2338 }
2339
2340 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
2341
2342 // For column-major matrix types, we insert elements directly at their
2343 // column-major positions rather than inserting sequentially and shuffling.
2344 const ConstantMatrixType *ColMajorMT = nullptr;
2345 if (const auto *MT = E->getType()->getAs<ConstantMatrixType>();
2346 MT && CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
2347 LangOptions::MatrixMemoryLayout::MatrixColMajor)
2348 ColMajorMT = MT;
2349
2350 // Loop over initializers collecting the Value for each, and remembering
2351 // whether the source was swizzle (ExtVectorElementExpr). This will allow
2352 // us to fold the shuffle for the swizzle into the shuffle for the vector
2353 // initializer, since LLVM optimizers generally do not want to touch
2354 // shuffles.
2355 unsigned CurIdx = 0;
2356 bool VIsPoisonShuffle = false;
2357 llvm::Value *V = llvm::PoisonValue::get(VType);
2358 for (unsigned i = 0; i != NumInitElements; ++i) {
2359 Expr *IE = E->getInit(i);
2360 Value *Init = Visit(IE);
2361 SmallVector<int, 16> Args;
2362
2363 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
2364
2365 // Handle scalar elements. If the scalar initializer is actually one
2366 // element of a different vector of the same width, use shuffle instead of
2367 // extract+insert.
2368 if (!VVT) {
2369 if (isa<ExtVectorElementExpr>(IE)) {
2370 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
2371
2372 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
2373 ->getNumElements() == ResElts) {
2374 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
2375 Value *LHS = nullptr, *RHS = nullptr;
2376 if (CurIdx == 0) {
2377 // insert into poison -> shuffle (src, poison)
2378 // shufflemask must use an i32
2379 Args.push_back(getAsInt32(C, CGF.Int32Ty));
2380 Args.resize(ResElts, -1);
2381
2382 LHS = EI->getVectorOperand();
2383 RHS = V;
2384 VIsPoisonShuffle = true;
2385 } else if (VIsPoisonShuffle) {
2386 // insert into poison shuffle && size match -> shuffle (v, src)
2387 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
2388 for (unsigned j = 0; j != CurIdx; ++j)
2389 Args.push_back(getMaskElt(SVV, j, 0));
2390 Args.push_back(ResElts + C->getZExtValue());
2391 Args.resize(ResElts, -1);
2392
2393 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
2394 RHS = EI->getVectorOperand();
2395 VIsPoisonShuffle = false;
2396 }
2397 if (!Args.empty()) {
2398 V = Builder.CreateShuffleVector(LHS, RHS, Args);
2399 ++CurIdx;
2400 continue;
2401 }
2402 }
2403 }
 // Plain scalar element: insert at its (possibly column-major remapped)
 // flattened position.
2404 unsigned InsertIdx =
2405 ColMajorMT
2406 ? ColMajorMT->mapRowMajorToColumnMajorFlattenedIndex(CurIdx)
2407 : CurIdx;
2408 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(InsertIdx),
2409 "vecinit");
2410 VIsPoisonShuffle = false;
2411 ++CurIdx;
2412 continue;
2413 }
2414
2415 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
2416
2417 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
2418 // input is the same width as the vector being constructed, generate an
2419 // optimized shuffle of the swizzle input into the result.
2420 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
2421 if (isa<ExtVectorElementExpr>(IE)) {
2422 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
2423 Value *SVOp = SVI->getOperand(0);
2424 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
2425
2426 if (OpTy->getNumElements() == ResElts) {
2427 for (unsigned j = 0; j != CurIdx; ++j) {
2428 // If the current vector initializer is a shuffle with poison, merge
2429 // this shuffle directly into it.
2430 if (VIsPoisonShuffle) {
2431 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
2432 } else {
2433 Args.push_back(j);
2434 }
2435 }
2436 for (unsigned j = 0, je = InitElts; j != je; ++j)
2437 Args.push_back(getMaskElt(SVI, j, Offset));
2438 Args.resize(ResElts, -1);
2439
2440 if (VIsPoisonShuffle)
2441 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
2442
2443 Init = SVOp;
2444 }
2445 }
2446
2447 // Extend init to result vector length, and then shuffle its contribution
2448 // to the vector initializer into V.
2449 if (Args.empty()) {
2450 for (unsigned j = 0; j != InitElts; ++j)
2451 Args.push_back(j);
2452 Args.resize(ResElts, -1);
2453 Init = Builder.CreateShuffleVector(Init, Args, "vext");
2454
2455 Args.clear();
2456 for (unsigned j = 0; j != CurIdx; ++j)
2457 Args.push_back(j);
2458 for (unsigned j = 0; j != InitElts; ++j)
2459 Args.push_back(j + Offset);
2460 Args.resize(ResElts, -1);
2461 }
2462
2463 // If V is poison, make sure it ends up on the RHS of the shuffle to aid
2464 // merging subsequent shuffles into this one.
2465 if (CurIdx == 0)
2466 std::swap(V, Init);
2467 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
2468 VIsPoisonShuffle = isa<llvm::PoisonValue>(Init);
2469 CurIdx += InitElts;
2470 }
2471
2472 // FIXME: evaluate codegen vs. shuffling against constant null vector.
2473 // Emit remaining default initializers.
2474 llvm::Type *EltTy = VType->getElementType();
2475
2476 // Emit remaining default initializers
2477 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
2478 unsigned InsertIdx =
2479 ColMajorMT ? ColMajorMT->mapRowMajorToColumnMajorFlattenedIndex(CurIdx)
2480 : CurIdx;
2481 Value *Idx = Builder.getInt32(InsertIdx);
2482 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
2483 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
2484 }
2485
2486 return V;
2487}
2488
2490 return !D->isWeak();
2491}
2492
2493static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E) {
2494 E = E->IgnoreParens();
2495
2496 if (const auto *UO = dyn_cast<UnaryOperator>(E))
2497 if (UO->getOpcode() == UO_Deref)
2498 return CGF.isPointerKnownNonNull(UO->getSubExpr());
2499
2500 if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
2501 return isDeclRefKnownNonNull(CGF, DRE->getDecl());
2502
2503 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
2504 if (isa<FieldDecl>(ME->getMemberDecl()))
2505 return true;
2506 return isDeclRefKnownNonNull(CGF, ME->getMemberDecl());
2507 }
2508
2509 // Array subscripts? Anything else?
2510
2511 return false;
2512}
2513
  assert(E->getType()->isSignableType(getContext()));

  E = E->IgnoreParens();

  // 'this' is always assumed to be non-null.
  if (isa<CXXThisExpr>(E))
    return true;

  // &lvalue is non-null whenever the lvalue itself is known to reside at a
  // non-null address.
  if (const auto *UO = dyn_cast<UnaryOperator>(E))
    if (UO->getOpcode() == UO_AddrOf)
      return isLValueKnownNonNull(*this, UO->getSubExpr());

  // Function-to-pointer and array-to-pointer decay produce the address of
  // the decayed entity, so defer to the lvalue check on the operand.
  if (const auto *CE = dyn_cast<CastExpr>(E))
    if (CE->getCastKind() == CK_FunctionToPointerDecay ||
        CE->getCastKind() == CK_ArrayToPointerDecay)
      return isLValueKnownNonNull(*this, CE->getSubExpr());

  // Maybe honor __nonnull?

  return false;
}
2535
  const Expr *E = CE->getSubExpr();

  // Unchecked derived-to-base conversions never require a null check by
  // construction.
  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(E->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
    // And that glvalue casts are never null.
    if (ICE->isGLValue())
      return false;
  }

  // Otherwise the operand may be null, so the cast has to account for it.
  return true;
}
2555
// RHS is an aggregate type
// Flattens the aggregate source into a list of scalar lvalues and rebuilds
// the destination (vector, constant matrix, or builtin scalar) element by
// element, converting each leaf to the destination element type.
                                            QualType DestTy, SourceLocation Loc) {
  SmallVector<LValue, 16> LoadList;
  // Collect one LValue per scalar leaf of the source aggregate.
  CGF.FlattenAccessAndTypeLValue(SrcVal, LoadList);
  // Dest is either a vector, constant matrix, or a builtin
  // if its a vector create a temp alloca to store into and return that
  if (auto *VecTy = DestTy->getAs<VectorType>()) {
    assert(LoadList.size() >= VecTy->getNumElements() &&
           "Flattened type on RHS must have the same number or more elements "
           "than vector on LHS.");
    llvm::Value *V = CGF.Builder.CreateLoad(
        CGF.CreateIRTempWithoutCast(DestTy, "flatcast.tmp"));
    // write to V.
    for (unsigned I = 0, E = VecTy->getNumElements(); I < E; I++) {
      RValue RVal = CGF.EmitLoadOfLValue(LoadList[I], Loc);
      assert(RVal.isScalar() &&
             "All flattened source values should be scalars.");
      // Convert each flattened scalar to the vector's element type before
      // inserting it.
      llvm::Value *Cast =
          CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[I].getType(),
                                   VecTy->getElementType(), Loc);
      V = CGF.Builder.CreateInsertElement(V, Cast, I);
    }
    return V;
  }
  if (auto *MatTy = DestTy->getAs<ConstantMatrixType>()) {
    assert(LoadList.size() >= MatTy->getNumElementsFlattened() &&
           "Flattened type on RHS must have the same number or more elements "
           "than vector on LHS.");

    bool IsRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==

    llvm::Value *V = CGF.Builder.CreateLoad(
        CGF.CreateIRTempWithoutCast(DestTy, "flatcast.tmp"));
    // V is an allocated temporary for constructing the matrix.
    for (unsigned Row = 0, RE = MatTy->getNumRows(); Row < RE; Row++) {
      for (unsigned Col = 0, CE = MatTy->getNumColumns(); Col < CE; Col++) {
        // When interpreted as a matrix, \p LoadList is *always* row-major order
        // regardless of the default matrix memory layout.
        unsigned LoadIdx = MatTy->getRowMajorFlattenedIndex(Row, Col);
        RValue RVal = CGF.EmitLoadOfLValue(LoadList[LoadIdx], Loc);
        assert(RVal.isScalar() &&
               "All flattened source values should be scalars.");
        llvm::Value *Cast = CGF.EmitScalarConversion(
            RVal.getScalarVal(), LoadList[LoadIdx].getType(),
            MatTy->getElementType(), Loc);
        // Remap the row-major source position into the destination layout.
        unsigned MatrixIdx = MatTy->getFlattenedIndex(Row, Col, IsRowMajor);
        V = CGF.Builder.CreateInsertElement(V, Cast, MatrixIdx);
      }
    }
    return V;
  }
  // if its a builtin just do an extract element or load.
  assert(DestTy->isBuiltinType() &&
         "Destination type must be a vector, matrix, or builtin type.");
  RValue RVal = CGF.EmitLoadOfLValue(LoadList[0], Loc);
  assert(RVal.isScalar() && "All flattened source values should be scalars.");
  return CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[0].getType(),
                                  DestTy, Loc);
}
2617
// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
// have to handle a more broad range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
  // Remember the innermost cast currently being emitted and restore the
  // previous one on every exit path from this function.
  llvm::scope_exit RestoreCurCast(
      [this, Prev = CGF.CurCast] { CGF.CurCast = Prev; });
  CGF.CurCast = CE;

  Expr *E = CE->getSubExpr();
  QualType DestTy = CE->getType();
  CastKind Kind = CE->getCastKind();
  CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);

  // These cases are generally not written to ignore the result of
  // evaluating their sub-expressions, so we clear this now.
  bool Ignored = TestAndClearIgnoreResultAssign();

  // Since almost all cast kinds apply to scalars, this switch doesn't have
  // a default case, so the compiler will warn on a missing case. The cases
  // are in the same order as in the CastKind enum.
  switch (Kind) {
  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  case CK_LValueBitCast:
  case CK_ObjCObjectLValueCast: {
    // Reinterpret the lvalue's storage with the destination's memory type,
    // then load from it.
    Address Addr = EmitLValue(E).getAddress();
    Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
    LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
    return EmitLoadOfLValue(LV, CE->getExprLoc());
  }

  case CK_LValueToRValueBitCast: {
    // Like CK_LValueBitCast, but the load may alias anything, so tag it with
    // may-alias TBAA.
    LValue SourceLVal = CGF.EmitLValue(E);
    Address Addr =
        SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy));
    LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
    DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
    return EmitLoadOfLValue(DestLV, CE->getExprLoc());
  }

  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    Value *Src = Visit(E);
    llvm::Type *SrcTy = Src->getType();
    llvm::Type *DstTy = ConvertType(DestTy);

    // FIXME: this is a gross but seemingly necessary workaround for an issue
    // manifesting when a target uses a non-default AS for indirect sret args,
    // but the source HLL is generic, wherein a valid C-cast or reinterpret_cast
    // on the address of a local struct that gets returned by value yields an
    // invalid bitcast from the a pointer to the IndirectAS to a pointer to the
    // DefaultAS. We can only do this subversive thing because sret args are
    // manufactured and them residing in the IndirectAS is a target specific
    // detail, and doing an AS cast here still retains the semantics the user
    // expects. It is desirable to remove this iff a better solution is found.
    if (auto A = dyn_cast<llvm::Argument>(Src); A && A->hasStructRetAttr())
      return CGF.performAddrSpaceCast(Src, DstTy);

    // FIXME: Similarly to the sret case above, we need to handle BitCasts that
    // involve implicit address space conversions. This arises when the source
    // language lacks explicit address spaces, but the target's data layout
    // assigns different address spaces (e.g., program address space for
    // function pointers). Since Sema operates on Clang types (which don't carry
    // this information) and selects CK_BitCast, we must detect the address
    // space mismatch here in CodeGen when lowering to LLVM types. The most
    // common case is casting function pointers (which get the program AS from
    // the data layout) to/from object pointers (which use the default AS).
    // Ideally, this would be resolved at a higher level, but that would require
    // exposing data layout details to Sema.
    if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
      return CGF.performAddrSpaceCast(Src, DstTy);
    }

    assert(
        (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
         SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
        "Address-space cast must be used to convert address spaces");

    if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
      if (auto *PT = DestTy->getAs<PointerType>()) {
            PT->getPointeeType(),
            Address(Src,
                        E->getType()->castAs<PointerType>()->getPointeeType()),
                    CGF.getPointerAlign()),
            /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
            CE->getBeginLoc());
      }
    }

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      const QualType SrcType = E->getType();

      if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
        // Casting to pointer that could carry dynamic information (provided by
        // invariant.group) requires launder.
        Src = Builder.CreateLaunderInvariantGroup(Src);
      } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
        // Casting to pointer that does not carry dynamic information (provided
        // by invariant.group) requires stripping it. Note that we don't do it
        // if the source could not be dynamic type and destination could be
        // dynamic because dynamic information is already laundered. It is
        // because launder(strip(src)) == launder(src), so there is no need to
        // add extra strip before launder.
        Src = Builder.CreateStripInvariantGroup(Src);
      }
    }

    // Update heapallocsite metadata when there is an explicit pointer cast.
    if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
      if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
          !isa<CastExpr>(E)) {
        QualType PointeeType = DestTy->getPointeeType();
        if (!PointeeType.isNull())
          CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
                                                       CE->getExprLoc());
      }
    }

    // If Src is a fixed vector and Dst is a scalable vector, and both have the
    // same element type, use the llvm.vector.insert intrinsic to perform the
    // bitcast.
    if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
        // If we are casting a fixed i8 vector to a scalable i1 predicate
        // vector, use a vector insert and bitcast the result.
        if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
            FixedSrcTy->getElementType()->isIntegerTy(8)) {
          ScalableDstTy = llvm::ScalableVectorType::get(
              FixedSrcTy->getElementType(),
              llvm::divideCeil(
                  ScalableDstTy->getElementCount().getKnownMinValue(), 8));
        }
        if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
          llvm::Value *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
          llvm::Value *Result = Builder.CreateInsertVector(
              ScalableDstTy, PoisonVec, Src, uint64_t(0), "cast.scalable");
          ScalableDstTy = cast<llvm::ScalableVectorType>(
              llvm::VectorType::getWithSizeAndScalar(ScalableDstTy, DstTy));
          if (Result->getType() != ScalableDstTy)
            Result = Builder.CreateBitCast(Result, ScalableDstTy);
          if (Result->getType() != DstTy)
            Result = Builder.CreateExtractVector(DstTy, Result, uint64_t(0));
          return Result;
        }
      }
    }

    // If Src is a scalable vector and Dst is a fixed vector, and both have the
    // same element type, use the llvm.vector.extract intrinsic to perform the
    // bitcast.
    if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
      if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
        // If we are casting a scalable i1 predicate vector to a fixed i8
        // vector, bitcast the source and use a vector extract.
        if (ScalableSrcTy->getElementType()->isIntegerTy(1) &&
            FixedDstTy->getElementType()->isIntegerTy(8)) {
          if (!ScalableSrcTy->getElementCount().isKnownMultipleOf(8)) {
            ScalableSrcTy = llvm::ScalableVectorType::get(
                ScalableSrcTy->getElementType(),
                llvm::alignTo<8>(
                    ScalableSrcTy->getElementCount().getKnownMinValue()));
            llvm::Value *ZeroVec = llvm::Constant::getNullValue(ScalableSrcTy);
            Src = Builder.CreateInsertVector(ScalableSrcTy, ZeroVec, Src,
                                             uint64_t(0));
          }

          ScalableSrcTy = llvm::ScalableVectorType::get(
              FixedDstTy->getElementType(),
              ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
          Src = Builder.CreateBitCast(Src, ScalableSrcTy);
        }
        if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType())
          return Builder.CreateExtractVector(DstTy, Src, uint64_t(0),
                                             "cast.fixed");
      }
    }

    // Perform VLAT <-> VLST bitcast through memory.
    // TODO: since the llvm.vector.{insert,extract} intrinsics
    // require the element types of the vectors to be the same, we
    // need to keep this around for bitcasts between VLAT <-> VLST where
    // the element types of the vectors are not the same, until we figure
    // out a better way of doing these casts.
    if ((isa<llvm::FixedVectorType>(SrcTy) &&
      Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
      LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
      CGF.EmitStoreOfScalar(Src, LV);
      Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
      LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
      DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
      return EmitLoadOfLValue(DestLV, CE->getExprLoc());
    }

    llvm::Value *Result = Builder.CreateBitCast(Src, DstTy);
    return CGF.authPointerToPointerCast(Result, E->getType(), DestTy);
  }
  case CK_AddressSpaceConversion: {
    Expr::EvalResult Result;
    if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
        Result.Val.isNullPointer()) {
      // If E has side effect, it is emitted even if its final result is a
      // null pointer. In that case, a DCE pass should be able to
      // eliminate the useless instructions emitted during translating E.
      if (Result.HasSideEffects)
        Visit(E);
                                  ConvertType(DestTy)), DestTy);
    }
    // Since target may map different address spaces in AST to the same address
    // space, an address space conversion may end up as a bitcast.
    return CGF.performAddrSpaceCast(Visit(E), ConvertType(DestTy));
  }
  case CK_AtomicToNonAtomic:
  case CK_NonAtomicToAtomic:
  case CK_UserDefinedConversion:
    return Visit(E);

  case CK_NoOp: {
    // A no-op cast still requires a fresh load if it changes volatility.
    return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE) : Visit(E);
  }

  case CK_BaseToDerived: {
    const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
    assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");

    Address Base = CGF.EmitPointerWithAlignment(E);
    Address Derived =
      CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
                                   CE->path_begin(), CE->path_end(),

    // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    if (CGF.sanitizePerformTypeCheck())
                        Derived, DestTy->getPointeeType());

    if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
      CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
                                    /*MayBeNull=*/true,
                                    CE->getBeginLoc());

    return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType());
  }
  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    // The EmitPointerWithAlignment path does this fine; just discard
    // the alignment.
                                      CE->getType()->getPointeeType());
  }

  case CK_Dynamic: {
    // Delegate dynamic_cast lowering entirely to EmitDynamicCast.
    Address V = CGF.EmitPointerWithAlignment(E);
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
    return CGF.EmitDynamicCast(V, DCE);
  }

  case CK_ArrayToPointerDecay:
                                      CE->getType()->getPointeeType());
  case CK_FunctionToPointerDecay:
    return EmitLValue(E).getPointer(CGF);

  case CK_NullToPointer:
    // Emit the operand for its side effects when required, then produce the
    // destination type's null pointer.
    if (MustVisitNullValue(E))
      CGF.EmitIgnoredExpr(E);

    return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
                                  DestTy);

  case CK_NullToMemberPointer: {
    if (MustVisitNullValue(E))
      CGF.EmitIgnoredExpr(E);

    const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
  }

  case CK_ReinterpretMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer: {
    Value *Src = Visit(E);

    // Note that the AST doesn't distinguish between checked and
    // unchecked member pointer conversions, so we always have to
    // implement checked conversions here. This is inefficient when
    // actual control flow may be required in order to perform the
    // check, which it is for data member pointers (but not member
    // function pointers on Itanium and ARM).
    return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
  }

  case CK_ARCProduceObject:
    return CGF.EmitARCRetainScalarExpr(E);
  case CK_ARCConsumeObject:
    return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
  case CK_ARCReclaimReturnedObject:
    return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
  case CK_ARCExtendBlockObject:
    return CGF.EmitARCExtendBlockObject(E);

  case CK_CopyAndAutoreleaseBlockObject:
    return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());

  case CK_FloatingRealToComplex:
  case CK_FloatingComplexCast:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_FloatingComplexToIntegralComplex:
  case CK_ConstructorConversion:
  case CK_ToUnion:
  case CK_HLSLArrayRValue:
    llvm_unreachable("scalar cast to non-scalar value");

  case CK_LValueToRValue:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
    assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
    return Visit(E);

  case CK_IntegralToPointer: {
    Value *Src = Visit(E);

    // First, convert to the correct width so that we control the kind of
    // extension.
    auto DestLLVMTy = ConvertType(DestTy);
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
    bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
    llvm::Value* IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");

    auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      // Going from integer to pointer that could be dynamic requires reloading
      // dynamic information from invariant.group.
      if (DestTy.mayBeDynamicClass())
        IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
    }

    IntToPtr = CGF.authPointerToPointerCast(IntToPtr, E->getType(), DestTy);
    return IntToPtr;
  }
  case CK_PointerToIntegral: {
    assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
    auto *PtrExpr = Visit(E);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      const QualType SrcType = E->getType();

      // Casting to integer requires stripping dynamic information as it does
      // not carries it.
      if (SrcType.mayBeDynamicClass())
        PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
    }

    PtrExpr = CGF.authPointerToPointerCast(PtrExpr, E->getType(), DestTy);
    return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
  }
  case CK_ToVoid: {
    // Evaluate the operand purely for its side effects; there is no value.
    CGF.EmitIgnoredExpr(E);
    return nullptr;
  }
  case CK_MatrixCast: {
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  // CK_HLSLAggregateSplatCast only handles splatting to vectors from a vec1
  // Casts were inserted in Sema to Cast the Src Expr to a Scalar and
  // To perform any necessary Scalar Cast, so this Cast can be handled
  // by the regular Vector Splat cast code.
  case CK_HLSLAggregateSplatCast:
  case CK_VectorSplat: {
    llvm::Type *DstTy = ConvertType(DestTy);
    Value *Elt = Visit(E);
    // Splat the element across to all elements
    llvm::ElementCount NumElements =
        cast<llvm::VectorType>(DstTy)->getElementCount();
    return Builder.CreateVectorSplat(NumElements, Elt, "splat");
  }

  case CK_FixedPointCast:
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_FixedPointToBoolean:
    assert(E->getType()->isFixedPointType() &&
           "Expected src type to be fixed point type");
    assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_FixedPointToIntegral:
    assert(E->getType()->isFixedPointType() &&
           "Expected src type to be fixed point type");
    assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_IntegralToFixedPoint:
    assert(E->getType()->isIntegerType() &&
           "Expected src type to be an integer");
    assert(DestTy->isFixedPointType() &&
           "Expected dest type to be fixed point type");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_IntegralCast: {
    if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
      QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
      return Builder.CreateIntCast(Visit(E), ConvertType(DestTy),
                                   "conv");
    }
    ScalarConversionOpts Opts;
    if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
      if (!ICE->isPartOfExplicitCast())
        Opts = ScalarConversionOpts(CGF.SanOpts);
    }
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc(), Opts);
  }
  case CK_IntegralToFloating: {
    if (E->getType()->isVectorType() && DestTy->isVectorType()) {
      // TODO: Support constrained FP intrinsics.
      QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
      if (SrcElTy->isSignedIntegerOrEnumerationType())
        return Builder.CreateSIToFP(Visit(E), ConvertType(DestTy), "conv");
      return Builder.CreateUIToFP(Visit(E), ConvertType(DestTy), "conv");
    }
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_FloatingToIntegral: {
    if (E->getType()->isVectorType() && DestTy->isVectorType()) {
      // TODO: Support constrained FP intrinsics.
      QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
      if (DstElTy->isSignedIntegerOrEnumerationType())
        return Builder.CreateFPToSI(Visit(E), ConvertType(DestTy), "conv");
      return Builder.CreateFPToUI(Visit(E), ConvertType(DestTy), "conv");
    }
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_FloatingCast: {
    if (E->getType()->isVectorType() && DestTy->isVectorType()) {
      // TODO: Support constrained FP intrinsics.
      QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
      QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
      if (DstElTy->castAs<BuiltinType>()->getKind() <
          SrcElTy->castAs<BuiltinType>()->getKind())
        return Builder.CreateFPTrunc(Visit(E), ConvertType(DestTy), "conv");
      return Builder.CreateFPExt(Visit(E), ConvertType(DestTy), "conv");
    }
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_FixedPointToFloating:
  case CK_FloatingToFixedPoint: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_BooleanToSignedIntegral: {
    ScalarConversionOpts Opts;
    Opts.TreatBooleanAsSigned = true;
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc(), Opts);
  }
  case CK_IntegralToBoolean:
    return EmitIntToBoolConversion(Visit(E));
  case CK_PointerToBoolean:
    return EmitPointerToBoolConversion(Visit(E), E->getType());
  case CK_FloatingToBoolean: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitFloatToBoolConversion(Visit(E));
  }
  case CK_MemberPointerToBoolean: {
    llvm::Value *MemPtr = Visit(E);
    const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
  }

  case CK_FloatingComplexToReal:
  case CK_IntegralComplexToReal:
    return CGF.EmitComplexExpr(E, false, true).first;

  case CK_FloatingComplexToBoolean:
  case CK_IntegralComplexToBoolean: {

    // TODO: kill this function off, inline appropriate case here
    return EmitComplexToScalarConversion(V, E->getType(), DestTy,
                                         CE->getExprLoc());
  }

  case CK_ZeroToOCLOpaqueType: {
    assert((DestTy->isEventT() || DestTy->isQueueT() ||
            DestTy->isOCLIntelSubgroupAVCType()) &&
           "CK_ZeroToOCLEvent cast on non-event type");
    return llvm::Constant::getNullValue(ConvertType(DestTy));
  }

  case CK_IntToOCLSampler:
    return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);

  case CK_HLSLVectorTruncation: {
    assert((DestTy->isVectorType() || DestTy->isBuiltinType()) &&
           "Destination type must be a vector or builtin type.");
    Value *Vec = Visit(E);
    if (auto *VecTy = DestTy->getAs<VectorType>()) {
      // Keep only the leading elements of the source vector.
      SmallVector<int> Mask;
      unsigned NumElts = VecTy->getNumElements();
      for (unsigned I = 0; I != NumElts; ++I)
        Mask.push_back(I);

      return Builder.CreateShuffleVector(Vec, Mask, "trunc");
    }
    llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy);
    return Builder.CreateExtractElement(Vec, Zero, "cast.vtrunc");
  }
  case CK_HLSLMatrixTruncation: {
    assert((DestTy->isMatrixType() || DestTy->isBuiltinType()) &&
           "Destination type must be a matrix or builtin type.");
    Value *Mat = Visit(E);
    if (auto *MatTy = DestTy->getAs<ConstantMatrixType>()) {
      // Select the top-left NumRows x NumCols submatrix of the source.
      SmallVector<int> Mask(MatTy->getNumElementsFlattened());
      unsigned NumCols = MatTy->getNumColumns();
      unsigned NumRows = MatTy->getNumRows();
      auto *SrcMatTy = E->getType()->getAs<ConstantMatrixType>();
      assert(SrcMatTy && "Source type must be a matrix type.");
      assert(NumRows <= SrcMatTy->getNumRows());
      assert(NumCols <= SrcMatTy->getNumColumns());
      bool IsRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
                        LangOptions::MatrixMemoryLayout::MatrixRowMajor;
      for (unsigned R = 0; R < NumRows; R++)
        for (unsigned C = 0; C < NumCols; C++)
          Mask[MatTy->getFlattenedIndex(R, C, IsRowMajor)] =
              SrcMatTy->getFlattenedIndex(R, C, IsRowMajor);

      return Builder.CreateShuffleVector(Mat, Mask, "trunc");
    }
    llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy);
    return Builder.CreateExtractElement(Mat, Zero, "cast.mtrunc");
  }
  case CK_HLSLElementwiseCast: {
    RValue RV = CGF.EmitAnyExpr(E);
    SourceLocation Loc = CE->getExprLoc();

    Address SrcAddr = Address::invalid();

    // Materialize the source in memory so it can be flattened element-wise.
    if (RV.isAggregate()) {
      SrcAddr = RV.getAggregateAddress();
    } else {
      SrcAddr = CGF.CreateMemTemp(E->getType(), "hlsl.ewcast.src");
      LValue TmpLV = CGF.MakeAddrLValue(SrcAddr, E->getType());
      CGF.EmitStoreThroughLValue(RV, TmpLV);
    }

    LValue SrcVal = CGF.MakeAddrLValue(SrcAddr, E->getType());
    return EmitHLSLElementwiseCast(CGF, SrcVal, DestTy, Loc);
  }

  } // end of switch

  llvm_unreachable("unknown scalar cast");
}
3200
3201Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
3202 CodeGenFunction::StmtExprEvaluation eval(CGF);
3203 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
3204 !E->getType()->isVoidType());
3205 if (!RetAlloca.isValid())
3206 return nullptr;
3207 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
3208 E->getExprLoc());
3209}
3210
3211Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
3212 CodeGenFunction::RunCleanupsScope Scope(CGF);
3213 Value *V = Visit(E->getSubExpr());
3214 // Defend against dominance problems caused by jumps out of expression
3215 // evaluation through the shared cleanup block.
3216 Scope.ForceCleanup({&V});
3217 return V;
3218}
3219
3220//===----------------------------------------------------------------------===//
3221// Unary Operators
3222//===----------------------------------------------------------------------===//
3223
                                           llvm::Value *InVal, bool IsInc,
                                           FPOptions FPFeatures) {
  // Package an increment/decrement as the equivalent `InVal + 1` or
  // `InVal - 1` binary operation so it can reuse the binop emission paths.
  BinOpInfo BinOp;
  BinOp.LHS = InVal;
  // The RHS is always the unsigned constant 1 of the operand's type; the
  // opcode (BO_Add vs. BO_Sub) carries the direction.
  BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
  BinOp.Ty = E->getType();
  BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
  BinOp.FPFeatures = FPFeatures;
  BinOp.E = E;
  return BinOp;
}
3236
llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
    const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
  // Treat positive amount as unsigned to support inc of i1 (needed for
  // unsigned _BitInt(1)).
  llvm::Value *Amount =
      llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, !IsInc);
  StringRef Name = IsInc ? "inc" : "dec";
  QualType Ty = E->getType();
  const bool isSigned = Ty->isSignedIntegerOrEnumerationType();
  // Whether the matching integer-overflow sanitizer is enabled for this
  // operand's signedness.
  const bool hasSan =
      isSigned ? CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)
               : CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow);

  // Note the deliberate [[fallthrough]]s below: when a sanitizer check is
  // still required, each case falls into the next, stricter handling.
  switch (getOverflowBehaviorConsideringType(CGF, Ty)) {
  case LangOptions::OB_Wrap:
    // Wrapping overflow is defined: emit a plain add with no flags or checks.
    return Builder.CreateAdd(InVal, Amount, Name);
  case LangOptions::OB_SignedAndDefined:
    if (!hasSan)
      return Builder.CreateAdd(InVal, Amount, Name);
    [[fallthrough]];
  case LangOptions::OB_Unset:
    // Expressions the AST marks as unable to overflow need no check.
    if (!E->canOverflow())
      return Builder.CreateAdd(InVal, Amount, Name);
    if (!hasSan)
      return isSigned ? Builder.CreateNSWAdd(InVal, Amount, Name)
                      : Builder.CreateAdd(InVal, Amount, Name);
    [[fallthrough]];
  case LangOptions::OB_Trap:
    if (!Ty->getAs<OverflowBehaviorType>() && !E->canOverflow())
      return Builder.CreateAdd(InVal, Amount, Name);
    BinOpInfo Info = createBinOpInfoFromIncDec(
        E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts()));
    // Skip the runtime check when analysis proves overflow is impossible.
    if (CanElideOverflowCheck(CGF.getContext(), Info))
      return isSigned ? Builder.CreateNSWAdd(InVal, Amount, Name)
                      : Builder.CreateAdd(InVal, Amount, Name);
    return EmitOverflowCheckedBinOp(Info);
  }
  llvm_unreachable("Unknown OverflowBehaviorKind");
}
3276
// RAII guard used around inc/dec emission: on scope exit, when OpenMP is
// enabled, it notifies the OpenMP runtime so lastprivate conditional
// variables observe the update made to the operand.
3277namespace {
3278/// Handles check and update for lastprivate conditional variables.
3279class OMPLastprivateConditionalUpdateRAII {
3280private:
3281  CodeGenFunction &CGF;
3282  const UnaryOperator *E;
3283
3284public:
3285  OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
3286                                      const UnaryOperator *E)
3287      : CGF(CGF), E(E) {}
3288  ~OMPLastprivateConditionalUpdateRAII() {
3289    if (CGF.getLangOpts().OpenMP)
      // NOTE(review): the callee on source line 3290 is not visible in this
      // capture; presumably the OpenMP runtime's lastprivate-conditional
      // check-and-emit entry point — confirm against upstream. The
      // continuation below passes (CGF, the operand subexpression).
3291          CGF, E->getSubExpr());
3292  }
3293};
3294} // namespace
3295
// Emit a scalar pre/post increment or decrement of the lvalue LV.
// Dispatches on the operand type: atomics (lock-free atomicrmw fast paths,
// else a load/op/cmpxchg loop), bool, integers (with overflow-behavior and
// sanitizer handling), pointers (including VLA and function pointers),
// vectors, floating point (with half-via-float widening), fixed point, and
// Objective-C object pointers.  Returns the updated value for pre-inc/dec
// and the originally loaded value for post-inc/dec.
3296llvm::Value *
3297ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
3298                                           bool isInc, bool isPre) {
3299  ApplyAtomGroup Grp(CGF.getDebugInfo());
3300  OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
3301  QualType type = E->getSubExpr()->getType();
3302  llvm::PHINode *atomicPHI = nullptr;
3303  llvm::Value *value;
3304  llvm::Value *input;
3305  llvm::Value *Previous = nullptr;
3306  QualType SrcType = E->getType();
3307
3308  int amount = (isInc ? 1 : -1);
3309  bool isSubtraction = !isInc;
3310
  // Atomic operand: peel off the _Atomic wrapper and try lock-free fast
  // paths before falling back to the cmpxchg loop below.
3311  if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
3312    type = atomicTy->getValueType();
3313    if (isInc && type->isBooleanType()) {
3314      llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
3315      if (isPre) {
3316        Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
3317            ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
3318        return Builder.getTrue();
3319      }
3320      // For atomic bool increment, we just store true and return it for
3321      // preincrement, do an atomic swap with true for postincrement
3322      return Builder.CreateAtomicRMW(
3323          llvm::AtomicRMWInst::Xchg, LV.getAddress(), True,
3324          llvm::AtomicOrdering::SequentiallyConsistent);
3325    }
3326    // Special case for atomic increment / decrement on integers, emit
3327    // atomicrmw instructions. We skip this if we want to be doing overflow
3328    // checking, and fall into the slow path with the atomic cmpxchg loop.
3329    if (!type->isBooleanType() && type->isIntegerType() &&
3330        !(type->isUnsignedIntegerType() &&
3331          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3332        CGF.getLangOpts().getSignedOverflowBehavior() !=
3333            LangOptions::SOB_Trapping) {
3334      llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
3335        llvm::AtomicRMWInst::Sub;
3336      llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
3337        llvm::Instruction::Sub;
3338      llvm::Value *amt = CGF.EmitToMemory(
3339          llvm::ConstantInt::get(ConvertType(type), 1, true), type);
3340      llvm::Value *old =
3341          Builder.CreateAtomicRMW(aop, LV.getAddress(), amt,
3342                                  llvm::AtomicOrdering::SequentiallyConsistent);
      // atomicrmw returns the OLD value, so pre-inc/dec must recompute the
      // new value; post-inc/dec can return it directly.
3343      return isPre ? Builder.CreateBinOp(op, old, amt) : old;
3344    }
3345    // Special case for atomic increment/decrement on floats.
3346    // Bail out non-power-of-2-sized floating point types (e.g., x86_fp80).
3347    if (type->isFloatingType()) {
3348      llvm::Type *Ty = ConvertType(type);
3349      if (llvm::has_single_bit(Ty->getScalarSizeInBits())) {
3350        llvm::AtomicRMWInst::BinOp aop =
3351            isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
3352        llvm::Instruction::BinaryOps op =
3353            isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
3354        llvm::Value *amt = llvm::ConstantFP::get(Ty, 1.0);
3355        llvm::AtomicRMWInst *old =
3356            CGF.emitAtomicRMWInst(aop, LV.getAddress(), amt,
3357                                  llvm::AtomicOrdering::SequentiallyConsistent);
3358
3359        return isPre ? Builder.CreateBinOp(op, old, amt) : old;
3360      }
3361    }
3362    value = EmitLoadOfLValue(LV, E->getExprLoc());
3363    input = value;
3364    // For every other atomic operation, we need to emit a load-op-cmpxchg loop
3365    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3366    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3367    value = CGF.EmitToMemory(value, type);
3368    Builder.CreateBr(opBB);
3369    Builder.SetInsertPoint(opBB);
    // The PHI's second incoming edge (the retry path) is added after the
    // compare-exchange at the bottom of this function.
3370    atomicPHI = Builder.CreatePHI(value->getType(), 2);
3371    atomicPHI->addIncoming(value, startBB);
3372    value = atomicPHI;
3373  } else {
3374    value = EmitLoadOfLValue(LV, E->getExprLoc());
3375    input = value;
3376  }
3377
3378  // Special case of integer increment that we have to check first: bool++.
3379  // Due to promotion rules, we get:
3380  //   bool++ -> bool = bool + 1
3381  //          -> bool = (int)bool + 1
3382  //          -> bool = ((int)bool + 1 != 0)
3383  // An interesting aspect of this is that increment is always true.
3384  // Decrement does not have this property.
3385  if (isInc && type->isBooleanType()) {
3386    value = Builder.getTrue();
3387
3388  // Most common case by far: integer increment.
3389  } else if (type->isIntegerType()) {
3390    QualType promotedType;
3391    bool canPerformLossyDemotionCheck = false;
3392
    // NOTE(review): the guarding 'if' on source line 3393 (which decides
    // whether the operand undergoes integer promotion) is not visible in
    // this capture; the block below through the closing brace is its body.
3394      promotedType = CGF.getContext().getPromotedIntegerType(type);
3395      assert(promotedType != type && "Shouldn't promote to the same type.");
3396      canPerformLossyDemotionCheck = true;
3397      canPerformLossyDemotionCheck &=
      // NOTE(review): the left-hand side of this comparison (source line
      // 3398) is not visible in this capture.
3399          CGF.getContext().getCanonicalType(promotedType);
3400      canPerformLossyDemotionCheck &=
      // NOTE(review): the callee opening this argument list (source line
      // 3401) is not visible in this capture.
3402              type, promotedType);
3403      assert((!canPerformLossyDemotionCheck ||
3404              type->isSignedIntegerOrEnumerationType() ||
3405              promotedType->isSignedIntegerOrEnumerationType() ||
3406              ConvertType(type)->getScalarSizeInBits() ==
3407                  ConvertType(promotedType)->getScalarSizeInBits()) &&
3408             "The following check expects that if we do promotion to different "
3409             "underlying canonical type, at least one of the types (either "
3410             "base or promoted) will be signed, or the bitwidths will match.");
3411    }
3412    if (CGF.SanOpts.hasOneOf(
3413            SanitizerKind::ImplicitIntegerArithmeticValueChange |
3414            SanitizerKind::ImplicitBitfieldConversion) &&
3415        canPerformLossyDemotionCheck) {
3416      // While `x += 1` (for `x` with width less than int) is modeled as
3417      // promotion+arithmetics+demotion, and we can catch lossy demotion with
3418      // ease; inc/dec with width less than int can't overflow because of
3419      // promotion rules, so we omit promotion+demotion, which means that we can
3420      // not catch lossy "demotion". Because we still want to catch these cases
3421      // when the sanitizer is enabled, we perform the promotion, then perform
3422      // the increment/decrement in the wider type, and finally
3423      // perform the demotion. This will catch lossy demotions.
3424
3425      // We have a special case for bitfields defined using all the bits of the
3426      // type. In this case we need to do the same trick as for the integer
3427      // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.
3428
3429      value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
3430      Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
3431      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3432      // Do pass non-default ScalarConversionOpts so that sanitizer check is
3433      // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
3434      // checks will take care of the conversion.
3435      ScalarConversionOpts Opts;
3436      if (!LV.isBitField())
3437        Opts = ScalarConversionOpts(CGF.SanOpts);
3438      else if (CGF.SanOpts.has(SanitizerKind::ImplicitBitfieldConversion)) {
        // Remember the pre-demotion value/type for the bitfield conversion
        // check emitted after the store below.
3439        Previous = value;
3440        SrcType = promotedType;
3441      }
3442
3443      Opts.PatternExcluded = CGF.getContext().isUnaryOverflowPatternExcluded(E);
3444      value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
3445                                   Opts);
3446
3447      // Note that signed integer inc/dec with width less than int can't
3448      // overflow because of promotion rules; we're just eliding a few steps
3449      // here.
3450    } else if (type->isSignedIntegerOrEnumerationType() ||
3451               type->isUnsignedIntegerType()) {
3452      value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
3453    } else {
3454      // Treat positive amount as unsigned to support inc of i1 (needed for
3455      // unsigned _BitInt(1)).
3456      llvm::Value *amt =
3457          llvm::ConstantInt::get(value->getType(), amount, !isInc);
3458      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3459    }
3460
3461  // Next most common: pointer increment.
3462  } else if (const PointerType *ptr = type->getAs<PointerType>()) {
3463    QualType type = ptr->getPointeeType();
3464
3465    // VLA types don't have constant size.
3466    if (const VariableArrayType *vla
      // NOTE(review): the continuation of this condition (source line 3467,
      // presumably the dyn_cast initializer) is not visible in this capture.
3468      llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
3469      if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
3470      llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
3471      if (CGF.getLangOpts().PointerOverflowDefined)
3472        value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
3473      else
3474        value = CGF.EmitCheckedInBoundsGEP(
3475            elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
3476            E->getExprLoc(), "vla.inc");
3477
3478    // Arithmetic on function pointers (!) is just +-1.
3479    } else if (type->isFunctionType()) {
3480      llvm::Value *amt = Builder.getInt32(amount);
3481
3482      if (CGF.getLangOpts().PointerOverflowDefined)
3483        value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
3484      else
3485        value =
3486            CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
3487                                       /*SignedIndices=*/false, isSubtraction,
3488                                       E->getExprLoc(), "incdec.funcptr");
3489
3490    // For everything else, we can just do a simple increment.
3491    } else {
3492      llvm::Value *amt = Builder.getInt32(amount);
3493      llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
3494      if (CGF.getLangOpts().PointerOverflowDefined)
3495        value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
3496      else
3497        value = CGF.EmitCheckedInBoundsGEP(
3498            elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
3499            E->getExprLoc(), "incdec.ptr");
3500    }
3501
3502  // Vector increment/decrement.
3503  } else if (type->isVectorType()) {
3504    if (type->hasIntegerRepresentation()) {
3505      llvm::Value *amt = llvm::ConstantInt::getSigned(value->getType(), amount);
3506
3507      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3508    } else {
3509      value = Builder.CreateFAdd(
3510                  value,
3511                  llvm::ConstantFP::get(value->getType(), amount),
3512                  isInc ? "inc" : "dec");
3513    }
3514
3515  // Floating point.
3516  } else if (type->isRealFloatingType()) {
3517    // Add the inc/dec to the real part.
3518    llvm::Value *amt;
3519    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
3520
3521    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3522      // Another special case: half FP increment should be done via float. If
3523      // the input isn't already half, it may be i16.
3524      Value *bitcast = Builder.CreateBitCast(input, CGF.CGM.HalfTy);
3525      value = Builder.CreateFPExt(bitcast, CGF.CGM.FloatTy, "incdec.conv");
3526    }
3527
3528    if (value->getType()->isFloatTy())
3529      amt = llvm::ConstantFP::get(VMContext,
3530                                  llvm::APFloat(static_cast<float>(amount)));
3531    else if (value->getType()->isDoubleTy())
3532      amt = llvm::ConstantFP::get(VMContext,
3533                                  llvm::APFloat(static_cast<double>(amount)));
3534    else {
3535      // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
3536      // Convert from float.
3537      llvm::APFloat F(static_cast<float>(amount));
3538      bool ignored;
3539      const llvm::fltSemantics *FS;
3540      // Don't use getFloatTypeSemantics because Half isn't
3541      // necessarily represented using the "half" LLVM type.
3542      if (value->getType()->isFP128Ty())
3543        FS = &CGF.getTarget().getFloat128Format();
3544      else if (value->getType()->isHalfTy())
3545        FS = &CGF.getTarget().getHalfFormat();
3546      else if (value->getType()->isBFloatTy())
3547        FS = &CGF.getTarget().getBFloat16Format();
3548      else if (value->getType()->isPPC_FP128Ty())
3549        FS = &CGF.getTarget().getIbm128Format();
3550      else
3551        FS = &CGF.getTarget().getLongDoubleFormat();
      // +/-1 is exactly representable in every target format, so the
      // conversion cannot actually lose information here.
3552      F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
3553      amt = llvm::ConstantFP::get(VMContext, F);
3554    }
3555    value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
3556
3557    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3558      value = Builder.CreateFPTrunc(value, CGF.CGM.HalfTy, "incdec.conv");
3559      value = Builder.CreateBitCast(value, input->getType());
3560    }
3561
3562  // Fixed-point types.
3563  } else if (type->isFixedPointType()) {
3564    // Fixed-point types are tricky. In some cases, it isn't possible to
3565    // represent a 1 or a -1 in the type at all. Piggyback off of
3566    // EmitFixedPointBinOp to avoid having to reimplement saturation.
3567    BinOpInfo Info;
3568    Info.E = E;
3569    Info.Ty = E->getType();
3570    Info.Opcode = isInc ? BO_Add : BO_Sub;
3571    Info.LHS = value;
3572    Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
3573    // If the type is signed, it's better to represent this as +(-1) or -(-1),
3574    // since -1 is guaranteed to be representable.
3575    if (type->isSignedFixedPointType()) {
3576      Info.Opcode = isInc ? BO_Sub : BO_Add;
3577      Info.RHS = Builder.CreateNeg(Info.RHS);
3578    }
3579    // Now, convert from our invented integer literal to the type of the unary
3580    // op. This will upscale and saturate if necessary. This value can become
3581    // undef in some cases.
3582    llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3583    auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
3584    Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
3585    value = EmitFixedPointBinOp(Info);
3586
3587  // Objective-C pointer types.
3588  } else {
3589    const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
3590
    // ObjC pointer arithmetic steps by the pointee object size, in bytes.
3591    CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
3592    if (!isInc) size = -size;
3593    llvm::Value *sizeValue =
3594      llvm::ConstantInt::getSigned(CGF.SizeTy, size.getQuantity());
3595
3596    if (CGF.getLangOpts().PointerOverflowDefined)
3597      value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
3598    else
3599      value = CGF.EmitCheckedInBoundsGEP(
3600          CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
3601          E->getExprLoc(), "incdec.objptr");
3602    value = Builder.CreateBitCast(value, input->getType());
3603  }
3604
  // Close the cmpxchg retry loop for atomics that had no lock-free fast path:
  // retry with the freshly observed value until the exchange succeeds.
3605  if (atomicPHI) {
3606    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3607    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3608    auto Pair = CGF.EmitAtomicCompareExchange(
3609        LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
3610    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
3611    llvm::Value *success = Pair.second;
3612    atomicPHI->addIncoming(old, curBlock);
3613    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3614    Builder.SetInsertPoint(contBB);
3615    return isPre ? value : input;
3616  }
3617
3618  // Store the updated result through the lvalue.
3619  if (LV.isBitField()) {
3620    Value *Src = Previous ? Previous : value;
3621    CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
3622    CGF.EmitBitfieldConversionCheck(Src, SrcType, value, E->getType(),
3623                                    LV.getBitFieldInfo(), E->getExprLoc());
3624  } else
3625    CGF.EmitStoreThroughLValue(RValue::get(value), LV);
3626
3627  // If this is a postinc, return the value read from memory, otherwise use the
3628  // updated value.
3629  return isPre ? value : input;
3630}
3631
3632
3633Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3634 QualType PromotionType) {
3635 QualType promotionTy = PromotionType.isNull()
3636 ? getPromotionType(E->getSubExpr()->getType())
3637 : PromotionType;
3638 Value *result = VisitPlus(E, promotionTy);
3639 if (result && !promotionTy.isNull())
3640 result = EmitUnPromotedValue(result, E->getType());
3641 return result;
3642}
3643
3644Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3645 QualType PromotionType) {
3646 // This differs from gcc, though, most likely due to a bug in gcc.
3647 TestAndClearIgnoreResultAssign();
3648 if (!PromotionType.isNull())
3649 return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3650 return Visit(E->getSubExpr());
3651}
3652
3653Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3654 QualType PromotionType) {
3655 QualType promotionTy = PromotionType.isNull()
3656 ? getPromotionType(E->getSubExpr()->getType())
3657 : PromotionType;
3658 Value *result = VisitMinus(E, promotionTy);
3659 if (result && !promotionTy.isNull())
3660 result = EmitUnPromotedValue(result, E->getType());
3661 return result;
3662}
3663
3664Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3665 QualType PromotionType) {
3666 TestAndClearIgnoreResultAssign();
3667 Value *Op;
3668 if (!PromotionType.isNull())
3669 Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3670 else
3671 Op = Visit(E->getSubExpr());
3672
3673 // Generate a unary FNeg for FP ops.
3674 if (Op->getType()->isFPOrFPVectorTy())
3675 return Builder.CreateFNeg(Op, "fneg");
3676
3677 // Emit unary minus with EmitSub so we handle overflow cases etc.
3678 BinOpInfo BinOp;
3679 BinOp.RHS = Op;
3680 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
3681 BinOp.Ty = E->getType();
3682 BinOp.Opcode = BO_Sub;
3683 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3684 BinOp.E = E;
3685 return EmitSub(BinOp);
3686}
3687
3688Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3689 TestAndClearIgnoreResultAssign();
3690 Value *Op = Visit(E->getSubExpr());
3691 return Builder.CreateNot(Op, "not");
3692}
3693
3694Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
3695 // Perform vector logical not on comparison with zero vector.
3696 if (E->getType()->isVectorType() &&
3697 E->getType()->castAs<VectorType>()->getVectorKind() ==
3698 VectorKind::Generic) {
3699 Value *Oper = Visit(E->getSubExpr());
3700 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
3701 Value *Result;
3702 if (Oper->getType()->isFPOrFPVectorTy()) {
3703 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
3704 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
3705 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
3706 } else
3707 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
3708 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
3709 }
3710
3711 // Compare operand to zero.
3712 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
3713
3714 // Invert value.
3715 // TODO: Could dynamically modify easy computations here. For example, if
3716 // the operand is an icmp ne, turn into icmp eq.
3717 BoolVal = Builder.CreateNot(BoolVal, "lnot");
3718
3719 // ZExt result to the expr type.
3720 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
3721}
3722
// Emit __builtin_offsetof.  Constant-folds when possible; otherwise walks
// the component list (array subscripts, fields, base classes) accumulating
// a byte offset of the expression's result type.
3723Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
3724  // Try folding the offsetof to a constant.
3725  Expr::EvalResult EVResult;
3726  if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
3727    llvm::APSInt Value = EVResult.Val.getInt();
3728    return Builder.getInt(Value);
3729  }
3730
3731  // Loop over the components of the offsetof to compute the value.
3732  unsigned n = E->getNumComponents();
3733  llvm::Type* ResultType = ConvertType(E->getType());
3734  llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
3735  QualType CurrentType = E->getTypeSourceInfo()->getType();
3736  for (unsigned i = 0; i != n; ++i) {
3737    OffsetOfNode ON = E->getComponent(i);
3738    llvm::Value *Offset = nullptr;
3739    switch (ON.getKind()) {
3740    case OffsetOfNode::Array: {
3741      // Compute the index
3742      Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
3743      llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
3744      bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
3745      Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
3746
3747      // Save the element type
3748      CurrentType =
3749          CGF.getContext().getAsArrayType(CurrentType)->getElementType();
3750
3751      // Compute the element size
3752      llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
3753          CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
3754
3755      // Multiply out to compute the result
3756      Offset = Builder.CreateMul(Idx, ElemSize);
3757      break;
3758    }
3759
3760    case OffsetOfNode::Field: {
3761      FieldDecl *MemberDecl = ON.getField();
3762      auto *RD = CurrentType->castAsRecordDecl();
3763      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
3764
3765      // Get the index of the field in its parent.
3766      unsigned FieldIndex = MemberDecl->getFieldIndex();
3767
3768      // Compute the offset to the field
      // Field offsets come from the record layout in bits; divide down to
      // char units for the byte offset.
3769      int64_t OffsetInt =
3770          RL.getFieldOffset(FieldIndex) / CGF.getContext().getCharWidth();
3771      Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
3772
3773      // Save the element type.
3774      CurrentType = MemberDecl->getType();
3775      break;
3776    }
3777
    // NOTE(review): the case label on source line 3778 is not visible in
    // this capture — presumably OffsetOfNode::Identifier, which is only
    // produced for dependent offsetofs; confirm against upstream.
3779      llvm_unreachable("dependent __builtin_offsetof");
3780
3781    case OffsetOfNode::Base: {
3782      if (ON.getBase()->isVirtual()) {
3783        CGF.ErrorUnsupported(E, "virtual base in offsetof");
3784        continue;
3785      }
3786
3787      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(
3788          CurrentType->castAsCanonical<RecordType>()->getDecl());
3789
3790      // Save the element type.
3791      CurrentType = ON.getBase()->getType();
3792
3793      // Compute the offset to the base.
3794      auto *BaseRD = CurrentType->castAsCXXRecordDecl();
3795      CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
3796      Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
3797      break;
3798    }
3799    }
3800    Result = Builder.CreateAdd(Result, Offset);
3801  }
3802  return Result;
3803}
3804
3805/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
3806/// argument of the sizeof expression as an integer.
3807Value *
3808ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
3809                              const UnaryExprOrTypeTraitExpr *E) {
3810  QualType TypeToSize = E->getTypeOfArgument();
  // sizeof/__datasizeof/_Countof on a VLA require runtime evaluation; all
  // other cases constant-fold at the bottom of this function.
3811  if (auto Kind = E->getKind();
3812      Kind == UETT_SizeOf || Kind == UETT_DataSizeOf || Kind == UETT_CountOf) {
3813    if (const VariableArrayType *VAT =
3814            CGF.getContext().getAsVariableArrayType(TypeToSize)) {
3815      // For _Countof, we only want to evaluate if the extent is actually
3816      // variable as opposed to a multi-dimensional array whose extent is
3817      // constant but whose element type is variable.
3818      bool EvaluateExtent = true;
3819      if (Kind == UETT_CountOf && VAT->getElementType()->isArrayType()) {
3820        EvaluateExtent =
3821            !VAT->getSizeExpr()->isIntegerConstantExpr(CGF.getContext());
3822      }
3823      if (EvaluateExtent) {
3824        if (E->isArgumentType()) {
3825          // sizeof(type) - make sure to emit the VLA size.
3826          CGF.EmitVariablyModifiedType(TypeToSize);
3827        } else {
3828          // C99 6.5.3.4p2: If the argument is an expression of type
3829          // VLA, it is evaluated.
          // NOTE(review): the statement on source line 3830 (presumably the
          // ignored-result emission of the argument expression) is not
          // visible in this capture.
3831        }
3832
3833        // For _Countof, we just want to return the size of a single dimension.
3834        if (Kind == UETT_CountOf)
3835          return CGF.getVLAElements1D(VAT).NumElts;
3836
3837        // For sizeof and __datasizeof, we need to scale the number of elements
3838        // by the size of the array element type.
3839        auto VlaSize = CGF.getVLASize(VAT);
3840
3841        // Scale the number of non-VLA elements by the non-VLA element size.
3842        CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
3843        if (!eltSize.isOne())
3844          return CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize),
3845                                          VlaSize.NumElts);
3846        return VlaSize.NumElts;
3847      }
3848    }
3849  } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
3850    auto Alignment =
3851        CGF.getContext()
        // NOTE(review): source lines 3852-3853 (the conversion of the OpenMP
        // default simd alignment to char units) are not visible in this
        // capture; only the trailing .getQuantity() call survives below.
3854            .getQuantity();
3855    return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
3856  } else if (E->getKind() == UETT_VectorElements) {
3857    auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));
3858    return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());
3859  }
3860
3861  // If this isn't sizeof(vla), the result must be constant; use the constant
3862  // folding logic so we don't have to duplicate it here.
3863  return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
3864}
3865
3866Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3867 QualType PromotionType) {
3868 QualType promotionTy = PromotionType.isNull()
3869 ? getPromotionType(E->getSubExpr()->getType())
3870 : PromotionType;
3871 Value *result = VisitReal(E, promotionTy);
3872 if (result && !promotionTy.isNull())
3873 result = EmitUnPromotedValue(result, E->getType());
3874 return result;
3875}
3876
// Emit the real-part projection for __real: complex glvalues load just the
// real subobject, complex rvalues compute and project, and non-complex
// operands pass through (promoted if requested).
3877Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
3878                                    QualType PromotionType) {
3879  Expr *Op = E->getSubExpr();
3880  if (Op->getType()->isAnyComplexType()) {
3881    // If it's an l-value, load through the appropriate subobject l-value.
3882    // Note that we have to ask E because Op might be an l-value that
3883    // this won't work for, e.g. an Obj-C property.
3884    if (E->isGLValue())  {
3885      if (!PromotionType.isNull()) {
        // NOTE(review): source line 3886 (presumably the complex-pair
        // emission whose arguments continue below) is not visible in this
        // capture.
3887            Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
3888        PromotionType = PromotionType->isAnyComplexType()
3889                            ? PromotionType
3890                            : CGF.getContext().getComplexType(PromotionType);
3891        return result.first ? CGF.EmitPromotedValue(result, PromotionType).first
3892                            : result.first;
3893      }
3894
3895      return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3896          .getScalarVal();
3897    }
3898    // Otherwise, calculate and project.
3899    return CGF.EmitComplexExpr(Op, false, true).first;
3900  }
3901
3902  if (!PromotionType.isNull())
3903    return CGF.EmitPromotedScalarExpr(Op, PromotionType);
3904  return Visit(Op);
3905}
3906
3907Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3908 QualType PromotionType) {
3909 QualType promotionTy = PromotionType.isNull()
3910 ? getPromotionType(E->getSubExpr()->getType())
3911 : PromotionType;
3912 Value *result = VisitImag(E, promotionTy);
3913 if (result && !promotionTy.isNull())
3914 result = EmitUnPromotedValue(result, E->getType());
3915 return result;
3916}
3917
// Emit the imaginary-part projection for __imag: complex glvalues load just
// the imaginary subobject, complex rvalues compute and project, and
// non-complex operands yield zero (after evaluating side effects).
3918Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
3919                                    QualType PromotionType) {
3920  Expr *Op = E->getSubExpr();
3921  if (Op->getType()->isAnyComplexType()) {
3922    // If it's an l-value, load through the appropriate subobject l-value.
3923    // Note that we have to ask E because Op might be an l-value that
3924    // this won't work for, e.g. an Obj-C property.
3925    if (Op->isGLValue()) {
3926      if (!PromotionType.isNull()) {
        // NOTE(review): source line 3927 (presumably the complex-pair
        // emission whose arguments continue below) is not visible in this
        // capture.
3928            Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
3929        PromotionType = PromotionType->isAnyComplexType()
3930                            ? PromotionType
3931                            : CGF.getContext().getComplexType(PromotionType);
3932        return result.second
3933                   ? CGF.EmitPromotedValue(result, PromotionType).second
3934                   : result.second;
3935      }
3936
3937      return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3938          .getScalarVal();
3939    }
3940    // Otherwise, calculate and project.
3941    return CGF.EmitComplexExpr(Op, true, false).second;
3942  }
3943
3944  // __imag on a scalar returns zero. Emit the subexpr to ensure side
3945  // effects are evaluated, but not the actual value.
3946  if (Op->isGLValue())
3947    CGF.EmitLValue(Op);
3948  else if (!PromotionType.isNull())
3949    CGF.EmitPromotedScalarExpr(Op, PromotionType);
3950  else
3951    CGF.EmitScalarExpr(Op, true);
3952  if (!PromotionType.isNull())
3953    return llvm::Constant::getNullValue(ConvertType(PromotionType));
3954  return llvm::Constant::getNullValue(ConvertType(E->getType()));
3955}
3956
3957//===----------------------------------------------------------------------===//
3958// Binary Operators
3959//===----------------------------------------------------------------------===//
3960
3961Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3962 QualType PromotionType) {
3963 return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");
3964}
3965
3966Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3967 QualType ExprType) {
3968 return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType), "unpromotion");
3969}
3970
3971Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3972 E = E->IgnoreParens();
3973 if (auto BO = dyn_cast<BinaryOperator>(E)) {
3974 switch (BO->getOpcode()) {
3975#define HANDLE_BINOP(OP) \
3976 case BO_##OP: \
3977 return Emit##OP(EmitBinOps(BO, PromotionType));
3978 HANDLE_BINOP(Add)
3979 HANDLE_BINOP(Sub)
3980 HANDLE_BINOP(Mul)
3981 HANDLE_BINOP(Div)
3982#undef HANDLE_BINOP
3983 default:
3984 break;
3985 }
3986 } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
3987 switch (UO->getOpcode()) {
3988 case UO_Imag:
3989 return VisitImag(UO, PromotionType);
3990 case UO_Real:
3991 return VisitReal(UO, PromotionType);
3992 case UO_Minus:
3993 return VisitMinus(UO, PromotionType);
3994 case UO_Plus:
3995 return VisitPlus(UO, PromotionType);
3996 default:
3997 break;
3998 }
3999 }
4000 auto result = Visit(const_cast<Expr *>(E));
4001 if (result) {
4002 if (!PromotionType.isNull())
4003 return EmitPromotedValue(result, PromotionType);
4004 else
4005 return EmitUnPromotedValue(result, E->getType());
4006 }
4007 return result;
4008}
4009
4010BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
4011 QualType PromotionType) {
4012 TestAndClearIgnoreResultAssign();
4013 BinOpInfo Result;
4014 Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);
4015 Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);
4016 if (!PromotionType.isNull())
4017 Result.Ty = PromotionType;
4018 else
4019 Result.Ty = E->getType();
4020 Result.Opcode = E->getOpcode();
4021 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
4022 Result.E = E;
4023 return Result;
4024}
4025
4026LValue ScalarExprEmitter::EmitCompoundAssignLValue(
4027 const CompoundAssignOperator *E,
4028 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
4029 Value *&Result) {
4030 QualType LHSTy = E->getLHS()->getType();
4031 BinOpInfo OpInfo;
4032
4035
4036 // Emit the RHS first. __block variables need to have the rhs evaluated
4037 // first, plus this should improve codegen a little.
4038
4039 QualType PromotionTypeCR;
4040 PromotionTypeCR = getPromotionType(E->getComputationResultType());
4041 if (PromotionTypeCR.isNull())
4042 PromotionTypeCR = E->getComputationResultType();
4043 QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
4044 QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
4045 if (!PromotionTypeRHS.isNull())
4046 OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);
4047 else
4048 OpInfo.RHS = Visit(E->getRHS());
4049 OpInfo.Ty = PromotionTypeCR;
4050 OpInfo.Opcode = E->getOpcode();
4051 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
4052 OpInfo.E = E;
4053 // Load/convert the LHS.
4054 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4055
4056 llvm::PHINode *atomicPHI = nullptr;
4057 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
4058 QualType type = atomicTy->getValueType();
4059 if (!type->isBooleanType() && type->isIntegerType() &&
4060 !(type->isUnsignedIntegerType() &&
4061 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
4062 CGF.getLangOpts().getSignedOverflowBehavior() !=
4063 LangOptions::SOB_Trapping) {
4064 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
4065 llvm::Instruction::BinaryOps Op;
4066 switch (OpInfo.Opcode) {
4067 // We don't have atomicrmw operands for *, %, /, <<, >>
4068 case BO_MulAssign: case BO_DivAssign:
4069 case BO_RemAssign:
4070 case BO_ShlAssign:
4071 case BO_ShrAssign:
4072 break;
4073 case BO_AddAssign:
4074 AtomicOp = llvm::AtomicRMWInst::Add;
4075 Op = llvm::Instruction::Add;
4076 break;
4077 case BO_SubAssign:
4078 AtomicOp = llvm::AtomicRMWInst::Sub;
4079 Op = llvm::Instruction::Sub;
4080 break;
4081 case BO_AndAssign:
4082 AtomicOp = llvm::AtomicRMWInst::And;
4083 Op = llvm::Instruction::And;
4084 break;
4085 case BO_XorAssign:
4086 AtomicOp = llvm::AtomicRMWInst::Xor;
4087 Op = llvm::Instruction::Xor;
4088 break;
4089 case BO_OrAssign:
4090 AtomicOp = llvm::AtomicRMWInst::Or;
4091 Op = llvm::Instruction::Or;
4092 break;
4093 default:
4094 llvm_unreachable("Invalid compound assignment type");
4095 }
4096 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
4097 llvm::Value *Amt = CGF.EmitToMemory(
4098 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
4099 E->getExprLoc()),
4100 LHSTy);
4101
4102 llvm::AtomicRMWInst *OldVal =
4103 CGF.emitAtomicRMWInst(AtomicOp, LHSLV.getAddress(), Amt);
4104
4105 // Since operation is atomic, the result type is guaranteed to be the
4106 // same as the input in LLVM terms.
4107 Result = Builder.CreateBinOp(Op, OldVal, Amt);
4108 return LHSLV;
4109 }
4110 }
4111 // FIXME: For floating point types, we should be saving and restoring the
4112 // floating point environment in the loop.
4113 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
4114 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
4115 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
4116 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
4117 Builder.CreateBr(opBB);
4118 Builder.SetInsertPoint(opBB);
4119 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
4120 atomicPHI->addIncoming(OpInfo.LHS, startBB);
4121 OpInfo.LHS = atomicPHI;
4122 }
4123 else
4124 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
4125
4126 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
4127 SourceLocation Loc = E->getExprLoc();
4128 if (!PromotionTypeLHS.isNull())
4129 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
4130 E->getExprLoc());
4131 else
4132 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
4133 E->getComputationLHSType(), Loc);
4134
4135 // Expand the binary operator.
4136 Result = (this->*Func)(OpInfo);
4137
4138 // Convert the result back to the LHS type,
4139 // potentially with Implicit Conversion sanitizer check.
4140 // If LHSLV is a bitfield, use default ScalarConversionOpts
4141 // to avoid emit any implicit integer checks.
4142 Value *Previous = nullptr;
4143 if (LHSLV.isBitField()) {
4144 Previous = Result;
4145 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc);
4146 } else if (const auto *atomicTy = LHSTy->getAs<AtomicType>()) {
4147 Result =
4148 EmitScalarConversion(Result, PromotionTypeCR, atomicTy->getValueType(),
4149 Loc, ScalarConversionOpts(CGF.SanOpts));
4150 } else {
4151 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
4152 ScalarConversionOpts(CGF.SanOpts));
4153 }
4154
4155 if (atomicPHI) {
4156 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
4157 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
4158 auto Pair = CGF.EmitAtomicCompareExchange(
4159 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
4160 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
4161 llvm::Value *success = Pair.second;
4162 atomicPHI->addIncoming(old, curBlock);
4163 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
4164 Builder.SetInsertPoint(contBB);
4165 return LHSLV;
4166 }
4167
4168 // Store the result value into the LHS lvalue. Bit-fields are handled
4169 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
4170 // 'An assignment expression has the value of the left operand after the
4171 // assignment...'.
4172 if (LHSLV.isBitField()) {
4173 Value *Src = Previous ? Previous : Result;
4174 QualType SrcType = E->getRHS()->getType();
4175 QualType DstType = E->getLHS()->getType();
4177 CGF.EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
4178 LHSLV.getBitFieldInfo(), E->getExprLoc());
4179 } else
4181
4182 if (CGF.getLangOpts().OpenMP)
4184 E->getLHS());
4185 return LHSLV;
4186}
4187
4188Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
4189 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
4190 bool Ignore = TestAndClearIgnoreResultAssign();
4191 Value *RHS = nullptr;
4192 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
4193
4194 // If the result is clearly ignored, return now.
4195 if (Ignore)
4196 return nullptr;
4197
4198 // The result of an assignment in C is the assigned r-value.
4199 if (!CGF.getLangOpts().CPlusPlus)
4200 return RHS;
4201
4202 // If the lvalue is non-volatile, return the computed value of the assignment.
4203 if (!LHS.isVolatileQualified())
4204 return RHS;
4205
4206 // Otherwise, reload the value.
4207 return EmitLoadOfLValue(LHS, E->getExprLoc());
4208}
4209
// Emit the -fsanitize checks shared by integer / and %: a divide-by-zero
// check and, for signed operands, the INT_MIN / -1 overflow check.  Both
// enabled checks are accumulated and emitted together via EmitBinOpCheck.
4210void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
4211    const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
4212  SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 2>
4213      Checks;
4214
4215  if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
    // Divide-by-zero: the check passes when the divisor is non-zero.
4216    Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
4217                                    SanitizerKind::SO_IntegerDivideByZero));
4218  }
4219
4220  const auto *BO = cast<BinaryOperator>(Ops.E);
  // NOTE(review): the listing is missing the line between 4224 and 4226
  // (presumably a no-sanitize-list containsType() query) — confirm against
  // the original source.
4221  if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
4222      Ops.Ty->hasSignedIntegerRepresentation() &&
4223      !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
4224      Ops.mayHaveIntegerOverflow() && !Ops.Ty.isWrapType() &&
4226                           SanitizerKind::SignedIntegerOverflow, Ops.Ty)) {
4227    llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
4228
    // Signed overflow occurs only for INT_MIN / -1; the check passes when
    // LHS != INT_MIN or RHS != -1.
4229    llvm::Value *IntMin =
4230        Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
4231    llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
4232
4233    llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
4234    llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
4235    llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
4236    Checks.push_back(
4237        std::make_pair(NotOverflow, SanitizerKind::SO_SignedIntegerOverflow));
4238  }
4239
4240  if (Checks.size() > 0)
4241    EmitBinOpCheck(Checks, Ops);
4242}
4243
// Emit a division.  First emit any sanitizer checks (integer div-by-zero /
// signed overflow, or float div-by-zero), then lower to the appropriate IR:
// matrix scalar-div, fdiv, fixed-point div, udiv, or sdiv.
4244Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
  // Scope limits the SanitizerDebugLocation to the check emission only.
4245  {
4246    SanitizerDebugLocation SanScope(&CGF,
4247                                    {SanitizerKind::SO_IntegerDivideByZero,
4248                                     SanitizerKind::SO_SignedIntegerOverflow,
4249                                     SanitizerKind::SO_FloatDivideByZero},
4250                                    SanitizerHandler::DivremOverflow);
4251    if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
4252         CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
4253        Ops.Ty->isIntegerType() &&
4254        (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4255      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4256      EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
4257    } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
4258               Ops.Ty->isRealFloatingType() &&
4259               Ops.mayHaveFloatDivisionByZero()) {
      // Float divide-by-zero: the check passes when RHS is unordered-not-equal
      // to zero.
4260      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4261      llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
4262      EmitBinOpCheck(
4263          std::make_pair(NonZero, SanitizerKind::SO_FloatDivideByZero), Ops);
4264    }
4265  }
4266
4267  if (Ops.Ty->isConstantMatrixType()) {
4268    llvm::MatrixBuilder MB(Builder);
4269    // We need to check the types of the operands of the operator to get the
4270    // correct matrix dimensions.
4271    auto *BO = cast<BinaryOperator>(Ops.E);
4272    (void)BO;
    // NOTE(review): the assert condition line (4274) is missing from this
    // listing — confirm against the original source.
4273    assert(
4275           "first operand must be a matrix");
4276    assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
4277           "second operand must be an arithmetic type");
4278    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
4279    return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
4280                              Ops.Ty->hasUnsignedIntegerRepresentation());
4281  }
4282
4283  if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
4284    llvm::Value *Val;
4285    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
4286    Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
    // Attach any requested fdiv accuracy metadata (e.g. for OpenCL).
4287    CGF.SetDivFPAccuracy(Val);
4288    return Val;
4289  }
4290  else if (Ops.isFixedPointOp())
4291    return EmitFixedPointBinOp(Ops);
4292  else if (Ops.Ty->hasUnsignedIntegerRepresentation())
4293    return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
4294  else
4295    return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
4296}
4297
4298Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
4299 // Rem in C can't be a floating point type: C99 6.5.5p2.
4300 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
4301 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
4302 Ops.Ty->isIntegerType() &&
4303 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4304 SanitizerDebugLocation SanScope(&CGF,
4305 {SanitizerKind::SO_IntegerDivideByZero,
4306 SanitizerKind::SO_SignedIntegerOverflow},
4307 SanitizerHandler::DivremOverflow);
4308 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4309 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
4310 }
4311
4312 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4313 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
4314
4315 if (CGF.getLangOpts().HLSL && Ops.Ty->hasFloatingRepresentation())
4316 return Builder.CreateFRem(Ops.LHS, Ops.RHS, "rem");
4317
4318 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
4319}
4320
// Emit +, -, or * with an explicit overflow check, using the
// llvm.{s,u}{add,sub,mul}.with.overflow intrinsics.  On overflow, either a
// sanitizer/trap check fires, or (for -ftrapv with a named handler) a runtime
// handler is called and its result is used.
4321Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
4322  unsigned IID;
4323  unsigned OpID = 0;
4324  SanitizerHandler OverflowKind;
4325
  // Select the intrinsic and the handler's operation code from the opcode
  // and signedness.
4326  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
4327  switch (Ops.Opcode) {
4328  case BO_Add:
4329  case BO_AddAssign:
4330    OpID = 1;
4331    IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
4332                     llvm::Intrinsic::uadd_with_overflow;
4333    OverflowKind = SanitizerHandler::AddOverflow;
4334    break;
4335  case BO_Sub:
4336  case BO_SubAssign:
4337    OpID = 2;
4338    IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
4339                     llvm::Intrinsic::usub_with_overflow;
4340    OverflowKind = SanitizerHandler::SubOverflow;
4341    break;
4342  case BO_Mul:
4343  case BO_MulAssign:
4344    OpID = 3;
4345    IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
4346                     llvm::Intrinsic::umul_with_overflow;
4347    OverflowKind = SanitizerHandler::MulOverflow;
4348    break;
4349  default:
4350    llvm_unreachable("Unsupported operation for overflow detection");
4351  }
  // Encode op and signedness into one byte for the runtime handler:
  // (op << 1) | isSigned.
4352  OpID <<= 1;
4353  if (isSigned)
4354    OpID |= 1;
4355
4356  SanitizerDebugLocation SanScope(&CGF,
4357                                  {SanitizerKind::SO_SignedIntegerOverflow,
4358                                   SanitizerKind::SO_UnsignedIntegerOverflow},
4359                                  OverflowKind);
4360  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
4361
4362  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
4363
  // The intrinsic returns {result, overflow-bit}.
4364  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
4365  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
4366  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
4367
4368  // Handle overflow with llvm.trap if no custom handler has been specified.
  // NOTE(review): the initializer line for handlerName (4370, presumably
  // fetching the -ftrapv handler name from LangOpts) is missing from this
  // listing — confirm against the original source.
4369  const std::string *handlerName =
4371  if (handlerName->empty()) {
4372    // If no -ftrapv handler has been specified, try to use sanitizer runtimes
4373    // if available otherwise just emit a trap. It is possible for unsigned
4374    // arithmetic to result in a trap due to the OverflowBehaviorType attribute
4375    // which describes overflow behavior on a per-type basis.
4376    if (isSigned) {
4377      if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
4378        llvm::Value *NotOf = Builder.CreateNot(overflow);
4379        EmitBinOpCheck(
4380            std::make_pair(NotOf, SanitizerKind::SO_SignedIntegerOverflow),
4381            Ops);
4382      } else
4383        CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
4384      return result;
4385    }
4386    if (CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
4387      llvm::Value *NotOf = Builder.CreateNot(overflow);
4388      EmitBinOpCheck(
4389          std::make_pair(NotOf, SanitizerKind::SO_UnsignedIntegerOverflow),
4390          Ops);
4391    } else
4392      CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
4393    return result;
4394  }
4395
4396  // Branch in case of overflow.
4397  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
4398  llvm::BasicBlock *continueBB =
4399      CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
4400  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
4401
4402  Builder.CreateCondBr(overflow, overflowBB, continueBB);
4403
4404  // If an overflow handler is set, then we want to call it and then use its
4405  // result, if it returns.
4406  Builder.SetInsertPoint(overflowBB);
4407
4408  // Get the overflow handler.
  // Handler signature: i64 handler(i64 lhs, i64 rhs, i8 op, i8 width, ...).
4409  llvm::Type *Int8Ty = CGF.Int8Ty;
4410  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
4411  llvm::FunctionType *handlerTy =
4412      llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
4413  llvm::FunctionCallee handler =
4414      CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
4415
4416  // Sign extend the args to 64-bit, so that we can use the same handler for
4417  // all types of overflow.
4418  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
4419  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
4420
4421  // Call the handler with the two arguments, the operation, and the size of
4422  // the result.
4423  llvm::Value *handlerArgs[] = {
4424    lhs,
4425    rhs,
4426    Builder.getInt8(OpID),
4427    Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
4428  };
4429  llvm::Value *handlerResult =
4430      CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
4431
4432  // Truncate the result back to the desired size.
4433  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
4434  Builder.CreateBr(continueBB);
4435
  // Merge the no-overflow result and the handler's result.
4436  Builder.SetInsertPoint(continueBB);
4437  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
4438  phi->addIncoming(result, initialBB);
4439  phi->addIncoming(handlerResult, overflowBB);
4440
4441  return phi;
4442}
4443
4444/// BO_Add/BO_Sub are handled by EmitPointerWithAlignment to preserve alignment
4445/// information.
4446/// This function is used for BO_AddAssign/BO_SubAssign.
static Value *emitPointerArithmetic(CodeGenFunction &CGF, const BinOpInfo &op,
4448                                    bool isSubtraction) {
4449  // Must have binary (not unary) expr here. Unary pointer
4450  // increment/decrement doesn't use this path.
  // NOTE(review): the statement binding 'expr' (presumably a cast of op.E to
  // BinaryOperator) is missing from this listing — confirm against the
  // original source.
4452
4453  Value *pointer = op.LHS;
4454  Expr *pointerOperand = expr->getLHS();
4455  Value *index = op.RHS;
4456  Expr *indexOperand = expr->getRHS();
4457
4458  // In a subtraction, the LHS is always the pointer.
  // For addition, pointer + int may appear in either operand order; swap so
  // that 'pointer'/'pointerOperand' really name the pointer side.
4459  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
4460    std::swap(pointer, index);
4461    std::swap(pointerOperand, indexOperand);
4462  }
4463
  // Delegate the actual GEP emission and overflow checking.
4464  return CGF.EmitPointerArithmetic(expr, pointerOperand, pointer, indexOperand,
4465                                   index, isSubtraction);
4466}
4467
4468/// Emit pointer + index arithmetic.
// NOTE(review): the function-head line (4469, the return type and qualified
// name — this is CodeGenFunction::EmitPointerArithmetic based on the call
// sites above) is missing from this listing — confirm against the original
// source.
4470    const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer,
4471    Expr *indexOperand, llvm::Value *index, bool isSubtraction) {
4472  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
4473
4474  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
4475  auto &DL = CGM.getDataLayout();
4476  auto *PtrTy = cast<llvm::PointerType>(pointer->getType());
4477
4478  // Some versions of glibc and gcc use idioms (particularly in their malloc
4479  // routines) that add a pointer-sized integer (known to be a pointer value)
4480  // to a null pointer in order to cast the value back to an integer or as
4481  // part of a pointer alignment algorithm.  This is undefined behavior, but
4482  // we'd like to be able to compile programs that use it.
4483  //
4484  // Normally, we'd generate a GEP with a null-pointer base here in response
4485  // to that code, but it's also UB to dereference a pointer created that
4486  // way.  Instead (as an acknowledged hack to tolerate the idiom) we will
4487  // generate a direct cast of the integer value to a pointer.
4488  //
4489  // The idiom (p = nullptr + N) is not met if any of the following are true:
4490  //
4491  //   The operation is subtraction.
4492  //   The index is not pointer-sized.
4493  //   The pointer type is not byte-sized.
4494  //
4495  // Note that we do not suppress the pointer overflow check in this case.
  // NOTE(review): the opening of this if (line 4496, presumably a call to
  // isNullPointerArithmeticExtension) is missing from this listing — confirm
  // against the original source.
4497          getContext(), BO->getOpcode(), pointerOperand, indexOperand)) {
4498    llvm::Value *Ptr = Builder.CreateIntToPtr(index, pointer->getType());
4499    if (getLangOpts().PointerOverflowDefined ||
4500        !SanOpts.has(SanitizerKind::PointerOverflow) ||
4501        NullPointerIsDefined(Builder.GetInsertBlock()->getParent(),
4502                             PtrTy->getPointerAddressSpace()))
4503      return Ptr;
4504    // The inbounds GEP of null is valid iff the index is zero.
4505    auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
4506    auto CheckHandler = SanitizerHandler::PointerOverflow;
4507    SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
4508    llvm::Value *IsZeroIndex = Builder.CreateIsNull(index);
4509    llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(BO->getExprLoc())};
4510    llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
4511    llvm::Value *IntPtr = llvm::Constant::getNullValue(IntPtrTy);
4512    llvm::Value *ComputedGEP = Builder.CreateZExtOrTrunc(index, IntPtrTy);
4513    llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
4514    EmitCheck({{IsZeroIndex, CheckOrdinal}}, CheckHandler, StaticArgs,
4515              DynamicArgs);
4516    return Ptr;
4517  }
4518
4519  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
4520    // Zero-extend or sign-extend the pointer value according to
4521    // whether the index is signed or not.
4522    index = Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
4523                                  "idx.ext");
4524  }
4525
4526  // If this is subtraction, negate the index.
4527  if (isSubtraction)
4528    index = Builder.CreateNeg(index, "idx.neg");
4529
4530  if (SanOpts.has(SanitizerKind::ArrayBounds))
4531    EmitBoundsCheck(BO, pointerOperand, index, indexOperand->getType(),
4532                    /*Accessed*/ false);
4533
  // No PointerType means an ObjC object pointer; scale by the object size
  // and index in bytes.
4534  const PointerType *pointerType =
4535      pointerOperand->getType()->getAs<PointerType>();
4536  if (!pointerType) {
    // NOTE(review): the cast between these two lines (4538, presumably
    // castAs<ObjCObjectPointerType>()) is missing from this listing —
    // confirm against the original source.
4537    QualType objectType = pointerOperand->getType()
4539                              ->getPointeeType();
4540    llvm::Value *objectSize =
4541        CGM.getSize(getContext().getTypeSizeInChars(objectType));
4542
4543    index = Builder.CreateMul(index, objectSize);
4544
4545    llvm::Value *result = Builder.CreateGEP(Int8Ty, pointer, index, "add.ptr");
4546    return Builder.CreateBitCast(result, pointer->getType());
4547  }
4548
4549  QualType elementType = pointerType->getPointeeType();
4550  if (const VariableArrayType *vla
4551        = getContext().getAsVariableArrayType(elementType)) {
4552    // The element count here is the total number of non-VLA elements.
4553    llvm::Value *numElements = getVLASize(vla).NumElts;
4554
4555    // Effectively, the multiply by the VLA size is part of the GEP.
4556    // GEP indexes are signed, and scaling an index isn't permitted to
4557    // signed-overflow, so we use the same semantics for our explicit
4558    // multiply.  We suppress this if overflow is not undefined behavior.
4559    llvm::Type *elemTy = ConvertTypeForMem(vla->getElementType());
4560    if (getLangOpts().PointerOverflowDefined) {
4561      index = Builder.CreateMul(index, numElements, "vla.index");
4562      pointer = Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
4563    } else {
4564      index = Builder.CreateNSWMul(index, numElements, "vla.index");
4565      pointer =
4566          EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
4567                                 isSubtraction, BO->getExprLoc(), "add.ptr");
4568    }
4569    return pointer;
4570  }
4571
4572  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
4573  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
4574  // future proof.
4575  llvm::Type *elemTy;
4576  if (elementType->isVoidType() || elementType->isFunctionType())
4577    elemTy = Int8Ty;
4578  else
4579    elemTy = ConvertTypeForMem(elementType);
4580
4581  if (getLangOpts().PointerOverflowDefined)
4582    return Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
4583
4584  return EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned, isSubtraction,
4585                                BO->getExprLoc(), "add.ptr");
4586}
4587
4588// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
4589// Addend. Use negMul and negAdd to negate the first operand of the Mul or
4590// the add operand respectively. This allows fmuladd to represent a*b-c, or
4591// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
4592// efficient operations.
4593static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
4594 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4595 bool negMul, bool negAdd) {
4596 Value *MulOp0 = MulOp->getOperand(0);
4597 Value *MulOp1 = MulOp->getOperand(1);
4598 if (negMul)
4599 MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
4600 if (negAdd)
4601 Addend = Builder.CreateFNeg(Addend, "neg");
4602
4603 Value *FMulAdd = nullptr;
4604 if (Builder.getIsFPConstrained()) {
4605 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
4606 "Only constrained operation should be created when Builder is in FP "
4607 "constrained mode");
4608 FMulAdd = Builder.CreateConstrainedFPCall(
4609 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
4610 Addend->getType()),
4611 {MulOp0, MulOp1, Addend});
4612 } else {
4613 FMulAdd = Builder.CreateCall(
4614 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
4615 {MulOp0, MulOp1, Addend});
4616 }
4617 MulOp->eraseFromParent();
4618
4619 return FMulAdd;
4620}
4621
4622// Check whether it would be legal to emit an fmuladd intrinsic call to
4623// represent op and if so, build the fmuladd.
4624//
4625// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
4626// Does NOT check the type of the operation - it's assumed that this function
4627// will be called from contexts where it's known that the type is contractable.
4628static Value* tryEmitFMulAdd(const BinOpInfo &op,
4629                             const CodeGenFunction &CGF, CGBuilderTy &Builder,
4630                             bool isSub=false) {
4631
4632  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
4633          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
4634         "Only fadd/fsub can be the root of an fmuladd.");
4635
4636  // Check whether this op is marked as fusable.
4637  if (!op.FPFeatures.allowFPContractWithinStatement())
4638    return nullptr;
4639
4640  Value *LHS = op.LHS;
4641  Value *RHS = op.RHS;
4642
4643  // Peek through fneg to look for fmul. Make sure fneg has no users, and that
4644  // it is the only use of its operand.
4645  bool NegLHS = false;
4646  if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
4647    if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
4648        LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
4649      LHS = LHSUnOp->getOperand(0);
4650      NegLHS = true;
4651    }
4652  }
4653
4654  bool NegRHS = false;
4655  if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
4656    if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
4657        RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
4658      RHS = RHSUnOp->getOperand(0);
4659      NegRHS = true;
4660    }
4661  }
4662
4663  // We have a potentially fusable op. Look for a mul on one of the operands.
4664  // Also, make sure that the mul result isn't used directly. In that case,
4665  // there's no point creating a muladd operation.
4666  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
4667    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
4668        (LHSBinOp->use_empty() || NegLHS)) {
4669      // If we looked through fneg, erase it.
4670      if (NegLHS)
4671        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      // (-a*b) + c and a*b + c: the negation is folded into the mul operand.
4672      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
4673    }
4674  }
4675  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
4676    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
4677        (RHSBinOp->use_empty() || NegRHS)) {
4678      // If we looked through fneg, erase it.
4679      if (NegRHS)
4680        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      // c [+-] (-)a*b: a subtraction of a negated mul cancels out, hence
      // isSub ^ NegRHS on the mul and no negation of the addend.
4681      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
4682    }
4683  }
4684
  // Same two patterns, but for the constrained-FP fmul intrinsic.
4685  if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
4686    if (LHSBinOp->getIntrinsicID() ==
4687            llvm::Intrinsic::experimental_constrained_fmul &&
4688        (LHSBinOp->use_empty() || NegLHS)) {
4689      // If we looked through fneg, erase it.
4690      if (NegLHS)
4691        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
4692      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
4693    }
4694  }
4695  if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
4696    if (RHSBinOp->getIntrinsicID() ==
4697            llvm::Intrinsic::experimental_constrained_fmul &&
4698        (RHSBinOp->use_empty() || NegRHS)) {
4699      // If we looked through fneg, erase it.
4700      if (NegRHS)
4701        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
4702      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
4703    }
4704  }
4705
4706  return nullptr;
4707}
4708
// Emit an addition: pointer arithmetic, overflow-aware integer add (honoring
// the per-type overflow behavior and sanitizers), fmuladd fusion, matrix add,
// fadd, or fixed-point add.
4709Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
  // NOTE(review): the body of this if (line 4712, presumably delegating to
  // emitPointerArithmetic for pointer + int) is missing from this listing —
  // confirm against the original source.
4710  if (op.LHS->getType()->isPointerTy() ||
4711      op.RHS->getType()->isPointerTy())
4713
4714  if (op.Ty->isSignedIntegerOrEnumerationType() ||
4715      op.Ty->isUnsignedIntegerType()) {
4716    const bool isSigned = op.Ty->isSignedIntegerOrEnumerationType();
4717    const bool hasSan =
4718        isSigned ? CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)
4719                 : CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow);
    // Pick wrap / nsw / checked lowering from the effective overflow
    // behavior; sanitized cases fall through to the checked form.
4720    switch (getOverflowBehaviorConsideringType(CGF, op.Ty)) {
4721    case LangOptions::OB_Wrap:
4722      return Builder.CreateAdd(op.LHS, op.RHS, "add");
4723    case LangOptions::OB_SignedAndDefined:
4724      if (!hasSan)
4725        return Builder.CreateAdd(op.LHS, op.RHS, "add");
4726      [[fallthrough]];
4727    case LangOptions::OB_Unset:
4728      if (!hasSan)
4729        return isSigned ? Builder.CreateNSWAdd(op.LHS, op.RHS, "add")
4730                        : Builder.CreateAdd(op.LHS, op.RHS, "add");
4731      [[fallthrough]];
4732    case LangOptions::OB_Trap:
      // Skip the runtime check when the operation provably cannot overflow.
4733      if (CanElideOverflowCheck(CGF.getContext(), op))
4734        return isSigned ? Builder.CreateNSWAdd(op.LHS, op.RHS, "add")
4735                        : Builder.CreateAdd(op.LHS, op.RHS, "add");
4736      return EmitOverflowCheckedBinOp(op);
4737    }
4738  }
4739
4740  // For vector and matrix adds, try to fold into a fmuladd.
4741  if (op.LHS->getType()->isFPOrFPVectorTy()) {
4742    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4743    // Try to form an fmuladd.
4744    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
4745      return FMulAdd;
4746  }
4747
4748  if (op.Ty->isConstantMatrixType()) {
4749    llvm::MatrixBuilder MB(Builder);
4750    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4751    return MB.CreateAdd(op.LHS, op.RHS);
4752  }
4753
4754  if (op.LHS->getType()->isFPOrFPVectorTy()) {
4755    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4756    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
4757  }
4758
4759  if (op.isFixedPointOp())
4760    return EmitFixedPointBinOp(op);
4761
4762  return Builder.CreateAdd(op.LHS, op.RHS, "add");
4763}
4764
4765/// The resulting value must be calculated with exact precision, so the operands
4766/// may not be the same type.
4767Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
4768  using llvm::APSInt;
4769  using llvm::ConstantInt;
4770
4771  // This is either a binary operation where at least one of the operands is
4772  // a fixed-point type, or a unary operation where the operand is a fixed-point
4773  // type. The result type of a binary operation is determined by
4774  // Sema::handleFixedPointConversions().
4775  QualType ResultTy = op.Ty;
4776  QualType LHSTy, RHSTy;
4777  if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
4778    RHSTy = BinOp->getRHS()->getType();
4779    if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
4780      // For compound assignment, the effective type of the LHS at this point
4781      // is the computation LHS type, not the actual LHS type, and the final
4782      // result type is not the type of the expression but rather the
4783      // computation result type.
4784      LHSTy = CAO->getComputationLHSType();
4785      ResultTy = CAO->getComputationResultType();
4786    } else
4787      LHSTy = BinOp->getLHS()->getType();
4788  } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
4789    LHSTy = UnOp->getSubExpr()->getType();
4790    RHSTy = UnOp->getSubExpr()->getType();
4791  }
4792  ASTContext &Ctx = CGF.getContext();
4793  Value *LHS = op.LHS;
4794  Value *RHS = op.RHS;
4795
  // Fixed-point semantics (width, scale, signedness, saturation) for each
  // operand and for the result; the common semantics cover both operands.
4796  auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
4797  auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
4798  auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
4799  auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
4800
4801  // Perform the actual operation.
4802  Value *Result;
4803  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
4804  switch (op.Opcode) {
4805  case BO_AddAssign:
4806  case BO_Add:
4807    Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
4808    break;
4809  case BO_SubAssign:
4810  case BO_Sub:
4811    Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
4812    break;
4813  case BO_MulAssign:
4814  case BO_Mul:
4815    Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
4816    break;
4817  case BO_DivAssign:
4818  case BO_Div:
4819    Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
4820    break;
4821  case BO_ShlAssign:
4822  case BO_Shl:
4823    Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
4824    break;
4825  case BO_ShrAssign:
4826  case BO_Shr:
4827    Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
4828    break;
  // Comparisons yield a boolean directly; no result-type conversion needed.
4829  case BO_LT:
4830    return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
4831  case BO_GT:
4832    return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
4833  case BO_LE:
4834    return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4835  case BO_GE:
4836    return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4837  case BO_EQ:
4838    // For equality operations, we assume any padding bits on unsigned types are
4839    // zero'd out. They could be overwritten through non-saturating operations
4840    // that cause overflow, but this leads to undefined behavior.
4841    return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
4842  case BO_NE:
4843    return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4844  case BO_Cmp:
4845  case BO_LAnd:
4846  case BO_LOr:
4847    llvm_unreachable("Found unimplemented fixed point binary operation");
4848  case BO_PtrMemD:
4849  case BO_PtrMemI:
4850  case BO_Rem:
4851  case BO_Xor:
4852  case BO_And:
4853  case BO_Or:
4854  case BO_Assign:
4855  case BO_RemAssign:
4856  case BO_AndAssign:
4857  case BO_XorAssign:
4858  case BO_OrAssign:
4859  case BO_Comma:
4860    llvm_unreachable("Found unsupported binary operation for fixed point types.");
4861  }
4862
  // NOTE(review): the continuation of this expression (line 4864, presumably
  // BinaryOperator::isShiftAssignOp(op.Opcode);) is missing from this
  // listing — confirm against the original source.
4863  bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
4865  // Convert to the result type.
  // Shifts keep the LHS semantics (the RHS is a plain shift amount); all
  // other arithmetic was performed in the common semantics.
4866  return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
4867                                                      : CommonFixedSema,
4868                                      ResultFixedSema);
4869}
4870
// Emit "-" : integer (with overflow-behavior/sanitizer-aware lowering),
// floating-point (with fmuladd folding), matrix, fixed-point, and pointer
// subtraction.  Pointer - pointer computes the raw byte difference and then
// divides by the element size (exact sdiv).
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
  // The LHS is always a pointer if either side is.
  if (!op.LHS->getType()->isPointerTy()) {
    if (op.Ty->isSignedIntegerOrEnumerationType() ||
        op.Ty->isUnsignedIntegerType()) {
      const bool isSigned = op.Ty->isSignedIntegerOrEnumerationType();
      // Is the matching integer-overflow sanitizer active for this
      // signedness?
      const bool hasSan =
          isSigned ? CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)
                   : CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow);
      switch (getOverflowBehaviorConsideringType(CGF, op.Ty)) {
      case LangOptions::OB_Wrap:
        // Wrapping is defined behavior here: plain sub, no poison flags,
        // no runtime checks.
        return Builder.CreateSub(op.LHS, op.RHS, "sub");
      case LangOptions::OB_SignedAndDefined:
        if (!hasSan)
          return Builder.CreateSub(op.LHS, op.RHS, "sub");
        [[fallthrough]];
      case LangOptions::OB_Unset:
        if (!hasSan)
          // No sanitizer: signed sub may carry the nsw flag.
          return isSigned ? Builder.CreateNSWSub(op.LHS, op.RHS, "sub")
                          : Builder.CreateSub(op.LHS, op.RHS, "sub");
        [[fallthrough]];
      case LangOptions::OB_Trap:
        // Elide the runtime check when the operands provably cannot
        // overflow.
        if (CanElideOverflowCheck(CGF.getContext(), op))
          return isSigned ? Builder.CreateNSWSub(op.LHS, op.RHS, "sub")
                          : Builder.CreateSub(op.LHS, op.RHS, "sub");
        return EmitOverflowCheckedBinOp(op);
      }
    }

    // For vector and matrix subs, try to fold into a fmuladd.
    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      // Try to form an fmuladd.
      if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
        return FMulAdd;
    }

    if (op.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return MB.CreateSub(op.LHS, op.RHS);
    }

    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return Builder.CreateFSub(op.LHS, op.RHS, "sub");
    }

    if (op.isFixedPointOp())
      return EmitFixedPointBinOp(op);

    return Builder.CreateSub(op.LHS, op.RHS, "sub");
  }

  // If the RHS is not a pointer, then we have normal pointer
  // arithmetic.
  if (!op.RHS->getType()->isPointerTy())

  // Otherwise, this is a pointer subtraction.

  // Do the raw subtraction part: cast both pointers to ptrdiff_t and
  // subtract to get the distance in bytes.
  llvm::Value *LHS
    = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
  llvm::Value *RHS
    = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
  Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");

  // Okay, figure out the element size.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
  QualType elementType = expr->getLHS()->getType()->getPointeeType();

  llvm::Value *divisor = nullptr;

  // For a variable-length array, this is going to be non-constant.
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    auto VlaSize = CGF.getVLASize(vla);
    elementType = VlaSize.Type;
    divisor = VlaSize.NumElts;

    // Scale the number of non-VLA elements by the non-VLA element size.
    CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
    if (!eltSize.isOne())
      divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);

    // For everything else, we can just compute it, safe in the
    // assumption that Sema won't let anything through that we can't
    // safely compute the size of.
  } else {
    CharUnits elementSize;
    // Handle GCC extension for pointer arithmetic on void* and
    // function pointer types.
    if (elementType->isVoidType() || elementType->isFunctionType())
      elementSize = CharUnits::One();
    else
      elementSize = CGF.getContext().getTypeSizeInChars(elementType);

    // Don't even emit the divide for element size of 1.
    if (elementSize.isOne())
      return diffInChars;

    divisor = CGF.CGM.getSize(elementSize);
  }

  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
  // pointer difference in C is only defined in the case where both operands
  // are pointing to elements of an array.
  return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}
4981
4982Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
4983 bool RHSIsSigned) {
4984 llvm::IntegerType *Ty;
4985 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4986 Ty = cast<llvm::IntegerType>(VT->getElementType());
4987 else
4988 Ty = cast<llvm::IntegerType>(LHS->getType());
4989 // For a given type of LHS the maximum shift amount is width(LHS)-1, however
4990 // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
4991 // this in ConstantInt::get, this results in the value getting truncated.
4992 // Constrain the return value to be max(RHS) in this case.
4993 llvm::Type *RHSTy = RHS->getType();
4994 llvm::APInt RHSMax =
4995 RHSIsSigned ? llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())
4996 : llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
4997 if (RHSMax.ult(Ty->getBitWidth()))
4998 return llvm::ConstantInt::get(RHSTy, RHSMax);
4999 return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
5000}
5001
5002Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
5003 const Twine &Name) {
5004 llvm::IntegerType *Ty;
5005 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
5006 Ty = cast<llvm::IntegerType>(VT->getElementType());
5007 else
5008 Ty = cast<llvm::IntegerType>(LHS->getType());
5009
5010 if (llvm::isPowerOf2_64(Ty->getBitWidth()))
5011 return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);
5012
5013 return Builder.CreateURem(
5014 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
5015}
5016
// Emit "<<".  OpenCL/HLSL mask the shift amount to the LHS width; otherwise,
// when UBSan shift checks are enabled on an integer LHS, emit runtime checks
// that the exponent is in range and (for the shift-base checks) that no
// disallowed bits are shifted out of the value.
Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  // C++20 defines signed left shifts, so the signed-base check is disabled
  // in that mode.
  bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
                            Ops.Ty->hasSignedIntegerRepresentation() &&
                            !CGF.getLangOpts().CPlusPlus20;
  bool SanitizeUnsignedBase =
      CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
      Ops.Ty->hasUnsignedIntegerRepresentation();
  bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
  else if ((SanitizeBase || SanitizeExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    // Collect the ordinals of all checks we will emit so the debug location
    // scope covers them.
    SmallVector<SanitizerKind::SanitizerOrdinal, 3> Ordinals;
    if (SanitizeSignedBase)
      Ordinals.push_back(SanitizerKind::SO_ShiftBase);
    if (SanitizeUnsignedBase)
      Ordinals.push_back(SanitizerKind::SO_UnsignedShiftBase);
    if (SanitizeExponent)
      Ordinals.push_back(SanitizerKind::SO_ShiftExponent);

    SanitizerDebugLocation SanScope(&CGF, Ordinals,
                                    SanitizerHandler::ShiftOutOfBounds);
    SmallVector<std::pair<Value *, SanitizerKind::SanitizerOrdinal>, 2> Checks;
    bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
    // The exponent check compares the *original* (unpromoted) RHS.
    llvm::Value *WidthMinusOne =
        GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);
    llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);

    if (SanitizeExponent) {
      Checks.push_back(
          std::make_pair(ValidExponent, SanitizerKind::SO_ShiftExponent));
    }

    if (SanitizeBase) {
      // Check whether we are shifting any non-zero bits off the top of the
      // integer. We only emit this check if exponent is valid - otherwise
      // instructions below will have undefined behavior themselves.
      llvm::BasicBlock *Orig = Builder.GetInsertBlock();
      llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
      llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
      Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
      // If the RHS was promoted above, recompute the limit in the promoted
      // type; otherwise reuse the constant computed for the exponent check.
      llvm::Value *PromotedWidthMinusOne =
          (RHS == Ops.RHS) ? WidthMinusOne
                           : GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);
      CGF.EmitBlock(CheckShiftBase);
      llvm::Value *BitsShiftedOff = Builder.CreateLShr(
          Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
                                     /*NUW*/ true, /*NSW*/ true),
          "shl.check");
      if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
        // In C99, we are not permitted to shift a 1 bit into the sign bit.
        // Under C++11's rules, shifting a 1 bit into the sign bit is
        // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
        // define signed left shifts, so we use the C99 and C++11 rules there).
        // Unsigned shifts can always shift into the top bit.
        llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
        BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
      }
      llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
      llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
      CGF.EmitBlock(Cont);
      // Merge: the base check trivially passes when the exponent was
      // already invalid (we branched straight from Orig).
      llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
      BaseCheck->addIncoming(Builder.getTrue(), Orig);
      BaseCheck->addIncoming(ValidBase, CheckShiftBase);
      Checks.push_back(std::make_pair(
          BaseCheck, SanitizeSignedBase ? SanitizerKind::SO_ShiftBase
                                        : SanitizerKind::SO_UnsignedShiftBase));
    }

    assert(!Checks.empty());
    EmitBinOpCheck(Checks, Ops);
  }

  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}
5105
5106Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
5107 // TODO: This misses out on the sanitizer check below.
5108 if (Ops.isFixedPointOp())
5109 return EmitFixedPointBinOp(Ops);
5110
5111 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
5112 // RHS to the same size as the LHS.
5113 Value *RHS = Ops.RHS;
5114 if (Ops.LHS->getType() != RHS->getType())
5115 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
5116
5117 // OpenCL 6.3j: shift values are effectively % word size of LHS.
5118 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
5119 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
5120 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
5121 isa<llvm::IntegerType>(Ops.LHS->getType())) {
5122 SanitizerDebugLocation SanScope(&CGF, {SanitizerKind::SO_ShiftExponent},
5123 SanitizerHandler::ShiftOutOfBounds);
5124 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
5125 llvm::Value *Valid = Builder.CreateICmpULE(
5126 Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));
5127 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::SO_ShiftExponent), Ops);
5128 }
5129
5130 if (Ops.Ty->hasUnsignedIntegerRepresentation())
5131 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
5132 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
5133}
5134
5136// return corresponding comparison intrinsic for given vector type
5137static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
5138 BuiltinType::Kind ElemKind) {
5139 switch (ElemKind) {
5140 default: llvm_unreachable("unexpected element type");
5141 case BuiltinType::Char_U:
5142 case BuiltinType::UChar:
5143 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
5144 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
5145 case BuiltinType::Char_S:
5146 case BuiltinType::SChar:
5147 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
5148 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
5149 case BuiltinType::UShort:
5150 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
5151 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
5152 case BuiltinType::Short:
5153 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
5154 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
5155 case BuiltinType::UInt:
5156 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
5157 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
5158 case BuiltinType::Int:
5159 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
5160 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
5161 case BuiltinType::ULong:
5162 case BuiltinType::ULongLong:
5163 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
5164 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
5165 case BuiltinType::Long:
5166 case BuiltinType::LongLong:
5167 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
5168 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
5169 case BuiltinType::Float:
5170 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
5171 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
5172 case BuiltinType::Double:
5173 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
5174 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
5175 case BuiltinType::UInt128:
5176 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
5177 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
5178 case BuiltinType::Int128:
5179 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
5180 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
5181 }
5182}
5183
// Shared lowering for all comparison operators.  The caller passes the
// icmp predicates for unsigned/signed integers, the fcmp predicate, and
// whether a signaling FP compare is wanted.  Handles member pointers,
// AltiVec predicate-style vector comparisons, fixed-point, FP, integer,
// pointer, and complex operands; the scalar result is converted from bool
// to the expression's type.
Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                      llvm::CmpInst::Predicate UICmpOpc,
                                      llvm::CmpInst::Predicate SICmpOpc,
                                      llvm::CmpInst::Predicate FCmpOpc,
                                      bool IsSignaling) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();
  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
    // Member pointers only support equality comparisons; the C++ ABI
    // decides how they are compared.
    assert(E->getOpcode() == BO_EQ ||
           E->getOpcode() == BO_NE);
    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
    BinOpInfo BOInfo = EmitBinOps(E);
    Value *LHS = BOInfo.LHS;
    Value *RHS = BOInfo.RHS;

    // If AltiVec, the comparison results in a numeric type, so we use
    // intrinsics comparing vectors and giving 0 or 1 as a result
    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // constants for mapping CR6 register bits to predicate result
      enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;

      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;

      // in several cases vector arguments order will be reversed
      Value *FirstVecArg = LHS,
            *SecondVecArg = RHS;

      QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
      BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();

      // Pick the predicate intrinsic and the CR6 bit to test; <, <=, and
      // (non-float) >= are expressed by swapping the operands of the
      // greater-than intrinsic.
      switch(E->getOpcode()) {
      default: llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        }
        else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        }
        else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
          std::swap(FirstVecArg, SecondVecArg);
        }
        break;
      }

      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});

      // The result type of intrinsic may not be same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
      // do nothing, if ResultTy is not i1 at the same time, it will cause
      // crash later.
      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
      if (ResultTy->getBitWidth() > 1 &&
          E->getType() == CGF.getContext().BoolTy)
        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                                  E->getExprLoc());
    }

    if (BOInfo.isFixedPointOp()) {
      Result = EmitFixedPointBinOp(BOInfo);
    } else if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
      // Signaling compares (fcmps) raise an invalid-operation exception on
      // quiet NaN operands.
      if (!IsSignaling)
        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
      else
        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.

      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&

        // Dynamic information is required to be stripped for comparisons,
        // because it could leak the dynamic information. Based on comparisons
        // of pointers to dynamic objects, the optimizer can replace one pointer
        // with another, which might be incorrect in presence of invariant
        // groups. Comparison with null is safe because null does not carry any
        // dynamic information.
        if (LHSTy.mayBeDynamicClass())
          LHS = Builder.CreateStripInvariantGroup(LHS);
        if (RHSTy.mayBeDynamicClass())
          RHS = Builder.CreateStripInvariantGroup(RHS);
      }

      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the appropriate
    // vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    // A scalar operand is widened to (value, 0) so both sides compare as
    // complex pairs.
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
      (void)CTy;
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      // As complex comparisons can only be equality comparisons, they
      // are never signaling comparisons.
      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
    }

    // a == b iff both components are equal; a != b iff either differs.
    if (E->getOpcode() == BO_EQ) {
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                              E->getExprLoc());
}
5367
    const BinaryOperator *E, Value **Previous, QualType *SrcType) {
  // In case we have the integer or bitfield sanitizer checks enabled
  // we want to get the expression before scalar conversion.
  if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
    CastKind Kind = ICE->getCastKind();
    if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
      // Hand the pre-conversion value and its type back to the caller so
      // it can run the implicit-conversion (truncation) check against the
      // original RHS rather than the already-converted one.
      *SrcType = ICE->getSubExpr()->getType();
      *Previous = EmitScalarExpr(ICE->getSubExpr());
      // Pass default ScalarConversionOpts to avoid emitting
      // integer sanitizer checks as E refers to bitfield.
      return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
                                  ICE->getExprLoc());
    }
  }
  // No interesting implicit cast on the RHS: emit it the ordinary way.
  return EmitScalarExpr(E->getRHS());
}
5385
// Emit a scalar assignment.  Dispatches on pointer-auth qualification and
// the ObjC ownership qualifier of the LHS, then performs the store (with
// special handling for bit-fields).  In C the expression's value is the
// stored RHS; in C++ a volatile LHS forces a re-load of the lvalue.
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  ApplyAtomGroup Grp(CGF.getDebugInfo());
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  // Pointer-auth qualified LHS: sign the RHS for the destination schema
  // before storing, and strip the qualification again for the result.
  if (PointerAuthQualifier PtrAuth = E->getLHS()->getType().getPointerAuth()) {
    llvm::Value *RV =
        CGF.EmitPointerAuthQualify(PtrAuth, E->getRHS(), LV.getAddress());
    CGF.EmitNullabilityCheck(LV, RV, E->getExprLoc());

    if (Ignore)
      return nullptr;
    RV = CGF.EmitPointerAuthUnqualify(PtrAuth, RV, LV.getType(),
                                      LV.getAddress(), /*nonnull*/ false);
    return RV;
  }

  // ARC ownership qualifiers each have a dedicated store helper that
  // handles retain/release ordering.
  switch (E->getLHS()->getType().getObjCLifetime()) {
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

    // Weak stores: evaluate the RHS first, then register the value with
    // the weak reference at the LHS address.
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
    break;

    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if LHS is a bitfield, if RHS contains an implicit cast expression
    // we want to extract that value and potentially (if the bitfield sanitizer
    // is enabled) use it to check for an implicit conversion.
    if (E->getLHS()->refersToBitField())
      RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
    else
      RHS = Visit(E->getRHS());

    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      Value *Src = Previous ? Previous : RHS;
      QualType DstType = E->getLHS()->getType();
      CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
                                      LHS.getBitFieldInfo(), E->getExprLoc());
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
  }
  // OpenMP: Handle lastprivate(condition:) in scalar assignment
  if (CGF.getLangOpts().OpenMP) {
      E->getLHS());
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}
5480
// Emit "&&".  Vector operands get an element-wise non-short-circuiting
// logical and.  Otherwise: constant-fold the LHS when possible to elide
// either the control flow or the RHS entirely, and in the general case
// short-circuit with basic blocks and merge the result through a phi.
// Profile counters and MC/DC bitmap updates are interleaved throughout
// when instrumentation is enabled.
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  auto HasLHSSkip = CGF.hasSkipCounter(E);
  auto HasRHSSkip = CGF.hasSkipCounter(E->getRHS());

  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {

    // Both sides are evaluated unconditionally: no short-circuit for
    // vectors.  Each operand is compared against zero to produce a bool
    // vector, the bools are and'ed, and the result is sign-extended back
    // to the vector's integer element type.
    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.
      CGF.incrementProfileCounter(CGF.UseExecPath, E, /*UseBoth=*/true);

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.isMCDCDecisionExpr(E))

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
        llvm::BasicBlock *RHSSkip =
            (HasRHSSkip ? CGF.createBasicBlock("land.rhsskip") : FBlock);
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
        Builder.CreateCondBr(RHSCond, RHSBlockCnt, RHSSkip);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.EmitBranch(FBlock);
        if (HasRHSSkip) {
          CGF.EmitBlock(RHSSkip);
        }
        CGF.EmitBlock(FBlock);
      } else
        CGF.markStmtMaybeUsed(E->getRHS());

      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.isMCDCDecisionExpr(E))

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    // (The RHS cannot be elided if it contains a label that could be
    // jumped to.)
    if (!CGF.ContainsLabel(E->getRHS())) {
      CGF.markStmtAsUsed(false, E);
      if (HasLHSSkip)

      CGF.markStmtMaybeUsed(E->getRHS());

      return llvm::Constant::getNullValue(ResTy);
    }
  }

  // General case: real short-circuit control flow.
  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.isMCDCDecisionExpr(E))

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  llvm::BasicBlock *LHSFalseBlock =
      (HasLHSSkip ? CGF.createBasicBlock("land.lhsskip") : ContBlock);

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, LHSFalseBlock,
                           CGF.getProfileCount(E->getRHS()));

  if (HasLHSSkip) {
    CGF.EmitBlock(LHSFalseBlock);
    CGF.EmitBranch(ContBlock);
  }

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  // Evaluate the RHS conditionally (it only runs when the LHS was true).
  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reaquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  llvm::BasicBlock *ContIncoming = RHSBlock;
  if (InstrumentRegions &&
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    llvm::BasicBlock *RHSBlockSkip =
        (HasRHSSkip ? CGF.createBasicBlock("land.rhsskip") : ContBlock);
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, RHSBlockSkip);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
    if (HasRHSSkip) {
      CGF.EmitBlock(RHSBlockSkip);
      CGF.EmitBranch(ContBlock);
      ContIncoming = RHSBlockSkip;
    }
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit line number for unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, ContIncoming);

  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.isMCDCDecisionExpr(E))

  // Artificial location to preserve the scope information
  {
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}
5649
5650Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
5651 auto HasLHSSkip = CGF.hasSkipCounter(E);
5652 auto HasRHSSkip = CGF.hasSkipCounter(E->getRHS());
5653
5654 // Perform vector logical or on comparisons with zero vectors.
5655 if (E->getType()->isVectorType()) {
5657
5658 Value *LHS = Visit(E->getLHS());
5659 Value *RHS = Visit(E->getRHS());
5660 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
5661 if (LHS->getType()->isFPOrFPVectorTy()) {
5662 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
5663 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
5664 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
5665 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
5666 } else {
5667 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
5668 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
5669 }
5670 Value *Or = Builder.CreateOr(LHS, RHS);
5671 return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
5672 }
5673
5674 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
5675 llvm::Type *ResTy = ConvertType(E->getType());
5676
5677 // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
5678 // If we have 0 || X, just emit X without inserting the control flow.
5679 bool LHSCondVal;
5680 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
5681 if (!LHSCondVal) { // If we have 0 || X, just emit X.
5682 CGF.incrementProfileCounter(CGF.UseExecPath, E, /*UseBoth=*/true);
5683
5684 // If the top of the logical operator nest, reset the MCDC temp to 0.
5685 if (CGF.isMCDCDecisionExpr(E))
5687
5688 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5689
5690 // If we're generating for profiling or coverage, generate a branch to a
5691 // block that increments the RHS counter need to track branch condition
5692 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
5693 // "FalseBlock" after the increment is done.
5694 if (InstrumentRegions &&
5696 CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5697 llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
5698 llvm::BasicBlock *RHSSkip =
5699 (HasRHSSkip ? CGF.createBasicBlock("lor.rhsskip") : FBlock);
5700 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
5701 Builder.CreateCondBr(RHSCond, RHSSkip, RHSBlockCnt);
5702 CGF.EmitBlock(RHSBlockCnt);
5704 CGF.EmitBranch(FBlock);
5705 if (HasRHSSkip) {
5706 CGF.EmitBlock(RHSSkip);
5708 }
5709 CGF.EmitBlock(FBlock);
5710 } else
5711 CGF.markStmtMaybeUsed(E->getRHS());
5712
5713 // If the top of the logical operator nest, update the MCDC bitmap.
5714 if (CGF.isMCDCDecisionExpr(E))
5716
5717 // ZExt result to int or bool.
5718 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
5719 }
5720
5721 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
5722 if (!CGF.ContainsLabel(E->getRHS())) {
5723 CGF.markStmtAsUsed(false, E);
5724 if (HasLHSSkip)
5726
5727 CGF.markStmtMaybeUsed(E->getRHS());
5728
5729 return llvm::ConstantInt::get(ResTy, 1);
5730 }
5731 }
5732
5733 // If the top of the logical operator nest, reset the MCDC temp to 0.
5734 if (CGF.isMCDCDecisionExpr(E))
5736
5737 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
5738 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
5739 llvm::BasicBlock *LHSTrueBlock =
5740 (HasLHSSkip ? CGF.createBasicBlock("lor.lhsskip") : ContBlock);
5741
5742 CodeGenFunction::ConditionalEvaluation eval(CGF);
5743
5744 // Branch on the LHS first. If it is true, go to the success (cont) block.
5745 CGF.EmitBranchOnBoolExpr(E->getLHS(), LHSTrueBlock, RHSBlock,
5747 CGF.getProfileCount(E->getRHS()));
5748
5749 if (HasLHSSkip) {
5750 CGF.EmitBlock(LHSTrueBlock);
5752 CGF.EmitBranch(ContBlock);
5753 }
5754
5755 // Any edges into the ContBlock are now from an (indeterminate number of)
5756 // edges from this first condition. All of these values will be true. Start
5757 // setting up the PHI node in the Cont Block for this.
5758 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
5759 "", ContBlock);
5760 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
5761 PI != PE; ++PI)
5762 PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
5763
5764 eval.begin(CGF);
5765
5766 // Emit the RHS condition as a bool value.
5767 CGF.EmitBlock(RHSBlock);
5769 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5770
5771 eval.end(CGF);
5772
5773 // Reaquire the RHS block, as there may be subblocks inserted.
5774 RHSBlock = Builder.GetInsertBlock();
5775
5776 // If we're generating for profiling or coverage, generate a branch on the
5777 // RHS to a block that increments the RHS true counter needed to track branch
5778 // condition coverage.
5779 llvm::BasicBlock *ContIncoming = RHSBlock;
5780 if (InstrumentRegions &&
5782 CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5783 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
5784 llvm::BasicBlock *RHSTrueBlock =
5785 (HasRHSSkip ? CGF.createBasicBlock("lor.rhsskip") : ContBlock);
5786 Builder.CreateCondBr(RHSCond, RHSTrueBlock, RHSBlockCnt);
5787 CGF.EmitBlock(RHSBlockCnt);
5789 CGF.EmitBranch(ContBlock);
5790 PN->addIncoming(RHSCond, RHSBlockCnt);
5791 if (HasRHSSkip) {
5792 CGF.EmitBlock(RHSTrueBlock);
5794 CGF.EmitBranch(ContBlock);
5795 ContIncoming = RHSTrueBlock;
5796 }
5797 }
5798
5799 // Emit an unconditional branch from this block to ContBlock. Insert an entry
5800 // into the phi node for the edge with the value of RHSCond.
5801 CGF.EmitBlock(ContBlock);
5802 PN->addIncoming(RHSCond, ContIncoming);
5803
5804 // If the top of the logical operator nest, update the MCDC bitmap.
5805 if (CGF.isMCDCDecisionExpr(E))
5807
5808 // ZExt result to int.
5809 return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
5810}
5811
5812Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  // C/C++ comma operator: evaluate the LHS purely for its side effects and
  // discard its value; the value of the whole expression is the RHS value.
5813 CGF.EmitIgnoredExpr(E->getLHS());
  // Emitting the ignored LHS can leave the builder without an insertion point
  // (e.g. after a noreturn call); re-establish one before emitting the RHS.
5814 CGF.EnsureInsertPoint();
5815 return Visit(E->getRHS());
5816}
5817
5818//===----------------------------------------------------------------------===//
5819// Other Operators
5820//===----------------------------------------------------------------------===//
5821
5822/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
5823/// expression is cheap enough and side-effect-free enough to evaluate
5824/// unconditionally instead of conditionally. This is used to convert control
5825/// flow into selects in some cases.
// NOTE(review): the opening line of this function's signature
// (isCheapEnoughToEvaluateUnconditionally(const Expr *E, ...)) is elided in
// this rendering; only the trailing parameter and body are visible.
5827 CodeGenFunction &CGF) {
5828 // Anything that is an integer or floating point constant is fine.
5829 return E->IgnoreParens()->isEvaluatable(CGF.getContext());
5830
5831 // Even non-volatile automatic variables can't be evaluated unconditionally.
5832 // Referencing a thread_local may cause non-trivial initialization work to
5833 // occur. If we're inside a lambda and one of the variables is from the scope
5834 // outside the lambda, that function may have returned already. Reading its
5835 // locals is a bad idea. Also, these reads may introduce races that didn't
5836 // exist in the source-level program.
5837}
5838
5839
// Emit a conditional operator (?: / GNU ?:). Strategies, in order:
//  1) constant-folded condition -> emit only the live arm,
//  2) vector condition (OpenCL / SVE) -> emit a select / mask blend,
//  3) cheap side-effect-free arms -> a scalar select without control flow,
//  4) general case -> branch, emit each arm, and merge with a PHI.
// NOTE(review): several source lines are elided in this rendering (gaps in
// the embedded numbering); in particular the statements guarded by the
// `isMCDCDecisionExpr` checks below are missing -- confirm against upstream.
5840Value *ScalarExprEmitter::
5841VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
5842 TestAndClearIgnoreResultAssign();
5843
5844 // Bind the common expression if necessary.
5845 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
5846
5847 Expr *condExpr = E->getCond();
5848 Expr *lhsExpr = E->getTrueExpr();
5849 Expr *rhsExpr = E->getFalseExpr();
5850
5851 // If the condition constant folds and can be elided, try to avoid emitting
5852 // the condition and the dead arm.
5853 bool CondExprBool;
5854 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5855 Expr *live = lhsExpr, *dead = rhsExpr;
5856 if (!CondExprBool) std::swap(live, dead);
5857
5858 // If the dead side doesn't have labels we need, just emit the Live part.
5859 if (!CGF.ContainsLabel(dead)) {
5860 CGF.incrementProfileCounter(CondExprBool ? CGF.UseExecPath
5861 : CGF.UseSkipPath,
5862 E, /*UseBoth=*/true);
5863 Value *Result = Visit(live);
5864 CGF.markStmtMaybeUsed(dead);
5865
5866 // If the live part is a throw expression, it acts like it has a void
5867 // type, so evaluating it returns a null Value*. However, a conditional
5868 // with non-void type must return a non-null Value*.
5869 if (!Result && !E->getType()->isVoidType())
5870 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
5871
5872 return Result;
5873 }
5874 }
5875
5876 // OpenCL: If the condition is a vector, we can treat this condition like
5877 // the select function.
5878 if (CGF.getLangOpts().OpenCL && (condExpr->getType()->isVectorType() ||
5879 condExpr->getType()->isExtVectorType())) {
   // NOTE(review): one statement elided here in this rendering.
5881
5882 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5883 llvm::Value *LHS = Visit(lhsExpr);
5884 llvm::Value *RHS = Visit(rhsExpr);
5885
5886 llvm::Type *condType = ConvertType(condExpr->getType());
5887 auto *vecTy = cast<llvm::FixedVectorType>(condType);
5888
5889 unsigned numElem = vecTy->getNumElements();
5890 llvm::Type *elemType = vecTy->getElementType();
5891
   // Per OpenCL, a vector condition selects on the sign bit (MSB) of each
   // element: build a per-element all-ones/all-zeros mask from `cond < 0`.
5892 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
5893 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
5894 llvm::Value *tmp = Builder.CreateSExt(
5895 TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
5896 llvm::Value *tmp2 = Builder.CreateNot(tmp);
5897
5898 // Cast float to int to perform ANDs if necessary.
5899 llvm::Value *RHSTmp = RHS;
5900 llvm::Value *LHSTmp = LHS;
5901 bool wasCast = false;
5902 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
5903 if (rhsVTy->getElementType()->isFloatingPointTy()) {
5904 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
5905 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
5906 wasCast = true;
5907 }
5908
   // Blend: (RHS & ~mask) | (LHS & mask).
5909 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
5910 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
5911 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
5912 if (wasCast)
5913 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
5914
5915 return tmp5;
5916 }
5917
5918 if (condExpr->getType()->isVectorType() ||
5919 condExpr->getType()->isSveVLSBuiltinType()) {
   // NOTE(review): one statement elided here in this rendering.
5921
5922 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5923 llvm::Value *LHS = Visit(lhsExpr);
5924 llvm::Value *RHS = Visit(rhsExpr);
5925
5926 llvm::Type *CondType = ConvertType(condExpr->getType());
5927 auto *VecTy = cast<llvm::VectorType>(CondType);
5928
   // An i1 mask can feed a select directly.
5929 if (VecTy->getElementType()->isIntegerTy(1))
5930 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
5931
5932 // OpenCL uses the MSB of the mask vector.
5933 llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
5934 if (condExpr->getType()->isExtVectorType())
5935 CondV = Builder.CreateICmpSLT(CondV, ZeroVec, "vector_cond");
5936 else
5937 CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
5938 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
5939 }
5940
5941 // If this is a really simple expression (like x ? 4 : 5), emit this as a
5942 // select instead of as control flow. We can only do this if it is cheap and
5943 // safe to evaluate the LHS and RHS unconditionally.
 // NOTE(review): the guarding `if` (presumably calling
 // isCheapEnoughToEvaluateUnconditionally on both arms) is on lines elided
 // in this rendering; the block below is conditional in the real source.
5947 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
5948 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
5949
5950 CGF.incrementProfileCounter(E, StepV);
5951
5952 llvm::Value *LHS = Visit(lhsExpr);
5953 llvm::Value *RHS = Visit(rhsExpr);
5954 if (!LHS) {
5955 // If the conditional has void type, make sure we return a null Value*.
5956 assert(!RHS && "LHS and RHS types must match");
5957 return nullptr;
5958 }
5959 return Builder.CreateSelect(CondV, LHS, RHS, "cond");
5960 }
5961
5962 // If the top of the logical operator nest, reset the MCDC temp to 0.
5963 if (auto E = CGF.stripCond(condExpr); CGF.isMCDCDecisionExpr(E))
 // NOTE(review): the statement guarded by this `if` is elided in this
 // rendering -- as shown, the next statement would wrongly bind to it.
5965
5966 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
5967 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
5968 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
5969
5970 CodeGenFunction::ConditionalEvaluation eval(CGF);
5971 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
5972 CGF.getProfileCount(lhsExpr));
5973
5974 CGF.EmitBlock(LHSBlock);
5975
5976 // If the top of the logical operator nest, update the MCDC bitmap for the
5977 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5978 // may also contain a boolean expression.
5979 if (auto E = CGF.stripCond(condExpr); CGF.isMCDCDecisionExpr(E))
 // NOTE(review): guarded statement elided in this rendering.
5981
5983 eval.begin(CGF);
5984 Value *LHS = Visit(lhsExpr);
5985 eval.end(CGF);
5986
 // Re-query the insert block: visiting the arm may have created sub-blocks,
 // so the PHI incoming edge must come from the block we actually ended in.
5987 LHSBlock = Builder.GetInsertBlock();
5988 Builder.CreateBr(ContBlock);
5989
5990 CGF.EmitBlock(RHSBlock);
5991
5992 // If the top of the logical operator nest, update the MCDC bitmap for the
5993 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5994 // may also contain a boolean expression.
5995 if (auto E = CGF.stripCond(condExpr); CGF.isMCDCDecisionExpr(E))
 // NOTE(review): guarded statement elided in this rendering.
5997
5999 eval.begin(CGF);
6000 Value *RHS = Visit(rhsExpr);
6001 eval.end(CGF);
6002
6003 RHSBlock = Builder.GetInsertBlock();
6004 CGF.EmitBlock(ContBlock);
6005
6006 // If the LHS or RHS is a throw expression, it will be legitimately null.
6007 if (!LHS)
6008 return RHS;
6009 if (!RHS)
6010 return LHS;
6011
6012 // Create a PHI node to merge the values of the two arms.
6013 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
6014 PN->addIncoming(LHS, LHSBlock);
6015 PN->addIncoming(RHS, RHSBlock);
6016
6017 return PN;
6018}
6019
// __builtin_choose_expr: the choice is resolved at compile time, so emit
// only the chosen sub-expression; the other arm is never evaluated.
6020Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
6021 return Visit(E->getChosenSubExpr());
6022}
6023
// va_arg with scalar result: delegate to the target-specific EmitVAArg and
// unwrap the resulting RValue to a scalar llvm::Value.
6024Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
6025 Address ArgValue = Address::invalid();
6026 RValue ArgPtr = CGF.EmitVAArg(VE, ArgValue);
6027
6028 return ArgPtr.getScalarVal();
6029}
6030
// Emit an Objective-C/Blocks block literal; the scalar value is the block
// pointer produced by EmitBlockLiteral.
6031Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
6032 return CGF.EmitBlockLiteral(block);
6033}
6034
6035// Convert a vec3 to vec4, or vice versa.
// Uses the first NumElementsDst entries of the mask {0, 1, 2, -1}: taking 3
// entries drops the 4th lane; taking 4 appends an undef 4th lane.
// NOTE(review): the signature line (Builder/CGF parameters) is elided in
// this rendering.
6037 Value *Src, unsigned NumElementsDst) {
6038 static constexpr int Mask[] = {0, 1, 2, -1};
6039 return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
6040}
6041
6042// Create cast instructions for converting LLVM value \p Src to LLVM type \p
6043// DstTy. \p Src has the same size as \p DstTy. Both are single value types
6044// but could be scalar or vectors of different lengths, and either can be
6045// pointer.
6046// There are 4 cases:
6047// 1. non-pointer -> non-pointer : needs 1 bitcast
6048// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
6049// 3. pointer -> non-pointer
6050// a) pointer -> intptr_t : needs 1 ptrtoint
6051// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
6052// 4. non-pointer -> pointer
6053// a) intptr_t -> pointer : needs 1 inttoptr
6054// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
6055// Note: for cases 3b and 4b two casts are required since LLVM casts do not
6056// allow casting directly between pointer types and non-integer non-pointer
6057// types.
// NOTE(review): the first line of the signature
// (static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder, ...) per
// the declaration index) is elided in this rendering.
6059 const llvm::DataLayout &DL,
6060 Value *Src, llvm::Type *DstTy,
6061 StringRef Name = "") {
6062 auto SrcTy = Src->getType();
6063
6064 // Case 1.
6065 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
6066 return Builder.CreateBitCast(Src, DstTy, Name);
6067
6068 // Case 2.
6069 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
6070 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
6071
6072 // Case 3.
6073 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
6074 // Case 3b.
6075 if (!DstTy->isIntegerTy())
6076 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
6077 // Cases 3a and 3b.
6078 return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
6079 }
6080
6081 // Case 4b.
6082 if (!SrcTy->isIntegerTy())
6083 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
6084 // Cases 4a and 4b.
6085 return Builder.CreateIntToPtr(Src, DstTy, Name);
6086}
6087
// OpenCL as_type<T>() reinterpret cast: same-size bit reinterpretation, with
// special handling because vec3 occupies vec4 storage.
6088Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
6089 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
6090 llvm::Type *DstTy = ConvertType(E->getType());
6091
6092 llvm::Type *SrcTy = Src->getType();
 // NOTE(review): the conditions of the two ?: operators below (presumably
 // isa<llvm::FixedVectorType>(SrcTy/DstTy) checks) are on lines elided in
 // this rendering.
6093 unsigned NumElementsSrc =
6095 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
6096 : 0;
6097 unsigned NumElementsDst =
6099 ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
6100 : 0;
6101
6102 // Use bit vector expansion for ext_vector_type boolean vectors.
6103 if (E->getType()->isExtVectorBoolType())
6104 return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");
6105
6106 // Going from vec3 to non-vec3 is a special case and requires a shuffle
6107 // vector to get a vec4, then a bitcast if the target type is different.
6108 if (NumElementsSrc == 3 && NumElementsDst != 3) {
6109 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
6110 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
6111 DstTy);
6112
6113 Src->setName("astype");
6114 return Src;
6115 }
6116
6117 // Going from non-vec3 to vec3 is a special case and requires a bitcast
6118 // to vec4 if the original type is not vec4, then a shuffle vector to
6119 // get a vec3.
6120 if (NumElementsSrc != 3 && NumElementsDst == 3) {
6121 auto *Vec4Ty = llvm::FixedVectorType::get(
6122 cast<llvm::VectorType>(DstTy)->getElementType(), 4);
6123 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
6124 Vec4Ty);
6125
6126 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
6127 Src->setName("astype");
6128 return Src;
6129 }
6130
 // General same-size case: a single (possibly pointer-aware) cast suffices.
6131 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
6132 Src, DstTy, "astype");
6133}
6134
// Atomic builtins with scalar result: delegate to EmitAtomicExpr and unwrap
// the RValue to a scalar llvm::Value.
6135Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
6136 return CGF.EmitAtomicExpr(E).getScalarVal();
6137}
6138
6139//===----------------------------------------------------------------------===//
6140// Entry Point into this File
6141//===----------------------------------------------------------------------===//
6142
6143/// Emit the computation of the specified expression of scalar type, ignoring
6144/// the result.
/// Returns the computed llvm::Value. \p IgnoreResultAssign, when true, lets
/// the emitter skip producing the value of an assignment used only for its
/// side effect.
6145Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
6146 assert(E && hasScalarEvaluationKind(E->getType()) &&
6147 "Invalid scalar expression to emit");
6148
6149 return ScalarExprEmitter(*this, IgnoreResultAssign)
6150 .Visit(const_cast<Expr *>(E));
6151}
6152
6153/// Emit a conversion from the specified type to the specified destination type,
6154/// both of which are LLVM scalar types.
// NOTE(review): the first line of the signature
// (Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,)
// is elided in this rendering.
6156 QualType DstTy,
6157 SourceLocation Loc) {
6158 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
6159 "Invalid scalar expression to emit");
6160 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
6161}
6162
6163/// Emit a conversion from the specified complex type to the specified
6164/// destination type, where the destination type is an LLVM scalar type.
// NOTE(review): the first line of the signature (presumably
// Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,)
// is elided in this rendering -- confirm against upstream.
6166 QualType SrcTy,
6167 QualType DstTy,
6168 SourceLocation Loc) {
6169 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
6170 "Invalid complex -> scalar conversion");
6171 return ScalarExprEmitter(*this)
6172 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
6173}
6174
6175
// Emit E either promoted to PromotionType (when non-null) or as a plain
// scalar expression.
// NOTE(review): the line naming this function is elided in this rendering;
// presumably CodeGenFunction::EmitPromotedScalarExpr(const Expr *E, ...) --
// confirm against upstream.
6176Value *
6178 QualType PromotionType) {
6179 if (!PromotionType.isNull())
6180 return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
6181 else
6182 return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
6183}
6184
6185
// Thin CodeGenFunction wrapper delegating ++/-- emission to the emitter.
// NOTE(review): the opening of the signature
// (CodeGenFunction::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue
// LV,) is elided in this rendering.
6188 bool isInc, bool isPre) {
6189 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
6190}
6191
6193 // object->isa or (*object).isa
// NOTE(review): this is the body of CodeGenFunction::EmitObjCIsaExpr per the
// surrounding context; its signature line and the declaration of `Addr`
// (likely `Address Addr = Address::invalid();`) are elided in this rendering.
6194 // Generate code as for: *(Class*)object
6195
6196 Expr *BaseExpr = E->getBase();
 // A prvalue base is already the object pointer; an lvalue base must be
 // loaded via EmitLValue to get the object's address.
6198 if (BaseExpr->isPRValue()) {
6199 llvm::Type *BaseTy =
6201 Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
6202 } else {
6203 Addr = EmitLValue(BaseExpr).getAddress();
6204 }
6205
6206 // Cast the address to Class*.
6207 Addr = Addr.withElementType(ConvertType(E->getType()));
6208 return MakeAddrLValue(Addr, E->getType());
6209}
6210
6211
// Dispatch a compound assignment (+=, -=, ...) to the matching
// ScalarExprEmitter::EmitCompoundAssignLValue helper, returning the LValue
// of the assigned location.
// NOTE(review): the opening signature line
// (LValue CodeGenFunction::EmitCompoundAssignmentLValue() is elided in this
// rendering.
6213 const CompoundAssignOperator *E) {
6215 ScalarExprEmitter Scalar(*this);
6216 Value *Result = nullptr;
6217 switch (E->getOpcode()) {
6218#define COMPOUND_OP(Op) \
6219 case BO_##Op##Assign: \
6220 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
6221 Result)
6222 COMPOUND_OP(Mul);
6223 COMPOUND_OP(Div);
6224 COMPOUND_OP(Rem);
6225 COMPOUND_OP(Add);
6226 COMPOUND_OP(Sub);
6227 COMPOUND_OP(Shl);
6228 COMPOUND_OP(Shr);
 // NOTE(review): one entry (presumably COMPOUND_OP(And);) is elided between
 // Shr and Xor in this rendering -- confirm against upstream.
6230 COMPOUND_OP(Xor);
6231 COMPOUND_OP(Or);
6232#undef COMPOUND_OP
6233
 // All non-compound-assignment opcodes are invalid here.
6234 case BO_PtrMemD:
6235 case BO_PtrMemI:
6236 case BO_Mul:
6237 case BO_Div:
6238 case BO_Rem:
6239 case BO_Add:
6240 case BO_Sub:
6241 case BO_Shl:
6242 case BO_Shr:
6243 case BO_LT:
6244 case BO_GT:
6245 case BO_LE:
6246 case BO_GE:
6247 case BO_EQ:
6248 case BO_NE:
6249 case BO_Cmp:
6250 case BO_And:
6251 case BO_Xor:
6252 case BO_Or:
6253 case BO_LAnd:
6254 case BO_LOr:
6255 case BO_Assign:
6256 case BO_Comma:
6257 llvm_unreachable("Not valid compound assignment operators");
6258 }
6259
6260 llvm_unreachable("Unhandled compound assignment operator");
6261}
6262
// NOTE(review): the struct header (struct GEPOffsetAndOverflow {, per the
// uses below) is elided in this rendering; these are its two members.
6264 // The total (signed) byte offset for the GEP.
6265 llvm::Value *TotalOffset;
6266 // The offset overflow flag - true if the total offset overflows.
6267 llvm::Value *OffsetOverflows;
6268};
6269
6270/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
6271/// and compute the total offset it applies from its base pointer BasePtr.
6272/// Returns offset in bytes and a boolean flag whether an overflow happened
6273/// during evaluation.
// NOTE(review): the first line of the signature
// (static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value
// *GEPVal,) is elided in this rendering.
6275 llvm::LLVMContext &VMContext,
6276 CodeGenModule &CGM,
6277 CGBuilderTy &Builder) {
6278 const auto &DL = CGM.getDataLayout();
6279
6280 // The total (signed) byte offset for the GEP.
6281 llvm::Value *TotalOffset = nullptr;
6282
6283 // Was the GEP already reduced to a constant?
6284 if (isa<llvm::Constant>(GEPVal)) {
6285 // Compute the offset by casting both pointers to integers and subtracting:
6286 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
6287 Value *BasePtr_int =
6288 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
6289 Value *GEPVal_int =
6290 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
6291 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
6292 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
6293 }
6294
6295 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
6296 assert(GEP->getPointerOperand() == BasePtr &&
6297 "BasePtr must be the base of the GEP.");
6298 assert(GEP->isInBounds() && "Expected inbounds GEP");
6299
6300 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
6301
6302 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
6303 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
6304 auto *SAddIntrinsic =
6305 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
6306 auto *SMulIntrinsic =
6307 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
6308
6309 // The offset overflow flag - true if the total offset overflows.
6310 llvm::Value *OffsetOverflows = Builder.getFalse();
6311
6312 /// Return the result of the given binary operation.
 /// Folds constant operands at compile time (recording any statically known
 /// overflow); otherwise emits a checked-arithmetic intrinsic call and ORs
 /// its overflow bit into OffsetOverflows.
6313 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
6314 llvm::Value *RHS) -> llvm::Value * {
6315 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
6316
6317 // If the operands are constants, return a constant result.
6318 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
6319 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
6320 llvm::APInt N;
6321 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
6322 /*Signed=*/true, N);
6323 if (HasOverflow)
6324 OffsetOverflows = Builder.getTrue();
6325 return llvm::ConstantInt::get(VMContext, N);
6326 }
6327 }
6328
6329 // Otherwise, compute the result with checked arithmetic.
6330 auto *ResultAndOverflow = Builder.CreateCall(
6331 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
6332 OffsetOverflows = Builder.CreateOr(
6333 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
6334 return Builder.CreateExtractValue(ResultAndOverflow, 0);
6335 };
6336
6337 // Determine the total byte offset by looking at each GEP operand.
6338 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
6339 GTI != GTE; ++GTI) {
6340 llvm::Value *LocalOffset;
6341 auto *Index = GTI.getOperand();
6342 // Compute the local offset contributed by this indexing step:
6343 if (auto *STy = GTI.getStructTypeOrNull()) {
6344 // For struct indexing, the local offset is the byte position of the
6345 // specified field.
6346 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
6347 LocalOffset = llvm::ConstantInt::get(
6348 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
6349 } else {
6350 // Otherwise this is array-like indexing. The local offset is the index
6351 // multiplied by the element size.
6352 auto *ElementSize =
6353 llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
6354 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
6355 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
6356 }
6357
6358 // If this is the first offset, set it as the total offset. Otherwise, add
6359 // the local offset into the running total.
6360 if (!TotalOffset || TotalOffset == Zero)
6361 TotalOffset = LocalOffset;
6362 else
6363 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
6364 }
6365
6366 return {TotalOffset, OffsetOverflows};
6367}
6368
/// Emit an inbounds GEP over ElemTy/Ptr/IdxList and, when the pointer
/// overflow sanitizer is enabled, also emit the UBSan runtime checks that the
/// GEP neither crosses null nor wraps the address space. Returns the GEP
/// value itself (checks are side effects).
6369Value *
6370CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
6371 ArrayRef<Value *> IdxList,
6372 bool SignedIndices, bool IsSubtraction,
6373 SourceLocation Loc, const Twine &Name) {
6374 llvm::Type *PtrTy = Ptr->getType();
6375
 // With all-unsigned indices and no subtraction, the GEP can also carry the
 // nuw (no unsigned wrap) flag in addition to inbounds.
6376 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6377 if (!SignedIndices && !IsSubtraction)
6378 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6379
6380 Value *GEPVal = Builder.CreateGEP(ElemTy, Ptr, IdxList, Name, NWFlags);
6381
6382 // If the pointer overflow sanitizer isn't enabled, do nothing.
6383 if (!SanOpts.has(SanitizerKind::PointerOverflow))
6384 return GEPVal;
6385
6386 // Perform nullptr-and-offset check unless the nullptr is defined.
6387 bool PerformNullCheck = !NullPointerIsDefined(
6388 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
6389 // Check for overflows unless the GEP got constant-folded,
6390 // and only in the default address space
6391 bool PerformOverflowCheck =
6392 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
6393
6394 if (!(PerformNullCheck || PerformOverflowCheck))
6395 return GEPVal;
6396
6397 const auto &DL = CGM.getDataLayout();
6398
6399 auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
6400 auto CheckHandler = SanitizerHandler::PointerOverflow;
6401 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6402 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
6403
6404 GEPOffsetAndOverflow EvaluatedGEP =
6405 EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
6406
6407 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
6408 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
6409 "If the offset got constant-folded, we don't expect that there was an "
6410 "overflow.");
6411
6412 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
6413
6414 // Common case: if the total offset is zero, don't emit a check.
6415 if (EvaluatedGEP.TotalOffset == Zero)
6416 return GEPVal;
6417
6418 // Now that we've computed the total offset, add it to the base pointer (with
6419 // wrapping semantics).
6420 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
6421 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
6422
6423 llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
6424 2>
6425 Checks;
6426
6427 if (PerformNullCheck) {
6428 // If the base pointer evaluates to a null pointer value,
6429 // the only valid pointer this inbounds GEP can produce is also
6430 // a null pointer, so the offset must also evaluate to zero.
6431 // Likewise, if we have non-zero base pointer, we can not get null pointer
6432 // as a result, so the offset can not be -intptr_t(BasePtr).
6433 // In other words, both pointers are either null, or both are non-null,
6434 // or the behaviour is undefined.
6435 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
6436 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
6437 auto *Valid = Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr);
6438 Checks.emplace_back(Valid, CheckOrdinal);
6439 }
6440
6441 if (PerformOverflowCheck) {
6442 // The GEP is valid if:
6443 // 1) The total offset doesn't overflow, and
6444 // 2) The sign of the difference between the computed address and the base
6445 // pointer matches the sign of the total offset.
6446 llvm::Value *ValidGEP;
6447 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
6448 if (SignedIndices) {
6449 // GEP is computed as `unsigned base + signed offset`, therefore:
6450 // * If offset was positive, then the computed pointer can not be
6451 // [unsigned] less than the base pointer, unless it overflowed.
6452 // * If offset was negative, then the computed pointer can not be
6453 // [unsigned] greater than the base pointer, unless it overflowed.
6454 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
6455 auto *PosOrZeroOffset =
6456 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
6457 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
6458 ValidGEP =
6459 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
6460 } else if (!IsSubtraction) {
6461 // GEP is computed as `unsigned base + unsigned offset`, therefore the
6462 // computed pointer can not be [unsigned] less than base pointer,
6463 // unless there was an overflow.
6464 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
6465 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
6466 } else {
6467 // GEP is computed as `unsigned base - unsigned offset`, therefore the
6468 // computed pointer can not be [unsigned] greater than base pointer,
6469 // unless there was an overflow.
6470 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
6471 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
6472 }
6473 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
6474 Checks.emplace_back(ValidGEP, CheckOrdinal);
6475 }
6476
6477 assert(!Checks.empty() && "Should have produced some checks.");
6478
6479 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
6480 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
6481 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
6482 EmitCheck(Checks, CheckHandler, StaticArgs, DynamicArgs);
6483
6484 return GEPVal;
6485}
6486
// Address-based overload: without the pointer overflow sanitizer it emits a
// plain (nuw where possible) inbounds GEP on the Address; otherwise it
// lowers to the raw-pointer overload above so the checks get emitted.
// NOTE(review): the opening signature line
// (Address CodeGenFunction::EmitCheckedInBoundsGEP() is elided in this
// rendering.
6488 Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
6489 bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
6490 const Twine &Name) {
6491 if (!SanOpts.has(SanitizerKind::PointerOverflow)) {
6492 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6493 if (!SignedIndices && !IsSubtraction)
6494 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6495
6496 return Builder.CreateGEP(Addr, IdxList, elementType, Align, Name, NWFlags);
6497 }
6498
6499 return RawAddress(
6500 EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
6501 IdxList, SignedIndices, IsSubtraction, Loc, Name),
6502 elementType, Align);
6503}
Defines the clang::ASTContext interface.
#define V(N, I)
static llvm::Value * EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, const BinaryOperator *E, llvm::Value *LHS, llvm::Value *RHS, CompareKind Kind, const char *NameSuffix="")
static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue DestVal, LValue SrcVal, SourceLocation Loc)
static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty)
static llvm::Value * EmitIsNegativeTestHelper(Value *V, QualType VType, const char *Name, CGBuilderTy &Builder)
static Value * createCastsForTypeOfSameSize(CGBuilderTy &Builder, const llvm::DataLayout &DL, Value *Src, llvm::Type *DstTy, StringRef Name="")
static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E)
IntrinsicType
@ VCMPGT
@ VCMPEQ
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, BuiltinType::Kind ElemKind)
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal, llvm::LLVMContext &VMContext, CodeGenModule &CGM, CGBuilderTy &Builder)
Evaluate given GEPVal, which is either an inbounds GEP, or a constant, and compute the total offset i...
static bool isDeclRefKnownNonNull(CodeGenFunction &CGF, const ValueDecl *D)
static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(QualType SrcType, QualType DstType)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool negMul, bool negAdd)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, unsigned Off)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF, Value *Src, unsigned NumElementsDst)
static Value * tryEmitFMulAdd(const BinOpInfo &op, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool isSub=false)
static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E, llvm::Value *InVal, bool IsInc, FPOptions FPFeatures)
#define HANDLE_BINOP(OP)
#define COMPOUND_OP(Op)
#define HANDLEBINOP(OP)
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e, CIRGenFunction &cgf)
Return true if the specified expression is cheap enough and side-effect-free enough to evaluate uncon...
static std::optional< QualType > getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e)
If e is a widened promoted integer, get its base (unpromoted) type.
#define VISITCOMP(CODE)
static uint32_t getBitWidth(const Expr *E)
llvm::APSInt APSInt
Definition Compiler.cpp:24
static Decl::Kind getKind(const Decl *D)
FormatToken * Previous
The previous token in the unwrapped line.
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
This file contains the declaration of TrapReasonBuilder and related classes.
llvm::APInt getValue() const
APSInt & getInt()
Definition APValue.h:508
bool isLValue() const
Definition APValue.h:490
bool isInt() const
Definition APValue.h:485
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
CanQualType FloatTy
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
const LangOptions & getLangOpts() const
Definition ASTContext.h:952
bool isTypeIgnoredBySanitizer(const SanitizerMask &Mask, const QualType &Ty) const
Check if a type can have its sanitizer instrumentation elided based on its presence within an ignorel...
CanQualType BoolTy
unsigned getOpenMPDefaultSimdAlign(QualType T) const
Get default simd alignment of the specified complete type in bits.
llvm::FixedPointSemantics getFixedPointSemantics(QualType Ty) const
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getPromotedIntegerType(QualType PromotableType) const
Return the type that PromotableType will promote to: C99 6.3.1.1p2, assuming that PromotableType is a...
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getComplexType(QualType T) const
Return the uniqued reference to the type for a complex number with the specified element type.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:917
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
bool isPromotableIntegerType(QualType T) const
More type predicates useful for type checking/promotion.
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
bool isUnaryOverflowPatternExcluded(const UnaryOperator *UO)
uint64_t getCharWidth() const
Return the size of the character type, in bits.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4534
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4540
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4546
LabelDecl * getLabel() const
Definition Expr.h:4576
uint64_t getValue() const
Definition ExprCXX.h:3045
QualType getElementType() const
Definition TypeBase.h:3784
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:6751
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
static Opcode getOpForCompoundAssignment(Opcode Opc)
Definition Expr.h:4188
bool isCompoundAssignmentOp() const
Definition Expr.h:4185
SourceLocation getExprLoc() const
Definition Expr.h:4082
bool isShiftOp() const
Definition Expr.h:4130
Expr * getRHS() const
Definition Expr.h:4093
bool isShiftAssignOp() const
Definition Expr.h:4199
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:4254
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, const Expr *LHS, const Expr *RHS)
Return true if a binary operator using the specified opcode and operands would match the 'p = (i8*)nu...
Definition Expr.cpp:2205
Opcode getOpcode() const
Definition Expr.h:4086
BinaryOperatorKind Opcode
Definition Expr.h:4046
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition DeclCXX.h:249
bool getValue() const
Definition ExprCXX.h:741
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
bool getValue() const
Definition ExprCXX.h:4333
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition ExprCXX.h:305
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1603
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
path_iterator path_begin()
Definition Expr.h:3749
CastKind getCastKind() const
Definition Expr.h:3723
bool changesVolatileQualification() const
Return.
Definition Expr.h:3813
path_iterator path_end()
Definition Expr.h:3750
Expr * getSubExpr()
Definition Expr.h:3729
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
bool isOne() const
isOne - Test whether the quantity equals one.
Definition CharUnits.h:125
unsigned getValue() const
Definition Expr.h:1632
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4887
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
SanitizerSet SanitizeTrap
Set of sanitizer checks that trap rather than diagnose.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
static Address invalid()
Definition Address.h:176
bool isValid() const
Definition Address.h:177
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:118
virtual llvm::Constant * EmitNullMemberPointer(const MemberPointerType *MPT)
Create a null member pointer of the given type.
Definition CGCXXABI.cpp:102
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition CGCXXABI.cpp:94
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition CGCXXABI.cpp:84
virtual llvm::Value * EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *Src)
Perform a derived-to-base, base-to-derived, or bitcast member pointer conversion.
Definition CGCXXABI.cpp:71
void EmitPseudoVariable(CGBuilderTy &Builder, llvm::Instruction *Value, QualType Ty)
Emit a pseudo variable and debug info for an intermediate value if it does not correspond to a variab...
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
void emitInitListOpaqueValues(CodeGenFunction &CGF, InitListExpr *E)
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
Produce the code for a CK_ARCConsumeObject.
Definition CGObjC.cpp:2179
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
Definition CGObjC.cpp:591
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
CurrentSourceLocExprScope CurSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
llvm::Value * performAddrSpaceCast(llvm::Value *Src, llvm::Type *DestTy)
llvm::Value * EmitARCReclaimReturnedObject(const Expr *e, bool allowUnsafeClaim)
Definition CGObjC.cpp:3116
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
Definition CGObjC.cpp:3706
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given division operation based on CodeGenOpts.
Definition CGExpr.cpp:7192
llvm::Value * EmitObjCSelectorExpr(const ObjCSelectorExpr *E)
Emit a selector.
Definition CGObjC.cpp:275
SanitizerSet SanOpts
Sanitizers enabled for this function.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
llvm::Value * EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E)
Definition CGObjC.cpp:269
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
const CastExpr * CurCast
If a cast expression is being visited, this holds the current cast's expression.
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitObjCProtocolExpr(const ObjCProtocolExpr *E)
Definition CGObjC.cpp:283
llvm::Value * EmitPointerAuthQualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType ValueType, Address StorageAddress, bool IsKnownNonNull)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
Definition CGExpr.cpp:3000
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:4001
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition CGCall.cpp:6453
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
Definition CGExpr.cpp:7293
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void maybeUpdateMCDCTestVectorBitmap(const Expr *E)
Update the MCDC test vector bitmap for the given expression.
void EmitCXXDeleteExpr(const CXXDeleteExpr *E)
llvm::Value * EmitObjCArrayLiteral(const ObjCArrayLiteral *E)
Definition CGObjC.cpp:265
llvm::Value * EmitPromotedScalarExpr(const Expr *E, QualType PromotionType)
const LangOptions & getLangOpts() const
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
Store into a strong object.
Definition CGObjC.cpp:2572
bool isPointerKnownNonNull(const Expr *E)
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
Definition CGClass.cpp:388
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
Definition CGDecl.cpp:765
llvm::Value * EmitPointerAuthUnqualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType PointerType, Address StorageAddress, bool IsKnownNonNull)
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
Emit a compare-and-exchange op for atomic type.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
Definition CGClass.cpp:2979
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitCXXNewExpr(const CXXNewExpr *E)
bool hasSkipCounter(const Stmt *S) const
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
std::pair< LValue, llvm::Value * > EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored)
Definition CGObjC.cpp:3656
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3891
LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E, llvm::Value *&Result)
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition CGExpr.cpp:176
const TargetInfo & getTarget() const
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
llvm::Value * EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty)
Definition CGObjC.cpp:3980
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:251
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:6406
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2497
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
RawAddress CreateIRTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateIRTempWithoutCast - Create a temporary IR object of the given type, with appropriate alignment.
Definition CGExpr.cpp:183
llvm::Value * EmitObjCBoxedExpr(const ObjCBoxedExpr *E)
EmitObjCBoxedExpr - This routine generates code to call the appropriate expression boxing method.
Definition CGObjC.cpp:65
void EmitBoundsCheck(const Expr *ArrayExpr, const Expr *ArrayExprBase, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
Definition CGExpr.cpp:1255
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:232
void maybeResetMCDCCondBitmap(const Expr *E)
Zero-init the MCDC temp value.
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:4149
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6359
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
Definition CGExpr.cpp:2037
llvm::Value * EmitARCRetainScalarExpr(const Expr *expr)
EmitARCRetainScalarExpr - Semantically equivalent to EmitARCRetainObject(e->getType(),...
Definition CGObjC.cpp:3520
llvm::Value * EmitBlockLiteral(const BlockExpr *)
Emit block literal.
Definition CGBlocks.cpp:764
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2223
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6345
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
static const Expr * stripCond(const Expr *C)
Ignore parentheses and logical-NOT to track conditions consistently.
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2738
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4584
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition CGStmt.cpp:557
llvm::AtomicRMWInst * emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Order=llvm::AtomicOrdering::SequentiallyConsistent, llvm::SyncScope::ID SSID=llvm::SyncScope::System, const AtomicExpr *AE=nullptr)
Emit an atomicrmw instruction, and applying relevant metadata when applicable.
llvm::Value * EmitPointerArithmetic(const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer, Expr *indexOperand, llvm::Value *index, bool isSubtraction)
Emit pointer + index arithmetic.
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:273
uint64_t getCurrentProfileCount()
Get the profiler's current count.
llvm::Type * ConvertTypeForMem(QualType T)
RValue EmitAtomicExpr(AtomicExpr *E)
Definition CGAtomic.cpp:912
void markStmtMaybeUsed(const Stmt *S)
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
void FlattenAccessAndTypeLValue(LValue LVal, SmallVectorImpl< LValue > &AccessList)
Definition CGExpr.cpp:7302
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1591
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition CGStmt.cpp:660
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition CGExpr.cpp:1672
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition CGExpr.cpp:189
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
Definition CGExpr.cpp:742
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitBuiltinAvailable(const VersionTuple &Version)
Definition CGObjC.cpp:4060
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
Definition CGExpr.cpp:5154
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4491
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of 'this'.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2257
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
llvm::Value * EmitObjCStringLiteral(const ObjCStringLiteral *E)
Emits an instance of NSConstantString representing the object.
Definition CGObjC.cpp:52
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr)
Try to emit a reference to the given value without producing it as an l-value.
Definition CGExpr.cpp:1934
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1707
llvm::Value * EmitARCExtendBlockObject(const Expr *expr)
Definition CGObjC.cpp:3551
void markStmtAsUsed(bool Skipped, const Stmt *S)
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
i8* @objc_storeWeak(i8** addr, i8* value) Returns value.
Definition CGObjC.cpp:2678
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
bool isMCDCDecisionExpr(const Expr *E) const
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:640
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition CGExpr.cpp:1387
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
TrapReasonBuilder BuildTrapReason(unsigned DiagID, TrapReason &TR)
Helper function to construct a TrapReasonBuilder.
llvm::Constant * getNullPointer(llvm::PointerType *T, QualType QT)
Get target specific null pointer.
const TargetInfo & getTarget() const
llvm::Constant * getMemberPointerConstant(const UnaryOperator *e)
const llvm::DataLayout & getDataLayout() const
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
const CodeGenOptions & getCodeGenOpts() const
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Value * createOpenCLIntToSamplerConversion(const Expr *E, CodeGenFunction &CGF)
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
LValue - This represents an lvalue reference.
Definition CGValue.h:183
bool isBitField() const
Definition CGValue.h:288
bool isVolatileQualified() const
Definition CGValue.h:297
const Qualifiers & getQuals() const
Definition CGValue.h:350
Address getAddress() const
Definition CGValue.h:373
QualType getType() const
Definition CGValue.h:303
const CGBitFieldInfo & getBitFieldInfo() const
Definition CGValue.h:446
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:99
bool isAggregate() const
Definition CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:84
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:72
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4303
QualType getComputationLHSType() const
Definition Expr.h:4337
QualType getComputationResultType() const
Definition Expr.h:4340
bool isSatisfied() const
Whether or not the concept with the given arguments was satisfied when the expression was created.
APValue getAPValueResult() const
Definition Expr.cpp:413
bool hasAPValueResult() const
Definition Expr.h:1160
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4437
unsigned mapRowMajorToColumnMajorFlattenedIndex(unsigned RowMajorIdx) const
Given a row-major flattened index RowMajorIdx, return the equivalent column-major flattened index.
Definition TypeBase.h:4496
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:4812
T * getAttr() const
Definition DeclBase.h:573
ChildElementIter< false > begin()
Definition Expr.h:5235
size_t getDataElementCount() const
Definition Expr.h:5151
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isGLValue() const
Definition Expr.h:287
@ SE_AllowSideEffects
Allow any unmodeled side effect.
Definition Expr.h:677
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3070
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition Expr.h:479
QualType getType() const
Definition Expr.h:144
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3260
llvm::APInt getValue() const
Returns an internal integer representation of the literal.
Definition Expr.h:1578
llvm::APFloat getValue() const
Definition Expr.h:1669
const Expr * getSubExpr() const
Definition Expr.h:1065
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition Expr.h:3856
unsigned getNumInits() const
Definition Expr.h:5332
bool hadArrayRangeDesignator() const
Definition Expr.h:5486
const Expr * getInit(unsigned Init) const
Definition Expr.h:5356
bool isSignedOverflowDefined() const
std::string OverflowHandler
The name of the handler function to be called when -ftrapv is specified.
Represents a matrix type, as defined in the Matrix Types clang extensions.
Definition TypeBase.h:4387
Expr * getBase() const
Definition Expr.h:3444
bool isArrow() const
Definition Expr.h:3551
VersionTuple getVersion() const
Definition ExprObjC.h:1757
ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC 'id' type.
Definition ExprObjC.h:1529
Expr * getBase() const
Definition ExprObjC.h:1554
SourceLocation getExprLoc() const LLVM_READONLY
Definition ExprObjC.h:1577
const ObjCMethodDecl * getMethodDecl() const
Definition ExprObjC.h:1395
QualType getReturnType() const
Definition DeclObjC.h:329
Represents a pointer to an Objective C object.
Definition TypeBase.h:8049
const ObjCObjectType * getObjectType() const
Gets the type pointed to by this ObjC pointer.
Definition TypeBase.h:8086
Expr * getIndexExpr(unsigned Idx)
Definition Expr.h:2589
const OffsetOfNode & getComponent(unsigned Idx) const
Definition Expr.h:2577
TypeSourceInfo * getTypeSourceInfo() const
Definition Expr.h:2570
unsigned getNumComponents() const
Definition Expr.h:2585
unsigned getArrayExprIndex() const
For an array element node, returns the index into the array of expressions.
Definition Expr.h:2482
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition Expr.h:2488
@ Array
An index into an array.
Definition Expr.h:2429
@ Identifier
A field in a dependent type, known only by its name.
Definition Expr.h:2433
@ Field
A field.
Definition Expr.h:2431
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition Expr.h:2436
Kind getKind() const
Determine what kind of offsetof node this is.
Definition Expr.h:2478
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition Expr.h:2498
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:1211
Expr * getSelectedExpr() const
Definition ExprCXX.h:4640
const Expr * getSubExpr() const
Definition Expr.h:2202
Pointer-authentication qualifiers.
Definition TypeBase.h:152
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3378
A (possibly-)qualified type.
Definition TypeBase.h:937
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:1459
bool mayBeDynamicClass() const
Returns true if it is a class and it might be dynamic.
Definition Type.cpp:132
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8431
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1444
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8616
QualType getCanonicalType() const
Definition TypeBase.h:8483
bool UseExcessPrecision(const ASTContext &Ctx)
Definition Type.cpp:1626
bool mayBeNotDynamicClass() const
Returns true if it is not a class or if the class might not be dynamic.
Definition Type.cpp:137
bool isCanonical() const
Definition TypeBase.h:8488
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
void removePointerAuth()
Definition TypeBase.h:610
bool isSatisfied() const
Whether or not the requires clause is satisfied.
std::string ComputeName(ASTContext &Context) const
Definition Expr.cpp:587
static constexpr SanitizerMask bitPosToMask(const unsigned Pos)
Create a mask with a bit enabled at position Pos.
Definition Sanitizers.h:59
llvm::APSInt getShuffleMaskIdx(unsigned N) const
Definition Expr.h:4698
unsigned getNumSubExprs() const
getNumSubExprs - Return the size of the SubExprs array.
Definition Expr.h:4679
Expr * getExpr(unsigned Index)
getExpr - Return the Expr at the specified index.
Definition Expr.h:4685
unsigned getPackLength() const
Retrieve the length of the parameter pack.
Definition ExprCXX.h:4516
APValue EvaluateInContext(const ASTContext &Ctx, const Expr *DefaultExpr) const
Return the result of evaluating this SourceLocExpr in the specified (and possibly null) default argum...
Definition Expr.cpp:2282
SourceLocation getLocation() const
Definition Expr.h:5064
Encodes a location in the source.
CompoundStmt * getSubStmt()
Definition Expr.h:4615
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
void dump() const
Dumps the specified AST fragment and all subtrees to llvm::errs().
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:355
virtual bool useFP16ConversionIntrinsics() const
Check whether conversions to and from __fp16 should go through an integer bitcast with i16.
VersionTuple getPlatformMinVersion() const
Retrieve the minimum desired version of the platform, to which the program should be compiled.
const llvm::fltSemantics & getHalfFormat() const
Definition TargetInfo.h:789
const llvm::fltSemantics & getBFloat16Format() const
Definition TargetInfo.h:799
const llvm::fltSemantics & getLongDoubleFormat() const
Definition TargetInfo.h:810
const llvm::fltSemantics & getFloat128Format() const
Definition TargetInfo.h:818
const llvm::fltSemantics & getIbm128Format() const
Definition TargetInfo.h:826
QualType getType() const
Return the type wrapped by this type source info.
Definition TypeBase.h:8413
bool getBoolValue() const
Definition ExprCXX.h:2948
const APValue & getAPValue() const
Definition ExprCXX.h:2953
bool isStoredAsBoolean() const
Definition ExprCXX.h:2944
bool isVoidType() const
Definition TypeBase.h:9034
bool isBooleanType() const
Definition TypeBase.h:9171
bool isSignableType(const ASTContext &Ctx) const
Definition TypeBase.h:8680
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2254
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition Type.cpp:2308
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2375
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:9078
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9328
bool isReferenceType() const
Definition TypeBase.h:8692
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1923
bool isSveVLSBuiltinType() const
Determines if this is a sizeless type supported by the 'arm_sve_vector_bits' type attribute,...
Definition Type.cpp:2652
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:754
bool isExtVectorType() const
Definition TypeBase.h:8811
bool isExtVectorBoolType() const
Definition TypeBase.h:8815
bool isOCLIntelSubgroupAVCType() const
Definition TypeBase.h:8953
bool isBuiltinType() const
Helper methods to distinguish type categories.
Definition TypeBase.h:8791
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8803
bool isFixedPointType() const
Return true if this is a fixed point type according to ISO/IEC JTC1 SC22 WG14 N1169.
Definition TypeBase.h:9094
bool isHalfType() const
Definition TypeBase.h:9038
bool hasSignedIntegerRepresentation() const
Determine whether this type has an signed integer representation of some sort, e.g....
Definition Type.cpp:2275
bool isQueueT() const
Definition TypeBase.h:8924
bool isMatrixType() const
Definition TypeBase.h:8831
bool isEventT() const
Definition TypeBase.h:8916
bool isFunctionType() const
Definition TypeBase.h:8664
bool isVectorType() const
Definition TypeBase.h:8807
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2358
bool isFloatingType() const
Definition Type.cpp:2342
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2285
const T * castAsCanonical() const
Return this type's canonical type cast to the specified type.
Definition TypeBase.h:2978
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9261
bool isNullPtrType() const
Definition TypeBase.h:9071
QualType getTypeOfArgument() const
Gets the argument type, or the type of the argument expression, whichever is appropriate.
Definition Expr.h:2697
UnaryExprOrTypeTrait getKind() const
Definition Expr.h:2660
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
SourceLocation getExprLoc() const
Definition Expr.h:2371
Expr * getSubExpr() const
Definition Expr.h:2288
Opcode getOpcode() const
Definition Expr.h:2283
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:2403
bool canOverflow() const
Returns true if the unary operator can cause an overflow.
Definition Expr.h:2301
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
bool isWeak() const
Determine whether this symbol is weakly-imported, or declared with the weak or weak-ref attr.
Definition Decl.cpp:5582
QualType getType() const
Definition Value.cpp:237
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:4016
Represents a GCC generic vector type.
Definition TypeBase.h:4225
Defines the clang::TargetInfo interface.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::ArgumentAdaptingMatcherFunc< internal::HasMatcher > has
Matches AST nodes that have child AST nodes that match the provided matcher.
const AstTypeMatcher< PointerType > pointerType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
bool BitCast(InterpState &S, CodePtr OpPC)
Definition Interp.h:3711
llvm::APFloat APFloat
Definition Floating.h:27
llvm::APInt APInt
Definition FixedPoint.h:19
bool LE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1341
bool Load(InterpState &S, CodePtr OpPC)
Definition Interp.h:1999
bool GE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1356
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
if(T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T -> getSizeExpr())))
@ Result
The result type of a method or function.
Definition TypeBase.h:905
CastKind
CastKind - The kind of operation required for a conversion.
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
#define false
Definition stdbool.h:26
#define true
Definition stdbool.h:25
llvm::Value * TotalOffset
llvm::Value * OffsetOverflows
Structure with information about how a bitfield should be accessed.
unsigned Size
The total size of the bit-field, in bits.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::Type * HalfTy
half, bfloat, float, double
static TBAAAccessInfo getMayAliasInfo()
Definition CodeGenTBAA.h:63
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition Sanitizers.h:184