clang 23.0.0git
CGExprScalar.cpp
Go to the documentation of this file.
1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGHLSLRuntime.h"
17#include "CGObjCRuntime.h"
18#include "CGOpenMPRuntime.h"
19#include "CGRecordLayout.h"
20#include "CodeGenFunction.h"
21#include "CodeGenModule.h"
22#include "ConstantEmitter.h"
23#include "TargetInfo.h"
24#include "TrapReasonBuilder.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/DeclObjC.h"
28#include "clang/AST/Expr.h"
35#include "llvm/ADT/APFixedPoint.h"
36#include "llvm/ADT/ScopeExit.h"
37#include "llvm/IR/Argument.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constants.h"
40#include "llvm/IR/DataLayout.h"
41#include "llvm/IR/DerivedTypes.h"
42#include "llvm/IR/FixedPointBuilder.h"
43#include "llvm/IR/Function.h"
44#include "llvm/IR/GEPNoWrapFlags.h"
45#include "llvm/IR/GetElementPtrTypeIterator.h"
46#include "llvm/IR/GlobalVariable.h"
47#include "llvm/IR/Intrinsics.h"
48#include "llvm/IR/IntrinsicsPowerPC.h"
49#include "llvm/IR/MatrixBuilder.h"
50#include "llvm/IR/Module.h"
51#include "llvm/Support/TypeSize.h"
52#include <cstdarg>
53#include <optional>
54
55using namespace clang;
56using namespace CodeGen;
57using llvm::Value;
58
59//===----------------------------------------------------------------------===//
60// Scalar Expression Emitter
61//===----------------------------------------------------------------------===//
62
63namespace llvm {
64extern cl::opt<bool> EnableSingleByteCoverage;
65} // namespace llvm
66
67namespace {
68
69/// Determine whether the given binary operation may overflow.
70/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
71/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
72/// the returned overflow check is precise. The returned value is 'true' for
73/// all other opcodes, to be conservative.
74bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
75 BinaryOperator::Opcode Opcode, bool Signed,
76 llvm::APInt &Result) {
77 // Assume overflow is possible, unless we can prove otherwise.
78 bool Overflow = true;
79 const auto &LHSAP = LHS->getValue();
80 const auto &RHSAP = RHS->getValue();
81 if (Opcode == BO_Add) {
82 Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
83 : LHSAP.uadd_ov(RHSAP, Overflow);
84 } else if (Opcode == BO_Sub) {
85 Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
86 : LHSAP.usub_ov(RHSAP, Overflow);
87 } else if (Opcode == BO_Mul) {
88 Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
89 : LHSAP.umul_ov(RHSAP, Overflow);
90 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
91 if (Signed && !RHS->isZero())
92 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
93 else
94 return false;
95 }
96 return Overflow;
97}
98
99struct BinOpInfo {
100 Value *LHS;
101 Value *RHS;
102 QualType Ty; // Computation Type.
103 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
104 FPOptions FPFeatures;
105 const Expr *E; // Entire expr, for error unsupported. May not be binop.
106
107 /// Check if the binop can result in integer overflow.
108 bool mayHaveIntegerOverflow() const {
109 // Without constant input, we can't rule out overflow.
110 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
111 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
112 if (!LHSCI || !RHSCI)
113 return true;
114
115 llvm::APInt Result;
116 return ::mayHaveIntegerOverflow(
117 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
118 }
119
120 /// Check if the binop computes a division or a remainder.
121 bool isDivremOp() const {
122 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
123 Opcode == BO_RemAssign;
124 }
125
126 /// Check if the binop can result in an integer division by zero.
127 bool mayHaveIntegerDivisionByZero() const {
128 if (isDivremOp())
129 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
130 return CI->isZero();
131 return true;
132 }
133
134 /// Check if the binop can result in a float division by zero.
135 bool mayHaveFloatDivisionByZero() const {
136 if (isDivremOp())
137 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
138 return CFP->isZero();
139 return true;
140 }
141
142 /// Check if at least one operand is a fixed point type. In such cases, this
143 /// operation did not follow usual arithmetic conversion and both operands
144 /// might not be of the same type.
145 bool isFixedPointOp() const {
146 // We cannot simply check the result type since comparison operations return
147 // an int.
148 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
149 QualType LHSType = BinOp->getLHS()->getType();
150 QualType RHSType = BinOp->getRHS()->getType();
151 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
152 }
153 if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
154 return UnOp->getSubExpr()->getType()->isFixedPointType();
155 return false;
156 }
157
158 /// Check if the RHS has a signed integer representation.
159 bool rhsHasSignedIntegerRepresentation() const {
160 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
161 QualType RHSType = BinOp->getRHS()->getType();
162 return RHSType->hasSignedIntegerRepresentation();
163 }
164 return false;
165 }
166};
167
168static bool MustVisitNullValue(const Expr *E) {
169 // If a null pointer expression's type is the C++0x nullptr_t, then
170 // it's not necessarily a simple constant and it must be evaluated
171 // for its potential side effects.
172 return E->getType()->isNullPtrType();
173}
174
175/// If \p E is a widened promoted integer, get its base (unpromoted) type.
176static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
177 const Expr *E) {
178 const Expr *Base = E->IgnoreImpCasts();
179 if (E == Base)
180 return std::nullopt;
181
182 QualType BaseTy = Base->getType();
183 if (!Ctx.isPromotableIntegerType(BaseTy) ||
184 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
185 return std::nullopt;
186
187 return BaseTy;
188}
189
190/// Check if \p E is a widened promoted integer.
191static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
192 return getUnwidenedIntegerType(Ctx, E).has_value();
193}
194
/// Consider OverflowBehaviorType and language options to calculate the final
/// overflow behavior for an expression. There are no language options for
/// unsigned overflow semantics so there is nothing to consider there.
// NOTE(review): the function's result-type line (original line 198) was lost
// when this listing was extracted; confirm the declared return type against
// the original source before editing.
getOverflowBehaviorConsideringType(const CodeGenFunction &CGF,
                                   const QualType Ty) {
  // An explicit OverflowBehaviorType attached to the type takes precedence
  // over any language-level overflow setting.
  const OverflowBehaviorType *OBT = Ty->getAs<OverflowBehaviorType>();
  /// FIXME: Having two enums named `OverflowBehaviorKind` is not ideal, these
  /// should be unified into one coherent enum that supports both unsigned and
  /// signed overflow behavior semantics.
  if (OBT) {
    switch (OBT->getBehaviorKind()) {
    case OverflowBehaviorType::OverflowBehaviorKind::Wrap:
      // NOTE(review): the return for this case (original line 208) is
      // missing from this extraction.
    case OverflowBehaviorType::OverflowBehaviorKind::Trap:
      // NOTE(review): the return for this case (original line 210) is
      // missing from this extraction.
    }
    llvm_unreachable("Unknown OverflowBehaviorKind");
  }

  if (Ty->isUnsignedIntegerType()) {
    // NOTE(review): the unsigned-type return (original line 216) is missing
    // from this extraction.
  }

  // Otherwise fall back to the language-level signed-overflow setting
  // (-fwrapv / -ftrapv / default).
  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
    // NOTE(review): the case labels and their returns (original lines
    // 220-225) are missing from this extraction.
  }
  llvm_unreachable("Unknown SignedOverflowBehaviorTy");
}
229
230/// Check if we can skip the overflow check for \p Op.
231static bool CanElideOverflowCheck(ASTContext &Ctx, const BinOpInfo &Op) {
232 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
233 "Expected a unary or binary operator");
234
235 // If the binop has constant inputs and we can prove there is no overflow,
236 // we can elide the overflow check.
237 if (!Op.mayHaveIntegerOverflow())
238 return true;
239
240 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Op.E);
241 if (UO && Ctx.isUnaryOverflowPatternExcluded(UO))
242 return true;
243
244 const auto *BO = dyn_cast<BinaryOperator>(Op.E);
245 if (BO && BO->hasExcludedOverflowPattern())
246 return true;
247
248 if (Op.Ty.isWrapType())
249 return true;
250 if (Op.Ty.isTrapType())
251 return false;
252
253 if (Op.Ty->isSignedIntegerType() &&
254 Ctx.isTypeIgnoredBySanitizer(SanitizerKind::SignedIntegerOverflow,
255 Op.Ty)) {
256 return true;
257 }
258
259 if (Op.Ty->isUnsignedIntegerType() &&
260 Ctx.isTypeIgnoredBySanitizer(SanitizerKind::UnsignedIntegerOverflow,
261 Op.Ty)) {
262 return true;
263 }
264
265 // If a unary op has a widened operand, the op cannot overflow.
266 if (UO)
267 return !UO->canOverflow();
268
269 // We usually don't need overflow checks for binops with widened operands.
270 // Multiplication with promoted unsigned operands is a special case.
271 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
272 if (!OptionalLHSTy)
273 return false;
274
275 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
276 if (!OptionalRHSTy)
277 return false;
278
279 QualType LHSTy = *OptionalLHSTy;
280 QualType RHSTy = *OptionalRHSTy;
281
282 // This is the simple case: binops without unsigned multiplication, and with
283 // widened operands. No overflow check is needed here.
284 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
285 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
286 return true;
287
288 // For unsigned multiplication the overflow check can be elided if either one
289 // of the unpromoted types are less than half the size of the promoted type.
290 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
291 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
292 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
293}
294
295class ScalarExprEmitter
296 : public StmtVisitor<ScalarExprEmitter, Value*> {
297 CodeGenFunction &CGF;
298 CGBuilderTy &Builder;
299 bool IgnoreResultAssign;
300 llvm::LLVMContext &VMContext;
301public:
302
  /// Construct an emitter over \p cgf's builder.  \p ira seeds the one-shot
  /// IgnoreResultAssign flag (see TestAndClearIgnoreResultAssign).
  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// Read and clear the IgnoreResultAssign flag; it applies only once.
  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  // Thin forwarding helpers into CodeGenFunction.
  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  // Emit the given sanitizer checks for a binary operation (defined
  // elsewhere in this file).
  void EmitBinOpCheck(
      ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
      const BinOpInfo &Info);

  /// Load the scalar value of \p LV at \p Loc.
  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }
331
  /// If \p E's declaration or its (typedef'd) type carries an align_value
  /// attribute, emit an alignment assumption for the already-emitted value
  /// \p V.  Otherwise this is a no-op.
  void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
    const AlignValueAttr *AVAttr = nullptr;
    if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
      const ValueDecl *VD = DRE->getDecl();

      if (VD->getType()->isReferenceType()) {
        // For references, look for the attribute on the referenced typedef
        // type rather than on the declaration itself.
        if (const auto *TTy =
                VD->getType().getNonReferenceType()->getAs<TypedefType>())
          AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
      } else {
        // Assumptions for function parameters are emitted at the start of the
        // function, so there is no need to repeat that here,
        // unless the alignment-assumption sanitizer is enabled,
        // then we prefer the assumption over alignment attribute
        // on IR function param.
        if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
          return;

        AVAttr = VD->getAttr<AlignValueAttr>();
      }
    }

    // No declaration attribute: fall back to an attribute on the
    // expression's typedef'd type.
    if (!AVAttr)
      if (const auto *TTy = E->getType()->getAs<TypedefType>())
        AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();

    if (!AVAttr)
      return;

    // The attribute's alignment argument is expected to fold to a constant
    // integer here (the cast<> asserts this).
    Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
    llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
    CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
  }
365
366 /// EmitLoadOfLValue - Given an expression with complex type that represents a
367 /// value l-value, this method emits the address of the l-value, then loads
368 /// and returns the result.
369 Value *EmitLoadOfLValue(const Expr *E) {
370 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
371 E->getExprLoc());
372
373 EmitLValueAlignmentAssumption(E, V);
374 return V;
375 }
376
  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// This is used for bitfield conversion checks as well.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  // The explicit numbering is part of the ABI shared with the UBSan runtime;
  // do not renumber.
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc,
                                  bool OBTrapInvolved = false);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc,
                                  bool OBTrapInvolved = false);
410
411 /// Emit a conversion from the specified type to the specified destination
412 /// type, both of which are LLVM scalar types.
413 struct ScalarConversionOpts {
414 bool TreatBooleanAsSigned;
415 bool EmitImplicitIntegerTruncationChecks;
416 bool EmitImplicitIntegerSignChangeChecks;
417 /* Potential -fsanitize-undefined-ignore-overflow-pattern= */
418 bool PatternExcluded;
419
420 ScalarConversionOpts()
421 : TreatBooleanAsSigned(false),
422 EmitImplicitIntegerTruncationChecks(false),
423 EmitImplicitIntegerSignChangeChecks(false), PatternExcluded(false) {}
424
425 ScalarConversionOpts(clang::SanitizerSet SanOpts)
426 : TreatBooleanAsSigned(false),
427 EmitImplicitIntegerTruncationChecks(
428 SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
429 EmitImplicitIntegerSignChangeChecks(
430 SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)),
431 PatternExcluded(false) {}
432 };
433 Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
434 llvm::Type *SrcTy, llvm::Type *DstTy,
435 ScalarConversionOpts Opts);
436 Value *
437 EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
438 SourceLocation Loc,
439 ScalarConversionOpts Opts = ScalarConversionOpts());
440
441 /// Convert between either a fixed point and other fixed point or fixed point
442 /// and an integer.
443 Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
444 SourceLocation Loc);
445
446 /// Emit a conversion from the specified complex type to the specified
447 /// destination type, where the destination type is an LLVM scalar type.
448 Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
449 QualType SrcTy, QualType DstTy,
450 SourceLocation Loc);
451
452 /// EmitNullValue - Emit a value that corresponds to null for the given type.
453 Value *EmitNullValue(QualType Ty);
454
455 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
456 Value *EmitFloatToBoolConversion(Value *V) {
457 // Compare against 0.0 for fp scalars.
458 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
459 return Builder.CreateFCmpUNE(V, Zero, "tobool");
460 }
461
462 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
463 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
464 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
465
466 return Builder.CreateICmpNE(V, Zero, "tobool");
467 }
468
469 Value *EmitIntToBoolConversion(Value *V) {
470 // Because of the type rules of C, we often end up computing a
471 // logical value, then zero extending it to int, then wanting it
472 // as a logical value again. Optimize this common case.
473 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
474 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
475 Value *Result = ZI->getOperand(0);
476 // If there aren't any more uses, zap the instruction to save space.
477 // Note that there can be more uses, for example if this
478 // is the result of an assignment.
479 if (ZI->use_empty())
480 ZI->eraseFromParent();
481 return Result;
482 }
483 }
484
485 return Builder.CreateIsNotNull(V, "tobool");
486 }
487
  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  /// Dispatch to the Visit* overload for \p E, attaching E's source location
  /// to the IR emitted while visiting it.
  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  /// Non-expression statements cannot reach the scalar emitter; dump the
  /// offending node and abort.
  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }

  // Fallback for expressions without a dedicated visitor (defined
  // elsewhere in this file).
  Value *VisitExpr(Expr *S);
502
  /// Emit a ConstantExpr, preferring its pre-evaluated constant value when
  /// one is available.
  Value *VisitConstantExpr(ConstantExpr *E) {
    // A constant expression of type 'void' generates no code and produces no
    // value.
    if (E->getType()->isVoidType())
      return nullptr;

    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue()) {
        // This was already converted to an rvalue when it was constant
        // evaluated.
        if (E->hasAPValueResult() && !E->getAPValueResult().isLValue())
          return Result;
        return CGF.EmitLoadOfScalar(
            // NOTE(review): the address/l-value arguments to EmitLoadOfScalar
            // (original lines 516-517) are missing from this extraction.
            /*Volatile*/ false, E->getType(), E->getExprLoc());
      }
      return Result;
    }
    // No constant available: emit the wrapped expression normally.
    return Visit(E->getSubExpr());
  }
  // The following nodes are transparent for scalar emission: emit the
  // underlying / chosen sub-expression directly.
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    // _Generic: only the selected association is emitted.
    return Visit(GE->getResultExpr());
  }
  // Coroutine await/yield: delegate to CodeGenFunction and take the scalar
  // result.
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
542
  // Leaves.  Literals fold directly to LLVM constants.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    // Fixed-point literals are emitted via their integral representation.
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    // Character literals are always stored in an unsigned (even for signed
    // char), so allow implicit truncation here.
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue(),
                                  /*IsSigned=*/false, /*ImplicitTrunc=*/true);
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    // T() for void T produces no value.
    if (E->getType()->isVoidType())
      return nullptr;

    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    // GNU address-of-label (&&label): cast the label address to the
    // expression's type.
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }
588
  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
  Value *VisitEmbedExpr(EmbedExpr *E);

  /// Emit an OpaqueValueExpr by consulting the mapping set up when the OVE
  /// was bound.
  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    // A glvalue OVE has a pre-bound l-value; load from it.
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    // NOTE(review): the return statement for this path (original line 598)
    // is missing from this extraction.
  }

  Value *VisitOpenACCAsteriskSizeExpr(OpenACCAsteriskSizeExpr *E) {
    llvm_unreachable("Codegen for this isn't defined/implemented");
  }
604
  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    // Prefer folding the reference to a constant when possible; otherwise
    // load through the l-value.
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (E->getMethodDecl() &&
        // NOTE(review): the remainder of this condition (original line 623)
        // is missing from this extraction.
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    // NOTE(review): the statement defining V (original line 630, presumably
    // a load of LV) is missing from this extraction.
    return V;
  }
633
634 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
635 VersionTuple Version = E->getVersion();
636
637 // If we're checking for a platform older than our minimum deployment
638 // target, we can fold the check away.
639 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
640 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
641
642 return CGF.EmitBuiltinAvailable(Version);
643 }
644
  // Subscripting, shuffles, conversions and member access (defined
  // elsewhere in this file).
  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSingleSubscriptExpr(MatrixSingleSubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitMatrixElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    // The current index is maintained by the enclosing ArrayInitLoopExpr's
    // emission; it must be active here.
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    // NOTE(review): the condition guarding this load (original line 680) is
    // missing from this extraction.
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }
688
  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  // The four inc/dec forms all funnel into EmitScalarPrePostIncDec and
  // differ only in their (isInc, isPre) flags.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    // Taking the address of a member function/field yields a member-pointer
    // constant rather than an l-value pointer.
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }

  // Unary +, -, ~, !, __real, __imag.  The overloads taking a PromotionType
  // thread through an optional promoted computation type.
  Value *VisitUnaryPlus(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryMinus(const UnaryOperator *E,
                         QualType PromotionType = QualType());
  Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);

  Value *VisitUnaryNot (const UnaryOperator *E);
  Value *VisitUnaryLNot (const UnaryOperator *E);
  Value *VisitUnaryReal(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryImag(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    // __extension__ has no codegen effect.
    return Visit(E->getSubExpr());
  }
747
  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    // NOTE(review): the statements computing `Evaluated` (original lines
    // 754-755) are missing from this extraction.
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    // RAII scope so nested emission sees the default-argument context.
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    // delete-expressions yield no value.
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }
780
  /// Type traits are compile-time constants: either a stored bool or an
  /// integer APValue.
  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    if (E->isStoredAsBoolean())
      return llvm::ConstantInt::get(ConvertType(E->getType()),
                                    E->getBoolValue());
    assert(E->getAPValue().isInt() && "APValue type not supported");
    return llvm::ConstantInt::get(ConvertType(E->getType()),
                                  E->getAPValue().getInt());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    // Satisfaction was decided during semantic analysis.
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    // A throw-expression produces no value in the enclosing expression.
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }
828
  // Binary Operators.
  /// Emit a multiplication, selecting between plain/nsw/checked integer
  /// multiply, matrix multiply, FP multiply, and fixed-point multiply based
  /// on the computation type, the effective overflow behavior, and the
  /// active overflow sanitizers.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType() ||
        Ops.Ty->isUnsignedIntegerType()) {
      const bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
      const bool hasSan =
          isSigned ? CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)
                   : CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow);
      // Note the deliberate [[fallthrough]]s: when a sanitizer is active, the
      // weaker behaviors fall through to the stronger (checked) handling.
      switch (getOverflowBehaviorConsideringType(CGF, Ops.Ty)) {
      case LangOptions::OB_Wrap:
        // Wrapping semantics: plain mul, no poison flags, no checks.
        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::OB_SignedAndDefined:
        // Overflow is defined, so no nsw flag; still sanitize if requested.
        if (!hasSan)
          return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::OB_Unset:
        // Default language semantics: signed overflow is undefined, so nsw
        // is attached for signed types.
        if (!hasSan)
          return isSigned ? Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul")
                          : Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::OB_Trap:
        // Checked multiply, unless the check is provably unnecessary.
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return isSigned ? Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul")
                          : Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      // Matrix * matrix is a true matrix multiply; matrix * scalar scales
      // every element.
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
883 /// Create a binary op that checks for overflow.
884 /// Currently only supports +, - and *.
885 Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
886
887 // Check for undefined division and modulus behaviors.
888 void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
889 llvm::Value *Zero,bool isDiv);
890 // Common helper for getting how wide LHS of shift is.
891 static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);
892
893 // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
894 // non powers of two.
895 Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);
896
897 Value *EmitDiv(const BinOpInfo &Ops);
898 Value *EmitRem(const BinOpInfo &Ops);
899 Value *EmitAdd(const BinOpInfo &Ops);
900 Value *EmitSub(const BinOpInfo &Ops);
901 Value *EmitShl(const BinOpInfo &Ops);
902 Value *EmitShr(const BinOpInfo &Ops);
// Emit a bitwise AND of the already-evaluated operands; names the result "and".
 903 Value *EmitAnd(const BinOpInfo &Ops) {
 904 return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
 905 }
// Emit a bitwise XOR of the already-evaluated operands; names the result "xor".
 906 Value *EmitXor(const BinOpInfo &Ops) {
 907 return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
 908 }
// Emit a bitwise OR of the already-evaluated operands; names the result "or".
 909 Value *EmitOr (const BinOpInfo &Ops) {
 910 return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
 911 }
912
913 // Helper functions for fixed point binary operations.
914 Value *EmitFixedPointBinOp(const BinOpInfo &Ops);
915
916 BinOpInfo EmitBinOps(const BinaryOperator *E,
917 QualType PromotionTy = QualType());
918
919 Value *EmitPromotedValue(Value *result, QualType PromotionType);
920 Value *EmitUnPromotedValue(Value *result, QualType ExprType);
921 Value *EmitPromoted(const Expr *E, QualType PromotionType);
922
923 LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
924 Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
925 Value *&Result);
926
927 Value *EmitCompoundAssign(const CompoundAssignOperator *E,
928 Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
929
// Returns the type arithmetic on `Ty` should be performed in when the target
// uses excess precision for it (per QualType::UseExcessPrecision), or a null
// QualType when no promotion is needed. Callers treat a null result as
// "compute in the natural type".
 930 QualType getPromotionType(QualType Ty) {
 931 const auto &Ctx = CGF.getContext();
// Complex types: promote the element type (e.g. a half-precision complex is
// computed as _Complex float).
 932 if (auto *CT = Ty->getAs<ComplexType>()) {
 933 QualType ElementType = CT->getElementType();
 934 if (ElementType.UseExcessPrecision(Ctx))
 935 return Ctx.getComplexType(Ctx.FloatTy);
 936 }
 937
// Scalars and vectors: promote to float, preserving the vector shape/kind.
 938 if (Ty.UseExcessPrecision(Ctx)) {
 939 if (auto *VT = Ty->getAs<VectorType>()) {
 940 unsigned NumElements = VT->getNumElements();
 941 return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
 942 }
 943 return Ctx.FloatTy;
 944 }
 945
// Null QualType signals "no promotion".
 946 return QualType();
 947 }
948
949 // Binary operators and binary compound assignment operators.
// HANDLEBINOP(OP) generates the VisitBinOP / VisitBinOPAssign entry points:
// VisitBinOP evaluates both operands (EmitBinOps), performs the arithmetic in
// the excess-precision promotion type when one applies (getPromotionType),
// then demotes the result back to the expression's type; the compound-assign
// form additionally groups the emitted instructions for debug-info atoms.
 950#define HANDLEBINOP(OP) \
 951 Value *VisitBin##OP(const BinaryOperator *E) { \
 952 QualType promotionTy = getPromotionType(E->getType()); \
 953 auto result = Emit##OP(EmitBinOps(E, promotionTy)); \
 954 if (result && !promotionTy.isNull()) \
 955 result = EmitUnPromotedValue(result, E->getType()); \
 956 return result; \
 957 } \
 958 Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) { \
 959 ApplyAtomGroup Grp(CGF.getDebugInfo()); \
 960 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP); \
 961 }
 962 HANDLEBINOP(Mul)
 963 HANDLEBINOP(Div)
 964 HANDLEBINOP(Rem)
 965 HANDLEBINOP(Add)
 966 HANDLEBINOP(Sub)
 967 HANDLEBINOP(Shl)
 968 HANDLEBINOP(Shr)
// NOTE(review): lines 969 and 971 were dropped by the extraction (numbering
// jumped 968 -> 970 -> 972). Without them the class defines EmitAnd/EmitOr
// (see above) but never generates VisitBinAnd/VisitBinOr(+Assign). Restored
// to match the upstream instantiation set.
 969 HANDLEBINOP(And)
 970 HANDLEBINOP(Xor)
 971 HANDLEBINOP(Or)
 972#undef HANDLEBINOP
973
974 // Comparisons.
975 Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
976 llvm::CmpInst::Predicate SICmpOpc,
977 llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
978#define VISITCOMP(CODE, UI, SI, FP, SIG) \
979 Value *VisitBin##CODE(const BinaryOperator *E) { \
980 return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
981 llvm::FCmpInst::FP, SIG); }
982 VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
983 VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
984 VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
985 VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
986 VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
987 VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
988#undef VISITCOMP
989
990 Value *VisitBinAssign (const BinaryOperator *E);
991
992 Value *VisitBinLAnd (const BinaryOperator *E);
993 Value *VisitBinLOr (const BinaryOperator *E);
994 Value *VisitBinComma (const BinaryOperator *E);
995
996 Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
997 Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
998
// A rewritten operator (C++20 <=> rewrites) is emitted via its semantic form.
 999 Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
1000 return Visit(E->getSemanticForm());
1001 }
1002
1003 // Other Operators.
1004 Value *VisitBlockExpr(const BlockExpr *BE);
1005 Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
1006 Value *VisitChooseExpr(ChooseExpr *CE);
1007 Value *VisitVAArgExpr(VAArgExpr *VE);
// @"..." literals: delegate to CodeGenFunction's ObjC string emission.
1008 Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
1009 return CGF.EmitObjCStringLiteral(E);
1010 }
// @(...) boxed expressions: delegate to CodeGenFunction.
1011 Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
1012 return CGF.EmitObjCBoxedExpr(E);
1013 }
// @[...] array literals: delegate to CodeGenFunction.
1014 Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
1015 return CGF.EmitObjCArrayLiteral(E);
1016 }
// @{...} dictionary literals: delegate to CodeGenFunction.
1017 Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
1018 return CGF.EmitObjCDictionaryLiteral(E);
1019 }
1020 Value *VisitAsTypeExpr(AsTypeExpr *CE);
1021 Value *VisitAtomicExpr(AtomicExpr *AE);
// Pack indexing (T...[N]): emit the already-selected pack element.
1022 Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
1023 return Visit(E->getSelectedExpr());
1024 }
1025};
1026} // end anonymous namespace.
1027
1028//===----------------------------------------------------------------------===//
1029// Utilities
1030//===----------------------------------------------------------------------===//
1031
1032/// EmitConversionToBool - Convert the specified expression value to a
1033/// boolean (i1) truth value. This is equivalent to "Val != 0".
// Dispatch order matters: member pointers and the AMDGPU predicate type must
// be recognized before the generic integer/pointer fallthrough below.
1034 Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
1035 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
1036
1037 if (SrcType->isRealFloatingType())
1038 return EmitFloatToBoolConversion(Src);
1039
// Member-pointer null tests are ABI-specific (e.g. Itanium uses -1 for null
// data member pointers), so defer to the C++ ABI object.
1040 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
1041 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
1042
1043 // The conversion is a NOP, and will be done when CodeGening the builtin.
1044 if (SrcType == CGF.getContext().AMDGPUFeaturePredicateTy)
1045 return Src;
1046
1047 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
1048 "Unknown scalar type to convert");
1049
1050 if (isa<llvm::IntegerType>(Src->getType()))
1051 return EmitIntToBoolConversion(Src);
1052
// Only pointers remain at this point.
1053 assert(isa<llvm::PointerType>(Src->getType()));
1054 return EmitPointerToBoolConversion(Src, SrcType);
1055}
1056
// Emit the -fsanitize=float-cast-overflow check for a float->int conversion:
// verify Src lies strictly inside the open interval (MinSrc, MaxSrc), where
// the bounds are the nearest source-format values just outside the
// destination's representable range; NaN/Inf also fail the ordered compares.
1057 void ScalarExprEmitter::EmitFloatConversionCheck(
1058 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
1059 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
1060 assert(SrcType->isFloatingType() && "not a conversion from floating point");
// Only float->int conversions can overflow this way; anything else is exact
// or handled elsewhere.
1061 if (!isa<llvm::IntegerType>(DstTy))
1062 return;
1063
1064 auto CheckOrdinal = SanitizerKind::SO_FloatCastOverflow;
1065 auto CheckHandler = SanitizerHandler::FloatCastOverflow;
1066 SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
1067 using llvm::APFloat;
1068 using llvm::APSInt;
1069
1070 llvm::Value *Check = nullptr;
1071 const llvm::fltSemantics &SrcSema =
1072 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
1073
1074 // Floating-point to integer. This has undefined behavior if the source is
1075 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
1076 // to an integer).
1077 unsigned Width = CGF.getContext().getIntWidth(DstType);
// NOTE(review): the following line (1078) was dropped by the extraction
// (numbering jumped 1077 -> 1079); `Unsigned` is used throughout the bound
// computations below. Restored from upstream CGExprScalar.cpp.
1078 bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType();
1079
1080 APSInt Min = APSInt::getMinValue(Width, Unsigned);
1081 APFloat MinSrc(SrcSema, APFloat::uninitialized);
1082 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
1083 APFloat::opOverflow)
1084 // Don't need an overflow check for lower bound. Just check for
1085 // -Inf/NaN.
1086 MinSrc = APFloat::getInf(SrcSema, true);
1087 else
1088 // Find the largest value which is too small to represent (before
1089 // truncation toward zero).
1090 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
1091
1092 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
1093 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
1094 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
1095 APFloat::opOverflow)
1096 // Don't need an overflow check for upper bound. Just check for
1097 // +Inf/NaN.
1098 MaxSrc = APFloat::getInf(SrcSema, false);
1099 else
1100 // Find the smallest value which is too large to represent (before
1101 // truncation toward zero).
1102 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
1103
1104 // If we're converting from __half, convert the range to float to match
1105 // the type of src.
1106 if (OrigSrcType->isHalfType()) {
1107 const llvm::fltSemantics &Sema =
1108 CGF.getContext().getFloatTypeSemantics(SrcType);
1109 bool IsInexact;
1110 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1111 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1112 }
1113
// Ordered compares: a NaN source makes both false, so NaN is caught too.
1114 llvm::Value *GE =
1115 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
1116 llvm::Value *LE =
1117 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
1118 Check = Builder.CreateAnd(GE, LE);
1119
1120 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
1121 CGF.EmitCheckTypeDescriptor(OrigSrcType),
1122 CGF.EmitCheckTypeDescriptor(DstType)};
1123 CGF.EmitCheck(std::make_pair(Check, CheckOrdinal), CheckHandler, StaticArgs,
1124 OrigSrc);
1125}
1126
1127// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1128// Returns 'i1 false' when the truncation Src -> Dst was lossy.
// Builds the "was this int->int truncation lossy?" predicate: widen Dst back
// to Src's width (using Dst's signedness) and compare for equality with Src.
// Returns the check kind plus the (predicate, sanitizer ordinal) pair; the
// predicate is 'i1 false' exactly when the truncation lost information.
1129 static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1130 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
// NOTE(review): line 1131 (the signature head naming this function) was
// dropped by the extraction; restored from upstream CGExprScalar.cpp.
1131 EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1132 QualType DstType, CGBuilderTy &Builder) {
1133 llvm::Type *SrcTy = Src->getType();
1134 llvm::Type *DstTy = Dst->getType();
1135 (void)DstTy; // Only used in assert()
1136
1137 // This should be truncation of integral types.
1138 assert(Src != Dst);
1139 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
1140 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1141 "non-integer llvm type");
1142
1143 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1144 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1145
1146 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
1147 // Else, it is a signed truncation.
1148 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
// NOTE(review): line 1149 (declaration of `Ordinal`, assigned in both
// branches below) was dropped by the extraction; restored.
1149 SanitizerKind::SanitizerOrdinal Ordinal;
1150 if (!SrcSigned && !DstSigned) {
1151 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1152 Ordinal = SanitizerKind::SO_ImplicitUnsignedIntegerTruncation;
1153 } else {
1154 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1155 Ordinal = SanitizerKind::SO_ImplicitSignedIntegerTruncation;
1156 }
1157
1158 llvm::Value *Check = nullptr;
1159 // 1. Extend the truncated value back to the same width as the Src.
1160 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
1161 // 2. Equality-compare with the original source value
1162 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1163 // If the comparison result is 'i1 false', then the truncation was lossy.
1164 return std::make_pair(Kind, std::make_pair(Check, Ordinal));
1165}
1166
// Quick eligibility filter shared by the implicit-conversion checks: only
// integer-to-integer conversions are instrumented (pointer and bool
// conversions are handled elsewhere).
// NOTE(review): line 1167 (the `static bool` signature head) was dropped by
// the extraction; restored from upstream CGExprScalar.cpp.
1167 static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(
1168 QualType SrcType, QualType DstType) {
1169 return SrcType->isIntegerType() && DstType->isIntegerType();
1170 }
1171
// Emits the implicit-integer-truncation sanitizer check (and/or a plain trap
// when an __ob_trap OverflowBehaviorType is involved) for Src -> Dst.
// Bails out early for non-truncations and for cases another sanitizer or an
// OBT 'wrap' annotation takes responsibility for.
1172 void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1173 Value *Dst, QualType DstType,
1174 SourceLocation Loc,
1175 bool OBTrapInvolved) {
1176 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation) &&
1177 !OBTrapInvolved)
1178 return;
1179
1180 // We only care about int->int conversions here.
1181 // We ignore conversions to/from pointer and/or bool.
// NOTE(review): line 1182 (the call head of the eligibility helper) was
// dropped by the extraction, leaving the orphaned `DstType))` below.
// Restored from upstream CGExprScalar.cpp.
1182 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1183 DstType))
1184 return;
1185
1186 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1187 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1188 // This must be truncation. Else we do not care.
1189 if (SrcBits <= DstBits)
1190 return;
1191
1192 assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1193
1194 // If the integer sign change sanitizer is enabled,
1195 // and we are truncating from larger unsigned type to smaller signed type,
1196 // let that next sanitizer deal with it.
1197 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1198 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1199 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1200 (!SrcSigned && DstSigned))
1201 return;
1202
1203 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1204 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1205 Check;
1206
1207 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1208 {
1209 // We don't know the check kind until we call
1210 // EmitIntegerTruncationCheckHelper, but we want to annotate
1211 // EmitIntegerTruncationCheckHelper's instructions too.
1212 SanitizerDebugLocation SanScope(
1213 &CGF,
1214 {SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1215 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1216 CheckHandler);
1217 Check =
1218 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1219 // If the comparison result is 'i1 false', then the truncation was lossy.
1220 }
1221
1222 // Do we care about this type of truncation?
1223 if (!CGF.SanOpts.has(Check.second.second)) {
1224 // Just emit a trap check if an __ob_trap was involved but appropriate
1225 // sanitizer isn't enabled.
1226 if (OBTrapInvolved)
1227 CGF.EmitTrapCheck(Check.second.first, CheckHandler);
1228 return;
1229 }
1230
1231 SanitizerDebugLocation SanScope(&CGF, {Check.second.second}, CheckHandler);
1232
1233 // Does some SSCL ignore this type?
1234 const bool ignoredBySanitizer = CGF.getContext().isTypeIgnoredBySanitizer(
1235 SanitizerMask::bitPosToMask(Check.second.second), DstType);
1236
1237 // Consider OverflowBehaviorTypes which override SSCL type entries for
1238 // truncation sanitizers.
1239 if (const auto *OBT = DstType->getAs<OverflowBehaviorType>()) {
1240 if (OBT->isWrapKind())
1241 return;
1242 }
1243 if (ignoredBySanitizer && !OBTrapInvolved)
1244 return;
1245
1246 llvm::Constant *StaticArgs[] = {
1247 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1248 CGF.EmitCheckTypeDescriptor(DstType),
1249 llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
1250 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1251
1252 CGF.EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
1253}
1254
// Returns an i1 that is true iff V is negative. For unsigned source types this
// folds to the constant 'false'; for signed types it emits V < 0. `Name` is
// only used to build a readable IR value name ("<Name>.<V>.negativitycheck").
1255 static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
1256 const char *Name,
1257 CGBuilderTy &Builder) {
1258 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1259 llvm::Type *VTy = V->getType();
1260 if (!VSigned) {
1261 // If the value is unsigned, then it is never negative.
1262 return llvm::ConstantInt::getFalse(VTy->getContext());
1263 }
1264 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1265 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1266 llvm::Twine(Name) + "." + V->getName() +
1267 ".negativitycheck");
1268}
1269
1270// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1271// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
// Builds the "did the conversion Src -> Dst change the sign?" predicate by
// comparing the negativity of the value before and after conversion.
// The returned predicate is 'i1 false' exactly when the sign changed.
1272 static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1273 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
// NOTE(review): line 1274 (the signature head naming this function) was
// dropped by the extraction; restored from upstream CGExprScalar.cpp.
1274 EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1275 QualType DstType, CGBuilderTy &Builder) {
1276 llvm::Type *SrcTy = Src->getType();
1277 llvm::Type *DstTy = Dst->getType();
1278
1279 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1280 "non-integer llvm type");
1281
1282 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1283 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1284 (void)SrcSigned; // Only used in assert()
1285 (void)DstSigned; // Only used in assert()
1286 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1287 unsigned DstBits = DstTy->getScalarSizeInBits();
1288 (void)SrcBits; // Only used in assert()
1289 (void)DstBits; // Only used in assert()
1290
1291 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1292 "either the widths should be different, or the signednesses.");
1293
1294 // 1. Was the old Value negative?
1295 llvm::Value *SrcIsNegative =
1296 EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
1297 // 2. Is the new Value negative?
1298 llvm::Value *DstIsNegative =
1299 EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
1300 // 3. Now, was the 'negativity status' preserved during the conversion?
1301 // NOTE: conversion from negative to zero is considered to change the sign.
1302 // (We want to get 'false' when the conversion changed the sign)
1303 // So we should just equality-compare the negativity statuses.
1304 llvm::Value *Check = nullptr;
1305 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1306 // If the comparison result is 'false', then the conversion changed the sign.
1307 return std::make_pair(
1308 ScalarExprEmitter::ICCK_IntegerSignChange,
1309 std::make_pair(Check, SanitizerKind::SO_ImplicitIntegerSignChange));
1310}
1311
// Emits the implicit-integer-sign-change sanitizer check (and/or a plain trap
// for __ob_trap OverflowBehaviorTypes) for Src -> Dst, skipping every case
// where a sign change is provably impossible or another sanitizer covers it.
1312 void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1313 Value *Dst, QualType DstType,
1314 SourceLocation Loc,
1315 bool OBTrapInvolved) {
1316 if (!CGF.SanOpts.has(SanitizerKind::SO_ImplicitIntegerSignChange) &&
1317 !OBTrapInvolved)
1318 return;
1319
1320 llvm::Type *SrcTy = Src->getType();
1321 llvm::Type *DstTy = Dst->getType();
1322
1323 // We only care about int->int conversions here.
1324 // We ignore conversions to/from pointer and/or bool.
// NOTE(review): line 1325 (the call head of the eligibility helper) was
// dropped by the extraction, leaving the orphaned `DstType))` below.
// Restored from upstream CGExprScalar.cpp.
1325 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1326 DstType))
1327 return;
1328
1329 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1330 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1331 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1332 unsigned DstBits = DstTy->getScalarSizeInBits();
1333
1334 // Now, we do not need to emit the check in *all* of the cases.
1335 // We can avoid emitting it in some obvious cases where it would have been
1336 // dropped by the opt passes (instcombine) always anyways.
1337 // If it's a cast between effectively the same type, no check.
1338 // NOTE: this is *not* equivalent to checking the canonical types.
1339 if (SrcSigned == DstSigned && SrcBits == DstBits)
1340 return;
1341 // At least one of the values needs to have signed type.
1342 // If both are unsigned, then obviously, neither of them can be negative.
1343 if (!SrcSigned && !DstSigned)
1344 return;
1345 // If the conversion is to *larger* *signed* type, then no check is needed.
1346 // Because either sign-extension happens (so the sign will remain),
1347 // or zero-extension will happen (the sign bit will be zero.)
1348 if ((DstBits > SrcBits) && DstSigned)
1349 return;
1350 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1351 (SrcBits > DstBits) && SrcSigned) {
1352 // If the signed integer truncation sanitizer is enabled,
1353 // and this is a truncation from signed type, then no check is needed.
1354 // Because here sign change check is interchangeable with truncation check.
1355 return;
1356 }
1357 // Does an SSCL have an entry for the DstType under its respective sanitizer
1358 // section? Don't check this if an __ob_trap type is involved as it has
1359 // priority to emit checks regardless of sanitizer case lists.
1360 if (!OBTrapInvolved) {
1361 if (DstSigned &&
// NOTE(review): lines 1362 and 1366 (the `isTypeIgnoredBySanitizer(` call
// heads) were dropped by the extraction; restored from upstream.
1362 CGF.getContext().isTypeIgnoredBySanitizer(
1363 SanitizerKind::ImplicitSignedIntegerTruncation, DstType))
1364 return;
1365 if (!DstSigned &&
1366 CGF.getContext().isTypeIgnoredBySanitizer(
1367 SanitizerKind::ImplicitUnsignedIntegerTruncation, DstType))
1368 return;
1369 }
1370 // That's it. We can't rule out any more cases with the data we have.
1371
1372 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1373 SanitizerDebugLocation SanScope(
1374 &CGF,
1375 {SanitizerKind::SO_ImplicitIntegerSignChange,
1376 SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1377 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1378 CheckHandler);
1379
1380 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1381 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1382 Check;
1383
1384 // Each of these checks needs to return 'false' when an issue was detected.
1385 ImplicitConversionCheckKind CheckKind;
1386 llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
1387 2>
1388 Checks;
1389 // So we can 'and' all the checks together, and still get 'false',
1390 // if at least one of the checks detected an issue.
1391
1392 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1393 CheckKind = Check.first;
1394 Checks.emplace_back(Check.second);
1395
1396 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1397 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1398 // If the signed integer truncation sanitizer was enabled,
1399 // and we are truncating from larger unsigned type to smaller signed type,
1400 // let's handle the case we skipped in that check.
1401 Check =
1402 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1403 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1404 Checks.emplace_back(Check.second);
1405 // If the comparison result is 'i1 false', then the truncation was lossy.
1406 }
1407
1408 if (!CGF.SanOpts.has(SanitizerKind::SO_ImplicitIntegerSignChange)) {
1409 if (OBTrapInvolved) {
1410 llvm::Value *Combined = Check.second.first;
1411 for (const auto &C : Checks)
1412 Combined = Builder.CreateAnd(Combined, C.first);
1413 CGF.EmitTrapCheck(Combined, CheckHandler);
1414 }
1415 return;
1416 }
1417
1418 llvm::Constant *StaticArgs[] = {
1419 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1420 CGF.EmitCheckTypeDescriptor(DstType),
1421 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1422 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1423 // EmitCheck() will 'and' all the checks together.
1424 CGF.EmitCheck(Checks, CheckHandler, StaticArgs, {Src, Dst});
1425}
1426
1427// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1428// Returns 'i1 false' when the truncation Src -> Dst was lossy.
// Bitfield variant of the truncation predicate: widen the stored bitfield
// value back to Src's width and equality-compare; 'i1 false' means lossy.
// Reports under the ImplicitBitfieldConversion ordinal.
1429 static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1430 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
// NOTE(review): line 1431 (the signature head naming this function) was
// dropped by the extraction; restored from upstream CGExprScalar.cpp.
1431 EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1432 QualType DstType, CGBuilderTy &Builder) {
1433 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1434 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1435
1436 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1437 if (!SrcSigned && !DstSigned)
1438 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1439 else
1440 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1441
1442 llvm::Value *Check = nullptr;
1443 // 1. Extend the truncated value back to the same width as the Src.
1444 Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
1445 // 2. Equality-compare with the original source value
1446 Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
1447 // If the comparison result is 'i1 false', then the truncation was lossy.
1448
1449 return std::make_pair(
1450 Kind,
1451 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1452}
1453
1454// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1455// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
// Bitfield variant of the sign-change predicate: compare the negativity of
// the value before and after storing into the bitfield; 'i1 false' means the
// sign changed. Reports under the ImplicitBitfieldConversion ordinal.
1456 static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1457 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
// NOTE(review): line 1458 (the signature head naming this function) was
// dropped by the extraction; restored from upstream CGExprScalar.cpp.
1458 EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst,
1459 QualType DstType, CGBuilderTy &Builder) {
1460 // 1. Was the old Value negative?
1461 llvm::Value *SrcIsNegative =
1462 EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
1463 // 2. Is the new Value negative?
1464 llvm::Value *DstIsNegative =
1465 EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
1466 // 3. Now, was the 'negativity status' preserved during the conversion?
1467 // NOTE: conversion from negative to zero is considered to change the sign.
1468 // (We want to get 'false' when the conversion changed the sign)
1469 // So we should just equality-compare the negativity statuses.
1470 llvm::Value *Check = nullptr;
1471 Check =
1472 Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
1473 // If the comparison result is 'false', then the conversion changed the sign.
1474 return std::make_pair(
1475 ScalarExprEmitter::ICCK_IntegerSignChange,
1476 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1477}
1478
// Emits the -fsanitize=implicit-bitfield-conversion check when storing Src
// into a bitfield (Dst is the value as stored, Info describes the bitfield).
// NOTE(review): line 1479 (the `void CodeGenFunction::EmitBitfieldConversionCheck(`
// signature head) was dropped by the extraction; restored from upstream.
1479 void CodeGenFunction::EmitBitfieldConversionCheck(Value *Src, QualType SrcType,
1480 Value *Dst, QualType DstType,
1481 const CGBitFieldInfo &Info,
1482 SourceLocation Loc) {
1483
1484 if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
1485 return;
1486
1487 // We only care about int->int conversions here.
1488 // We ignore conversions to/from pointer and/or bool.
// NOTE(review): line 1489 (the call head of the eligibility helper) was
// dropped by the extraction, leaving the orphaned `DstType))` below.
// Restored from upstream CGExprScalar.cpp.
1489 if (!PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(SrcType,
1490 DstType))
1491 return;
1492
1493 if (DstType->isBooleanType() || SrcType->isBooleanType())
1494 return;
1495
1496 // This should be truncation of integral types.
1497 assert(isa<llvm::IntegerType>(Src->getType()) &&
1498 isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");
1499
1500 // TODO: Calculate src width to avoid emitting code
1501 // for unecessary cases.
1502 unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
1503 unsigned DstBits = Info.Size;
1504
1505 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1506 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1507
1508 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1509 SanitizerDebugLocation SanScope(
1510 this, {SanitizerKind::SO_ImplicitBitfieldConversion}, CheckHandler);
1511
1512 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1513 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1514 Check;
1515
1516 // Truncation
1517 bool EmitTruncation = DstBits < SrcBits;
1518 // If Dst is signed and Src unsigned, we want to be more specific
1519 // about the CheckKind we emit, in this case we want to emit
1520 // ICCK_SignedIntegerTruncationOrSignChange.
1521 bool EmitTruncationFromUnsignedToSigned =
1522 EmitTruncation && DstSigned && !SrcSigned;
1523 // Sign change
1524 bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
1525 bool BothUnsigned = !SrcSigned && !DstSigned;
1526 bool LargerSigned = (DstBits > SrcBits) && DstSigned;
1527 // We can avoid emitting sign change checks in some obvious cases
1528 // 1. If Src and Dst have the same signedness and size
1529 // 2. If both are unsigned sign check is unecessary!
1530 // 3. If Dst is signed and bigger than Src, either
1531 // sign-extension or zero-extension will make sure
1532 // the sign remains.
1533 bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;
1534
1535 if (EmitTruncation)
1536 Check =
1537 EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1538 else if (EmitSignChange) {
1539 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1540 "either the widths should be different, or the signednesses.");
1541 Check =
1542 EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1543 } else
1544 return;
1545
1546 ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
1547 if (EmitTruncationFromUnsignedToSigned)
1548 CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;
1549
1550 llvm::Constant *StaticArgs[] = {
// NOTE(review): line 1551 (source-location + source-type descriptor
// arguments) was dropped by the extraction; restored from upstream.
1551 EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(SrcType),
1552 EmitCheckTypeDescriptor(DstType),
1553 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1554 llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};
1555
1556 EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
1557}
1558
// Emit the raw LLVM cast for a scalar (or matrix-as-vector) conversion whose
// source and destination LLVM types are already known. Handles int<->int,
// int<->fp, and fp<->fp; bool destinations are handled by the caller.
1559 Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1560 QualType DstType, llvm::Type *SrcTy,
1561 llvm::Type *DstTy,
1562 ScalarConversionOpts Opts) {
1563 // The Element types determine the type of cast to perform.
1564 llvm::Type *SrcElementTy;
1565 llvm::Type *DstElementTy;
1566 QualType SrcElementType;
1567 QualType DstElementType;
// Matrices are lowered to flat LLVM vectors; cast element-wise.
1568 if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1569 SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1570 DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1571 SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1572 DstElementType = DstType->castAs<MatrixType>()->getElementType();
1573 } else {
1574 assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
1575 "cannot cast between matrix and non-matrix types");
1576 SrcElementTy = SrcTy;
1577 DstElementTy = DstTy;
1578 SrcElementType = SrcType;
1579 DstElementType = DstType;
1580 }
1581
// Integer source: int->int cast or int->fp, signedness taken from the AST
// type (bool may be forced signed so 'true' becomes -1, e.g. for vectors).
1582 if (isa<llvm::IntegerType>(SrcElementTy)) {
1583 bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1584 if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1585 InputSigned = true;
1586 }
1587
1588 if (isa<llvm::IntegerType>(DstElementTy))
1589 return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1590 if (InputSigned)
1591 return Builder.CreateSIToFP(Src, DstTy, "conv");
1592 return Builder.CreateUIToFP(Src, DstTy, "conv");
1593 }
1594
// Floating-point source, integer destination.
1595 if (isa<llvm::IntegerType>(DstElementTy)) {
1596 assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
1597 bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();
1598
1599 // If we can't recognize overflow as undefined behavior, assume that
1600 // overflow saturates. This protects against normal optimizations if we are
1601 // compiling with non-standard FP semantics.
1602 if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
1603 llvm::Intrinsic::ID IID =
1604 IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
1605 return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
1606 }
1607
1608 if (IsSigned)
1609 return Builder.CreateFPToSI(Src, DstTy, "conv");
1610 return Builder.CreateFPToUI(Src, DstTy, "conv");
1611 }
1612
// fp->fp. Two distinct 16-bit FP formats (e.g. half <-> bfloat) cannot be
// converted directly; go through float.
1613 if ((DstElementTy->is16bitFPTy() && SrcElementTy->is16bitFPTy())) {
1614 Value *FloatVal = Builder.CreateFPExt(Src, Builder.getFloatTy(), "fpext");
1615 return Builder.CreateFPTrunc(FloatVal, DstTy, "fptrunc");
1616 }
// Otherwise pick trunc vs ext by comparing the FP type IDs (ordered by size).
1617 if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1618 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1619 return Builder.CreateFPExt(Src, DstTy, "conv");
1620}
1621
1622/// Emit a conversion from the specified type to the specified destination type,
1623/// both of which are LLVM scalar types.
/// Dispatches the fixed-point, bool, half, pointer/integer, vector-splat,
/// vector-bitcast, and real arithmetic cases separately; \p Opts selects
/// which implicit-conversion sanitizer checks (truncation / sign change)
/// are emitted for the integer paths.
1624Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1625 QualType DstType,
1626 SourceLocation Loc,
1627 ScalarConversionOpts Opts) {
1628 // All conversions involving fixed point types should be handled by the
1629 // EmitFixedPoint family functions. This is done to prevent bloating up this
1630 // function more, and although fixed point numbers are represented by
1631 // integers, we do not want to follow any logic that assumes they should be
1632 // treated as integers.
1633 // TODO(leonardchan): When necessary, add another if statement checking for
1634 // conversions to fixed point types from other types.
1635 if (SrcType->isFixedPointType()) {
1636 if (DstType->isBooleanType())
1637 // It is important that we check this before checking if the dest type is
1638 // an integer because booleans are technically integer types.
1639 // We do not need to check the padding bit on unsigned types if unsigned
1640 // padding is enabled because overflow into this bit is undefined
1641 // behavior.
1642 return Builder.CreateIsNotNull(Src, "tobool");
1643 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1644 DstType->isRealFloatingType())
1645 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1646
1647 llvm_unreachable(
1648 "Unhandled scalar conversion from a fixed point type to another type.");
1649 } else if (DstType->isFixedPointType()) {
1650 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1651 // This also includes converting booleans and enums to fixed point types.
1652 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1653
1654 llvm_unreachable(
1655 "Unhandled scalar conversion to a fixed point type from another type.");
1656 }
1657
 // Keep the uncanonicalized types around: the OBT (overflow behavior) and
 // sign-change/truncation checks below want the sugared types.
1658 QualType NoncanonicalSrcType = SrcType;
1659 QualType NoncanonicalDstType = DstType;
1660
1661 SrcType = CGF.getContext().getCanonicalType(SrcType);
1662 DstType = CGF.getContext().getCanonicalType(DstType);
1663 if (SrcType == DstType) return Src;
1664
1665 if (DstType->isVoidType()) return nullptr;
1666
 // Remember the pre-promotion value/type for the float-cast-overflow check.
1667 llvm::Value *OrigSrc = Src;
1668 QualType OrigSrcType = SrcType;
1669 llvm::Type *SrcTy = Src->getType();
1670
1671 // Handle conversions to bool first, they are special: comparisons against 0.
1672 if (DstType->isBooleanType())
1673 return EmitConversionToBool(Src, SrcType);
1674
1675 llvm::Type *DstTy = ConvertType(DstType);
1676
1677 // Determine whether an overflow behavior of 'trap' has been specified for
1678 // either the destination or the source types. If so, we can elide sanitizer
1679 // capability checks as this overflow behavior kind is also capable of
1680 // emitting traps without runtime sanitizer support.
1681 // Also skip instrumentation if either source or destination has 'wrap'
1682 // behavior - the user has explicitly indicated they accept wrapping
1683 // semantics. Use non-canonical types to preserve OBT annotations.
1684 const auto *DstOBT = NoncanonicalDstType->getAs<OverflowBehaviorType>();
1685 const auto *SrcOBT = NoncanonicalSrcType->getAs<OverflowBehaviorType>();
1686 bool OBTrapInvolved =
1687 (DstOBT && DstOBT->isTrapKind()) || (SrcOBT && SrcOBT->isTrapKind());
1688 bool OBWrapInvolved =
1689 (DstOBT && DstOBT->isWrapKind()) || (SrcOBT && SrcOBT->isWrapKind());
1690
1691 // Cast from half through float if half isn't a native type.
1692 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1693 // Cast to FP using the intrinsic if the half type itself isn't supported.
1694 if (DstTy->isFloatingPointTy()) {
 // NOTE(review): the value is bitcast to the FP half type before widening;
 // presumably Src may carry the i16 storage representation of half here --
 // confirm against the target's half lowering (cf. the same check below).
1696 Value *BitCast = Builder.CreateBitCast(Src, CGF.CGM.HalfTy);
1697 return Builder.CreateFPExt(BitCast, DstTy, "conv");
1698 }
1699 } else {
1700 // Cast to other types through float, using either the intrinsic or FPExt,
1701 // depending on whether the half type itself is supported
1702 // (as opposed to operations on half, available with NativeHalfType).
1703
1704 if (Src->getType() != CGF.CGM.HalfTy) {
1706 Src = Builder.CreateBitCast(Src, CGF.CGM.HalfTy);
1707 }
1708
 // From here on, the conversion proceeds as if the source were float.
1709 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1710 SrcType = CGF.getContext().FloatTy;
1711 SrcTy = CGF.FloatTy;
1712 }
1713 }
1714
1715 // Ignore conversions like int -> uint.
1716 if (SrcTy == DstTy) {
 // Same IR type, but a sign change may still be observable; emit the check
 // when requested (or forced by a trapping overflow-behavior type).
1717 if (Opts.EmitImplicitIntegerSignChangeChecks ||
1718 (OBTrapInvolved && !OBWrapInvolved))
1719 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1720 NoncanonicalDstType, Loc, OBTrapInvolved);
1721
1722 return Src;
1723 }
1724
1725 // Handle pointer conversions next: pointers can only be converted to/from
1726 // other pointers and integers. Check for pointer types in terms of LLVM, as
1727 // some native types (like Obj-C id) may map to a pointer type.
1728 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1729 // The source value may be an integer, or a pointer.
1730 if (isa<llvm::PointerType>(SrcTy))
1731 return Src;
1732
1733 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1734 // First, convert to the correct width so that we control the kind of
1735 // extension.
1736 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1737 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1738 llvm::Value* IntResult =
1739 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1740 // Then, cast to pointer.
1741 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1742 }
1743
1744 if (isa<llvm::PointerType>(SrcTy)) {
1745 // Must be an ptr to int cast.
1746 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1747 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1748 }
1749
1750 // A scalar can be splatted to an extended vector of the same element type
1751 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1752 // Sema should add casts to make sure that the source expression's type is
1753 // the same as the vector's element type (sans qualifiers)
1754 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1755 SrcType.getTypePtr() &&
1756 "Splatted expr doesn't match with vector element type?");
1757
1758 // Splat the element across to all elements
1759 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1760 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1761 }
1762
1763 if (SrcType->isMatrixType() && DstType->isMatrixType())
1764 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1765
1766 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1767 // Allow bitcast from vector to integer/fp of the same size.
1768 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1769 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1770 if (SrcSize == DstSize)
1771 return Builder.CreateBitCast(Src, DstTy, "conv");
1772
1773 // Conversions between vectors of different sizes are not allowed except
1774 // when vectors of half are involved. Operations on storage-only half
1775 // vectors require promoting half vector operands to float vectors and
1776 // truncating the result, which is either an int or float vector, to a
1777 // short or half vector.
1778
1779 // Source and destination are both expected to be vectors.
1780 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1781 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1782 (void)DstElementTy;
1783
1784 assert(((SrcElementTy->isIntegerTy() &&
1785 DstElementTy->isIntegerTy()) ||
1786 (SrcElementTy->isFloatingPointTy() &&
1787 DstElementTy->isFloatingPointTy())) &&
1788 "unexpected conversion between a floating-point vector and an "
1789 "integer vector");
1790
1791 // Truncate an i32 vector to an i16 vector.
1792 if (SrcElementTy->isIntegerTy())
1793 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1794
1795 // Truncate a float vector to a half vector.
1796 if (SrcSize > DstSize)
1797 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1798
1799 // Promote a half vector to a float vector.
1800 return Builder.CreateFPExt(Src, DstTy, "conv");
1801 }
1802
1803 // Finally, we have the arithmetic types: real int/float.
1804 Value *Res = nullptr;
1805 llvm::Type *ResTy = DstTy;
1806
1807 // An overflowing conversion has undefined behavior if either the source type
1808 // or the destination type is a floating-point type. However, we consider the
1809 // range of representable values for all floating-point types to be
1810 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1811 // floating-point type.
1812 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1813 OrigSrcType->isFloatingType())
1814 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1815 Loc);
1816
1817 // Cast to half through float if half isn't a native type.
1818 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1819 // Make sure we cast in a single step if from another FP type.
1820 if (SrcTy->isFloatingPointTy()) {
1821 // Handle the case where the half type is represented as an integer (as
1822 // opposed to operations on half, available with NativeHalfType).
1823
1824 // If the half type is supported, just use an fptrunc.
1825 Value *Res = Builder.CreateFPTrunc(Src, CGF.CGM.HalfTy, "conv");
1826 if (DstTy == CGF.CGM.HalfTy)
1827 return Res;
1828
1829 assert(DstTy->isIntegerTy(16) &&
1831 "Only half FP requires extra conversion");
1832 return Builder.CreateBitCast(Res, DstTy);
1833 }
1834
 // Otherwise convert to float first; the trunc back to half happens below.
1835 DstTy = CGF.FloatTy;
1836 }
1837
1838 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1839
 // DstTy was redirected to float above for a storage-only half destination;
 // narrow the result back down (and bitcast to i16 if half is stored as an
 // integer type).
1840 if (DstTy != ResTy) {
1841 Res = Builder.CreateFPTrunc(Res, CGF.CGM.HalfTy, "conv");
1842
1843 if (ResTy != CGF.CGM.HalfTy) {
1844 assert(ResTy->isIntegerTy(16) &&
1846 "Only half FP requires extra conversion");
1847 Res = Builder.CreateBitCast(Res, ResTy);
1848 }
1849 }
1850
 // Emit the requested implicit-conversion checks against the noncanonical
 // types so any OBT annotations remain visible to them.
1851 if ((Opts.EmitImplicitIntegerTruncationChecks || OBTrapInvolved) &&
1852 !OBWrapInvolved && !Opts.PatternExcluded)
1853 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1854 NoncanonicalDstType, Loc, OBTrapInvolved);
1855
1856 if (Opts.EmitImplicitIntegerSignChangeChecks ||
1857 (OBTrapInvolved && !OBWrapInvolved))
1858 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1859 NoncanonicalDstType, Loc, OBTrapInvolved);
1860
1861 return Res;
1862}
1863
1864Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1865 QualType DstTy,
1866 SourceLocation Loc) {
1867 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1868 llvm::Value *Result;
1869 if (SrcTy->isRealFloatingType())
1870 Result = FPBuilder.CreateFloatingToFixed(Src,
1871 CGF.getContext().getFixedPointSemantics(DstTy));
1872 else if (DstTy->isRealFloatingType())
1873 Result = FPBuilder.CreateFixedToFloating(Src,
1875 ConvertType(DstTy));
1876 else {
1877 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1878 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1879
1880 if (DstTy->isIntegerType())
1881 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1882 DstFPSema.getWidth(),
1883 DstFPSema.isSigned());
1884 else if (SrcTy->isIntegerType())
1885 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1886 DstFPSema);
1887 else
1888 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1889 }
1890 return Result;
1891}
1892
1893/// Emit a conversion from the specified complex type to the specified
1894/// destination type, where the destination type is an LLVM scalar type.
1895Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1896 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1897 SourceLocation Loc) {
1898 // Get the source element type.
1899 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1900
1901 // Handle conversions to bool first, they are special: comparisons against 0.
1902 if (DstTy->isBooleanType()) {
1903 // Complex != 0 -> (Real != 0) | (Imag != 0)
1904 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1905 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1906 return Builder.CreateOr(Src.first, Src.second, "tobool");
1907 }
1908
1909 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1910 // the imaginary part of the complex value is discarded and the value of the
1911 // real part is converted according to the conversion rules for the
1912 // corresponding real type.
1913 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1914}
1915
1916Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1917 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1918}
1919
1920/// Emit a sanitization check for the given "binary" operation (which
1921/// might actually be a unary increment which has been lowered to a binary
1922/// operation). The check passes if all values in \p Checks (which are \c i1),
1923/// are \c true.
1924void ScalarExprEmitter::EmitBinOpCheck(
1925 ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
1926 const BinOpInfo &Info) {
1927 assert(CGF.IsSanitizerScope);
1928 SanitizerHandler Check;
1929 SmallVector<llvm::Constant *, 4> StaticData;
1930 SmallVector<llvm::Value *, 2> DynamicData;
1931 TrapReason TR;
1932
1933 BinaryOperatorKind Opcode = Info.Opcode;
1936
1937 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1938 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1939 if (UO && UO->getOpcode() == UO_Minus) {
1940 Check = SanitizerHandler::NegateOverflow;
1941 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1942 DynamicData.push_back(Info.RHS);
1943 } else {
1944 if (BinaryOperator::isShiftOp(Opcode)) {
1945 // Shift LHS negative or too large, or RHS out of bounds.
1946 Check = SanitizerHandler::ShiftOutOfBounds;
1947 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1948 StaticData.push_back(
1949 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1950 StaticData.push_back(
1951 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1952 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1953 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1954 Check = SanitizerHandler::DivremOverflow;
1955 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1956 } else {
1957 // Arithmetic overflow (+, -, *).
1958 int ArithOverflowKind = 0;
1959 switch (Opcode) {
1960 case BO_Add: {
1961 Check = SanitizerHandler::AddOverflow;
1962 ArithOverflowKind = diag::UBSanArithKind::Add;
1963 break;
1964 }
1965 case BO_Sub: {
1966 Check = SanitizerHandler::SubOverflow;
1967 ArithOverflowKind = diag::UBSanArithKind::Sub;
1968 break;
1969 }
1970 case BO_Mul: {
1971 Check = SanitizerHandler::MulOverflow;
1972 ArithOverflowKind = diag::UBSanArithKind::Mul;
1973 break;
1974 }
1975 default:
1976 llvm_unreachable("unexpected opcode for bin op check");
1977 }
1978 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1980 SanitizerKind::UnsignedIntegerOverflow) ||
1982 SanitizerKind::SignedIntegerOverflow)) {
1983 // Only pay the cost for constructing the trap diagnostic if they are
1984 // going to be used.
1985 CGF.CGM.BuildTrapReason(diag::trap_ubsan_arith_overflow, TR)
1986 << Info.Ty->isSignedIntegerOrEnumerationType() << ArithOverflowKind
1987 << Info.E;
1988 }
1989 }
1990 DynamicData.push_back(Info.LHS);
1991 DynamicData.push_back(Info.RHS);
1992 }
1993
1994 CGF.EmitCheck(Checks, Check, StaticData, DynamicData, &TR);
1995}
1996
1997//===----------------------------------------------------------------------===//
1998// Visitor Methods
1999//===----------------------------------------------------------------------===//
2000
2001Value *ScalarExprEmitter::VisitExpr(Expr *E) {
2002 CGF.ErrorUnsupported(E, "scalar expression");
2003 if (E->getType()->isVoidType())
2004 return nullptr;
2005 return llvm::PoisonValue::get(CGF.ConvertType(E->getType()));
2006}
2007
2008Value *
2009ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
2010 ASTContext &Context = CGF.getContext();
2011 unsigned AddrSpace =
2013 llvm::Constant *GlobalConstStr = Builder.CreateGlobalString(
2014 E->ComputeName(Context), "__usn_str", AddrSpace);
2015
2016 llvm::Type *ExprTy = ConvertType(E->getType());
2017 return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,
2018 "usn_addr_cast");
2019}
2020
2021Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {
2022 assert(E->getDataElementCount() == 1);
2023 auto It = E->begin();
2024 return Builder.getInt((*It)->getValue());
2025}
2026
2027Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
2028 // Vector Mask Case
2029 if (E->getNumSubExprs() == 2) {
2030 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
2031 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
2032 Value *Mask;
2033
2034 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
2035 unsigned LHSElts = LTy->getNumElements();
2036
2037 Mask = RHS;
2038
2039 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
2040
2041 // Mask off the high bits of each shuffle index.
2042 Value *MaskBits =
2043 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
2044 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
2045
2046 // newv = undef
2047 // mask = mask & maskbits
2048 // for each elt
2049 // n = extract mask i
2050 // x = extract val n
2051 // newv = insert newv, x, i
2052 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
2053 MTy->getNumElements());
2054 Value* NewV = llvm::PoisonValue::get(RTy);
2055 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
2056 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
2057 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
2058
2059 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
2060 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
2061 }
2062 return NewV;
2063 }
2064
2065 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
2066 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
2067
2068 SmallVector<int, 32> Indices;
2069 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
2070 llvm::APSInt Idx = E->getShuffleMaskIdx(i - 2);
2071 // Check for -1 and output it as undef in the IR.
2072 if (Idx.isSigned() && Idx.isAllOnes())
2073 Indices.push_back(-1);
2074 else
2075 Indices.push_back(Idx.getZExtValue());
2076 }
2077
2078 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
2079}
2080
2081Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
2082 QualType SrcType = E->getSrcExpr()->getType(),
2083 DstType = E->getType();
2084
2085 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
2086
2087 SrcType = CGF.getContext().getCanonicalType(SrcType);
2088 DstType = CGF.getContext().getCanonicalType(DstType);
2089 if (SrcType == DstType) return Src;
2090
2091 assert(SrcType->isVectorType() &&
2092 "ConvertVector source type must be a vector");
2093 assert(DstType->isVectorType() &&
2094 "ConvertVector destination type must be a vector");
2095
2096 llvm::Type *SrcTy = Src->getType();
2097 llvm::Type *DstTy = ConvertType(DstType);
2098
2099 // Ignore conversions like int -> uint.
2100 if (SrcTy == DstTy)
2101 return Src;
2102
2103 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
2104 DstEltType = DstType->castAs<VectorType>()->getElementType();
2105
2106 assert(SrcTy->isVectorTy() &&
2107 "ConvertVector source IR type must be a vector");
2108 assert(DstTy->isVectorTy() &&
2109 "ConvertVector destination IR type must be a vector");
2110
2111 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
2112 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
2113
2114 if (DstEltType->isBooleanType()) {
2115 assert((SrcEltTy->isFloatingPointTy() ||
2116 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
2117
2118 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
2119 if (SrcEltTy->isFloatingPointTy()) {
2120 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2121 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
2122 } else {
2123 return Builder.CreateICmpNE(Src, Zero, "tobool");
2124 }
2125 }
2126
2127 // We have the arithmetic types: real int/float.
2128 Value *Res = nullptr;
2129
2130 if (isa<llvm::IntegerType>(SrcEltTy)) {
2131 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
2132 if (isa<llvm::IntegerType>(DstEltTy))
2133 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
2134 else {
2135 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2136 if (InputSigned)
2137 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
2138 else
2139 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
2140 }
2141 } else if (isa<llvm::IntegerType>(DstEltTy)) {
2142 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
2143 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2144 if (DstEltType->isSignedIntegerOrEnumerationType())
2145 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
2146 else
2147 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
2148 } else {
2149 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
2150 "Unknown real conversion");
2151 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2152 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
2153 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
2154 else
2155 Res = Builder.CreateFPExt(Src, DstTy, "conv");
2156 }
2157
2158 return Res;
2159}
2160
2161Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
2162 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
2163 CGF.EmitIgnoredExpr(E->getBase());
2164 return CGF.emitScalarConstant(Constant, E);
2165 } else {
2166 Expr::EvalResult Result;
2168 llvm::APSInt Value = Result.Val.getInt();
2169 CGF.EmitIgnoredExpr(E->getBase());
2170 return Builder.getInt(Value);
2171 }
2172 }
2173
2174 llvm::Value *Result = EmitLoadOfLValue(E);
2175
2176 // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
2177 // debug info for the pointer, even if there is no variable associated with
2178 // the pointer's expression.
2179 if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
2180 if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Result)) {
2181 if (llvm::GetElementPtrInst *GEP =
2182 dyn_cast<llvm::GetElementPtrInst>(Load->getPointerOperand())) {
2183 if (llvm::Instruction *Pointer =
2184 dyn_cast<llvm::Instruction>(GEP->getPointerOperand())) {
2185 QualType Ty = E->getBase()->getType();
2186 if (!E->isArrow())
2187 Ty = CGF.getContext().getPointerType(Ty);
2188 CGF.getDebugInfo()->EmitPseudoVariable(Builder, Pointer, Ty);
2189 }
2190 }
2191 }
2192 }
2193 return Result;
2194}
2195
2196Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
2197 TestAndClearIgnoreResultAssign();
2198
2199 // Emit subscript expressions in rvalue context's. For most cases, this just
2200 // loads the lvalue formed by the subscript expr. However, we have to be
2201 // careful, because the base of a vector subscript is occasionally an rvalue,
2202 // so we can't get it as an lvalue.
2203 if (!E->getBase()->getType()->isVectorType() &&
2205 return EmitLoadOfLValue(E);
2206
2207 // Handle the vector case. The base must be a vector, the index must be an
2208 // integer value.
2209 Value *Base = Visit(E->getBase());
2210 Value *Idx = Visit(E->getIdx());
2211 QualType IdxTy = E->getIdx()->getType();
2212
2213 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
2214 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
2215
2216 return Builder.CreateExtractElement(Base, Idx, "vecext");
2217}
2218
2219Value *ScalarExprEmitter::VisitMatrixSingleSubscriptExpr(
2220 MatrixSingleSubscriptExpr *E) {
2221 TestAndClearIgnoreResultAssign();
2222
2223 auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2224 unsigned NumRows = MatrixTy->getNumRows();
2225 unsigned NumColumns = MatrixTy->getNumColumns();
2226
2227 // Row index
2228 Value *RowIdx = CGF.EmitMatrixIndexExpr(E->getRowIdx());
2229 llvm::MatrixBuilder MB(Builder);
2230
2231 // The row index must be in [0, NumRows)
2232 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2233 MB.CreateIndexAssumption(RowIdx, NumRows);
2234
2235 Value *FlatMatrix = Visit(E->getBase());
2236 llvm::Type *ElemTy = CGF.ConvertTypeForMem(MatrixTy->getElementType());
2237 auto *ResultTy = llvm::FixedVectorType::get(ElemTy, NumColumns);
2238 Value *RowVec = llvm::PoisonValue::get(ResultTy);
2239
2240 for (unsigned Col = 0; Col != NumColumns; ++Col) {
2241 Value *ColVal = llvm::ConstantInt::get(RowIdx->getType(), Col);
2242 bool IsMatrixRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
2243 LangOptions::MatrixMemoryLayout::MatrixRowMajor;
2244 Value *EltIdx = MB.CreateIndex(RowIdx, ColVal, NumRows, NumColumns,
2245 IsMatrixRowMajor, "matrix_row_idx");
2246 Value *Elt =
2247 Builder.CreateExtractElement(FlatMatrix, EltIdx, "matrix_elem");
2248 Value *Lane = llvm::ConstantInt::get(Builder.getInt32Ty(), Col);
2249 RowVec = Builder.CreateInsertElement(RowVec, Elt, Lane, "matrix_row_ins");
2250 }
2251
2252 return CGF.EmitFromMemory(RowVec, E->getType());
2253}
2254
2255Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
2256 TestAndClearIgnoreResultAssign();
2257
2258 // Handle the vector case. The base must be a vector, the index must be an
2259 // integer value.
2260 Value *RowIdx = CGF.EmitMatrixIndexExpr(E->getRowIdx());
2261 Value *ColumnIdx = CGF.EmitMatrixIndexExpr(E->getColumnIdx());
2262
2263 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2264 llvm::MatrixBuilder MB(Builder);
2265
2266 Value *Idx;
2267 unsigned NumCols = MatrixTy->getNumColumns();
2268 unsigned NumRows = MatrixTy->getNumRows();
2269 bool IsMatrixRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
2270 LangOptions::MatrixMemoryLayout::MatrixRowMajor;
2271 Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows, NumCols, IsMatrixRowMajor);
2272
2273 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2274 MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());
2275
2276 Value *Matrix = Visit(E->getBase());
2277
2278 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
2279 return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
2280}
2281
2282static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
2283 unsigned Off) {
2284 int MV = SVI->getMaskValue(Idx);
2285 if (MV == -1)
2286 return -1;
2287 return Off + MV;
2288}
2289
2290static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
2291 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
2292 "Index operand too large for shufflevector mask!");
2293 return C->getZExtValue();
2294}
2295
2296Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
2297 bool Ignore = TestAndClearIgnoreResultAssign();
2298 (void)Ignore;
2299 unsigned NumInitElements = E->getNumInits();
2300 assert((Ignore == false ||
2301 (NumInitElements == 0 && E->getType()->isVoidType())) &&
2302 "init list ignored");
2303
2304 // HLSL initialization lists in the AST are an expansion which can contain
2305 // side-effecting expressions wrapped in opaque value expressions. To properly
2306 // emit these we need to emit the opaque values before we emit the argument
2307 // expressions themselves. This is a little hacky, but it prevents us needing
2308 // to do a bigger AST-level change for a language feature that we need
2309 // deprecate in the near future. See related HLSL language proposals in the
2310 // proposals (https://github.com/microsoft/hlsl-specs/blob/main/proposals):
2311 // * 0005-strict-initializer-lists.md
2312 // * 0032-constructors.md
2313 if (CGF.getLangOpts().HLSL)
2315
2316 if (E->hadArrayRangeDesignator())
2317 CGF.ErrorUnsupported(E, "GNU array range designator extension");
2318
2319 llvm::VectorType *VType =
2320 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
2321
2322 if (!VType) {
2323 if (NumInitElements == 0) {
2324 // C++11 value-initialization for the scalar.
2325 return EmitNullValue(E->getType());
2326 }
2327 // We have a scalar in braces. Just use the first element.
2328 return Visit(E->getInit(0));
2329 }
2330
2331 if (isa<llvm::ScalableVectorType>(VType)) {
2332 if (NumInitElements == 0) {
2333 // C++11 value-initialization for the vector.
2334 return EmitNullValue(E->getType());
2335 }
2336
2337 if (NumInitElements == 1) {
2338 Expr *InitVector = E->getInit(0);
2339
2340 // Initialize from another scalable vector of the same type.
2341 if (InitVector->getType().getCanonicalType() ==
2343 return Visit(InitVector);
2344 }
2345
2346 llvm_unreachable("Unexpected initialization of a scalable vector!");
2347 }
2348
2349 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
2350
2351 // For column-major matrix types, we insert elements directly at their
2352 // column-major positions rather than inserting sequentially and shuffling.
2353 const ConstantMatrixType *ColMajorMT = nullptr;
2354 if (const auto *MT = E->getType()->getAs<ConstantMatrixType>();
2355 MT && CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
2356 LangOptions::MatrixMemoryLayout::MatrixColMajor)
2357 ColMajorMT = MT;
2358
2359 // Loop over initializers collecting the Value for each, and remembering
2360 // whether the source was swizzle (ExtVectorElementExpr). This will allow
2361 // us to fold the shuffle for the swizzle into the shuffle for the vector
2362 // initializer, since LLVM optimizers generally do not want to touch
2363 // shuffles.
2364 unsigned CurIdx = 0;
2365 bool VIsPoisonShuffle = false;
2366 llvm::Value *V = llvm::PoisonValue::get(VType);
2367 for (unsigned i = 0; i != NumInitElements; ++i) {
2368 Expr *IE = E->getInit(i);
2369 Value *Init = Visit(IE);
2370 SmallVector<int, 16> Args;
2371
2372 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
2373
2374 // Handle scalar elements. If the scalar initializer is actually one
2375 // element of a different vector of the same width, use shuffle instead of
2376 // extract+insert.
2377 if (!VVT) {
2378 if (isa<ExtVectorElementExpr>(IE)) {
2379 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
2380
2381 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
2382 ->getNumElements() == ResElts) {
2383 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
2384 Value *LHS = nullptr, *RHS = nullptr;
2385 if (CurIdx == 0) {
2386 // insert into poison -> shuffle (src, poison)
2387 // shufflemask must use an i32
2388 Args.push_back(getAsInt32(C, CGF.Int32Ty));
2389 Args.resize(ResElts, -1);
2390
2391 LHS = EI->getVectorOperand();
2392 RHS = V;
2393 VIsPoisonShuffle = true;
2394 } else if (VIsPoisonShuffle) {
2395 // insert into poison shuffle && size match -> shuffle (v, src)
2396 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
2397 for (unsigned j = 0; j != CurIdx; ++j)
2398 Args.push_back(getMaskElt(SVV, j, 0));
2399 Args.push_back(ResElts + C->getZExtValue());
2400 Args.resize(ResElts, -1);
2401
2402 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
2403 RHS = EI->getVectorOperand();
2404 VIsPoisonShuffle = false;
2405 }
2406 if (!Args.empty()) {
2407 V = Builder.CreateShuffleVector(LHS, RHS, Args);
2408 ++CurIdx;
2409 continue;
2410 }
2411 }
2412 }
2413 unsigned InsertIdx =
2414 ColMajorMT
2415 ? ColMajorMT->mapRowMajorToColumnMajorFlattenedIndex(CurIdx)
2416 : CurIdx;
2417 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(InsertIdx),
2418 "vecinit");
2419 VIsPoisonShuffle = false;
2420 ++CurIdx;
2421 continue;
2422 }
2423
2424 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
2425
2426 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
2427 // input is the same width as the vector being constructed, generate an
2428 // optimized shuffle of the swizzle input into the result.
2429 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
2430 if (isa<ExtVectorElementExpr>(IE)) {
2431 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
2432 Value *SVOp = SVI->getOperand(0);
2433 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
2434
2435 if (OpTy->getNumElements() == ResElts) {
2436 for (unsigned j = 0; j != CurIdx; ++j) {
2437 // If the current vector initializer is a shuffle with poison, merge
2438 // this shuffle directly into it.
2439 if (VIsPoisonShuffle) {
2440 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
2441 } else {
2442 Args.push_back(j);
2443 }
2444 }
2445 for (unsigned j = 0, je = InitElts; j != je; ++j)
2446 Args.push_back(getMaskElt(SVI, j, Offset));
2447 Args.resize(ResElts, -1);
2448
2449 if (VIsPoisonShuffle)
2450 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
2451
2452 Init = SVOp;
2453 }
2454 }
2455
2456 // Extend init to result vector length, and then shuffle its contribution
2457 // to the vector initializer into V.
2458 if (Args.empty()) {
2459 for (unsigned j = 0; j != InitElts; ++j)
2460 Args.push_back(j);
2461 Args.resize(ResElts, -1);
2462 Init = Builder.CreateShuffleVector(Init, Args, "vext");
2463
2464 Args.clear();
2465 for (unsigned j = 0; j != CurIdx; ++j)
2466 Args.push_back(j);
2467 for (unsigned j = 0; j != InitElts; ++j)
2468 Args.push_back(j + Offset);
2469 Args.resize(ResElts, -1);
2470 }
2471
2472 // If V is poison, make sure it ends up on the RHS of the shuffle to aid
2473 // merging subsequent shuffles into this one.
2474 if (CurIdx == 0)
2475 std::swap(V, Init);
2476 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
2477 VIsPoisonShuffle = isa<llvm::PoisonValue>(Init);
2478 CurIdx += InitElts;
2479 }
2480
2481 // FIXME: evaluate codegen vs. shuffling against constant null vector.
2482 // Emit remaining default initializers.
2483 llvm::Type *EltTy = VType->getElementType();
2484
2485 // Emit remaining default initializers
2486 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
2487 unsigned InsertIdx =
2488 ColMajorMT ? ColMajorMT->mapRowMajorToColumnMajorFlattenedIndex(CurIdx)
2489 : CurIdx;
2490 Value *Idx = Builder.getInt32(InsertIdx);
2491 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
2492 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
2493 }
2494
2495 return V;
2496}
2497
2499 return !D->isWeak();
2500}
2501
2502static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E) {
2503 E = E->IgnoreParens();
2504
2505 if (const auto *UO = dyn_cast<UnaryOperator>(E))
2506 if (UO->getOpcode() == UO_Deref)
2507 return CGF.isPointerKnownNonNull(UO->getSubExpr());
2508
2509 if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
2510 return isDeclRefKnownNonNull(CGF, DRE->getDecl());
2511
2512 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
2513 if (isa<FieldDecl>(ME->getMemberDecl()))
2514 return true;
2515 return isDeclRefKnownNonNull(CGF, ME->getMemberDecl());
2516 }
2517
2518 // Array subscripts? Anything else?
2519
2520 return false;
2521}
2522
 // Determines whether this pointer-typed (signable) expression is
 // statically known to be non-null.
 // NOTE(review): the function's signature sits on a line missing from this
 // view; the body opens with its precondition below.
2524 assert(E->getType()->isSignableType(getContext()));
2525
2526 E = E->IgnoreParens();
2527
 // 'this' is always assumed non-null inside a member function.
2528 if (isa<CXXThisExpr>(E))
2529 return true;
2530
 // &lvalue is non-null whenever the lvalue names valid storage.
2531 if (const auto *UO = dyn_cast<UnaryOperator>(E))
2532 if (UO->getOpcode() == UO_AddrOf)
2533 return isLValueKnownNonNull(*this, UO->getSubExpr());
2534
 // Function/array decay yields a pointer to the decayed entity, so the
 // result is non-null exactly when the underlying lvalue is.
2535 if (const auto *CE = dyn_cast<CastExpr>(E))
2536 if (CE->getCastKind() == CK_FunctionToPointerDecay ||
2537 CE->getCastKind() == CK_ArrayToPointerDecay)
2538 return isLValueKnownNonNull(*this, CE->getSubExpr());
2539
2540 // Maybe honor __nonnull?
2541
 // Conservative default: cannot prove non-null.
2542 return false;
2543}
2544
 // Decides whether the operand of this class-hierarchy cast may be null and
 // therefore needs a runtime null check before adjusting the pointer.
 // Returns false for the cases where the operand is known valid.
 // NOTE(review): the function's signature sits on a line missing from this
 // view.
2546 const Expr *E = CE->getSubExpr();
2547
 // An "unchecked" derived-to-base conversion is only produced for operands
 // already known valid, so no check is emitted for it.
2548 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
2549 return false;
2550
2551 if (isa<CXXThisExpr>(E->IgnoreParens())) {
2552 // We always assume that 'this' is never null.
2553 return false;
2554 }
2555
2556 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2557 // And that glvalue casts are never null.
2558 if (ICE->isGLValue())
2559 return false;
2560 }
2561
 // Otherwise the operand may be null and the cast must be guarded.
2562 return true;
2563}
2564
2565// RHS is an aggregate type
2567 QualType DestTy, SourceLocation Loc) {
 // Flatten the aggregate source into an ordered list of scalar lvalues;
 // each is then loaded, converted to the destination element type, and
 // written into the destination value.
 // NOTE(review): the signature line is missing from this view; it takes
 // (CGF, SrcVal, DestTy, Loc) based on the call site in VisitCastExpr.
2568 SmallVector<LValue, 16> LoadList;
2569 CGF.FlattenAccessAndTypeLValue(SrcVal, LoadList);
2570 // Dest is either a vector, constant matrix, or a builtin
2571 // if its a vector create a temp alloca to store into and return that
2572 if (auto *VecTy = DestTy->getAs<VectorType>()) {
2573 assert(LoadList.size() >= VecTy->getNumElements() &&
2574 "Flattened type on RHS must have the same number or more elements "
2575 "than vector on LHS.");
2576 llvm::Value *V = CGF.Builder.CreateLoad(
2577 CGF.CreateIRTempWithoutCast(DestTy, "flatcast.tmp"));
2578 // write to V.
 // Convert each flattened scalar to the vector's element type and insert
 // it at the corresponding lane.
2579 for (unsigned I = 0, E = VecTy->getNumElements(); I < E; I++) {
2580 RValue RVal = CGF.EmitLoadOfLValue(LoadList[I], Loc);
2581 assert(RVal.isScalar() &&
2582 "All flattened source values should be scalars.");
2583 llvm::Value *Cast =
2584 CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[I].getType(),
2585 VecTy->getElementType(), Loc);
2586 V = CGF.Builder.CreateInsertElement(V, Cast, I);
2587 }
2588 return V;
2589 }
2590 if (auto *MatTy = DestTy->getAs<ConstantMatrixType>()) {
2591 assert(LoadList.size() >= MatTy->getNumElementsFlattened() &&
2592 "Flattened type on RHS must have the same number or more elements "
2593 "than vector on LHS.");
2594
 // NOTE(review): the RHS of this comparison (presumably
 // LangOptions::MatrixMemoryLayout::MatrixRowMajor, as used elsewhere in
 // this file) sits on a line missing from this view.
2595 bool IsRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
2597
2598 llvm::Value *V = CGF.Builder.CreateLoad(
2599 CGF.CreateIRTempWithoutCast(DestTy, "flatcast.tmp"));
2600 // V is an allocated temporary for constructing the matrix.
2601 for (unsigned Row = 0, RE = MatTy->getNumRows(); Row < RE; Row++) {
2602 for (unsigned Col = 0, CE = MatTy->getNumColumns(); Col < CE; Col++) {
2603 // When interpreted as a matrix, \p LoadList is *always* row-major order
2604 // regardless of the default matrix memory layout.
2605 unsigned LoadIdx = MatTy->getRowMajorFlattenedIndex(Row, Col);
2606 RValue RVal = CGF.EmitLoadOfLValue(LoadList[LoadIdx], Loc);
2607 assert(RVal.isScalar() &&
2608 "All flattened source values should be scalars.");
2609 llvm::Value *Cast = CGF.EmitScalarConversion(
2610 RVal.getScalarVal(), LoadList[LoadIdx].getType(),
2611 MatTy->getElementType(), Loc);
 // Map (Row, Col) to the destination's flattened index under the
 // configured memory layout before inserting.
2612 unsigned MatrixIdx = MatTy->getFlattenedIndex(Row, Col, IsRowMajor);
2613 V = CGF.Builder.CreateInsertElement(V, Cast, MatrixIdx);
2614 }
2615 }
2616 return V;
2617 }
2618 // if its a builtin just do an extract element or load.
2619 assert(DestTy->isBuiltinType() &&
2620 "Destination type must be a vector, matrix, or builtin type.");
2621 RValue RVal = CGF.EmitLoadOfLValue(LoadList[0], Loc);
2622 assert(RVal.isScalar() && "All flattened source values should be scalars.");
2623 return CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[0].getType(),
2624 DestTy, Loc);
2625}
2626
2627// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
2628// have to handle a more broad range of conversions than explicit casts, as they
2629// handle things like function to ptr-to-function decay etc.
2630Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
 // Track the cast currently being emitted; the scope_exit restores the
 // previous value when this visit unwinds (emission below can recurse
 // into nested casts).
2631 llvm::scope_exit RestoreCurCast(
2632 [this, Prev = CGF.CurCast] { CGF.CurCast = Prev; });
2633 CGF.CurCast = CE;
2634
2635 Expr *E = CE->getSubExpr();
2636 QualType DestTy = CE->getType();
2637 CastKind Kind = CE->getCastKind();
2638 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);
2639
2640 // These cases are generally not written to ignore the result of
2641 // evaluating their sub-expressions, so we clear this now.
2642 bool Ignored = TestAndClearIgnoreResultAssign();
2643
2644 // Since almost all cast kinds apply to scalars, this switch doesn't have
2645 // a default case, so the compiler will warn on a missing case. The cases
2646 // are in the same order as in the CastKind enum.
2647 switch (Kind) {
2648 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2649 case CK_BuiltinFnToFnPtr:
2650 llvm_unreachable("builtin functions are handled elsewhere");
2651
2652 case CK_LValueBitCast:
2653 case CK_ObjCObjectLValueCast: {
 // Reinterpret the storage of the lvalue as the destination type and load.
2654 Address Addr = EmitLValue(E).getAddress();
2655 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2656 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
2657 return EmitLoadOfLValue(LV, CE->getExprLoc());
2658 }
2659
2660 case CK_LValueToRValueBitCast: {
2661 LValue SourceLVal = CGF.EmitLValue(E);
2662 Address Addr =
2663 SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy));
2664 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
 // The load type-puns the storage, so use may-alias TBAA.
2665 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2666 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2667 }
2668
2669 case CK_CPointerToObjCPointerCast:
2670 case CK_BlockPointerToObjCPointerCast:
2671 case CK_AnyPointerToBlockPointerCast:
2672 case CK_BitCast: {
2673 Value *Src = Visit(E);
2674 llvm::Type *SrcTy = Src->getType();
2675 llvm::Type *DstTy = ConvertType(DestTy);
2676
2677 // FIXME: this is a gross but seemingly necessary workaround for an issue
2678 // manifesting when a target uses a non-default AS for indirect sret args,
2679 // but the source HLL is generic, wherein a valid C-cast or reinterpret_cast
2680 // on the address of a local struct that gets returned by value yields an
2681 // invalid bitcast from the a pointer to the IndirectAS to a pointer to the
2682 // DefaultAS. We can only do this subversive thing because sret args are
2683 // manufactured and them residing in the IndirectAS is a target specific
2684 // detail, and doing an AS cast here still retains the semantics the user
2685 // expects. It is desirable to remove this iff a better solution is found.
2686 if (auto A = dyn_cast<llvm::Argument>(Src); A && A->hasStructRetAttr())
2687 return CGF.performAddrSpaceCast(Src, DstTy);
2688
2689 // FIXME: Similarly to the sret case above, we need to handle BitCasts that
2690 // involve implicit address space conversions. This arises when the source
2691 // language lacks explicit address spaces, but the target's data layout
2692 // assigns different address spaces (e.g., program address space for
2693 // function pointers). Since Sema operates on Clang types (which don't carry
2694 // this information) and selects CK_BitCast, we must detect the address
2695 // space mismatch here in CodeGen when lowering to LLVM types. The most
2696 // common case is casting function pointers (which get the program AS from
2697 // the data layout) to/from object pointers (which use the default AS).
2698 // Ideally, this would be resolved at a higher level, but that would require
2699 // exposing data layout details to Sema.
2700 if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
2701 SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
2702 return CGF.performAddrSpaceCast(Src, DstTy);
2703 }
2704
2705 assert(
2706 (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
2707 SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
2708 "Address-space cast must be used to convert address spaces");
2709
 // CFI: an explicit cast between unrelated pointer types is checked here.
 // NOTE(review): the EmitVTablePtrCheckForCast call line and one
 // continuation line are missing from this view of the file.
2710 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2711 if (auto *PT = DestTy->getAs<PointerType>()) {
2713 PT->getPointeeType(),
2714 Address(Src,
2716 E->getType()->castAs<PointerType>()->getPointeeType()),
2717 CGF.getPointerAlign()),
2718 /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
2719 CE->getBeginLoc());
2720 }
2721 }
2722
2723 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2724 const QualType SrcType = E->getType();
2725
2726 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2727 // Casting to pointer that could carry dynamic information (provided by
2728 // invariant.group) requires launder.
2729 Src = Builder.CreateLaunderInvariantGroup(Src);
2730 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2731 // Casting to pointer that does not carry dynamic information (provided
2732 // by invariant.group) requires stripping it. Note that we don't do it
2733 // if the source could not be dynamic type and destination could be
2734 // dynamic because dynamic information is already laundered. It is
2735 // because launder(strip(src)) == launder(src), so there is no need to
2736 // add extra strip before launder.
2737 Src = Builder.CreateStripInvariantGroup(Src);
2738 }
2739 }
2740
2741 // Update heapallocsite metadata when there is an explicit pointer cast.
2742 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
2743 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
2744 !isa<CastExpr>(E)) {
2745 QualType PointeeType = DestTy->getPointeeType();
2746 if (!PointeeType.isNull())
2747 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
2748 CE->getExprLoc());
2749 }
2750 }
2751
2752 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2753 // same element type, use the llvm.vector.insert intrinsic to perform the
2754 // bitcast.
2755 if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2756 if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2757 // If we are casting a fixed i8 vector to a scalable i1 predicate
2758 // vector, use a vector insert and bitcast the result.
2759 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
2760 FixedSrcTy->getElementType()->isIntegerTy(8)) {
2761 ScalableDstTy = llvm::ScalableVectorType::get(
2762 FixedSrcTy->getElementType(),
2763 llvm::divideCeil(
2764 ScalableDstTy->getElementCount().getKnownMinValue(), 8));
2765 }
2766 if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
2767 llvm::Value *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
2768 llvm::Value *Result = Builder.CreateInsertVector(
2769 ScalableDstTy, PoisonVec, Src, uint64_t(0), "cast.scalable");
2770 ScalableDstTy = cast<llvm::ScalableVectorType>(
2771 llvm::VectorType::getWithSizeAndScalar(ScalableDstTy, DstTy));
2772 if (Result->getType() != ScalableDstTy)
2773 Result = Builder.CreateBitCast(Result, ScalableDstTy);
2774 if (Result->getType() != DstTy)
2775 Result = Builder.CreateExtractVector(DstTy, Result, uint64_t(0));
2776 return Result;
2777 }
2778 }
2779 }
2780
2781 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2782 // same element type, use the llvm.vector.extract intrinsic to perform the
2783 // bitcast.
2784 if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
2785 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2786 // If we are casting a scalable i1 predicate vector to a fixed i8
2787 // vector, bitcast the source and use a vector extract.
2788 if (ScalableSrcTy->getElementType()->isIntegerTy(1) &&
2789 FixedDstTy->getElementType()->isIntegerTy(8)) {
2790 if (!ScalableSrcTy->getElementCount().isKnownMultipleOf(8)) {
2791 ScalableSrcTy = llvm::ScalableVectorType::get(
2792 ScalableSrcTy->getElementType(),
2793 llvm::alignTo<8>(
2794 ScalableSrcTy->getElementCount().getKnownMinValue()));
2795 llvm::Value *ZeroVec = llvm::Constant::getNullValue(ScalableSrcTy);
2796 Src = Builder.CreateInsertVector(ScalableSrcTy, ZeroVec, Src,
2797 uint64_t(0));
2798 }
2799
2800 ScalableSrcTy = llvm::ScalableVectorType::get(
2801 FixedDstTy->getElementType(),
2802 ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
2803 Src = Builder.CreateBitCast(Src, ScalableSrcTy);
2804 }
2805 if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType())
2806 return Builder.CreateExtractVector(DstTy, Src, uint64_t(0),
2807 "cast.fixed");
2808 }
2809 }
2810
2811 // Perform VLAT <-> VLST bitcast through memory.
2812 // TODO: since the llvm.vector.{insert,extract} intrinsics
2813 // require the element types of the vectors to be the same, we
2814 // need to keep this around for bitcasts between VLAT <-> VLST where
2815 // the element types of the vectors are not the same, until we figure
2816 // out a better way of doing these casts.
 // NOTE(review): the remainder of this condition (orig lines 2818-2820)
 // is missing from this view of the file.
2817 if ((isa<llvm::FixedVectorType>(SrcTy) &&
2821 Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
2822 LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
2823 CGF.EmitStoreOfScalar(Src, LV);
2824 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2825 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2826 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2827 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2828 }
2829
2830 llvm::Value *Result = Builder.CreateBitCast(Src, DstTy);
2831 return CGF.authPointerToPointerCast(Result, E->getType(), DestTy);
2832 }
2833 case CK_AddressSpaceConversion: {
2834 Expr::EvalResult Result;
2835 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2836 Result.Val.isNullPointer()) {
2837 // If E has side effect, it is emitted even if its final result is a
2838 // null pointer. In that case, a DCE pass should be able to
2839 // eliminate the useless instructions emitted during translating E.
2840 if (Result.HasSideEffects)
2841 Visit(E);
2843 ConvertType(DestTy)), DestTy);
2844 }
2845 // Since target may map different address spaces in AST to the same address
2846 // space, an address space conversion may end up as a bitcast.
2847 return CGF.performAddrSpaceCast(Visit(E), ConvertType(DestTy));
2848 }
2849 case CK_AtomicToNonAtomic:
2850 case CK_NonAtomicToAtomic:
2851 case CK_UserDefinedConversion:
2852 return Visit(E);
2853
2854 case CK_NoOp: {
 // A no-op cast may still change volatility, which forces a real load.
2855 return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE) : Visit(E);
2856 }
2857
2858 case CK_BaseToDerived: {
2859 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2860 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2861
2863 Address Derived =
2864 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2865 CE->path_begin(), CE->path_end(),
2867
2868 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2869 // performed and the object is not of the derived type.
2870 if (CGF.sanitizePerformTypeCheck())
2872 Derived, DestTy->getPointeeType());
2873
2874 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2875 CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
2876 /*MayBeNull=*/true,
2878 CE->getBeginLoc());
2879
2880 return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType());
2881 }
2882 case CK_UncheckedDerivedToBase:
2883 case CK_DerivedToBase: {
2884 // The EmitPointerWithAlignment path does this fine; just discard
2885 // the alignment.
2887 CE->getType()->getPointeeType());
2888 }
2889
2890 case CK_Dynamic: {
2892 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2893 return CGF.EmitDynamicCast(V, DCE);
2894 }
2895
2896 case CK_ArrayToPointerDecay:
2898 CE->getType()->getPointeeType());
2899 case CK_FunctionToPointerDecay:
2900 return EmitLValue(E).getPointer(CGF);
2901
2902 case CK_NullToPointer:
 // Emit the operand only when it must be visited for its side effects.
2903 if (MustVisitNullValue(E))
2904 CGF.EmitIgnoredExpr(E);
2905
2906 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2907 DestTy);
2908
2909 case CK_NullToMemberPointer: {
2910 if (MustVisitNullValue(E))
2911 CGF.EmitIgnoredExpr(E);
2912
2913 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2914 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2915 }
2916
2917 case CK_ReinterpretMemberPointer:
2918 case CK_BaseToDerivedMemberPointer:
2919 case CK_DerivedToBaseMemberPointer: {
2920 Value *Src = Visit(E);
2921
2922 // Note that the AST doesn't distinguish between checked and
2923 // unchecked member pointer conversions, so we always have to
2924 // implement checked conversions here. This is inefficient when
2925 // actual control flow may be required in order to perform the
2926 // check, which it is for data member pointers (but not member
2927 // function pointers on Itanium and ARM).
2928 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2929 }
2930
2931 case CK_ARCProduceObject:
2932 return CGF.EmitARCRetainScalarExpr(E);
2933 case CK_ARCConsumeObject:
2934 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2935 case CK_ARCReclaimReturnedObject:
2936 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2937 case CK_ARCExtendBlockObject:
2938 return CGF.EmitARCExtendBlockObject(E);
2939
2940 case CK_CopyAndAutoreleaseBlockObject:
2941 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2942
2943 case CK_FloatingRealToComplex:
2944 case CK_FloatingComplexCast:
2945 case CK_IntegralRealToComplex:
2946 case CK_IntegralComplexCast:
2947 case CK_IntegralComplexToFloatingComplex:
2948 case CK_FloatingComplexToIntegralComplex:
2949 case CK_ConstructorConversion:
2950 case CK_ToUnion:
2951 case CK_HLSLArrayRValue:
2952 llvm_unreachable("scalar cast to non-scalar value");
2953
2954 case CK_LValueToRValue:
2955 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2956 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2957 return Visit(E);
2958
2959 case CK_IntegralToPointer: {
2960 Value *Src = Visit(E);
2961
2962 // First, convert to the correct width so that we control the kind of
2963 // extension.
2964 auto DestLLVMTy = ConvertType(DestTy);
2965 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2966 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2967 llvm::Value* IntResult =
2968 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2969
2970 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2971
2972 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2973 // Going from integer to pointer that could be dynamic requires reloading
2974 // dynamic information from invariant.group.
2975 if (DestTy.mayBeDynamicClass())
2976 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2977 }
2978
2979 IntToPtr = CGF.authPointerToPointerCast(IntToPtr, E->getType(), DestTy);
2980 return IntToPtr;
2981 }
2982 case CK_PointerToIntegral: {
2983 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2984 auto *PtrExpr = Visit(E);
2985
2986 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2987 const QualType SrcType = E->getType();
2988
2989 // Casting to integer requires stripping dynamic information as it does
2990 // not carries it.
2991 if (SrcType.mayBeDynamicClass())
2992 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2993 }
2994
2995 PtrExpr = CGF.authPointerToPointerCast(PtrExpr, E->getType(), DestTy);
2996 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2997 }
2998 case CK_ToVoid: {
2999 CGF.EmitIgnoredExpr(E);
3000 return nullptr;
3001 }
3002 case CK_MatrixCast: {
 // Matrix casts are lowered through the generic scalar-conversion path.
3003 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
3004 CE->getExprLoc());
3005 }
3006 // CK_HLSLAggregateSplatCast only handles splatting to vectors from a vec1
3007 // Casts were inserted in Sema to Cast the Src Expr to a Scalar and
3008 // To perform any necessary Scalar Cast, so this Cast can be handled
3009 // by the regular Vector Splat cast code.
3010 case CK_HLSLAggregateSplatCast:
3011 case CK_VectorSplat: {
3012 llvm::Type *DstTy = ConvertType(DestTy);
3013 Value *Elt = Visit(E);
3014 // Splat the element across to all elements
3015 llvm::ElementCount NumElements =
3016 cast<llvm::VectorType>(DstTy)->getElementCount();
3017 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
3018 }
3019
3020 case CK_FixedPointCast:
3021 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
3022 CE->getExprLoc());
3023
3024 case CK_FixedPointToBoolean:
3025 assert(E->getType()->isFixedPointType() &&
3026 "Expected src type to be fixed point type");
3027 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
3028 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
3029 CE->getExprLoc());
3030
3031 case CK_FixedPointToIntegral:
3032 assert(E->getType()->isFixedPointType() &&
3033 "Expected src type to be fixed point type");
3034 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
3035 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
3036 CE->getExprLoc());
3037
3038 case CK_IntegralToFixedPoint:
3039 assert(E->getType()->isIntegerType() &&
3040 "Expected src type to be an integer");
3041 assert(DestTy->isFixedPointType() &&
3042 "Expected dest type to be fixed point type");
3043 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
3044 CE->getExprLoc());
3045
3046 case CK_IntegralCast: {
3047 if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
3048 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
3049 return Builder.CreateIntCast(Visit(E), ConvertType(DestTy),
3051 "conv");
3052 }
3053 ScalarConversionOpts Opts;
3054 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
 // Only sanitize truly implicit conversions; conversions that are part
 // of an explicit cast were requested by the user.
3055 if (!ICE->isPartOfExplicitCast())
3056 Opts = ScalarConversionOpts(CGF.SanOpts);
3057 }
3058 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
3059 CE->getExprLoc(), Opts);
3060 }
3061 case CK_IntegralToFloating: {
3062 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
3063 // TODO: Support constrained FP intrinsics.
3064 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
3065 if (SrcElTy->isSignedIntegerOrEnumerationType())
3066 return Builder.CreateSIToFP(Visit(E), ConvertType(DestTy), "conv");
3067 return Builder.CreateUIToFP(Visit(E), ConvertType(DestTy), "conv");
3068 }
3069 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
3070 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
3071 CE->getExprLoc());
3072 }
3073 case CK_FloatingToIntegral: {
3074 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
3075 // TODO: Support constrained FP intrinsics.
3076 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
3077 if (DstElTy->isSignedIntegerOrEnumerationType())
3078 return Builder.CreateFPToSI(Visit(E), ConvertType(DestTy), "conv");
3079 return Builder.CreateFPToUI(Visit(E), ConvertType(DestTy), "conv");
3080 }
3081 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
3082 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
3083 CE->getExprLoc());
3084 }
3085 case CK_FloatingCast: {
3086 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
3087 // TODO: Support constrained FP intrinsics.
3088 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
3089 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
 // BuiltinType kinds for floating types are ordered by increasing rank,
 // so a smaller destination kind means a truncating conversion.
3090 if (DstElTy->castAs<BuiltinType>()->getKind() <
3091 SrcElTy->castAs<BuiltinType>()->getKind())
3092 return Builder.CreateFPTrunc(Visit(E), ConvertType(DestTy), "conv");
3093 return Builder.CreateFPExt(Visit(E), ConvertType(DestTy), "conv");
3094 }
3095 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
3096 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
3097 CE->getExprLoc());
3098 }
3099 case CK_FixedPointToFloating:
3100 case CK_FloatingToFixedPoint: {
3101 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
3102 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
3103 CE->getExprLoc());
3104 }
3105 case CK_BooleanToSignedIntegral: {
3106 ScalarConversionOpts Opts;
3107 Opts.TreatBooleanAsSigned = true;
3108 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
3109 CE->getExprLoc(), Opts);
3110 }
3111 case CK_IntegralToBoolean:
3112 return EmitIntToBoolConversion(Visit(E));
3113 case CK_PointerToBoolean:
3114 return EmitPointerToBoolConversion(Visit(E), E->getType());
3115 case CK_FloatingToBoolean: {
3116 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
3117 return EmitFloatToBoolConversion(Visit(E));
3118 }
3119 case CK_MemberPointerToBoolean: {
3120 llvm::Value *MemPtr = Visit(E);
3121 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
3122 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
3123 }
3124
3125 case CK_FloatingComplexToReal:
3126 case CK_IntegralComplexToReal:
3127 return CGF.EmitComplexExpr(E, false, true).first;
3128
3129 case CK_FloatingComplexToBoolean:
3130 case CK_IntegralComplexToBoolean: {
3132
3133 // TODO: kill this function off, inline appropriate case here
3134 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
3135 CE->getExprLoc());
3136 }
3137
3138 case CK_ZeroToOCLOpaqueType: {
3139 assert((DestTy->isEventT() || DestTy->isQueueT() ||
3140 DestTy->isOCLIntelSubgroupAVCType()) &&
3141 "CK_ZeroToOCLEvent cast on non-event type");
3142 return llvm::Constant::getNullValue(ConvertType(DestTy));
3143 }
3144
3145 case CK_IntToOCLSampler:
3146 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
3147
3148 case CK_HLSLVectorTruncation: {
3149 assert((DestTy->isVectorType() || DestTy->isBuiltinType()) &&
3150 "Destination type must be a vector or builtin type.");
3151 Value *Vec = Visit(E);
3152 if (auto *VecTy = DestTy->getAs<VectorType>()) {
 // Keep the leading NumElts lanes of the source vector.
3153 SmallVector<int> Mask;
3154 unsigned NumElts = VecTy->getNumElements();
3155 for (unsigned I = 0; I != NumElts; ++I)
3156 Mask.push_back(I);
3157
3158 return Builder.CreateShuffleVector(Vec, Mask, "trunc");
3159 }
3160 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy);
3161 return Builder.CreateExtractElement(Vec, Zero, "cast.vtrunc");
3162 }
3163 case CK_HLSLMatrixTruncation: {
3164 assert((DestTy->isMatrixType() || DestTy->isBuiltinType()) &&
3165 "Destination type must be a matrix or builtin type.");
3166 Value *Mat = Visit(E);
3167 if (auto *MatTy = DestTy->getAs<ConstantMatrixType>()) {
3168 SmallVector<int> Mask(MatTy->getNumElementsFlattened());
3169 unsigned NumCols = MatTy->getNumColumns();
3170 unsigned NumRows = MatTy->getNumRows();
3171 auto *SrcMatTy = E->getType()->getAs<ConstantMatrixType>();
3172 assert(SrcMatTy && "Source type must be a matrix type.");
3173 assert(NumRows <= SrcMatTy->getNumRows());
3174 assert(NumCols <= SrcMatTy->getNumColumns());
3175 bool IsRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
3176 LangOptions::MatrixMemoryLayout::MatrixRowMajor;
 // Select the top-left NumRows x NumCols submatrix, remapping each
 // (row, col) through the flattened-index helpers of both types.
3177 for (unsigned R = 0; R < NumRows; R++)
3178 for (unsigned C = 0; C < NumCols; C++)
3179 Mask[MatTy->getFlattenedIndex(R, C, IsRowMajor)] =
3180 SrcMatTy->getFlattenedIndex(R, C, IsRowMajor);
3181
3182 return Builder.CreateShuffleVector(Mat, Mask, "trunc");
3183 }
3184 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy);
3185 return Builder.CreateExtractElement(Mat, Zero, "cast.mtrunc");
3186 }
3187 case CK_HLSLElementwiseCast: {
3188 RValue RV = CGF.EmitAnyExpr(E);
3189 SourceLocation Loc = CE->getExprLoc();
3190
3191 Address SrcAddr = Address::invalid();
3192
 // Non-aggregate sources are spilled to a temporary so the source can be
 // flattened in memory by EmitHLSLElementwiseCast.
3193 if (RV.isAggregate()) {
3194 SrcAddr = RV.getAggregateAddress();
3195 } else {
3196 SrcAddr = CGF.CreateMemTemp(E->getType(), "hlsl.ewcast.src");
3197 LValue TmpLV = CGF.MakeAddrLValue(SrcAddr, E->getType());
3198 CGF.EmitStoreThroughLValue(RV, TmpLV);
3199 }
3200
3201 LValue SrcVal = CGF.MakeAddrLValue(SrcAddr, E->getType());
3202 return EmitHLSLElementwiseCast(CGF, SrcVal, DestTy, Loc);
3203 }
3204
3205 } // end of switch
3206
3207 llvm_unreachable("unknown scalar cast");
3208}
3209
3210Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
3211 CodeGenFunction::StmtExprEvaluation eval(CGF);
3212 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
3213 !E->getType()->isVoidType());
3214 if (!RetAlloca.isValid())
3215 return nullptr;
3216 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
3217 E->getExprLoc());
3218}
3219
3220Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
3221 CodeGenFunction::RunCleanupsScope Scope(CGF);
3222 Value *V = Visit(E->getSubExpr());
3223 // Defend against dominance problems caused by jumps out of expression
3224 // evaluation through the shared cleanup block.
3225 Scope.ForceCleanup({&V});
3226 return V;
3227}
3228
3229//===----------------------------------------------------------------------===//
3230// Unary Operators
3231//===----------------------------------------------------------------------===//
3232
3234 llvm::Value *InVal, bool IsInc,
3235 FPOptions FPFeatures) {
 // Model `++x` / `--x` as `x + 1` / `x - 1` so the ordinary binary-operator
 // emission paths (including overflow-checked emission) can be reused.
3236 BinOpInfo BinOp;
3237 BinOp.LHS = InVal;
 // RHS is always the (unsigned) constant 1; the opcode encodes direction.
3238 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
3239 BinOp.Ty = E->getType();
3240 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
3241 BinOp.FPFeatures = FPFeatures;
3242 BinOp.E = E;
3243 return BinOp;
3244}
3245
// Emit the add/sub for an increment/decrement of InVal, selecting plain
// (wrapping), nsw, or sanitizer-checked arithmetic based on the type's
// overflow behavior and the active sanitizers.  The [[fallthrough]] chain
// is deliberate: each case handles only its "no check needed" fast paths
// and defers the remainder to the stricter case below it.
3246llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
3247 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
3248 // Treat positive amount as unsigned to support inc of i1 (needed for
3249 // unsigned _BitInt(1)).
3250 llvm::Value *Amount =
3251 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, !IsInc);
3252 StringRef Name = IsInc ? "inc" : "dec";
3253 QualType Ty = E->getType();
3254 const bool isSigned = Ty->isSignedIntegerOrEnumerationType();
 // Pick the sanitizer matching the operand's signedness.
3255 const bool hasSan =
3256 isSigned ? CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)
3257 : CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow);
3258
3259 switch (getOverflowBehaviorConsideringType(CGF, Ty)) {
 // Wrapping types never need a check or nsw flag.
3260 case LangOptions::OB_Wrap:
3261 return Builder.CreateAdd(InVal, Amount, Name);
3262 case LangOptions::OB_SignedAndDefined:
3263 if (!hasSan)
3264 return Builder.CreateAdd(InVal, Amount, Name);
3265 [[fallthrough]];
3266 case LangOptions::OB_Unset:
3267 if (!E->canOverflow())
3268 return Builder.CreateAdd(InVal, Amount, Name);
3269 if (!hasSan)
3270 return isSigned ? Builder.CreateNSWAdd(InVal, Amount, Name)
3271 : Builder.CreateAdd(InVal, Amount, Name);
3272 [[fallthrough]];
3273 case LangOptions::OB_Trap:
3274 if (!Ty->getAs<OverflowBehaviorType>() && !E->canOverflow())
3275 return Builder.CreateAdd(InVal, Amount, Name);
 // Reuse the binary-operator machinery to emit the checked add/sub.
3276 BinOpInfo Info = createBinOpInfoFromIncDec(
3277 E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts()));
3278 if (CanElideOverflowCheck(CGF.getContext(), Info))
3279 return isSigned ? Builder.CreateNSWAdd(InVal, Amount, Name)
3280 : Builder.CreateAdd(InVal, Amount, Name);
3281 return EmitOverflowCheckedBinOp(Info);
3282 }
3283 llvm_unreachable("Unknown OverflowBehaviorKind");
3284}
3285
3286namespace {
3287/// Handles check and update for lastprivate conditional variables.
3288class OMPLastprivateConditionalUpdateRAII {
3289private:
3290 CodeGenFunction &CGF;
3291 const UnaryOperator *E;
3292
3293public:
3294 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
3295 const UnaryOperator *E)
3296 : CGF(CGF), E(E) {}
3297 ~OMPLastprivateConditionalUpdateRAII() {
3298 if (CGF.getLangOpts().OpenMP)
3300 CGF, E->getSubExpr());
3301 }
3302};
3303} // namespace
3304
3305llvm::Value *
3306ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
3307 bool isInc, bool isPre) {
3308 ApplyAtomGroup Grp(CGF.getDebugInfo());
3309 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
3310 QualType type = E->getSubExpr()->getType();
3311 llvm::PHINode *atomicPHI = nullptr;
3312 llvm::Value *value;
3313 llvm::Value *input;
3314 llvm::Value *Previous = nullptr;
3315 QualType SrcType = E->getType();
3316
3317 int amount = (isInc ? 1 : -1);
3318 bool isSubtraction = !isInc;
3319
3320 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
3321 type = atomicTy->getValueType();
3322 if (isInc && type->isBooleanType()) {
3323 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
3324 if (isPre) {
3325 Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
3326 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
3327 return Builder.getTrue();
3328 }
3329 // For atomic bool increment, we just store true and return it for
3330 // preincrement, do an atomic swap with true for postincrement
3331 return Builder.CreateAtomicRMW(
3332 llvm::AtomicRMWInst::Xchg, LV.getAddress(), True,
3333 llvm::AtomicOrdering::SequentiallyConsistent);
3334 }
3335 // Special case for atomic increment / decrement on integers, emit
3336 // atomicrmw instructions. We skip this if we want to be doing overflow
3337 // checking, and fall into the slow path with the atomic cmpxchg loop.
3338 if (!type->isBooleanType() && type->isIntegerType() &&
3339 !(type->isUnsignedIntegerType() &&
3340 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3341 CGF.getLangOpts().getSignedOverflowBehavior() !=
3342 LangOptions::SOB_Trapping) {
3343 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
3344 llvm::AtomicRMWInst::Sub;
3345 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
3346 llvm::Instruction::Sub;
3347 llvm::Value *amt = CGF.EmitToMemory(
3348 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
3349 llvm::Value *old =
3350 Builder.CreateAtomicRMW(aop, LV.getAddress(), amt,
3351 llvm::AtomicOrdering::SequentiallyConsistent);
3352 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
3353 }
3354 // Special case for atomic increment/decrement on floats.
3355 // Bail out non-power-of-2-sized floating point types (e.g., x86_fp80).
3356 if (type->isFloatingType()) {
3357 llvm::Type *Ty = ConvertType(type);
3358 if (llvm::has_single_bit(Ty->getScalarSizeInBits())) {
3359 llvm::AtomicRMWInst::BinOp aop =
3360 isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
3361 llvm::Instruction::BinaryOps op =
3362 isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
3363 llvm::Value *amt = llvm::ConstantFP::get(Ty, 1.0);
3364 llvm::AtomicRMWInst *old =
3365 CGF.emitAtomicRMWInst(aop, LV.getAddress(), amt,
3366 llvm::AtomicOrdering::SequentiallyConsistent);
3367
3368 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
3369 }
3370 }
3371 value = EmitLoadOfLValue(LV, E->getExprLoc());
3372 input = value;
3373 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
3374 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3375 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3376 value = CGF.EmitToMemory(value, type);
3377 Builder.CreateBr(opBB);
3378 Builder.SetInsertPoint(opBB);
3379 atomicPHI = Builder.CreatePHI(value->getType(), 2);
3380 atomicPHI->addIncoming(value, startBB);
3381 value = atomicPHI;
3382 } else {
3383 value = EmitLoadOfLValue(LV, E->getExprLoc());
3384 input = value;
3385 }
3386
3387 // Special case of integer increment that we have to check first: bool++.
3388 // Due to promotion rules, we get:
3389 // bool++ -> bool = bool + 1
3390 // -> bool = (int)bool + 1
3391 // -> bool = ((int)bool + 1 != 0)
3392 // An interesting aspect of this is that increment is always true.
3393 // Decrement does not have this property.
3394 if (isInc && type->isBooleanType()) {
3395 value = Builder.getTrue();
3396
3397 // Most common case by far: integer increment.
3398 } else if (type->isIntegerType()) {
3399 QualType promotedType;
3400 bool canPerformLossyDemotionCheck = false;
3401
3403 promotedType = CGF.getContext().getPromotedIntegerType(type);
3404 assert(promotedType != type && "Shouldn't promote to the same type.");
3405 canPerformLossyDemotionCheck = true;
3406 canPerformLossyDemotionCheck &=
3408 CGF.getContext().getCanonicalType(promotedType);
3409 canPerformLossyDemotionCheck &=
3411 type, promotedType);
3412 assert((!canPerformLossyDemotionCheck ||
3413 type->isSignedIntegerOrEnumerationType() ||
3414 promotedType->isSignedIntegerOrEnumerationType() ||
3415 ConvertType(type)->getScalarSizeInBits() ==
3416 ConvertType(promotedType)->getScalarSizeInBits()) &&
3417 "The following check expects that if we do promotion to different "
3418 "underlying canonical type, at least one of the types (either "
3419 "base or promoted) will be signed, or the bitwidths will match.");
3420 }
3421 if (CGF.SanOpts.hasOneOf(
3422 SanitizerKind::ImplicitIntegerArithmeticValueChange |
3423 SanitizerKind::ImplicitBitfieldConversion) &&
3424 canPerformLossyDemotionCheck) {
3425 // While `x += 1` (for `x` with width less than int) is modeled as
3426 // promotion+arithmetics+demotion, and we can catch lossy demotion with
3427 // ease; inc/dec with width less than int can't overflow because of
3428 // promotion rules, so we omit promotion+demotion, which means that we can
3429 // not catch lossy "demotion". Because we still want to catch these cases
3430 // when the sanitizer is enabled, we perform the promotion, then perform
3431 // the increment/decrement in the wider type, and finally
3432 // perform the demotion. This will catch lossy demotions.
3433
3434 // We have a special case for bitfields defined using all the bits of the
3435 // type. In this case we need to do the same trick as for the integer
3436 // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.
3437
3438 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
3439 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
3440 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3441 // Do pass non-default ScalarConversionOpts so that sanitizer check is
3442 // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
3443 // checks will take care of the conversion.
3444 ScalarConversionOpts Opts;
3445 if (!LV.isBitField())
3446 Opts = ScalarConversionOpts(CGF.SanOpts);
3447 else if (CGF.SanOpts.has(SanitizerKind::ImplicitBitfieldConversion)) {
3448 Previous = value;
3449 SrcType = promotedType;
3450 }
3451
3452 Opts.PatternExcluded = CGF.getContext().isUnaryOverflowPatternExcluded(E);
3453 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
3454 Opts);
3455
3456 // Note that signed integer inc/dec with width less than int can't
3457 // overflow because of promotion rules; we're just eliding a few steps
3458 // here.
3459 } else if (type->isSignedIntegerOrEnumerationType() ||
3460 type->isUnsignedIntegerType()) {
3461 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
3462 } else {
3463 // Treat positive amount as unsigned to support inc of i1 (needed for
3464 // unsigned _BitInt(1)).
3465 llvm::Value *amt =
3466 llvm::ConstantInt::get(value->getType(), amount, !isInc);
3467 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3468 }
3469
3470 // Next most common: pointer increment.
3471 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
3472 QualType type = ptr->getPointeeType();
3473
3474 // VLA types don't have constant size.
3475 if (const VariableArrayType *vla
3477 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
3478 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
3479 llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
3480 if (CGF.getLangOpts().PointerOverflowDefined)
3481 value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
3482 else
3483 value = CGF.EmitCheckedInBoundsGEP(
3484 elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
3485 E->getExprLoc(), "vla.inc");
3486
3487 // Arithmetic on function pointers (!) is just +-1.
3488 } else if (type->isFunctionType()) {
3489 llvm::Value *amt = Builder.getInt32(amount);
3490
3491 if (CGF.getLangOpts().PointerOverflowDefined)
3492 value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
3493 else
3494 value =
3495 CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
3496 /*SignedIndices=*/false, isSubtraction,
3497 E->getExprLoc(), "incdec.funcptr");
3498
3499 // For everything else, we can just do a simple increment.
3500 } else {
3501 llvm::Value *amt = Builder.getInt32(amount);
3502 llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
3503 if (CGF.getLangOpts().PointerOverflowDefined)
3504 value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
3505 else
3506 value = CGF.EmitCheckedInBoundsGEP(
3507 elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
3508 E->getExprLoc(), "incdec.ptr");
3509 }
3510
3511 // Vector increment/decrement.
3512 } else if (type->isVectorType()) {
3513 if (type->hasIntegerRepresentation()) {
3514 llvm::Value *amt = llvm::ConstantInt::getSigned(value->getType(), amount);
3515
3516 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3517 } else {
3518 value = Builder.CreateFAdd(
3519 value,
3520 llvm::ConstantFP::get(value->getType(), amount),
3521 isInc ? "inc" : "dec");
3522 }
3523
3524 // Floating point.
3525 } else if (type->isRealFloatingType()) {
3526 // Add the inc/dec to the real part.
3527 llvm::Value *amt;
3528 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
3529
3530 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3531 // Another special case: half FP increment should be done via float. If
3532 // the input isn't already half, it may be i16.
3533 Value *bitcast = Builder.CreateBitCast(input, CGF.CGM.HalfTy);
3534 value = Builder.CreateFPExt(bitcast, CGF.CGM.FloatTy, "incdec.conv");
3535 }
3536
3537 if (value->getType()->isFloatTy())
3538 amt = llvm::ConstantFP::get(VMContext,
3539 llvm::APFloat(static_cast<float>(amount)));
3540 else if (value->getType()->isDoubleTy())
3541 amt = llvm::ConstantFP::get(VMContext,
3542 llvm::APFloat(static_cast<double>(amount)));
3543 else {
3544 // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
3545 // Convert from float.
3546 llvm::APFloat F(static_cast<float>(amount));
3547 bool ignored;
3548 const llvm::fltSemantics *FS;
3549 // Don't use getFloatTypeSemantics because Half isn't
3550 // necessarily represented using the "half" LLVM type.
3551 if (value->getType()->isFP128Ty())
3552 FS = &CGF.getTarget().getFloat128Format();
3553 else if (value->getType()->isHalfTy())
3554 FS = &CGF.getTarget().getHalfFormat();
3555 else if (value->getType()->isBFloatTy())
3556 FS = &CGF.getTarget().getBFloat16Format();
3557 else if (value->getType()->isPPC_FP128Ty())
3558 FS = &CGF.getTarget().getIbm128Format();
3559 else
3560 FS = &CGF.getTarget().getLongDoubleFormat();
3561 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
3562 amt = llvm::ConstantFP::get(VMContext, F);
3563 }
3564 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
3565
3566 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3567 value = Builder.CreateFPTrunc(value, CGF.CGM.HalfTy, "incdec.conv");
3568 value = Builder.CreateBitCast(value, input->getType());
3569 }
3570
3571 // Fixed-point types.
3572 } else if (type->isFixedPointType()) {
3573 // Fixed-point types are tricky. In some cases, it isn't possible to
3574 // represent a 1 or a -1 in the type at all. Piggyback off of
3575 // EmitFixedPointBinOp to avoid having to reimplement saturation.
3576 BinOpInfo Info;
3577 Info.E = E;
3578 Info.Ty = E->getType();
3579 Info.Opcode = isInc ? BO_Add : BO_Sub;
3580 Info.LHS = value;
3581 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
3582 // If the type is signed, it's better to represent this as +(-1) or -(-1),
3583 // since -1 is guaranteed to be representable.
3584 if (type->isSignedFixedPointType()) {
3585 Info.Opcode = isInc ? BO_Sub : BO_Add;
3586 Info.RHS = Builder.CreateNeg(Info.RHS);
3587 }
3588 // Now, convert from our invented integer literal to the type of the unary
3589 // op. This will upscale and saturate if necessary. This value can become
3590 // undef in some cases.
3591 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3592 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
3593 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
3594 value = EmitFixedPointBinOp(Info);
3595
3596 // Objective-C pointer types.
3597 } else {
3598 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
3599
3600 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
3601 if (!isInc) size = -size;
3602 llvm::Value *sizeValue =
3603 llvm::ConstantInt::getSigned(CGF.SizeTy, size.getQuantity());
3604
3605 if (CGF.getLangOpts().PointerOverflowDefined)
3606 value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
3607 else
3608 value = CGF.EmitCheckedInBoundsGEP(
3609 CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
3610 E->getExprLoc(), "incdec.objptr");
3611 value = Builder.CreateBitCast(value, input->getType());
3612 }
3613
3614 if (atomicPHI) {
3615 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3616 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3617 auto Pair = CGF.EmitAtomicCompareExchange(
3618 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
3619 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
3620 llvm::Value *success = Pair.second;
3621 atomicPHI->addIncoming(old, curBlock);
3622 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3623 Builder.SetInsertPoint(contBB);
3624 return isPre ? value : input;
3625 }
3626
3627 // Store the updated result through the lvalue.
3628 if (LV.isBitField()) {
3629 Value *Src = Previous ? Previous : value;
3630 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
3631 CGF.EmitBitfieldConversionCheck(Src, SrcType, value, E->getType(),
3632 LV.getBitFieldInfo(), E->getExprLoc());
3633 } else
3634 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
3635
3636 // If this is a postinc, return the value read from memory, otherwise use the
3637 // updated value.
3638 return isPre ? value : input;
3639}
3640
3641
3642Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3643 QualType PromotionType) {
3644 QualType promotionTy = PromotionType.isNull()
3645 ? getPromotionType(E->getSubExpr()->getType())
3646 : PromotionType;
3647 Value *result = VisitPlus(E, promotionTy);
3648 if (result && !promotionTy.isNull())
3649 result = EmitUnPromotedValue(result, E->getType());
3650 return result;
3651}
3652
3653Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3654 QualType PromotionType) {
3655 // This differs from gcc, though, most likely due to a bug in gcc.
3656 TestAndClearIgnoreResultAssign();
3657 if (!PromotionType.isNull())
3658 return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3659 return Visit(E->getSubExpr());
3660}
3661
3662Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3663 QualType PromotionType) {
3664 QualType promotionTy = PromotionType.isNull()
3665 ? getPromotionType(E->getSubExpr()->getType())
3666 : PromotionType;
3667 Value *result = VisitMinus(E, promotionTy);
3668 if (result && !promotionTy.isNull())
3669 result = EmitUnPromotedValue(result, E->getType());
3670 return result;
3671}
3672
3673Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3674 QualType PromotionType) {
3675 TestAndClearIgnoreResultAssign();
3676 Value *Op;
3677 if (!PromotionType.isNull())
3678 Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3679 else
3680 Op = Visit(E->getSubExpr());
3681
3682 // Generate a unary FNeg for FP ops.
3683 if (Op->getType()->isFPOrFPVectorTy())
3684 return Builder.CreateFNeg(Op, "fneg");
3685
3686 // Emit unary minus with EmitSub so we handle overflow cases etc.
3687 BinOpInfo BinOp;
3688 BinOp.RHS = Op;
3689 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
3690 BinOp.Ty = E->getType();
3691 BinOp.Opcode = BO_Sub;
3692 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3693 BinOp.E = E;
3694 return EmitSub(BinOp);
3695}
3696
3697Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3698 TestAndClearIgnoreResultAssign();
3699 Value *Op = Visit(E->getSubExpr());
3700 return Builder.CreateNot(Op, "not");
3701}
3702
3703Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
3704 // Perform vector logical not on comparison with zero vector.
3705 if (E->getType()->isVectorType() &&
3706 E->getType()->castAs<VectorType>()->getVectorKind() ==
3707 VectorKind::Generic) {
3708 Value *Oper = Visit(E->getSubExpr());
3709 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
3710 Value *Result;
3711 if (Oper->getType()->isFPOrFPVectorTy()) {
3712 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
3713 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
3714 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
3715 } else
3716 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
3717 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
3718 }
3719
3720 // Compare operand to zero.
3721 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
3722
3723 // Invert value.
3724 // TODO: Could dynamically modify easy computations here. For example, if
3725 // the operand is an icmp ne, turn into icmp eq.
3726 BoolVal = Builder.CreateNot(BoolVal, "lnot");
3727
3728 // ZExt result to the expr type.
3729 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
3730}
3731
3732Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
3733 // Try folding the offsetof to a constant.
3734 Expr::EvalResult EVResult;
3735 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
3736 llvm::APSInt Value = EVResult.Val.getInt();
3737 return Builder.getInt(Value);
3738 }
3739
3740 // Loop over the components of the offsetof to compute the value.
3741 unsigned n = E->getNumComponents();
3742 llvm::Type* ResultType = ConvertType(E->getType());
3743 llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
3744 QualType CurrentType = E->getTypeSourceInfo()->getType();
3745 for (unsigned i = 0; i != n; ++i) {
3746 OffsetOfNode ON = E->getComponent(i);
3747 llvm::Value *Offset = nullptr;
3748 switch (ON.getKind()) {
3749 case OffsetOfNode::Array: {
3750 // Compute the index
3751 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
3752 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
3753 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
3754 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
3755
3756 // Save the element type
3757 CurrentType =
3758 CGF.getContext().getAsArrayType(CurrentType)->getElementType();
3759
3760 // Compute the element size
3761 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
3762 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
3763
3764 // Multiply out to compute the result
3765 Offset = Builder.CreateMul(Idx, ElemSize);
3766 break;
3767 }
3768
3769 case OffsetOfNode::Field: {
3770 FieldDecl *MemberDecl = ON.getField();
3771 auto *RD = CurrentType->castAsRecordDecl();
3772 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
3773
3774 // Get the index of the field in its parent.
3775 unsigned FieldIndex = MemberDecl->getFieldIndex();
3776
3777 // Compute the offset to the field
3778 int64_t OffsetInt =
3779 RL.getFieldOffset(FieldIndex) / CGF.getContext().getCharWidth();
3780 Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
3781
3782 // Save the element type.
3783 CurrentType = MemberDecl->getType();
3784 break;
3785 }
3786
3788 llvm_unreachable("dependent __builtin_offsetof");
3789
3790 case OffsetOfNode::Base: {
3791 if (ON.getBase()->isVirtual()) {
3792 CGF.ErrorUnsupported(E, "virtual base in offsetof");
3793 continue;
3794 }
3795
3796 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(
3797 CurrentType->castAsCanonical<RecordType>()->getDecl());
3798
3799 // Save the element type.
3800 CurrentType = ON.getBase()->getType();
3801
3802 // Compute the offset to the base.
3803 auto *BaseRD = CurrentType->castAsCXXRecordDecl();
3804 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
3805 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
3806 break;
3807 }
3808 }
3809 Result = Builder.CreateAdd(Result, Offset);
3810 }
3811 return Result;
3812}
3813
3814/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
3815/// argument of the sizeof expression as an integer.
3816Value *
3817ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
3818 const UnaryExprOrTypeTraitExpr *E) {
3819 QualType TypeToSize = E->getTypeOfArgument();
3820 if (auto Kind = E->getKind();
3821 Kind == UETT_SizeOf || Kind == UETT_DataSizeOf || Kind == UETT_CountOf) {
3822 if (const VariableArrayType *VAT =
3823 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
3824 // For _Countof, we only want to evaluate if the extent is actually
3825 // variable as opposed to a multi-dimensional array whose extent is
3826 // constant but whose element type is variable.
3827 bool EvaluateExtent = true;
3828 if (Kind == UETT_CountOf && VAT->getElementType()->isArrayType()) {
3829 EvaluateExtent =
3830 !VAT->getSizeExpr()->isIntegerConstantExpr(CGF.getContext());
3831 }
3832 if (EvaluateExtent) {
3833 if (E->isArgumentType()) {
3834 // sizeof(type) - make sure to emit the VLA size.
3835 CGF.EmitVariablyModifiedType(TypeToSize);
3836 } else {
3837 // C99 6.5.3.4p2: If the argument is an expression of type
3838 // VLA, it is evaluated.
3840 }
3841
3842 // For _Countof, we just want to return the size of a single dimension.
3843 if (Kind == UETT_CountOf)
3844 return CGF.getVLAElements1D(VAT).NumElts;
3845
3846 // For sizeof and __datasizeof, we need to scale the number of elements
3847 // by the size of the array element type.
3848 auto VlaSize = CGF.getVLASize(VAT);
3849
3850 // Scale the number of non-VLA elements by the non-VLA element size.
3851 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
3852 if (!eltSize.isOne())
3853 return CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize),
3854 VlaSize.NumElts);
3855 return VlaSize.NumElts;
3856 }
3857 }
3858 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
3859 auto Alignment =
3860 CGF.getContext()
3863 .getQuantity();
3864 return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
3865 } else if (E->getKind() == UETT_VectorElements) {
3866 auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));
3867 return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());
3868 }
3869
3870 // If this isn't sizeof(vla), the result must be constant; use the constant
3871 // folding logic so we don't have to duplicate it here.
3872 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
3873}
3874
3875Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3876 QualType PromotionType) {
3877 QualType promotionTy = PromotionType.isNull()
3878 ? getPromotionType(E->getSubExpr()->getType())
3879 : PromotionType;
3880 Value *result = VisitReal(E, promotionTy);
3881 if (result && !promotionTy.isNull())
3882 result = EmitUnPromotedValue(result, E->getType());
3883 return result;
3884}
3885
3886Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
3887 QualType PromotionType) {
3888 Expr *Op = E->getSubExpr();
3889 if (Op->getType()->isAnyComplexType()) {
3890 // If it's an l-value, load through the appropriate subobject l-value.
3891 // Note that we have to ask E because Op might be an l-value that
3892 // this won't work for, e.g. an Obj-C property.
3893 if (E->isGLValue()) {
3894 if (!PromotionType.isNull()) {
3896 Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
3897 PromotionType = PromotionType->isAnyComplexType()
3898 ? PromotionType
3899 : CGF.getContext().getComplexType(PromotionType);
3900 return result.first ? CGF.EmitPromotedValue(result, PromotionType).first
3901 : result.first;
3902 }
3903
3904 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3905 .getScalarVal();
3906 }
3907 // Otherwise, calculate and project.
3908 return CGF.EmitComplexExpr(Op, false, true).first;
3909 }
3910
3911 if (!PromotionType.isNull())
3912 return CGF.EmitPromotedScalarExpr(Op, PromotionType);
3913 return Visit(Op);
3914}
3915
3916Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3917 QualType PromotionType) {
3918 QualType promotionTy = PromotionType.isNull()
3919 ? getPromotionType(E->getSubExpr()->getType())
3920 : PromotionType;
3921 Value *result = VisitImag(E, promotionTy);
3922 if (result && !promotionTy.isNull())
3923 result = EmitUnPromotedValue(result, E->getType());
3924 return result;
3925}
3926
3927Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
3928 QualType PromotionType) {
3929 Expr *Op = E->getSubExpr();
3930 if (Op->getType()->isAnyComplexType()) {
3931 // If it's an l-value, load through the appropriate subobject l-value.
3932 // Note that we have to ask E because Op might be an l-value that
3933 // this won't work for, e.g. an Obj-C property.
3934 if (Op->isGLValue()) {
3935 if (!PromotionType.isNull()) {
3937 Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
3938 PromotionType = PromotionType->isAnyComplexType()
3939 ? PromotionType
3940 : CGF.getContext().getComplexType(PromotionType);
3941 return result.second
3942 ? CGF.EmitPromotedValue(result, PromotionType).second
3943 : result.second;
3944 }
3945
3946 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3947 .getScalarVal();
3948 }
3949 // Otherwise, calculate and project.
3950 return CGF.EmitComplexExpr(Op, true, false).second;
3951 }
3952
3953 // __imag on a scalar returns zero. Emit the subexpr to ensure side
3954 // effects are evaluated, but not the actual value.
3955 if (Op->isGLValue())
3956 CGF.EmitLValue(Op);
3957 else if (!PromotionType.isNull())
3958 CGF.EmitPromotedScalarExpr(Op, PromotionType);
3959 else
3960 CGF.EmitScalarExpr(Op, true);
3961 if (!PromotionType.isNull())
3962 return llvm::Constant::getNullValue(ConvertType(PromotionType));
3963 return llvm::Constant::getNullValue(ConvertType(E->getType()));
3964}
3965
3966//===----------------------------------------------------------------------===//
3967// Binary Operators
3968//===----------------------------------------------------------------------===//
3969
3970Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3971 QualType PromotionType) {
3972 return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");
3973}
3974
3975Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3976 QualType ExprType) {
3977 return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType), "unpromotion");
3978}
3979
3980Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3981 E = E->IgnoreParens();
3982 if (auto BO = dyn_cast<BinaryOperator>(E)) {
3983 switch (BO->getOpcode()) {
3984#define HANDLE_BINOP(OP) \
3985 case BO_##OP: \
3986 return Emit##OP(EmitBinOps(BO, PromotionType));
3987 HANDLE_BINOP(Add)
3988 HANDLE_BINOP(Sub)
3989 HANDLE_BINOP(Mul)
3990 HANDLE_BINOP(Div)
3991#undef HANDLE_BINOP
3992 default:
3993 break;
3994 }
3995 } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
3996 switch (UO->getOpcode()) {
3997 case UO_Imag:
3998 return VisitImag(UO, PromotionType);
3999 case UO_Real:
4000 return VisitReal(UO, PromotionType);
4001 case UO_Minus:
4002 return VisitMinus(UO, PromotionType);
4003 case UO_Plus:
4004 return VisitPlus(UO, PromotionType);
4005 default:
4006 break;
4007 }
4008 }
4009 auto result = Visit(const_cast<Expr *>(E));
4010 if (result) {
4011 if (!PromotionType.isNull())
4012 return EmitPromotedValue(result, PromotionType);
4013 else
4014 return EmitUnPromotedValue(result, E->getType());
4015 }
4016 return result;
4017}
4018
4019BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
4020 QualType PromotionType) {
4021 TestAndClearIgnoreResultAssign();
4022 BinOpInfo Result;
4023 Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);
4024 Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);
4025 if (!PromotionType.isNull())
4026 Result.Ty = PromotionType;
4027 else
4028 Result.Ty = E->getType();
4029 Result.Opcode = E->getOpcode();
4030 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
4031 Result.E = E;
4032 return Result;
4033}
4034
// Emit the lvalue form of a compound assignment ("LHS op= RHS").  The
// arithmetic itself is performed by the member-function pointer Func; on
// return, Result holds the operation's value converted back to the LHS
// type, and the returned LValue designates the updated LHS object.
// Atomic LHS types are lowered either to a single atomicrmw (when the
// opcode maps to one and no overflow trapping/sanitizing is needed) or to
// a compare-exchange retry loop.
LValue ScalarExprEmitter::EmitCompoundAssignLValue(
    const CompoundAssignOperator *E,
    Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
    Value *&Result) {
  QualType LHSTy = E->getLHS()->getType();
  BinOpInfo OpInfo;


  // Emit the RHS first. __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.

  // Compute the promoted types used for the computation (a null promotion
  // type means "no promotion needed" and the unpromoted type is used).
  QualType PromotionTypeCR;
  PromotionTypeCR = getPromotionType(E->getComputationResultType());
  if (PromotionTypeCR.isNull())
    PromotionTypeCR = E->getComputationResultType();
  QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
  QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
  if (!PromotionTypeRHS.isNull())
    OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);
  else
    OpInfo.RHS = Visit(E->getRHS());
  // The operation is carried out in the (possibly promoted) computation
  // result type.
  OpInfo.Ty = PromotionTypeCR;
  OpInfo.Opcode = E->getOpcode();
  OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  OpInfo.E = E;
  // Load/convert the LHS.
  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

  llvm::PHINode *atomicPHI = nullptr;
  if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
    QualType type = atomicTy->getValueType();
    // Fast path: use a single atomicrmw when the value is a plain integer
    // and neither signed-overflow trapping nor the unsigned-overflow
    // sanitizer needs to observe the arithmetic.
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
            LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
      llvm::Instruction::BinaryOps Op;
      switch (OpInfo.Opcode) {
      // We don't have atomicrmw operands for *, %, /, <<, >>
      case BO_MulAssign: case BO_DivAssign:
      case BO_RemAssign:
      case BO_ShlAssign:
      case BO_ShrAssign:
        break;
      case BO_AddAssign:
        AtomicOp = llvm::AtomicRMWInst::Add;
        Op = llvm::Instruction::Add;
        break;
      case BO_SubAssign:
        AtomicOp = llvm::AtomicRMWInst::Sub;
        Op = llvm::Instruction::Sub;
        break;
      case BO_AndAssign:
        AtomicOp = llvm::AtomicRMWInst::And;
        Op = llvm::Instruction::And;
        break;
      case BO_XorAssign:
        AtomicOp = llvm::AtomicRMWInst::Xor;
        Op = llvm::Instruction::Xor;
        break;
      case BO_OrAssign:
        AtomicOp = llvm::AtomicRMWInst::Or;
        Op = llvm::Instruction::Or;
        break;
      default:
        llvm_unreachable("Invalid compound assignment type");
      }
      if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
        // Convert the RHS to the stored value type before the RMW.
        llvm::Value *Amt = CGF.EmitToMemory(
            EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
                                 E->getExprLoc()),
            LHSTy);

        llvm::AtomicRMWInst *OldVal =
            CGF.emitAtomicRMWInst(AtomicOp, LHSLV.getAddress(), Amt);

        // Since operation is atomic, the result type is guaranteed to be the
        // same as the input in LLVM terms.
        // atomicrmw returns the OLD value; recompute the new value for the
        // expression result.
        Result = Builder.CreateBinOp(Op, OldVal, Amt);
        return LHSLV;
      }
    }
    // Slow path: emit a compare-exchange retry loop.  The PHI carries the
    // most recently observed value of the atomic object.
    // FIXME: For floating point types, we should be saving and restoring the
    // floating point environment in the loop.
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
    OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
    atomicPHI->addIncoming(OpInfo.LHS, startBB);
    OpInfo.LHS = atomicPHI;
  }
  else
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
  SourceLocation Loc = E->getExprLoc();
  // Convert the loaded LHS into the (promoted) computation LHS type.
  if (!PromotionTypeLHS.isNull())
    OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
                                      E->getExprLoc());
  else
    OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
                                      E->getComputationLHSType(), Loc);

  // Expand the binary operator.
  Result = (this->*Func)(OpInfo);

  // Convert the result back to the LHS type,
  // potentially with Implicit Conversion sanitizer check.
  // If LHSLV is a bitfield, use default ScalarConversionOpts
  // to avoid emit any implicit integer checks.
  Value *Previous = nullptr;
  if (LHSLV.isBitField()) {
    Previous = Result;
    Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc);
  } else if (const auto *atomicTy = LHSTy->getAs<AtomicType>()) {
    Result =
        EmitScalarConversion(Result, PromotionTypeCR, atomicTy->getValueType(),
                             Loc, ScalarConversionOpts(CGF.SanOpts));
  } else {
    Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
                                  ScalarConversionOpts(CGF.SanOpts));
  }

  if (atomicPHI) {
    // Close the compare-exchange loop: if another thread changed the object
    // since it was read, branch back with the freshly observed value.
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(old, curBlock);
    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return LHSLV;
  }

  // Store the result value into the LHS lvalue. Bit-fields are handled
  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (LHSLV.isBitField()) {
    Value *Src = Previous ? Previous : Result;
    QualType SrcType = E->getRHS()->getType();
    QualType DstType = E->getLHS()->getType();
    CGF.EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
                                    LHSLV.getBitFieldInfo(), E->getExprLoc());
  } else

  if (CGF.getLangOpts().OpenMP)
                                                E->getLHS());
  return LHSLV;
}
4196
4197Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
4198 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
4199 bool Ignore = TestAndClearIgnoreResultAssign();
4200 Value *RHS = nullptr;
4201 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
4202
4203 // If the result is clearly ignored, return now.
4204 if (Ignore)
4205 return nullptr;
4206
4207 // The result of an assignment in C is the assigned r-value.
4208 if (!CGF.getLangOpts().CPlusPlus)
4209 return RHS;
4210
4211 // If the lvalue is non-volatile, return the computed value of the assignment.
4212 if (!LHS.isVolatileQualified())
4213 return RHS;
4214
4215 // Otherwise, reload the value.
4216 return EmitLoadOfLValue(LHS, E->getExprLoc());
4217}
4218
// Emit the UBSan checks for an integer '/' or '%': divide-by-zero and, for
// signed operands, the INT_MIN / -1 overflow case.  Zero must be a null
// constant of the operand type; isDiv selects the division (vs. remainder)
// diagnostic.
void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
    const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
  SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 2>
      Checks;

  // Check 1: the divisor must be non-zero.
  if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
    Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
                                    SanitizerKind::SO_IntegerDivideByZero));
  }

  // Check 2 (signed only): INT_MIN / -1 overflows.  Skipped for widened
  // operations (which cannot overflow), wrapping types, and statically
  // provable non-overflow.
  const auto *BO = cast<BinaryOperator>(Ops.E);
  if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
      Ops.Ty->hasSignedIntegerRepresentation() &&
      !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
      Ops.mayHaveIntegerOverflow() && !Ops.Ty.isWrapType() &&
          SanitizerKind::SignedIntegerOverflow, Ops.Ty)) {
    llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());

    llvm::Value *IntMin =
        Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
    llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);

    // No overflow iff LHS != INT_MIN or RHS != -1.
    llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
    llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
    llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
    Checks.push_back(
        std::make_pair(NotOverflow, SanitizerKind::SO_SignedIntegerOverflow));
  }

  if (Checks.size() > 0)
    EmitBinOpCheck(Checks, Ops);
}
4252
// Emit '/' for integer, floating, matrix, and fixed-point operands, with
// the applicable sanitizer checks emitted first.
Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
  {
    // Scope the sanitizer debug location to the check emission only.
    SanitizerDebugLocation SanScope(&CGF,
                                    {SanitizerKind::SO_IntegerDivideByZero,
                                     SanitizerKind::SO_SignedIntegerOverflow,
                                     SanitizerKind::SO_FloatDivideByZero},
                                    SanitizerHandler::DivremOverflow);
    if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
         CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
        Ops.Ty->isIntegerType() &&
        (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
      EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
    } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
               Ops.Ty->isRealFloatingType() &&
               Ops.mayHaveFloatDivisionByZero()) {
      // Floating division only needs the divisor != 0 check (UNE so that a
      // NaN divisor also passes).
      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
      llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
      EmitBinOpCheck(
          std::make_pair(NonZero, SanitizerKind::SO_FloatDivideByZero), Ops);
    }
  }

  if (Ops.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder MB(Builder);
    // We need to check the types of the operands of the operator to get the
    // correct matrix dimensions.
    auto *BO = cast<BinaryOperator>(Ops.E);
    (void)BO;
    assert(
        "first operand must be a matrix");
    assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
           "second operand must be an arithmetic type");
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
    return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
                              Ops.Ty->hasUnsignedIntegerRepresentation());
  }

  if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
    llvm::Value *Val;
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
    Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
    // Attach the FP accuracy constraints in effect to the division result.
    CGF.SetDivFPAccuracy(Val);
    return Val;
  }
  else if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);
  else if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
  else
    return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
}
4306
4307Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
4308 // Rem in C can't be a floating point type: C99 6.5.5p2.
4309 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
4310 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
4311 Ops.Ty->isIntegerType() &&
4312 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4313 SanitizerDebugLocation SanScope(&CGF,
4314 {SanitizerKind::SO_IntegerDivideByZero,
4315 SanitizerKind::SO_SignedIntegerOverflow},
4316 SanitizerHandler::DivremOverflow);
4317 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4318 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
4319 }
4320
4321 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4322 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
4323
4324 if (CGF.getLangOpts().HLSL && Ops.Ty->hasFloatingRepresentation())
4325 return Builder.CreateFRem(Ops.LHS, Ops.RHS, "rem");
4326
4327 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
4328}
4329
// Emit +, -, or * (including the compound-assign forms) through the
// llvm.{s,u}{add,sub,mul}.with.overflow intrinsics.  On overflow the code
// either calls the sanitizer runtime, traps, or calls a user-supplied
// -ftrapv handler whose (truncated) return value replaces the result.
Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  // OpID encodes the operation and signedness for the -ftrapv handler ABI:
  // (op << 1) | isSigned, where op is 1=add, 2=sub, 3=mul.
  unsigned OpID = 0;
  SanitizerHandler OverflowKind;

  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
  switch (Ops.Opcode) {
  case BO_Add:
  case BO_AddAssign:
    OpID = 1;
    IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
                     llvm::Intrinsic::uadd_with_overflow;
    OverflowKind = SanitizerHandler::AddOverflow;
    break;
  case BO_Sub:
  case BO_SubAssign:
    OpID = 2;
    IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
                     llvm::Intrinsic::usub_with_overflow;
    OverflowKind = SanitizerHandler::SubOverflow;
    break;
  case BO_Mul:
  case BO_MulAssign:
    OpID = 3;
    IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
                     llvm::Intrinsic::umul_with_overflow;
    OverflowKind = SanitizerHandler::MulOverflow;
    break;
  default:
    llvm_unreachable("Unsupported operation for overflow detection");
  }
  OpID <<= 1;
  if (isSigned)
    OpID |= 1;

  SanitizerDebugLocation SanScope(&CGF,
                                  {SanitizerKind::SO_SignedIntegerOverflow,
                                   SanitizerKind::SO_UnsignedIntegerOverflow},
                                  OverflowKind);
  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);

  // The intrinsic returns {result, did-overflow}.
  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);

  // Handle overflow with llvm.trap if no custom handler has been specified.
  const std::string *handlerName =
  if (handlerName->empty()) {
    // If no -ftrapv handler has been specified, try to use sanitizer runtimes
    // if available otherwise just emit a trap. It is possible for unsigned
    // arithmetic to result in a trap due to the OverflowBehaviorType attribute
    // which describes overflow behavior on a per-type basis.
    if (isSigned) {
      if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
        llvm::Value *NotOf = Builder.CreateNot(overflow);
        EmitBinOpCheck(
            std::make_pair(NotOf, SanitizerKind::SO_SignedIntegerOverflow),
            Ops);
      } else
        CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
      return result;
    }
    if (CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
      llvm::Value *NotOf = Builder.CreateNot(overflow);
      EmitBinOpCheck(
          std::make_pair(NotOf, SanitizerKind::SO_UnsignedIntegerOverflow),
          Ops);
    } else
      CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
    return result;
  }

  // Branch in case of overflow.
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *continueBB =
      CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // If an overflow handler is set, then we want to call it and then use its
  // result, if it returns.
  Builder.SetInsertPoint(overflowBB);

  // Get the overflow handler.
  // Signature: i64 handler(i64 lhs, i64 rhs, i8 opID, i8 bitwidth, ...)
  llvm::Type *Int8Ty = CGF.Int8Ty;
  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
  llvm::FunctionType *handlerTy =
      llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
  llvm::FunctionCallee handler =
      CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);

  // Sign extend the args to 64-bit, so that we can use the same handler for
  // all types of overflow.
  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);

  // Call the handler with the two arguments, the operation, and the size of
  // the result.
  llvm::Value *handlerArgs[] = {
    lhs,
    rhs,
    Builder.getInt8(OpID),
    Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
  };
  llvm::Value *handlerResult =
    CGF.EmitNounwindRuntimeCall(handler, handlerArgs);

  // Truncate the result back to the desired size.
  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
  Builder.CreateBr(continueBB);

  Builder.SetInsertPoint(continueBB);
  // Merge the normal result with the handler's substitute value.
  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);

  return phi;
}
4452
/// BO_Add/BO_Sub are handled by EmitPointerWithAlignment to preserve alignment
/// information.
/// This function is used for BO_AddAssign/BO_SubAssign.
/// Normalizes the operands so that 'pointer' is the pointer side, then
/// delegates to CodeGenFunction::EmitPointerArithmetic.
static Value *emitPointerArithmetic(CodeGenFunction &CGF, const BinOpInfo &op,
                                    bool isSubtraction) {
  // Must have binary (not unary) expr here. Unary pointer
  // increment/decrement doesn't use this path.

  Value *pointer = op.LHS;
  Expr *pointerOperand = expr->getLHS();
  Value *index = op.RHS;
  Expr *indexOperand = expr->getRHS();

  // In a subtraction, the LHS is always the pointer.
  // For addition, "index + pointer" is also valid: swap so that 'pointer'
  // really holds the pointer operand.
  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
    std::swap(pointer, index);
    std::swap(pointerOperand, indexOperand);
  }

  return CGF.EmitPointerArithmetic(expr, pointerOperand, pointer, indexOperand,
                                   index, isSubtraction);
}
4476
/// Emit pointer + index arithmetic.
/// Handles the null-pointer-plus-integer idiom, VLA element scaling, GNU
/// void*/function-pointer extensions, and the pointer-overflow / array-bounds
/// sanitizer checks.  Returns the resulting pointer value.
    const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer,
    Expr *indexOperand, llvm::Value *index, bool isSubtraction) {
  // Whether the index must be sign- (vs. zero-) extended to index width.
  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();

  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
  auto &DL = CGM.getDataLayout();
  auto *PtrTy = cast<llvm::PointerType>(pointer->getType());

  // Some versions of glibc and gcc use idioms (particularly in their malloc
  // routines) that add a pointer-sized integer (known to be a pointer value)
  // to a null pointer in order to cast the value back to an integer or as
  // part of a pointer alignment algorithm. This is undefined behavior, but
  // we'd like to be able to compile programs that use it.
  //
  // Normally, we'd generate a GEP with a null-pointer base here in response
  // to that code, but it's also UB to dereference a pointer created that
  // way. Instead (as an acknowledged hack to tolerate the idiom) we will
  // generate a direct cast of the integer value to a pointer.
  //
  // The idiom (p = nullptr + N) is not met if any of the following are true:
  //
  // The operation is subtraction.
  // The index is not pointer-sized.
  // The pointer type is not byte-sized.
  //
  // Note that we do not suppress the pointer overflow check in this case.
      getContext(), BO->getOpcode(), pointerOperand, indexOperand)) {
    llvm::Value *Ptr = Builder.CreateIntToPtr(index, pointer->getType());
    if (getLangOpts().PointerOverflowDefined ||
        !SanOpts.has(SanitizerKind::PointerOverflow) ||
        NullPointerIsDefined(Builder.GetInsertBlock()->getParent(),
                             PtrTy->getPointerAddressSpace()))
      return Ptr;
    // The inbounds GEP of null is valid iff the index is zero.
    auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
    auto CheckHandler = SanitizerHandler::PointerOverflow;
    SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
    llvm::Value *IsZeroIndex = Builder.CreateIsNull(index);
    llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(BO->getExprLoc())};
    llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
    llvm::Value *IntPtr = llvm::Constant::getNullValue(IntPtrTy);
    llvm::Value *ComputedGEP = Builder.CreateZExtOrTrunc(index, IntPtrTy);
    llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
    EmitCheck({{IsZeroIndex, CheckOrdinal}}, CheckHandler, StaticArgs,
              DynamicArgs);
    return Ptr;
  }

  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
    // Zero-extend or sign-extend the pointer value according to
    // whether the index is signed or not.
    index = Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
                                  "idx.ext");
  }

  // If this is subtraction, negate the index.
  if (isSubtraction)
    index = Builder.CreateNeg(index, "idx.neg");

  if (SanOpts.has(SanitizerKind::ArrayBounds))
    EmitBoundsCheck(BO, pointerOperand, index, indexOperand->getType(),
                    /*Accessed*/ false);

  const PointerType *pointerType =
      pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
    // Non-PointerType pointer operand: scale the index by the object size
    // manually and index as bytes.
    QualType objectType = pointerOperand->getType()
                              ->getPointeeType();
    llvm::Value *objectSize =
        CGM.getSize(getContext().getTypeSizeInChars(objectType));

    index = Builder.CreateMul(index, objectSize);

    llvm::Value *result = Builder.CreateGEP(Int8Ty, pointer, index, "add.ptr");
    return Builder.CreateBitCast(result, pointer->getType());
  }

  QualType elementType = pointerType->getPointeeType();
  if (const VariableArrayType *vla =
          getContext().getAsVariableArrayType(elementType)) {
    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
    llvm::Type *elemTy = ConvertTypeForMem(vla->getElementType());
    if (getLangOpts().PointerOverflowDefined) {
      index = Builder.CreateMul(index, numElements, "vla.index");
      pointer = Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
    } else {
      index = Builder.CreateNSWMul(index, numElements, "vla.index");
      pointer =
          EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
                                 isSubtraction, BO->getExprLoc(), "add.ptr");
    }
    return pointer;
  }

  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
  // future proof.
  llvm::Type *elemTy;
  if (elementType->isVoidType() || elementType->isFunctionType())
    elemTy = Int8Ty;
  else
    elemTy = ConvertTypeForMem(elementType);

  if (getLangOpts().PointerOverflowDefined)
    return Builder.CreateGEP(elemTy, pointer, index, "add.ptr");

  return EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned, isSubtraction,
                                BO->getExprLoc(), "add.ptr");
}
4596
4597// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
4598// Addend. Use negMul and negAdd to negate the first operand of the Mul or
4599// the add operand respectively. This allows fmuladd to represent a*b-c, or
4600// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
4601// efficient operations.
4602static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
4603 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4604 bool negMul, bool negAdd) {
4605 Value *MulOp0 = MulOp->getOperand(0);
4606 Value *MulOp1 = MulOp->getOperand(1);
4607 if (negMul)
4608 MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
4609 if (negAdd)
4610 Addend = Builder.CreateFNeg(Addend, "neg");
4611
4612 Value *FMulAdd = nullptr;
4613 if (Builder.getIsFPConstrained()) {
4614 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
4615 "Only constrained operation should be created when Builder is in FP "
4616 "constrained mode");
4617 FMulAdd = Builder.CreateConstrainedFPCall(
4618 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
4619 Addend->getType()),
4620 {MulOp0, MulOp1, Addend});
4621 } else {
4622 FMulAdd = Builder.CreateCall(
4623 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
4624 {MulOp0, MulOp1, Addend});
4625 }
4626 MulOp->eraseFromParent();
4627
4628 return FMulAdd;
4629}
4630
// Check whether it would be legal to emit an fmuladd intrinsic call to
// represent op and if so, build the fmuladd.
//
// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
// Does NOT check the type of the operation - it's assumed that this function
// will be called from contexts where it's known that the type is contractable.
// Returns the fused value, or nullptr if no fusion was performed.
static Value* tryEmitFMulAdd(const BinOpInfo &op,
                             const CodeGenFunction &CGF, CGBuilderTy &Builder,
                             bool isSub=false) {

  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
         "Only fadd/fsub can be the root of an fmuladd.");

  // Check whether this op is marked as fusable.
  if (!op.FPFeatures.allowFPContractWithinStatement())
    return nullptr;

  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  // Peek through fneg to look for fmul. Make sure fneg has no users, and that
  // it is the only use of its operand.
  bool NegLHS = false;
  if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
    if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
      LHS = LHSUnOp->getOperand(0);
      NegLHS = true;
    }
  }

  bool NegRHS = false;
  if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
    if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
      RHS = RHSUnOp->getOperand(0);
      NegRHS = true;
    }
  }

  // We have a potentially fusable op. Look for a mul on one of the operands.
  // Also, make sure that the mul result isn't used directly. In that case,
  // there's no point creating a muladd operation.
  // Case 1: LHS is (possibly negated) fmul -> fmuladd(±a, b, ±c).
  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  // Case 2: RHS is (possibly negated) fmul; subtraction and the fneg fold
  // into the negMul flag (isSub ^ NegRHS).
  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
    }
  }

  // Cases 3/4: same as above, but the multiply is the constrained-FP
  // intrinsic (strictfp mode).
  if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
    if (LHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
    if (RHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
    }
  }

  return nullptr;
}
4717
// Emit '+' for pointers, integers (with the configured overflow behavior
// and sanitizers), floating point (with fmuladd fusion), matrices, and
// fixed-point operands.
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
  // Pointer + integer goes through the pointer-arithmetic path.
  if (op.LHS->getType()->isPointerTy() ||
      op.RHS->getType()->isPointerTy())

  if (op.Ty->isSignedIntegerOrEnumerationType() ||
      op.Ty->isUnsignedIntegerType()) {
    const bool isSigned = op.Ty->isSignedIntegerOrEnumerationType();
    const bool hasSan =
        isSigned ? CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)
                 : CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow);
    // Select the instruction (plain add, nsw add, or checked add) based on
    // the overflow behavior applicable to this type; cases deliberately
    // fall through into the stricter checks when a sanitizer is active.
    switch (getOverflowBehaviorConsideringType(CGF, op.Ty)) {
    case LangOptions::OB_Wrap:
      return Builder.CreateAdd(op.LHS, op.RHS, "add");
    case LangOptions::OB_SignedAndDefined:
      if (!hasSan)
        return Builder.CreateAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
    case LangOptions::OB_Unset:
      if (!hasSan)
        return isSigned ? Builder.CreateNSWAdd(op.LHS, op.RHS, "add")
                        : Builder.CreateAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
    case LangOptions::OB_Trap:
      if (CanElideOverflowCheck(CGF.getContext(), op))
        return isSigned ? Builder.CreateNSWAdd(op.LHS, op.RHS, "add")
                        : Builder.CreateAdd(op.LHS, op.RHS, "add");
      return EmitOverflowCheckedBinOp(op);
    }
  }

  // For vector and matrix adds, try to fold into a fmuladd.
  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    // Try to form an fmuladd.
    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
      return FMulAdd;
  }

  if (op.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder MB(Builder);
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return MB.CreateAdd(op.LHS, op.RHS);
  }

  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
  }

  if (op.isFixedPointOp())
    return EmitFixedPointBinOp(op);

  return Builder.CreateAdd(op.LHS, op.RHS, "add");
}
4773
/// The resulting value must be calculated with exact precision, so the operands
/// may not be the same type.
/// Emits a binary (or compound-assignment) operation where at least one
/// operand has a fixed-point type, using llvm::FixedPointBuilder.
/// Comparisons return their i1 result directly; arithmetic results are
/// converted to the result type's fixed-point semantics before returning.
Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
  using llvm::APSInt;
  using llvm::ConstantInt;

  // This is either a binary operation where at least one of the operands is
  // a fixed-point type, or a unary operation where the operand is a fixed-point
  // type. The result type of a binary operation is determined by
  // Sema::handleFixedPointConversions().
  QualType ResultTy = op.Ty;
  QualType LHSTy, RHSTy;
  if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
    RHSTy = BinOp->getRHS()->getType();
    if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
      // For compound assignment, the effective type of the LHS at this point
      // is the computation LHS type, not the actual LHS type, and the final
      // result type is not the type of the expression but rather the
      // computation result type.
      LHSTy = CAO->getComputationLHSType();
      ResultTy = CAO->getComputationResultType();
    } else
      LHSTy = BinOp->getLHS()->getType();
  } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
    LHSTy = UnOp->getSubExpr()->getType();
    RHSTy = UnOp->getSubExpr()->getType();
  }
  ASTContext &Ctx = CGF.getContext();
  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  // Fixed-point semantics (width, scale, signedness, saturation) for each
  // operand and for the result.
  auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
  auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
  auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
  auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);

  // Perform the actual operation.
  Value *Result;
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  switch (op.Opcode) {
  case BO_AddAssign:
  case BO_Add:
    Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_SubAssign:
  case BO_Sub:
    Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_MulAssign:
  case BO_Mul:
    Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_DivAssign:
  case BO_Div:
    Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_ShlAssign:
  case BO_Shl:
    Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
    break;
  case BO_ShrAssign:
  case BO_Shr:
    Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
    break;
  case BO_LT:
    return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GT:
    return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_LE:
    return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GE:
    return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_EQ:
    // For equality operations, we assume any padding bits on unsigned types are
    // zero'd out. They could be overwritten through non-saturating operations
    // that cause overflow, but this leads to undefined behavior.
    return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_NE:
    return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_Cmp:
  case BO_LAnd:
  case BO_LOr:
    llvm_unreachable("Found unimplemented fixed point binary operation");
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Rem:
  case BO_Xor:
  case BO_And:
  case BO_Or:
  case BO_Assign:
  case BO_RemAssign:
  case BO_AndAssign:
  case BO_XorAssign:
  case BO_OrAssign:
  case BO_Comma:
    llvm_unreachable("Found unsupported binary operation for fixed point types.");
  }

  // Shifts keep the LHS semantics; other arithmetic was done in the common
  // semantics.
  bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
  // Convert to the result type.
  return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
                                                      : CommonFixedSema,
                                      ResultFixedSema);
}
4879
4880Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
4881 // The LHS is always a pointer if either side is.
4882 if (!op.LHS->getType()->isPointerTy()) {
4883 if (op.Ty->isSignedIntegerOrEnumerationType() ||
4884 op.Ty->isUnsignedIntegerType()) {
4885 const bool isSigned = op.Ty->isSignedIntegerOrEnumerationType();
4886 const bool hasSan =
4887 isSigned ? CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)
4888 : CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow);
4889 switch (getOverflowBehaviorConsideringType(CGF, op.Ty)) {
4890 case LangOptions::OB_Wrap:
4891 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4892 case LangOptions::OB_SignedAndDefined:
4893 if (!hasSan)
4894 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4895 [[fallthrough]];
4896 case LangOptions::OB_Unset:
4897 if (!hasSan)
4898 return isSigned ? Builder.CreateNSWSub(op.LHS, op.RHS, "sub")
4899 : Builder.CreateSub(op.LHS, op.RHS, "sub");
4900 [[fallthrough]];
4901 case LangOptions::OB_Trap:
4902 if (CanElideOverflowCheck(CGF.getContext(), op))
4903 return isSigned ? Builder.CreateNSWSub(op.LHS, op.RHS, "sub")
4904 : Builder.CreateSub(op.LHS, op.RHS, "sub");
4905 return EmitOverflowCheckedBinOp(op);
4906 }
4907 }
4908
4909 // For vector and matrix subs, try to fold into a fmuladd.
4910 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4911 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4912 // Try to form an fmuladd.
4913 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
4914 return FMulAdd;
4915 }
4916
4917 if (op.Ty->isConstantMatrixType()) {
4918 llvm::MatrixBuilder MB(Builder);
4919 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4920 return MB.CreateSub(op.LHS, op.RHS);
4921 }
4922
4923 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4924 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4925 return Builder.CreateFSub(op.LHS, op.RHS, "sub");
4926 }
4927
4928 if (op.isFixedPointOp())
4929 return EmitFixedPointBinOp(op);
4930
4931 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4932 }
4933
4934 // If the RHS is not a pointer, then we have normal pointer
4935 // arithmetic.
4936 if (!op.RHS->getType()->isPointerTy())
4938
4939 // Otherwise, this is a pointer subtraction.
4940
4941 // Do the raw subtraction part.
4942 llvm::Value *LHS
4943 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
4944 llvm::Value *RHS
4945 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
4946 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
4947
4948 // Okay, figure out the element size.
4949 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
4950 QualType elementType = expr->getLHS()->getType()->getPointeeType();
4951
4952 llvm::Value *divisor = nullptr;
4953
4954 // For a variable-length array, this is going to be non-constant.
4955 if (const VariableArrayType *vla
4956 = CGF.getContext().getAsVariableArrayType(elementType)) {
4957 auto VlaSize = CGF.getVLASize(vla);
4958 elementType = VlaSize.Type;
4959 divisor = VlaSize.NumElts;
4960
4961 // Scale the number of non-VLA elements by the non-VLA element size.
4962 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
4963 if (!eltSize.isOne())
4964 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
4965
4966 // For everything elese, we can just compute it, safe in the
4967 // assumption that Sema won't let anything through that we can't
4968 // safely compute the size of.
4969 } else {
4970 CharUnits elementSize;
4971 // Handle GCC extension for pointer arithmetic on void* and
4972 // function pointer types.
4973 if (elementType->isVoidType() || elementType->isFunctionType())
4974 elementSize = CharUnits::One();
4975 else
4976 elementSize = CGF.getContext().getTypeSizeInChars(elementType);
4977
4978 // Don't even emit the divide for element size of 1.
4979 if (elementSize.isOne())
4980 return diffInChars;
4981
4982 divisor = CGF.CGM.getSize(elementSize);
4983 }
4984
4985 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
4986 // pointer difference in C is only defined in the case where both operands
4987 // are pointing to elements of an array.
4988 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
4989}
4990
4991Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
4992 bool RHSIsSigned) {
4993 llvm::IntegerType *Ty;
4994 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4995 Ty = cast<llvm::IntegerType>(VT->getElementType());
4996 else
4997 Ty = cast<llvm::IntegerType>(LHS->getType());
4998 // For a given type of LHS the maximum shift amount is width(LHS)-1, however
4999 // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
5000 // this in ConstantInt::get, this results in the value getting truncated.
5001 // Constrain the return value to be max(RHS) in this case.
5002 llvm::Type *RHSTy = RHS->getType();
5003 llvm::APInt RHSMax =
5004 RHSIsSigned ? llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())
5005 : llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
5006 if (RHSMax.ult(Ty->getBitWidth()))
5007 return llvm::ConstantInt::get(RHSTy, RHSMax);
5008 return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
5009}
5010
5011Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
5012 const Twine &Name) {
5013 llvm::IntegerType *Ty;
5014 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
5015 Ty = cast<llvm::IntegerType>(VT->getElementType());
5016 else
5017 Ty = cast<llvm::IntegerType>(LHS->getType());
5018
5019 if (llvm::isPowerOf2_64(Ty->getBitWidth()))
5020 return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);
5021
5022 return Builder.CreateURem(
5023 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
5024}
5025
/// Emit a left-shift, applying OpenCL/HLSL amount masking or UBSan
/// shift-base/shift-exponent checks as configured.
Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  // NOTE(review): upstream also requires !isSignedOverflowDefined() in this
  // conjunction; one continuation line appears to be missing from this copy
  // -- confirm against trunk before relying on this condition.
  bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
                            Ops.Ty->hasSignedIntegerRepresentation() &&
                            !CGF.getLangOpts().CPlusPlus20;
  bool SanitizeUnsignedBase =
      CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
      Ops.Ty->hasUnsignedIntegerRepresentation();
  bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
  else if ((SanitizeBase || SanitizeExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    // Collect the ordinals of every enabled check so the debug location and
    // the emitted diagnostic cover all of them.
    SmallVector<SanitizerKind::SanitizerOrdinal, 3> Ordinals;
    if (SanitizeSignedBase)
      Ordinals.push_back(SanitizerKind::SO_ShiftBase);
    if (SanitizeUnsignedBase)
      Ordinals.push_back(SanitizerKind::SO_UnsignedShiftBase);
    if (SanitizeExponent)
      Ordinals.push_back(SanitizerKind::SO_ShiftExponent);

    SanitizerDebugLocation SanScope(&CGF, Ordinals,
                                    SanitizerHandler::ShiftOutOfBounds);
    SmallVector<std::pair<Value *, SanitizerKind::SanitizerOrdinal>, 2> Checks;
    bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
    llvm::Value *WidthMinusOne =
        GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);
    llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);

    if (SanitizeExponent) {
      Checks.push_back(
          std::make_pair(ValidExponent, SanitizerKind::SO_ShiftExponent));
    }

    if (SanitizeBase) {
      // Check whether we are shifting any non-zero bits off the top of the
      // integer. We only emit this check if exponent is valid - otherwise
      // instructions below will have undefined behavior themselves.
      llvm::BasicBlock *Orig = Builder.GetInsertBlock();
      llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
      llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
      Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
      // If RHS was promoted above, recompute the bound in the promoted type
      // so the subtraction below is well-typed.
      llvm::Value *PromotedWidthMinusOne =
          (RHS == Ops.RHS) ? WidthMinusOne
                           : GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);
      CGF.EmitBlock(CheckShiftBase);
      llvm::Value *BitsShiftedOff = Builder.CreateLShr(
          Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
                                     /*NUW*/ true, /*NSW*/ true),
          "shl.check");
      if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
        // In C99, we are not permitted to shift a 1 bit into the sign bit.
        // Under C++11's rules, shifting a 1 bit into the sign bit is
        // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
        // define signed left shifts, so we use the C99 and C++11 rules there).
        // Unsigned shifts can always shift into the top bit.
        llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
        BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
      }
      llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
      llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
      CGF.EmitBlock(Cont);
      // Merge: the base check trivially passes when the exponent was invalid
      // (the exponent check reports that case instead).
      llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
      BaseCheck->addIncoming(Builder.getTrue(), Orig);
      BaseCheck->addIncoming(ValidBase, CheckShiftBase);
      Checks.push_back(std::make_pair(
          BaseCheck, SanitizeSignedBase ? SanitizerKind::SO_ShiftBase
                                        : SanitizerKind::SO_UnsignedShiftBase));
    }

    assert(!Checks.empty());
    EmitBinOpCheck(Checks, Ops);
  }

  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}
5114
5115Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
5116 // TODO: This misses out on the sanitizer check below.
5117 if (Ops.isFixedPointOp())
5118 return EmitFixedPointBinOp(Ops);
5119
5120 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
5121 // RHS to the same size as the LHS.
5122 Value *RHS = Ops.RHS;
5123 if (Ops.LHS->getType() != RHS->getType())
5124 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
5125
5126 // OpenCL 6.3j: shift values are effectively % word size of LHS.
5127 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
5128 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
5129 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
5130 isa<llvm::IntegerType>(Ops.LHS->getType())) {
5131 SanitizerDebugLocation SanScope(&CGF, {SanitizerKind::SO_ShiftExponent},
5132 SanitizerHandler::ShiftOutOfBounds);
5133 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
5134 llvm::Value *Valid = Builder.CreateICmpULE(
5135 Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));
5136 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::SO_ShiftExponent), Ops);
5137 }
5138
5139 if (Ops.Ty->hasUnsignedIntegerRepresentation())
5140 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
5141 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
5142}
5143
5145// return corresponding comparison intrinsic for given vector type
5146static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
5147 BuiltinType::Kind ElemKind) {
5148 switch (ElemKind) {
5149 default: llvm_unreachable("unexpected element type");
5150 case BuiltinType::Char_U:
5151 case BuiltinType::UChar:
5152 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
5153 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
5154 case BuiltinType::Char_S:
5155 case BuiltinType::SChar:
5156 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
5157 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
5158 case BuiltinType::UShort:
5159 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
5160 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
5161 case BuiltinType::Short:
5162 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
5163 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
5164 case BuiltinType::UInt:
5165 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
5166 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
5167 case BuiltinType::Int:
5168 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
5169 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
5170 case BuiltinType::ULong:
5171 case BuiltinType::ULongLong:
5172 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
5173 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
5174 case BuiltinType::Long:
5175 case BuiltinType::LongLong:
5176 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
5177 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
5178 case BuiltinType::Float:
5179 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
5180 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
5181 case BuiltinType::Double:
5182 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
5183 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
5184 case BuiltinType::UInt128:
5185 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
5186 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
5187 case BuiltinType::Int128:
5188 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
5189 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
5190 }
5191}
5192
/// Emit a comparison for scalar, AltiVec-vector, member-pointer, and complex
/// operands. The caller supplies the predicate to use for each operand class:
/// UICmpOpc for unsigned/pointer, SICmpOpc for signed, FCmpOpc for floating
/// point; IsSignaling selects a signaling FP compare.
Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                      llvm::CmpInst::Predicate UICmpOpc,
                                      llvm::CmpInst::Predicate SICmpOpc,
                                      llvm::CmpInst::Predicate FCmpOpc,
                                      bool IsSignaling) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();
  // Member pointers: only ==/!= are defined (asserted below).
  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
    assert(E->getOpcode() == BO_EQ ||
           E->getOpcode() == BO_NE);
    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
    // NOTE(review): the head of this call (upstream:
    // Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(...) appears
    // to be missing from this copy -- confirm against trunk.
        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
    BinOpInfo BOInfo = EmitBinOps(E);
    Value *LHS = BOInfo.LHS;
    Value *RHS = BOInfo.RHS;

    // If AltiVec, the comparison results in a numeric type, so we use
    // intrinsics comparing vectors and giving 0 or 1 as a result
    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // constants for mapping CR6 register bits to predicate result
      enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;

      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;

      // in several cases vector arguments order will be reversed
      Value *FirstVecArg = LHS,
            *SecondVecArg = RHS;

      QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
      BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();

      // Select the predicate intrinsic and which CR6 bit encodes the result;
      // a<b is emitted as b>a by swapping the arguments.
      switch(E->getOpcode()) {
      default: llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        }
        else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        }
        else {
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
          std::swap(FirstVecArg, SecondVecArg);
        }
        break;
      }

      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});

      // The result type of intrinsic may not be same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
      // do nothing, if ResultTy is not i1 at the same time, it will cause
      // crash later.
      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
      if (ResultTy->getBitWidth() > 1 &&
          E->getType() == CGF.getContext().BoolTy)
        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                                  E->getExprLoc());
    }

    if (BOInfo.isFixedPointOp()) {
      Result = EmitFixedPointBinOp(BOInfo);
    } else if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
      if (!IsSignaling)
        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
      else
        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.

      // NOTE(review): the remainder of this condition (upstream also checks
      // both operands are pointers, followed by '{') appears to be missing
      // from this copy -- confirm against trunk.
      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&

        // Dynamic information is required to be stripped for comparisons,
        // because it could leak the dynamic information. Based on comparisons
        // of pointers to dynamic objects, the optimizer can replace one pointer
        // with another, which might be incorrect in presence of invariant
        // groups. Comparison with null is safe because null does not carry any
        // dynamic information.
        if (LHSTy.mayBeDynamicClass())
          LHS = Builder.CreateStripInvariantGroup(LHS);
        if (RHSTy.mayBeDynamicClass())
          RHS = Builder.CreateStripInvariantGroup(RHS);
      }

      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the appropriate
    // vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    // NOTE(review): the declarations of LHS/RHS used below (upstream:
    // CodeGenFunction::ComplexPairTy LHS, RHS;) appear to be missing from
    // this copy -- confirm against trunk.
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      // A real operand compared against a complex one: treat it as
      // (value + 0i) by pairing it with a zero imaginary part.
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
      (void)CTy;
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      // As complex comparisons can only be equality comparisons, they
      // are never signaling comparisons.
      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
    }

    if (E->getOpcode() == BO_EQ) {
      // Equal iff both the real AND imaginary parts compare equal.
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                              E->getExprLoc());
}
5376
    const BinaryOperator *E, Value **Previous, QualType *SrcType) {
  // In case we have the integer or bitfield sanitizer checks enabled
  // we want to get the expression before scalar conversion.
  if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
    CastKind Kind = ICE->getCastKind();
    // Only peel off casts that merely widen/load the value; other cast
    // kinds change the value in ways this pre-conversion probe can't undo.
    if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
      // Hand the pre-conversion value and its type back to the caller so the
      // bitfield-conversion check can compare against the original value.
      *SrcType = ICE->getSubExpr()->getType();
      *Previous = EmitScalarExpr(ICE->getSubExpr());
      // Pass default ScalarConversionOpts to avoid emitting
      // integer sanitizer checks as E refers to bitfield.
      return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
                                  ICE->getExprLoc());
    }
  }
  // No peelable implicit cast: evaluate the RHS normally.
  return EmitScalarExpr(E->getRHS());
}
5394
/// Emit a simple assignment (BO_Assign), dispatching on pointer-auth
/// qualification and ObjC ownership, and returning the assigned r-value
/// (or a reload for volatile C++ lvalues, or nullptr if ignored).
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  ApplyAtomGroup Grp(CGF.getDebugInfo());
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  // Pointer-auth-qualified destination: the RHS is emitted through
  // EmitPointerAuthQualify and later unqualified before being returned.
  // NOTE(review): the computation of `LV` used below appears to be missing
  // from this copy (upstream emits the LHS lvalue and stores first) --
  // confirm against trunk.
  if (PointerAuthQualifier PtrAuth = E->getLHS()->getType().getPointerAuth()) {
    llvm::Value *RV =
        CGF.EmitPointerAuthQualify(PtrAuth, E->getRHS(), LV.getAddress());
    CGF.EmitNullabilityCheck(LV, RV, E->getExprLoc());

    if (Ignore)
      return nullptr;
    RV = CGF.EmitPointerAuthUnqualify(PtrAuth, RV, LV.getType(),
                                      LV.getAddress(), /*nonnull*/ false);
    return RV;
  }

  // Dispatch on the ObjC ownership qualifier of the destination; ARC
  // lifetimes each use a dedicated store entry point.
  // NOTE(review): the `case Qualifiers::OCL_*:` labels appear to be missing
  // from this copy of the switch -- confirm against trunk.
  switch (E->getLHS()->getType().getObjCLifetime()) {
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
    break;

    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if LHS is a bitfield, if RHS contains an implicit cast expression
    // we want to extract that value and potentially (if the bitfield sanitizer
    // is enabled) use it to check for an implicit conversion.
    if (E->getLHS()->refersToBitField())
      RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
    else
      RHS = Visit(E->getRHS());

    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      Value *Src = Previous ? Previous : RHS;
      QualType DstType = E->getLHS()->getType();
      CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
                                      LHS.getBitFieldInfo(), E->getExprLoc());
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
  }
  // OpenMP: Handle lastprivate(condition:) in scalar assignment
  // NOTE(review): the head of this call appears to be missing from this
  // copy -- confirm against trunk.
  if (CGF.getLangOpts().OpenMP) {
                                                      E->getLHS());
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}
5489
/// Emit `&&`: element-wise for vectors, otherwise short-circuiting control
/// flow with profile/coverage/MC-DC instrumentation and constant folding of
/// the LHS when possible.
/// NOTE(review): several single-statement bodies and condition continuations
/// (e.g. after the isMCDCDecisionExpr checks and the InstrumentRegions
/// conditions) appear to be elided from this copy -- confirm against trunk
/// before editing logic here.
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  auto HasLHSSkip = CGF.hasSkipCounter(E);
  auto HasRHSSkip = CGF.hasSkipCounter(E->getRHS());

  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    // Both operands were evaluated unconditionally above; the result is an
    // element-wise AND sign-extended to the vector's integer type.
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.
      CGF.incrementProfileCounter(CGF.UseExecPath, E, /*UseBoth=*/true);

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.isMCDCDecisionExpr(E))

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
        llvm::BasicBlock *RHSSkip =
            (HasRHSSkip ? CGF.createBasicBlock("land.rhsskip") : FBlock);
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
        Builder.CreateCondBr(RHSCond, RHSBlockCnt, RHSSkip);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.EmitBranch(FBlock);
        if (HasRHSSkip) {
          CGF.EmitBlock(RHSSkip);
        }
        CGF.EmitBlock(FBlock);
      } else
        CGF.markStmtMaybeUsed(E->getRHS());

      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.isMCDCDecisionExpr(E))

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS())) {
      CGF.markStmtAsUsed(false, E);
      if (HasLHSSkip)

      CGF.markStmtMaybeUsed(E->getRHS());

      return llvm::Constant::getNullValue(ResTy);
    }
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.isMCDCDecisionExpr(E))

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  llvm::BasicBlock *LHSFalseBlock =
      (HasLHSSkip ? CGF.createBasicBlock("land.lhsskip") : ContBlock);

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, LHSFalseBlock,
                           CGF.getProfileCount(E->getRHS()));

  if (HasLHSSkip) {
    CGF.EmitBlock(LHSFalseBlock);
    CGF.EmitBranch(ContBlock);
  }

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  // Evaluate the RHS conditionally (it only runs when the LHS was true).
  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reaquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  llvm::BasicBlock *ContIncoming = RHSBlock;
  if (InstrumentRegions &&
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    llvm::BasicBlock *RHSBlockSkip =
        (HasRHSSkip ? CGF.createBasicBlock("land.rhsskip") : ContBlock);
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, RHSBlockSkip);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
    if (HasRHSSkip) {
      CGF.EmitBlock(RHSBlockSkip);
      CGF.EmitBranch(ContBlock);
      ContIncoming = RHSBlockSkip;
    }
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit line number for unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, ContIncoming);

  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.isMCDCDecisionExpr(E))

  // Artificial location to preserve the scope information
  {
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}
5658
5659Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
5660 auto HasLHSSkip = CGF.hasSkipCounter(E);
5661 auto HasRHSSkip = CGF.hasSkipCounter(E->getRHS());
5662
5663 // Perform vector logical or on comparisons with zero vectors.
5664 if (E->getType()->isVectorType()) {
5666
5667 Value *LHS = Visit(E->getLHS());
5668 Value *RHS = Visit(E->getRHS());
5669 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
5670 if (LHS->getType()->isFPOrFPVectorTy()) {
5671 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
5672 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
5673 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
5674 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
5675 } else {
5676 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
5677 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
5678 }
5679 Value *Or = Builder.CreateOr(LHS, RHS);
5680 return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
5681 }
5682
5683 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
5684 llvm::Type *ResTy = ConvertType(E->getType());
5685
5686 // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
5687 // If we have 0 || X, just emit X without inserting the control flow.
5688 bool LHSCondVal;
5689 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
5690 if (!LHSCondVal) { // If we have 0 || X, just emit X.
5691 CGF.incrementProfileCounter(CGF.UseExecPath, E, /*UseBoth=*/true);
5692
5693 // If the top of the logical operator nest, reset the MCDC temp to 0.
5694 if (CGF.isMCDCDecisionExpr(E))
5696
5697 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5698
5699 // If we're generating for profiling or coverage, generate a branch to a
5700 // block that increments the RHS counter need to track branch condition
5701 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
5702 // "FalseBlock" after the increment is done.
5703 if (InstrumentRegions &&
5705 CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5706 llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
5707 llvm::BasicBlock *RHSSkip =
5708 (HasRHSSkip ? CGF.createBasicBlock("lor.rhsskip") : FBlock);
5709 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
5710 Builder.CreateCondBr(RHSCond, RHSSkip, RHSBlockCnt);
5711 CGF.EmitBlock(RHSBlockCnt);
5713 CGF.EmitBranch(FBlock);
5714 if (HasRHSSkip) {
5715 CGF.EmitBlock(RHSSkip);
5717 }
5718 CGF.EmitBlock(FBlock);
5719 } else
5720 CGF.markStmtMaybeUsed(E->getRHS());
5721
5722 // If the top of the logical operator nest, update the MCDC bitmap.
5723 if (CGF.isMCDCDecisionExpr(E))
5725
5726 // ZExt result to int or bool.
5727 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
5728 }
5729
5730 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
5731 if (!CGF.ContainsLabel(E->getRHS())) {
5732 CGF.markStmtAsUsed(false, E);
5733 if (HasLHSSkip)
5735
5736 CGF.markStmtMaybeUsed(E->getRHS());
5737
5738 return llvm::ConstantInt::get(ResTy, 1);
5739 }
5740 }
5741
5742 // If the top of the logical operator nest, reset the MCDC temp to 0.
5743 if (CGF.isMCDCDecisionExpr(E))
5745
5746 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
5747 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
5748 llvm::BasicBlock *LHSTrueBlock =
5749 (HasLHSSkip ? CGF.createBasicBlock("lor.lhsskip") : ContBlock);
5750
5751 CodeGenFunction::ConditionalEvaluation eval(CGF);
5752
5753 // Branch on the LHS first. If it is true, go to the success (cont) block.
5754 CGF.EmitBranchOnBoolExpr(E->getLHS(), LHSTrueBlock, RHSBlock,
5756 CGF.getProfileCount(E->getRHS()));
5757
5758 if (HasLHSSkip) {
5759 CGF.EmitBlock(LHSTrueBlock);
5761 CGF.EmitBranch(ContBlock);
5762 }
5763
5764 // Any edges into the ContBlock are now from an (indeterminate number of)
5765 // edges from this first condition. All of these values will be true. Start
5766 // setting up the PHI node in the Cont Block for this.
5767 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
5768 "", ContBlock);
5769 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
5770 PI != PE; ++PI)
5771 PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
5772
5773 eval.begin(CGF);
5774
5775 // Emit the RHS condition as a bool value.
5776 CGF.EmitBlock(RHSBlock);
5778 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5779
5780 eval.end(CGF);
5781
5782 // Reaquire the RHS block, as there may be subblocks inserted.
5783 RHSBlock = Builder.GetInsertBlock();
5784
5785 // If we're generating for profiling or coverage, generate a branch on the
5786 // RHS to a block that increments the RHS true counter needed to track branch
5787 // condition coverage.
5788 llvm::BasicBlock *ContIncoming = RHSBlock;
5789 if (InstrumentRegions &&
5791 CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5792 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
5793 llvm::BasicBlock *RHSTrueBlock =
5794 (HasRHSSkip ? CGF.createBasicBlock("lor.rhsskip") : ContBlock);
5795 Builder.CreateCondBr(RHSCond, RHSTrueBlock, RHSBlockCnt);
5796 CGF.EmitBlock(RHSBlockCnt);
5798 CGF.EmitBranch(ContBlock);
5799 PN->addIncoming(RHSCond, RHSBlockCnt);
5800 if (HasRHSSkip) {
5801 CGF.EmitBlock(RHSTrueBlock);
5803 CGF.EmitBranch(ContBlock);
5804 ContIncoming = RHSTrueBlock;
5805 }
5806 }
5807
5808 // Emit an unconditional branch from this block to ContBlock. Insert an entry
5809 // into the phi node for the edge with the value of RHSCond.
5810 CGF.EmitBlock(ContBlock);
5811 PN->addIncoming(RHSCond, ContIncoming);
5812
5813 // If the top of the logical operator nest, update the MCDC bitmap.
5814 if (CGF.isMCDCDecisionExpr(E))
5816
5817 // ZExt result to int.
5818 return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
5819}
5820
5821Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
5822 CGF.EmitIgnoredExpr(E->getLHS());
5823 CGF.EnsureInsertPoint();
5824 return Visit(E->getRHS());
5825}
5826
5827//===----------------------------------------------------------------------===//
5828// Other Operators
5829//===----------------------------------------------------------------------===//
5830
5831/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
5832/// expression is cheap enough and side-effect-free enough to evaluate
5833/// unconditionally instead of conditionally. This is used to convert control
5834/// flow into selects in some cases.
5836 CodeGenFunction &CGF) {
5837 // Anything that is an integer or floating point constant is fine.
5838 return E->IgnoreParens()->isEvaluatable(CGF.getContext());
5839
5840 // Even non-volatile automatic variables can't be evaluated unconditionally.
5841 // Referencing a thread_local may cause non-trivial initialization work to
5842 // occur. If we're inside a lambda and one of the variables is from the scope
5843 // outside the lambda, that function may have returned already. Reading its
5844 // locals is a bad idea. Also, these reads may introduce races there didn't
5845 // exist in the source-level program.
5846}
5847
5848
5849Value *ScalarExprEmitter::
5850VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
5851 TestAndClearIgnoreResultAssign();
5852
5853 // Bind the common expression if necessary.
5854 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
5855
5856 Expr *condExpr = E->getCond();
5857 Expr *lhsExpr = E->getTrueExpr();
5858 Expr *rhsExpr = E->getFalseExpr();
5859
5860 // If the condition constant folds and can be elided, try to avoid emitting
5861 // the condition and the dead arm.
5862 bool CondExprBool;
5863 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5864 Expr *live = lhsExpr, *dead = rhsExpr;
5865 if (!CondExprBool) std::swap(live, dead);
5866
5867 // If the dead side doesn't have labels we need, just emit the Live part.
5868 if (!CGF.ContainsLabel(dead)) {
5869 CGF.incrementProfileCounter(CondExprBool ? CGF.UseExecPath
5870 : CGF.UseSkipPath,
5871 E, /*UseBoth=*/true);
5872 Value *Result = Visit(live);
5873 CGF.markStmtMaybeUsed(dead);
5874
5875 // If the live part is a throw expression, it acts like it has a void
5876 // type, so evaluating it returns a null Value*. However, a conditional
5877 // with non-void type must return a non-null Value*.
5878 if (!Result && !E->getType()->isVoidType())
5879 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
5880
5881 return Result;
5882 }
5883 }
5884
5885 // OpenCL: If the condition is a vector, we can treat this condition like
5886 // the select function.
5887 if (CGF.getLangOpts().OpenCL && (condExpr->getType()->isVectorType() ||
5888 condExpr->getType()->isExtVectorType())) {
5890
5891 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5892 llvm::Value *LHS = Visit(lhsExpr);
5893 llvm::Value *RHS = Visit(rhsExpr);
5894
5895 llvm::Type *condType = ConvertType(condExpr->getType());
5896 auto *vecTy = cast<llvm::FixedVectorType>(condType);
5897
5898 unsigned numElem = vecTy->getNumElements();
5899 llvm::Type *elemType = vecTy->getElementType();
5900
5901 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
5902 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
5903 llvm::Value *tmp = Builder.CreateSExt(
5904 TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
5905 llvm::Value *tmp2 = Builder.CreateNot(tmp);
5906
5907 // Cast float to int to perform ANDs if necessary.
5908 llvm::Value *RHSTmp = RHS;
5909 llvm::Value *LHSTmp = LHS;
5910 bool wasCast = false;
5911 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
5912 if (rhsVTy->getElementType()->isFloatingPointTy()) {
5913 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
5914 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
5915 wasCast = true;
5916 }
5917
5918 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
5919 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
5920 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
5921 if (wasCast)
5922 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
5923
5924 return tmp5;
5925 }
5926
5927 if (condExpr->getType()->isVectorType() ||
5928 condExpr->getType()->isSveVLSBuiltinType()) {
5930
5931 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5932 llvm::Value *LHS = Visit(lhsExpr);
5933 llvm::Value *RHS = Visit(rhsExpr);
5934
5935 llvm::Type *CondType = ConvertType(condExpr->getType());
5936 auto *VecTy = cast<llvm::VectorType>(CondType);
5937
5938 if (VecTy->getElementType()->isIntegerTy(1))
5939 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
5940
5941 // OpenCL uses the MSB of the mask vector.
5942 llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
5943 if (condExpr->getType()->isExtVectorType())
5944 CondV = Builder.CreateICmpSLT(CondV, ZeroVec, "vector_cond");
5945 else
5946 CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
5947 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
5948 }
5949
5950 // If this is a really simple expression (like x ? 4 : 5), emit this as a
5951 // select instead of as control flow. We can only do this if it is cheap and
5952 // safe to evaluate the LHS and RHS unconditionally.
5956 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
5957 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
5958
5959 CGF.incrementProfileCounter(E, StepV);
5960
5961 llvm::Value *LHS = Visit(lhsExpr);
5962 llvm::Value *RHS = Visit(rhsExpr);
5963 if (!LHS) {
5964 // If the conditional has void type, make sure we return a null Value*.
5965 assert(!RHS && "LHS and RHS types must match");
5966 return nullptr;
5967 }
5968 return Builder.CreateSelect(CondV, LHS, RHS, "cond");
5969 }
5970
5971 // If the top of the logical operator nest, reset the MCDC temp to 0.
5972 if (auto E = CGF.stripCond(condExpr); CGF.isMCDCDecisionExpr(E))
5974
5975 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
5976 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
5977 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
5978
5979 CodeGenFunction::ConditionalEvaluation eval(CGF);
5980 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
5981 CGF.getProfileCount(lhsExpr));
5982
5983 CGF.EmitBlock(LHSBlock);
5984
5985 // If the top of the logical operator nest, update the MCDC bitmap for the
5986 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5987 // may also contain a boolean expression.
5988 if (auto E = CGF.stripCond(condExpr); CGF.isMCDCDecisionExpr(E))
5990
5992 eval.begin(CGF);
5993 Value *LHS = Visit(lhsExpr);
5994 eval.end(CGF);
5995
5996 LHSBlock = Builder.GetInsertBlock();
5997 Builder.CreateBr(ContBlock);
5998
5999 CGF.EmitBlock(RHSBlock);
6000
6001 // If the top of the logical operator nest, update the MCDC bitmap for the
6002 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
6003 // may also contain a boolean expression.
6004 if (auto E = CGF.stripCond(condExpr); CGF.isMCDCDecisionExpr(E))
6006
6008 eval.begin(CGF);
6009 Value *RHS = Visit(rhsExpr);
6010 eval.end(CGF);
6011
6012 RHSBlock = Builder.GetInsertBlock();
6013 CGF.EmitBlock(ContBlock);
6014
6015 // If the LHS or RHS is a throw expression, it will be legitimately null.
6016 if (!LHS)
6017 return RHS;
6018 if (!RHS)
6019 return LHS;
6020
6021 // Create a PHI node for the real part.
6022 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
6023 PN->addIncoming(LHS, LHSBlock);
6024 PN->addIncoming(RHS, RHSBlock);
6025
6026 return PN;
6027}
6028
6029Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
6030 return Visit(E->getChosenSubExpr());
6031}
6032
6033Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
6034 Address ArgValue = Address::invalid();
6035 RValue ArgPtr = CGF.EmitVAArg(VE, ArgValue);
6036
6037 return ArgPtr.getScalarVal();
6038}
6039
6040Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
6041 return CGF.EmitBlockLiteral(block);
6042}
6043
6044// Convert a vec3 to vec4, or vice versa.
6046 Value *Src, unsigned NumElementsDst) {
6047 static constexpr int Mask[] = {0, 1, 2, -1};
6048 return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
6049}
6050
6051// Create cast instructions for converting LLVM value \p Src to LLVM type \p
6052// DstTy. \p Src has the same size as \p DstTy. Both are single value types
6053// but could be scalar or vectors of different lengths, and either can be
6054// pointer.
6055// There are 4 cases:
6056// 1. non-pointer -> non-pointer : needs 1 bitcast
6057// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
6058// 3. pointer -> non-pointer
6059// a) pointer -> intptr_t : needs 1 ptrtoint
6060// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
6061// 4. non-pointer -> pointer
6062// a) intptr_t -> pointer : needs 1 inttoptr
6063// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
6064// Note: for cases 3b and 4b two casts are required since LLVM casts do not
6065// allow casting directly between pointer types and non-integer non-pointer
6066// types.
6068 const llvm::DataLayout &DL,
6069 Value *Src, llvm::Type *DstTy,
6070 StringRef Name = "") {
6071 auto SrcTy = Src->getType();
6072
6073 // Case 1.
6074 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
6075 return Builder.CreateBitCast(Src, DstTy, Name);
6076
6077 // Case 2.
6078 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
6079 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
6080
6081 // Case 3.
6082 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
6083 // Case 3b.
6084 if (!DstTy->isIntegerTy())
6085 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
6086 // Cases 3a and 3b.
6087 return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
6088 }
6089
6090 // Case 4b.
6091 if (!SrcTy->isIntegerTy())
6092 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
6093 // Cases 4a and 4b.
6094 return Builder.CreateIntToPtr(Src, DstTy, Name);
6095}
6096
6097Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
6098 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
6099 llvm::Type *DstTy = ConvertType(E->getType());
6100
6101 llvm::Type *SrcTy = Src->getType();
6102 unsigned NumElementsSrc =
6104 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
6105 : 0;
6106 unsigned NumElementsDst =
6108 ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
6109 : 0;
6110
6111 // Use bit vector expansion for ext_vector_type boolean vectors.
6112 if (E->getType()->isExtVectorBoolType())
6113 return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");
6114
6115 // Going from vec3 to non-vec3 is a special case and requires a shuffle
6116 // vector to get a vec4, then a bitcast if the target type is different.
6117 if (NumElementsSrc == 3 && NumElementsDst != 3) {
6118 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
6119 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
6120 DstTy);
6121
6122 Src->setName("astype");
6123 return Src;
6124 }
6125
6126 // Going from non-vec3 to vec3 is a special case and requires a bitcast
6127 // to vec4 if the original type is not vec4, then a shuffle vector to
6128 // get a vec3.
6129 if (NumElementsSrc != 3 && NumElementsDst == 3) {
6130 auto *Vec4Ty = llvm::FixedVectorType::get(
6131 cast<llvm::VectorType>(DstTy)->getElementType(), 4);
6132 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
6133 Vec4Ty);
6134
6135 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
6136 Src->setName("astype");
6137 return Src;
6138 }
6139
6140 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
6141 Src, DstTy, "astype");
6142}
6143
6144Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
6145 return CGF.EmitAtomicExpr(E).getScalarVal();
6146}
6147
6148//===----------------------------------------------------------------------===//
6149// Entry Point into this File
6150//===----------------------------------------------------------------------===//
6151
6152/// Emit the computation of the specified expression of scalar type, ignoring
6153/// the result.
6154Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
6155 assert(E && hasScalarEvaluationKind(E->getType()) &&
6156 "Invalid scalar expression to emit");
6157
6158 return ScalarExprEmitter(*this, IgnoreResultAssign)
6159 .Visit(const_cast<Expr *>(E));
6160}
6161
6162/// Emit a conversion from the specified type to the specified destination type,
6163/// both of which are LLVM scalar types.
6165 QualType DstTy,
6166 SourceLocation Loc) {
6167 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
6168 "Invalid scalar expression to emit");
6169 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
6170}
6171
6172/// Emit a conversion from the specified complex type to the specified
6173/// destination type, where the destination type is an LLVM scalar type.
6175 QualType SrcTy,
6176 QualType DstTy,
6177 SourceLocation Loc) {
6178 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
6179 "Invalid complex -> scalar conversion");
6180 return ScalarExprEmitter(*this)
6181 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
6182}
6183
6184
6185Value *
6187 QualType PromotionType) {
6188 if (!PromotionType.isNull())
6189 return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
6190 else
6191 return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
6192}
6193
6194
6197 bool isInc, bool isPre) {
6198 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
6199}
6200
6202 // object->isa or (*object).isa
6203 // Generate code as for: *(Class*)object
6204
6205 Expr *BaseExpr = E->getBase();
6207 if (BaseExpr->isPRValue()) {
6208 llvm::Type *BaseTy =
6210 Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
6211 } else {
6212 Addr = EmitLValue(BaseExpr).getAddress();
6213 }
6214
6215 // Cast the address to Class*.
6216 Addr = Addr.withElementType(ConvertType(E->getType()));
6217 return MakeAddrLValue(Addr, E->getType());
6218}
6219
6220
6222 const CompoundAssignOperator *E) {
6224 ScalarExprEmitter Scalar(*this);
6225 Value *Result = nullptr;
6226 switch (E->getOpcode()) {
6227#define COMPOUND_OP(Op) \
6228 case BO_##Op##Assign: \
6229 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
6230 Result)
6231 COMPOUND_OP(Mul);
6232 COMPOUND_OP(Div);
6233 COMPOUND_OP(Rem);
6234 COMPOUND_OP(Add);
6235 COMPOUND_OP(Sub);
6236 COMPOUND_OP(Shl);
6237 COMPOUND_OP(Shr);
6239 COMPOUND_OP(Xor);
6240 COMPOUND_OP(Or);
6241#undef COMPOUND_OP
6242
6243 case BO_PtrMemD:
6244 case BO_PtrMemI:
6245 case BO_Mul:
6246 case BO_Div:
6247 case BO_Rem:
6248 case BO_Add:
6249 case BO_Sub:
6250 case BO_Shl:
6251 case BO_Shr:
6252 case BO_LT:
6253 case BO_GT:
6254 case BO_LE:
6255 case BO_GE:
6256 case BO_EQ:
6257 case BO_NE:
6258 case BO_Cmp:
6259 case BO_And:
6260 case BO_Xor:
6261 case BO_Or:
6262 case BO_LAnd:
6263 case BO_LOr:
6264 case BO_Assign:
6265 case BO_Comma:
6266 llvm_unreachable("Not valid compound assignment operators");
6267 }
6268
6269 llvm_unreachable("Unhandled compound assignment operator");
6270}
6271
6273 // The total (signed) byte offset for the GEP.
6274 llvm::Value *TotalOffset;
6275 // The offset overflow flag - true if the total offset overflows.
6276 llvm::Value *OffsetOverflows;
6277};
6278
6279/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
6280/// and compute the total offset it applies from it's base pointer BasePtr.
6281/// Returns offset in bytes and a boolean flag whether an overflow happened
6282/// during evaluation.
6284 llvm::LLVMContext &VMContext,
6285 CodeGenModule &CGM,
6286 CGBuilderTy &Builder) {
6287 const auto &DL = CGM.getDataLayout();
6288
6289 // The total (signed) byte offset for the GEP.
6290 llvm::Value *TotalOffset = nullptr;
6291
6292 // Was the GEP already reduced to a constant?
6293 if (isa<llvm::Constant>(GEPVal)) {
6294 // Compute the offset by casting both pointers to integers and subtracting:
6295 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
6296 Value *BasePtr_int =
6297 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
6298 Value *GEPVal_int =
6299 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
6300 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
6301 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
6302 }
6303
6304 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
6305 assert(GEP->getPointerOperand() == BasePtr &&
6306 "BasePtr must be the base of the GEP.");
6307 assert(GEP->isInBounds() && "Expected inbounds GEP");
6308
6309 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
6310
6311 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
6312 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
6313 auto *SAddIntrinsic =
6314 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
6315 auto *SMulIntrinsic =
6316 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
6317
6318 // The offset overflow flag - true if the total offset overflows.
6319 llvm::Value *OffsetOverflows = Builder.getFalse();
6320
6321 /// Return the result of the given binary operation.
6322 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
6323 llvm::Value *RHS) -> llvm::Value * {
6324 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
6325
6326 // If the operands are constants, return a constant result.
6327 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
6328 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
6329 llvm::APInt N;
6330 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
6331 /*Signed=*/true, N);
6332 if (HasOverflow)
6333 OffsetOverflows = Builder.getTrue();
6334 return llvm::ConstantInt::get(VMContext, N);
6335 }
6336 }
6337
6338 // Otherwise, compute the result with checked arithmetic.
6339 auto *ResultAndOverflow = Builder.CreateCall(
6340 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
6341 OffsetOverflows = Builder.CreateOr(
6342 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
6343 return Builder.CreateExtractValue(ResultAndOverflow, 0);
6344 };
6345
6346 // Determine the total byte offset by looking at each GEP operand.
6347 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
6348 GTI != GTE; ++GTI) {
6349 llvm::Value *LocalOffset;
6350 auto *Index = GTI.getOperand();
6351 // Compute the local offset contributed by this indexing step:
6352 if (auto *STy = GTI.getStructTypeOrNull()) {
6353 // For struct indexing, the local offset is the byte position of the
6354 // specified field.
6355 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
6356 LocalOffset = llvm::ConstantInt::get(
6357 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
6358 } else {
6359 // Otherwise this is array-like indexing. The local offset is the index
6360 // multiplied by the element size.
6361 auto *ElementSize =
6362 llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
6363 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
6364 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
6365 }
6366
6367 // If this is the first offset, set it as the total offset. Otherwise, add
6368 // the local offset into the running total.
6369 if (!TotalOffset || TotalOffset == Zero)
6370 TotalOffset = LocalOffset;
6371 else
6372 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
6373 }
6374
6375 return {TotalOffset, OffsetOverflows};
6376}
6377
/// Emit an inbounds GEP over \p Ptr and, when the pointer-overflow sanitizer
/// is enabled, emit the runtime checks that the resulting pointer neither
/// wraps around the address space nor crosses null.
Value *
CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
                                        ArrayRef<Value *> IdxList,
                                        bool SignedIndices, bool IsSubtraction,
                                        SourceLocation Loc, const Twine &Name) {
  llvm::Type *PtrTy = Ptr->getType();

  // Unsigned, additive indexing additionally cannot wrap in the unsigned
  // sense, so record that on the GEP.
  llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
  if (!SignedIndices && !IsSubtraction)
    NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();

  Value *GEPVal = Builder.CreateGEP(ElemTy, Ptr, IdxList, Name, NWFlags);

  // If the pointer overflow sanitizer isn't enabled, do nothing.
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return GEPVal;

  // Perform nullptr-and-offset check unless the nullptr is defined.
  bool PerformNullCheck = !NullPointerIsDefined(
      Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
  // Check for overflows unless the GEP got constant-folded,
  // and only in the default address space
  bool PerformOverflowCheck =
      !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;

  if (!(PerformNullCheck || PerformOverflowCheck))
    return GEPVal;

  const auto &DL = CGM.getDataLayout();

  auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
  auto CheckHandler = SanitizerHandler::PointerOverflow;
  SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
  llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);

  GEPOffsetAndOverflow EvaluatedGEP =
      EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);

  assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
          EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
         "If the offset got constant-folded, we don't expect that there was an "
         "overflow.");

  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);

  // Common case: if the total offset is zero, don't emit a check.
  if (EvaluatedGEP.TotalOffset == Zero)
    return GEPVal;

  // Now that we've computed the total offset, add it to the base pointer (with
  // wrapping semantics).
  auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
  auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);

  llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
                    2>
      Checks;

  if (PerformNullCheck) {
    // If the base pointer evaluates to a null pointer value,
    // the only valid  pointer this inbounds GEP can produce is also
    // a null pointer, so the offset must also evaluate to zero.
    // Likewise, if we have non-zero base pointer, we can not get null pointer
    // as a result, so the offset can not be -intptr_t(BasePtr).
    // In other words, both pointers are either null, or both are non-null,
    // or the behaviour is undefined.
    auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
    auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
    auto *Valid = Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr);
    Checks.emplace_back(Valid, CheckOrdinal);
  }

  if (PerformOverflowCheck) {
    // The GEP is valid if:
    // 1) The total offset doesn't overflow, and
    // 2) The sign of the difference between the computed address and the base
    // pointer matches the sign of the total offset.
    llvm::Value *ValidGEP;
    auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
    if (SignedIndices) {
      // GEP is computed as `unsigned base + signed offset`, therefore:
      // * If offset was positive, then the computed pointer can not be
      //   [unsigned] less than the base pointer, unless it overflowed.
      // * If offset was negative, then the computed pointer can not be
      //   [unsigned] greater than the base pointer, unless it overflowed.
      auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
      auto *PosOrZeroOffset =
          Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
      llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
      ValidGEP =
          Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
    } else if (!IsSubtraction) {
      // GEP is computed as `unsigned base + unsigned offset`, therefore the
      // computed pointer can not be [unsigned] less than base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
      ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
    } else {
      // GEP is computed as `unsigned base - unsigned offset`, therefore the
      // computed pointer can not be [unsigned] greater than base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
      ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
    }
    ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
    Checks.emplace_back(ValidGEP, CheckOrdinal);
  }

  assert(!Checks.empty() && "Should have produced some checks.");

  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
  EmitCheck(Checks, CheckHandler, StaticArgs, DynamicArgs);

  return GEPVal;
}
6495
6497 Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
6498 bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
6499 const Twine &Name) {
6500 if (!SanOpts.has(SanitizerKind::PointerOverflow)) {
6501 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6502 if (!SignedIndices && !IsSubtraction)
6503 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6504
6505 return Builder.CreateGEP(Addr, IdxList, elementType, Align, Name, NWFlags);
6506 }
6507
6508 return RawAddress(
6509 EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
6510 IdxList, SignedIndices, IsSubtraction, Loc, Name),
6511 elementType, Align);
6512}
Defines the clang::ASTContext interface.
#define V(N, I)
static llvm::Value * EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, const BinaryOperator *E, llvm::Value *LHS, llvm::Value *RHS, CompareKind Kind, const char *NameSuffix="")
static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue DestVal, LValue SrcVal, SourceLocation Loc)
static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty)
static llvm::Value * EmitIsNegativeTestHelper(Value *V, QualType VType, const char *Name, CGBuilderTy &Builder)
static Value * createCastsForTypeOfSameSize(CGBuilderTy &Builder, const llvm::DataLayout &DL, Value *Src, llvm::Type *DstTy, StringRef Name="")
static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E)
IntrinsicType
@ VCMPGT
@ VCMPEQ
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, BuiltinType::Kind ElemKind)
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal, llvm::LLVMContext &VMContext, CodeGenModule &CGM, CGBuilderTy &Builder)
Evaluate given GEPVal, which is either an inbounds GEP, or a constant, and compute the total offset i...
static bool isDeclRefKnownNonNull(CodeGenFunction &CGF, const ValueDecl *D)
static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(QualType SrcType, QualType DstType)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool negMul, bool negAdd)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, unsigned Off)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF, Value *Src, unsigned NumElementsDst)
static Value * tryEmitFMulAdd(const BinOpInfo &op, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool isSub=false)
static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E, llvm::Value *InVal, bool IsInc, FPOptions FPFeatures)
#define HANDLE_BINOP(OP)
#define COMPOUND_OP(Op)
#define HANDLEBINOP(OP)
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e, CIRGenFunction &cgf)
Return true if the specified expression is cheap enough and side-effect-free enough to evaluate uncon...
static std::optional< QualType > getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e)
If e is a widened promoted integer, get its base (unpromoted) type.
#define VISITCOMP(CODE)
static uint32_t getBitWidth(const Expr *E)
llvm::APSInt APSInt
Definition Compiler.cpp:24
static Decl::Kind getKind(const Decl *D)
FormatToken * Previous
The previous token in the unwrapped line.
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
This file contains the declaration of TrapReasonBuilder and related classes.
llvm::APInt getValue() const
APSInt & getInt()
Definition APValue.h:508
bool isLValue() const
Definition APValue.h:490
bool isInt() const
Definition APValue.h:485
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
CanQualType FloatTy
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
const LangOptions & getLangOpts() const
Definition ASTContext.h:952
bool isTypeIgnoredBySanitizer(const SanitizerMask &Mask, const QualType &Ty) const
Check if a type can have its sanitizer instrumentation elided based on its presence within an ignorel...
CanQualType BoolTy
unsigned getOpenMPDefaultSimdAlign(QualType T) const
Get default simd alignment of the specified complete type in bits.
llvm::FixedPointSemantics getFixedPointSemantics(QualType Ty) const
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getPromotedIntegerType(QualType PromotableType) const
Return the type that PromotableType will promote to: C99 6.3.1.1p2, assuming that PromotableType is a...
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getComplexType(QualType T) const
Return the uniqued reference to the type for a complex number with the specified element type.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:917
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
bool isPromotableIntegerType(QualType T) const
More type predicates useful for type checking/promotion.
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
bool isUnaryOverflowPatternExcluded(const UnaryOperator *UO)
uint64_t getCharWidth() const
Return the size of the character type, in bits.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4534
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4540
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4546
LabelDecl * getLabel() const
Definition Expr.h:4576
uint64_t getValue() const
Definition ExprCXX.h:3048
QualType getElementType() const
Definition TypeBase.h:3784
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:6751
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
static Opcode getOpForCompoundAssignment(Opcode Opc)
Definition Expr.h:4188
bool isCompoundAssignmentOp() const
Definition Expr.h:4185
SourceLocation getExprLoc() const
Definition Expr.h:4082
bool isShiftOp() const
Definition Expr.h:4130
Expr * getRHS() const
Definition Expr.h:4093
bool isShiftAssignOp() const
Definition Expr.h:4199
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:4254
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, const Expr *LHS, const Expr *RHS)
Return true if a binary operator using the specified opcode and operands would match the 'p = (i8*)nu...
Definition Expr.cpp:2205
Opcode getOpcode() const
Definition Expr.h:4086
BinaryOperatorKind Opcode
Definition Expr.h:4046
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition DeclCXX.h:249
bool getValue() const
Definition ExprCXX.h:744
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1107
bool getValue() const
Definition ExprCXX.h:4332
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition ExprCXX.h:308
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1603
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
path_iterator path_begin()
Definition Expr.h:3749
CastKind getCastKind() const
Definition Expr.h:3723
bool changesVolatileQualification() const
Return.
Definition Expr.h:3813
path_iterator path_end()
Definition Expr.h:3750
Expr * getSubExpr()
Definition Expr.h:3729
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
bool isOne() const
isOne - Test whether the quantity equals one.
Definition CharUnits.h:125
unsigned getValue() const
Definition Expr.h:1632
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4887
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
SanitizerSet SanitizeTrap
Set of sanitizer checks that trap rather than diagnose.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
static Address invalid()
Definition Address.h:176
bool isValid() const
Definition Address.h:177
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:118
virtual llvm::Constant * EmitNullMemberPointer(const MemberPointerType *MPT)
Create a null member pointer of the given type.
Definition CGCXXABI.cpp:102
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition CGCXXABI.cpp:94
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition CGCXXABI.cpp:84
virtual llvm::Value * EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *Src)
Perform a derived-to-base, base-to-derived, or bitcast member pointer conversion.
Definition CGCXXABI.cpp:71
void EmitPseudoVariable(CGBuilderTy &Builder, llvm::Instruction *Value, QualType Ty)
Emit a pseudo variable and debug info for an intermediate value if it does not correspond to a variab...
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
void emitInitListOpaqueValues(CodeGenFunction &CGF, InitListExpr *E)
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
Produce the code for a CK_ARCConsumeObject.
Definition CGObjC.cpp:2179
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
Definition CGObjC.cpp:591
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
CurrentSourceLocExprScope CurSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
llvm::Value * performAddrSpaceCast(llvm::Value *Src, llvm::Type *DestTy)
llvm::Value * EmitARCReclaimReturnedObject(const Expr *e, bool allowUnsafeClaim)
Definition CGObjC.cpp:3116
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
Definition CGObjC.cpp:3706
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:7193
llvm::Value * EmitObjCSelectorExpr(const ObjCSelectorExpr *E)
Emit a selector.
Definition CGObjC.cpp:275
SanitizerSet SanOpts
Sanitizers enabled for this function.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
llvm::Value * EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E)
Definition CGObjC.cpp:269
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
const CastExpr * CurCast
If a cast expression is being visited, this holds the current cast's expression.
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitObjCProtocolExpr(const ObjCProtocolExpr *E)
Definition CGObjC.cpp:283
llvm::Value * EmitPointerAuthQualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType ValueType, Address StorageAddress, bool IsKnownNonNull)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
Definition CGExpr.cpp:3000
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:4001
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition CGCall.cpp:6484
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
Definition CGExpr.cpp:7294
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void maybeUpdateMCDCTestVectorBitmap(const Expr *E)
Increment the profiler's counter for the given expression by StepV.
void EmitCXXDeleteExpr(const CXXDeleteExpr *E)
llvm::Value * EmitObjCArrayLiteral(const ObjCArrayLiteral *E)
Definition CGObjC.cpp:265
llvm::Value * EmitPromotedScalarExpr(const Expr *E, QualType PromotionType)
const LangOptions & getLangOpts() const
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
Store into a strong object.
Definition CGObjC.cpp:2572
bool isPointerKnownNonNull(const Expr *E)
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
Definition CGClass.cpp:388
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
Definition CGDecl.cpp:765
llvm::Value * EmitPointerAuthUnqualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType PointerType, Address StorageAddress, bool IsKnownNonNull)
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
Emit a compare-and-exchange op for atomic type.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
Definition CGClass.cpp:2953
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitCXXNewExpr(const CXXNewExpr *E)
bool hasSkipCounter(const Stmt *S) const
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
std::pair< LValue, llvm::Value * > EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored)
Definition CGObjC.cpp:3656
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3891
LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E, llvm::Value *&Result)
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition CGExpr.cpp:176
const TargetInfo & getTarget() const
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
llvm::Value * EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty)
Definition CGObjC.cpp:3980
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:251
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:6406
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2497
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
RawAddress CreateIRTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateIRTempWithoutCast - Create a temporary IR object of the given type, with appropriate alignment.
Definition CGExpr.cpp:183
llvm::Value * EmitObjCBoxedExpr(const ObjCBoxedExpr *E)
EmitObjCBoxedExpr - This routine generates code to call the appropriate expression boxing method.
Definition CGObjC.cpp:65
void EmitBoundsCheck(const Expr *ArrayExpr, const Expr *ArrayExprBase, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
Definition CGExpr.cpp:1255
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:232
void maybeResetMCDCCondBitmap(const Expr *E)
Zero-init the MCDC temp value.
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:4149
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6359
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
Definition CGExpr.cpp:2037
llvm::Value * EmitARCRetainScalarExpr(const Expr *expr)
EmitARCRetainScalarExpr - Semantically equivalent to EmitARCRetainObject(e->getType(),...
Definition CGObjC.cpp:3520
llvm::Value * EmitBlockLiteral(const BlockExpr *)
Emit block literal.
Definition CGBlocks.cpp:764
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2223
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6345
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
static const Expr * stripCond(const Expr *C)
Ignore parentheses and logical-NOT to track conditions consistently.
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2738
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4584
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition CGStmt.cpp:560
llvm::AtomicRMWInst * emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Order=llvm::AtomicOrdering::SequentiallyConsistent, llvm::SyncScope::ID SSID=llvm::SyncScope::System, const AtomicExpr *AE=nullptr)
Emit an atomicrmw instruction, and applying relevant metadata when applicable.
llvm::Value * EmitPointerArithmetic(const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer, Expr *indexOperand, llvm::Value *index, bool isSubtraction)
Emit pointer + index arithmetic.
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:273
uint64_t getCurrentProfileCount()
Get the profiler's current count.
llvm::Type * ConvertTypeForMem(QualType T)
RValue EmitAtomicExpr(AtomicExpr *E)
Definition CGAtomic.cpp:912
void markStmtMaybeUsed(const Stmt *S)
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
void FlattenAccessAndTypeLValue(LValue LVal, SmallVectorImpl< LValue > &AccessList)
Definition CGExpr.cpp:7303
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1591
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition CGStmt.cpp:663
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition CGExpr.cpp:1672
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition CGExpr.cpp:189
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
Definition CGExpr.cpp:742
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitBuiltinAvailable(const VersionTuple &Version)
Definition CGObjC.cpp:4060
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
Definition CGExpr.cpp:5154
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4491
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of 'this'.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2257
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
llvm::Value * EmitObjCStringLiteral(const ObjCStringLiteral *E)
Emits an instance of NSConstantString representing the object.
Definition CGObjC.cpp:52
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr)
Try to emit a reference to the given value without producing it as an l-value.
Definition CGExpr.cpp:1934
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1707
llvm::Value * EmitARCExtendBlockObject(const Expr *expr)
Definition CGObjC.cpp:3551
void markStmtAsUsed(bool Skipped, const Stmt *S)
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
i8* @objc_storeWeak(i8** addr, i8* value) Returns value.
Definition CGObjC.cpp:2678
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
bool isMCDCDecisionExpr(const Expr *E) const
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:643
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition CGExpr.cpp:1387
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
TrapReasonBuilder BuildTrapReason(unsigned DiagID, TrapReason &TR)
Helper function to construct a TrapReasonBuilder.
llvm::Constant * getNullPointer(llvm::PointerType *T, QualType QT)
Get target specific null pointer.
const TargetInfo & getTarget() const
llvm::Constant * getMemberPointerConstant(const UnaryOperator *e)
const llvm::DataLayout & getDataLayout() const
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
const CodeGenOptions & getCodeGenOpts() const
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Value * createOpenCLIntToSamplerConversion(const Expr *E, CodeGenFunction &CGF)
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
LValue - This represents an lvalue references.
Definition CGValue.h:183
bool isBitField() const
Definition CGValue.h:288
bool isVolatileQualified() const
Definition CGValue.h:297
const Qualifiers & getQuals() const
Definition CGValue.h:350
Address getAddress() const
Definition CGValue.h:373
QualType getType() const
Definition CGValue.h:303
const CGBitFieldInfo & getBitFieldInfo() const
Definition CGValue.h:446
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:99
bool isAggregate() const
Definition CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:84
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:72
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4303
QualType getComputationLHSType() const
Definition Expr.h:4337
QualType getComputationResultType() const
Definition Expr.h:4340
bool isSatisfied() const
Whether or not the concept with the given arguments was satisfied when the expression was created.
APValue getAPValueResult() const
Definition Expr.cpp:413
bool hasAPValueResult() const
Definition Expr.h:1160
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4437
unsigned mapRowMajorToColumnMajorFlattenedIndex(unsigned RowMajorIdx) const
Given a row-major flattened index RowMajorIdx, return the equivalent column-major flattened index.
Definition TypeBase.h:4496
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:4812
T * getAttr() const
Definition DeclBase.h:581
ChildElementIter< false > begin()
Definition Expr.h:5235
size_t getDataElementCount() const
Definition Expr.h:5151
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isGLValue() const
Definition Expr.h:287
@ SE_AllowSideEffects
Allow any unmodeled side effect.
Definition Expr.h:677
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3070
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition Expr.h:479
QualType getType() const
Definition Expr.h:144
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3260
llvm::APInt getValue() const
Returns an internal integer representation of the literal.
Definition Expr.h:1578
llvm::APFloat getValue() const
Definition Expr.h:1669
const Expr * getSubExpr() const
Definition Expr.h:1065
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition Expr.h:3856
unsigned getNumInits() const
Definition Expr.h:5332
bool hadArrayRangeDesignator() const
Definition Expr.h:5486
const Expr * getInit(unsigned Init) const
Definition Expr.h:5356
bool isSignedOverflowDefined() const
std::string OverflowHandler
The name of the handler function to be called when -ftrapv is specified.
Represents a matrix type, as defined in the Matrix Types clang extensions.
Definition TypeBase.h:4387
Expr * getBase() const
Definition Expr.h:3444
bool isArrow() const
Definition Expr.h:3551
VersionTuple getVersion() const
Definition ExprObjC.h:1757
ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC 'id' type.
Definition ExprObjC.h:1529
Expr * getBase() const
Definition ExprObjC.h:1554
SourceLocation getExprLoc() const LLVM_READONLY
Definition ExprObjC.h:1577
const ObjCMethodDecl * getMethodDecl() const
Definition ExprObjC.h:1395
QualType getReturnType() const
Definition DeclObjC.h:329
Represents a pointer to an Objective C object.
Definition TypeBase.h:8049
const ObjCObjectType * getObjectType() const
Gets the type pointed to by this ObjC pointer.
Definition TypeBase.h:8086
Expr * getIndexExpr(unsigned Idx)
Definition Expr.h:2589
const OffsetOfNode & getComponent(unsigned Idx) const
Definition Expr.h:2577
TypeSourceInfo * getTypeSourceInfo() const
Definition Expr.h:2570
unsigned getNumComponents() const
Definition Expr.h:2585
unsigned getArrayExprIndex() const
For an array element node, returns the index into the array of expressions.
Definition Expr.h:2482
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition Expr.h:2488
@ Array
An index into an array.
Definition Expr.h:2429
@ Identifier
A field in a dependent type, known only by its name.
Definition Expr.h:2433
@ Field
A field.
Definition Expr.h:2431
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition Expr.h:2436
Kind getKind() const
Determine what kind of offsetof node this is.
Definition Expr.h:2478
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition Expr.h:2498
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:1211
Expr * getSelectedExpr() const
Definition ExprCXX.h:4639
const Expr * getSubExpr() const
Definition Expr.h:2202
Pointer-authentication qualifiers.
Definition TypeBase.h:152
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3378
A (possibly-)qualified type.
Definition TypeBase.h:937
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:1459
bool mayBeDynamicClass() const
Returns true if it is a class and it might be dynamic.
Definition Type.cpp:132
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8431
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1444
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8616
QualType getCanonicalType() const
Definition TypeBase.h:8483
bool UseExcessPrecision(const ASTContext &Ctx)
Definition Type.cpp:1626
bool mayBeNotDynamicClass() const
Returns true if it is not a class or if the class might not be dynamic.
Definition Type.cpp:137
bool isCanonical() const
Definition TypeBase.h:8488
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
void removePointerAuth()
Definition TypeBase.h:610
bool isSatisfied() const
Whether or not the requires clause is satisfied.
std::string ComputeName(ASTContext &Context) const
Definition Expr.cpp:587
static constexpr SanitizerMask bitPosToMask(const unsigned Pos)
Create a mask with a bit enabled at position Pos.
Definition Sanitizers.h:59
llvm::APSInt getShuffleMaskIdx(unsigned N) const
Definition Expr.h:4698
unsigned getNumSubExprs() const
getNumSubExprs - Return the size of the SubExprs array.
Definition Expr.h:4679
Expr * getExpr(unsigned Index)
getExpr - Return the Expr at the specified index.
Definition Expr.h:4685
unsigned getPackLength() const
Retrieve the length of the parameter pack.
Definition ExprCXX.h:4515
APValue EvaluateInContext(const ASTContext &Ctx, const Expr *DefaultExpr) const
Return the result of evaluating this SourceLocExpr in the specified (and possibly null) default argum...
Definition Expr.cpp:2282
SourceLocation getLocation() const
Definition Expr.h:5064
Encodes a location in the source.
CompoundStmt * getSubStmt()
Definition Expr.h:4615
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
void dump() const
Dumps the specified AST fragment and all subtrees to llvm::errs().
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:355
virtual bool useFP16ConversionIntrinsics() const
Check whether conversions to and from __fp16 should go through an integer bitcast with i16.
VersionTuple getPlatformMinVersion() const
Retrieve the minimum desired version of the platform, to which the program should be compiled.
const llvm::fltSemantics & getHalfFormat() const
Definition TargetInfo.h:789
const llvm::fltSemantics & getBFloat16Format() const
Definition TargetInfo.h:799
const llvm::fltSemantics & getLongDoubleFormat() const
Definition TargetInfo.h:810
const llvm::fltSemantics & getFloat128Format() const
Definition TargetInfo.h:818
const llvm::fltSemantics & getIbm128Format() const
Definition TargetInfo.h:826
QualType getType() const
Return the type wrapped by this type source info.
Definition TypeBase.h:8413
bool getBoolValue() const
Definition ExprCXX.h:2951
const APValue & getAPValue() const
Definition ExprCXX.h:2956
bool isStoredAsBoolean() const
Definition ExprCXX.h:2947
bool isVoidType() const
Definition TypeBase.h:9034
bool isBooleanType() const
Definition TypeBase.h:9171
bool isSignableType(const ASTContext &Ctx) const
Definition TypeBase.h:8680
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2254
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition Type.cpp:2308
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2375
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:9078
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9328
bool isReferenceType() const
Definition TypeBase.h:8692
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1923
bool isSveVLSBuiltinType() const
Determines if this is a sizeless type supported by the 'arm_sve_vector_bits' type attribute,...
Definition Type.cpp:2654
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:754
bool isExtVectorType() const
Definition TypeBase.h:8811
bool isExtVectorBoolType() const
Definition TypeBase.h:8815
bool isOCLIntelSubgroupAVCType() const
Definition TypeBase.h:8953
bool isBuiltinType() const
Helper methods to distinguish type categories.
Definition TypeBase.h:8791
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8803
bool isFixedPointType() const
Return true if this is a fixed point type according to ISO/IEC JTC1 SC22 WG14 N1169.
Definition TypeBase.h:9094
bool isHalfType() const
Definition TypeBase.h:9038
bool hasSignedIntegerRepresentation() const
Determine whether this type has an signed integer representation of some sort, e.g....
Definition Type.cpp:2275
bool isQueueT() const
Definition TypeBase.h:8924
bool isMatrixType() const
Definition TypeBase.h:8831
bool isEventT() const
Definition TypeBase.h:8916
bool isFunctionType() const
Definition TypeBase.h:8664
bool isVectorType() const
Definition TypeBase.h:8807
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2358
bool isFloatingType() const
Definition Type.cpp:2342
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2285
const T * castAsCanonical() const
Return this type's canonical type cast to the specified type.
Definition TypeBase.h:2978
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9261
bool isNullPtrType() const
Definition TypeBase.h:9071
QualType getTypeOfArgument() const
Gets the argument type, or the type of the argument expression, whichever is appropriate.
Definition Expr.h:2697
UnaryExprOrTypeTrait getKind() const
Definition Expr.h:2660
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
SourceLocation getExprLoc() const
Definition Expr.h:2371
Expr * getSubExpr() const
Definition Expr.h:2288
Opcode getOpcode() const
Definition Expr.h:2283
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:2403
bool canOverflow() const
Returns true if the unary operator can cause an overflow.
Definition Expr.h:2301
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
bool isWeak() const
Determine whether this symbol is weakly-imported, or declared with the weak or weak-ref attr.
Definition Decl.cpp:5583
QualType getType() const
Definition Value.cpp:237
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:4016
Represents a GCC generic vector type.
Definition TypeBase.h:4225
Defines the clang::TargetInfo interface.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::ArgumentAdaptingMatcherFunc< internal::HasMatcher > has
Matches AST nodes that have child AST nodes that match the provided matcher.
const AstTypeMatcher< PointerType > pointerType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
bool BitCast(InterpState &S, CodePtr OpPC)
Definition Interp.h:3986
llvm::APFloat APFloat
Definition Floating.h:27
llvm::APInt APInt
Definition FixedPoint.h:19
@ Address
A pointer to a ValueDecl.
Definition Primitives.h:28
bool LE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1438
bool Load(InterpState &S, CodePtr OpPC)
Definition Interp.h:2164
bool GE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1453
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
if(T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T -> getSizeExpr())))
@ Result
The result type of a method or function.
Definition TypeBase.h:905
CastKind
CastKind - The kind of operation required for a conversion.
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
#define false
Definition stdbool.h:26
#define true
Definition stdbool.h:25
llvm::Value * TotalOffset
llvm::Value * OffsetOverflows
Structure with information about how a bitfield should be accessed.
unsigned Size
The total size of the bit-field, in bits.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::Type * HalfTy
half, bfloat, float, double
static TBAAAccessInfo getMayAliasInfo()
Definition CodeGenTBAA.h:63
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition Sanitizers.h:184