clang 23.0.0git
CGExprScalar.cpp
Go to the documentation of this file.
1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGHLSLRuntime.h"
17#include "CGObjCRuntime.h"
18#include "CGOpenMPRuntime.h"
19#include "CGRecordLayout.h"
20#include "CodeGenFunction.h"
21#include "CodeGenModule.h"
22#include "ConstantEmitter.h"
23#include "TargetInfo.h"
24#include "TrapReasonBuilder.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/DeclObjC.h"
28#include "clang/AST/Expr.h"
35#include "llvm/ADT/APFixedPoint.h"
36#include "llvm/ADT/ScopeExit.h"
37#include "llvm/IR/Argument.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constants.h"
40#include "llvm/IR/DataLayout.h"
41#include "llvm/IR/DerivedTypes.h"
42#include "llvm/IR/FixedPointBuilder.h"
43#include "llvm/IR/Function.h"
44#include "llvm/IR/GEPNoWrapFlags.h"
45#include "llvm/IR/GetElementPtrTypeIterator.h"
46#include "llvm/IR/GlobalVariable.h"
47#include "llvm/IR/Intrinsics.h"
48#include "llvm/IR/IntrinsicsPowerPC.h"
49#include "llvm/IR/MatrixBuilder.h"
50#include "llvm/IR/Module.h"
51#include "llvm/Support/TypeSize.h"
52#include <cstdarg>
53#include <optional>
54
using namespace clang;
using namespace CodeGen;
using llvm::Value;

//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//

namespace llvm {
// Command-line flag defined elsewhere in LLVM; referenced here by the
// coverage-instrumentation paths of this file.
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm
66
67namespace {
68
69/// Determine whether the given binary operation may overflow.
70/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
71/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
72/// the returned overflow check is precise. The returned value is 'true' for
73/// all other opcodes, to be conservative.
74bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
75 BinaryOperator::Opcode Opcode, bool Signed,
76 llvm::APInt &Result) {
77 // Assume overflow is possible, unless we can prove otherwise.
78 bool Overflow = true;
79 const auto &LHSAP = LHS->getValue();
80 const auto &RHSAP = RHS->getValue();
81 if (Opcode == BO_Add) {
82 Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
83 : LHSAP.uadd_ov(RHSAP, Overflow);
84 } else if (Opcode == BO_Sub) {
85 Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
86 : LHSAP.usub_ov(RHSAP, Overflow);
87 } else if (Opcode == BO_Mul) {
88 Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
89 : LHSAP.umul_ov(RHSAP, Overflow);
90 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
91 if (Signed && !RHS->isZero())
92 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
93 else
94 return false;
95 }
96 return Overflow;
97}
98
99struct BinOpInfo {
100 Value *LHS;
101 Value *RHS;
102 QualType Ty; // Computation Type.
103 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
104 FPOptions FPFeatures;
105 const Expr *E; // Entire expr, for error unsupported. May not be binop.
106
107 /// Check if the binop can result in integer overflow.
108 bool mayHaveIntegerOverflow() const {
109 // Without constant input, we can't rule out overflow.
110 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
111 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
112 if (!LHSCI || !RHSCI)
113 return true;
114
115 llvm::APInt Result;
116 return ::mayHaveIntegerOverflow(
117 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
118 }
119
120 /// Check if the binop computes a division or a remainder.
121 bool isDivremOp() const {
122 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
123 Opcode == BO_RemAssign;
124 }
125
126 /// Check if the binop can result in an integer division by zero.
127 bool mayHaveIntegerDivisionByZero() const {
128 if (isDivremOp())
129 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
130 return CI->isZero();
131 return true;
132 }
133
134 /// Check if the binop can result in a float division by zero.
135 bool mayHaveFloatDivisionByZero() const {
136 if (isDivremOp())
137 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
138 return CFP->isZero();
139 return true;
140 }
141
142 /// Check if at least one operand is a fixed point type. In such cases, this
143 /// operation did not follow usual arithmetic conversion and both operands
144 /// might not be of the same type.
145 bool isFixedPointOp() const {
146 // We cannot simply check the result type since comparison operations return
147 // an int.
148 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
149 QualType LHSType = BinOp->getLHS()->getType();
150 QualType RHSType = BinOp->getRHS()->getType();
151 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
152 }
153 if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
154 return UnOp->getSubExpr()->getType()->isFixedPointType();
155 return false;
156 }
157
158 /// Check if the RHS has a signed integer representation.
159 bool rhsHasSignedIntegerRepresentation() const {
160 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
161 QualType RHSType = BinOp->getRHS()->getType();
162 return RHSType->hasSignedIntegerRepresentation();
163 }
164 return false;
165 }
166};
167
168static bool MustVisitNullValue(const Expr *E) {
169 // If a null pointer expression's type is the C++0x nullptr_t, then
170 // it's not necessarily a simple constant and it must be evaluated
171 // for its potential side effects.
172 return E->getType()->isNullPtrType();
173}
174
175/// If \p E is a widened promoted integer, get its base (unpromoted) type.
176static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
177 const Expr *E) {
178 const Expr *Base = E->IgnoreImpCasts();
179 if (E == Base)
180 return std::nullopt;
181
182 QualType BaseTy = Base->getType();
183 if (!Ctx.isPromotableIntegerType(BaseTy) ||
184 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
185 return std::nullopt;
186
187 return BaseTy;
188}
189
190/// Check if \p E is a widened promoted integer.
191static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
192 return getUnwidenedIntegerType(Ctx, E).has_value();
193}
194
195/// Consider OverflowBehaviorType and language options to calculate the final
196/// overflow behavior for an expression. There are no language options for
197/// unsigned overflow semantics so there is nothing to consider there.
199getOverflowBehaviorConsideringType(const CodeGenFunction &CGF,
200 const QualType Ty) {
201 const OverflowBehaviorType *OBT = Ty->getAs<OverflowBehaviorType>();
202 /// FIXME: Having two enums named `OverflowBehaviorKind` is not ideal, these
203 /// should be unified into one coherent enum that supports both unsigned and
204 /// signed overflow behavior semantics.
205 if (OBT) {
206 switch (OBT->getBehaviorKind()) {
207 case OverflowBehaviorType::OverflowBehaviorKind::Wrap:
209 case OverflowBehaviorType::OverflowBehaviorKind::Trap:
211 }
212 llvm_unreachable("Unknown OverflowBehaviorKind");
213 }
214
215 if (Ty->isUnsignedIntegerType()) {
217 }
218
219 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
226 }
227 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
228}
229
230/// Check if we can skip the overflow check for \p Op.
231static bool CanElideOverflowCheck(ASTContext &Ctx, const BinOpInfo &Op) {
232 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
233 "Expected a unary or binary operator");
234
235 // If the binop has constant inputs and we can prove there is no overflow,
236 // we can elide the overflow check.
237 if (!Op.mayHaveIntegerOverflow())
238 return true;
239
240 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Op.E);
241 if (UO && Ctx.isUnaryOverflowPatternExcluded(UO))
242 return true;
243
244 const auto *BO = dyn_cast<BinaryOperator>(Op.E);
245 if (BO && BO->hasExcludedOverflowPattern())
246 return true;
247
248 if (Op.Ty.isWrapType())
249 return true;
250 if (Op.Ty.isTrapType())
251 return false;
252
253 if (Op.Ty->isSignedIntegerType() &&
254 Ctx.isTypeIgnoredBySanitizer(SanitizerKind::SignedIntegerOverflow,
255 Op.Ty)) {
256 return true;
257 }
258
259 if (Op.Ty->isUnsignedIntegerType() &&
260 Ctx.isTypeIgnoredBySanitizer(SanitizerKind::UnsignedIntegerOverflow,
261 Op.Ty)) {
262 return true;
263 }
264
265 // If a unary op has a widened operand, the op cannot overflow.
266 if (UO)
267 return !UO->canOverflow();
268
269 // We usually don't need overflow checks for binops with widened operands.
270 // Multiplication with promoted unsigned operands is a special case.
271 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
272 if (!OptionalLHSTy)
273 return false;
274
275 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
276 if (!OptionalRHSTy)
277 return false;
278
279 QualType LHSTy = *OptionalLHSTy;
280 QualType RHSTy = *OptionalRHSTy;
281
282 // This is the simple case: binops without unsigned multiplication, and with
283 // widened operands. No overflow check is needed here.
284 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
285 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
286 return true;
287
288 // For unsigned multiplication the overflow check can be elided if either one
289 // of the unpromoted types are less than half the size of the promoted type.
290 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
291 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
292 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
293}
294
// ScalarExprEmitter - StmtVisitor that emits LLVM IR for expressions of
// scalar type, producing a Value* for each visited expression.
class ScalarExprEmitter
  : public StmtVisitor<ScalarExprEmitter, Value*> {
  CodeGenFunction &CGF;          // Function currently being emitted.
  CGBuilderTy &Builder;          // IR builder (shared with CGF).
  bool IgnoreResultAssign;       // One-shot flag, see TestAndClearIgnoreResultAssign.
  llvm::LLVMContext &VMContext;
public:

  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  // Returns the current flag value, resetting it to false (one-shot read).
  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  // Thin forwarding wrappers into CodeGenFunction.
  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  void EmitBinOpCheck(
      ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
      const BinOpInfo &Info);

  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }
331
332 void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
333 const AlignValueAttr *AVAttr = nullptr;
334 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
335 const ValueDecl *VD = DRE->getDecl();
336
337 if (VD->getType()->isReferenceType()) {
338 if (const auto *TTy =
339 VD->getType().getNonReferenceType()->getAs<TypedefType>())
340 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
341 } else {
342 // Assumptions for function parameters are emitted at the start of the
343 // function, so there is no need to repeat that here,
344 // unless the alignment-assumption sanitizer is enabled,
345 // then we prefer the assumption over alignment attribute
346 // on IR function param.
347 if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
348 return;
349
350 AVAttr = VD->getAttr<AlignValueAttr>();
351 }
352 }
353
354 if (!AVAttr)
355 if (const auto *TTy = E->getType()->getAs<TypedefType>())
356 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
357
358 if (!AVAttr)
359 return;
360
361 Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
362 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
363 CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
364 }
365
366 /// EmitLoadOfLValue - Given an expression with complex type that represents a
367 /// value l-value, this method emits the address of the l-value, then loads
368 /// and returns the result.
369 Value *EmitLoadOfLValue(const Expr *E) {
370 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
371 E->getExprLoc());
372
373 EmitLValueAlignmentAssumption(E, V);
374 return V;
375 }
376
  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value. This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// This is used for bitfield conversion checks as well.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits. It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc,
                                  bool OBTrapInvolved = false);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value. It is not UB, so we use the value after conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;                  // Sign-extend bools when set.
    bool EmitImplicitIntegerTruncationChecks;   // UBSan truncation checks.
    bool EmitImplicitIntegerSignChangeChecks;   // UBSan sign-change checks.

    // Default: no special bool handling, no sanitizer checks.
    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    // Derive the check flags from the active sanitizer set.
    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and other fixed point or fixed point
  /// and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);
450
451 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
452 Value *EmitFloatToBoolConversion(Value *V) {
453 // Compare against 0.0 for fp scalars.
454 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
455 return Builder.CreateFCmpUNE(V, Zero, "tobool");
456 }
457
458 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
459 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
460 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
461
462 return Builder.CreateICmpNE(V, Zero, "tobool");
463 }
464
465 Value *EmitIntToBoolConversion(Value *V) {
466 // Because of the type rules of C, we often end up computing a
467 // logical value, then zero extending it to int, then wanting it
468 // as a logical value again. Optimize this common case.
469 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
470 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
471 Value *Result = ZI->getOperand(0);
472 // If there aren't any more uses, zap the instruction to save space.
473 // Note that there can be more uses, for example if this
474 // is the result of an assignment.
475 if (ZI->use_empty())
476 ZI->eraseFromParent();
477 return Result;
478 }
479 }
480
481 return Builder.CreateIsNotNull(V, "tobool");
482 }
483
  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  // Top-level dispatch: attach a debug location before visiting.
  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  // Non-expression statements can never reach a scalar emitter.
  Value *VisitStmt(Stmt *S) {
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);

  Value *VisitConstantExpr(ConstantExpr *E) {
    // A constant expression of type 'void' generates no code and produces no
    // value.
    if (E->getType()->isVoidType())
      return nullptr;

    if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
      if (E->isGLValue()) {
        // This was already converted to an rvalue when it was constant
        // evaluated.
        if (E->hasAPValueResult() && !E->getAPValueResult().isLValue())
          return Result;
        // NOTE(review): the alignment argument of this Address(...) call was
        // lost in extraction; confirm against upstream before building.
        return CGF.EmitLoadOfScalar(
            Address(Result, CGF.convertTypeForLoadStore(E->getType()),
            /*Volatile*/ false, E->getType(), E->getExprLoc());
      }
      return Result;
    }
    return Visit(E->getSubExpr());
  }
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  // Only the selected arm of a _Generic expression is emitted.
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
538
  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    // Character literals are always stored in an unsigned (even for signed
    // char), so allow implicit truncation here.
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue(),
                                  /*IsSigned=*/false, /*ImplicitTrunc=*/true);
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    // T() for void T produces no value; otherwise value-initialize to null.
    if (E->getType()->isVoidType())
      return nullptr;

    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
  Value *VisitEmbedExpr(EmbedExpr *E);

  Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
    if (E->isGLValue())
      return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
                              E->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    // NOTE(review): the return statement for the rvalue case was lost in
    // extraction; confirm against upstream.
  }

  Value *VisitOpenACCAsteriskSizeExpr(OpenACCAsteriskSizeExpr *E) {
    llvm_unreachable("Codegen for this isn't defined/implemented");
  }
600
  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    // Fold references that constant-evaluate (enumerators, constexpr vars)
    // directly to a constant; otherwise load through the l-value.
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
    // NOTE(review): the second half of this condition was lost in
    // extraction; confirm against upstream.
    if (E->getMethodDecl() &&
      return EmitLoadOfLValue(E);
    return CGF.EmitObjCMessageExpr(E).getScalarVal();
  }

  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
    LValue LV = CGF.EmitObjCIsaExpr(E);
    // NOTE(review): the statement producing V (presumably a load of LV) was
    // lost in extraction; confirm against upstream.
    return V;
  }

  Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
    VersionTuple Version = E->getVersion();

    // If we're checking for a platform older than our minimum deployment
    // target, we can fold the check away.
    if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
      return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);

    return CGF.EmitBuiltinAvailable(Version);
  }
640
  // Subscripts, vector/matrix accesses, casts and calls; out-of-line
  // definitions live later in the file.
  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSingleSubscriptExpr(MatrixSingleSubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitMatrixElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    // Record the cast's written type for debug info before emitting it.
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);

  Value *VisitCallExpr(const CallExpr *E) {
    // NOTE(review): the guard condition for the l-value call-result case was
    // lost in extraction; confirm against upstream.
      return EmitLoadOfLValue(E);

    Value *V = CGF.EmitCallExpr(E).getScalarVal();

    EmitLValueAlignmentAssumption(E, V);
    return V;
  }
684
  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  // The two boolean flags passed to EmitScalarPrePostIncDec are
  // (isInc, isPre).
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);


  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }

  // The Visit* wrappers below take an optional PromotionType used for
  // excess-precision emission; the paired helpers do the actual work.
  Value *VisitUnaryPlus(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryMinus(const UnaryOperator *E,
                         QualType PromotionType = QualType());
  Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);

  Value *VisitUnaryNot (const UnaryOperator *E);
  Value *VisitUnaryLNot (const UnaryOperator *E);
  Value *VisitUnaryReal(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryImag(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
743
  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    return EmitLoadOfLValue(E);
  }
  Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
    auto &Ctx = CGF.getContext();
    // NOTE(review): the statement(s) computing `Evaluated` (the evaluated
    // result of the source-location expression) were lost in extraction;
    // confirm against upstream.
    return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
                                             SLE->getType());
  }

  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    // Enter the default-argument scope so nested source-location/this
    // handling is correct, then emit the underlying expression.
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }

  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    // delete-expressions have type void; emit for side effects only.
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }

  Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
    if (E->isStoredAsBoolean())
      return llvm::ConstantInt::get(ConvertType(E->getType()),
                                    E->getBoolValue());
    assert(E->getAPValue().isInt() && "APValue type not supported");
    return llvm::ConstantInt::get(ConvertType(E->getType()),
                                  E->getAPValue().getInt());
  }

  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    // throw has type void; emit for its control-flow side effects.
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }
824
  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType() ||
        Ops.Ty->isUnsignedIntegerType()) {
      const bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
      const bool hasSan =
          isSigned ? CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)
                   : CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow);
      // The cases below deliberately fall through: each level only handles
      // the no-sanitizer fast path and defers to the next (stricter) level
      // when a check may still be required.
      switch (getOverflowBehaviorConsideringType(CGF, Ops.Ty)) {
      case LangOptions::OB_Wrap:
        // Wrapping semantics: plain mul, no NSW flag, never checked.
        return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
      case LangOptions::OB_SignedAndDefined:
        // Overflow is defined, so no NSW; sanitizer may still want a check.
        if (!hasSan)
          return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::OB_Unset:
        // Default semantics: NSW for signed (overflow is UB), plain mul for
        // unsigned; sanitizer may still want a check.
        if (!hasSan)
          return isSigned ? Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul")
                          : Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::OB_Trap:
        // A check is required unless it can be statically elided.
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return isSigned ? Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul")
                          : Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      // matrix * matrix needs a matrix multiply; matrix * scalar (either
      // side) is an element-wise scale.
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero,bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);

  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
  // non powers of two.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  // Emitters for the individual binary operators. Each takes a BinOpInfo
  // carrying the already-emitted operand values plus the expression context.
  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
899 Value *EmitAnd(const BinOpInfo &Ops) {
900 return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
901 }
902 Value *EmitXor(const BinOpInfo &Ops) {
903 return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
904 }
905 Value *EmitOr (const BinOpInfo &Ops) {
906 return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
907 }
908
  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  // Emit both operands of E (optionally promoted to PromotionTy for
  // excess-precision arithmetic) and package them for the Emit* helpers.
  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  // Conversions into and out of an excess-precision promotion type.
  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  // Emit a compound assignment (e.g. "+=") using the given member-function
  // pointer F as the binary-operation emitter; Result receives the RHS value.
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
926 QualType getPromotionType(QualType Ty) {
927 const auto &Ctx = CGF.getContext();
928 if (auto *CT = Ty->getAs<ComplexType>()) {
929 QualType ElementType = CT->getElementType();
930 if (ElementType.UseExcessPrecision(Ctx))
931 return Ctx.getComplexType(Ctx.FloatTy);
932 }
933
934 if (Ty.UseExcessPrecision(Ctx)) {
935 if (auto *VT = Ty->getAs<VectorType>()) {
936 unsigned NumElements = VT->getNumElements();
937 return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
938 }
939 return Ctx.FloatTy;
940 }
941
942 return QualType();
943 }
944
945 // Binary operators and binary compound assignment operators.
946#define HANDLEBINOP(OP) \
947 Value *VisitBin##OP(const BinaryOperator *E) { \
948 QualType promotionTy = getPromotionType(E->getType()); \
949 auto result = Emit##OP(EmitBinOps(E, promotionTy)); \
950 if (result && !promotionTy.isNull()) \
951 result = EmitUnPromotedValue(result, E->getType()); \
952 return result; \
953 } \
954 Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) { \
955 ApplyAtomGroup Grp(CGF.getDebugInfo()); \
956 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP); \
957 }
958 HANDLEBINOP(Mul)
959 HANDLEBINOP(Div)
960 HANDLEBINOP(Rem)
961 HANDLEBINOP(Add)
962 HANDLEBINOP(Sub)
963 HANDLEBINOP(Shl)
964 HANDLEBINOP(Shr)
966 HANDLEBINOP(Xor)
968#undef HANDLEBINOP
969
  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
  // Each visitor supplies the unsigned-integer, signed-integer, and
  // floating-point predicates for its operator. The trailing flag is
  // IsSignaling: true for the relational compares, false for (in)equality.
#define VISITCOMP(CODE, UI, SI, FP, SIG) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP
985
  Value *VisitBinAssign (const BinaryOperator *E);

  // Logical operators and the comma operator.
  Value *VisitBinLAnd (const BinaryOperator *E);
  Value *VisitBinLOr (const BinaryOperator *E);
  Value *VisitBinComma (const BinaryOperator *E);

  // Pointer-to-member access (".*" and "->*") produces an lvalue; load it.
  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  // A rewritten binary operator is emitted through its semantic form.
  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  // Objective-C literals delegate to the CodeGenFunction entry points.
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
  // Pack indexing yields one expression of the pack; emit the selected one.
  Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
    return Visit(E->getSelectedExpr());
  }
1021};
1022} // end anonymous namespace.
1023
1024//===----------------------------------------------------------------------===//
1025// Utilities
1026//===----------------------------------------------------------------------===//
1027
1028/// EmitConversionToBool - Convert the specified expression value to a
1029/// boolean (i1) truth value. This is equivalent to "Val != 0".
1030Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
1031 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
1032
1033 if (SrcType->isRealFloatingType())
1034 return EmitFloatToBoolConversion(Src);
1035
1036 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
1037 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
1038
1039 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
1040 "Unknown scalar type to convert");
1041
1042 if (isa<llvm::IntegerType>(Src->getType()))
1043 return EmitIntToBoolConversion(Src);
1044
1045 assert(isa<llvm::PointerType>(Src->getType()));
1046 return EmitPointerToBoolConversion(Src, SrcType);
1047}
1048
1049void ScalarExprEmitter::EmitFloatConversionCheck(
1050 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
1051 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
1052 assert(SrcType->isFloatingType() && "not a conversion from floating point");
1053 if (!isa<llvm::IntegerType>(DstTy))
1054 return;
1055
1056 auto CheckOrdinal = SanitizerKind::SO_FloatCastOverflow;
1057 auto CheckHandler = SanitizerHandler::FloatCastOverflow;
1058 SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
1059 using llvm::APFloat;
1060 using llvm::APSInt;
1061
1062 llvm::Value *Check = nullptr;
1063 const llvm::fltSemantics &SrcSema =
1064 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
1065
1066 // Floating-point to integer. This has undefined behavior if the source is
1067 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
1068 // to an integer).
1069 unsigned Width = CGF.getContext().getIntWidth(DstType);
1071
1072 APSInt Min = APSInt::getMinValue(Width, Unsigned);
1073 APFloat MinSrc(SrcSema, APFloat::uninitialized);
1074 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
1075 APFloat::opOverflow)
1076 // Don't need an overflow check for lower bound. Just check for
1077 // -Inf/NaN.
1078 MinSrc = APFloat::getInf(SrcSema, true);
1079 else
1080 // Find the largest value which is too small to represent (before
1081 // truncation toward zero).
1082 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
1083
1084 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
1085 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
1086 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
1087 APFloat::opOverflow)
1088 // Don't need an overflow check for upper bound. Just check for
1089 // +Inf/NaN.
1090 MaxSrc = APFloat::getInf(SrcSema, false);
1091 else
1092 // Find the smallest value which is too large to represent (before
1093 // truncation toward zero).
1094 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
1095
1096 // If we're converting from __half, convert the range to float to match
1097 // the type of src.
1098 if (OrigSrcType->isHalfType()) {
1099 const llvm::fltSemantics &Sema =
1100 CGF.getContext().getFloatTypeSemantics(SrcType);
1101 bool IsInexact;
1102 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1103 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1104 }
1105
1106 llvm::Value *GE =
1107 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
1108 llvm::Value *LE =
1109 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
1110 Check = Builder.CreateAnd(GE, LE);
1111
1112 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
1113 CGF.EmitCheckTypeDescriptor(OrigSrcType),
1114 CGF.EmitCheckTypeDescriptor(DstType)};
1115 CGF.EmitCheck(std::make_pair(Check, CheckOrdinal), CheckHandler, StaticArgs,
1116 OrigSrc);
1117}
1118
1119// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1120// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1121static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1122 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1124 QualType DstType, CGBuilderTy &Builder) {
1125 llvm::Type *SrcTy = Src->getType();
1126 llvm::Type *DstTy = Dst->getType();
1127 (void)DstTy; // Only used in assert()
1128
1129 // This should be truncation of integral types.
1130 assert(Src != Dst);
1131 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
1132 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1133 "non-integer llvm type");
1134
1135 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1136 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1137
1138 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
1139 // Else, it is a signed truncation.
1140 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1142 if (!SrcSigned && !DstSigned) {
1143 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1144 Ordinal = SanitizerKind::SO_ImplicitUnsignedIntegerTruncation;
1145 } else {
1146 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1147 Ordinal = SanitizerKind::SO_ImplicitSignedIntegerTruncation;
1148 }
1149
1150 llvm::Value *Check = nullptr;
1151 // 1. Extend the truncated value back to the same width as the Src.
1152 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
1153 // 2. Equality-compare with the original source value
1154 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1155 // If the comparison result is 'i1 false', then the truncation was lossy.
1156 return std::make_pair(Kind, std::make_pair(Check, Ordinal));
1157}
1158
1160 QualType SrcType, QualType DstType) {
1161 return SrcType->isIntegerType() && DstType->isIntegerType();
1162}
1163
1164void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1165 Value *Dst, QualType DstType,
1166 SourceLocation Loc,
1167 bool OBTrapInvolved) {
1168 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation) &&
1169 !OBTrapInvolved)
1170 return;
1171
1172 // We only care about int->int conversions here.
1173 // We ignore conversions to/from pointer and/or bool.
1175 DstType))
1176 return;
1177
1178 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1179 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1180 // This must be truncation. Else we do not care.
1181 if (SrcBits <= DstBits)
1182 return;
1183
1184 assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1185
1186 // If the integer sign change sanitizer is enabled,
1187 // and we are truncating from larger unsigned type to smaller signed type,
1188 // let that next sanitizer deal with it.
1189 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1190 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1191 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1192 (!SrcSigned && DstSigned))
1193 return;
1194
1195 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1196 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1197 Check;
1198
1199 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1200 {
1201 // We don't know the check kind until we call
1202 // EmitIntegerTruncationCheckHelper, but we want to annotate
1203 // EmitIntegerTruncationCheckHelper's instructions too.
1204 SanitizerDebugLocation SanScope(
1205 &CGF,
1206 {SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1207 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1208 CheckHandler);
1209 Check =
1210 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1211 // If the comparison result is 'i1 false', then the truncation was lossy.
1212 }
1213
1214 // Do we care about this type of truncation?
1215 if (!CGF.SanOpts.has(Check.second.second)) {
1216 // Just emit a trap check if an __ob_trap was involved but appropriate
1217 // sanitizer isn't enabled.
1218 if (OBTrapInvolved)
1219 CGF.EmitTrapCheck(Check.second.first, CheckHandler);
1220 return;
1221 }
1222
1223 SanitizerDebugLocation SanScope(&CGF, {Check.second.second}, CheckHandler);
1224
1225 // Does some SSCL ignore this type?
1226 const bool ignoredBySanitizer = CGF.getContext().isTypeIgnoredBySanitizer(
1227 SanitizerMask::bitPosToMask(Check.second.second), DstType);
1228
1229 // Consider OverflowBehaviorTypes which override SSCL type entries for
1230 // truncation sanitizers.
1231 if (const auto *OBT = DstType->getAs<OverflowBehaviorType>()) {
1232 if (OBT->isWrapKind())
1233 return;
1234 }
1235 if (ignoredBySanitizer && !OBTrapInvolved)
1236 return;
1237
1238 llvm::Constant *StaticArgs[] = {
1239 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1240 CGF.EmitCheckTypeDescriptor(DstType),
1241 llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
1242 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1243
1244 CGF.EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
1245}
1246
1247static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
1248 const char *Name,
1249 CGBuilderTy &Builder) {
1250 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1251 llvm::Type *VTy = V->getType();
1252 if (!VSigned) {
1253 // If the value is unsigned, then it is never negative.
1254 return llvm::ConstantInt::getFalse(VTy->getContext());
1255 }
1256 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1257 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1258 llvm::Twine(Name) + "." + V->getName() +
1259 ".negativitycheck");
1260}
1261
1262// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1263// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1264static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1265 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1267 QualType DstType, CGBuilderTy &Builder) {
1268 llvm::Type *SrcTy = Src->getType();
1269 llvm::Type *DstTy = Dst->getType();
1270
1271 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1272 "non-integer llvm type");
1273
1274 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1275 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1276 (void)SrcSigned; // Only used in assert()
1277 (void)DstSigned; // Only used in assert()
1278 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1279 unsigned DstBits = DstTy->getScalarSizeInBits();
1280 (void)SrcBits; // Only used in assert()
1281 (void)DstBits; // Only used in assert()
1282
1283 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1284 "either the widths should be different, or the signednesses.");
1285
1286 // 1. Was the old Value negative?
1287 llvm::Value *SrcIsNegative =
1288 EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
1289 // 2. Is the new Value negative?
1290 llvm::Value *DstIsNegative =
1291 EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
1292 // 3. Now, was the 'negativity status' preserved during the conversion?
1293 // NOTE: conversion from negative to zero is considered to change the sign.
1294 // (We want to get 'false' when the conversion changed the sign)
1295 // So we should just equality-compare the negativity statuses.
1296 llvm::Value *Check = nullptr;
1297 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1298 // If the comparison result is 'false', then the conversion changed the sign.
1299 return std::make_pair(
1300 ScalarExprEmitter::ICCK_IntegerSignChange,
1301 std::make_pair(Check, SanitizerKind::SO_ImplicitIntegerSignChange));
1302}
1303
1304void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1305 Value *Dst, QualType DstType,
1306 SourceLocation Loc) {
1307 if (!CGF.SanOpts.has(SanitizerKind::SO_ImplicitIntegerSignChange))
1308 return;
1309
1310 llvm::Type *SrcTy = Src->getType();
1311 llvm::Type *DstTy = Dst->getType();
1312
1313 // We only care about int->int conversions here.
1314 // We ignore conversions to/from pointer and/or bool.
1316 DstType))
1317 return;
1318
1319 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1320 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1321 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1322 unsigned DstBits = DstTy->getScalarSizeInBits();
1323
1324 // Now, we do not need to emit the check in *all* of the cases.
1325 // We can avoid emitting it in some obvious cases where it would have been
1326 // dropped by the opt passes (instcombine) always anyways.
1327 // If it's a cast between effectively the same type, no check.
1328 // NOTE: this is *not* equivalent to checking the canonical types.
1329 if (SrcSigned == DstSigned && SrcBits == DstBits)
1330 return;
1331 // At least one of the values needs to have signed type.
1332 // If both are unsigned, then obviously, neither of them can be negative.
1333 if (!SrcSigned && !DstSigned)
1334 return;
1335 // If the conversion is to *larger* *signed* type, then no check is needed.
1336 // Because either sign-extension happens (so the sign will remain),
1337 // or zero-extension will happen (the sign bit will be zero.)
1338 if ((DstBits > SrcBits) && DstSigned)
1339 return;
1340 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1341 (SrcBits > DstBits) && SrcSigned) {
1342 // If the signed integer truncation sanitizer is enabled,
1343 // and this is a truncation from signed type, then no check is needed.
1344 // Because here sign change check is interchangeable with truncation check.
1345 return;
1346 }
1347 // Does an SSCL have an entry for the DstType under its respective sanitizer
1348 // section?
1349 if (DstSigned && CGF.getContext().isTypeIgnoredBySanitizer(
1350 SanitizerKind::ImplicitSignedIntegerTruncation, DstType))
1351 return;
1352 if (!DstSigned &&
1354 SanitizerKind::ImplicitUnsignedIntegerTruncation, DstType))
1355 return;
1356 // That's it. We can't rule out any more cases with the data we have.
1357
1358 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1359 SanitizerDebugLocation SanScope(
1360 &CGF,
1361 {SanitizerKind::SO_ImplicitIntegerSignChange,
1362 SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1363 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1364 CheckHandler);
1365
1366 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1367 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1368 Check;
1369
1370 // Each of these checks needs to return 'false' when an issue was detected.
1371 ImplicitConversionCheckKind CheckKind;
1372 llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
1373 2>
1374 Checks;
1375 // So we can 'and' all the checks together, and still get 'false',
1376 // if at least one of the checks detected an issue.
1377
1378 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1379 CheckKind = Check.first;
1380 Checks.emplace_back(Check.second);
1381
1382 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1383 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1384 // If the signed integer truncation sanitizer was enabled,
1385 // and we are truncating from larger unsigned type to smaller signed type,
1386 // let's handle the case we skipped in that check.
1387 Check =
1388 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1389 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1390 Checks.emplace_back(Check.second);
1391 // If the comparison result is 'i1 false', then the truncation was lossy.
1392 }
1393
1394 llvm::Constant *StaticArgs[] = {
1395 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1396 CGF.EmitCheckTypeDescriptor(DstType),
1397 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1398 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1399 // EmitCheck() will 'and' all the checks together.
1400 CGF.EmitCheck(Checks, CheckHandler, StaticArgs, {Src, Dst});
1401}
1402
1403// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1404// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1405static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1406 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1408 QualType DstType, CGBuilderTy &Builder) {
1409 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1410 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1411
1412 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1413 if (!SrcSigned && !DstSigned)
1414 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1415 else
1416 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1417
1418 llvm::Value *Check = nullptr;
1419 // 1. Extend the truncated value back to the same width as the Src.
1420 Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
1421 // 2. Equality-compare with the original source value
1422 Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
1423 // If the comparison result is 'i1 false', then the truncation was lossy.
1424
1425 return std::make_pair(
1426 Kind,
1427 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1428}
1429
1430// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1431// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1432static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1433 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1435 QualType DstType, CGBuilderTy &Builder) {
1436 // 1. Was the old Value negative?
1437 llvm::Value *SrcIsNegative =
1438 EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
1439 // 2. Is the new Value negative?
1440 llvm::Value *DstIsNegative =
1441 EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
1442 // 3. Now, was the 'negativity status' preserved during the conversion?
1443 // NOTE: conversion from negative to zero is considered to change the sign.
1444 // (We want to get 'false' when the conversion changed the sign)
1445 // So we should just equality-compare the negativity statuses.
1446 llvm::Value *Check = nullptr;
1447 Check =
1448 Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
1449 // If the comparison result is 'false', then the conversion changed the sign.
1450 return std::make_pair(
1451 ScalarExprEmitter::ICCK_IntegerSignChange,
1452 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1453}
1454
1456 Value *Dst, QualType DstType,
1457 const CGBitFieldInfo &Info,
1458 SourceLocation Loc) {
1459
1460 if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
1461 return;
1462
1463 // We only care about int->int conversions here.
1464 // We ignore conversions to/from pointer and/or bool.
1466 DstType))
1467 return;
1468
1469 if (DstType->isBooleanType() || SrcType->isBooleanType())
1470 return;
1471
1472 // This should be truncation of integral types.
1473 assert(isa<llvm::IntegerType>(Src->getType()) &&
1474 isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");
1475
1476 // TODO: Calculate src width to avoid emitting code
1477 // for unecessary cases.
1478 unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
1479 unsigned DstBits = Info.Size;
1480
1481 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1482 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1483
1484 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1485 SanitizerDebugLocation SanScope(
1486 this, {SanitizerKind::SO_ImplicitBitfieldConversion}, CheckHandler);
1487
1488 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1489 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1490 Check;
1491
1492 // Truncation
1493 bool EmitTruncation = DstBits < SrcBits;
1494 // If Dst is signed and Src unsigned, we want to be more specific
1495 // about the CheckKind we emit, in this case we want to emit
1496 // ICCK_SignedIntegerTruncationOrSignChange.
1497 bool EmitTruncationFromUnsignedToSigned =
1498 EmitTruncation && DstSigned && !SrcSigned;
1499 // Sign change
1500 bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
1501 bool BothUnsigned = !SrcSigned && !DstSigned;
1502 bool LargerSigned = (DstBits > SrcBits) && DstSigned;
1503 // We can avoid emitting sign change checks in some obvious cases
1504 // 1. If Src and Dst have the same signedness and size
1505 // 2. If both are unsigned sign check is unecessary!
1506 // 3. If Dst is signed and bigger than Src, either
1507 // sign-extension or zero-extension will make sure
1508 // the sign remains.
1509 bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;
1510
1511 if (EmitTruncation)
1512 Check =
1513 EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1514 else if (EmitSignChange) {
1515 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1516 "either the widths should be different, or the signednesses.");
1517 Check =
1518 EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1519 } else
1520 return;
1521
1522 ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
1523 if (EmitTruncationFromUnsignedToSigned)
1524 CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;
1525
1526 llvm::Constant *StaticArgs[] = {
1528 EmitCheckTypeDescriptor(DstType),
1529 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1530 llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};
1531
1532 EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
1533}
1534
/// Emit the single LLVM cast instruction for a scalar (or matrix-element)
/// conversion from SrcType/SrcTy to DstType/DstTy, assuming bool and
/// fixed-point cases were already handled by the caller.
Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
                                         QualType DstType, llvm::Type *SrcTy,
                                         llvm::Type *DstTy,
                                         ScalarConversionOpts Opts) {
  // The Element types determine the type of cast to perform.
  llvm::Type *SrcElementTy;
  llvm::Type *DstElementTy;
  QualType SrcElementType;
  QualType DstElementType;
  if (SrcType->isMatrixType() && DstType->isMatrixType()) {
    // Matrices are lowered as flat LLVM vectors; cast element-wise.
    SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
    DstElementType = DstType->castAs<MatrixType>()->getElementType();
  } else {
    assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
           "cannot cast between matrix and non-matrix types");
    SrcElementTy = SrcTy;
    DstElementTy = DstTy;
    SrcElementType = SrcType;
    DstElementType = DstType;
  }

  // Integer source: int->int or int->FP.
  if (isa<llvm::IntegerType>(SrcElementTy)) {
    bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
    // Bool is i1 and normally zero-extends; TreatBooleanAsSigned forces
    // sign-extension instead (so 'true' becomes all-ones).
    if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }

    if (isa<llvm::IntegerType>(DstElementTy))
      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    if (InputSigned)
      return Builder.CreateSIToFP(Src, DstTy, "conv");
    return Builder.CreateUIToFP(Src, DstTy, "conv");
  }

  // FP source, integer destination.
  if (isa<llvm::IntegerType>(DstElementTy)) {
    assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
    bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();

    // If we can't recognize overflow as undefined behavior, assume that
    // overflow saturates. This protects against normal optimizations if we are
    // compiling with non-standard FP semantics.
    if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
      llvm::Intrinsic::ID IID =
          IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
      return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
    }

    if (IsSigned)
      return Builder.CreateFPToSI(Src, DstTy, "conv");
    return Builder.CreateFPToUI(Src, DstTy, "conv");
  }

  // FP->FP. 16-bit <-> 16-bit (e.g. half <-> bfloat) cannot be done with a
  // single trunc/ext, so round-trip through float.
  if ((DstElementTy->is16bitFPTy() && SrcElementTy->is16bitFPTy())) {
    Value *FloatVal = Builder.CreateFPExt(Src, Builder.getFloatTy(), "fpext");
    return Builder.CreateFPTrunc(FloatVal, DstTy, "fptrunc");
  }
  // Otherwise choose trunc vs. ext by comparing TypeIDs, which here serves as
  // a proxy for relative FP width — NOTE(review): relies on the llvm::Type
  // TypeID enumeration ordering of the FP types.
  if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
    return Builder.CreateFPTrunc(Src, DstTy, "conv");
  return Builder.CreateFPExt(Src, DstTy, "conv");
}
1597
1598/// Emit a conversion from the specified type to the specified destination type,
1599/// both of which are LLVM scalar types.
1600Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1601 QualType DstType,
1602 SourceLocation Loc,
1603 ScalarConversionOpts Opts) {
1604 // All conversions involving fixed point types should be handled by the
1605 // EmitFixedPoint family functions. This is done to prevent bloating up this
1606 // function more, and although fixed point numbers are represented by
1607 // integers, we do not want to follow any logic that assumes they should be
1608 // treated as integers.
1609 // TODO(leonardchan): When necessary, add another if statement checking for
1610 // conversions to fixed point types from other types.
1611 if (SrcType->isFixedPointType()) {
1612 if (DstType->isBooleanType())
1613 // It is important that we check this before checking if the dest type is
1614 // an integer because booleans are technically integer types.
1615 // We do not need to check the padding bit on unsigned types if unsigned
1616 // padding is enabled because overflow into this bit is undefined
1617 // behavior.
1618 return Builder.CreateIsNotNull(Src, "tobool");
1619 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1620 DstType->isRealFloatingType())
1621 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1622
1623 llvm_unreachable(
1624 "Unhandled scalar conversion from a fixed point type to another type.");
1625 } else if (DstType->isFixedPointType()) {
1626 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1627 // This also includes converting booleans and enums to fixed point types.
1628 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1629
1630 llvm_unreachable(
1631 "Unhandled scalar conversion to a fixed point type from another type.");
1632 }
1633
1634 QualType NoncanonicalSrcType = SrcType;
1635 QualType NoncanonicalDstType = DstType;
1636
1637 SrcType = CGF.getContext().getCanonicalType(SrcType);
1638 DstType = CGF.getContext().getCanonicalType(DstType);
1639 if (SrcType == DstType) return Src;
1640
1641 if (DstType->isVoidType()) return nullptr;
1642
1643 llvm::Value *OrigSrc = Src;
1644 QualType OrigSrcType = SrcType;
1645 llvm::Type *SrcTy = Src->getType();
1646
1647 // Handle conversions to bool first, they are special: comparisons against 0.
1648 if (DstType->isBooleanType())
1649 return EmitConversionToBool(Src, SrcType);
1650
1651 llvm::Type *DstTy = ConvertType(DstType);
1652
1653 // Cast from half through float if half isn't a native type.
1654 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1655 // Cast to FP using the intrinsic if the half type itself isn't supported.
1656 if (DstTy->isFloatingPointTy()) {
1658 Value *BitCast = Builder.CreateBitCast(Src, CGF.CGM.HalfTy);
1659 return Builder.CreateFPExt(BitCast, DstTy, "conv");
1660 }
1661 } else {
1662 // Cast to other types through float, using either the intrinsic or FPExt,
1663 // depending on whether the half type itself is supported
1664 // (as opposed to operations on half, available with NativeHalfType).
1665
1666 if (Src->getType() != CGF.CGM.HalfTy) {
1668 Src = Builder.CreateBitCast(Src, CGF.CGM.HalfTy);
1669 }
1670
1671 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1672 SrcType = CGF.getContext().FloatTy;
1673 SrcTy = CGF.FloatTy;
1674 }
1675 }
1676
1677 // Ignore conversions like int -> uint.
1678 if (SrcTy == DstTy) {
1679 if (Opts.EmitImplicitIntegerSignChangeChecks)
1680 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1681 NoncanonicalDstType, Loc);
1682
1683 return Src;
1684 }
1685
1686 // Handle pointer conversions next: pointers can only be converted to/from
1687 // other pointers and integers. Check for pointer types in terms of LLVM, as
1688 // some native types (like Obj-C id) may map to a pointer type.
1689 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1690 // The source value may be an integer, or a pointer.
1691 if (isa<llvm::PointerType>(SrcTy))
1692 return Src;
1693
1694 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1695 // First, convert to the correct width so that we control the kind of
1696 // extension.
1697 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1698 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1699 llvm::Value* IntResult =
1700 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1701 // Then, cast to pointer.
1702 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1703 }
1704
1705 if (isa<llvm::PointerType>(SrcTy)) {
1706 // Must be an ptr to int cast.
1707 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1708 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1709 }
1710
1711 // A scalar can be splatted to an extended vector of the same element type
1712 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1713 // Sema should add casts to make sure that the source expression's type is
1714 // the same as the vector's element type (sans qualifiers)
1715 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1716 SrcType.getTypePtr() &&
1717 "Splatted expr doesn't match with vector element type?");
1718
1719 // Splat the element across to all elements
1720 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1721 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1722 }
1723
1724 if (SrcType->isMatrixType() && DstType->isMatrixType())
1725 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1726
1727 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1728 // Allow bitcast from vector to integer/fp of the same size.
1729 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1730 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1731 if (SrcSize == DstSize)
1732 return Builder.CreateBitCast(Src, DstTy, "conv");
1733
1734 // Conversions between vectors of different sizes are not allowed except
1735 // when vectors of half are involved. Operations on storage-only half
1736 // vectors require promoting half vector operands to float vectors and
1737 // truncating the result, which is either an int or float vector, to a
1738 // short or half vector.
1739
1740 // Source and destination are both expected to be vectors.
1741 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1742 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1743 (void)DstElementTy;
1744
1745 assert(((SrcElementTy->isIntegerTy() &&
1746 DstElementTy->isIntegerTy()) ||
1747 (SrcElementTy->isFloatingPointTy() &&
1748 DstElementTy->isFloatingPointTy())) &&
1749 "unexpected conversion between a floating-point vector and an "
1750 "integer vector");
1751
1752 // Truncate an i32 vector to an i16 vector.
1753 if (SrcElementTy->isIntegerTy())
1754 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1755
1756 // Truncate a float vector to a half vector.
1757 if (SrcSize > DstSize)
1758 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1759
1760 // Promote a half vector to a float vector.
1761 return Builder.CreateFPExt(Src, DstTy, "conv");
1762 }
1763
1764 // Finally, we have the arithmetic types: real int/float.
1765 Value *Res = nullptr;
1766 llvm::Type *ResTy = DstTy;
1767
1768 // An overflowing conversion has undefined behavior if either the source type
1769 // or the destination type is a floating-point type. However, we consider the
1770 // range of representable values for all floating-point types to be
1771 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1772 // floating-point type.
1773 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1774 OrigSrcType->isFloatingType())
1775 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1776 Loc);
1777
1778 // Cast to half through float if half isn't a native type.
1779 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1780 // Make sure we cast in a single step if from another FP type.
1781 if (SrcTy->isFloatingPointTy()) {
1782 // Handle the case where the half type is represented as an integer (as
1783 // opposed to operations on half, available with NativeHalfType).
1784
1785 // If the half type is supported, just use an fptrunc.
1786 Value *Res = Builder.CreateFPTrunc(Src, CGF.CGM.HalfTy, "conv");
1787 if (DstTy == CGF.CGM.HalfTy)
1788 return Res;
1789
1790 assert(DstTy->isIntegerTy(16) &&
1792 "Only half FP requires extra conversion");
1793 return Builder.CreateBitCast(Res, DstTy);
1794 }
1795
1796 DstTy = CGF.FloatTy;
1797 }
1798
1799 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1800
1801 if (DstTy != ResTy) {
1802 Res = Builder.CreateFPTrunc(Res, CGF.CGM.HalfTy, "conv");
1803
1804 if (ResTy != CGF.CGM.HalfTy) {
1805 assert(ResTy->isIntegerTy(16) &&
1807 "Only half FP requires extra conversion");
1808 Res = Builder.CreateBitCast(Res, ResTy);
1809 }
1810 }
1811
1812 // Determine whether an overflow behavior of 'trap' has been specified for
1813 // either the destination or the source types. If so, we can elide sanitizer
1814 // capability checks as this overflow behavior kind is also capable of
1815 // emitting traps without runtime sanitizer support.
1816 // Also skip instrumentation if either source or destination has 'wrap'
1817 // behavior - the user has explicitly indicated they accept wrapping
1818 // semantics. Use non-canonical types to preserve OBT annotations.
1819 const auto *DstOBT = NoncanonicalDstType->getAs<OverflowBehaviorType>();
1820 const auto *SrcOBT = NoncanonicalSrcType->getAs<OverflowBehaviorType>();
1821 bool OBTrapInvolved =
1822 (DstOBT && DstOBT->isTrapKind()) || (SrcOBT && SrcOBT->isTrapKind());
1823 bool OBWrapInvolved =
1824 (DstOBT && DstOBT->isWrapKind()) || (SrcOBT && SrcOBT->isWrapKind());
1825
1826 if ((Opts.EmitImplicitIntegerTruncationChecks || OBTrapInvolved) &&
1827 !OBWrapInvolved)
1828 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1829 NoncanonicalDstType, Loc, OBTrapInvolved);
1830
1831 if (Opts.EmitImplicitIntegerSignChangeChecks)
1832 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1833 NoncanonicalDstType, Loc);
1834
1835 return Res;
1836}
1837
1838Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1839 QualType DstTy,
1840 SourceLocation Loc) {
1841 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1842 llvm::Value *Result;
1843 if (SrcTy->isRealFloatingType())
1844 Result = FPBuilder.CreateFloatingToFixed(Src,
1845 CGF.getContext().getFixedPointSemantics(DstTy));
1846 else if (DstTy->isRealFloatingType())
1847 Result = FPBuilder.CreateFixedToFloating(Src,
1849 ConvertType(DstTy));
1850 else {
1851 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1852 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1853
1854 if (DstTy->isIntegerType())
1855 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1856 DstFPSema.getWidth(),
1857 DstFPSema.isSigned());
1858 else if (SrcTy->isIntegerType())
1859 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1860 DstFPSema);
1861 else
1862 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1863 }
1864 return Result;
1865}
1866
1867/// Emit a conversion from the specified complex type to the specified
1868/// destination type, where the destination type is an LLVM scalar type.
1869Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1870 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1871 SourceLocation Loc) {
1872 // Get the source element type.
1873 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1874
1875 // Handle conversions to bool first, they are special: comparisons against 0.
1876 if (DstTy->isBooleanType()) {
1877 // Complex != 0 -> (Real != 0) | (Imag != 0)
1878 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1879 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1880 return Builder.CreateOr(Src.first, Src.second, "tobool");
1881 }
1882
1883 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1884 // the imaginary part of the complex value is discarded and the value of the
1885 // real part is converted according to the conversion rules for the
1886 // corresponding real type.
1887 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1888}
1889
1890Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1891 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1892}
1893
1894/// Emit a sanitization check for the given "binary" operation (which
1895/// might actually be a unary increment which has been lowered to a binary
1896/// operation). The check passes if all values in \p Checks (which are \c i1),
1897/// are \c true.
1898void ScalarExprEmitter::EmitBinOpCheck(
1899 ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
1900 const BinOpInfo &Info) {
1901 assert(CGF.IsSanitizerScope);
1902 SanitizerHandler Check;
1903 SmallVector<llvm::Constant *, 4> StaticData;
1904 SmallVector<llvm::Value *, 2> DynamicData;
1905 TrapReason TR;
1906
1907 BinaryOperatorKind Opcode = Info.Opcode;
1910
1911 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1912 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1913 if (UO && UO->getOpcode() == UO_Minus) {
1914 Check = SanitizerHandler::NegateOverflow;
1915 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1916 DynamicData.push_back(Info.RHS);
1917 } else {
1918 if (BinaryOperator::isShiftOp(Opcode)) {
1919 // Shift LHS negative or too large, or RHS out of bounds.
1920 Check = SanitizerHandler::ShiftOutOfBounds;
1921 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1922 StaticData.push_back(
1923 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1924 StaticData.push_back(
1925 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1926 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1927 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1928 Check = SanitizerHandler::DivremOverflow;
1929 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1930 } else {
1931 // Arithmetic overflow (+, -, *).
1932 int ArithOverflowKind = 0;
1933 switch (Opcode) {
1934 case BO_Add: {
1935 Check = SanitizerHandler::AddOverflow;
1936 ArithOverflowKind = diag::UBSanArithKind::Add;
1937 break;
1938 }
1939 case BO_Sub: {
1940 Check = SanitizerHandler::SubOverflow;
1941 ArithOverflowKind = diag::UBSanArithKind::Sub;
1942 break;
1943 }
1944 case BO_Mul: {
1945 Check = SanitizerHandler::MulOverflow;
1946 ArithOverflowKind = diag::UBSanArithKind::Mul;
1947 break;
1948 }
1949 default:
1950 llvm_unreachable("unexpected opcode for bin op check");
1951 }
1952 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1954 SanitizerKind::UnsignedIntegerOverflow) ||
1956 SanitizerKind::SignedIntegerOverflow)) {
1957 // Only pay the cost for constructing the trap diagnostic if they are
1958 // going to be used.
1959 CGF.CGM.BuildTrapReason(diag::trap_ubsan_arith_overflow, TR)
1960 << Info.Ty->isSignedIntegerOrEnumerationType() << ArithOverflowKind
1961 << Info.E;
1962 }
1963 }
1964 DynamicData.push_back(Info.LHS);
1965 DynamicData.push_back(Info.RHS);
1966 }
1967
1968 CGF.EmitCheck(Checks, Check, StaticData, DynamicData, &TR);
1969}
1970
1971//===----------------------------------------------------------------------===//
1972// Visitor Methods
1973//===----------------------------------------------------------------------===//
1974
1975Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1976 CGF.ErrorUnsupported(E, "scalar expression");
1977 if (E->getType()->isVoidType())
1978 return nullptr;
1979 return llvm::PoisonValue::get(CGF.ConvertType(E->getType()));
1980}
1981
1982Value *
1983ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1984 ASTContext &Context = CGF.getContext();
1985 unsigned AddrSpace =
1987 llvm::Constant *GlobalConstStr = Builder.CreateGlobalString(
1988 E->ComputeName(Context), "__usn_str", AddrSpace);
1989
1990 llvm::Type *ExprTy = ConvertType(E->getType());
1991 return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,
1992 "usn_addr_cast");
1993}
1994
1995Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {
1996 assert(E->getDataElementCount() == 1);
1997 auto It = E->begin();
1998 return Builder.getInt((*It)->getValue());
1999}
2000
2001Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
2002 // Vector Mask Case
2003 if (E->getNumSubExprs() == 2) {
2004 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
2005 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
2006 Value *Mask;
2007
2008 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
2009 unsigned LHSElts = LTy->getNumElements();
2010
2011 Mask = RHS;
2012
2013 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
2014
2015 // Mask off the high bits of each shuffle index.
2016 Value *MaskBits =
2017 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
2018 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
2019
2020 // newv = undef
2021 // mask = mask & maskbits
2022 // for each elt
2023 // n = extract mask i
2024 // x = extract val n
2025 // newv = insert newv, x, i
2026 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
2027 MTy->getNumElements());
2028 Value* NewV = llvm::PoisonValue::get(RTy);
2029 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
2030 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
2031 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
2032
2033 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
2034 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
2035 }
2036 return NewV;
2037 }
2038
2039 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
2040 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
2041
2042 SmallVector<int, 32> Indices;
2043 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
2044 llvm::APSInt Idx = E->getShuffleMaskIdx(i - 2);
2045 // Check for -1 and output it as undef in the IR.
2046 if (Idx.isSigned() && Idx.isAllOnes())
2047 Indices.push_back(-1);
2048 else
2049 Indices.push_back(Idx.getZExtValue());
2050 }
2051
2052 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
2053}
2054
2055Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
2056 QualType SrcType = E->getSrcExpr()->getType(),
2057 DstType = E->getType();
2058
2059 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
2060
2061 SrcType = CGF.getContext().getCanonicalType(SrcType);
2062 DstType = CGF.getContext().getCanonicalType(DstType);
2063 if (SrcType == DstType) return Src;
2064
2065 assert(SrcType->isVectorType() &&
2066 "ConvertVector source type must be a vector");
2067 assert(DstType->isVectorType() &&
2068 "ConvertVector destination type must be a vector");
2069
2070 llvm::Type *SrcTy = Src->getType();
2071 llvm::Type *DstTy = ConvertType(DstType);
2072
2073 // Ignore conversions like int -> uint.
2074 if (SrcTy == DstTy)
2075 return Src;
2076
2077 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
2078 DstEltType = DstType->castAs<VectorType>()->getElementType();
2079
2080 assert(SrcTy->isVectorTy() &&
2081 "ConvertVector source IR type must be a vector");
2082 assert(DstTy->isVectorTy() &&
2083 "ConvertVector destination IR type must be a vector");
2084
2085 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
2086 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
2087
2088 if (DstEltType->isBooleanType()) {
2089 assert((SrcEltTy->isFloatingPointTy() ||
2090 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
2091
2092 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
2093 if (SrcEltTy->isFloatingPointTy()) {
2094 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2095 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
2096 } else {
2097 return Builder.CreateICmpNE(Src, Zero, "tobool");
2098 }
2099 }
2100
2101 // We have the arithmetic types: real int/float.
2102 Value *Res = nullptr;
2103
2104 if (isa<llvm::IntegerType>(SrcEltTy)) {
2105 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
2106 if (isa<llvm::IntegerType>(DstEltTy))
2107 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
2108 else {
2109 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2110 if (InputSigned)
2111 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
2112 else
2113 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
2114 }
2115 } else if (isa<llvm::IntegerType>(DstEltTy)) {
2116 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
2117 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2118 if (DstEltType->isSignedIntegerOrEnumerationType())
2119 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
2120 else
2121 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
2122 } else {
2123 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
2124 "Unknown real conversion");
2125 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2126 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
2127 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
2128 else
2129 Res = Builder.CreateFPExt(Src, DstTy, "conv");
2130 }
2131
2132 return Res;
2133}
2134
2135Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
2136 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
2137 CGF.EmitIgnoredExpr(E->getBase());
2138 return CGF.emitScalarConstant(Constant, E);
2139 } else {
2140 Expr::EvalResult Result;
2142 llvm::APSInt Value = Result.Val.getInt();
2143 CGF.EmitIgnoredExpr(E->getBase());
2144 return Builder.getInt(Value);
2145 }
2146 }
2147
2148 llvm::Value *Result = EmitLoadOfLValue(E);
2149
2150 // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
2151 // debug info for the pointer, even if there is no variable associated with
2152 // the pointer's expression.
2153 if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
2154 if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Result)) {
2155 if (llvm::GetElementPtrInst *GEP =
2156 dyn_cast<llvm::GetElementPtrInst>(Load->getPointerOperand())) {
2157 if (llvm::Instruction *Pointer =
2158 dyn_cast<llvm::Instruction>(GEP->getPointerOperand())) {
2159 QualType Ty = E->getBase()->getType();
2160 if (!E->isArrow())
2161 Ty = CGF.getContext().getPointerType(Ty);
2162 CGF.getDebugInfo()->EmitPseudoVariable(Builder, Pointer, Ty);
2163 }
2164 }
2165 }
2166 }
2167 return Result;
2168}
2169
2170Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
2171 TestAndClearIgnoreResultAssign();
2172
2173 // Emit subscript expressions in rvalue context's. For most cases, this just
2174 // loads the lvalue formed by the subscript expr. However, we have to be
2175 // careful, because the base of a vector subscript is occasionally an rvalue,
2176 // so we can't get it as an lvalue.
2177 if (!E->getBase()->getType()->isVectorType() &&
2179 return EmitLoadOfLValue(E);
2180
2181 // Handle the vector case. The base must be a vector, the index must be an
2182 // integer value.
2183 Value *Base = Visit(E->getBase());
2184 Value *Idx = Visit(E->getIdx());
2185 QualType IdxTy = E->getIdx()->getType();
2186
2187 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
2188 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
2189
2190 return Builder.CreateExtractElement(Base, Idx, "vecext");
2191}
2192
2193Value *ScalarExprEmitter::VisitMatrixSingleSubscriptExpr(
2194 MatrixSingleSubscriptExpr *E) {
2195 TestAndClearIgnoreResultAssign();
2196
2197 auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2198 unsigned NumRows = MatrixTy->getNumRows();
2199 unsigned NumColumns = MatrixTy->getNumColumns();
2200
2201 // Row index
2202 Value *RowIdx = CGF.EmitMatrixIndexExpr(E->getRowIdx());
2203 llvm::MatrixBuilder MB(Builder);
2204
2205 // The row index must be in [0, NumRows)
2206 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2207 MB.CreateIndexAssumption(RowIdx, NumRows);
2208
2209 Value *FlatMatrix = Visit(E->getBase());
2210 llvm::Type *ElemTy = CGF.ConvertTypeForMem(MatrixTy->getElementType());
2211 auto *ResultTy = llvm::FixedVectorType::get(ElemTy, NumColumns);
2212 Value *RowVec = llvm::PoisonValue::get(ResultTy);
2213
2214 for (unsigned Col = 0; Col != NumColumns; ++Col) {
2215 Value *ColVal = llvm::ConstantInt::get(RowIdx->getType(), Col);
2216 bool IsMatrixRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
2217 LangOptions::MatrixMemoryLayout::MatrixRowMajor;
2218 Value *EltIdx = MB.CreateIndex(RowIdx, ColVal, NumRows, NumColumns,
2219 IsMatrixRowMajor, "matrix_row_idx");
2220 Value *Elt =
2221 Builder.CreateExtractElement(FlatMatrix, EltIdx, "matrix_elem");
2222 Value *Lane = llvm::ConstantInt::get(Builder.getInt32Ty(), Col);
2223 RowVec = Builder.CreateInsertElement(RowVec, Elt, Lane, "matrix_row_ins");
2224 }
2225
2226 return CGF.EmitFromMemory(RowVec, E->getType());
2227}
2228
2229Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
2230 TestAndClearIgnoreResultAssign();
2231
2232 // Handle the vector case. The base must be a vector, the index must be an
2233 // integer value.
2234 Value *RowIdx = CGF.EmitMatrixIndexExpr(E->getRowIdx());
2235 Value *ColumnIdx = CGF.EmitMatrixIndexExpr(E->getColumnIdx());
2236
2237 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2238 llvm::MatrixBuilder MB(Builder);
2239
2240 Value *Idx;
2241 unsigned NumCols = MatrixTy->getNumColumns();
2242 unsigned NumRows = MatrixTy->getNumRows();
2243 bool IsMatrixRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
2244 LangOptions::MatrixMemoryLayout::MatrixRowMajor;
2245 Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows, NumCols, IsMatrixRowMajor);
2246
2247 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2248 MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());
2249
2250 Value *Matrix = Visit(E->getBase());
2251
2252 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
2253 return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
2254}
2255
2256static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
2257 unsigned Off) {
2258 int MV = SVI->getMaskValue(Idx);
2259 if (MV == -1)
2260 return -1;
2261 return Off + MV;
2262}
2263
2264static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
2265 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
2266 "Index operand too large for shufflevector mask!");
2267 return C->getZExtValue();
2268}
2269
2270Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
2271 bool Ignore = TestAndClearIgnoreResultAssign();
2272 (void)Ignore;
2273 unsigned NumInitElements = E->getNumInits();
2274 assert((Ignore == false ||
2275 (NumInitElements == 0 && E->getType()->isVoidType())) &&
2276 "init list ignored");
2277
2278 // HLSL initialization lists in the AST are an expansion which can contain
2279 // side-effecting expressions wrapped in opaque value expressions. To properly
2280 // emit these we need to emit the opaque values before we emit the argument
2281 // expressions themselves. This is a little hacky, but it prevents us needing
2282 // to do a bigger AST-level change for a language feature that we need
2283 // deprecate in the near future. See related HLSL language proposals in the
2284 // proposals (https://github.com/microsoft/hlsl-specs/blob/main/proposals):
2285 // * 0005-strict-initializer-lists.md
2286 // * 0032-constructors.md
2287 if (CGF.getLangOpts().HLSL)
2289
2290 if (E->hadArrayRangeDesignator())
2291 CGF.ErrorUnsupported(E, "GNU array range designator extension");
2292
2293 llvm::VectorType *VType =
2294 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
2295
2296 if (!VType) {
2297 if (NumInitElements == 0) {
2298 // C++11 value-initialization for the scalar.
2299 return EmitNullValue(E->getType());
2300 }
2301 // We have a scalar in braces. Just use the first element.
2302 return Visit(E->getInit(0));
2303 }
2304
2305 if (isa<llvm::ScalableVectorType>(VType)) {
2306 if (NumInitElements == 0) {
2307 // C++11 value-initialization for the vector.
2308 return EmitNullValue(E->getType());
2309 }
2310
2311 if (NumInitElements == 1) {
2312 Expr *InitVector = E->getInit(0);
2313
2314 // Initialize from another scalable vector of the same type.
2315 if (InitVector->getType().getCanonicalType() ==
2317 return Visit(InitVector);
2318 }
2319
2320 llvm_unreachable("Unexpected initialization of a scalable vector!");
2321 }
2322
2323 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
2324
2325 // Loop over initializers collecting the Value for each, and remembering
2326 // whether the source was swizzle (ExtVectorElementExpr). This will allow
2327 // us to fold the shuffle for the swizzle into the shuffle for the vector
2328 // initializer, since LLVM optimizers generally do not want to touch
2329 // shuffles.
2330 unsigned CurIdx = 0;
2331 bool VIsPoisonShuffle = false;
2332 llvm::Value *V = llvm::PoisonValue::get(VType);
2333 for (unsigned i = 0; i != NumInitElements; ++i) {
2334 Expr *IE = E->getInit(i);
2335 Value *Init = Visit(IE);
2336 SmallVector<int, 16> Args;
2337
2338 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
2339
2340 // Handle scalar elements. If the scalar initializer is actually one
2341 // element of a different vector of the same width, use shuffle instead of
2342 // extract+insert.
2343 if (!VVT) {
2344 if (isa<ExtVectorElementExpr>(IE)) {
2345 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
2346
2347 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
2348 ->getNumElements() == ResElts) {
2349 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
2350 Value *LHS = nullptr, *RHS = nullptr;
2351 if (CurIdx == 0) {
2352 // insert into poison -> shuffle (src, poison)
2353 // shufflemask must use an i32
2354 Args.push_back(getAsInt32(C, CGF.Int32Ty));
2355 Args.resize(ResElts, -1);
2356
2357 LHS = EI->getVectorOperand();
2358 RHS = V;
2359 VIsPoisonShuffle = true;
2360 } else if (VIsPoisonShuffle) {
2361 // insert into poison shuffle && size match -> shuffle (v, src)
2362 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
2363 for (unsigned j = 0; j != CurIdx; ++j)
2364 Args.push_back(getMaskElt(SVV, j, 0));
2365 Args.push_back(ResElts + C->getZExtValue());
2366 Args.resize(ResElts, -1);
2367
2368 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
2369 RHS = EI->getVectorOperand();
2370 VIsPoisonShuffle = false;
2371 }
2372 if (!Args.empty()) {
2373 V = Builder.CreateShuffleVector(LHS, RHS, Args);
2374 ++CurIdx;
2375 continue;
2376 }
2377 }
2378 }
2379 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
2380 "vecinit");
2381 VIsPoisonShuffle = false;
2382 ++CurIdx;
2383 continue;
2384 }
2385
2386 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
2387
2388 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
2389 // input is the same width as the vector being constructed, generate an
2390 // optimized shuffle of the swizzle input into the result.
2391 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
2392 if (isa<ExtVectorElementExpr>(IE)) {
2393 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
2394 Value *SVOp = SVI->getOperand(0);
2395 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
2396
2397 if (OpTy->getNumElements() == ResElts) {
2398 for (unsigned j = 0; j != CurIdx; ++j) {
2399 // If the current vector initializer is a shuffle with poison, merge
2400 // this shuffle directly into it.
2401 if (VIsPoisonShuffle) {
2402 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
2403 } else {
2404 Args.push_back(j);
2405 }
2406 }
2407 for (unsigned j = 0, je = InitElts; j != je; ++j)
2408 Args.push_back(getMaskElt(SVI, j, Offset));
2409 Args.resize(ResElts, -1);
2410
2411 if (VIsPoisonShuffle)
2412 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
2413
2414 Init = SVOp;
2415 }
2416 }
2417
2418 // Extend init to result vector length, and then shuffle its contribution
2419 // to the vector initializer into V.
2420 if (Args.empty()) {
2421 for (unsigned j = 0; j != InitElts; ++j)
2422 Args.push_back(j);
2423 Args.resize(ResElts, -1);
2424 Init = Builder.CreateShuffleVector(Init, Args, "vext");
2425
2426 Args.clear();
2427 for (unsigned j = 0; j != CurIdx; ++j)
2428 Args.push_back(j);
2429 for (unsigned j = 0; j != InitElts; ++j)
2430 Args.push_back(j + Offset);
2431 Args.resize(ResElts, -1);
2432 }
2433
2434 // If V is poison, make sure it ends up on the RHS of the shuffle to aid
2435 // merging subsequent shuffles into this one.
2436 if (CurIdx == 0)
2437 std::swap(V, Init);
2438 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
2439 VIsPoisonShuffle = isa<llvm::PoisonValue>(Init);
2440 CurIdx += InitElts;
2441 }
2442
2443 // FIXME: evaluate codegen vs. shuffling against constant null vector.
2444 // Emit remaining default initializers.
2445 llvm::Type *EltTy = VType->getElementType();
2446
2447 // Emit remaining default initializers
2448 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
2449 Value *Idx = Builder.getInt32(CurIdx);
2450 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
2451 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
2452 }
2453
2454 // Matrix initializer lists are in row-major order but the memory layout for
2455 // codegen is determined by the -fmatrix-memory-layout flag (default:
2456 // column-major). When the memory layout is column-major, we need to shuffle
2457 // the elements from row-major to column-major order.
2458 if (const auto *MT = E->getType()->getAs<ConstantMatrixType>();
2459 MT && CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
2460 LangOptions::MatrixMemoryLayout::MatrixColMajor) {
2461 SmallVector<int, 16> Mask;
2462 for (unsigned I = 0, N = MT->getNumElementsFlattened(); I < N; ++I)
2463 Mask.push_back(MT->mapColumnMajorToRowMajorFlattenedIndex(I));
2464 V = Builder.CreateShuffleVector(V, Mask, "matrix.rowmajor2colmajor");
2465 }
2466
2467 return V;
2468}
2469
2471 return !D->isWeak();
2472}
2473
2474static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E) {
2475 E = E->IgnoreParens();
2476
2477 if (const auto *UO = dyn_cast<UnaryOperator>(E))
2478 if (UO->getOpcode() == UO_Deref)
2479 return CGF.isPointerKnownNonNull(UO->getSubExpr());
2480
2481 if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
2482 return isDeclRefKnownNonNull(CGF, DRE->getDecl());
2483
2484 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
2485 if (isa<FieldDecl>(ME->getMemberDecl()))
2486 return true;
2487 return isDeclRefKnownNonNull(CGF, ME->getMemberDecl());
2488 }
2489
2490 // Array subscripts? Anything else?
2491
2492 return false;
2493}
2494
  // NOTE(review): the defining declaration line for this function was lost in
  // extraction; from the body this is CodeGenFunction::isPointerKnownNonNull,
  // called above from isLValueKnownNonNull — confirm against the full source.
  assert(E->getType()->isSignableType(getContext()));

  E = E->IgnoreParens();

  // 'this' is assumed non-null throughout CodeGen.
  if (isa<CXXThisExpr>(E))
    return true;

  // '&lv' is non-null when the lvalue is known to refer to valid storage.
  if (const auto *UO = dyn_cast<UnaryOperator>(E))
    if (UO->getOpcode() == UO_AddrOf)
      return isLValueKnownNonNull(*this, UO->getSubExpr());

  // Function/array decay produces a pointer to the decayed entity; defer to
  // whether that underlying lvalue is known non-null.
  if (const auto *CE = dyn_cast<CastExpr>(E))
    if (CE->getCastKind() == CK_FunctionToPointerDecay ||
        CE->getCastKind() == CK_ArrayToPointerDecay)
      return isLValueKnownNonNull(*this, CE->getSubExpr());

  // Maybe honor __nonnull?

  return false;
}
2516
  // NOTE(review): the defining declaration line was lost in extraction; the
  // body decides whether a class-hierarchy cast needs a null check on its
  // operand — confirm the exact signature against the full source.
  const Expr *E = CE->getSubExpr();

  // Unchecked derived-to-base conversions never need a null check.
  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(E->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
    // And that glvalue casts are never null.
    if (ICE->isGLValue())
      return false;
  }

  // All other casts may operate on a null pointer.
  return true;
}
2536
// RHS is an aggregate type
// Emit an HLSL elementwise cast: flatten the source lvalue into a list of
// scalar lvalues, then rebuild the destination (vector, constant matrix, or
// single builtin scalar) element by element with scalar conversions.
// NOTE(review): the first line of the signature was lost in extraction; the
// visible parameters are the destination type and a source location.
                                         QualType DestTy, SourceLocation Loc) {
  SmallVector<LValue, 16> LoadList;
  // Flatten the aggregate source into an ordered list of scalar lvalues.
  CGF.FlattenAccessAndTypeLValue(SrcVal, LoadList);
  // Dest is either a vector, constant matrix, or a builtin
  // if its a vector create a temp alloca to store into and return that
  if (auto *VecTy = DestTy->getAs<VectorType>()) {
    assert(LoadList.size() >= VecTy->getNumElements() &&
           "Flattened type on RHS must have the same number or more elements "
           "than vector on LHS.");
    llvm::Value *V = CGF.Builder.CreateLoad(
        CGF.CreateIRTempWithoutCast(DestTy, "flatcast.tmp"));
    // write to V.
    for (unsigned I = 0, E = VecTy->getNumElements(); I < E; I++) {
      RValue RVal = CGF.EmitLoadOfLValue(LoadList[I], Loc);
      assert(RVal.isScalar() &&
             "All flattened source values should be scalars.");
      // Convert each flattened scalar to the vector's element type before
      // inserting it at position I.
      llvm::Value *Cast =
          CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[I].getType(),
                                   VecTy->getElementType(), Loc);
      V = CGF.Builder.CreateInsertElement(V, Cast, I);
    }
    return V;
  }
  if (auto *MatTy = DestTy->getAs<ConstantMatrixType>()) {
    assert(LoadList.size() >= MatTy->getNumElementsFlattened() &&
           "Flattened type on RHS must have the same number or more elements "
           "than vector on LHS.");

    // NOTE(review): the continuation of this initializer was lost in
    // extraction; presumably it compares against
    // LangOptions::MatrixMemoryLayout::MatrixRowMajor — confirm.
    bool IsRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==

    llvm::Value *V = CGF.Builder.CreateLoad(
        CGF.CreateIRTempWithoutCast(DestTy, "flatcast.tmp"));
    // V is an allocated temporary for constructing the matrix.
    for (unsigned Row = 0, RE = MatTy->getNumRows(); Row < RE; Row++) {
      for (unsigned Col = 0, CE = MatTy->getNumColumns(); Col < CE; Col++) {
        // When interpreted as a matrix, \p LoadList is *always* row-major order
        // regardless of the default matrix memory layout.
        unsigned LoadIdx = MatTy->getRowMajorFlattenedIndex(Row, Col);
        RValue RVal = CGF.EmitLoadOfLValue(LoadList[LoadIdx], Loc);
        assert(RVal.isScalar() &&
               "All flattened source values should be scalars.");
        llvm::Value *Cast = CGF.EmitScalarConversion(
            RVal.getScalarVal(), LoadList[LoadIdx].getType(),
            MatTy->getElementType(), Loc);
        // Map (Row, Col) to the destination's configured memory layout.
        unsigned MatrixIdx = MatTy->getFlattenedIndex(Row, Col, IsRowMajor);
        V = CGF.Builder.CreateInsertElement(V, Cast, MatrixIdx);
      }
    }
    return V;
  }
  // if its a builtin just do an extract element or load.
  assert(DestTy->isBuiltinType() &&
         "Destination type must be a vector, matrix, or builtin type.");
  RValue RVal = CGF.EmitLoadOfLValue(LoadList[0], Loc);
  assert(RVal.isScalar() && "All flattened source values should be scalars.");
  return CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[0].getType(),
                                  DestTy, Loc);
}
2598
// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
// have to handle a more broad range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
//
// NOTE(review): several hyperlink-heavy lines were dropped when this source
// was extracted, so a few statements below are visibly truncated; comments on
// those spots are hedged accordingly.
Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
  // Track the cast currently being emitted, restoring the previous value on
  // every exit path from this visit.
  llvm::scope_exit RestoreCurCast(
      [this, Prev = CGF.CurCast] { CGF.CurCast = Prev; });
  CGF.CurCast = CE;

  Expr *E = CE->getSubExpr();
  QualType DestTy = CE->getType();
  CastKind Kind = CE->getCastKind();
  // Install the cast's floating-point options for the duration of emission.
  CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);

  // These cases are generally not written to ignore the result of
  // evaluating their sub-expressions, so we clear this now.
  bool Ignored = TestAndClearIgnoreResultAssign();

  // Since almost all cast kinds apply to scalars, this switch doesn't have
  // a default case, so the compiler will warn on a missing case.  The cases
  // are in the same order as in the CastKind enum.
  switch (Kind) {
  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  case CK_LValueBitCast:
  case CK_ObjCObjectLValueCast: {
    // Reinterpret the lvalue's storage with the destination's memory type,
    // then load the result.
    Address Addr = EmitLValue(E).getAddress();
    Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
    LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
    return EmitLoadOfLValue(LV, CE->getExprLoc());
  }

  case CK_LValueToRValueBitCast: {
    // Like CK_LValueBitCast, but the reinterpreting load may alias anything,
    // so mark the access with may-alias TBAA.
    LValue SourceLVal = CGF.EmitLValue(E);
    Address Addr =
        SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy));
    LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
    DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
    return EmitLoadOfLValue(DestLV, CE->getExprLoc());
  }

  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    Value *Src = Visit(E);
    llvm::Type *SrcTy = Src->getType();
    llvm::Type *DstTy = ConvertType(DestTy);

    // FIXME: this is a gross but seemingly necessary workaround for an issue
    // manifesting when a target uses a non-default AS for indirect sret args,
    // but the source HLL is generic, wherein a valid C-cast or reinterpret_cast
    // on the address of a local struct that gets returned by value yields an
    // invalid bitcast from the a pointer to the IndirectAS to a pointer to the
    // DefaultAS. We can only do this subversive thing because sret args are
    // manufactured and them residing in the IndirectAS is a target specific
    // detail, and doing an AS cast here still retains the semantics the user
    // expects. It is desirable to remove this iff a better solution is found.
    if (auto A = dyn_cast<llvm::Argument>(Src); A && A->hasStructRetAttr())
      return CGF.performAddrSpaceCast(Src, DstTy);

    assert(
        (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
         SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
        "Address-space cast must be used to convert address spaces");

    // Under -fsanitize=cfi-unrelated-cast, check the vptr of the pointee.
    // NOTE(review): the call head feeding these arguments was lost in
    // extraction (presumably a vtable-pointer check emission) — confirm.
    if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
      if (auto *PT = DestTy->getAs<PointerType>()) {
            PT->getPointeeType(),
            Address(Src,
                        E->getType()->castAs<PointerType>()->getPointeeType()),
                    CGF.getPointerAlign()),
            /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
            CE->getBeginLoc());
      }
    }

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      const QualType SrcType = E->getType();

      if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
        // Casting to pointer that could carry dynamic information (provided by
        // invariant.group) requires launder.
        Src = Builder.CreateLaunderInvariantGroup(Src);
      } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
        // Casting to pointer that does not carry dynamic information (provided
        // by invariant.group) requires stripping it. Note that we don't do it
        // if the source could not be dynamic type and destination could be
        // dynamic because dynamic information is already laundered. It is
        // because launder(strip(src)) == launder(src), so there is no need to
        // add extra strip before launder.
        Src = Builder.CreateStripInvariantGroup(Src);
      }
    }

    // Update heapallocsite metadata when there is an explicit pointer cast.
    if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
      if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
          !isa<CastExpr>(E)) {
        QualType PointeeType = DestTy->getPointeeType();
        if (!PointeeType.isNull())
          CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
                                                       CE->getExprLoc());
      }
    }

    // If Src is a fixed vector and Dst is a scalable vector, and both have the
    // same element type, use the llvm.vector.insert intrinsic to perform the
    // bitcast.
    if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
        // If we are casting a fixed i8 vector to a scalable i1 predicate
        // vector, use a vector insert and bitcast the result.
        if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
            FixedSrcTy->getElementType()->isIntegerTy(8)) {
          ScalableDstTy = llvm::ScalableVectorType::get(
              FixedSrcTy->getElementType(),
              llvm::divideCeil(
                  ScalableDstTy->getElementCount().getKnownMinValue(), 8));
        }
        if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
          llvm::Value *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
          llvm::Value *Result = Builder.CreateInsertVector(
              ScalableDstTy, PoisonVec, Src, uint64_t(0), "cast.scalable");
          ScalableDstTy = cast<llvm::ScalableVectorType>(
              llvm::VectorType::getWithSizeAndScalar(ScalableDstTy, DstTy));
          if (Result->getType() != ScalableDstTy)
            Result = Builder.CreateBitCast(Result, ScalableDstTy);
          if (Result->getType() != DstTy)
            Result = Builder.CreateExtractVector(DstTy, Result, uint64_t(0));
          return Result;
        }
      }
    }

    // If Src is a scalable vector and Dst is a fixed vector, and both have the
    // same element type, use the llvm.vector.extract intrinsic to perform the
    // bitcast.
    if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
      if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
        // If we are casting a scalable i1 predicate vector to a fixed i8
        // vector, bitcast the source and use a vector extract.
        if (ScalableSrcTy->getElementType()->isIntegerTy(1) &&
            FixedDstTy->getElementType()->isIntegerTy(8)) {
          // Pad the i1 vector up to a multiple of 8 lanes with zeros so it
          // can be reinterpreted as i8 lanes.
          if (!ScalableSrcTy->getElementCount().isKnownMultipleOf(8)) {
            ScalableSrcTy = llvm::ScalableVectorType::get(
                ScalableSrcTy->getElementType(),
                llvm::alignTo<8>(
                    ScalableSrcTy->getElementCount().getKnownMinValue()));
            llvm::Value *ZeroVec = llvm::Constant::getNullValue(ScalableSrcTy);
            Src = Builder.CreateInsertVector(ScalableSrcTy, ZeroVec, Src,
                                             uint64_t(0));
          }

          ScalableSrcTy = llvm::ScalableVectorType::get(
              FixedDstTy->getElementType(),
              ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
          Src = Builder.CreateBitCast(Src, ScalableSrcTy);
        }
        if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType())
          return Builder.CreateExtractVector(DstTy, Src, uint64_t(0),
                                             "cast.fixed");
      }
    }

    // Perform VLAT <-> VLST bitcast through memory.
    // TODO: since the llvm.vector.{insert,extract} intrinsics
    // require the element types of the vectors to be the same, we
    // need to keep this around for bitcasts between VLAT <-> VLST where
    // the element types of the vectors are not the same, until we figure
    // out a better way of doing these casts.
    // NOTE(review): the remainder of this condition was lost in extraction
    // (presumably the scalable/fixed pairing tests) — confirm.
    if ((isa<llvm::FixedVectorType>(SrcTy) &&
      Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
      LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
      CGF.EmitStoreOfScalar(Src, LV);
      Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
      LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
      DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
      return EmitLoadOfLValue(DestLV, CE->getExprLoc());
    }

    // Plain bitcast; re-sign the pointer if pointer authentication requires.
    llvm::Value *Result = Builder.CreateBitCast(Src, DstTy);
    return CGF.authPointerToPointerCast(Result, E->getType(), DestTy);
  }
  case CK_AddressSpaceConversion: {
    Expr::EvalResult Result;
    if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
        Result.Val.isNullPointer()) {
      // If E has side effect, it is emitted even if its final result is a
      // null pointer. In that case, a DCE pass should be able to
      // eliminate the useless instructions emitted during translating E.
      if (Result.HasSideEffects)
        Visit(E);
      // NOTE(review): the head of this return statement was lost in
      // extraction (presumably returning the destination's null pointer).
                                      ConvertType(DestTy)), DestTy);
    }
    // Since target may map different address spaces in AST to the same address
    // space, an address space conversion may end up as a bitcast.
    return CGF.performAddrSpaceCast(Visit(E), ConvertType(DestTy));
  }
  case CK_AtomicToNonAtomic:
  case CK_NonAtomicToAtomic:
  case CK_UserDefinedConversion:
    return Visit(E);

  case CK_NoOp: {
    // A no-op cast can still change volatility, which forces a real load.
    return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE) : Visit(E);
  }

  case CK_BaseToDerived: {
    const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
    assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");

    // NOTE(review): the final argument of this call was lost in extraction.
    Address Base = CGF.EmitPointerWithAlignment(E);
    Address Derived =
        CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
                                     CE->path_begin(), CE->path_end(),

    // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    // NOTE(review): the type-check call head was lost in extraction.
    if (CGF.sanitizePerformTypeCheck())
                        Derived, DestTy->getPointeeType());

    if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
      CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
                                    /*MayBeNull=*/true,
                                    CE->getBeginLoc());

    return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType());
  }
  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    // The EmitPointerWithAlignment path does this fine; just discard
    // the alignment.
    // NOTE(review): the head of this return statement was lost in extraction.
                                      CE->getType()->getPointeeType());
  }

  case CK_Dynamic: {
    // dynamic_cast is delegated to the runtime-aware helper.
    Address V = CGF.EmitPointerWithAlignment(E);
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
    return CGF.EmitDynamicCast(V, DCE);
  }

  case CK_ArrayToPointerDecay:
    // NOTE(review): the head of this return statement was lost in extraction.
                                      CE->getType()->getPointeeType());
  case CK_FunctionToPointerDecay:
    return EmitLValue(E).getPointer(CGF);

  case CK_NullToPointer:
    // Evaluate the operand for side effects only, then produce the target's
    // null pointer representation.
    if (MustVisitNullValue(E))
      CGF.EmitIgnoredExpr(E);

    return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
                                  DestTy);

  case CK_NullToMemberPointer: {
    if (MustVisitNullValue(E))
      CGF.EmitIgnoredExpr(E);

    const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
  }

  case CK_ReinterpretMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer: {
    Value *Src = Visit(E);

    // Note that the AST doesn't distinguish between checked and
    // unchecked member pointer conversions, so we always have to
    // implement checked conversions here. This is inefficient when
    // actual control flow may be required in order to perform the
    // check, which it is for data member pointers (but not member
    // function pointers on Itanium and ARM).
    return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
  }

  // Objective-C ARC ownership conversions.
  case CK_ARCProduceObject:
    return CGF.EmitARCRetainScalarExpr(E);
  case CK_ARCConsumeObject:
    return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
  case CK_ARCReclaimReturnedObject:
    return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
  case CK_ARCExtendBlockObject:
    return CGF.EmitARCExtendBlockObject(E);

  case CK_CopyAndAutoreleaseBlockObject:
    return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());

  case CK_FloatingRealToComplex:
  case CK_FloatingComplexCast:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_FloatingComplexToIntegralComplex:
  case CK_ConstructorConversion:
  case CK_ToUnion:
  case CK_HLSLArrayRValue:
    llvm_unreachable("scalar cast to non-scalar value");

  case CK_LValueToRValue:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
    assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
    return Visit(E);

  case CK_IntegralToPointer: {
    Value *Src = Visit(E);

    // First, convert to the correct width so that we control the kind of
    // extension.
    auto DestLLVMTy = ConvertType(DestTy);
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
    bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
    llvm::Value* IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");

    auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      // Going from integer to pointer that could be dynamic requires reloading
      // dynamic information from invariant.group.
      if (DestTy.mayBeDynamicClass())
        IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
    }

    IntToPtr = CGF.authPointerToPointerCast(IntToPtr, E->getType(), DestTy);
    return IntToPtr;
  }
  case CK_PointerToIntegral: {
    assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
    auto *PtrExpr = Visit(E);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      const QualType SrcType = E->getType();

      // Casting to integer requires stripping dynamic information as it does
      // not carries it.
      if (SrcType.mayBeDynamicClass())
        PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
    }

    PtrExpr = CGF.authPointerToPointerCast(PtrExpr, E->getType(), DestTy);
    return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
  }
  case CK_ToVoid: {
    // Evaluate for side effects only; a void cast produces no value.
    CGF.EmitIgnoredExpr(E);
    return nullptr;
  }
  case CK_MatrixCast: {
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  // CK_HLSLAggregateSplatCast only handles splatting to vectors from a vec1
  // Casts were inserted in Sema to Cast the Src Expr to a Scalar and
  // To perform any necessary Scalar Cast, so this Cast can be handled
  // by the regular Vector Splat cast code.
  case CK_HLSLAggregateSplatCast:
  case CK_VectorSplat: {
    llvm::Type *DstTy = ConvertType(DestTy);
    Value *Elt = Visit(E);
    // Splat the element across to all elements
    llvm::ElementCount NumElements =
        cast<llvm::VectorType>(DstTy)->getElementCount();
    return Builder.CreateVectorSplat(NumElements, Elt, "splat");
  }

  case CK_FixedPointCast:
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_FixedPointToBoolean:
    assert(E->getType()->isFixedPointType() &&
           "Expected src type to be fixed point type");
    assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_FixedPointToIntegral:
    assert(E->getType()->isFixedPointType() &&
           "Expected src type to be fixed point type");
    assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_IntegralToFixedPoint:
    assert(E->getType()->isIntegerType() &&
           "Expected src type to be an integer");
    assert(DestTy->isFixedPointType() &&
           "Expected dest type to be fixed point type");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_IntegralCast: {
    // Vector-to-vector integral casts are a single IR int cast.
    // NOTE(review): the signedness argument of this call was lost in
    // extraction (presumably derived from SrcElTy) — confirm.
    if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
      QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
      return Builder.CreateIntCast(Visit(E), ConvertType(DestTy),
                                   "conv");
    }
    ScalarConversionOpts Opts;
    if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
      // Only sanitize implicit conversions that the user did not spell out
      // explicitly.
      if (!ICE->isPartOfExplicitCast())
        Opts = ScalarConversionOpts(CGF.SanOpts);
    }
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc(), Opts);
  }
  case CK_IntegralToFloating: {
    if (E->getType()->isVectorType() && DestTy->isVectorType()) {
      // TODO: Support constrained FP intrinsics.
      QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
      if (SrcElTy->isSignedIntegerOrEnumerationType())
        return Builder.CreateSIToFP(Visit(E), ConvertType(DestTy), "conv");
      return Builder.CreateUIToFP(Visit(E), ConvertType(DestTy), "conv");
    }
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_FloatingToIntegral: {
    if (E->getType()->isVectorType() && DestTy->isVectorType()) {
      // TODO: Support constrained FP intrinsics.
      QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
      if (DstElTy->isSignedIntegerOrEnumerationType())
        return Builder.CreateFPToSI(Visit(E), ConvertType(DestTy), "conv");
      return Builder.CreateFPToUI(Visit(E), ConvertType(DestTy), "conv");
    }
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_FloatingCast: {
    if (E->getType()->isVectorType() && DestTy->isVectorType()) {
      // TODO: Support constrained FP intrinsics.
      QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
      QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
      // Narrower builtin kind means a truncation; otherwise extend.
      if (DstElTy->castAs<BuiltinType>()->getKind() <
          SrcElTy->castAs<BuiltinType>()->getKind())
        return Builder.CreateFPTrunc(Visit(E), ConvertType(DestTy), "conv");
      return Builder.CreateFPExt(Visit(E), ConvertType(DestTy), "conv");
    }
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_FixedPointToFloating:
  case CK_FloatingToFixedPoint: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_BooleanToSignedIntegral: {
    // bool -> signed int yields 0 / -1 rather than 0 / 1.
    ScalarConversionOpts Opts;
    Opts.TreatBooleanAsSigned = true;
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc(), Opts);
  }
  case CK_IntegralToBoolean:
    return EmitIntToBoolConversion(Visit(E));
  case CK_PointerToBoolean:
    return EmitPointerToBoolConversion(Visit(E), E->getType());
  case CK_FloatingToBoolean: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitFloatToBoolConversion(Visit(E));
  }
  case CK_MemberPointerToBoolean: {
    llvm::Value *MemPtr = Visit(E);
    const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
  }

  case CK_FloatingComplexToReal:
  case CK_IntegralComplexToReal:
    // Emit only the real half of the complex value.
    return CGF.EmitComplexExpr(E, false, true).first;

  case CK_FloatingComplexToBoolean:
  case CK_IntegralComplexToBoolean: {
    // NOTE(review): the line materializing the complex value V was lost in
    // extraction — confirm against the full source.

    // TODO: kill this function off, inline appropriate case here
    return EmitComplexToScalarConversion(V, E->getType(), DestTy,
                                         CE->getExprLoc());
  }

  case CK_ZeroToOCLOpaqueType: {
    assert((DestTy->isEventT() || DestTy->isQueueT() ||
            DestTy->isOCLIntelSubgroupAVCType()) &&
           "CK_ZeroToOCLEvent cast on non-event type");
    return llvm::Constant::getNullValue(ConvertType(DestTy));
  }

  case CK_IntToOCLSampler:
    return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);

  case CK_HLSLVectorTruncation: {
    assert((DestTy->isVectorType() || DestTy->isBuiltinType()) &&
           "Destination type must be a vector or builtin type.");
    Value *Vec = Visit(E);
    // Truncating to a shorter vector: shuffle out the leading elements.
    if (auto *VecTy = DestTy->getAs<VectorType>()) {
      SmallVector<int> Mask;
      unsigned NumElts = VecTy->getNumElements();
      for (unsigned I = 0; I != NumElts; ++I)
        Mask.push_back(I);

      return Builder.CreateShuffleVector(Vec, Mask, "trunc");
    }
    // Truncating to a scalar: take element zero.
    llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy);
    return Builder.CreateExtractElement(Vec, Zero, "cast.vtrunc");
  }
  case CK_HLSLMatrixTruncation: {
    assert((DestTy->isMatrixType() || DestTy->isBuiltinType()) &&
           "Destination type must be a matrix or builtin type.");
    Value *Mat = Visit(E);
    // Truncating to a smaller matrix: build a shuffle mask mapping each
    // (row, col) of the destination to its flattened index in the source,
    // respecting the configured memory layout.
    if (auto *MatTy = DestTy->getAs<ConstantMatrixType>()) {
      SmallVector<int> Mask(MatTy->getNumElementsFlattened());
      unsigned NumCols = MatTy->getNumColumns();
      unsigned NumRows = MatTy->getNumRows();
      auto *SrcMatTy = E->getType()->getAs<ConstantMatrixType>();
      assert(SrcMatTy && "Source type must be a matrix type.");
      assert(NumRows <= SrcMatTy->getNumRows());
      assert(NumCols <= SrcMatTy->getNumColumns());
      bool IsRowMajor = CGF.getLangOpts().getDefaultMatrixMemoryLayout() ==
                        LangOptions::MatrixMemoryLayout::MatrixRowMajor;
      for (unsigned R = 0; R < NumRows; R++)
        for (unsigned C = 0; C < NumCols; C++)
          Mask[MatTy->getFlattenedIndex(R, C, IsRowMajor)] =
              SrcMatTy->getFlattenedIndex(R, C, IsRowMajor);

      return Builder.CreateShuffleVector(Mat, Mask, "trunc");
    }
    // Truncating to a scalar: take element zero.
    llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy);
    return Builder.CreateExtractElement(Mat, Zero, "cast.mtrunc");
  }
  case CK_HLSLElementwiseCast: {
    RValue RV = CGF.EmitAnyExpr(E);
    SourceLocation Loc = CE->getExprLoc();

    Address SrcAddr = Address::invalid();

    // Scalar/complex sources are spilled to a temporary so the elementwise
    // helper can flatten them uniformly as an lvalue.
    if (RV.isAggregate()) {
      SrcAddr = RV.getAggregateAddress();
    } else {
      SrcAddr = CGF.CreateMemTemp(E->getType(), "hlsl.ewcast.src");
      LValue TmpLV = CGF.MakeAddrLValue(SrcAddr, E->getType());
      CGF.EmitStoreThroughLValue(RV, TmpLV);
    }

    LValue SrcVal = CGF.MakeAddrLValue(SrcAddr, E->getType());
    return EmitHLSLElementwiseCast(CGF, SrcVal, DestTy, Loc);
  }

  } // end of switch

  llvm_unreachable("unknown scalar cast");
}
3165
3166Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
3167 CodeGenFunction::StmtExprEvaluation eval(CGF);
3168 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
3169 !E->getType()->isVoidType());
3170 if (!RetAlloca.isValid())
3171 return nullptr;
3172 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
3173 E->getExprLoc());
3174}
3175
3176Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
3177 CodeGenFunction::RunCleanupsScope Scope(CGF);
3178 Value *V = Visit(E->getSubExpr());
3179 // Defend against dominance problems caused by jumps out of expression
3180 // evaluation through the shared cleanup block.
3181 Scope.ForceCleanup({&V});
3182 return V;
3183}
3184
3185//===----------------------------------------------------------------------===//
3186// Unary Operators
3187//===----------------------------------------------------------------------===//
3188
                                             llvm::Value *InVal, bool IsInc,
                                             FPOptions FPFeatures) {
  // Package an increment/decrement as an add/sub-of-1 BinOpInfo so it can
  // reuse the binary-operator emission paths (e.g. overflow-checked ops).
  // NOTE(review): the first line of this signature was lost in extraction;
  // callers below pass a UnaryOperator *E as the first argument — confirm.
  BinOpInfo BinOp;
  BinOp.LHS = InVal;
  // The amount is always the unsigned constant 1 of the operand's type; the
  // opcode (add vs. sub) encodes the direction.
  BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
  BinOp.Ty = E->getType();
  BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
  BinOp.FPFeatures = FPFeatures;
  BinOp.E = E;
  return BinOp;
}
3201
/// Emit an integer increment or decrement of InVal, choosing wrapping,
/// nsw-flagged, or sanitizer-checked arithmetic based on the type's overflow
/// behavior and the active overflow sanitizers.  Note the deliberate
/// [[fallthrough]]s: each case defers to the next, stricter, handling when
/// its own fast path does not apply.
llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
    const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
  // Treat positive amount as unsigned to support inc of i1 (needed for
  // unsigned _BitInt(1)).
  llvm::Value *Amount =
      llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, !IsInc);
  StringRef Name = IsInc ? "inc" : "dec";
  QualType Ty = E->getType();
  const bool isSigned = Ty->isSignedIntegerOrEnumerationType();
  // Pick the sanitizer matching the operand's signedness.
  const bool hasSan =
      isSigned ? CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)
               : CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow);

  switch (getOverflowBehaviorConsideringType(CGF, Ty)) {
  case LangOptions::OB_Wrap:
    // Wrapping semantics: a plain add is always correct.
    return Builder.CreateAdd(InVal, Amount, Name);
  case LangOptions::OB_SignedAndDefined:
    // Overflow is defined; only instrument when a sanitizer asked for it.
    if (!hasSan)
      return Builder.CreateAdd(InVal, Amount, Name);
    [[fallthrough]];
  case LangOptions::OB_Unset:
    // Expressions that cannot overflow need no flags or checks.
    if (!E->canOverflow())
      return Builder.CreateAdd(InVal, Amount, Name);
    // Without a sanitizer, signed ops carry nsw; unsigned ops wrap.
    if (!hasSan)
      return isSigned ? Builder.CreateNSWAdd(InVal, Amount, Name)
                      : Builder.CreateAdd(InVal, Amount, Name);
    [[fallthrough]];
  case LangOptions::OB_Trap:
    if (!Ty->getAs<OverflowBehaviorType>() && !E->canOverflow())
      return Builder.CreateAdd(InVal, Amount, Name);
    // Route through the overflow-checked binary-op path unless the check can
    // be statically elided.
    BinOpInfo Info = createBinOpInfoFromIncDec(
        E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts()));
    if (CanElideOverflowCheck(CGF.getContext(), Info))
      return isSigned ? Builder.CreateNSWAdd(InVal, Amount, Name)
                      : Builder.CreateAdd(InVal, Amount, Name);
    return EmitOverflowCheckedBinOp(Info);
  }
  llvm_unreachable("Unknown OverflowBehaviorKind");
}
3241
namespace {
/// Handles check and update for lastprivate conditional variables.
///
/// RAII helper: on destruction, if OpenMP is enabled, it notifies the OpenMP
/// runtime layer about a potential update of a lastprivate(conditional)
/// variable via the unary operator's sub-expression.
class OMPLastprivateConditionalUpdateRAII {
private:
  CodeGenFunction &CGF;     // CodeGen context for the current function.
  const UnaryOperator *E;   // The inc/dec operator whose operand may be
                            // a lastprivate conditional variable.

public:
  OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
                                      const UnaryOperator *E)
      : CGF(CGF), E(E) {}
  ~OMPLastprivateConditionalUpdateRAII() {
    // NOTE(review): the head of this call was lost in extraction (presumably
    // the OpenMP runtime's lastprivate-conditional check entry point).
    if (CGF.getLangOpts().OpenMP)
          CGF, E->getSubExpr());
  }
};
} // namespace
3260
/// Emit a scalar pre/post increment or decrement (++ / --) applied to the
/// l-value \p LV, for the unary operator \p E.
///
/// \param E     The ++/-- expression being emitted.
/// \param LV    The l-value being modified in place.
/// \param isInc True for increment, false for decrement.
/// \param isPre True for the prefix form; determines which value is returned.
/// \returns The value of the expression: the updated value for the prefix
///          form, the originally loaded value for the postfix form.
llvm::Value *
ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                           bool isInc, bool isPre) {
  ApplyAtomGroup Grp(CGF.getDebugInfo());
  OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
  QualType type = E->getSubExpr()->getType();
  // Non-null only on the generic atomic path: the cmpxchg-loop PHI.
  llvm::PHINode *atomicPHI = nullptr;
  llvm::Value *value;
  llvm::Value *input;
  // Previous/SrcType track the pre-demotion value and its type so the
  // bitfield implicit-conversion sanitizer can check the narrowing store.
  llvm::Value *Previous = nullptr;
  QualType SrcType = E->getType();

  int amount = (isInc ? 1 : -1);
  bool isSubtraction = !isInc;

  if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
    type = atomicTy->getValueType();
    // Atomic bool increment: ++b is always true, so a plain atomic store of
    // true suffices for prefix; postfix needs the old value, via xchg.
    if (isInc && type->isBooleanType()) {
      llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
      if (isPre) {
        Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
            ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
        return Builder.getTrue();
      }
      // For atomic bool increment, we just store true and return it for
      // preincrement, do an atomic swap with true for postincrement
      return Builder.CreateAtomicRMW(
          llvm::AtomicRMWInst::Xchg, LV.getAddress(), True,
          llvm::AtomicOrdering::SequentiallyConsistent);
    }
    // Special case for atomic increment / decrement on integers, emit
    // atomicrmw instructions. We skip this if we want to be doing overflow
    // checking, and fall into the slow path with the atomic cmpxchg loop.
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
            LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
        llvm::AtomicRMWInst::Sub;
      llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
        llvm::Instruction::Sub;
      llvm::Value *amt = CGF.EmitToMemory(
          llvm::ConstantInt::get(ConvertType(type), 1, true), type);
      llvm::Value *old =
          Builder.CreateAtomicRMW(aop, LV.getAddress(), amt,
                                  llvm::AtomicOrdering::SequentiallyConsistent);
      // atomicrmw returns the old value; recompute the new one for prefix.
      return isPre ? Builder.CreateBinOp(op, old, amt) : old;
    }
    // Special case for atomic increment/decrement on floats.
    // Bail out non-power-of-2-sized floating point types (e.g., x86_fp80).
    if (type->isFloatingType()) {
      llvm::Type *Ty = ConvertType(type);
      if (llvm::has_single_bit(Ty->getScalarSizeInBits())) {
        llvm::AtomicRMWInst::BinOp aop =
            isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
        llvm::Instruction::BinaryOps op =
            isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
        llvm::Value *amt = llvm::ConstantFP::get(Ty, 1.0);
        llvm::AtomicRMWInst *old =
            CGF.emitAtomicRMWInst(aop, LV.getAddress(), amt,
                                  llvm::AtomicOrdering::SequentiallyConsistent);

        return isPre ? Builder.CreateBinOp(op, old, amt) : old;
      }
    }
    value = EmitLoadOfLValue(LV, E->getExprLoc());
    input = value;
    // For every other atomic operation, we need to emit a load-op-cmpxchg loop
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    value = CGF.EmitToMemory(value, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    atomicPHI = Builder.CreatePHI(value->getType(), 2);
    atomicPHI->addIncoming(value, startBB);
    value = atomicPHI;
  } else {
    // Non-atomic path: just load the current value.
    value = EmitLoadOfLValue(LV, E->getExprLoc());
    input = value;
  }

  // Special case of integer increment that we have to check first: bool++.
  // Due to promotion rules, we get:
  //   bool++ -> bool = bool + 1
  //          -> bool = (int)bool + 1
  //          -> bool = ((int)bool + 1 != 0)
  // An interesting aspect of this is that increment is always true.
  // Decrement does not have this property.
  if (isInc && type->isBooleanType()) {
    value = Builder.getTrue();

    // Most common case by far: integer increment.
  } else if (type->isIntegerType()) {
    QualType promotedType;
    bool canPerformLossyDemotionCheck = false;

      // NOTE(review): guarded by a promotable-integer-type check whose line
      // is elided in this view — confirm against the full source.
      promotedType = CGF.getContext().getPromotedIntegerType(type);
      assert(promotedType != type && "Shouldn't promote to the same type.");
      canPerformLossyDemotionCheck = true;
      canPerformLossyDemotionCheck &=
          CGF.getContext().getCanonicalType(promotedType);
      canPerformLossyDemotionCheck &=
              type, promotedType);
      assert((!canPerformLossyDemotionCheck ||
              type->isSignedIntegerOrEnumerationType() ||
              promotedType->isSignedIntegerOrEnumerationType() ||
              ConvertType(type)->getScalarSizeInBits() ==
                  ConvertType(promotedType)->getScalarSizeInBits()) &&
             "The following check expects that if we do promotion to different "
             "underlying canonical type, at least one of the types (either "
             "base or promoted) will be signed, or the bitwidths will match.");
    }
    if (CGF.SanOpts.hasOneOf(
            SanitizerKind::ImplicitIntegerArithmeticValueChange |
            SanitizerKind::ImplicitBitfieldConversion) &&
        canPerformLossyDemotionCheck) {
      // While `x += 1` (for `x` with width less than int) is modeled as
      // promotion+arithmetics+demotion, and we can catch lossy demotion with
      // ease; inc/dec with width less than int can't overflow because of
      // promotion rules, so we omit promotion+demotion, which means that we can
      // not catch lossy "demotion". Because we still want to catch these cases
      // when the sanitizer is enabled, we perform the promotion, then perform
      // the increment/decrement in the wider type, and finally
      // perform the demotion. This will catch lossy demotions.

      // We have a special case for bitfields defined using all the bits of the
      // type. In this case we need to do the same trick as for the integer
      // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.

      value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
      Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
      // Do pass non-default ScalarConversionOpts so that sanitizer check is
      // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
      // checks will take care of the conversion.
      ScalarConversionOpts Opts;
      if (!LV.isBitField())
        Opts = ScalarConversionOpts(CGF.SanOpts);
      else if (CGF.SanOpts.has(SanitizerKind::ImplicitBitfieldConversion)) {
        Previous = value;
        SrcType = promotedType;
      }

      value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
                                   Opts);

      // Note that signed integer inc/dec with width less than int can't
      // overflow because of promotion rules; we're just eliding a few steps
      // here.
    } else if (type->isSignedIntegerOrEnumerationType() ||
               type->isUnsignedIntegerType()) {
      value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
    } else {
      // Treat positive amount as unsigned to support inc of i1 (needed for
      // unsigned _BitInt(1)).
      llvm::Value *amt =
          llvm::ConstantInt::get(value->getType(), amount, !isInc);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    }

    // Next most common: pointer increment.
  } else if (const PointerType *ptr = type->getAs<PointerType>()) {
    QualType type = ptr->getPointeeType();

    // VLA types don't have constant size.
    if (const VariableArrayType *vla
      llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
      if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
      llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
      if (CGF.getLangOpts().PointerOverflowDefined)
        value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
      else
        value = CGF.EmitCheckedInBoundsGEP(
            elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
            E->getExprLoc(), "vla.inc");

    // Arithmetic on function pointers (!) is just +-1.
    } else if (type->isFunctionType()) {
      llvm::Value *amt = Builder.getInt32(amount);

      if (CGF.getLangOpts().PointerOverflowDefined)
        value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
      else
        value =
            CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
                                       /*SignedIndices=*/false, isSubtraction,
                                       E->getExprLoc(), "incdec.funcptr");

    // For everything else, we can just do a simple increment.
    } else {
      llvm::Value *amt = Builder.getInt32(amount);
      llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
      if (CGF.getLangOpts().PointerOverflowDefined)
        value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
      else
        value = CGF.EmitCheckedInBoundsGEP(
            elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
            E->getExprLoc(), "incdec.ptr");
    }

  // Vector increment/decrement.
  } else if (type->isVectorType()) {
    if (type->hasIntegerRepresentation()) {
      llvm::Value *amt = llvm::ConstantInt::getSigned(value->getType(), amount);

      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    } else {
      value = Builder.CreateFAdd(
          value,
          llvm::ConstantFP::get(value->getType(), amount),
          isInc ? "inc" : "dec");
    }

  // Floating point.
  } else if (type->isRealFloatingType()) {
    // Add the inc/dec to the real part.
    llvm::Value *amt;
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);

    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
      // Another special case: half FP increment should be done via float. If
      // the input isn't already half, it may be i16.
      Value *bitcast = Builder.CreateBitCast(input, CGF.CGM.HalfTy);
      value = Builder.CreateFPExt(bitcast, CGF.CGM.FloatTy, "incdec.conv");
    }

    if (value->getType()->isFloatTy())
      amt = llvm::ConstantFP::get(VMContext,
                                  llvm::APFloat(static_cast<float>(amount)));
    else if (value->getType()->isDoubleTy())
      amt = llvm::ConstantFP::get(VMContext,
                                  llvm::APFloat(static_cast<double>(amount)));
    else {
      // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
      // Convert from float.
      llvm::APFloat F(static_cast<float>(amount));
      bool ignored;
      const llvm::fltSemantics *FS;
      // Don't use getFloatTypeSemantics because Half isn't
      // necessarily represented using the "half" LLVM type.
      if (value->getType()->isFP128Ty())
        FS = &CGF.getTarget().getFloat128Format();
      else if (value->getType()->isHalfTy())
        FS = &CGF.getTarget().getHalfFormat();
      else if (value->getType()->isBFloatTy())
        FS = &CGF.getTarget().getBFloat16Format();
      else if (value->getType()->isPPC_FP128Ty())
        FS = &CGF.getTarget().getIbm128Format();
      else
        FS = &CGF.getTarget().getLongDoubleFormat();
      F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
      amt = llvm::ConstantFP::get(VMContext, F);
    }
    value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");

    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
      // Truncate back to half and restore the original storage type.
      value = Builder.CreateFPTrunc(value, CGF.CGM.HalfTy, "incdec.conv");
      value = Builder.CreateBitCast(value, input->getType());
    }

  // Fixed-point types.
  } else if (type->isFixedPointType()) {
    // Fixed-point types are tricky. In some cases, it isn't possible to
    // represent a 1 or a -1 in the type at all. Piggyback off of
    // EmitFixedPointBinOp to avoid having to reimplement saturation.
    BinOpInfo Info;
    Info.E = E;
    Info.Ty = E->getType();
    Info.Opcode = isInc ? BO_Add : BO_Sub;
    Info.LHS = value;
    Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
    // If the type is signed, it's better to represent this as +(-1) or -(-1),
    // since -1 is guaranteed to be representable.
    if (type->isSignedFixedPointType()) {
      Info.Opcode = isInc ? BO_Sub : BO_Add;
      Info.RHS = Builder.CreateNeg(Info.RHS);
    }
    // Now, convert from our invented integer literal to the type of the unary
    // op. This will upscale and saturate if necessary. This value can become
    // undef in some cases.
    llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
    auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
    Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
    value = EmitFixedPointBinOp(Info);

  // Objective-C pointer types.
  } else {
    const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();

    CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
    if (!isInc) size = -size;
    llvm::Value *sizeValue =
        llvm::ConstantInt::getSigned(CGF.SizeTy, size.getQuantity());

    if (CGF.getLangOpts().PointerOverflowDefined)
      value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
    else
      value = CGF.EmitCheckedInBoundsGEP(
          CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
          E->getExprLoc(), "incdec.objptr");
    value = Builder.CreateBitCast(value, input->getType());
  }

  // Close the atomic cmpxchg loop opened above: try to install the new
  // value; on failure loop back with the freshly observed old value.
  if (atomicPHI) {
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(old, curBlock);
    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return isPre ? value : input;
  }

  // Store the updated result through the lvalue.
  if (LV.isBitField()) {
    Value *Src = Previous ? Previous : value;
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
    CGF.EmitBitfieldConversionCheck(Src, SrcType, value, E->getType(),
                                    LV.getBitFieldInfo(), E->getExprLoc());
  } else
    CGF.EmitStoreThroughLValue(RValue::get(value), LV);

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? value : input;
}
3595
3596
3597Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3598 QualType PromotionType) {
3599 QualType promotionTy = PromotionType.isNull()
3600 ? getPromotionType(E->getSubExpr()->getType())
3601 : PromotionType;
3602 Value *result = VisitPlus(E, promotionTy);
3603 if (result && !promotionTy.isNull())
3604 result = EmitUnPromotedValue(result, E->getType());
3605 return result;
3606}
3607
3608Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3609 QualType PromotionType) {
3610 // This differs from gcc, though, most likely due to a bug in gcc.
3611 TestAndClearIgnoreResultAssign();
3612 if (!PromotionType.isNull())
3613 return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3614 return Visit(E->getSubExpr());
3615}
3616
3617Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3618 QualType PromotionType) {
3619 QualType promotionTy = PromotionType.isNull()
3620 ? getPromotionType(E->getSubExpr()->getType())
3621 : PromotionType;
3622 Value *result = VisitMinus(E, promotionTy);
3623 if (result && !promotionTy.isNull())
3624 result = EmitUnPromotedValue(result, E->getType());
3625 return result;
3626}
3627
3628Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3629 QualType PromotionType) {
3630 TestAndClearIgnoreResultAssign();
3631 Value *Op;
3632 if (!PromotionType.isNull())
3633 Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3634 else
3635 Op = Visit(E->getSubExpr());
3636
3637 // Generate a unary FNeg for FP ops.
3638 if (Op->getType()->isFPOrFPVectorTy())
3639 return Builder.CreateFNeg(Op, "fneg");
3640
3641 // Emit unary minus with EmitSub so we handle overflow cases etc.
3642 BinOpInfo BinOp;
3643 BinOp.RHS = Op;
3644 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
3645 BinOp.Ty = E->getType();
3646 BinOp.Opcode = BO_Sub;
3647 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3648 BinOp.E = E;
3649 return EmitSub(BinOp);
3650}
3651
3652Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3653 TestAndClearIgnoreResultAssign();
3654 Value *Op = Visit(E->getSubExpr());
3655 return Builder.CreateNot(Op, "not");
3656}
3657
3658Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
3659 // Perform vector logical not on comparison with zero vector.
3660 if (E->getType()->isVectorType() &&
3661 E->getType()->castAs<VectorType>()->getVectorKind() ==
3662 VectorKind::Generic) {
3663 Value *Oper = Visit(E->getSubExpr());
3664 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
3665 Value *Result;
3666 if (Oper->getType()->isFPOrFPVectorTy()) {
3667 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
3668 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
3669 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
3670 } else
3671 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
3672 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
3673 }
3674
3675 // Compare operand to zero.
3676 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
3677
3678 // Invert value.
3679 // TODO: Could dynamically modify easy computations here. For example, if
3680 // the operand is an icmp ne, turn into icmp eq.
3681 BoolVal = Builder.CreateNot(BoolVal, "lnot");
3682
3683 // ZExt result to the expr type.
3684 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
3685}
3686
/// Emit __builtin_offsetof(type, designator).
///
/// Tries constant evaluation first; otherwise walks the designator's
/// components (array subscripts, field accesses, base-class hops),
/// accumulating byte offsets into a running sum of the result type.
Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
  // Try folding the offsetof to a constant.
  Expr::EvalResult EVResult;
  if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
    llvm::APSInt Value = EVResult.Val.getInt();
    return Builder.getInt(Value);
  }

  // Loop over the components of the offsetof to compute the value.
  unsigned n = E->getNumComponents();
  llvm::Type* ResultType = ConvertType(E->getType());
  llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
  // CurrentType tracks the type being indexed into as we walk components.
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned i = 0; i != n; ++i) {
    OffsetOfNode ON = E->getComponent(i);
    llvm::Value *Offset = nullptr;
    switch (ON.getKind()) {
    case OffsetOfNode::Array: {
      // Compute the index (may be a runtime value, hence the non-constant
      // path).
      Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
      llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
      bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
      Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");

      // Save the element type
      CurrentType =
          CGF.getContext().getAsArrayType(CurrentType)->getElementType();

      // Compute the element size
      llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
          CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());

      // Multiply out to compute the result
      Offset = Builder.CreateMul(Idx, ElemSize);
      break;
    }

    case OffsetOfNode::Field: {
      FieldDecl *MemberDecl = ON.getField();
      auto *RD = CurrentType->castAsRecordDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Compute the index of the field in its parent.
      unsigned i = 0;
      // FIXME: It would be nice if we didn't have to loop here!
      for (RecordDecl::field_iterator Field = RD->field_begin(),
                                      FieldEnd = RD->field_end();
           Field != FieldEnd; ++Field, ++i) {
        if (*Field == MemberDecl)
          break;
      }
      assert(i < RL.getFieldCount() && "offsetof field in wrong type");

      // Compute the offset to the field (record layout is in bits).
      int64_t OffsetInt = RL.getFieldOffset(i) /
                          CGF.getContext().getCharWidth();
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt);

      // Save the element type.
      CurrentType = MemberDecl->getType();
      break;
    }

      // NOTE(review): the case label for this unreachable (an
      // identifier/dependent component) is elided in this view.
      llvm_unreachable("dependent __builtin_offsetof");

    case OffsetOfNode::Base: {
      if (ON.getBase()->isVirtual()) {
        CGF.ErrorUnsupported(E, "virtual base in offsetof");
        continue;
      }

      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(
          CurrentType->castAsCanonical<RecordType>()->getDecl());

      // Save the element type.
      CurrentType = ON.getBase()->getType();

      // Compute the offset to the base.
      auto *BaseRD = CurrentType->castAsCXXRecordDecl();
      CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
      break;
    }
    }
    Result = Builder.CreateAdd(Result, Offset);
  }
  return Result;
}
3776
/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
/// argument of the sizeof expression as an integer.
///
/// Handles sizeof, __datasizeof, _Countof (including the VLA cases, which
/// produce runtime values), __builtin_omp_required_simd_align, and
/// __builtin_vectorelements; everything else constant-folds.
Value *
ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
                              const UnaryExprOrTypeTraitExpr *E) {
  QualType TypeToSize = E->getTypeOfArgument();
  if (auto Kind = E->getKind();
      Kind == UETT_SizeOf || Kind == UETT_DataSizeOf || Kind == UETT_CountOf) {
    if (const VariableArrayType *VAT =
            CGF.getContext().getAsVariableArrayType(TypeToSize)) {
      // For _Countof, we only want to evaluate if the extent is actually
      // variable as opposed to a multi-dimensional array whose extent is
      // constant but whose element type is variable.
      bool EvaluateExtent = true;
      if (Kind == UETT_CountOf && VAT->getElementType()->isArrayType()) {
        EvaluateExtent =
            !VAT->getSizeExpr()->isIntegerConstantExpr(CGF.getContext());
      }
      if (EvaluateExtent) {
        if (E->isArgumentType()) {
          // sizeof(type) - make sure to emit the VLA size.
          CGF.EmitVariablyModifiedType(TypeToSize);
        } else {
          // C99 6.5.3.4p2: If the argument is an expression of type
          // VLA, it is evaluated.
          // NOTE(review): the statement that evaluates the argument
          // expression is elided in this view.
        }

        // For _Countof, we just want to return the size of a single dimension.
        if (Kind == UETT_CountOf)
          return CGF.getVLAElements1D(VAT).NumElts;

        // For sizeof and __datasizeof, we need to scale the number of elements
        // by the size of the array element type.
        auto VlaSize = CGF.getVLASize(VAT);

        // Scale the number of non-VLA elements by the non-VLA element size.
        CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
        if (!eltSize.isOne())
          return CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize),
                                          VlaSize.NumElts);
        return VlaSize.NumElts;
      }
    }
  } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
    // NOTE(review): the intermediate call computing the required SIMD
    // alignment is elided in this view — confirm against the full source.
    auto Alignment =
        CGF.getContext()
            .getQuantity();
    return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
  } else if (E->getKind() == UETT_VectorElements) {
    auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));
    return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());
  }

  // If this isn't sizeof(vla), the result must be constant; use the constant
  // folding logic so we don't have to duplicate it here.
  return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
}
3837
3838Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3839 QualType PromotionType) {
3840 QualType promotionTy = PromotionType.isNull()
3841 ? getPromotionType(E->getSubExpr()->getType())
3842 : PromotionType;
3843 Value *result = VisitReal(E, promotionTy);
3844 if (result && !promotionTy.isNull())
3845 result = EmitUnPromotedValue(result, E->getType());
3846 return result;
3847}
3848
/// Emit __real on \p E, optionally computing in \p PromotionType.
/// For complex operands this projects out the real component; for scalar
/// operands __real is the identity.
Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
                                    QualType PromotionType) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (E->isGLValue()) {
      if (!PromotionType.isNull()) {
        // NOTE(review): the declaration of `result` (an EmitComplexExpr
        // call) is elided in this view — confirm against the full source.
            Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
        PromotionType = PromotionType->isAnyComplexType()
                            ? PromotionType
                            : CGF.getContext().getComplexType(PromotionType);
        return result.first ? CGF.EmitPromotedValue(result, PromotionType).first
                            : result.first;
      }

      return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
          .getScalarVal();
    }
    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, false, true).first;
  }

  // Scalar operand: __real x is just x (promoted if requested).
  if (!PromotionType.isNull())
    return CGF.EmitPromotedScalarExpr(Op, PromotionType);
  return Visit(Op);
}
3878
3879Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3880 QualType PromotionType) {
3881 QualType promotionTy = PromotionType.isNull()
3882 ? getPromotionType(E->getSubExpr()->getType())
3883 : PromotionType;
3884 Value *result = VisitImag(E, promotionTy);
3885 if (result && !promotionTy.isNull())
3886 result = EmitUnPromotedValue(result, E->getType());
3887 return result;
3888}
3889
/// Emit __imag on \p E, optionally computing in \p PromotionType.
/// For complex operands this projects out the imaginary component; for
/// scalar operands __imag is zero (though side effects are still emitted).
Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
                                    QualType PromotionType) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (Op->isGLValue()) {
      if (!PromotionType.isNull()) {
        // NOTE(review): the declaration of `result` (an EmitComplexExpr
        // call) is elided in this view — confirm against the full source.
            Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
        PromotionType = PromotionType->isAnyComplexType()
                            ? PromotionType
                            : CGF.getContext().getComplexType(PromotionType);
        return result.second
                   ? CGF.EmitPromotedValue(result, PromotionType).second
                   : result.second;
      }

      return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
          .getScalarVal();
    }
    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, true, false).second;
  }

  // __imag on a scalar returns zero. Emit the subexpr to ensure side
  // effects are evaluated, but not the actual value.
  if (Op->isGLValue())
    CGF.EmitLValue(Op);
  else if (!PromotionType.isNull())
    CGF.EmitPromotedScalarExpr(Op, PromotionType);
  else
    CGF.EmitScalarExpr(Op, true);
  if (!PromotionType.isNull())
    return llvm::Constant::getNullValue(ConvertType(PromotionType));
  return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
3928
3929//===----------------------------------------------------------------------===//
3930// Binary Operators
3931//===----------------------------------------------------------------------===//
3932
3933Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3934 QualType PromotionType) {
3935 return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");
3936}
3937
3938Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3939 QualType ExprType) {
3940 return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType), "unpromotion");
3941}
3942
3943Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3944 E = E->IgnoreParens();
3945 if (auto BO = dyn_cast<BinaryOperator>(E)) {
3946 switch (BO->getOpcode()) {
3947#define HANDLE_BINOP(OP) \
3948 case BO_##OP: \
3949 return Emit##OP(EmitBinOps(BO, PromotionType));
3950 HANDLE_BINOP(Add)
3951 HANDLE_BINOP(Sub)
3952 HANDLE_BINOP(Mul)
3953 HANDLE_BINOP(Div)
3954#undef HANDLE_BINOP
3955 default:
3956 break;
3957 }
3958 } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
3959 switch (UO->getOpcode()) {
3960 case UO_Imag:
3961 return VisitImag(UO, PromotionType);
3962 case UO_Real:
3963 return VisitReal(UO, PromotionType);
3964 case UO_Minus:
3965 return VisitMinus(UO, PromotionType);
3966 case UO_Plus:
3967 return VisitPlus(UO, PromotionType);
3968 default:
3969 break;
3970 }
3971 }
3972 auto result = Visit(const_cast<Expr *>(E));
3973 if (result) {
3974 if (!PromotionType.isNull())
3975 return EmitPromotedValue(result, PromotionType);
3976 else
3977 return EmitUnPromotedValue(result, E->getType());
3978 }
3979 return result;
3980}
3981
3982BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
3983 QualType PromotionType) {
3984 TestAndClearIgnoreResultAssign();
3985 BinOpInfo Result;
3986 Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);
3987 Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);
3988 if (!PromotionType.isNull())
3989 Result.Ty = PromotionType;
3990 else
3991 Result.Ty = E->getType();
3992 Result.Opcode = E->getOpcode();
3993 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3994 Result.E = E;
3995 return Result;
3996}
3997
3998LValue ScalarExprEmitter::EmitCompoundAssignLValue(
3999 const CompoundAssignOperator *E,
4000 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
4001 Value *&Result) {
4002 QualType LHSTy = E->getLHS()->getType();
4003 BinOpInfo OpInfo;
4004
4007
4008 // Emit the RHS first. __block variables need to have the rhs evaluated
4009 // first, plus this should improve codegen a little.
4010
4011 QualType PromotionTypeCR;
4012 PromotionTypeCR = getPromotionType(E->getComputationResultType());
4013 if (PromotionTypeCR.isNull())
4014 PromotionTypeCR = E->getComputationResultType();
4015 QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
4016 QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
4017 if (!PromotionTypeRHS.isNull())
4018 OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);
4019 else
4020 OpInfo.RHS = Visit(E->getRHS());
4021 OpInfo.Ty = PromotionTypeCR;
4022 OpInfo.Opcode = E->getOpcode();
4023 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
4024 OpInfo.E = E;
4025 // Load/convert the LHS.
4026 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4027
4028 llvm::PHINode *atomicPHI = nullptr;
4029 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
4030 QualType type = atomicTy->getValueType();
4031 if (!type->isBooleanType() && type->isIntegerType() &&
4032 !(type->isUnsignedIntegerType() &&
4033 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
4034 CGF.getLangOpts().getSignedOverflowBehavior() !=
4035 LangOptions::SOB_Trapping) {
4036 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
4037 llvm::Instruction::BinaryOps Op;
4038 switch (OpInfo.Opcode) {
4039 // We don't have atomicrmw operands for *, %, /, <<, >>
4040 case BO_MulAssign: case BO_DivAssign:
4041 case BO_RemAssign:
4042 case BO_ShlAssign:
4043 case BO_ShrAssign:
4044 break;
4045 case BO_AddAssign:
4046 AtomicOp = llvm::AtomicRMWInst::Add;
4047 Op = llvm::Instruction::Add;
4048 break;
4049 case BO_SubAssign:
4050 AtomicOp = llvm::AtomicRMWInst::Sub;
4051 Op = llvm::Instruction::Sub;
4052 break;
4053 case BO_AndAssign:
4054 AtomicOp = llvm::AtomicRMWInst::And;
4055 Op = llvm::Instruction::And;
4056 break;
4057 case BO_XorAssign:
4058 AtomicOp = llvm::AtomicRMWInst::Xor;
4059 Op = llvm::Instruction::Xor;
4060 break;
4061 case BO_OrAssign:
4062 AtomicOp = llvm::AtomicRMWInst::Or;
4063 Op = llvm::Instruction::Or;
4064 break;
4065 default:
4066 llvm_unreachable("Invalid compound assignment type");
4067 }
4068 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
4069 llvm::Value *Amt = CGF.EmitToMemory(
4070 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
4071 E->getExprLoc()),
4072 LHSTy);
4073
4074 llvm::AtomicRMWInst *OldVal =
4075 CGF.emitAtomicRMWInst(AtomicOp, LHSLV.getAddress(), Amt);
4076
4077 // Since operation is atomic, the result type is guaranteed to be the
4078 // same as the input in LLVM terms.
4079 Result = Builder.CreateBinOp(Op, OldVal, Amt);
4080 return LHSLV;
4081 }
4082 }
4083 // FIXME: For floating point types, we should be saving and restoring the
4084 // floating point environment in the loop.
4085 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
4086 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
4087 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
4088 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
4089 Builder.CreateBr(opBB);
4090 Builder.SetInsertPoint(opBB);
4091 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
4092 atomicPHI->addIncoming(OpInfo.LHS, startBB);
4093 OpInfo.LHS = atomicPHI;
4094 }
4095 else
4096 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
4097
4098 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
4099 SourceLocation Loc = E->getExprLoc();
4100 if (!PromotionTypeLHS.isNull())
4101 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
4102 E->getExprLoc());
4103 else
4104 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
4105 E->getComputationLHSType(), Loc);
4106
4107 // Expand the binary operator.
4108 Result = (this->*Func)(OpInfo);
4109
4110 // Convert the result back to the LHS type,
4111 // potentially with Implicit Conversion sanitizer check.
4112 // If LHSLV is a bitfield, use default ScalarConversionOpts
4113 // to avoid emit any implicit integer checks.
4114 Value *Previous = nullptr;
4115 if (LHSLV.isBitField()) {
4116 Previous = Result;
4117 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc);
4118 } else if (const auto *atomicTy = LHSTy->getAs<AtomicType>()) {
4119 Result =
4120 EmitScalarConversion(Result, PromotionTypeCR, atomicTy->getValueType(),
4121 Loc, ScalarConversionOpts(CGF.SanOpts));
4122 } else {
4123 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
4124 ScalarConversionOpts(CGF.SanOpts));
4125 }
4126
4127 if (atomicPHI) {
4128 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
4129 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
4130 auto Pair = CGF.EmitAtomicCompareExchange(
4131 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
4132 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
4133 llvm::Value *success = Pair.second;
4134 atomicPHI->addIncoming(old, curBlock);
4135 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
4136 Builder.SetInsertPoint(contBB);
4137 return LHSLV;
4138 }
4139
4140 // Store the result value into the LHS lvalue. Bit-fields are handled
4141 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
4142 // 'An assignment expression has the value of the left operand after the
4143 // assignment...'.
4144 if (LHSLV.isBitField()) {
4145 Value *Src = Previous ? Previous : Result;
4146 QualType SrcType = E->getRHS()->getType();
4147 QualType DstType = E->getLHS()->getType();
4149 CGF.EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
4150 LHSLV.getBitFieldInfo(), E->getExprLoc());
4151 } else
4153
4154 if (CGF.getLangOpts().OpenMP)
4156 E->getLHS());
4157 return LHSLV;
4158}
4159
4160Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
4161 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
4162 bool Ignore = TestAndClearIgnoreResultAssign();
4163 Value *RHS = nullptr;
4164 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
4165
4166 // If the result is clearly ignored, return now.
4167 if (Ignore)
4168 return nullptr;
4169
4170 // The result of an assignment in C is the assigned r-value.
4171 if (!CGF.getLangOpts().CPlusPlus)
4172 return RHS;
4173
4174 // If the lvalue is non-volatile, return the computed value of the assignment.
4175 if (!LHS.isVolatileQualified())
4176 return RHS;
4177
4178 // Otherwise, reload the value.
4179 return EmitLoadOfLValue(LHS, E->getExprLoc());
4180}
4181
4182void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
4183 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
4184 SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 2>
4185 Checks;
4186
4187 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
4188 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
4189 SanitizerKind::SO_IntegerDivideByZero));
4190 }
4191
4192 const auto *BO = cast<BinaryOperator>(Ops.E);
4193 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
4194 Ops.Ty->hasSignedIntegerRepresentation() &&
4195 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
4196 Ops.mayHaveIntegerOverflow() && !Ops.Ty.isWrapType()) {
4197 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
4198
4199 llvm::Value *IntMin =
4200 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
4201 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
4202
4203 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
4204 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
4205 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
4206 Checks.push_back(
4207 std::make_pair(NotOverflow, SanitizerKind::SO_SignedIntegerOverflow));
4208 }
4209
4210 if (Checks.size() > 0)
4211 EmitBinOpCheck(Checks, Ops);
4212}
4213
4214Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
4215 {
4216 SanitizerDebugLocation SanScope(&CGF,
4217 {SanitizerKind::SO_IntegerDivideByZero,
4218 SanitizerKind::SO_SignedIntegerOverflow,
4219 SanitizerKind::SO_FloatDivideByZero},
4220 SanitizerHandler::DivremOverflow);
4221 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
4222 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
4223 Ops.Ty->isIntegerType() &&
4224 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4225 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4226 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
4227 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
4228 Ops.Ty->isRealFloatingType() &&
4229 Ops.mayHaveFloatDivisionByZero()) {
4230 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4231 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
4232 EmitBinOpCheck(
4233 std::make_pair(NonZero, SanitizerKind::SO_FloatDivideByZero), Ops);
4234 }
4235 }
4236
4237 if (Ops.Ty->isConstantMatrixType()) {
4238 llvm::MatrixBuilder MB(Builder);
4239 // We need to check the types of the operands of the operator to get the
4240 // correct matrix dimensions.
4241 auto *BO = cast<BinaryOperator>(Ops.E);
4242 (void)BO;
4243 assert(
4245 "first operand must be a matrix");
4246 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
4247 "second operand must be an arithmetic type");
4248 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
4249 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
4250 Ops.Ty->hasUnsignedIntegerRepresentation());
4251 }
4252
4253 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
4254 llvm::Value *Val;
4255 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
4256 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
4257 CGF.SetDivFPAccuracy(Val);
4258 return Val;
4259 }
4260 else if (Ops.isFixedPointOp())
4261 return EmitFixedPointBinOp(Ops);
4262 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
4263 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
4264 else
4265 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
4266}
4267
4268Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
4269 // Rem in C can't be a floating point type: C99 6.5.5p2.
4270 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
4271 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
4272 Ops.Ty->isIntegerType() &&
4273 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4274 SanitizerDebugLocation SanScope(&CGF,
4275 {SanitizerKind::SO_IntegerDivideByZero,
4276 SanitizerKind::SO_SignedIntegerOverflow},
4277 SanitizerHandler::DivremOverflow);
4278 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4279 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
4280 }
4281
4282 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4283 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
4284
4285 if (CGF.getLangOpts().HLSL && Ops.Ty->hasFloatingRepresentation())
4286 return Builder.CreateFRem(Ops.LHS, Ops.RHS, "rem");
4287
4288 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
4289}
4290
4291Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
4292 unsigned IID;
4293 unsigned OpID = 0;
4294 SanitizerHandler OverflowKind;
4295
4296 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
4297 switch (Ops.Opcode) {
4298 case BO_Add:
4299 case BO_AddAssign:
4300 OpID = 1;
4301 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
4302 llvm::Intrinsic::uadd_with_overflow;
4303 OverflowKind = SanitizerHandler::AddOverflow;
4304 break;
4305 case BO_Sub:
4306 case BO_SubAssign:
4307 OpID = 2;
4308 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
4309 llvm::Intrinsic::usub_with_overflow;
4310 OverflowKind = SanitizerHandler::SubOverflow;
4311 break;
4312 case BO_Mul:
4313 case BO_MulAssign:
4314 OpID = 3;
4315 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
4316 llvm::Intrinsic::umul_with_overflow;
4317 OverflowKind = SanitizerHandler::MulOverflow;
4318 break;
4319 default:
4320 llvm_unreachable("Unsupported operation for overflow detection");
4321 }
4322 OpID <<= 1;
4323 if (isSigned)
4324 OpID |= 1;
4325
4326 SanitizerDebugLocation SanScope(&CGF,
4327 {SanitizerKind::SO_SignedIntegerOverflow,
4328 SanitizerKind::SO_UnsignedIntegerOverflow},
4329 OverflowKind);
4330 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
4331
4332 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
4333
4334 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
4335 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
4336 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
4337
4338 // Handle overflow with llvm.trap if no custom handler has been specified.
4339 const std::string *handlerName =
4341 if (handlerName->empty()) {
4342 // If no -ftrapv handler has been specified, try to use sanitizer runtimes
4343 // if available otherwise just emit a trap. It is possible for unsigned
4344 // arithmetic to result in a trap due to the OverflowBehaviorType attribute
4345 // which describes overflow behavior on a per-type basis.
4346 if (isSigned) {
4347 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
4348 llvm::Value *NotOf = Builder.CreateNot(overflow);
4349 EmitBinOpCheck(
4350 std::make_pair(NotOf, SanitizerKind::SO_SignedIntegerOverflow),
4351 Ops);
4352 } else
4353 CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
4354 return result;
4355 }
4356 if (CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
4357 llvm::Value *NotOf = Builder.CreateNot(overflow);
4358 EmitBinOpCheck(
4359 std::make_pair(NotOf, SanitizerKind::SO_UnsignedIntegerOverflow),
4360 Ops);
4361 } else
4362 CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
4363 return result;
4364 }
4365
4366 // Branch in case of overflow.
4367 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
4368 llvm::BasicBlock *continueBB =
4369 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
4370 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
4371
4372 Builder.CreateCondBr(overflow, overflowBB, continueBB);
4373
4374 // If an overflow handler is set, then we want to call it and then use its
4375 // result, if it returns.
4376 Builder.SetInsertPoint(overflowBB);
4377
4378 // Get the overflow handler.
4379 llvm::Type *Int8Ty = CGF.Int8Ty;
4380 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
4381 llvm::FunctionType *handlerTy =
4382 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
4383 llvm::FunctionCallee handler =
4384 CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
4385
4386 // Sign extend the args to 64-bit, so that we can use the same handler for
4387 // all types of overflow.
4388 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
4389 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
4390
4391 // Call the handler with the two arguments, the operation, and the size of
4392 // the result.
4393 llvm::Value *handlerArgs[] = {
4394 lhs,
4395 rhs,
4396 Builder.getInt8(OpID),
4397 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
4398 };
4399 llvm::Value *handlerResult =
4400 CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
4401
4402 // Truncate the result back to the desired size.
4403 handlerResult = Builder.CreateTrunc(handlerResult, opTy);
4404 Builder.CreateBr(continueBB);
4405
4406 Builder.SetInsertPoint(continueBB);
4407 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
4408 phi->addIncoming(result, initialBB);
4409 phi->addIncoming(handlerResult, overflowBB);
4410
4411 return phi;
4412}
4413
4414/// BO_Add/BO_Sub are handled by EmitPointerWithAlignment to preserve alignment
4415/// information.
4416/// This function is used for BO_AddAssign/BO_SubAssign.
4417static Value *emitPointerArithmetic(CodeGenFunction &CGF, const BinOpInfo &op,
4418 bool isSubtraction) {
4419 // Must have binary (not unary) expr here. Unary pointer
4420 // increment/decrement doesn't use this path.
4422
4423 Value *pointer = op.LHS;
4424 Expr *pointerOperand = expr->getLHS();
4425 Value *index = op.RHS;
4426 Expr *indexOperand = expr->getRHS();
4427
4428 // In a subtraction, the LHS is always the pointer.
4429 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
4430 std::swap(pointer, index);
4431 std::swap(pointerOperand, indexOperand);
4432 }
4433
4434 return CGF.EmitPointerArithmetic(expr, pointerOperand, pointer, indexOperand,
4435 index, isSubtraction);
4436}
4437
4438/// Emit pointer + index arithmetic.
4440 const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer,
4441 Expr *indexOperand, llvm::Value *index, bool isSubtraction) {
4442 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
4443
4444 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
4445 auto &DL = CGM.getDataLayout();
4446 auto *PtrTy = cast<llvm::PointerType>(pointer->getType());
4447
4448 // Some versions of glibc and gcc use idioms (particularly in their malloc
4449 // routines) that add a pointer-sized integer (known to be a pointer value)
4450 // to a null pointer in order to cast the value back to an integer or as
4451 // part of a pointer alignment algorithm. This is undefined behavior, but
4452 // we'd like to be able to compile programs that use it.
4453 //
4454 // Normally, we'd generate a GEP with a null-pointer base here in response
4455 // to that code, but it's also UB to dereference a pointer created that
4456 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
4457 // generate a direct cast of the integer value to a pointer.
4458 //
4459 // The idiom (p = nullptr + N) is not met if any of the following are true:
4460 //
4461 // The operation is subtraction.
4462 // The index is not pointer-sized.
4463 // The pointer type is not byte-sized.
4464 //
4465 // Note that we do not suppress the pointer overflow check in this case.
4467 getContext(), BO->getOpcode(), pointerOperand, indexOperand)) {
4468 llvm::Value *Ptr = Builder.CreateIntToPtr(index, pointer->getType());
4469 if (getLangOpts().PointerOverflowDefined ||
4470 !SanOpts.has(SanitizerKind::PointerOverflow) ||
4471 NullPointerIsDefined(Builder.GetInsertBlock()->getParent(),
4472 PtrTy->getPointerAddressSpace()))
4473 return Ptr;
4474 // The inbounds GEP of null is valid iff the index is zero.
4475 auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
4476 auto CheckHandler = SanitizerHandler::PointerOverflow;
4477 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
4478 llvm::Value *IsZeroIndex = Builder.CreateIsNull(index);
4479 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(BO->getExprLoc())};
4480 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
4481 llvm::Value *IntPtr = llvm::Constant::getNullValue(IntPtrTy);
4482 llvm::Value *ComputedGEP = Builder.CreateZExtOrTrunc(index, IntPtrTy);
4483 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
4484 EmitCheck({{IsZeroIndex, CheckOrdinal}}, CheckHandler, StaticArgs,
4485 DynamicArgs);
4486 return Ptr;
4487 }
4488
4489 if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
4490 // Zero-extend or sign-extend the pointer value according to
4491 // whether the index is signed or not.
4492 index = Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
4493 "idx.ext");
4494 }
4495
4496 // If this is subtraction, negate the index.
4497 if (isSubtraction)
4498 index = Builder.CreateNeg(index, "idx.neg");
4499
4500 if (SanOpts.has(SanitizerKind::ArrayBounds))
4501 EmitBoundsCheck(BO, pointerOperand, index, indexOperand->getType(),
4502 /*Accessed*/ false);
4503
4504 const PointerType *pointerType =
4505 pointerOperand->getType()->getAs<PointerType>();
4506 if (!pointerType) {
4507 QualType objectType = pointerOperand->getType()
4509 ->getPointeeType();
4510 llvm::Value *objectSize =
4511 CGM.getSize(getContext().getTypeSizeInChars(objectType));
4512
4513 index = Builder.CreateMul(index, objectSize);
4514
4515 llvm::Value *result = Builder.CreateGEP(Int8Ty, pointer, index, "add.ptr");
4516 return Builder.CreateBitCast(result, pointer->getType());
4517 }
4518
4519 QualType elementType = pointerType->getPointeeType();
4520 if (const VariableArrayType *vla =
4521 getContext().getAsVariableArrayType(elementType)) {
4522 // The element count here is the total number of non-VLA elements.
4523 llvm::Value *numElements = getVLASize(vla).NumElts;
4524
4525 // Effectively, the multiply by the VLA size is part of the GEP.
4526 // GEP indexes are signed, and scaling an index isn't permitted to
4527 // signed-overflow, so we use the same semantics for our explicit
4528 // multiply. We suppress this if overflow is not undefined behavior.
4529 llvm::Type *elemTy = ConvertTypeForMem(vla->getElementType());
4530 if (getLangOpts().PointerOverflowDefined) {
4531 index = Builder.CreateMul(index, numElements, "vla.index");
4532 pointer = Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
4533 } else {
4534 index = Builder.CreateNSWMul(index, numElements, "vla.index");
4535 pointer =
4536 EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
4537 isSubtraction, BO->getExprLoc(), "add.ptr");
4538 }
4539 return pointer;
4540 }
4541
4542 // Explicitly handle GNU void* and function pointer arithmetic extensions. The
4543 // GNU void* casts amount to no-ops since our void* type is i8*, but this is
4544 // future proof.
4545 llvm::Type *elemTy;
4546 if (elementType->isVoidType() || elementType->isFunctionType())
4547 elemTy = Int8Ty;
4548 else
4549 elemTy = ConvertTypeForMem(elementType);
4550
4551 if (getLangOpts().PointerOverflowDefined)
4552 return Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
4553
4554 return EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned, isSubtraction,
4555 BO->getExprLoc(), "add.ptr");
4556}
4557
4558// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
4559// Addend. Use negMul and negAdd to negate the first operand of the Mul or
4560// the add operand respectively. This allows fmuladd to represent a*b-c, or
4561// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
4562// efficient operations.
4563static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
4564 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4565 bool negMul, bool negAdd) {
4566 Value *MulOp0 = MulOp->getOperand(0);
4567 Value *MulOp1 = MulOp->getOperand(1);
4568 if (negMul)
4569 MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
4570 if (negAdd)
4571 Addend = Builder.CreateFNeg(Addend, "neg");
4572
4573 Value *FMulAdd = nullptr;
4574 if (Builder.getIsFPConstrained()) {
4575 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
4576 "Only constrained operation should be created when Builder is in FP "
4577 "constrained mode");
4578 FMulAdd = Builder.CreateConstrainedFPCall(
4579 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
4580 Addend->getType()),
4581 {MulOp0, MulOp1, Addend});
4582 } else {
4583 FMulAdd = Builder.CreateCall(
4584 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
4585 {MulOp0, MulOp1, Addend});
4586 }
4587 MulOp->eraseFromParent();
4588
4589 return FMulAdd;
4590}
4591
4592// Check whether it would be legal to emit an fmuladd intrinsic call to
4593// represent op and if so, build the fmuladd.
4594//
4595// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
4596// Does NOT check the type of the operation - it's assumed that this function
4597// will be called from contexts where it's known that the type is contractable.
static Value* tryEmitFMulAdd(const BinOpInfo &op,
                      const CodeGenFunction &CGF, CGBuilderTy &Builder,
                      bool isSub=false) {

  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
         "Only fadd/fsub can be the root of an fmuladd.");

  // Check whether this op is marked as fusable.
  if (!op.FPFeatures.allowFPContractWithinStatement())
    return nullptr;

  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  // Peek through fneg to look for fmul. Make sure fneg has no users, and that
  // it is the only use of its operand.
  bool NegLHS = false;
  if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
    if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
      LHS = LHSUnOp->getOperand(0);
      NegLHS = true;
    }
  }

  bool NegRHS = false;
  if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
    if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
      RHS = RHSUnOp->getOperand(0);
      NegRHS = true;
    }
  }

  // We have a potentially fusable op. Look for a mul on one of the operands.
  // Also, make sure that the mul result isn't used directly. In that case,
  // there's no point creating a muladd operation.
  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      // (a*b) + c, or (-a*b) + c  ->  fmuladd; isSub negates the addend.
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      // c +/- (a*b): the product's sign flips when the mul is on the RHS of a
      // subtraction or was fneg'd, hence isSub ^ NegRHS on the mul operand.
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
    }
  }

  // Under FP-constrained mode the multiply shows up as a call to the
  // experimental constrained fmul intrinsic rather than a BinaryOperator;
  // handle the same two positions again for that form.
  if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
    if (LHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
    if (RHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
    }
  }

  // No fusable multiply found; caller emits a plain fadd/fsub.
  return nullptr;
}
4678
4679Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
4680 if (op.LHS->getType()->isPointerTy() ||
4681 op.RHS->getType()->isPointerTy())
4683
4684 if (op.Ty->isSignedIntegerOrEnumerationType() ||
4685 op.Ty->isUnsignedIntegerType()) {
4686 const bool isSigned = op.Ty->isSignedIntegerOrEnumerationType();
4687 const bool hasSan =
4688 isSigned ? CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)
4689 : CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow);
4690 switch (getOverflowBehaviorConsideringType(CGF, op.Ty)) {
4691 case LangOptions::OB_Wrap:
4692 return Builder.CreateAdd(op.LHS, op.RHS, "add");
4693 case LangOptions::OB_SignedAndDefined:
4694 if (!hasSan)
4695 return Builder.CreateAdd(op.LHS, op.RHS, "add");
4696 [[fallthrough]];
4697 case LangOptions::OB_Unset:
4698 if (!hasSan)
4699 return isSigned ? Builder.CreateNSWAdd(op.LHS, op.RHS, "add")
4700 : Builder.CreateAdd(op.LHS, op.RHS, "add");
4701 [[fallthrough]];
4702 case LangOptions::OB_Trap:
4703 if (CanElideOverflowCheck(CGF.getContext(), op))
4704 return isSigned ? Builder.CreateNSWAdd(op.LHS, op.RHS, "add")
4705 : Builder.CreateAdd(op.LHS, op.RHS, "add");
4706 return EmitOverflowCheckedBinOp(op);
4707 }
4708 }
4709
4710 // For vector and matrix adds, try to fold into a fmuladd.
4711 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4712 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4713 // Try to form an fmuladd.
4714 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
4715 return FMulAdd;
4716 }
4717
4718 if (op.Ty->isConstantMatrixType()) {
4719 llvm::MatrixBuilder MB(Builder);
4720 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4721 return MB.CreateAdd(op.LHS, op.RHS);
4722 }
4723
4724 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4725 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4726 return Builder.CreateFAdd(op.LHS, op.RHS, "add");
4727 }
4728
4729 if (op.isFixedPointOp())
4730 return EmitFixedPointBinOp(op);
4731
4732 return Builder.CreateAdd(op.LHS, op.RHS, "add");
4733}
4734
4735/// The resulting value must be calculated with exact precision, so the operands
4736/// may not be the same type.
4737Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
4738 using llvm::APSInt;
4739 using llvm::ConstantInt;
4740
4741 // This is either a binary operation where at least one of the operands is
4742 // a fixed-point type, or a unary operation where the operand is a fixed-point
4743 // type. The result type of a binary operation is determined by
4744 // Sema::handleFixedPointConversions().
4745 QualType ResultTy = op.Ty;
4746 QualType LHSTy, RHSTy;
4747 if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
4748 RHSTy = BinOp->getRHS()->getType();
4749 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
4750 // For compound assignment, the effective type of the LHS at this point
4751 // is the computation LHS type, not the actual LHS type, and the final
4752 // result type is not the type of the expression but rather the
4753 // computation result type.
4754 LHSTy = CAO->getComputationLHSType();
4755 ResultTy = CAO->getComputationResultType();
4756 } else
4757 LHSTy = BinOp->getLHS()->getType();
4758 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
4759 LHSTy = UnOp->getSubExpr()->getType();
4760 RHSTy = UnOp->getSubExpr()->getType();
4761 }
4762 ASTContext &Ctx = CGF.getContext();
4763 Value *LHS = op.LHS;
4764 Value *RHS = op.RHS;
4765
4766 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
4767 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
4768 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
4769 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
4770
4771 // Perform the actual operation.
4772 Value *Result;
4773 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
4774 switch (op.Opcode) {
4775 case BO_AddAssign:
4776 case BO_Add:
4777 Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
4778 break;
4779 case BO_SubAssign:
4780 case BO_Sub:
4781 Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
4782 break;
4783 case BO_MulAssign:
4784 case BO_Mul:
4785 Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
4786 break;
4787 case BO_DivAssign:
4788 case BO_Div:
4789 Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
4790 break;
4791 case BO_ShlAssign:
4792 case BO_Shl:
4793 Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
4794 break;
4795 case BO_ShrAssign:
4796 case BO_Shr:
4797 Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
4798 break;
4799 case BO_LT:
4800 return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
4801 case BO_GT:
4802 return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
4803 case BO_LE:
4804 return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4805 case BO_GE:
4806 return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4807 case BO_EQ:
4808 // For equality operations, we assume any padding bits on unsigned types are
4809 // zero'd out. They could be overwritten through non-saturating operations
4810 // that cause overflow, but this leads to undefined behavior.
4811 return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
4812 case BO_NE:
4813 return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4814 case BO_Cmp:
4815 case BO_LAnd:
4816 case BO_LOr:
4817 llvm_unreachable("Found unimplemented fixed point binary operation");
4818 case BO_PtrMemD:
4819 case BO_PtrMemI:
4820 case BO_Rem:
4821 case BO_Xor:
4822 case BO_And:
4823 case BO_Or:
4824 case BO_Assign:
4825 case BO_RemAssign:
4826 case BO_AndAssign:
4827 case BO_XorAssign:
4828 case BO_OrAssign:
4829 case BO_Comma:
4830 llvm_unreachable("Found unsupported binary operation for fixed point types.");
4831 }
4832
4833 bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
4835 // Convert to the result type.
4836 return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
4837 : CommonFixedSema,
4838 ResultFixedSema);
4839}
4840
4841Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
4842 // The LHS is always a pointer if either side is.
4843 if (!op.LHS->getType()->isPointerTy()) {
4844 if (op.Ty->isSignedIntegerOrEnumerationType() ||
4845 op.Ty->isUnsignedIntegerType()) {
4846 const bool isSigned = op.Ty->isSignedIntegerOrEnumerationType();
4847 const bool hasSan =
4848 isSigned ? CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)
4849 : CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow);
4850 switch (getOverflowBehaviorConsideringType(CGF, op.Ty)) {
4851 case LangOptions::OB_Wrap:
4852 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4853 case LangOptions::OB_SignedAndDefined:
4854 if (!hasSan)
4855 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4856 [[fallthrough]];
4857 case LangOptions::OB_Unset:
4858 if (!hasSan)
4859 return isSigned ? Builder.CreateNSWSub(op.LHS, op.RHS, "sub")
4860 : Builder.CreateSub(op.LHS, op.RHS, "sub");
4861 [[fallthrough]];
4862 case LangOptions::OB_Trap:
4863 if (CanElideOverflowCheck(CGF.getContext(), op))
4864 return isSigned ? Builder.CreateNSWSub(op.LHS, op.RHS, "sub")
4865 : Builder.CreateSub(op.LHS, op.RHS, "sub");
4866 return EmitOverflowCheckedBinOp(op);
4867 }
4868 }
4869
4870 // For vector and matrix subs, try to fold into a fmuladd.
4871 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4872 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4873 // Try to form an fmuladd.
4874 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
4875 return FMulAdd;
4876 }
4877
4878 if (op.Ty->isConstantMatrixType()) {
4879 llvm::MatrixBuilder MB(Builder);
4880 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4881 return MB.CreateSub(op.LHS, op.RHS);
4882 }
4883
4884 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4885 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4886 return Builder.CreateFSub(op.LHS, op.RHS, "sub");
4887 }
4888
4889 if (op.isFixedPointOp())
4890 return EmitFixedPointBinOp(op);
4891
4892 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4893 }
4894
4895 // If the RHS is not a pointer, then we have normal pointer
4896 // arithmetic.
4897 if (!op.RHS->getType()->isPointerTy())
4899
4900 // Otherwise, this is a pointer subtraction.
4901
4902 // Do the raw subtraction part.
4903 llvm::Value *LHS
4904 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
4905 llvm::Value *RHS
4906 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
4907 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
4908
4909 // Okay, figure out the element size.
4910 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
4911 QualType elementType = expr->getLHS()->getType()->getPointeeType();
4912
4913 llvm::Value *divisor = nullptr;
4914
4915 // For a variable-length array, this is going to be non-constant.
4916 if (const VariableArrayType *vla
4917 = CGF.getContext().getAsVariableArrayType(elementType)) {
4918 auto VlaSize = CGF.getVLASize(vla);
4919 elementType = VlaSize.Type;
4920 divisor = VlaSize.NumElts;
4921
4922 // Scale the number of non-VLA elements by the non-VLA element size.
4923 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
4924 if (!eltSize.isOne())
4925 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
4926
4927 // For everything elese, we can just compute it, safe in the
4928 // assumption that Sema won't let anything through that we can't
4929 // safely compute the size of.
4930 } else {
4931 CharUnits elementSize;
4932 // Handle GCC extension for pointer arithmetic on void* and
4933 // function pointer types.
4934 if (elementType->isVoidType() || elementType->isFunctionType())
4935 elementSize = CharUnits::One();
4936 else
4937 elementSize = CGF.getContext().getTypeSizeInChars(elementType);
4938
4939 // Don't even emit the divide for element size of 1.
4940 if (elementSize.isOne())
4941 return diffInChars;
4942
4943 divisor = CGF.CGM.getSize(elementSize);
4944 }
4945
4946 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
4947 // pointer difference in C is only defined in the case where both operands
4948 // are pointing to elements of an array.
4949 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
4950}
4951
4952Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
4953 bool RHSIsSigned) {
4954 llvm::IntegerType *Ty;
4955 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4956 Ty = cast<llvm::IntegerType>(VT->getElementType());
4957 else
4958 Ty = cast<llvm::IntegerType>(LHS->getType());
4959 // For a given type of LHS the maximum shift amount is width(LHS)-1, however
4960 // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
4961 // this in ConstantInt::get, this results in the value getting truncated.
4962 // Constrain the return value to be max(RHS) in this case.
4963 llvm::Type *RHSTy = RHS->getType();
4964 llvm::APInt RHSMax =
4965 RHSIsSigned ? llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())
4966 : llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
4967 if (RHSMax.ult(Ty->getBitWidth()))
4968 return llvm::ConstantInt::get(RHSTy, RHSMax);
4969 return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
4970}
4971
4972Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
4973 const Twine &Name) {
4974 llvm::IntegerType *Ty;
4975 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4976 Ty = cast<llvm::IntegerType>(VT->getElementType());
4977 else
4978 Ty = cast<llvm::IntegerType>(LHS->getType());
4979
4980 if (llvm::isPowerOf2_64(Ty->getBitWidth()))
4981 return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);
4982
4983 return Builder.CreateURem(
4984 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
4985}
4986
4987Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
4988 // TODO: This misses out on the sanitizer check below.
4989 if (Ops.isFixedPointOp())
4990 return EmitFixedPointBinOp(Ops);
4991
4992 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4993 // RHS to the same size as the LHS.
4994 Value *RHS = Ops.RHS;
4995 if (Ops.LHS->getType() != RHS->getType())
4996 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4997
4998 bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
4999 Ops.Ty->hasSignedIntegerRepresentation() &&
5001 !CGF.getLangOpts().CPlusPlus20;
5002 bool SanitizeUnsignedBase =
5003 CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
5004 Ops.Ty->hasUnsignedIntegerRepresentation();
5005 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
5006 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
5007 // OpenCL 6.3j: shift values are effectively % word size of LHS.
5008 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
5009 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
5010 else if ((SanitizeBase || SanitizeExponent) &&
5011 isa<llvm::IntegerType>(Ops.LHS->getType())) {
5012 SmallVector<SanitizerKind::SanitizerOrdinal, 3> Ordinals;
5013 if (SanitizeSignedBase)
5014 Ordinals.push_back(SanitizerKind::SO_ShiftBase);
5015 if (SanitizeUnsignedBase)
5016 Ordinals.push_back(SanitizerKind::SO_UnsignedShiftBase);
5017 if (SanitizeExponent)
5018 Ordinals.push_back(SanitizerKind::SO_ShiftExponent);
5019
5020 SanitizerDebugLocation SanScope(&CGF, Ordinals,
5021 SanitizerHandler::ShiftOutOfBounds);
5022 SmallVector<std::pair<Value *, SanitizerKind::SanitizerOrdinal>, 2> Checks;
5023 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
5024 llvm::Value *WidthMinusOne =
5025 GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);
5026 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
5027
5028 if (SanitizeExponent) {
5029 Checks.push_back(
5030 std::make_pair(ValidExponent, SanitizerKind::SO_ShiftExponent));
5031 }
5032
5033 if (SanitizeBase) {
5034 // Check whether we are shifting any non-zero bits off the top of the
5035 // integer. We only emit this check if exponent is valid - otherwise
5036 // instructions below will have undefined behavior themselves.
5037 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
5038 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
5039 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
5040 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
5041 llvm::Value *PromotedWidthMinusOne =
5042 (RHS == Ops.RHS) ? WidthMinusOne
5043 : GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);
5044 CGF.EmitBlock(CheckShiftBase);
5045 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
5046 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
5047 /*NUW*/ true, /*NSW*/ true),
5048 "shl.check");
5049 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
5050 // In C99, we are not permitted to shift a 1 bit into the sign bit.
5051 // Under C++11's rules, shifting a 1 bit into the sign bit is
5052 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
5053 // define signed left shifts, so we use the C99 and C++11 rules there).
5054 // Unsigned shifts can always shift into the top bit.
5055 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
5056 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
5057 }
5058 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
5059 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
5060 CGF.EmitBlock(Cont);
5061 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
5062 BaseCheck->addIncoming(Builder.getTrue(), Orig);
5063 BaseCheck->addIncoming(ValidBase, CheckShiftBase);
5064 Checks.push_back(std::make_pair(
5065 BaseCheck, SanitizeSignedBase ? SanitizerKind::SO_ShiftBase
5066 : SanitizerKind::SO_UnsignedShiftBase));
5067 }
5068
5069 assert(!Checks.empty());
5070 EmitBinOpCheck(Checks, Ops);
5071 }
5072
5073 return Builder.CreateShl(Ops.LHS, RHS, "shl");
5074}
5075
5076Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
5077 // TODO: This misses out on the sanitizer check below.
5078 if (Ops.isFixedPointOp())
5079 return EmitFixedPointBinOp(Ops);
5080
5081 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
5082 // RHS to the same size as the LHS.
5083 Value *RHS = Ops.RHS;
5084 if (Ops.LHS->getType() != RHS->getType())
5085 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
5086
5087 // OpenCL 6.3j: shift values are effectively % word size of LHS.
5088 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
5089 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
5090 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
5091 isa<llvm::IntegerType>(Ops.LHS->getType())) {
5092 SanitizerDebugLocation SanScope(&CGF, {SanitizerKind::SO_ShiftExponent},
5093 SanitizerHandler::ShiftOutOfBounds);
5094 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
5095 llvm::Value *Valid = Builder.CreateICmpULE(
5096 Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));
5097 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::SO_ShiftExponent), Ops);
5098 }
5099
5100 if (Ops.Ty->hasUnsignedIntegerRepresentation())
5101 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
5102 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
5103}
5104
5106// return corresponding comparison intrinsic for given vector type
5107static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
5108 BuiltinType::Kind ElemKind) {
5109 switch (ElemKind) {
5110 default: llvm_unreachable("unexpected element type");
5111 case BuiltinType::Char_U:
5112 case BuiltinType::UChar:
5113 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
5114 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
5115 case BuiltinType::Char_S:
5116 case BuiltinType::SChar:
5117 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
5118 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
5119 case BuiltinType::UShort:
5120 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
5121 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
5122 case BuiltinType::Short:
5123 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
5124 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
5125 case BuiltinType::UInt:
5126 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
5127 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
5128 case BuiltinType::Int:
5129 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
5130 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
5131 case BuiltinType::ULong:
5132 case BuiltinType::ULongLong:
5133 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
5134 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
5135 case BuiltinType::Long:
5136 case BuiltinType::LongLong:
5137 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
5138 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
5139 case BuiltinType::Float:
5140 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
5141 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
5142 case BuiltinType::Double:
5143 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
5144 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
5145 case BuiltinType::UInt128:
5146 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
5147 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
5148 case BuiltinType::Int128:
5149 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
5150 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
5151 }
5152}
5153
// Shared worker for the relational/equality visitors: emits a scalar
// comparison, choosing between the unsigned-int, signed-int and floating
// predicates, and handling member pointers, AltiVec predicate comparisons,
// fixed-point operands, and complex equality.
// NOTE(review): several hyperlinked lines are elided in this rendering of
// the file (visible as jumps in the embedded line numbers); confirm the
// elided statements against the upstream source before relying on them.
5154Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
5155                                      llvm::CmpInst::Predicate UICmpOpc,
5156                                      llvm::CmpInst::Predicate SICmpOpc,
5157                                      llvm::CmpInst::Predicate FCmpOpc,
5158                                      bool IsSignaling) {
5159  TestAndClearIgnoreResultAssign();
5160  Value *Result;
5161  QualType LHSTy = E->getLHS()->getType();
5162  QualType RHSTy = E->getRHS()->getType();
  // Member pointers only support == and !=; delegate to the C++ ABI.
5163  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
5164    assert(E->getOpcode() == BO_EQ ||
5165           E->getOpcode() == BO_NE);
5166    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
5167    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
  // NOTE(review): the return statement invoking the ABI's member-pointer
  // comparison (original line 5168) is elided in this rendering.
5169        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
5170  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
5171    BinOpInfo BOInfo = EmitBinOps(E);
5172    Value *LHS = BOInfo.LHS;
5173    Value *RHS = BOInfo.RHS;
5174
5175    // If AltiVec, the comparison results in a numeric type, so we use
5176    // intrinsics comparing vectors and giving 0 or 1 as a result
5177    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
5178      // constants for mapping CR6 register bits to predicate result
5179      enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
5180
5181      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
5182
5183      // in several cases vector arguments order will be reversed
5184      Value *FirstVecArg = LHS,
5185            *SecondVecArg = RHS;
5186
5187      QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
5188      BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
5189
  // Map each C comparison onto an AltiVec predicate intrinsic plus the CR6
  // bit to test; <= and >= on floats use vcmpgefp directly, integers are
  // expressed via negated > with swapped operands.
5190      switch(E->getOpcode()) {
5191      default: llvm_unreachable("is not a comparison operation");
5192      case BO_EQ:
5193        CR6 = CR6_LT;
5194        ID = GetIntrinsic(VCMPEQ, ElementKind);
5195        break;
5196      case BO_NE:
5197        CR6 = CR6_EQ;
5198        ID = GetIntrinsic(VCMPEQ, ElementKind);
5199        break;
5200      case BO_LT:
5201        CR6 = CR6_LT;
5202        ID = GetIntrinsic(VCMPGT, ElementKind);
5203        std::swap(FirstVecArg, SecondVecArg);
5204        break;
5205      case BO_GT:
5206        CR6 = CR6_LT;
5207        ID = GetIntrinsic(VCMPGT, ElementKind);
5208        break;
5209      case BO_LE:
5210        if (ElementKind == BuiltinType::Float) {
5211          CR6 = CR6_LT;
5212          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
5213          std::swap(FirstVecArg, SecondVecArg);
5214        }
5215        else {
5216          CR6 = CR6_EQ;
5217          ID = GetIntrinsic(VCMPGT, ElementKind);
5218        }
5219        break;
5220      case BO_GE:
5221        if (ElementKind == BuiltinType::Float) {
5222          CR6 = CR6_LT;
5223          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
5224        }
5225        else {
5226          CR6 = CR6_EQ;
5227          ID = GetIntrinsic(VCMPGT, ElementKind);
5228          std::swap(FirstVecArg, SecondVecArg);
5229        }
5230        break;
5231      }
5232
5233      Value *CR6Param = Builder.getInt32(CR6);
5234      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
5235      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
5236
5237      // The result type of intrinsic may not be same as E->getType().
5238      // If E->getType() is not BoolTy, EmitScalarConversion will do the
5239      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
5240      // do nothing, if ResultTy is not i1 at the same time, it will cause
5241      // crash later.
5242      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
5243      if (ResultTy->getBitWidth() > 1 &&
5244          E->getType() == CGF.getContext().BoolTy)
5245        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
5246      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
5247                                  E->getExprLoc());
5248    }
5249
5250    if (BOInfo.isFixedPointOp()) {
5251      Result = EmitFixedPointBinOp(BOInfo);
5252    } else if (LHS->getType()->isFPOrFPVectorTy()) {
5253      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
5254      if (!IsSignaling)
5255        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
5256      else
5257        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
5258    } else if (LHSTy->hasSignedIntegerRepresentation()) {
5259      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
5260    } else {
5261      // Unsigned integers and pointers.
5262
  // NOTE(review): the remaining operands of this condition (original lines
  // 5264-5265, presumably null-constant tests on LHS/RHS) are elided in
  // this rendering.
5263      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
5266
5267        // Dynamic information is required to be stripped for comparisons,
5268        // because it could leak the dynamic information.  Based on comparisons
5269        // of pointers to dynamic objects, the optimizer can replace one pointer
5270        // with another, which might be incorrect in presence of invariant
5271        // groups. Comparison with null is safe because null does not carry any
5272        // dynamic information.
5273        if (LHSTy.mayBeDynamicClass())
5274          LHS = Builder.CreateStripInvariantGroup(LHS);
5275        if (RHSTy.mayBeDynamicClass())
5276          RHS = Builder.CreateStripInvariantGroup(RHS);
5277      }
5278
5279      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
5280    }
5281
5282    // If this is a vector comparison, sign extend the result to the appropriate
5283    // vector integer type and return it (don't convert to bool).
5284    if (LHSTy->isVectorType())
5285      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
5286
5287  } else {
5288    // Complex Comparison: can only be an equality comparison.
  // NOTE(review): the declaration of the complex pair values LHS/RHS
  // (original line 5289) is elided in this rendering.
5290    QualType CETy;
5291    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
5292      LHS = CGF.EmitComplexExpr(E->getLHS());
5293      CETy = CTy->getElementType();
5294    } else {
5295      LHS.first = Visit(E->getLHS());
5296      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
5297      CETy = LHSTy;
5298    }
5299    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
5300      RHS = CGF.EmitComplexExpr(E->getRHS());
5301      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
5302                                                     CTy->getElementType()) &&
5303             "The element types must always match.");
5304      (void)CTy;
5305    } else {
5306      RHS.first = Visit(E->getRHS());
5307      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
5308      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
5309             "The element types must always match.");
5310    }
5311
5312    Value *ResultR, *ResultI;
5313    if (CETy->isRealFloatingType()) {
5314      // As complex comparisons can only be equality comparisons, they
5315      // are never signaling comparisons.
5316      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
5317      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
5318    } else {
5319      // Complex comparisons can only be equality comparisons.  As such, signed
5320      // and unsigned opcodes are the same.
5321      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
5322      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
5323    }
5324
  // (a+bi) == (c+di) iff both parts match; != iff either part differs.
5325    if (E->getOpcode() == BO_EQ) {
5326      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
5327    } else {
5328      assert(E->getOpcode() == BO_NE &&
5329             "Complex comparison other than == or != ?");
5330      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
5331    }
5332  }
5333
5334  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
5335                              E->getExprLoc());
5336}
5337
// NOTE(review): the declarator line of this function (original line 5338)
// is elided in this rendering; the parameter list below and the call site
// in VisitBinAssign identify it as
// CodeGenFunction::EmitWithOriginalRHSBitfieldAssignment — confirm against
// upstream. It emits the RHS of a bitfield assignment and, when the RHS is
// an integral or lvalue-to-rvalue implicit cast, also reports the
// pre-conversion value and type through *Previous / *SrcType so the
// bitfield-conversion sanitizer can check the original value.
5339    const BinaryOperator *E, Value **Previous, QualType *SrcType) {
5340  // In case we have the integer or bitfield sanitizer checks enabled
5341  // we want to get the expression before scalar conversion.
5342  if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
5343    CastKind Kind = ICE->getCastKind();
5344    if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
5345      *SrcType = ICE->getSubExpr()->getType();
5346      *Previous = EmitScalarExpr(ICE->getSubExpr());
5347      // Pass default ScalarConversionOpts to avoid emitting
5348      // integer sanitizer checks as E refers to bitfield.
5349      return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
5350                                  ICE->getExprLoc());
5351    }
5352  }
  // No interesting implicit cast: just emit the RHS normally.
5353  return EmitScalarExpr(E->getRHS());
5354}
5355
// Emits a scalar assignment. Handles pointer-auth-qualified LHS types,
// ObjC ownership qualifiers (via the lifetime switch), and bitfield stores
// with the bitfield-conversion sanitizer check. Returns the stored r-value
// in C, nullptr when the result is ignored, and in C++ reloads the value
// for volatile-qualified LHS.
// NOTE(review): several hyperlinked lines are elided in this rendering
// (the pointer-auth LV initialization and store, the Qualifiers::OCL_*
// case labels of the switch, and the OpenMP runtime call); confirm the
// elided statements against the upstream source.
5356Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
5357  ApplyAtomGroup Grp(CGF.getDebugInfo());
5358  bool Ignore = TestAndClearIgnoreResultAssign();
5359
5360  Value *RHS;
5361  LValue LHS;
5362
5363  if (PointerAuthQualifier PtrAuth = E->getLHS()->getType().getPointerAuth()) {
  // NOTE(review): the statement materializing LV (the checked LHS lvalue,
  // original lines 5364-5365) is elided in this rendering.
5366    llvm::Value *RV =
5367        CGF.EmitPointerAuthQualify(PtrAuth, E->getRHS(), LV.getAddress());
5368    CGF.EmitNullabilityCheck(LV, RV, E->getExprLoc());
  // NOTE(review): the store of RV through LV (original line 5369) is
  // elided in this rendering.
5370
5371    if (Ignore)
5372      return nullptr;
5373    RV = CGF.EmitPointerAuthUnqualify(PtrAuth, RV, LV.getType(),
5374                                      LV.getAddress(), /*nonnull*/ false);
5375    return RV;
5376  }
5377
  // Dispatch on the ObjC ownership qualifier of the destination; the case
  // labels are elided in this rendering, but the ARC store helpers indicate
  // the strong / autoreleasing / unsafe-unretained / weak / none ordering.
5378  switch (E->getLHS()->getType().getObjCLifetime()) {
5380    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
5381    break;
5382
5384    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
5385    break;
5386
5388    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
5389    break;
5390
5392    RHS = Visit(E->getRHS());
5393    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
5394    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
5395    break;
5396
5398    // __block variables need to have the rhs evaluated first, plus
5399    // this should improve codegen just a little.
5400    Value *Previous = nullptr;
5401    QualType SrcType = E->getRHS()->getType();
5402    // Check if LHS is a bitfield, if RHS contains an implicit cast expression
5403    // we want to extract that value and potentially (if the bitfield sanitizer
5404    // is enabled) use it to check for an implicit conversion.
5405    if (E->getLHS()->refersToBitField())
5406      RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
5407    else
5408      RHS = Visit(E->getRHS());
5409
5410    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
5411
5412    // Store the value into the LHS.  Bit-fields are handled specially
5413    // because the result is altered by the store, i.e., [C99 6.5.16p1]
5414    // 'An assignment expression has the value of the left operand after
5415    // the assignment...'.
5416    if (LHS.isBitField()) {
5417      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
5418      // If the expression contained an implicit conversion, make sure
5419      // to use the value before the scalar conversion.
5420      Value *Src = Previous ? Previous : RHS;
5421      QualType DstType = E->getLHS()->getType();
5422      CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
5423                                      LHS.getBitFieldInfo(), E->getExprLoc());
5424    } else {
5425      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
5426      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
5427    }
5428  }
5429  // OpenMP: Handle lastprivate(condition:) in scalar assignment
5430  if (CGF.getLangOpts().OpenMP) {
  // NOTE(review): the OpenMP runtime call taking E->getLHS() (original
  // line 5431) is elided in this rendering.
5432                                                              E->getLHS());
5433  }
5434
5435  // If the result is clearly ignored, return now.
5436  if (Ignore)
5437    return nullptr;
5438
5439  // The result of an assignment in C is the assigned r-value.
5440  if (!CGF.getLangOpts().CPlusPlus)
5441    return RHS;
5442
5443  // If the lvalue is non-volatile, return the computed value of the assignment.
5444  if (!LHS.isVolatileQualified())
5445    return RHS;
5446
5447  // Otherwise, reload the value.
5448  return EmitLoadOfLValue(LHS, E->getExprLoc());
5449}
5450
// Emits '&&' with short-circuit control flow. Vector operands are compared
// element-wise against zero and combined with a bitwise 'and'. For scalar
// operands it constant-folds the LHS where possible, otherwise builds the
// land.rhs / land.end blocks with a phi, and maintains profile counters,
// coverage skip counters, and MC/DC bitmaps.
// NOTE(review): several hyperlinked statements (profile-counter increments
// and MC/DC temp updates) are elided in this rendering of the file —
// visible as jumps in the embedded line numbers; confirm against upstream.
5451Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
5452  auto HasLHSSkip = CGF.hasSkipCounter(E);
5453  auto HasRHSSkip = CGF.hasSkipCounter(E->getRHS());
5454
5455  // Perform vector logical and on comparisons with zero vectors.
5456  if (E->getType()->isVectorType()) {
5458
5459    Value *LHS = Visit(E->getLHS());
5460    Value *RHS = Visit(E->getRHS());
5461    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
5462    if (LHS->getType()->isFPOrFPVectorTy()) {
5463      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
5464          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
5465      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
5466      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
5467    } else {
5468      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
5469      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
5470    }
5471    Value *And = Builder.CreateAnd(LHS, RHS);
5472    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
5473  }
5474
5475  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
5476  llvm::Type *ResTy = ConvertType(E->getType());
5477
5478  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
5479  // If we have 1 && X, just emit X without inserting the control flow.
5480  bool LHSCondVal;
5481  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
5482    if (LHSCondVal) { // If we have 1 && X, just emit X.
5483      CGF.incrementProfileCounter(CGF.UseExecPath, E, /*UseBoth=*/true);
5484
5485      // If the top of the logical operator nest, reset the MCDC temp to 0.
5486      if (CGF.isMCDCDecisionExpr(E))
5488
5489      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5490
5491      // If we're generating for profiling or coverage, generate a branch to a
5492      // block that increments the RHS counter needed to track branch condition
5493      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
5494      // "FalseBlock" after the increment is done.
5495      if (InstrumentRegions &&
5497        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5498        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
5499        llvm::BasicBlock *RHSSkip =
5500            (HasRHSSkip ? CGF.createBasicBlock("land.rhsskip") : FBlock);
5501        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
5502        Builder.CreateCondBr(RHSCond, RHSBlockCnt, RHSSkip);
5503        CGF.EmitBlock(RHSBlockCnt);
5505        CGF.EmitBranch(FBlock);
5506        if (HasRHSSkip) {
5507          CGF.EmitBlock(RHSSkip);
5509        }
5510        CGF.EmitBlock(FBlock);
5511      } else
5512        CGF.markStmtMaybeUsed(E->getRHS());
5513
5514      // If the top of the logical operator nest, update the MCDC bitmap.
5515      if (CGF.isMCDCDecisionExpr(E))
5517
5518      // ZExt result to int or bool.
5519      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
5520    }
5521
5522    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
5523    if (!CGF.ContainsLabel(E->getRHS())) {
5524      CGF.markStmtAsUsed(false, E);
5525      if (HasLHSSkip)
5527
5528      CGF.markStmtMaybeUsed(E->getRHS());
5529
5530      return llvm::Constant::getNullValue(ResTy);
5531    }
5532  }
5533
5534  // If the top of the logical operator nest, reset the MCDC temp to 0.
5535  if (CGF.isMCDCDecisionExpr(E))
5537
5538  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
5539  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
5540
5541  llvm::BasicBlock *LHSFalseBlock =
5542      (HasLHSSkip ? CGF.createBasicBlock("land.lhsskip") : ContBlock);
5543
5544  CodeGenFunction::ConditionalEvaluation eval(CGF);
5545
5546  // Branch on the LHS first.  If it is false, go to the failure (cont) block.
5547  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, LHSFalseBlock,
5548                           CGF.getProfileCount(E->getRHS()));
5549
5550  if (HasLHSSkip) {
5551    CGF.EmitBlock(LHSFalseBlock);
5553    CGF.EmitBranch(ContBlock);
5554  }
5555
5556  // Any edges into the ContBlock are now from an (indeterminate number of)
5557  // edges from this first condition.  All of these values will be false.  Start
5558  // setting up the PHI node in the Cont Block for this.
5559  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
5560                                            "", ContBlock);
5561  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
5562       PI != PE; ++PI)
5563    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
5564
5565  eval.begin(CGF);
5566  CGF.EmitBlock(RHSBlock);
5568  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5569  eval.end(CGF);
5570
5571  // Reaquire the RHS block, as there may be subblocks inserted.
5572  RHSBlock = Builder.GetInsertBlock();
5573
5574  // If we're generating for profiling or coverage, generate a branch on the
5575  // RHS to a block that increments the RHS true counter needed to track branch
5576  // condition coverage.
5577  llvm::BasicBlock *ContIncoming = RHSBlock;
5578  if (InstrumentRegions &&
5580    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5581    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
5582    llvm::BasicBlock *RHSBlockSkip =
5583        (HasRHSSkip ? CGF.createBasicBlock("land.rhsskip") : ContBlock);
5584    Builder.CreateCondBr(RHSCond, RHSBlockCnt, RHSBlockSkip);
5585    CGF.EmitBlock(RHSBlockCnt);
5587    CGF.EmitBranch(ContBlock);
5588    PN->addIncoming(RHSCond, RHSBlockCnt);
5589    if (HasRHSSkip) {
5590      CGF.EmitBlock(RHSBlockSkip);
5592      CGF.EmitBranch(ContBlock);
5593      ContIncoming = RHSBlockSkip;
5594    }
5595  }
5596
5597  // Emit an unconditional branch from this block to ContBlock.
5598  {
5599    // There is no need to emit line number for unconditional branch.
5600    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
5601    CGF.EmitBlock(ContBlock);
5602  }
5603  // Insert an entry into the phi node for the edge with the value of RHSCond.
5604  PN->addIncoming(RHSCond, ContIncoming);
5605
5606  // If the top of the logical operator nest, update the MCDC bitmap.
5607  if (CGF.isMCDCDecisionExpr(E))
5609
5610  // Artificial location to preserve the scope information
5611  {
5613    PN->setDebugLoc(Builder.getCurrentDebugLocation());
5614  }
5615
5616  // ZExt result to int.
5617  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
5618}
5619
// Emits '||' with short-circuit control flow — the mirror image of
// VisitBinLAnd: vector operands become element-wise zero-compares combined
// with a bitwise 'or'; scalar operands constant-fold the LHS where possible
// or build the lor.rhs / lor.end blocks with a phi whose fall-through
// incoming values are 'true'. Also maintains profile counters, coverage
// skip counters, and MC/DC bitmaps.
// NOTE(review): several hyperlinked statements (profile-counter increments
// and MC/DC temp updates) are elided in this rendering of the file —
// visible as jumps in the embedded line numbers; confirm against upstream.
5620Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
5621  auto HasLHSSkip = CGF.hasSkipCounter(E);
5622  auto HasRHSSkip = CGF.hasSkipCounter(E->getRHS());
5623
5624  // Perform vector logical or on comparisons with zero vectors.
5625  if (E->getType()->isVectorType()) {
5627
5628    Value *LHS = Visit(E->getLHS());
5629    Value *RHS = Visit(E->getRHS());
5630    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
5631    if (LHS->getType()->isFPOrFPVectorTy()) {
5632      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
5633          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
5634      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
5635      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
5636    } else {
5637      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
5638      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
5639    }
5640    Value *Or = Builder.CreateOr(LHS, RHS);
5641    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
5642  }
5643
5644  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
5645  llvm::Type *ResTy = ConvertType(E->getType());
5646
5647  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
5648  // If we have 0 || X, just emit X without inserting the control flow.
5649  bool LHSCondVal;
5650  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
5651    if (!LHSCondVal) { // If we have 0 || X, just emit X.
5652      CGF.incrementProfileCounter(CGF.UseExecPath, E, /*UseBoth=*/true);
5653
5654      // If the top of the logical operator nest, reset the MCDC temp to 0.
5655      if (CGF.isMCDCDecisionExpr(E))
5657
5658      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5659
5660      // If we're generating for profiling or coverage, generate a branch to a
5661      // block that increments the RHS counter need to track branch condition
5662      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
5663      // "FalseBlock" after the increment is done.
5664      if (InstrumentRegions &&
5666        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5667        llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
5668        llvm::BasicBlock *RHSSkip =
5669            (HasRHSSkip ? CGF.createBasicBlock("lor.rhsskip") : FBlock);
5670        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
5671        Builder.CreateCondBr(RHSCond, RHSSkip, RHSBlockCnt);
5672        CGF.EmitBlock(RHSBlockCnt);
5674        CGF.EmitBranch(FBlock);
5675        if (HasRHSSkip) {
5676          CGF.EmitBlock(RHSSkip);
5678        }
5679        CGF.EmitBlock(FBlock);
5680      } else
5681        CGF.markStmtMaybeUsed(E->getRHS());
5682
5683      // If the top of the logical operator nest, update the MCDC bitmap.
5684      if (CGF.isMCDCDecisionExpr(E))
5686
5687      // ZExt result to int or bool.
5688      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
5689    }
5690
5691    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
5692    if (!CGF.ContainsLabel(E->getRHS())) {
5693      CGF.markStmtAsUsed(false, E);
5694      if (HasLHSSkip)
5696
5697      CGF.markStmtMaybeUsed(E->getRHS());
5698
5699      return llvm::ConstantInt::get(ResTy, 1);
5700    }
5701  }
5702
5703  // If the top of the logical operator nest, reset the MCDC temp to 0.
5704  if (CGF.isMCDCDecisionExpr(E))
5706
5707  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
5708  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
5709  llvm::BasicBlock *LHSTrueBlock =
5710      (HasLHSSkip ? CGF.createBasicBlock("lor.lhsskip") : ContBlock);
5711
5712  CodeGenFunction::ConditionalEvaluation eval(CGF);
5713
5714  // Branch on the LHS first.  If it is true, go to the success (cont) block.
5715  CGF.EmitBranchOnBoolExpr(E->getLHS(), LHSTrueBlock, RHSBlock,
5717                               CGF.getProfileCount(E->getRHS()));
5718
5719  if (HasLHSSkip) {
5720    CGF.EmitBlock(LHSTrueBlock);
5722    CGF.EmitBranch(ContBlock);
5723  }
5724
5725  // Any edges into the ContBlock are now from an (indeterminate number of)
5726  // edges from this first condition.  All of these values will be true.  Start
5727  // setting up the PHI node in the Cont Block for this.
5728  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
5729                                            "", ContBlock);
5730  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
5731       PI != PE; ++PI)
5732    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
5733
5734  eval.begin(CGF);
5735
5736  // Emit the RHS condition as a bool value.
5737  CGF.EmitBlock(RHSBlock);
5739  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5740
5741  eval.end(CGF);
5742
5743  // Reaquire the RHS block, as there may be subblocks inserted.
5744  RHSBlock = Builder.GetInsertBlock();
5745
5746  // If we're generating for profiling or coverage, generate a branch on the
5747  // RHS to a block that increments the RHS true counter needed to track branch
5748  // condition coverage.
5749  llvm::BasicBlock *ContIncoming = RHSBlock;
5750  if (InstrumentRegions &&
5752    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5753    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
5754    llvm::BasicBlock *RHSTrueBlock =
5755        (HasRHSSkip ? CGF.createBasicBlock("lor.rhsskip") : ContBlock);
5756    Builder.CreateCondBr(RHSCond, RHSTrueBlock, RHSBlockCnt);
5757    CGF.EmitBlock(RHSBlockCnt);
5759    CGF.EmitBranch(ContBlock);
5760    PN->addIncoming(RHSCond, RHSBlockCnt);
5761    if (HasRHSSkip) {
5762      CGF.EmitBlock(RHSTrueBlock);
5764      CGF.EmitBranch(ContBlock);
5765      ContIncoming = RHSTrueBlock;
5766    }
5767  }
5768
5769  // Emit an unconditional branch from this block to ContBlock.  Insert an entry
5770  // into the phi node for the edge with the value of RHSCond.
5771  CGF.EmitBlock(ContBlock);
5772  PN->addIncoming(RHSCond, ContIncoming);
5773
5774  // If the top of the logical operator nest, update the MCDC bitmap.
5775  if (CGF.isMCDCDecisionExpr(E))
5777
5778  // ZExt result to int.
5779  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
5780}
5781
5782Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
5783 CGF.EmitIgnoredExpr(E->getLHS());
5784 CGF.EnsureInsertPoint();
5785 return Visit(E->getRHS());
5786}
5787
5788//===----------------------------------------------------------------------===//
5789// Other Operators
5790//===----------------------------------------------------------------------===//
5791
5792/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
5793/// expression is cheap enough and side-effect-free enough to evaluate
5794/// unconditionally instead of conditionally. This is used to convert control
5795/// flow into selects in some cases.
5797 CodeGenFunction &CGF) {
5798 // Anything that is an integer or floating point constant is fine.
5799 return E->IgnoreParens()->isEvaluatable(CGF.getContext());
5800
5801 // Even non-volatile automatic variables can't be evaluated unconditionally.
5802 // Referencing a thread_local may cause non-trivial initialization work to
5803 // occur. If we're inside a lambda and one of the variables is from the scope
5804 // outside the lambda, that function may have returned already. Reading its
5805 // locals is a bad idea. Also, these reads may introduce races there didn't
5806 // exist in the source-level program.
5807}
5808
5809
5810Value *ScalarExprEmitter::
5811VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
5812 TestAndClearIgnoreResultAssign();
5813
5814 // Bind the common expression if necessary.
5815 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
5816
5817 Expr *condExpr = E->getCond();
5818 Expr *lhsExpr = E->getTrueExpr();
5819 Expr *rhsExpr = E->getFalseExpr();
5820
5821 // If the condition constant folds and can be elided, try to avoid emitting
5822 // the condition and the dead arm.
5823 bool CondExprBool;
5824 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5825 Expr *live = lhsExpr, *dead = rhsExpr;
5826 if (!CondExprBool) std::swap(live, dead);
5827
5828 // If the dead side doesn't have labels we need, just emit the Live part.
5829 if (!CGF.ContainsLabel(dead)) {
5830 CGF.incrementProfileCounter(CondExprBool ? CGF.UseExecPath
5831 : CGF.UseSkipPath,
5832 E, /*UseBoth=*/true);
5833 Value *Result = Visit(live);
5834 CGF.markStmtMaybeUsed(dead);
5835
5836 // If the live part is a throw expression, it acts like it has a void
5837 // type, so evaluating it returns a null Value*. However, a conditional
5838 // with non-void type must return a non-null Value*.
5839 if (!Result && !E->getType()->isVoidType())
5840 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
5841
5842 return Result;
5843 }
5844 }
5845
5846 // OpenCL: If the condition is a vector, we can treat this condition like
5847 // the select function.
5848 if (CGF.getLangOpts().OpenCL && (condExpr->getType()->isVectorType() ||
5849 condExpr->getType()->isExtVectorType())) {
5851
5852 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5853 llvm::Value *LHS = Visit(lhsExpr);
5854 llvm::Value *RHS = Visit(rhsExpr);
5855
5856 llvm::Type *condType = ConvertType(condExpr->getType());
5857 auto *vecTy = cast<llvm::FixedVectorType>(condType);
5858
5859 unsigned numElem = vecTy->getNumElements();
5860 llvm::Type *elemType = vecTy->getElementType();
5861
5862 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
5863 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
5864 llvm::Value *tmp = Builder.CreateSExt(
5865 TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
5866 llvm::Value *tmp2 = Builder.CreateNot(tmp);
5867
5868 // Cast float to int to perform ANDs if necessary.
5869 llvm::Value *RHSTmp = RHS;
5870 llvm::Value *LHSTmp = LHS;
5871 bool wasCast = false;
5872 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
5873 if (rhsVTy->getElementType()->isFloatingPointTy()) {
5874 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
5875 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
5876 wasCast = true;
5877 }
5878
5879 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
5880 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
5881 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
5882 if (wasCast)
5883 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
5884
5885 return tmp5;
5886 }
5887
5888 if (condExpr->getType()->isVectorType() ||
5889 condExpr->getType()->isSveVLSBuiltinType()) {
5891
5892 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5893 llvm::Value *LHS = Visit(lhsExpr);
5894 llvm::Value *RHS = Visit(rhsExpr);
5895
5896 llvm::Type *CondType = ConvertType(condExpr->getType());
5897 auto *VecTy = cast<llvm::VectorType>(CondType);
5898
5899 if (VecTy->getElementType()->isIntegerTy(1))
5900 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
5901
5902 // OpenCL uses the MSB of the mask vector.
5903 llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
5904 if (condExpr->getType()->isExtVectorType())
5905 CondV = Builder.CreateICmpSLT(CondV, ZeroVec, "vector_cond");
5906 else
5907 CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
5908 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
5909 }
5910
5911 // If this is a really simple expression (like x ? 4 : 5), emit this as a
5912 // select instead of as control flow. We can only do this if it is cheap and
5913 // safe to evaluate the LHS and RHS unconditionally.
5917 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
5918 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
5919
5920 CGF.incrementProfileCounter(E, StepV);
5921
5922 llvm::Value *LHS = Visit(lhsExpr);
5923 llvm::Value *RHS = Visit(rhsExpr);
5924 if (!LHS) {
5925 // If the conditional has void type, make sure we return a null Value*.
5926 assert(!RHS && "LHS and RHS types must match");
5927 return nullptr;
5928 }
5929 return Builder.CreateSelect(CondV, LHS, RHS, "cond");
5930 }
5931
5932 // If the top of the logical operator nest, reset the MCDC temp to 0.
5933 if (auto E = CGF.stripCond(condExpr); CGF.isMCDCDecisionExpr(E))
5935
5936 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
5937 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
5938 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
5939
5940 CodeGenFunction::ConditionalEvaluation eval(CGF);
5941 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
5942 CGF.getProfileCount(lhsExpr));
5943
5944 CGF.EmitBlock(LHSBlock);
5945
5946 // If the top of the logical operator nest, update the MCDC bitmap for the
5947 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5948 // may also contain a boolean expression.
5949 if (auto E = CGF.stripCond(condExpr); CGF.isMCDCDecisionExpr(E))
5951
5953 eval.begin(CGF);
5954 Value *LHS = Visit(lhsExpr);
5955 eval.end(CGF);
5956
5957 LHSBlock = Builder.GetInsertBlock();
5958 Builder.CreateBr(ContBlock);
5959
5960 CGF.EmitBlock(RHSBlock);
5961
5962 // If the top of the logical operator nest, update the MCDC bitmap for the
5963 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5964 // may also contain a boolean expression.
5965 if (auto E = CGF.stripCond(condExpr); CGF.isMCDCDecisionExpr(E))
5967
5969 eval.begin(CGF);
5970 Value *RHS = Visit(rhsExpr);
5971 eval.end(CGF);
5972
5973 RHSBlock = Builder.GetInsertBlock();
5974 CGF.EmitBlock(ContBlock);
5975
5976 // If the LHS or RHS is a throw expression, it will be legitimately null.
5977 if (!LHS)
5978 return RHS;
5979 if (!RHS)
5980 return LHS;
5981
5982 // Create a PHI node for the real part.
5983 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
5984 PN->addIncoming(LHS, LHSBlock);
5985 PN->addIncoming(RHS, RHSBlock);
5986
5987 return PN;
5988}
5989
5990Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
5991 return Visit(E->getChosenSubExpr());
5992}
5993
5994Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
5995 Address ArgValue = Address::invalid();
5996 RValue ArgPtr = CGF.EmitVAArg(VE, ArgValue);
5997
5998 return ArgPtr.getScalarVal();
5999}
6000
6001Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
6002 return CGF.EmitBlockLiteral(block);
6003}
6004
6005// Convert a vec3 to vec4, or vice versa.
6007 Value *Src, unsigned NumElementsDst) {
6008 static constexpr int Mask[] = {0, 1, 2, -1};
6009 return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
6010}
6011
6012// Create cast instructions for converting LLVM value \p Src to LLVM type \p
6013// DstTy. \p Src has the same size as \p DstTy. Both are single value types
6014// but could be scalar or vectors of different lengths, and either can be
6015// pointer.
6016// There are 4 cases:
6017// 1. non-pointer -> non-pointer : needs 1 bitcast
6018// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
6019// 3. pointer -> non-pointer
6020// a) pointer -> intptr_t : needs 1 ptrtoint
6021// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
6022// 4. non-pointer -> pointer
6023// a) intptr_t -> pointer : needs 1 inttoptr
6024// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
6025// Note: for cases 3b and 4b two casts are required since LLVM casts do not
6026// allow casting directly between pointer types and non-integer non-pointer
6027// types.
6029 const llvm::DataLayout &DL,
6030 Value *Src, llvm::Type *DstTy,
6031 StringRef Name = "") {
6032 auto SrcTy = Src->getType();
6033
6034 // Case 1.
6035 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
6036 return Builder.CreateBitCast(Src, DstTy, Name);
6037
6038 // Case 2.
6039 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
6040 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
6041
6042 // Case 3.
6043 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
6044 // Case 3b.
6045 if (!DstTy->isIntegerTy())
6046 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
6047 // Cases 3a and 3b.
6048 return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
6049 }
6050
6051 // Case 4b.
6052 if (!SrcTy->isIntegerTy())
6053 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
6054 // Cases 4a and 4b.
6055 return Builder.CreateIntToPtr(Src, DstTy, Name);
6056}
6057
6058Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
6059 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
6060 llvm::Type *DstTy = ConvertType(E->getType());
6061
6062 llvm::Type *SrcTy = Src->getType();
6063 unsigned NumElementsSrc =
6065 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
6066 : 0;
6067 unsigned NumElementsDst =
6069 ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
6070 : 0;
6071
6072 // Use bit vector expansion for ext_vector_type boolean vectors.
6073 if (E->getType()->isExtVectorBoolType())
6074 return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");
6075
6076 // Going from vec3 to non-vec3 is a special case and requires a shuffle
6077 // vector to get a vec4, then a bitcast if the target type is different.
6078 if (NumElementsSrc == 3 && NumElementsDst != 3) {
6079 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
6080 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
6081 DstTy);
6082
6083 Src->setName("astype");
6084 return Src;
6085 }
6086
6087 // Going from non-vec3 to vec3 is a special case and requires a bitcast
6088 // to vec4 if the original type is not vec4, then a shuffle vector to
6089 // get a vec3.
6090 if (NumElementsSrc != 3 && NumElementsDst == 3) {
6091 auto *Vec4Ty = llvm::FixedVectorType::get(
6092 cast<llvm::VectorType>(DstTy)->getElementType(), 4);
6093 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
6094 Vec4Ty);
6095
6096 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
6097 Src->setName("astype");
6098 return Src;
6099 }
6100
6101 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
6102 Src, DstTy, "astype");
6103}
6104
6105Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
6106 return CGF.EmitAtomicExpr(E).getScalarVal();
6107}
6108
6109//===----------------------------------------------------------------------===//
6110// Entry Point into this File
6111//===----------------------------------------------------------------------===//
6112
6113/// Emit the computation of the specified expression of scalar type, ignoring
6114/// the result.
6115Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
6116 assert(E && hasScalarEvaluationKind(E->getType()) &&
6117 "Invalid scalar expression to emit");
6118
6119 return ScalarExprEmitter(*this, IgnoreResultAssign)
6120 .Visit(const_cast<Expr *>(E));
6121}
6122
6123/// Emit a conversion from the specified type to the specified destination type,
6124/// both of which are LLVM scalar types.
6126 QualType DstTy,
6127 SourceLocation Loc) {
6128 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
6129 "Invalid scalar expression to emit");
6130 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
6131}
6132
6133/// Emit a conversion from the specified complex type to the specified
6134/// destination type, where the destination type is an LLVM scalar type.
6136 QualType SrcTy,
6137 QualType DstTy,
6138 SourceLocation Loc) {
6139 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
6140 "Invalid complex -> scalar conversion");
6141 return ScalarExprEmitter(*this)
6142 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
6143}
6144
6145
6146Value *
6148 QualType PromotionType) {
6149 if (!PromotionType.isNull())
6150 return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
6151 else
6152 return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
6153}
6154
6155
6158 bool isInc, bool isPre) {
6159 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
6160}
6161
6163 // object->isa or (*object).isa
6164 // Generate code as for: *(Class*)object
6165
6166 Expr *BaseExpr = E->getBase();
6168 if (BaseExpr->isPRValue()) {
6169 llvm::Type *BaseTy =
6171 Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
6172 } else {
6173 Addr = EmitLValue(BaseExpr).getAddress();
6174 }
6175
6176 // Cast the address to Class*.
6177 Addr = Addr.withElementType(ConvertType(E->getType()));
6178 return MakeAddrLValue(Addr, E->getType());
6179}
6180
6181
6183 const CompoundAssignOperator *E) {
6185 ScalarExprEmitter Scalar(*this);
6186 Value *Result = nullptr;
6187 switch (E->getOpcode()) {
6188#define COMPOUND_OP(Op) \
6189 case BO_##Op##Assign: \
6190 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
6191 Result)
6192 COMPOUND_OP(Mul);
6193 COMPOUND_OP(Div);
6194 COMPOUND_OP(Rem);
6195 COMPOUND_OP(Add);
6196 COMPOUND_OP(Sub);
6197 COMPOUND_OP(Shl);
6198 COMPOUND_OP(Shr);
6200 COMPOUND_OP(Xor);
6201 COMPOUND_OP(Or);
6202#undef COMPOUND_OP
6203
6204 case BO_PtrMemD:
6205 case BO_PtrMemI:
6206 case BO_Mul:
6207 case BO_Div:
6208 case BO_Rem:
6209 case BO_Add:
6210 case BO_Sub:
6211 case BO_Shl:
6212 case BO_Shr:
6213 case BO_LT:
6214 case BO_GT:
6215 case BO_LE:
6216 case BO_GE:
6217 case BO_EQ:
6218 case BO_NE:
6219 case BO_Cmp:
6220 case BO_And:
6221 case BO_Xor:
6222 case BO_Or:
6223 case BO_LAnd:
6224 case BO_LOr:
6225 case BO_Assign:
6226 case BO_Comma:
6227 llvm_unreachable("Not valid compound assignment operators");
6228 }
6229
6230 llvm_unreachable("Unhandled compound assignment operator");
6231}
6232
6234 // The total (signed) byte offset for the GEP.
6235 llvm::Value *TotalOffset;
6236 // The offset overflow flag - true if the total offset overflows.
6237 llvm::Value *OffsetOverflows;
6238};
6239
6240/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
6241/// and compute the total offset it applies from it's base pointer BasePtr.
6242/// Returns offset in bytes and a boolean flag whether an overflow happened
6243/// during evaluation.
6245 llvm::LLVMContext &VMContext,
6246 CodeGenModule &CGM,
6247 CGBuilderTy &Builder) {
6248 const auto &DL = CGM.getDataLayout();
6249
6250 // The total (signed) byte offset for the GEP.
6251 llvm::Value *TotalOffset = nullptr;
6252
6253 // Was the GEP already reduced to a constant?
6254 if (isa<llvm::Constant>(GEPVal)) {
6255 // Compute the offset by casting both pointers to integers and subtracting:
6256 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
6257 Value *BasePtr_int =
6258 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
6259 Value *GEPVal_int =
6260 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
6261 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
6262 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
6263 }
6264
6265 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
6266 assert(GEP->getPointerOperand() == BasePtr &&
6267 "BasePtr must be the base of the GEP.");
6268 assert(GEP->isInBounds() && "Expected inbounds GEP");
6269
6270 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
6271
6272 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
6273 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
6274 auto *SAddIntrinsic =
6275 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
6276 auto *SMulIntrinsic =
6277 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
6278
6279 // The offset overflow flag - true if the total offset overflows.
6280 llvm::Value *OffsetOverflows = Builder.getFalse();
6281
6282 /// Return the result of the given binary operation.
6283 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
6284 llvm::Value *RHS) -> llvm::Value * {
6285 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
6286
6287 // If the operands are constants, return a constant result.
6288 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
6289 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
6290 llvm::APInt N;
6291 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
6292 /*Signed=*/true, N);
6293 if (HasOverflow)
6294 OffsetOverflows = Builder.getTrue();
6295 return llvm::ConstantInt::get(VMContext, N);
6296 }
6297 }
6298
6299 // Otherwise, compute the result with checked arithmetic.
6300 auto *ResultAndOverflow = Builder.CreateCall(
6301 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
6302 OffsetOverflows = Builder.CreateOr(
6303 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
6304 return Builder.CreateExtractValue(ResultAndOverflow, 0);
6305 };
6306
6307 // Determine the total byte offset by looking at each GEP operand.
6308 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
6309 GTI != GTE; ++GTI) {
6310 llvm::Value *LocalOffset;
6311 auto *Index = GTI.getOperand();
6312 // Compute the local offset contributed by this indexing step:
6313 if (auto *STy = GTI.getStructTypeOrNull()) {
6314 // For struct indexing, the local offset is the byte position of the
6315 // specified field.
6316 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
6317 LocalOffset = llvm::ConstantInt::get(
6318 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
6319 } else {
6320 // Otherwise this is array-like indexing. The local offset is the index
6321 // multiplied by the element size.
6322 auto *ElementSize =
6323 llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
6324 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
6325 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
6326 }
6327
6328 // If this is the first offset, set it as the total offset. Otherwise, add
6329 // the local offset into the running total.
6330 if (!TotalOffset || TotalOffset == Zero)
6331 TotalOffset = LocalOffset;
6332 else
6333 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
6334 }
6335
6336 return {TotalOffset, OffsetOverflows};
6337}
6338
6339Value *
6340CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
6341 ArrayRef<Value *> IdxList,
6342 bool SignedIndices, bool IsSubtraction,
6343 SourceLocation Loc, const Twine &Name) {
6344 llvm::Type *PtrTy = Ptr->getType();
6345
6346 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6347 if (!SignedIndices && !IsSubtraction)
6348 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6349
6350 Value *GEPVal = Builder.CreateGEP(ElemTy, Ptr, IdxList, Name, NWFlags);
6351
6352 // If the pointer overflow sanitizer isn't enabled, do nothing.
6353 if (!SanOpts.has(SanitizerKind::PointerOverflow))
6354 return GEPVal;
6355
6356 // Perform nullptr-and-offset check unless the nullptr is defined.
6357 bool PerformNullCheck = !NullPointerIsDefined(
6358 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
6359 // Check for overflows unless the GEP got constant-folded,
6360 // and only in the default address space
6361 bool PerformOverflowCheck =
6362 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
6363
6364 if (!(PerformNullCheck || PerformOverflowCheck))
6365 return GEPVal;
6366
6367 const auto &DL = CGM.getDataLayout();
6368
6369 auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
6370 auto CheckHandler = SanitizerHandler::PointerOverflow;
6371 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6372 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
6373
6374 GEPOffsetAndOverflow EvaluatedGEP =
6375 EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
6376
6377 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
6378 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
6379 "If the offset got constant-folded, we don't expect that there was an "
6380 "overflow.");
6381
6382 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
6383
6384 // Common case: if the total offset is zero, don't emit a check.
6385 if (EvaluatedGEP.TotalOffset == Zero)
6386 return GEPVal;
6387
6388 // Now that we've computed the total offset, add it to the base pointer (with
6389 // wrapping semantics).
6390 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
6391 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
6392
6393 llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
6394 2>
6395 Checks;
6396
6397 if (PerformNullCheck) {
6398 // If the base pointer evaluates to a null pointer value,
6399 // the only valid pointer this inbounds GEP can produce is also
6400 // a null pointer, so the offset must also evaluate to zero.
6401 // Likewise, if we have non-zero base pointer, we can not get null pointer
6402 // as a result, so the offset can not be -intptr_t(BasePtr).
6403 // In other words, both pointers are either null, or both are non-null,
6404 // or the behaviour is undefined.
6405 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
6406 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
6407 auto *Valid = Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr);
6408 Checks.emplace_back(Valid, CheckOrdinal);
6409 }
6410
6411 if (PerformOverflowCheck) {
6412 // The GEP is valid if:
6413 // 1) The total offset doesn't overflow, and
6414 // 2) The sign of the difference between the computed address and the base
6415 // pointer matches the sign of the total offset.
6416 llvm::Value *ValidGEP;
6417 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
6418 if (SignedIndices) {
6419 // GEP is computed as `unsigned base + signed offset`, therefore:
6420 // * If offset was positive, then the computed pointer can not be
6421 // [unsigned] less than the base pointer, unless it overflowed.
6422 // * If offset was negative, then the computed pointer can not be
6423 // [unsigned] greater than the bas pointere, unless it overflowed.
6424 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
6425 auto *PosOrZeroOffset =
6426 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
6427 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
6428 ValidGEP =
6429 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
6430 } else if (!IsSubtraction) {
6431 // GEP is computed as `unsigned base + unsigned offset`, therefore the
6432 // computed pointer can not be [unsigned] less than base pointer,
6433 // unless there was an overflow.
6434 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
6435 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
6436 } else {
6437 // GEP is computed as `unsigned base - unsigned offset`, therefore the
6438 // computed pointer can not be [unsigned] greater than base pointer,
6439 // unless there was an overflow.
6440 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
6441 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
6442 }
6443 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
6444 Checks.emplace_back(ValidGEP, CheckOrdinal);
6445 }
6446
6447 assert(!Checks.empty() && "Should have produced some checks.");
6448
6449 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
6450 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
6451 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
6452 EmitCheck(Checks, CheckHandler, StaticArgs, DynamicArgs);
6453
6454 return GEPVal;
6455}
6456
6458 Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
6459 bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
6460 const Twine &Name) {
6461 if (!SanOpts.has(SanitizerKind::PointerOverflow)) {
6462 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6463 if (!SignedIndices && !IsSubtraction)
6464 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6465
6466 return Builder.CreateGEP(Addr, IdxList, elementType, Align, Name, NWFlags);
6467 }
6468
6469 return RawAddress(
6470 EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
6471 IdxList, SignedIndices, IsSubtraction, Loc, Name),
6472 elementType, Align);
6473}
Defines the clang::ASTContext interface.
#define V(N, I)
static llvm::Value * EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, const BinaryOperator *E, llvm::Value *LHS, llvm::Value *RHS, CompareKind Kind, const char *NameSuffix="")
static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue DestVal, LValue SrcVal, SourceLocation Loc)
static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty)
static llvm::Value * EmitIsNegativeTestHelper(Value *V, QualType VType, const char *Name, CGBuilderTy &Builder)
static Value * createCastsForTypeOfSameSize(CGBuilderTy &Builder, const llvm::DataLayout &DL, Value *Src, llvm::Type *DstTy, StringRef Name="")
static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E)
IntrinsicType
@ VCMPGT
@ VCMPEQ
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, BuiltinType::Kind ElemKind)
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal, llvm::LLVMContext &VMContext, CodeGenModule &CGM, CGBuilderTy &Builder)
Evaluate given GEPVal, which is either an inbounds GEP, or a constant, and compute the total offset i...
static bool isDeclRefKnownNonNull(CodeGenFunction &CGF, const ValueDecl *D)
static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(QualType SrcType, QualType DstType)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool negMul, bool negAdd)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, unsigned Off)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF, Value *Src, unsigned NumElementsDst)
static Value * tryEmitFMulAdd(const BinOpInfo &op, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool isSub=false)
static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E, llvm::Value *InVal, bool IsInc, FPOptions FPFeatures)
#define HANDLE_BINOP(OP)
#define COMPOUND_OP(Op)
#define HANDLEBINOP(OP)
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e, CIRGenFunction &cgf)
Return true if the specified expression is cheap enough and side-effect-free enough to evaluate uncon...
static std::optional< QualType > getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e)
If e is a widened promoted integer, get its base (unpromoted) type.
#define VISITCOMP(CODE)
static uint32_t getBitWidth(const Expr *E)
llvm::APSInt APSInt
Definition Compiler.cpp:24
static Decl::Kind getKind(const Decl *D)
FormatToken * Previous
The previous token in the unwrapped line.
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
This file contains the declaration of TrapReasonBuilder and related classes.
llvm::APInt getValue() const
APSInt & getInt()
Definition APValue.h:508
bool isLValue() const
Definition APValue.h:490
bool isInt() const
Definition APValue.h:485
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
CanQualType FloatTy
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
const LangOptions & getLangOpts() const
Definition ASTContext.h:951
bool isTypeIgnoredBySanitizer(const SanitizerMask &Mask, const QualType &Ty) const
Check if a type can have its sanitizer instrumentation elided based on its presence within an ignorel...
CanQualType BoolTy
unsigned getOpenMPDefaultSimdAlign(QualType T) const
Get default simd alignment of the specified complete type in bits.
llvm::FixedPointSemantics getFixedPointSemantics(QualType Ty) const
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getPromotedIntegerType(QualType PromotableType) const
Return the type that PromotableType will promote to: C99 6.3.1.1p2, assuming that PromotableType is a...
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getComplexType(QualType T) const
Return the uniqued reference to the type for a complex number with the specified element type.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:916
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
bool isPromotableIntegerType(QualType T) const
More type predicates useful for type checking/promotion.
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
bool isUnaryOverflowPatternExcluded(const UnaryOperator *UO)
uint64_t getCharWidth() const
Return the size of the character type, in bits.
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4534
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4540
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4546
LabelDecl * getLabel() const
Definition Expr.h:4576
uint64_t getValue() const
Definition ExprCXX.h:3045
QualType getElementType() const
Definition TypeBase.h:3742
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:6751
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
static Opcode getOpForCompoundAssignment(Opcode Opc)
Definition Expr.h:4188
bool isCompoundAssignmentOp() const
Definition Expr.h:4185
SourceLocation getExprLoc() const
Definition Expr.h:4082
bool isShiftOp() const
Definition Expr.h:4130
Expr * getRHS() const
Definition Expr.h:4093
bool isShiftAssignOp() const
Definition Expr.h:4199
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:4254
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, const Expr *LHS, const Expr *RHS)
Return true if a binary operator using the specified opcode and operands would match the 'p = (i8*)nu...
Definition Expr.cpp:2205
Opcode getOpcode() const
Definition Expr.h:4086
BinaryOperatorKind Opcode
Definition Expr.h:4046
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition DeclCXX.h:249
bool getValue() const
Definition ExprCXX.h:741
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
bool getValue() const
Definition ExprCXX.h:4333
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition ExprCXX.h:305
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1603
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
path_iterator path_begin()
Definition Expr.h:3749
CastKind getCastKind() const
Definition Expr.h:3723
bool changesVolatileQualification() const
Return.
Definition Expr.h:3813
path_iterator path_end()
Definition Expr.h:3750
Expr * getSubExpr()
Definition Expr.h:3729
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
bool isOne() const
isOne - Test whether the quantity equals one.
Definition CharUnits.h:125
unsigned getValue() const
Definition Expr.h:1632
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4887
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
SanitizerSet SanitizeTrap
Set of sanitizer checks that trap rather than diagnose.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
static Address invalid()
Definition Address.h:176
bool isValid() const
Definition Address.h:177
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:118
virtual llvm::Constant * EmitNullMemberPointer(const MemberPointerType *MPT)
Create a null member pointer of the given type.
Definition CGCXXABI.cpp:102
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition CGCXXABI.cpp:94
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition CGCXXABI.cpp:84
virtual llvm::Value * EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *Src)
Perform a derived-to-base, base-to-derived, or bitcast member pointer conversion.
Definition CGCXXABI.cpp:71
void EmitPseudoVariable(CGBuilderTy &Builder, llvm::Instruction *Value, QualType Ty)
Emit a pseudo variable and debug info for an intermediate value if it does not correspond to a variab...
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
void emitInitListOpaqueValues(CodeGenFunction &CGF, InitListExpr *E)
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
Produce the code for a CK_ARCConsumeObject.
Definition CGObjC.cpp:2152
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
Definition CGObjC.cpp:573
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
CurrentSourceLocExprScope CurSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
llvm::Value * performAddrSpaceCast(llvm::Value *Src, llvm::Type *DestTy)
llvm::Value * EmitARCReclaimReturnedObject(const Expr *e, bool allowUnsafeClaim)
Definition CGObjC.cpp:3089
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
Definition CGObjC.cpp:3679
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given division operation based on CodeGenOpts.
Definition CGExpr.cpp:7153
llvm::Value * EmitObjCSelectorExpr(const ObjCSelectorExpr *E)
Emit a selector.
Definition CGObjC.cpp:257
SanitizerSet SanOpts
Sanitizers enabled for this function.
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
llvm::Value * EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E)
Definition CGObjC.cpp:251
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
const CastExpr * CurCast
If a cast expression is being visited, this holds the current cast's expression.
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitObjCProtocolExpr(const ObjCProtocolExpr *E)
Definition CGObjC.cpp:265
llvm::Value * EmitPointerAuthQualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType ValueType, Address StorageAddress, bool IsKnownNonNull)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
Definition CGExpr.cpp:2975
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3973
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition CGCall.cpp:6444
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
Definition CGExpr.cpp:7254
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void maybeUpdateMCDCTestVectorBitmap(const Expr *E)
Increment the profiler's counter for the given expression by StepV.
void EmitCXXDeleteExpr(const CXXDeleteExpr *E)
llvm::Value * EmitObjCArrayLiteral(const ObjCArrayLiteral *E)
Definition CGObjC.cpp:247
llvm::Value * EmitPromotedScalarExpr(const Expr *E, QualType PromotionType)
const LangOptions & getLangOpts() const
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
Store into a strong object.
Definition CGObjC.cpp:2545
bool isPointerKnownNonNull(const Expr *E)
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
Definition CGClass.cpp:388
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
Definition CGDecl.cpp:765
llvm::Value * EmitPointerAuthUnqualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType PointerType, Address StorageAddress, bool IsKnownNonNull)
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
Emit a compare-and-exchange op for atomic type.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
Definition CGClass.cpp:2979
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitCXXNewExpr(const CXXNewExpr *E)
bool hasSkipCounter(const Stmt *S) const
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
std::pair< LValue, llvm::Value * > EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored)
Definition CGObjC.cpp:3629
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3863
LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E, llvm::Value *&Result)
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition CGExpr.cpp:176
const TargetInfo & getTarget() const
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
llvm::Value * EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty)
Definition CGObjC.cpp:3953
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:251
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:6367
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2472
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
RawAddress CreateIRTempWithoutCast(QualType T, const Twine &Name="tmp")
CreateIRTempWithoutCast - Create a temporary IR object of the given type, with appropriate alignment.
Definition CGExpr.cpp:183
llvm::Value * EmitObjCBoxedExpr(const ObjCBoxedExpr *E)
EmitObjCBoxedExpr - This routine generates code to call the appropriate expression boxing method.
Definition CGObjC.cpp:64
void EmitBoundsCheck(const Expr *ArrayExpr, const Expr *ArrayExprBase, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
Definition CGExpr.cpp:1255
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:232
void maybeResetMCDCCondBitmap(const Expr *E)
Zero-init the MCDC temp value.
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:4121
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6320
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
Definition CGExpr.cpp:2037
llvm::Value * EmitARCRetainScalarExpr(const Expr *expr)
EmitARCRetainScalarExpr - Semantically equivalent to EmitARCRetainObject(e->getType(),...
Definition CGObjC.cpp:3493
llvm::Value * EmitBlockLiteral(const BlockExpr *)
Emit block literal.
Definition CGBlocks.cpp:764
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2223
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:6306
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
static const Expr * stripCond(const Expr *C)
Ignore parentheses and logical-NOT to track conditions consistently.
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2713
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4556
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition CGStmt.cpp:557
llvm::AtomicRMWInst * emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Order=llvm::AtomicOrdering::SequentiallyConsistent, llvm::SyncScope::ID SSID=llvm::SyncScope::System, const AtomicExpr *AE=nullptr)
Emit an atomicrmw instruction, and applying relevant metadata when applicable.
llvm::Value * EmitPointerArithmetic(const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer, Expr *indexOperand, llvm::Value *index, bool isSubtraction)
Emit pointer + index arithmetic.
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:273
uint64_t getCurrentProfileCount()
Get the profiler's current count.
llvm::Type * ConvertTypeForMem(QualType T)
RValue EmitAtomicExpr(AtomicExpr *E)
Definition CGAtomic.cpp:912
void markStmtMaybeUsed(const Stmt *S)
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
void FlattenAccessAndTypeLValue(LValue LVal, SmallVectorImpl< LValue > &AccessList)
Definition CGExpr.cpp:7263
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1591
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition CGStmt.cpp:660
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition CGExpr.cpp:1672
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition CGExpr.cpp:189
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
Definition CGExpr.cpp:742
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitBuiltinAvailable(const VersionTuple &Version)
Definition CGObjC.cpp:4033
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
Definition CGExpr.cpp:5126
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4463
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of 'this'.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2257
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
llvm::Value * EmitObjCStringLiteral(const ObjCStringLiteral *E)
Emits an instance of NSConstantString representing the object.
Definition CGObjC.cpp:51
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr)
Try to emit a reference to the given value without producing it as an l-value.
Definition CGExpr.cpp:1934
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1707
llvm::Value * EmitARCExtendBlockObject(const Expr *expr)
Definition CGObjC.cpp:3524
void markStmtAsUsed(bool Skipped, const Stmt *S)
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
i8* @objc_storeWeak(i8** addr, i8* value) Returns value.
Definition CGObjC.cpp:2651
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType)
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
bool isMCDCDecisionExpr(const Expr *E) const
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:640
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition CGExpr.cpp:1387
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
TrapReasonBuilder BuildTrapReason(unsigned DiagID, TrapReason &TR)
Helper function to construct a TrapReasonBuilder.
llvm::Constant * getNullPointer(llvm::PointerType *T, QualType QT)
Get target specific null pointer.
const TargetInfo & getTarget() const
llvm::Constant * getMemberPointerConstant(const UnaryOperator *e)
const llvm::DataLayout & getDataLayout() const
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
const CodeGenOptions & getCodeGenOpts() const
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Value * createOpenCLIntToSamplerConversion(const Expr *E, CodeGenFunction &CGF)
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
LValue - This represents an lvalue references.
Definition CGValue.h:183
bool isBitField() const
Definition CGValue.h:288
bool isVolatileQualified() const
Definition CGValue.h:297
const Qualifiers & getQuals() const
Definition CGValue.h:350
Address getAddress() const
Definition CGValue.h:373
QualType getType() const
Definition CGValue.h:303
const CGBitFieldInfo & getBitFieldInfo() const
Definition CGValue.h:446
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:99
bool isAggregate() const
Definition CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:84
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:72
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4303
QualType getComputationLHSType() const
Definition Expr.h:4337
QualType getComputationResultType() const
Definition Expr.h:4340
bool isSatisfied() const
Whether or not the concept with the given arguments was satisfied when the expression was created.
APValue getAPValueResult() const
Definition Expr.cpp:413
bool hasAPValueResult() const
Definition Expr.h:1160
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4395
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:4812
T * getAttr() const
Definition DeclBase.h:573
ChildElementIter< false > begin()
Definition Expr.h:5235
size_t getDataElementCount() const
Definition Expr.h:5151
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isGLValue() const
Definition Expr.h:287
@ SE_AllowSideEffects
Allow any unmodeled side effect.
Definition Expr.h:677
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3070
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition Expr.h:479
QualType getType() const
Definition Expr.h:144
llvm::APInt getValue() const
Returns an internal integer representation of the literal.
Definition Expr.h:1578
llvm::APFloat getValue() const
Definition Expr.h:1669
const Expr * getSubExpr() const
Definition Expr.h:1065
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition Expr.h:3856
unsigned getNumInits() const
Definition Expr.h:5332
bool hadArrayRangeDesignator() const
Definition Expr.h:5486
const Expr * getInit(unsigned Init) const
Definition Expr.h:5356
bool isSignedOverflowDefined() const
std::string OverflowHandler
The name of the handler function to be called when -ftrapv is specified.
Represents a matrix type, as defined in the Matrix Types clang extensions.
Definition TypeBase.h:4345
Expr * getBase() const
Definition Expr.h:3444
bool isArrow() const
Definition Expr.h:3551
VersionTuple getVersion() const
Definition ExprObjC.h:1723
ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC 'id' type.
Definition ExprObjC.h:1495
Expr * getBase() const
Definition ExprObjC.h:1520
SourceLocation getExprLoc() const LLVM_READONLY
Definition ExprObjC.h:1543
const ObjCMethodDecl * getMethodDecl() const
Definition ExprObjC.h:1361
QualType getReturnType() const
Definition DeclObjC.h:329
Represents a pointer to an Objective C object.
Definition TypeBase.h:8006
const ObjCObjectType * getObjectType() const
Gets the type pointed to by this ObjC pointer.
Definition TypeBase.h:8043
Expr * getIndexExpr(unsigned Idx)
Definition Expr.h:2589
const OffsetOfNode & getComponent(unsigned Idx) const
Definition Expr.h:2577
TypeSourceInfo * getTypeSourceInfo() const
Definition Expr.h:2570
unsigned getNumComponents() const
Definition Expr.h:2585
unsigned getArrayExprIndex() const
For an array element node, returns the index into the array of expressions.
Definition Expr.h:2482
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition Expr.h:2488
@ Array
An index into an array.
Definition Expr.h:2429
@ Identifier
A field in a dependent type, known only by its name.
Definition Expr.h:2433
@ Field
A field.
Definition Expr.h:2431
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition Expr.h:2436
Kind getKind() const
Determine what kind of offsetof node this is.
Definition Expr.h:2478
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition Expr.h:2498
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:1211
Expr * getSelectedExpr() const
Definition ExprCXX.h:4640
const Expr * getSubExpr() const
Definition Expr.h:2202
Pointer-authentication qualifiers.
Definition TypeBase.h:152
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3336
A (possibly-)qualified type.
Definition TypeBase.h:937
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:1459
bool mayBeDynamicClass() const
Returns true if it is a class and it might be dynamic.
Definition Type.cpp:131
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8388
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1444
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8573
QualType getCanonicalType() const
Definition TypeBase.h:8440
bool UseExcessPrecision(const ASTContext &Ctx)
Definition Type.cpp:1625
bool mayBeNotDynamicClass() const
Returns true if it is not a class or if the class might not be dynamic.
Definition Type.cpp:136
bool isCanonical() const
Definition TypeBase.h:8445
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
void removePointerAuth()
Definition TypeBase.h:610
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4527
bool isSatisfied() const
Whether or not the requires clause is satisfied.
std::string ComputeName(ASTContext &Context) const
Definition Expr.cpp:587
static constexpr SanitizerMask bitPosToMask(const unsigned Pos)
Create a mask with a bit enabled at position Pos.
Definition Sanitizers.h:59
llvm::APSInt getShuffleMaskIdx(unsigned N) const
Definition Expr.h:4698
unsigned getNumSubExprs() const
getNumSubExprs - Return the size of the SubExprs array.
Definition Expr.h:4679
Expr * getExpr(unsigned Index)
getExpr - Return the Expr at the specified index.
Definition Expr.h:4685
unsigned getPackLength() const
Retrieve the length of the parameter pack.
Definition ExprCXX.h:4516
APValue EvaluateInContext(const ASTContext &Ctx, const Expr *DefaultExpr) const
Return the result of evaluating this SourceLocExpr in the specified (and possibly null) default argum...
Definition Expr.cpp:2282
SourceLocation getLocation() const
Definition Expr.h:5064
Encodes a location in the source.
CompoundStmt * getSubStmt()
Definition Expr.h:4615
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
void dump() const
Dumps the specified AST fragment and all subtrees to llvm::errs().
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:355
virtual bool useFP16ConversionIntrinsics() const
Check whether conversions to and from __fp16 should go through an integer bitcast with i16.
VersionTuple getPlatformMinVersion() const
Retrieve the minimum desired version of the platform, to which the program should be compiled.
const llvm::fltSemantics & getHalfFormat() const
Definition TargetInfo.h:788
const llvm::fltSemantics & getBFloat16Format() const
Definition TargetInfo.h:798
const llvm::fltSemantics & getLongDoubleFormat() const
Definition TargetInfo.h:809
const llvm::fltSemantics & getFloat128Format() const
Definition TargetInfo.h:817
const llvm::fltSemantics & getIbm128Format() const
Definition TargetInfo.h:825
QualType getType() const
Return the type wrapped by this type source info.
Definition TypeBase.h:8370
bool getBoolValue() const
Definition ExprCXX.h:2948
const APValue & getAPValue() const
Definition ExprCXX.h:2953
bool isStoredAsBoolean() const
Definition ExprCXX.h:2944
bool isVoidType() const
Definition TypeBase.h:8991
bool isBooleanType() const
Definition TypeBase.h:9128
bool isSignableType(const ASTContext &Ctx) const
Definition TypeBase.h:8637
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2253
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition Type.cpp:2307
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2374
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:9035
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9285
bool isReferenceType() const
Definition TypeBase.h:8649
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1922
bool isSveVLSBuiltinType() const
Determines if this is a sizeless type supported by the 'arm_sve_vector_bits' type attribute,...
Definition Type.cpp:2651
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
bool isExtVectorType() const
Definition TypeBase.h:8768
bool isExtVectorBoolType() const
Definition TypeBase.h:8772
bool isOCLIntelSubgroupAVCType() const
Definition TypeBase.h:8910
bool isBuiltinType() const
Helper methods to distinguish type categories.
Definition TypeBase.h:8748
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8760
bool isFixedPointType() const
Return true if this is a fixed point type according to ISO/IEC JTC1 SC22 WG14 N1169.
Definition TypeBase.h:9051
bool isHalfType() const
Definition TypeBase.h:8995
bool hasSignedIntegerRepresentation() const
Determine whether this type has an signed integer representation of some sort, e.g....
Definition Type.cpp:2274
bool isQueueT() const
Definition TypeBase.h:8881
bool isMatrixType() const
Definition TypeBase.h:8788
bool isEventT() const
Definition TypeBase.h:8873
bool isFunctionType() const
Definition TypeBase.h:8621
bool isVectorType() const
Definition TypeBase.h:8764
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2357
bool isFloatingType() const
Definition Type.cpp:2341
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2284
const T * castAsCanonical() const
Return this type's canonical type cast to the specified type.
Definition TypeBase.h:2936
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9218
bool isNullPtrType() const
Definition TypeBase.h:9028
QualType getTypeOfArgument() const
Gets the argument type, or the type of the argument expression, whichever is appropriate.
Definition Expr.h:2697
UnaryExprOrTypeTrait getKind() const
Definition Expr.h:2660
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
SourceLocation getExprLoc() const
Definition Expr.h:2371
Expr * getSubExpr() const
Definition Expr.h:2288
Opcode getOpcode() const
Definition Expr.h:2283
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:2403
bool canOverflow() const
Returns true if the unary operator can cause an overflow.
Definition Expr.h:2301
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
bool isWeak() const
Determine whether this symbol is weakly-imported, or declared with the weak or weak-ref attr.
Definition Decl.cpp:5582
QualType getType() const
Definition Value.cpp:237
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3974
Represents a GCC generic vector type.
Definition TypeBase.h:4183
Defines the clang::TargetInfo interface.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::ArgumentAdaptingMatcherFunc< internal::HasMatcher > has
Matches AST nodes that have child AST nodes that match the provided matcher.
const AstTypeMatcher< PointerType > pointerType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
bool BitCast(InterpState &S, CodePtr OpPC)
Definition Interp.h:3681
llvm::APFloat APFloat
Definition Floating.h:27
llvm::APInt APInt
Definition FixedPoint.h:19
bool LE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1326
bool Load(InterpState &S, CodePtr OpPC)
Definition Interp.h:1986
bool GE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1341
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
if(T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T -> getSizeExpr())))
@ Result
The result type of a method or function.
Definition TypeBase.h:905
CastKind
CastKind - The kind of operation required for a conversion.
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
#define false
Definition stdbool.h:26
#define true
Definition stdbool.h:25
llvm::Value * TotalOffset
llvm::Value * OffsetOverflows
Structure with information about how a bitfield should be accessed.
unsigned Size
The total size of the bit-field, in bits.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::Type * HalfTy
half, bfloat, float, double
static TBAAAccessInfo getMayAliasInfo()
Definition CodeGenTBAA.h:63
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition Sanitizers.h:184