clang 19.0.0git
CGExprScalar.cpp
Go to the documentation of this file.
1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGObjCRuntime.h"
17#include "CGOpenMPRuntime.h"
18#include "CGRecordLayout.h"
19#include "CodeGenFunction.h"
20#include "CodeGenModule.h"
21#include "ConstantEmitter.h"
22#include "TargetInfo.h"
24#include "clang/AST/Attr.h"
25#include "clang/AST/DeclObjC.h"
26#include "clang/AST/Expr.h"
31#include "llvm/ADT/APFixedPoint.h"
32#include "llvm/IR/CFG.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/DerivedTypes.h"
36#include "llvm/IR/FixedPointBuilder.h"
37#include "llvm/IR/Function.h"
38#include "llvm/IR/GetElementPtrTypeIterator.h"
39#include "llvm/IR/GlobalVariable.h"
40#include "llvm/IR/Intrinsics.h"
41#include "llvm/IR/IntrinsicsPowerPC.h"
42#include "llvm/IR/MatrixBuilder.h"
43#include "llvm/IR/Module.h"
44#include "llvm/Support/TypeSize.h"
45#include <cstdarg>
46#include <optional>
47
48using namespace clang;
49using namespace CodeGen;
50using llvm::Value;
51
52//===----------------------------------------------------------------------===//
53// Scalar Expression Emitter
54//===----------------------------------------------------------------------===//
55
56namespace llvm {
57extern cl::opt<bool> EnableSingleByteCoverage;
58} // namespace llvm
59
60namespace {
61
62/// Determine whether the given binary operation may overflow.
63/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
64/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
65/// the returned overflow check is precise. The returned value is 'true' for
66/// all other opcodes, to be conservative.
67bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
68 BinaryOperator::Opcode Opcode, bool Signed,
69 llvm::APInt &Result) {
70 // Assume overflow is possible, unless we can prove otherwise.
71 bool Overflow = true;
72 const auto &LHSAP = LHS->getValue();
73 const auto &RHSAP = RHS->getValue();
74 if (Opcode == BO_Add) {
75 Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
76 : LHSAP.uadd_ov(RHSAP, Overflow);
77 } else if (Opcode == BO_Sub) {
78 Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
79 : LHSAP.usub_ov(RHSAP, Overflow);
80 } else if (Opcode == BO_Mul) {
81 Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
82 : LHSAP.umul_ov(RHSAP, Overflow);
83 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
84 if (Signed && !RHS->isZero())
85 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
86 else
87 return false;
88 }
89 return Overflow;
90}
91
92struct BinOpInfo {
93 Value *LHS;
94 Value *RHS;
95 QualType Ty; // Computation Type.
96 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
97 FPOptions FPFeatures;
98 const Expr *E; // Entire expr, for error unsupported. May not be binop.
99
100 /// Check if the binop can result in integer overflow.
101 bool mayHaveIntegerOverflow() const {
102 // Without constant input, we can't rule out overflow.
103 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
104 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
105 if (!LHSCI || !RHSCI)
106 return true;
107
108 llvm::APInt Result;
109 return ::mayHaveIntegerOverflow(
110 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
111 }
112
113 /// Check if the binop computes a division or a remainder.
114 bool isDivremOp() const {
115 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
116 Opcode == BO_RemAssign;
117 }
118
119 /// Check if the binop can result in an integer division by zero.
120 bool mayHaveIntegerDivisionByZero() const {
121 if (isDivremOp())
122 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
123 return CI->isZero();
124 return true;
125 }
126
127 /// Check if the binop can result in a float division by zero.
128 bool mayHaveFloatDivisionByZero() const {
129 if (isDivremOp())
130 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
131 return CFP->isZero();
132 return true;
133 }
134
135 /// Check if at least one operand is a fixed point type. In such cases, this
136 /// operation did not follow usual arithmetic conversion and both operands
137 /// might not be of the same type.
138 bool isFixedPointOp() const {
139 // We cannot simply check the result type since comparison operations return
140 // an int.
141 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
142 QualType LHSType = BinOp->getLHS()->getType();
143 QualType RHSType = BinOp->getRHS()->getType();
144 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
145 }
146 if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
147 return UnOp->getSubExpr()->getType()->isFixedPointType();
148 return false;
149 }
150
151 /// Check if the RHS has a signed integer representation.
152 bool rhsHasSignedIntegerRepresentation() const {
153 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
154 QualType RHSType = BinOp->getRHS()->getType();
155 return RHSType->hasSignedIntegerRepresentation();
156 }
157 return false;
158 }
159};
160
161static bool MustVisitNullValue(const Expr *E) {
162 // If a null pointer expression's type is the C++0x nullptr_t, then
163 // it's not necessarily a simple constant and it must be evaluated
164 // for its potential side effects.
165 return E->getType()->isNullPtrType();
166}
167
168/// If \p E is a widened promoted integer, get its base (unpromoted) type.
169static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
170 const Expr *E) {
171 const Expr *Base = E->IgnoreImpCasts();
172 if (E == Base)
173 return std::nullopt;
174
175 QualType BaseTy = Base->getType();
176 if (!Ctx.isPromotableIntegerType(BaseTy) ||
177 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
178 return std::nullopt;
179
180 return BaseTy;
181}
182
183/// Check if \p E is a widened promoted integer.
184static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
185 return getUnwidenedIntegerType(Ctx, E).has_value();
186}
187
188/// Check if we can skip the overflow check for \p Op.
189static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
190 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
191 "Expected a unary or binary operator");
192
193 // If the binop has constant inputs and we can prove there is no overflow,
194 // we can elide the overflow check.
195 if (!Op.mayHaveIntegerOverflow())
196 return true;
197
198 // If a unary op has a widened operand, the op cannot overflow.
199 if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
200 return !UO->canOverflow();
201
202 // We usually don't need overflow checks for binops with widened operands.
203 // Multiplication with promoted unsigned operands is a special case.
204 const auto *BO = cast<BinaryOperator>(Op.E);
205 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
206 if (!OptionalLHSTy)
207 return false;
208
209 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
210 if (!OptionalRHSTy)
211 return false;
212
213 QualType LHSTy = *OptionalLHSTy;
214 QualType RHSTy = *OptionalRHSTy;
215
216 // This is the simple case: binops without unsigned multiplication, and with
217 // widened operands. No overflow check is needed here.
218 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
219 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
220 return true;
221
222 // For unsigned multiplication the overflow check can be elided if either one
223 // of the unpromoted types are less than half the size of the promoted type.
224 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
225 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
226 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
227}
228
229class ScalarExprEmitter
230 : public StmtVisitor<ScalarExprEmitter, Value*> {
231 CodeGenFunction &CGF;
232 CGBuilderTy &Builder;
233 bool IgnoreResultAssign;
234 llvm::LLVMContext &VMContext;
235public:
236
237 ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
238 : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
239 VMContext(cgf.getLLVMContext()) {
240 }
241
242 //===--------------------------------------------------------------------===//
243 // Utilities
244 //===--------------------------------------------------------------------===//
245
246 bool TestAndClearIgnoreResultAssign() {
247 bool I = IgnoreResultAssign;
248 IgnoreResultAssign = false;
249 return I;
250 }
251
252 llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
253 LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
254 LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
255 return CGF.EmitCheckedLValue(E, TCK);
256 }
257
258 void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
259 const BinOpInfo &Info);
260
261 Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
262 return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
263 }
264
265 void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
266 const AlignValueAttr *AVAttr = nullptr;
267 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
268 const ValueDecl *VD = DRE->getDecl();
269
270 if (VD->getType()->isReferenceType()) {
271 if (const auto *TTy =
273 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
274 } else {
275 // Assumptions for function parameters are emitted at the start of the
276 // function, so there is no need to repeat that here,
277 // unless the alignment-assumption sanitizer is enabled,
278 // then we prefer the assumption over alignment attribute
279 // on IR function param.
280 if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
281 return;
282
283 AVAttr = VD->getAttr<AlignValueAttr>();
284 }
285 }
286
287 if (!AVAttr)
288 if (const auto *TTy = E->getType()->getAs<TypedefType>())
289 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
290
291 if (!AVAttr)
292 return;
293
294 Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
295 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
296 CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
297 }
298
299 /// EmitLoadOfLValue - Given an expression with complex type that represents a
300 /// value l-value, this method emits the address of the l-value, then loads
301 /// and returns the result.
302 Value *EmitLoadOfLValue(const Expr *E) {
303 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
304 E->getExprLoc());
305
306 EmitLValueAlignmentAssumption(E, V);
307 return V;
308 }
309
310 /// EmitConversionToBool - Convert the specified expression value to a
311 /// boolean (i1) truth value. This is equivalent to "Val != 0".
312 Value *EmitConversionToBool(Value *Src, QualType DstTy);
313
314 /// Emit a check that a conversion from a floating-point type does not
315 /// overflow.
316 void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
317 Value *Src, QualType SrcType, QualType DstType,
318 llvm::Type *DstTy, SourceLocation Loc);
319
320 /// Known implicit conversion check kinds.
321 /// This is used for bitfield conversion checks as well.
322 /// Keep in sync with the enum of the same name in ubsan_handlers.h
323 enum ImplicitConversionCheckKind : unsigned char {
324 ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
325 ICCK_UnsignedIntegerTruncation = 1,
326 ICCK_SignedIntegerTruncation = 2,
327 ICCK_IntegerSignChange = 3,
328 ICCK_SignedIntegerTruncationOrSignChange = 4,
329 };
330
331 /// Emit a check that an [implicit] truncation of an integer does not
332 /// discard any bits. It is not UB, so we use the value after truncation.
333 void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
334 QualType DstType, SourceLocation Loc);
335
336 /// Emit a check that an [implicit] conversion of an integer does not change
337 /// the sign of the value. It is not UB, so we use the value after conversion.
338 /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
339 void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
340 QualType DstType, SourceLocation Loc);
341
342 /// Emit a conversion from the specified type to the specified destination
343 /// type, both of which are LLVM scalar types.
344 struct ScalarConversionOpts {
345 bool TreatBooleanAsSigned;
346 bool EmitImplicitIntegerTruncationChecks;
347 bool EmitImplicitIntegerSignChangeChecks;
348
349 ScalarConversionOpts()
350 : TreatBooleanAsSigned(false),
351 EmitImplicitIntegerTruncationChecks(false),
352 EmitImplicitIntegerSignChangeChecks(false) {}
353
354 ScalarConversionOpts(clang::SanitizerSet SanOpts)
355 : TreatBooleanAsSigned(false),
356 EmitImplicitIntegerTruncationChecks(
357 SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
358 EmitImplicitIntegerSignChangeChecks(
359 SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
360 };
361 Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
362 llvm::Type *SrcTy, llvm::Type *DstTy,
363 ScalarConversionOpts Opts);
364 Value *
365 EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
366 SourceLocation Loc,
367 ScalarConversionOpts Opts = ScalarConversionOpts());
368
369 /// Convert between either a fixed point and other fixed point or fixed point
370 /// and an integer.
371 Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
372 SourceLocation Loc);
373
374 /// Emit a conversion from the specified complex type to the specified
375 /// destination type, where the destination type is an LLVM scalar type.
376 Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
377 QualType SrcTy, QualType DstTy,
378 SourceLocation Loc);
379
380 /// EmitNullValue - Emit a value that corresponds to null for the given type.
381 Value *EmitNullValue(QualType Ty);
382
383 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
384 Value *EmitFloatToBoolConversion(Value *V) {
385 // Compare against 0.0 for fp scalars.
386 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
387 return Builder.CreateFCmpUNE(V, Zero, "tobool");
388 }
389
390 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
391 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
392 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
393
394 return Builder.CreateICmpNE(V, Zero, "tobool");
395 }
396
397 Value *EmitIntToBoolConversion(Value *V) {
398 // Because of the type rules of C, we often end up computing a
399 // logical value, then zero extending it to int, then wanting it
400 // as a logical value again. Optimize this common case.
401 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
402 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
403 Value *Result = ZI->getOperand(0);
404 // If there aren't any more uses, zap the instruction to save space.
405 // Note that there can be more uses, for example if this
406 // is the result of an assignment.
407 if (ZI->use_empty())
408 ZI->eraseFromParent();
409 return Result;
410 }
411 }
412
413 return Builder.CreateIsNotNull(V, "tobool");
414 }
415
416 //===--------------------------------------------------------------------===//
417 // Visitor Methods
418 //===--------------------------------------------------------------------===//
419
420 Value *Visit(Expr *E) {
421 ApplyDebugLocation DL(CGF, E);
423 }
424
425 Value *VisitStmt(Stmt *S) {
426 S->dump(llvm::errs(), CGF.getContext());
427 llvm_unreachable("Stmt can't have complex result type!");
428 }
429 Value *VisitExpr(Expr *S);
430
431 Value *VisitConstantExpr(ConstantExpr *E) {
432 // A constant expression of type 'void' generates no code and produces no
433 // value.
434 if (E->getType()->isVoidType())
435 return nullptr;
436
437 if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
438 if (E->isGLValue())
439 return CGF.Builder.CreateLoad(Address(
440 Result, CGF.ConvertTypeForMem(E->getType()),
442 return Result;
443 }
444 return Visit(E->getSubExpr());
445 }
446 Value *VisitParenExpr(ParenExpr *PE) {
447 return Visit(PE->getSubExpr());
448 }
449 Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
450 return Visit(E->getReplacement());
451 }
452 Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
453 return Visit(GE->getResultExpr());
454 }
455 Value *VisitCoawaitExpr(CoawaitExpr *S) {
456 return CGF.EmitCoawaitExpr(*S).getScalarVal();
457 }
458 Value *VisitCoyieldExpr(CoyieldExpr *S) {
459 return CGF.EmitCoyieldExpr(*S).getScalarVal();
460 }
461 Value *VisitUnaryCoawait(const UnaryOperator *E) {
462 return Visit(E->getSubExpr());
463 }
464
465 // Leaves.
466 Value *VisitIntegerLiteral(const IntegerLiteral *E) {
467 return Builder.getInt(E->getValue());
468 }
469 Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
470 return Builder.getInt(E->getValue());
471 }
472 Value *VisitFloatingLiteral(const FloatingLiteral *E) {
473 return llvm::ConstantFP::get(VMContext, E->getValue());
474 }
475 Value *VisitCharacterLiteral(const CharacterLiteral *E) {
476 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
477 }
478 Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
479 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
480 }
481 Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
482 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
483 }
484 Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
485 if (E->getType()->isVoidType())
486 return nullptr;
487
488 return EmitNullValue(E->getType());
489 }
490 Value *VisitGNUNullExpr(const GNUNullExpr *E) {
491 return EmitNullValue(E->getType());
492 }
493 Value *VisitOffsetOfExpr(OffsetOfExpr *E);
494 Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
495 Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
496 llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
497 return Builder.CreateBitCast(V, ConvertType(E->getType()));
498 }
499
500 Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
501 return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
502 }
503
504 Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
505 return CGF.EmitPseudoObjectRValue(E).getScalarVal();
506 }
507
508 Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
509
510 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
511 if (E->isGLValue())
512 return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
513 E->getExprLoc());
514
515 // Otherwise, assume the mapping is the scalar directly.
517 }
518
519 // l-values.
520 Value *VisitDeclRefExpr(DeclRefExpr *E) {
521 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
522 return CGF.emitScalarConstant(Constant, E);
523 return EmitLoadOfLValue(E);
524 }
525
526 Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
527 return CGF.EmitObjCSelectorExpr(E);
528 }
529 Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
530 return CGF.EmitObjCProtocolExpr(E);
531 }
532 Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
533 return EmitLoadOfLValue(E);
534 }
535 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
536 if (E->getMethodDecl() &&
538 return EmitLoadOfLValue(E);
539 return CGF.EmitObjCMessageExpr(E).getScalarVal();
540 }
541
542 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
543 LValue LV = CGF.EmitObjCIsaExpr(E);
545 return V;
546 }
547
548 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
549 VersionTuple Version = E->getVersion();
550
551 // If we're checking for a platform older than our minimum deployment
552 // target, we can fold the check away.
553 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
554 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
555
556 return CGF.EmitBuiltinAvailable(Version);
557 }
558
559 Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
560 Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
561 Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
562 Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
563 Value *VisitMemberExpr(MemberExpr *E);
564 Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
565 Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
566 // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
567 // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
568 // literals aren't l-values in C++. We do so simply because that's the
569 // cleanest way to handle compound literals in C++.
570 // See the discussion here: https://reviews.llvm.org/D64464
571 return EmitLoadOfLValue(E);
572 }
573
574 Value *VisitInitListExpr(InitListExpr *E);
575
576 Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
577 assert(CGF.getArrayInitIndex() &&
578 "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
579 return CGF.getArrayInitIndex();
580 }
581
582 Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
583 return EmitNullValue(E->getType());
584 }
585 Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
586 CGF.CGM.EmitExplicitCastExprType(E, &CGF);
587 return VisitCastExpr(E);
588 }
589 Value *VisitCastExpr(CastExpr *E);
590
591 Value *VisitCallExpr(const CallExpr *E) {
593 return EmitLoadOfLValue(E);
594
595 Value *V = CGF.EmitCallExpr(E).getScalarVal();
596
597 EmitLValueAlignmentAssumption(E, V);
598 return V;
599 }
600
601 Value *VisitStmtExpr(const StmtExpr *E);
602
603 // Unary Operators.
604 Value *VisitUnaryPostDec(const UnaryOperator *E) {
605 LValue LV = EmitLValue(E->getSubExpr());
606 return EmitScalarPrePostIncDec(E, LV, false, false);
607 }
608 Value *VisitUnaryPostInc(const UnaryOperator *E) {
609 LValue LV = EmitLValue(E->getSubExpr());
610 return EmitScalarPrePostIncDec(E, LV, true, false);
611 }
612 Value *VisitUnaryPreDec(const UnaryOperator *E) {
613 LValue LV = EmitLValue(E->getSubExpr());
614 return EmitScalarPrePostIncDec(E, LV, false, true);
615 }
616 Value *VisitUnaryPreInc(const UnaryOperator *E) {
617 LValue LV = EmitLValue(E->getSubExpr());
618 return EmitScalarPrePostIncDec(E, LV, true, true);
619 }
620
621 llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
622 llvm::Value *InVal,
623 bool IsInc);
624
625 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
626 bool isInc, bool isPre);
627
628
629 Value *VisitUnaryAddrOf(const UnaryOperator *E) {
630 if (isa<MemberPointerType>(E->getType())) // never sugared
631 return CGF.CGM.getMemberPointerConstant(E);
632
633 return EmitLValue(E->getSubExpr()).getPointer(CGF);
634 }
635 Value *VisitUnaryDeref(const UnaryOperator *E) {
636 if (E->getType()->isVoidType())
637 return Visit(E->getSubExpr()); // the actual value should be unused
638 return EmitLoadOfLValue(E);
639 }
640
641 Value *VisitUnaryPlus(const UnaryOperator *E,
642 QualType PromotionType = QualType());
643 Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
644 Value *VisitUnaryMinus(const UnaryOperator *E,
645 QualType PromotionType = QualType());
646 Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);
647
648 Value *VisitUnaryNot (const UnaryOperator *E);
649 Value *VisitUnaryLNot (const UnaryOperator *E);
650 Value *VisitUnaryReal(const UnaryOperator *E,
651 QualType PromotionType = QualType());
652 Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
653 Value *VisitUnaryImag(const UnaryOperator *E,
654 QualType PromotionType = QualType());
655 Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
656 Value *VisitUnaryExtension(const UnaryOperator *E) {
657 return Visit(E->getSubExpr());
658 }
659
660 // C++
661 Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
662 return EmitLoadOfLValue(E);
663 }
664 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
665 auto &Ctx = CGF.getContext();
669 SLE->getType());
670 }
671
672 Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
673 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
674 return Visit(DAE->getExpr());
675 }
676 Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
677 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
678 return Visit(DIE->getExpr());
679 }
680 Value *VisitCXXThisExpr(CXXThisExpr *TE) {
681 return CGF.LoadCXXThis();
682 }
683
684 Value *VisitExprWithCleanups(ExprWithCleanups *E);
685 Value *VisitCXXNewExpr(const CXXNewExpr *E) {
686 return CGF.EmitCXXNewExpr(E);
687 }
688 Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
689 CGF.EmitCXXDeleteExpr(E);
690 return nullptr;
691 }
692
693 Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
694 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
695 }
696
697 Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
698 return Builder.getInt1(E->isSatisfied());
699 }
700
701 Value *VisitRequiresExpr(const RequiresExpr *E) {
702 return Builder.getInt1(E->isSatisfied());
703 }
704
705 Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
706 return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
707 }
708
709 Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
710 return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
711 }
712
713 Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
714 // C++ [expr.pseudo]p1:
715 // The result shall only be used as the operand for the function call
716 // operator (), and the result of such a call has type void. The only
717 // effect is the evaluation of the postfix-expression before the dot or
718 // arrow.
719 CGF.EmitScalarExpr(E->getBase());
720 return nullptr;
721 }
722
723 Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
724 return EmitNullValue(E->getType());
725 }
726
727 Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
728 CGF.EmitCXXThrowExpr(E);
729 return nullptr;
730 }
731
732 Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
733 return Builder.getInt1(E->getValue());
734 }
735
736 // Binary Operators.
737 Value *EmitMul(const BinOpInfo &Ops) {
738 if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
739 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
741 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
742 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
743 [[fallthrough]];
745 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
746 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
747 [[fallthrough]];
749 if (CanElideOverflowCheck(CGF.getContext(), Ops))
750 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
751 return EmitOverflowCheckedBinOp(Ops);
752 }
753 }
754
755 if (Ops.Ty->isConstantMatrixType()) {
756 llvm::MatrixBuilder MB(Builder);
757 // We need to check the types of the operands of the operator to get the
758 // correct matrix dimensions.
759 auto *BO = cast<BinaryOperator>(Ops.E);
760 auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
761 BO->getLHS()->getType().getCanonicalType());
762 auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
763 BO->getRHS()->getType().getCanonicalType());
764 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
765 if (LHSMatTy && RHSMatTy)
766 return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
767 LHSMatTy->getNumColumns(),
768 RHSMatTy->getNumColumns());
769 return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
770 }
771
772 if (Ops.Ty->isUnsignedIntegerType() &&
773 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
774 !CanElideOverflowCheck(CGF.getContext(), Ops))
775 return EmitOverflowCheckedBinOp(Ops);
776
777 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
778 // Preserve the old values
779 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
780 return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
781 }
782 if (Ops.isFixedPointOp())
783 return EmitFixedPointBinOp(Ops);
784 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
785 }
786 /// Create a binary op that checks for overflow.
787 /// Currently only supports +, - and *.
788 Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
789
790 // Check for undefined division and modulus behaviors.
791 void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
792 llvm::Value *Zero,bool isDiv);
793 // Common helper for getting how wide LHS of shift is.
794 static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);
795
796 // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
797 // non powers of two.
798 Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);
799
800 Value *EmitDiv(const BinOpInfo &Ops);
801 Value *EmitRem(const BinOpInfo &Ops);
802 Value *EmitAdd(const BinOpInfo &Ops);
803 Value *EmitSub(const BinOpInfo &Ops);
804 Value *EmitShl(const BinOpInfo &Ops);
805 Value *EmitShr(const BinOpInfo &Ops);
806 Value *EmitAnd(const BinOpInfo &Ops) {
807 return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
808 }
809 Value *EmitXor(const BinOpInfo &Ops) {
810 return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
811 }
812 Value *EmitOr (const BinOpInfo &Ops) {
813 return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
814 }
815
816 // Helper functions for fixed point binary operations.
817 Value *EmitFixedPointBinOp(const BinOpInfo &Ops);
818
819 BinOpInfo EmitBinOps(const BinaryOperator *E,
820 QualType PromotionTy = QualType());
821
822 Value *EmitPromotedValue(Value *result, QualType PromotionType);
823 Value *EmitUnPromotedValue(Value *result, QualType ExprType);
824 Value *EmitPromoted(const Expr *E, QualType PromotionType);
825
826 LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
827 Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
828 Value *&Result);
829
830 Value *EmitCompoundAssign(const CompoundAssignOperator *E,
831 Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
832
833 QualType getPromotionType(QualType Ty) {
834 const auto &Ctx = CGF.getContext();
835 if (auto *CT = Ty->getAs<ComplexType>()) {
836 QualType ElementType = CT->getElementType();
837 if (ElementType.UseExcessPrecision(Ctx))
838 return Ctx.getComplexType(Ctx.FloatTy);
839 }
840
841 if (Ty.UseExcessPrecision(Ctx)) {
842 if (auto *VT = Ty->getAs<VectorType>()) {
843 unsigned NumElements = VT->getNumElements();
844 return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
845 }
846 return Ctx.FloatTy;
847 }
848
849 return QualType();
850 }
851
852 // Binary operators and binary compound assignment operators.
853#define HANDLEBINOP(OP) \
854 Value *VisitBin##OP(const BinaryOperator *E) { \
855 QualType promotionTy = getPromotionType(E->getType()); \
856 auto result = Emit##OP(EmitBinOps(E, promotionTy)); \
857 if (result && !promotionTy.isNull()) \
858 result = EmitUnPromotedValue(result, E->getType()); \
859 return result; \
860 } \
861 Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) { \
862 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP); \
863 }
864 HANDLEBINOP(Mul)
865 HANDLEBINOP(Div)
866 HANDLEBINOP(Rem)
867 HANDLEBINOP(Add)
868 HANDLEBINOP(Sub)
869 HANDLEBINOP(Shl)
870 HANDLEBINOP(Shr)
872 HANDLEBINOP(Xor)
874#undef HANDLEBINOP
875
876 // Comparisons.
877 Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
878 llvm::CmpInst::Predicate SICmpOpc,
879 llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
880#define VISITCOMP(CODE, UI, SI, FP, SIG) \
881 Value *VisitBin##CODE(const BinaryOperator *E) { \
882 return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
883 llvm::FCmpInst::FP, SIG); }
884 VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
885 VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
886 VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
887 VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
888 VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
889 VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
890#undef VISITCOMP
891
892 Value *VisitBinAssign (const BinaryOperator *E);
893
894 Value *VisitBinLAnd (const BinaryOperator *E);
895 Value *VisitBinLOr (const BinaryOperator *E);
896 Value *VisitBinComma (const BinaryOperator *E);
897
 // Pointer-to-member binary operator: emit the operand as an lvalue, load it.
 898 Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
 // Indirect pointer-to-member operator: same lowering — lvalue, then load.
 899 Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
900
 // A rewritten operator (e.g. one Sema synthesized from operator<=>) is
 // emitted via the semantic form Sema attached to it.
 901 Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
 902 return Visit(E->getSemanticForm());
 903 }
904
905 // Other Operators.
906 Value *VisitBlockExpr(const BlockExpr *BE);
907 Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
908 Value *VisitChooseExpr(ChooseExpr *CE);
909 Value *VisitVAArgExpr(VAArgExpr *VE);
 // Objective-C @"..." literal: delegate to CodeGenFunction's emitter.
 910 Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
 911 return CGF.EmitObjCStringLiteral(E);
 912 }
 // Objective-C boxed expression @(...): delegate to CodeGenFunction.
 913 Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
 914 return CGF.EmitObjCBoxedExpr(E);
 915 }
 // Objective-C array literal @[...]: delegate to CodeGenFunction.
 916 Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
 917 return CGF.EmitObjCArrayLiteral(E);
 918 }
 // Objective-C dictionary literal @{...}: delegate to CodeGenFunction.
 919 Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
 920 return CGF.EmitObjCDictionaryLiteral(E);
 921 }
922 Value *VisitAsTypeExpr(AsTypeExpr *CE);
923 Value *VisitAtomicExpr(AtomicExpr *AE);
 // Pack indexing (T...[N]): emit whichever pack element was selected.
 924 Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
 925 return Visit(E->getSelectedExpr());
 926 }
927};
928} // end anonymous namespace.
929
930//===----------------------------------------------------------------------===//
931// Utilities
932//===----------------------------------------------------------------------===//
933
934/// EmitConversionToBool - Convert the specified expression value to a
935/// boolean (i1) truth value. This is equivalent to "Val != 0".
936Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
937 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
938
939 if (SrcType->isRealFloatingType())
940 return EmitFloatToBoolConversion(Src);
941
942 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
943 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
944
945 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
946 "Unknown scalar type to convert");
947
948 if (isa<llvm::IntegerType>(Src->getType()))
949 return EmitIntToBoolConversion(Src);
950
951 assert(isa<llvm::PointerType>(Src->getType()));
952 return EmitPointerToBoolConversion(Src, SrcType);
953}
954
955void ScalarExprEmitter::EmitFloatConversionCheck(
956 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
957 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
958 assert(SrcType->isFloatingType() && "not a conversion from floating point");
959 if (!isa<llvm::IntegerType>(DstTy))
960 return;
961
962 CodeGenFunction::SanitizerScope SanScope(&CGF);
963 using llvm::APFloat;
964 using llvm::APSInt;
965
966 llvm::Value *Check = nullptr;
967 const llvm::fltSemantics &SrcSema =
968 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
969
970 // Floating-point to integer. This has undefined behavior if the source is
971 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
972 // to an integer).
973 unsigned Width = CGF.getContext().getIntWidth(DstType);
975
976 APSInt Min = APSInt::getMinValue(Width, Unsigned);
977 APFloat MinSrc(SrcSema, APFloat::uninitialized);
978 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
979 APFloat::opOverflow)
980 // Don't need an overflow check for lower bound. Just check for
981 // -Inf/NaN.
982 MinSrc = APFloat::getInf(SrcSema, true);
983 else
984 // Find the largest value which is too small to represent (before
985 // truncation toward zero).
986 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
987
988 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
989 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
990 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
991 APFloat::opOverflow)
992 // Don't need an overflow check for upper bound. Just check for
993 // +Inf/NaN.
994 MaxSrc = APFloat::getInf(SrcSema, false);
995 else
996 // Find the smallest value which is too large to represent (before
997 // truncation toward zero).
998 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
999
1000 // If we're converting from __half, convert the range to float to match
1001 // the type of src.
1002 if (OrigSrcType->isHalfType()) {
1003 const llvm::fltSemantics &Sema =
1004 CGF.getContext().getFloatTypeSemantics(SrcType);
1005 bool IsInexact;
1006 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1007 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1008 }
1009
1010 llvm::Value *GE =
1011 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
1012 llvm::Value *LE =
1013 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
1014 Check = Builder.CreateAnd(GE, LE);
1015
1016 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
1017 CGF.EmitCheckTypeDescriptor(OrigSrcType),
1018 CGF.EmitCheckTypeDescriptor(DstType)};
1019 CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
1020 SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
1021}
1022
1023// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1024// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1025static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1026 std::pair<llvm::Value *, SanitizerMask>>
1028 QualType DstType, CGBuilderTy &Builder) {
1029 llvm::Type *SrcTy = Src->getType();
1030 llvm::Type *DstTy = Dst->getType();
1031 (void)DstTy; // Only used in assert()
1032
1033 // This should be truncation of integral types.
1034 assert(Src != Dst);
1035 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
1036 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1037 "non-integer llvm type");
1038
1039 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1040 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1041
1042 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
1043 // Else, it is a signed truncation.
1044 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1045 SanitizerMask Mask;
1046 if (!SrcSigned && !DstSigned) {
1047 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1048 Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
1049 } else {
1050 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1051 Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
1052 }
1053
1054 llvm::Value *Check = nullptr;
1055 // 1. Extend the truncated value back to the same width as the Src.
1056 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
1057 // 2. Equality-compare with the original source value
1058 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1059 // If the comparison result is 'i1 false', then the truncation was lossy.
1060 return std::make_pair(Kind, std::make_pair(Check, Mask));
1061}
1062
1064 QualType SrcType, QualType DstType) {
1065 return SrcType->isIntegerType() && DstType->isIntegerType();
1066}
1067
1068void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1069 Value *Dst, QualType DstType,
1070 SourceLocation Loc) {
1071 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
1072 return;
1073
1074 // We only care about int->int conversions here.
1075 // We ignore conversions to/from pointer and/or bool.
1077 DstType))
1078 return;
1079
1080 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1081 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1082 // This must be truncation. Else we do not care.
1083 if (SrcBits <= DstBits)
1084 return;
1085
1086 assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1087
1088 // If the integer sign change sanitizer is enabled,
1089 // and we are truncating from larger unsigned type to smaller signed type,
1090 // let that next sanitizer deal with it.
1091 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1092 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1093 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1094 (!SrcSigned && DstSigned))
1095 return;
1096
1097 CodeGenFunction::SanitizerScope SanScope(&CGF);
1098
1099 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1100 std::pair<llvm::Value *, SanitizerMask>>
1101 Check =
1102 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1103 // If the comparison result is 'i1 false', then the truncation was lossy.
1104
1105 // Do we care about this type of truncation?
1106 if (!CGF.SanOpts.has(Check.second.second))
1107 return;
1108
1109 llvm::Constant *StaticArgs[] = {
1110 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1111 CGF.EmitCheckTypeDescriptor(DstType),
1112 llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
1113 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1114
1115 CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
1116 {Src, Dst});
1117}
1118
1119static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
1120 const char *Name,
1121 CGBuilderTy &Builder) {
1122 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1123 llvm::Type *VTy = V->getType();
1124 if (!VSigned) {
1125 // If the value is unsigned, then it is never negative.
1126 return llvm::ConstantInt::getFalse(VTy->getContext());
1127 }
1128 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1129 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1130 llvm::Twine(Name) + "." + V->getName() +
1131 ".negativitycheck");
1132}
1133
1134// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1135// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1136static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1137 std::pair<llvm::Value *, SanitizerMask>>
1139 QualType DstType, CGBuilderTy &Builder) {
1140 llvm::Type *SrcTy = Src->getType();
1141 llvm::Type *DstTy = Dst->getType();
1142
1143 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1144 "non-integer llvm type");
1145
1146 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1147 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1148 (void)SrcSigned; // Only used in assert()
1149 (void)DstSigned; // Only used in assert()
1150 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1151 unsigned DstBits = DstTy->getScalarSizeInBits();
1152 (void)SrcBits; // Only used in assert()
1153 (void)DstBits; // Only used in assert()
1154
1155 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1156 "either the widths should be different, or the signednesses.");
1157
1158 // 1. Was the old Value negative?
1159 llvm::Value *SrcIsNegative =
1160 EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
1161 // 2. Is the new Value negative?
1162 llvm::Value *DstIsNegative =
1163 EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
1164 // 3. Now, was the 'negativity status' preserved during the conversion?
1165 // NOTE: conversion from negative to zero is considered to change the sign.
1166 // (We want to get 'false' when the conversion changed the sign)
1167 // So we should just equality-compare the negativity statuses.
1168 llvm::Value *Check = nullptr;
1169 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1170 // If the comparison result is 'false', then the conversion changed the sign.
1171 return std::make_pair(
1172 ScalarExprEmitter::ICCK_IntegerSignChange,
1173 std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
1174}
1175
1176void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1177 Value *Dst, QualType DstType,
1178 SourceLocation Loc) {
1179 if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
1180 return;
1181
1182 llvm::Type *SrcTy = Src->getType();
1183 llvm::Type *DstTy = Dst->getType();
1184
1185 // We only care about int->int conversions here.
1186 // We ignore conversions to/from pointer and/or bool.
1188 DstType))
1189 return;
1190
1191 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1192 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1193 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1194 unsigned DstBits = DstTy->getScalarSizeInBits();
1195
1196 // Now, we do not need to emit the check in *all* of the cases.
1197 // We can avoid emitting it in some obvious cases where it would have been
1198 // dropped by the opt passes (instcombine) always anyways.
1199 // If it's a cast between effectively the same type, no check.
1200 // NOTE: this is *not* equivalent to checking the canonical types.
1201 if (SrcSigned == DstSigned && SrcBits == DstBits)
1202 return;
1203 // At least one of the values needs to have signed type.
1204 // If both are unsigned, then obviously, neither of them can be negative.
1205 if (!SrcSigned && !DstSigned)
1206 return;
1207 // If the conversion is to *larger* *signed* type, then no check is needed.
1208 // Because either sign-extension happens (so the sign will remain),
1209 // or zero-extension will happen (the sign bit will be zero.)
1210 if ((DstBits > SrcBits) && DstSigned)
1211 return;
1212 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1213 (SrcBits > DstBits) && SrcSigned) {
1214 // If the signed integer truncation sanitizer is enabled,
1215 // and this is a truncation from signed type, then no check is needed.
1216 // Because here sign change check is interchangeable with truncation check.
1217 return;
1218 }
1219 // That's it. We can't rule out any more cases with the data we have.
1220
1221 CodeGenFunction::SanitizerScope SanScope(&CGF);
1222
1223 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1224 std::pair<llvm::Value *, SanitizerMask>>
1225 Check;
1226
1227 // Each of these checks needs to return 'false' when an issue was detected.
1228 ImplicitConversionCheckKind CheckKind;
1230 // So we can 'and' all the checks together, and still get 'false',
1231 // if at least one of the checks detected an issue.
1232
1233 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1234 CheckKind = Check.first;
1235 Checks.emplace_back(Check.second);
1236
1237 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1238 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1239 // If the signed integer truncation sanitizer was enabled,
1240 // and we are truncating from larger unsigned type to smaller signed type,
1241 // let's handle the case we skipped in that check.
1242 Check =
1243 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1244 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1245 Checks.emplace_back(Check.second);
1246 // If the comparison result is 'i1 false', then the truncation was lossy.
1247 }
1248
1249 llvm::Constant *StaticArgs[] = {
1250 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1251 CGF.EmitCheckTypeDescriptor(DstType),
1252 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1253 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1254 // EmitCheck() will 'and' all the checks together.
1255 CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
1256 {Src, Dst});
1257}
1258
1259// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1260// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1261static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1262 std::pair<llvm::Value *, SanitizerMask>>
1264 QualType DstType, CGBuilderTy &Builder) {
1265 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1266 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1267
1268 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1269 if (!SrcSigned && !DstSigned)
1270 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1271 else
1272 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1273
1274 llvm::Value *Check = nullptr;
1275 // 1. Extend the truncated value back to the same width as the Src.
1276 Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
1277 // 2. Equality-compare with the original source value
1278 Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
1279 // If the comparison result is 'i1 false', then the truncation was lossy.
1280
1281 return std::make_pair(
1282 Kind, std::make_pair(Check, SanitizerKind::ImplicitBitfieldConversion));
1283}
1284
1285// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1286// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1287static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1288 std::pair<llvm::Value *, SanitizerMask>>
1290 QualType DstType, CGBuilderTy &Builder) {
1291 // 1. Was the old Value negative?
1292 llvm::Value *SrcIsNegative =
1293 EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
1294 // 2. Is the new Value negative?
1295 llvm::Value *DstIsNegative =
1296 EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
1297 // 3. Now, was the 'negativity status' preserved during the conversion?
1298 // NOTE: conversion from negative to zero is considered to change the sign.
1299 // (We want to get 'false' when the conversion changed the sign)
1300 // So we should just equality-compare the negativity statuses.
1301 llvm::Value *Check = nullptr;
1302 Check =
1303 Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
1304 // If the comparison result is 'false', then the conversion changed the sign.
1305 return std::make_pair(
1306 ScalarExprEmitter::ICCK_IntegerSignChange,
1307 std::make_pair(Check, SanitizerKind::ImplicitBitfieldConversion));
1308}
1309
1310void CodeGenFunction::EmitBitfieldConversionCheck(Value *Src, QualType SrcType,
1311 Value *Dst, QualType DstType,
1312 const CGBitFieldInfo &Info,
1313 SourceLocation Loc) {
1314
1315 if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
1316 return;
1317
1318 // We only care about int->int conversions here.
1319 // We ignore conversions to/from pointer and/or bool.
1321 DstType))
1322 return;
1323
1324 if (DstType->isBooleanType() || SrcType->isBooleanType())
1325 return;
1326
1327 // This should be truncation of integral types.
1328 assert(isa<llvm::IntegerType>(Src->getType()) &&
1329 isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");
1330
1331 // TODO: Calculate src width to avoid emitting code
1332 // for unecessary cases.
1333 unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
1334 unsigned DstBits = Info.Size;
1335
1336 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1337 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1338
1339 CodeGenFunction::SanitizerScope SanScope(this);
1340
1341 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1342 std::pair<llvm::Value *, SanitizerMask>>
1343 Check;
1344
1345 // Truncation
1346 bool EmitTruncation = DstBits < SrcBits;
1347 // If Dst is signed and Src unsigned, we want to be more specific
1348 // about the CheckKind we emit, in this case we want to emit
1349 // ICCK_SignedIntegerTruncationOrSignChange.
1350 bool EmitTruncationFromUnsignedToSigned =
1351 EmitTruncation && DstSigned && !SrcSigned;
1352 // Sign change
1353 bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
1354 bool BothUnsigned = !SrcSigned && !DstSigned;
1355 bool LargerSigned = (DstBits > SrcBits) && DstSigned;
1356 // We can avoid emitting sign change checks in some obvious cases
1357 // 1. If Src and Dst have the same signedness and size
1358 // 2. If both are unsigned sign check is unecessary!
1359 // 3. If Dst is signed and bigger than Src, either
1360 // sign-extension or zero-extension will make sure
1361 // the sign remains.
1362 bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;
1363
1364 if (EmitTruncation)
1365 Check =
1366 EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1367 else if (EmitSignChange) {
1368 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1369 "either the widths should be different, or the signednesses.");
1370 Check =
1371 EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1372 } else
1373 return;
1374
1375 ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
1376 if (EmitTruncationFromUnsignedToSigned)
1377 CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;
1378
1379 llvm::Constant *StaticArgs[] = {
1381 EmitCheckTypeDescriptor(DstType),
1382 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1383 llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};
1384
1385 EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
1386 {Src, Dst});
1387}
1388
1389Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1390 QualType DstType, llvm::Type *SrcTy,
1391 llvm::Type *DstTy,
1392 ScalarConversionOpts Opts) {
1393 // The Element types determine the type of cast to perform.
1394 llvm::Type *SrcElementTy;
1395 llvm::Type *DstElementTy;
1396 QualType SrcElementType;
1397 QualType DstElementType;
1398 if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1399 SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1400 DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1401 SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1402 DstElementType = DstType->castAs<MatrixType>()->getElementType();
1403 } else {
1404 assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
1405 "cannot cast between matrix and non-matrix types");
1406 SrcElementTy = SrcTy;
1407 DstElementTy = DstTy;
1408 SrcElementType = SrcType;
1409 DstElementType = DstType;
1410 }
1411
1412 if (isa<llvm::IntegerType>(SrcElementTy)) {
1413 bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1414 if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1415 InputSigned = true;
1416 }
1417
1418 if (isa<llvm::IntegerType>(DstElementTy))
1419 return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1420 if (InputSigned)
1421 return Builder.CreateSIToFP(Src, DstTy, "conv");
1422 return Builder.CreateUIToFP(Src, DstTy, "conv");
1423 }
1424
1425 if (isa<llvm::IntegerType>(DstElementTy)) {
1426 assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
1427 bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();
1428
1429 // If we can't recognize overflow as undefined behavior, assume that
1430 // overflow saturates. This protects against normal optimizations if we are
1431 // compiling with non-standard FP semantics.
1432 if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
1433 llvm::Intrinsic::ID IID =
1434 IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
1435 return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
1436 }
1437
1438 if (IsSigned)
1439 return Builder.CreateFPToSI(Src, DstTy, "conv");
1440 return Builder.CreateFPToUI(Src, DstTy, "conv");
1441 }
1442
1443 if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1444 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1445 return Builder.CreateFPExt(Src, DstTy, "conv");
1446}
1447
1448/// Emit a conversion from the specified type to the specified destination type,
1449/// both of which are LLVM scalar types.
1450Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1451 QualType DstType,
1452 SourceLocation Loc,
1453 ScalarConversionOpts Opts) {
1454 // All conversions involving fixed point types should be handled by the
1455 // EmitFixedPoint family functions. This is done to prevent bloating up this
1456 // function more, and although fixed point numbers are represented by
1457 // integers, we do not want to follow any logic that assumes they should be
1458 // treated as integers.
1459 // TODO(leonardchan): When necessary, add another if statement checking for
1460 // conversions to fixed point types from other types.
1461 if (SrcType->isFixedPointType()) {
1462 if (DstType->isBooleanType())
1463 // It is important that we check this before checking if the dest type is
1464 // an integer because booleans are technically integer types.
1465 // We do not need to check the padding bit on unsigned types if unsigned
1466 // padding is enabled because overflow into this bit is undefined
1467 // behavior.
1468 return Builder.CreateIsNotNull(Src, "tobool");
1469 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1470 DstType->isRealFloatingType())
1471 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1472
1473 llvm_unreachable(
1474 "Unhandled scalar conversion from a fixed point type to another type.");
1475 } else if (DstType->isFixedPointType()) {
1476 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1477 // This also includes converting booleans and enums to fixed point types.
1478 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1479
1480 llvm_unreachable(
1481 "Unhandled scalar conversion to a fixed point type from another type.");
1482 }
1483
1484 QualType NoncanonicalSrcType = SrcType;
1485 QualType NoncanonicalDstType = DstType;
1486
1487 SrcType = CGF.getContext().getCanonicalType(SrcType);
1488 DstType = CGF.getContext().getCanonicalType(DstType);
1489 if (SrcType == DstType) return Src;
1490
1491 if (DstType->isVoidType()) return nullptr;
1492
1493 llvm::Value *OrigSrc = Src;
1494 QualType OrigSrcType = SrcType;
1495 llvm::Type *SrcTy = Src->getType();
1496
1497 // Handle conversions to bool first, they are special: comparisons against 0.
1498 if (DstType->isBooleanType())
1499 return EmitConversionToBool(Src, SrcType);
1500
1501 llvm::Type *DstTy = ConvertType(DstType);
1502
1503 // Cast from half through float if half isn't a native type.
1504 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1505 // Cast to FP using the intrinsic if the half type itself isn't supported.
1506 if (DstTy->isFloatingPointTy()) {
1508 return Builder.CreateCall(
1509 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1510 Src);
1511 } else {
1512 // Cast to other types through float, using either the intrinsic or FPExt,
1513 // depending on whether the half type itself is supported
1514 // (as opposed to operations on half, available with NativeHalfType).
1516 Src = Builder.CreateCall(
1517 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1518 CGF.CGM.FloatTy),
1519 Src);
1520 } else {
1521 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1522 }
1523 SrcType = CGF.getContext().FloatTy;
1524 SrcTy = CGF.FloatTy;
1525 }
1526 }
1527
1528 // Ignore conversions like int -> uint.
1529 if (SrcTy == DstTy) {
1530 if (Opts.EmitImplicitIntegerSignChangeChecks)
1531 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1532 NoncanonicalDstType, Loc);
1533
1534 return Src;
1535 }
1536
1537 // Handle pointer conversions next: pointers can only be converted to/from
1538 // other pointers and integers. Check for pointer types in terms of LLVM, as
1539 // some native types (like Obj-C id) may map to a pointer type.
1540 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1541 // The source value may be an integer, or a pointer.
1542 if (isa<llvm::PointerType>(SrcTy))
1543 return Src;
1544
1545 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1546 // First, convert to the correct width so that we control the kind of
1547 // extension.
1548 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1549 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1550 llvm::Value* IntResult =
1551 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1552 // Then, cast to pointer.
1553 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1554 }
1555
1556 if (isa<llvm::PointerType>(SrcTy)) {
1557 // Must be an ptr to int cast.
1558 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1559 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1560 }
1561
1562 // A scalar can be splatted to an extended vector of the same element type
1563 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1564 // Sema should add casts to make sure that the source expression's type is
1565 // the same as the vector's element type (sans qualifiers)
1566 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1567 SrcType.getTypePtr() &&
1568 "Splatted expr doesn't match with vector element type?");
1569
1570 // Splat the element across to all elements
1571 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1572 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1573 }
1574
1575 if (SrcType->isMatrixType() && DstType->isMatrixType())
1576 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1577
1578 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1579 // Allow bitcast from vector to integer/fp of the same size.
1580 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1581 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1582 if (SrcSize == DstSize)
1583 return Builder.CreateBitCast(Src, DstTy, "conv");
1584
1585 // Conversions between vectors of different sizes are not allowed except
1586 // when vectors of half are involved. Operations on storage-only half
1587 // vectors require promoting half vector operands to float vectors and
1588 // truncating the result, which is either an int or float vector, to a
1589 // short or half vector.
1590
1591 // Source and destination are both expected to be vectors.
1592 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1593 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1594 (void)DstElementTy;
1595
1596 assert(((SrcElementTy->isIntegerTy() &&
1597 DstElementTy->isIntegerTy()) ||
1598 (SrcElementTy->isFloatingPointTy() &&
1599 DstElementTy->isFloatingPointTy())) &&
1600 "unexpected conversion between a floating-point vector and an "
1601 "integer vector");
1602
1603 // Truncate an i32 vector to an i16 vector.
1604 if (SrcElementTy->isIntegerTy())
1605 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1606
1607 // Truncate a float vector to a half vector.
1608 if (SrcSize > DstSize)
1609 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1610
1611 // Promote a half vector to a float vector.
1612 return Builder.CreateFPExt(Src, DstTy, "conv");
1613 }
1614
1615 // Finally, we have the arithmetic types: real int/float.
1616 Value *Res = nullptr;
1617 llvm::Type *ResTy = DstTy;
1618
1619 // An overflowing conversion has undefined behavior if either the source type
1620 // or the destination type is a floating-point type. However, we consider the
1621 // range of representable values for all floating-point types to be
1622 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1623 // floating-point type.
1624 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1625 OrigSrcType->isFloatingType())
1626 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1627 Loc);
1628
1629 // Cast to half through float if half isn't a native type.
1630 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1631 // Make sure we cast in a single step if from another FP type.
1632 if (SrcTy->isFloatingPointTy()) {
1633 // Use the intrinsic if the half type itself isn't supported
1634 // (as opposed to operations on half, available with NativeHalfType).
1636 return Builder.CreateCall(
1637 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1638 // If the half type is supported, just use an fptrunc.
1639 return Builder.CreateFPTrunc(Src, DstTy);
1640 }
1641 DstTy = CGF.FloatTy;
1642 }
1643
1644 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1645
1646 if (DstTy != ResTy) {
1648 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1649 Res = Builder.CreateCall(
1650 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1651 Res);
1652 } else {
1653 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1654 }
1655 }
1656
1657 if (Opts.EmitImplicitIntegerTruncationChecks)
1658 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1659 NoncanonicalDstType, Loc);
1660
1661 if (Opts.EmitImplicitIntegerSignChangeChecks)
1662 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1663 NoncanonicalDstType, Loc);
1664
1665 return Res;
1666}
1667
/// Convert \p Src, a value of type \p SrcTy, to \p DstTy, where at least one
/// of the two types is a fixed-point type. All of the actual conversion math
/// is delegated to llvm::FixedPointBuilder.
Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
                                                   QualType DstTy,
                                                   SourceLocation Loc) {
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  llvm::Value *Result;
  if (SrcTy->isRealFloatingType())
    // Floating-point source: convert into the destination's fixed-point
    // semantics.
    Result = FPBuilder.CreateFloatingToFixed(Src,
        CGF.getContext().getFixedPointSemantics(DstTy));
  else if (DstTy->isRealFloatingType())
    // Fixed-point source, floating-point destination.
    // NOTE(review): the source fixed-point-semantics argument of this call is
    // not visible in this excerpt — confirm against the full file.
    Result = FPBuilder.CreateFixedToFloating(Src,
        ConvertType(DstTy));
  else {
    // Neither side is floating point: fixed<->fixed, fixed->int, int->fixed.
    auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
    auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);

    if (DstTy->isIntegerType())
      Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
                                              DstFPSema.getWidth(),
                                              DstFPSema.isSigned());
    else if (SrcTy->isIntegerType())
      Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
                                              DstFPSema);
    else
      Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
  }
  return Result;
}
1696
/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
/// Conversions to bool OR together the real/imaginary comparisons against
/// zero; all other conversions discard the imaginary part (C99 6.3.1.7p2).
Value *ScalarExprEmitter::EmitComplexToScalarConversion(
    // NOTE(review): the parameter line declaring the complex source value and
    // the source/destination QualTypes is not visible in this excerpt; the
    // body below uses Src, SrcTy, and DstTy.
    SourceLocation Loc) {
  // Get the source element type.
  SrcTy = SrcTy->castAs<ComplexType>()->getElementType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstTy->isBooleanType()) {
    // Complex != 0 -> (Real != 0) | (Imag != 0)
    Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
    Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
    return Builder.CreateOr(Src.first, Src.second, "tobool");
  }

  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
  // the imaginary part of the complex value is discarded and the value of the
  // real part is converted according to the conversion rules for the
  // corresponding real type.
  return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
}
1719
1720Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1721 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1722}
1723
/// Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
/// operation). The check passes if all values in \p Checks (which are \c i1),
/// are \c true.
void ScalarExprEmitter::EmitBinOpCheck(
    ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
  assert(CGF.IsSanitizerScope);
  SanitizerHandler Check;

  BinaryOperatorKind Opcode = Info.Opcode;
  // NOTE(review): the declarations of the StaticData/DynamicData containers
  // used below are not visible in this excerpt.

  // Every handler is given the source location of the operation.
  StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
  if (UO && UO->getOpcode() == UO_Minus) {
    // Unary minus lowered to a binary op: report as negation overflow, with
    // only the operand being negated as dynamic data.
    Check = SanitizerHandler::NegateOverflow;
    StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
    DynamicData.push_back(Info.RHS);
  } else {
    if (BinaryOperator::isShiftOp(Opcode)) {
      // Shift LHS negative or too large, or RHS out of bounds.
      Check = SanitizerHandler::ShiftOutOfBounds;
      const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
    } else if (Opcode == BO_Div || Opcode == BO_Rem) {
      // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
      Check = SanitizerHandler::DivremOverflow;
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    } else {
      // Arithmetic overflow (+, -, *).
      switch (Opcode) {
      case BO_Add: Check = SanitizerHandler::AddOverflow; break;
      case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
      case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
      default: llvm_unreachable("unexpected opcode for bin op check");
      }
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    }
    // All true binary forms report both operands to the runtime handler.
    DynamicData.push_back(Info.LHS);
    DynamicData.push_back(Info.RHS);
  }

  CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
}
1774
1775//===----------------------------------------------------------------------===//
1776// Visitor Methods
1777//===----------------------------------------------------------------------===//
1778
1779Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1780 CGF.ErrorUnsupported(E, "scalar expression");
1781 if (E->getType()->isVoidType())
1782 return nullptr;
1783 return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1784}
1785
Value *
ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
  // Materialize the computed unique stable name as a global string and yield
  // its address, cast to the expression's pointer type.
  ASTContext &Context = CGF.getContext();
  unsigned AddrSpace =
      // NOTE(review): the initializer of AddrSpace (presumably a target
      // address-space query) is not visible in this excerpt.
  llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
      E->ComputeName(Context), "__usn_str", AddrSpace);

  llvm::Type *ExprTy = ConvertType(E->getType());
  // The global may live in a non-default address space; use a combined
  // bitcast-or-addrspacecast to reach the expression's pointer type.
  return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,
                                                     "usn_addr_cast");
}
1798
1799Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1800 // Vector Mask Case
1801 if (E->getNumSubExprs() == 2) {
1802 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1803 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1804 Value *Mask;
1805
1806 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
1807 unsigned LHSElts = LTy->getNumElements();
1808
1809 Mask = RHS;
1810
1811 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
1812
1813 // Mask off the high bits of each shuffle index.
1814 Value *MaskBits =
1815 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1816 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1817
1818 // newv = undef
1819 // mask = mask & maskbits
1820 // for each elt
1821 // n = extract mask i
1822 // x = extract val n
1823 // newv = insert newv, x, i
1824 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
1825 MTy->getNumElements());
1826 Value* NewV = llvm::PoisonValue::get(RTy);
1827 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1828 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1829 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1830
1831 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1832 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1833 }
1834 return NewV;
1835 }
1836
1837 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1838 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1839
1840 SmallVector<int, 32> Indices;
1841 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1842 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
1843 // Check for -1 and output it as undef in the IR.
1844 if (Idx.isSigned() && Idx.isAllOnes())
1845 Indices.push_back(-1);
1846 else
1847 Indices.push_back(Idx.getZExtValue());
1848 }
1849
1850 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
1851}
1852
1853Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1854 QualType SrcType = E->getSrcExpr()->getType(),
1855 DstType = E->getType();
1856
1857 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1858
1859 SrcType = CGF.getContext().getCanonicalType(SrcType);
1860 DstType = CGF.getContext().getCanonicalType(DstType);
1861 if (SrcType == DstType) return Src;
1862
1863 assert(SrcType->isVectorType() &&
1864 "ConvertVector source type must be a vector");
1865 assert(DstType->isVectorType() &&
1866 "ConvertVector destination type must be a vector");
1867
1868 llvm::Type *SrcTy = Src->getType();
1869 llvm::Type *DstTy = ConvertType(DstType);
1870
1871 // Ignore conversions like int -> uint.
1872 if (SrcTy == DstTy)
1873 return Src;
1874
1875 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1876 DstEltType = DstType->castAs<VectorType>()->getElementType();
1877
1878 assert(SrcTy->isVectorTy() &&
1879 "ConvertVector source IR type must be a vector");
1880 assert(DstTy->isVectorTy() &&
1881 "ConvertVector destination IR type must be a vector");
1882
1883 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
1884 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
1885
1886 if (DstEltType->isBooleanType()) {
1887 assert((SrcEltTy->isFloatingPointTy() ||
1888 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1889
1890 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
1891 if (SrcEltTy->isFloatingPointTy()) {
1892 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
1893 } else {
1894 return Builder.CreateICmpNE(Src, Zero, "tobool");
1895 }
1896 }
1897
1898 // We have the arithmetic types: real int/float.
1899 Value *Res = nullptr;
1900
1901 if (isa<llvm::IntegerType>(SrcEltTy)) {
1902 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1903 if (isa<llvm::IntegerType>(DstEltTy))
1904 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1905 else if (InputSigned)
1906 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1907 else
1908 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1909 } else if (isa<llvm::IntegerType>(DstEltTy)) {
1910 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1911 if (DstEltType->isSignedIntegerOrEnumerationType())
1912 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1913 else
1914 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1915 } else {
1916 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1917 "Unknown real conversion");
1918 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1919 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1920 else
1921 Res = Builder.CreateFPExt(Src, DstTy, "conv");
1922 }
1923
1924 return Res;
1925}
1926
Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
  // If the whole member access folds to a constant, still evaluate the base
  // expression for its side effects, then return the constant directly.
  if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
    CGF.EmitIgnoredExpr(E->getBase());
    return CGF.emitScalarConstant(Constant, E);
  } else {
    // NOTE(review): the integer-constant-evaluation guard that declares and
    // populates `Result` is not visible in this excerpt.
      llvm::APSInt Value = Result.Val.getInt();
      CGF.EmitIgnoredExpr(E->getBase());
      return Builder.getInt(Value);
    }
  }

  // General case: load the member through its lvalue.
  return EmitLoadOfLValue(E);
}
1942
Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Emit subscript expressions in rvalue context's. For most cases, this just
  // loads the lvalue formed by the subscript expr. However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
  if (!E->getBase()->getType()->isVectorType() &&
      // NOTE(review): the second operand of this condition is not visible in
      // this excerpt.
    return EmitLoadOfLValue(E);

  // Handle the vector case. The base must be a vector, the index must be an
  // integer value.
  Value *Base = Visit(E->getBase());
  Value *Idx = Visit(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();

  // With -fsanitize=array-bounds, check the index against the vector's bounds
  // before extracting the element.
  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);

  return Builder.CreateExtractElement(Base, Idx, "vecext");
}
1965
1966Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
1967 TestAndClearIgnoreResultAssign();
1968
1969 // Handle the vector case. The base must be a vector, the index must be an
1970 // integer value.
1971 Value *RowIdx = Visit(E->getRowIdx());
1972 Value *ColumnIdx = Visit(E->getColumnIdx());
1973
1974 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
1975 unsigned NumRows = MatrixTy->getNumRows();
1976 llvm::MatrixBuilder MB(Builder);
1977 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
1978 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
1979 MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());
1980
1981 Value *Matrix = Visit(E->getBase());
1982
1983 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
1984 return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
1985}
1986
1987static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1988 unsigned Off) {
1989 int MV = SVI->getMaskValue(Idx);
1990 if (MV == -1)
1991 return -1;
1992 return Off + MV;
1993}
1994
1995static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1996 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
1997 "Index operand too large for shufflevector mask!");
1998 return C->getZExtValue();
1999}
2000
Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  (void)Ignore;
  assert (Ignore == false && "init list ignored");
  unsigned NumInitElements = E->getNumInits();

  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::VectorType *VType =
    dyn_cast<llvm::VectorType>(ConvertType(E->getType()));

  // Non-vector result: either value-initialize or unwrap the single braced
  // scalar.
  if (!VType) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the scalar.
      return EmitNullValue(E->getType());
    }
    // We have a scalar in braces. Just use the first element.
    return Visit(E->getInit(0));
  }

  // Scalable vectors only support zero-init and copy-init from a value of the
  // same type; per-element construction is not possible.
  if (isa<llvm::ScalableVectorType>(VType)) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the vector.
      return EmitNullValue(E->getType());
    }

    if (NumInitElements == 1) {
      Expr *InitVector = E->getInit(0);

      // Initialize from another scalable vector of the same type.
      if (InitVector->getType() == E->getType())
        return Visit(InitVector);
    }

    llvm_unreachable("Unexpected initialization of a scalable vector!");
  }

  unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();

  // Loop over initializers collecting the Value for each, and remembering
  // whether the source was swizzle (ExtVectorElementExpr). This will allow
  // us to fold the shuffle for the swizzle into the shuffle for the vector
  // initializer, since LLVM optimizers generally do not want to touch
  // shuffles.
  unsigned CurIdx = 0;
  bool VIsPoisonShuffle = false;
  llvm::Value *V = llvm::PoisonValue::get(VType);
  for (unsigned i = 0; i != NumInitElements; ++i) {
    Expr *IE = E->getInit(i);
    Value *Init = Visit(IE);
    // NOTE(review): the declaration of the shuffle-mask container `Args` used
    // below is not visible in this excerpt.

    llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());

    // Handle scalar elements. If the scalar initializer is actually one
    // element of a different vector of the same width, use shuffle instead of
    // extract+insert.
    if (!VVT) {
      if (isa<ExtVectorElementExpr>(IE)) {
        llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);

        if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
                ->getNumElements() == ResElts) {
          llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
          Value *LHS = nullptr, *RHS = nullptr;
          if (CurIdx == 0) {
            // insert into poison -> shuffle (src, poison)
            // shufflemask must use an i32
            Args.push_back(getAsInt32(C, CGF.Int32Ty));
            Args.resize(ResElts, -1);

            LHS = EI->getVectorOperand();
            RHS = V;
            VIsPoisonShuffle = true;
          } else if (VIsPoisonShuffle) {
            // insert into poison shuffle && size match -> shuffle (v, src)
            llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
            for (unsigned j = 0; j != CurIdx; ++j)
              Args.push_back(getMaskElt(SVV, j, 0));
            Args.push_back(ResElts + C->getZExtValue());
            Args.resize(ResElts, -1);

            LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
            RHS = EI->getVectorOperand();
            VIsPoisonShuffle = false;
          }
          if (!Args.empty()) {
            V = Builder.CreateShuffleVector(LHS, RHS, Args);
            ++CurIdx;
            continue;
          }
        }
      }
      // Plain scalar element: insert it at the current position.
      V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
                                      "vecinit");
      VIsPoisonShuffle = false;
      ++CurIdx;
      continue;
    }

    unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();

    // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
    // input is the same width as the vector being constructed, generate an
    // optimized shuffle of the swizzle input into the result.
    unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
    if (isa<ExtVectorElementExpr>(IE)) {
      llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
      Value *SVOp = SVI->getOperand(0);
      auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());

      if (OpTy->getNumElements() == ResElts) {
        for (unsigned j = 0; j != CurIdx; ++j) {
          // If the current vector initializer is a shuffle with poison, merge
          // this shuffle directly into it.
          if (VIsPoisonShuffle) {
            Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
          } else {
            Args.push_back(j);
          }
        }
        for (unsigned j = 0, je = InitElts; j != je; ++j)
          Args.push_back(getMaskElt(SVI, j, Offset));
        Args.resize(ResElts, -1);

        if (VIsPoisonShuffle)
          V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);

        Init = SVOp;
      }
    }

    // Extend init to result vector length, and then shuffle its contribution
    // to the vector initializer into V.
    if (Args.empty()) {
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(j);
      Args.resize(ResElts, -1);
      Init = Builder.CreateShuffleVector(Init, Args, "vext");

      Args.clear();
      for (unsigned j = 0; j != CurIdx; ++j)
        Args.push_back(j);
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(j + Offset);
      Args.resize(ResElts, -1);
    }

    // If V is poison, make sure it ends up on the RHS of the shuffle to aid
    // merging subsequent shuffles into this one.
    if (CurIdx == 0)
      std::swap(V, Init);
    V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
    VIsPoisonShuffle = isa<llvm::PoisonValue>(Init);
    CurIdx += InitElts;
  }

  // FIXME: evaluate codegen vs. shuffling against constant null vector.
  // Emit remaining default initializers (zero for each uncovered element).
  llvm::Type *EltTy = VType->getElementType();

  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
    Value *Idx = Builder.getInt32(CurIdx);
    llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
  }
  return V;
}
2171
  // NOTE(review): the enclosing function's signature line is not visible in
  // this excerpt; the body below reads a CastExpr `CE` and returns a bool
  // indicating whether the cast's operand may need a null check.
  const Expr *E = CE->getSubExpr();

  // Unchecked derived-to-base casts never need a null check by construction.
  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(E->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
    // And that glvalue casts are never null.
    if (ICE->isGLValue())
      return false;
  }

  // Anything else may be null.
  return true;
}
2191
2192// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
2193// have to handle a more broad range of conversions than explicit casts, as they
2194// handle things like function to ptr-to-function decay etc.
2195Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
2196 Expr *E = CE->getSubExpr();
2197 QualType DestTy = CE->getType();
2198 CastKind Kind = CE->getCastKind();
2199 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);
2200
2201 // These cases are generally not written to ignore the result of
2202 // evaluating their sub-expressions, so we clear this now.
2203 bool Ignored = TestAndClearIgnoreResultAssign();
2204
2205 // Since almost all cast kinds apply to scalars, this switch doesn't have
2206 // a default case, so the compiler will warn on a missing case. The cases
2207 // are in the same order as in the CastKind enum.
2208 switch (Kind) {
2209 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2210 case CK_BuiltinFnToFnPtr:
2211 llvm_unreachable("builtin functions are handled elsewhere");
2212
2213 case CK_LValueBitCast:
2214 case CK_ObjCObjectLValueCast: {
2215 Address Addr = EmitLValue(E).getAddress(CGF);
2216 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2217 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
2218 return EmitLoadOfLValue(LV, CE->getExprLoc());
2219 }
2220
2221 case CK_LValueToRValueBitCast: {
2222 LValue SourceLVal = CGF.EmitLValue(E);
2223 Address Addr = SourceLVal.getAddress(CGF).withElementType(
2224 CGF.ConvertTypeForMem(DestTy));
2225 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2227 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2228 }
2229
2230 case CK_CPointerToObjCPointerCast:
2231 case CK_BlockPointerToObjCPointerCast:
2232 case CK_AnyPointerToBlockPointerCast:
2233 case CK_BitCast: {
2234 Value *Src = Visit(const_cast<Expr*>(E));
2235 llvm::Type *SrcTy = Src->getType();
2236 llvm::Type *DstTy = ConvertType(DestTy);
2237 assert(
2238 (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
2239 SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
2240 "Address-space cast must be used to convert address spaces");
2241
2242 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2243 if (auto *PT = DestTy->getAs<PointerType>()) {
2245 PT->getPointeeType(),
2246 Address(Src,
2249 CGF.getPointerAlign()),
2250 /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
2251 CE->getBeginLoc());
2252 }
2253 }
2254
2255 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2256 const QualType SrcType = E->getType();
2257
2258 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2259 // Casting to pointer that could carry dynamic information (provided by
2260 // invariant.group) requires launder.
2261 Src = Builder.CreateLaunderInvariantGroup(Src);
2262 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2263 // Casting to pointer that does not carry dynamic information (provided
2264 // by invariant.group) requires stripping it. Note that we don't do it
2265 // if the source could not be dynamic type and destination could be
2266 // dynamic because dynamic information is already laundered. It is
2267 // because launder(strip(src)) == launder(src), so there is no need to
2268 // add extra strip before launder.
2269 Src = Builder.CreateStripInvariantGroup(Src);
2270 }
2271 }
2272
2273 // Update heapallocsite metadata when there is an explicit pointer cast.
2274 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
2275 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
2276 !isa<CastExpr>(E)) {
2277 QualType PointeeType = DestTy->getPointeeType();
2278 if (!PointeeType.isNull())
2279 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
2280 CE->getExprLoc());
2281 }
2282 }
2283
2284 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2285 // same element type, use the llvm.vector.insert intrinsic to perform the
2286 // bitcast.
2287 if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2288 if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2289 // If we are casting a fixed i8 vector to a scalable i1 predicate
2290 // vector, use a vector insert and bitcast the result.
2291 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
2292 ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
2293 FixedSrcTy->getElementType()->isIntegerTy(8)) {
2294 ScalableDstTy = llvm::ScalableVectorType::get(
2295 FixedSrcTy->getElementType(),
2296 ScalableDstTy->getElementCount().getKnownMinValue() / 8);
2297 }
2298 if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
2299 llvm::Value *UndefVec = llvm::UndefValue::get(ScalableDstTy);
2300 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2301 llvm::Value *Result = Builder.CreateInsertVector(
2302 ScalableDstTy, UndefVec, Src, Zero, "cast.scalable");
2303 if (Result->getType() != DstTy)
2304 Result = Builder.CreateBitCast(Result, DstTy);
2305 return Result;
2306 }
2307 }
2308 }
2309
2310 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2311 // same element type, use the llvm.vector.extract intrinsic to perform the
2312 // bitcast.
2313 if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
2314 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2315 // If we are casting a scalable i1 predicate vector to a fixed i8
2316 // vector, bitcast the source and use a vector extract.
2317 if (ScalableSrcTy->getElementType()->isIntegerTy(1) &&
2318 ScalableSrcTy->getElementCount().isKnownMultipleOf(8) &&
2319 FixedDstTy->getElementType()->isIntegerTy(8)) {
2320 ScalableSrcTy = llvm::ScalableVectorType::get(
2321 FixedDstTy->getElementType(),
2322 ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
2323 Src = Builder.CreateBitCast(Src, ScalableSrcTy);
2324 }
2325 if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType()) {
2326 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2327 return Builder.CreateExtractVector(DstTy, Src, Zero, "cast.fixed");
2328 }
2329 }
2330 }
2331
2332 // Perform VLAT <-> VLST bitcast through memory.
2333 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
2334 // require the element types of the vectors to be the same, we
2335 // need to keep this around for bitcasts between VLAT <-> VLST where
2336 // the element types of the vectors are not the same, until we figure
2337 // out a better way of doing these casts.
2338 if ((isa<llvm::FixedVectorType>(SrcTy) &&
2339 isa<llvm::ScalableVectorType>(DstTy)) ||
2340 (isa<llvm::ScalableVectorType>(SrcTy) &&
2341 isa<llvm::FixedVectorType>(DstTy))) {
2342 Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
2343 LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
2344 CGF.EmitStoreOfScalar(Src, LV);
2345 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2346 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2348 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2349 }
2350 return Builder.CreateBitCast(Src, DstTy);
2351 }
2352 case CK_AddressSpaceConversion: {
2354 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2355 Result.Val.isNullPointer()) {
2356 // If E has side effect, it is emitted even if its final result is a
2357 // null pointer. In that case, a DCE pass should be able to
2358 // eliminate the useless instructions emitted during translating E.
2359 if (Result.HasSideEffects)
2360 Visit(E);
2361 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
2362 ConvertType(DestTy)), DestTy);
2363 }
2364 // Since target may map different address spaces in AST to the same address
2365 // space, an address space conversion may end up as a bitcast.
2367 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2368 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
2369 }
2370 case CK_AtomicToNonAtomic:
2371 case CK_NonAtomicToAtomic:
2372 case CK_UserDefinedConversion:
2373 return Visit(const_cast<Expr*>(E));
2374
2375 case CK_NoOp: {
2376 return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE)
2377 : Visit(const_cast<Expr *>(E));
2378 }
2379
2380 case CK_BaseToDerived: {
2381 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2382 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2383
2385 Address Derived =
2386 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2387 CE->path_begin(), CE->path_end(),
2389
2390 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2391 // performed and the object is not of the derived type.
2392 if (CGF.sanitizePerformTypeCheck())
2394 Derived, DestTy->getPointeeType());
2395
2396 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2397 CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
2398 /*MayBeNull=*/true,
2400 CE->getBeginLoc());
2401
2402 return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType());
2403 }
2404 case CK_UncheckedDerivedToBase:
2405 case CK_DerivedToBase: {
2406 // The EmitPointerWithAlignment path does this fine; just discard
2407 // the alignment.
2409 CE->getType()->getPointeeType());
2410 }
2411
2412 case CK_Dynamic: {
2414 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2415 return CGF.EmitDynamicCast(V, DCE);
2416 }
2417
2418 case CK_ArrayToPointerDecay:
2420 CE->getType()->getPointeeType());
2421 case CK_FunctionToPointerDecay:
2422 return EmitLValue(E).getPointer(CGF);
2423
2424 case CK_NullToPointer:
2425 if (MustVisitNullValue(E))
2426 CGF.EmitIgnoredExpr(E);
2427
2428 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2429 DestTy);
2430
2431 case CK_NullToMemberPointer: {
2432 if (MustVisitNullValue(E))
2433 CGF.EmitIgnoredExpr(E);
2434
2435 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2436 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2437 }
2438
2439 case CK_ReinterpretMemberPointer:
2440 case CK_BaseToDerivedMemberPointer:
2441 case CK_DerivedToBaseMemberPointer: {
2442 Value *Src = Visit(E);
2443
2444 // Note that the AST doesn't distinguish between checked and
2445 // unchecked member pointer conversions, so we always have to
2446 // implement checked conversions here. This is inefficient when
2447 // actual control flow may be required in order to perform the
2448 // check, which it is for data member pointers (but not member
2449 // function pointers on Itanium and ARM).
2450 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2451 }
2452
2453 case CK_ARCProduceObject:
2454 return CGF.EmitARCRetainScalarExpr(E);
2455 case CK_ARCConsumeObject:
2456 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2457 case CK_ARCReclaimReturnedObject:
2458 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2459 case CK_ARCExtendBlockObject:
2460 return CGF.EmitARCExtendBlockObject(E);
2461
2462 case CK_CopyAndAutoreleaseBlockObject:
2463 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2464
2465 case CK_FloatingRealToComplex:
2466 case CK_FloatingComplexCast:
2467 case CK_IntegralRealToComplex:
2468 case CK_IntegralComplexCast:
2469 case CK_IntegralComplexToFloatingComplex:
2470 case CK_FloatingComplexToIntegralComplex:
2471 case CK_ConstructorConversion:
2472 case CK_ToUnion:
2473 case CK_HLSLArrayRValue:
2474 llvm_unreachable("scalar cast to non-scalar value");
2475
2476 case CK_LValueToRValue:
2477 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2478 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2479 return Visit(const_cast<Expr*>(E));
2480
2481 case CK_IntegralToPointer: {
2482 Value *Src = Visit(const_cast<Expr*>(E));
2483
2484 // First, convert to the correct width so that we control the kind of
2485 // extension.
2486 auto DestLLVMTy = ConvertType(DestTy);
2487 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2488 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2489 llvm::Value* IntResult =
2490 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2491
2492 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2493
2494 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2495 // Going from integer to pointer that could be dynamic requires reloading
2496 // dynamic information from invariant.group.
2497 if (DestTy.mayBeDynamicClass())
2498 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2499 }
2500 return IntToPtr;
2501 }
2502 case CK_PointerToIntegral: {
2503 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2504 auto *PtrExpr = Visit(E);
2505
2506 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2507 const QualType SrcType = E->getType();
2508
2509 // Casting to integer requires stripping dynamic information as it does
2510 // not carries it.
2511 if (SrcType.mayBeDynamicClass())
2512 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2513 }
2514
2515 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2516 }
2517 case CK_ToVoid: {
2518 CGF.EmitIgnoredExpr(E);
2519 return nullptr;
2520 }
2521 case CK_MatrixCast: {
2522 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2523 CE->getExprLoc());
2524 }
2525 case CK_VectorSplat: {
2526 llvm::Type *DstTy = ConvertType(DestTy);
2527 Value *Elt = Visit(const_cast<Expr *>(E));
2528 // Splat the element across to all elements
2529 llvm::ElementCount NumElements =
2530 cast<llvm::VectorType>(DstTy)->getElementCount();
2531 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2532 }
2533
2534 case CK_FixedPointCast:
2535 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2536 CE->getExprLoc());
2537
2538 case CK_FixedPointToBoolean:
2539 assert(E->getType()->isFixedPointType() &&
2540 "Expected src type to be fixed point type");
2541 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2542 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2543 CE->getExprLoc());
2544
2545 case CK_FixedPointToIntegral:
2546 assert(E->getType()->isFixedPointType() &&
2547 "Expected src type to be fixed point type");
2548 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2549 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2550 CE->getExprLoc());
2551
2552 case CK_IntegralToFixedPoint:
2553 assert(E->getType()->isIntegerType() &&
2554 "Expected src type to be an integer");
2555 assert(DestTy->isFixedPointType() &&
2556 "Expected dest type to be fixed point type");
2557 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2558 CE->getExprLoc());
2559
2560 case CK_IntegralCast: {
2561 if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
2562 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2563 return Builder.CreateIntCast(Visit(E), ConvertType(DestTy),
2565 "conv");
2566 }
2567 ScalarConversionOpts Opts;
2568 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2569 if (!ICE->isPartOfExplicitCast())
2570 Opts = ScalarConversionOpts(CGF.SanOpts);
2571 }
2572 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2573 CE->getExprLoc(), Opts);
2574 }
2575 case CK_IntegralToFloating: {
2576 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2577 // TODO: Support constrained FP intrinsics.
2578 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2579 if (SrcElTy->isSignedIntegerOrEnumerationType())
2580 return Builder.CreateSIToFP(Visit(E), ConvertType(DestTy), "conv");
2581 return Builder.CreateUIToFP(Visit(E), ConvertType(DestTy), "conv");
2582 }
2583 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2584 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2585 CE->getExprLoc());
2586 }
2587 case CK_FloatingToIntegral: {
2588 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2589 // TODO: Support constrained FP intrinsics.
2590 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2591 if (DstElTy->isSignedIntegerOrEnumerationType())
2592 return Builder.CreateFPToSI(Visit(E), ConvertType(DestTy), "conv");
2593 return Builder.CreateFPToUI(Visit(E), ConvertType(DestTy), "conv");
2594 }
2595 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2596 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2597 CE->getExprLoc());
2598 }
2599 case CK_FloatingCast: {
2600 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2601 // TODO: Support constrained FP intrinsics.
2602 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2603 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2604 if (DstElTy->castAs<BuiltinType>()->getKind() <
2605 SrcElTy->castAs<BuiltinType>()->getKind())
2606 return Builder.CreateFPTrunc(Visit(E), ConvertType(DestTy), "conv");
2607 return Builder.CreateFPExt(Visit(E), ConvertType(DestTy), "conv");
2608 }
2609 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2610 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2611 CE->getExprLoc());
2612 }
2613 case CK_FixedPointToFloating:
2614 case CK_FloatingToFixedPoint: {
2615 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2616 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2617 CE->getExprLoc());
2618 }
2619 case CK_BooleanToSignedIntegral: {
2620 ScalarConversionOpts Opts;
2621 Opts.TreatBooleanAsSigned = true;
2622 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2623 CE->getExprLoc(), Opts);
2624 }
2625 case CK_IntegralToBoolean:
2626 return EmitIntToBoolConversion(Visit(E));
2627 case CK_PointerToBoolean:
2628 return EmitPointerToBoolConversion(Visit(E), E->getType());
2629 case CK_FloatingToBoolean: {
2630 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2631 return EmitFloatToBoolConversion(Visit(E));
2632 }
2633 case CK_MemberPointerToBoolean: {
2634 llvm::Value *MemPtr = Visit(E);
2635 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2636 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2637 }
2638
2639 case CK_FloatingComplexToReal:
2640 case CK_IntegralComplexToReal:
2641 return CGF.EmitComplexExpr(E, false, true).first;
2642
2643 case CK_FloatingComplexToBoolean:
2644 case CK_IntegralComplexToBoolean: {
2646
2647 // TODO: kill this function off, inline appropriate case here
2648 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2649 CE->getExprLoc());
2650 }
2651
2652 case CK_ZeroToOCLOpaqueType: {
2653 assert((DestTy->isEventT() || DestTy->isQueueT() ||
2654 DestTy->isOCLIntelSubgroupAVCType()) &&
2655 "CK_ZeroToOCLEvent cast on non-event type");
2656 return llvm::Constant::getNullValue(ConvertType(DestTy));
2657 }
2658
2659 case CK_IntToOCLSampler:
2660 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2661
2662 case CK_HLSLVectorTruncation: {
2663 assert(DestTy->isVectorType() && "Expected dest type to be vector type");
2664 Value *Vec = Visit(const_cast<Expr *>(E));
2666 unsigned NumElts = DestTy->castAs<VectorType>()->getNumElements();
2667 for (unsigned I = 0; I != NumElts; ++I)
2668 Mask.push_back(I);
2669
2670 return Builder.CreateShuffleVector(Vec, Mask, "trunc");
2671 }
2672
2673 } // end of switch
2674
2675 llvm_unreachable("unknown scalar cast");
2676}
2677
2678Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2679 CodeGenFunction::StmtExprEvaluation eval(CGF);
2680 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2681 !E->getType()->isVoidType());
2682 if (!RetAlloca.isValid())
2683 return nullptr;
2684 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2685 E->getExprLoc());
2686}
2687
2688Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2689 CodeGenFunction::RunCleanupsScope Scope(CGF);
2690 Value *V = Visit(E->getSubExpr());
2691 // Defend against dominance problems caused by jumps out of expression
2692 // evaluation through the shared cleanup block.
2693 Scope.ForceCleanup({&V});
2694 return V;
2695}
2696
2697//===----------------------------------------------------------------------===//
2698// Unary Operators
2699//===----------------------------------------------------------------------===//
2700
                                           llvm::Value *InVal, bool IsInc,
                                           FPOptions FPFeatures) {
  // Build a BinOpInfo describing `InVal + 1` (increment) or `InVal - 1`
  // (decrement) so that ++/-- can reuse the generic binary-operator
  // emission paths, including overflow-checked emission.
  BinOpInfo BinOp;
  BinOp.LHS = InVal;
  // RHS is the constant 1 in the operand's LLVM type.
  BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
  BinOp.Ty = E->getType();
  BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
  BinOp.FPFeatures = FPFeatures;
  BinOp.E = E;
  return BinOp;
}
2713
llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
    const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
  // Amount is +1 or -1; constructed as signed so -1 is represented correctly.
  llvm::Value *Amount =
      llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
  StringRef Name = IsInc ? "inc" : "dec";
  // Dispatch on the language's signed-overflow behavior (-fwrapv / default /
  // trapping). NOTE(review): the extraction appears to have dropped the
  // `case` labels in this switch — confirm against upstream before relying
  // on the exact branch structure.
  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
    // Wrapping semantics: a plain add, unless the sanitizer wants a check.
    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
      return Builder.CreateAdd(InVal, Amount, Name);
    [[fallthrough]];
    // Undefined-on-overflow semantics: add with the no-signed-wrap flag.
    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
      return Builder.CreateNSWAdd(InVal, Amount, Name);
    [[fallthrough]];
    // Expressions that provably cannot overflow still get the NSW add.
    if (!E->canOverflow())
      return Builder.CreateNSWAdd(InVal, Amount, Name);
    // Otherwise emit the overflow-checked form of InVal +/- 1.
    return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
        E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
  }
  llvm_unreachable("Unknown SignedOverflowBehaviorTy");
}
2736
namespace {
/// Handles check and update for lastprivate conditional variables.
///
/// RAII helper: at destruction (i.e. after the inc/dec expression has been
/// emitted), notifies the OpenMP runtime about the possible update of the
/// operand so lastprivate(conditional:) bookkeeping stays correct.
class OMPLastprivateConditionalUpdateRAII {
private:
  CodeGenFunction &CGF; // Function whose OpenMP state is updated.
  const UnaryOperator *E; // The ++/-- expression being emitted.

public:
  OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
                                      const UnaryOperator *E)
      : CGF(CGF), E(E) {}
  // Only does work under OpenMP. NOTE(review): the extraction appears to
  // have dropped the runtime call line here — confirm against upstream.
  ~OMPLastprivateConditionalUpdateRAII() {
    if (CGF.getLangOpts().OpenMP)
          CGF, E->getSubExpr());
  }
};
} // namespace
2755
// Emit a scalar pre/post increment or decrement (++ / --) applied to the
// lvalue LV. Handles _Atomic operands, booleans, integers (with sanitizer
// and signed-overflow behavior), pointers (including VLA and function
// pointers), vectors, floating point, fixed-point, and Objective-C object
// pointers. Returns the new value for pre-ops, the original for post-ops.
llvm::Value *
ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                           bool isInc, bool isPre) {
  OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
  QualType type = E->getSubExpr()->getType();
  llvm::PHINode *atomicPHI = nullptr;
  llvm::Value *value;
  llvm::Value *input;
  // Pre-demotion value, kept for the bitfield conversion sanitizer check.
  llvm::Value *Previous = nullptr;
  QualType SrcType = E->getType();

  int amount = (isInc ? 1 : -1);
  bool isSubtraction = !isInc;

  if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
    type = atomicTy->getValueType();
    if (isInc && type->isBooleanType()) {
      llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
      if (isPre) {
        Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
            ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
        return Builder.getTrue();
      }
      // For atomic bool increment, we just store true and return it for
      // preincrement, do an atomic swap with true for postincrement
      return Builder.CreateAtomicRMW(
          llvm::AtomicRMWInst::Xchg, LV.getAddress(CGF), True,
          llvm::AtomicOrdering::SequentiallyConsistent);
    }
    // Special case for atomic increment / decrement on integers, emit
    // atomicrmw instructions. We skip this if we want to be doing overflow
    // checking, and fall into the slow path with the atomic cmpxchg loop.
    // NOTE(review): the extraction appears to have dropped the final
    // comparand line of this condition — confirm against upstream.
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
      llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
                                               llvm::AtomicRMWInst::Sub;
      llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
                                                llvm::Instruction::Sub;
      llvm::Value *amt = CGF.EmitToMemory(
          llvm::ConstantInt::get(ConvertType(type), 1, true), type);
      llvm::Value *old =
          Builder.CreateAtomicRMW(aop, LV.getAddress(CGF), amt,
                                  llvm::AtomicOrdering::SequentiallyConsistent);
      // atomicrmw yields the old value; recompute the new one for pre-ops.
      return isPre ? Builder.CreateBinOp(op, old, amt) : old;
    }
    value = EmitLoadOfLValue(LV, E->getExprLoc());
    input = value;
    // For every other atomic operation, we need to emit a load-op-cmpxchg loop
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    value = CGF.EmitToMemory(value, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    // PHI merges the initially-loaded value and the cmpxchg retry value.
    atomicPHI = Builder.CreatePHI(value->getType(), 2);
    atomicPHI->addIncoming(value, startBB);
    value = atomicPHI;
  } else {
    value = EmitLoadOfLValue(LV, E->getExprLoc());
    input = value;
  }

  // Special case of integer increment that we have to check first: bool++.
  // Due to promotion rules, we get:
  //   bool++ -> bool = bool + 1
  //          -> bool = (int)bool + 1
  //          -> bool = ((int)bool + 1 != 0)
  // An interesting aspect of this is that increment is always true.
  // Decrement does not have this property.
  if (isInc && type->isBooleanType()) {
    value = Builder.getTrue();

  // Most common case by far: integer increment.
  } else if (type->isIntegerType()) {
    QualType promotedType;
    bool canPerformLossyDemotionCheck = false;
      promotedType = CGF.getContext().getPromotedIntegerType(type);
      assert(promotedType != type && "Shouldn't promote to the same type.");
      canPerformLossyDemotionCheck = true;
      canPerformLossyDemotionCheck &=
          CGF.getContext().getCanonicalType(promotedType);
      canPerformLossyDemotionCheck &=
              type, promotedType);
      assert((!canPerformLossyDemotionCheck ||
              type->isSignedIntegerOrEnumerationType() ||
              promotedType->isSignedIntegerOrEnumerationType() ||
              ConvertType(type)->getScalarSizeInBits() ==
                  ConvertType(promotedType)->getScalarSizeInBits()) &&
             "The following check expects that if we do promotion to different "
             "underlying canonical type, at least one of the types (either "
             "base or promoted) will be signed, or the bitwidths will match.");
    }
    if (CGF.SanOpts.hasOneOf(
            SanitizerKind::ImplicitIntegerArithmeticValueChange |
            SanitizerKind::ImplicitBitfieldConversion) &&
        canPerformLossyDemotionCheck) {
      // While `x += 1` (for `x` with width less than int) is modeled as
      // promotion+arithmetics+demotion, and we can catch lossy demotion with
      // ease; inc/dec with width less than int can't overflow because of
      // promotion rules, so we omit promotion+demotion, which means that we can
      // not catch lossy "demotion". Because we still want to catch these cases
      // when the sanitizer is enabled, we perform the promotion, then perform
      // the increment/decrement in the wider type, and finally
      // perform the demotion. This will catch lossy demotions.

      // We have a special case for bitfields defined using all the bits of the
      // type. In this case we need to do the same trick as for the integer
      // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.

      value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
      Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
      // Do pass non-default ScalarConversionOpts so that sanitizer check is
      // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
      // checks will take care of the conversion.
      ScalarConversionOpts Opts;
      if (!LV.isBitField())
        Opts = ScalarConversionOpts(CGF.SanOpts);
      else if (CGF.SanOpts.has(SanitizerKind::ImplicitBitfieldConversion)) {
        Previous = value;
        SrcType = promotedType;
      }

      value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
                                   Opts);

      // Note that signed integer inc/dec with width less than int can't
      // overflow because of promotion rules; we're just eliding a few steps
      // here.
    } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
      value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
    } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
               CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
      value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
          E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
    } else {
      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    }

  // Next most common: pointer increment.
  } else if (const PointerType *ptr = type->getAs<PointerType>()) {
    QualType type = ptr->getPointeeType();

    // VLA types don't have constant size.
    if (const VariableArrayType *vla
      llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
      if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
      llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
        value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
      else
        value = CGF.EmitCheckedInBoundsGEP(
            elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
            E->getExprLoc(), "vla.inc");

    // Arithmetic on function pointers (!) is just +-1.
    } else if (type->isFunctionType()) {
      llvm::Value *amt = Builder.getInt32(amount);

        value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
      else
        value =
            CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
                                       /*SignedIndices=*/false, isSubtraction,
                                       E->getExprLoc(), "incdec.funcptr");

    // For everything else, we can just do a simple increment.
    } else {
      llvm::Value *amt = Builder.getInt32(amount);
      llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
        value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
      else
        value = CGF.EmitCheckedInBoundsGEP(
            elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
            E->getExprLoc(), "incdec.ptr");
    }

  // Vector increment/decrement.
  } else if (type->isVectorType()) {
    if (type->hasIntegerRepresentation()) {
      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);

      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    } else {
      value = Builder.CreateFAdd(
          value,
          llvm::ConstantFP::get(value->getType(), amount),
          isInc ? "inc" : "dec");
    }

  // Floating point.
  } else if (type->isRealFloatingType()) {
    // Add the inc/dec to the real part.
    llvm::Value *amt;
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);

    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
      // Another special case: half FP increment should be done via float
        value = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            input, "incdec.conv");
      } else {
        value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
      }
    }

    if (value->getType()->isFloatTy())
      amt = llvm::ConstantFP::get(VMContext,
                                  llvm::APFloat(static_cast<float>(amount)));
    else if (value->getType()->isDoubleTy())
      amt = llvm::ConstantFP::get(VMContext,
                                  llvm::APFloat(static_cast<double>(amount)));
    else {
      // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
      // Convert from float.
      llvm::APFloat F(static_cast<float>(amount));
      bool ignored;
      const llvm::fltSemantics *FS;
      // Don't use getFloatTypeSemantics because Half isn't
      // necessarily represented using the "half" LLVM type.
      if (value->getType()->isFP128Ty())
        FS = &CGF.getTarget().getFloat128Format();
      else if (value->getType()->isHalfTy())
        FS = &CGF.getTarget().getHalfFormat();
      else if (value->getType()->isBFloatTy())
        FS = &CGF.getTarget().getBFloat16Format();
      else if (value->getType()->isPPC_FP128Ty())
        FS = &CGF.getTarget().getIbm128Format();
      else
        FS = &CGF.getTarget().getLongDoubleFormat();
      F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
      amt = llvm::ConstantFP::get(VMContext, F);
    }
    value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");

    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
        value = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
                                 CGF.CGM.FloatTy),
            value, "incdec.conv");
      } else {
        value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
      }
    }

  // Fixed-point types.
  } else if (type->isFixedPointType()) {
    // Fixed-point types are tricky. In some cases, it isn't possible to
    // represent a 1 or a -1 in the type at all. Piggyback off of
    // EmitFixedPointBinOp to avoid having to reimplement saturation.
    BinOpInfo Info;
    Info.E = E;
    Info.Ty = E->getType();
    Info.Opcode = isInc ? BO_Add : BO_Sub;
    Info.LHS = value;
    Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
    // If the type is signed, it's better to represent this as +(-1) or -(-1),
    // since -1 is guaranteed to be representable.
    if (type->isSignedFixedPointType()) {
      Info.Opcode = isInc ? BO_Sub : BO_Add;
      Info.RHS = Builder.CreateNeg(Info.RHS);
    }
    // Now, convert from our invented integer literal to the type of the unary
    // op. This will upscale and saturate if necessary. This value can become
    // undef in some cases.
    llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
    auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
    Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
    value = EmitFixedPointBinOp(Info);

  // Objective-C pointer types.
  } else {
    const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();

    if (!isInc) size = -size;
    llvm::Value *sizeValue =
        llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());

      value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
    else
      value = CGF.EmitCheckedInBoundsGEP(
          CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
          E->getExprLoc(), "incdec.objptr");
    value = Builder.CreateBitCast(value, input->getType());
  }

  // Close the cmpxchg loop opened above: try to commit the computed value;
  // on failure, feed the freshly-loaded value back into the PHI and retry.
  if (atomicPHI) {
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(old, curBlock);
    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return isPre ? value : input;
  }

  // Store the updated result through the lvalue.
  if (LV.isBitField()) {
    Value *Src = Previous ? Previous : value;
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
    CGF.EmitBitfieldConversionCheck(Src, SrcType, value, E->getType(),
                                    LV.getBitFieldInfo(), E->getExprLoc());
  } else
    CGF.EmitStoreThroughLValue(RValue::get(value), LV);

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? value : input;
}
3082
3083
3084Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3085 QualType PromotionType) {
3086 QualType promotionTy = PromotionType.isNull()
3087 ? getPromotionType(E->getSubExpr()->getType())
3088 : PromotionType;
3089 Value *result = VisitPlus(E, promotionTy);
3090 if (result && !promotionTy.isNull())
3091 result = EmitUnPromotedValue(result, E->getType());
3092 return result;
3093}
3094
3095Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3096 QualType PromotionType) {
3097 // This differs from gcc, though, most likely due to a bug in gcc.
3098 TestAndClearIgnoreResultAssign();
3099 if (!PromotionType.isNull())
3100 return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3101 return Visit(E->getSubExpr());
3102}
3103
3104Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3105 QualType PromotionType) {
3106 QualType promotionTy = PromotionType.isNull()
3107 ? getPromotionType(E->getSubExpr()->getType())
3108 : PromotionType;
3109 Value *result = VisitMinus(E, promotionTy);
3110 if (result && !promotionTy.isNull())
3111 result = EmitUnPromotedValue(result, E->getType());
3112 return result;
3113}
3114
3115Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3116 QualType PromotionType) {
3117 TestAndClearIgnoreResultAssign();
3118 Value *Op;
3119 if (!PromotionType.isNull())
3120 Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3121 else
3122 Op = Visit(E->getSubExpr());
3123
3124 // Generate a unary FNeg for FP ops.
3125 if (Op->getType()->isFPOrFPVectorTy())
3126 return Builder.CreateFNeg(Op, "fneg");
3127
3128 // Emit unary minus with EmitSub so we handle overflow cases etc.
3129 BinOpInfo BinOp;
3130 BinOp.RHS = Op;
3131 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
3132 BinOp.Ty = E->getType();
3133 BinOp.Opcode = BO_Sub;
3134 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3135 BinOp.E = E;
3136 return EmitSub(BinOp);
3137}
3138
3139Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3140 TestAndClearIgnoreResultAssign();
3141 Value *Op = Visit(E->getSubExpr());
3142 return Builder.CreateNot(Op, "not");
3143}
3144
Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
  // Perform vector logical not on comparison with zero vector.
  // NOTE(review): the extraction appears to have dropped the vector-kind
  // comparand line of this condition — confirm against upstream.
  if (E->getType()->isVectorType() &&
      E->getType()->castAs<VectorType>()->getVectorKind() ==
    Value *Oper = Visit(E->getSubExpr());
    Value *Zero = llvm::Constant::getNullValue(Oper->getType());
    Value *Result;
    if (Oper->getType()->isFPOrFPVectorTy()) {
      // Honor the expression's FP environment while emitting the compare.
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
    } else
      Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
    // Sign-extend the i1 comparison result to the vector's element width.
    return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
  }

  // Compare operand to zero.
  Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());

  // Invert value.
  // TODO: Could dynamically modify easy computations here. For example, if
  // the operand is an icmp ne, turn into icmp eq.
  BoolVal = Builder.CreateNot(BoolVal, "lnot");

  // ZExt result to the expr type.
  return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
}
3173
// Emit the value of a __builtin_offsetof expression: fold it to a constant
// when possible, otherwise walk its components (array indices, fields, base
// classes) accumulating byte offsets at runtime.
Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
  // Try folding the offsetof to a constant.
  Expr::EvalResult EVResult;
  if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
    llvm::APSInt Value = EVResult.Val.getInt();
    return Builder.getInt(Value);
  }

  // Loop over the components of the offsetof to compute the value.
  unsigned n = E->getNumComponents();
  llvm::Type* ResultType = ConvertType(E->getType());
  llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned i = 0; i != n; ++i) {
    OffsetOfNode ON = E->getComponent(i);
    llvm::Value *Offset = nullptr;
    switch (ON.getKind()) {
    case OffsetOfNode::Array: {
      // Compute the index
      Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
      llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
      bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
      Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");

      // Save the element type
      CurrentType =
          CGF.getContext().getAsArrayType(CurrentType)->getElementType();

      // Compute the element size
      llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
          CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());

      // Multiply out to compute the result
      Offset = Builder.CreateMul(Idx, ElemSize);
      break;
    }

    case OffsetOfNode::Field: {
      FieldDecl *MemberDecl = ON.getField();
      RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Compute the index of the field in its parent.
      unsigned i = 0;
      // FIXME: It would be nice if we didn't have to loop here!
      for (RecordDecl::field_iterator Field = RD->field_begin(),
                                      FieldEnd = RD->field_end();
           Field != FieldEnd; ++Field, ++i) {
        if (*Field == MemberDecl)
          break;
      }
      assert(i < RL.getFieldCount() && "offsetof field in wrong type");

      // Compute the offset to the field
      int64_t OffsetInt = RL.getFieldOffset(i) /
                          CGF.getContext().getCharWidth();
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt);

      // Save the element type.
      CurrentType = MemberDecl->getType();
      break;
    }

      // NOTE(review): the extraction appears to have dropped a case label
      // before this unreachable — confirm against upstream.
      llvm_unreachable("dependent __builtin_offsetof");

    case OffsetOfNode::Base: {
      if (ON.getBase()->isVirtual()) {
        CGF.ErrorUnsupported(E, "virtual base in offsetof");
        continue;
      }

      RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Save the element type.
      CurrentType = ON.getBase()->getType();

      // Compute the offset to the base.
      auto *BaseRT = CurrentType->castAs<RecordType>();
      auto *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
      CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
      break;
    }
    }
    // Accumulate this component's byte offset into the running total.
    Result = Builder.CreateAdd(Result, Offset);
  }
  return Result;
}
3264
3265/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
3266/// argument of the sizeof expression as an integer.
Value *
ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
    const UnaryExprOrTypeTraitExpr *E) {
  // sizeof / __datasizeof need runtime computation only for VLAs; every
  // other trait here folds to a constant at the end.
  QualType TypeToSize = E->getTypeOfArgument();
  if (auto Kind = E->getKind();
      Kind == UETT_SizeOf || Kind == UETT_DataSizeOf) {
    if (const VariableArrayType *VAT =
            CGF.getContext().getAsVariableArrayType(TypeToSize)) {
      if (E->isArgumentType()) {
        // sizeof(type) - make sure to emit the VLA size.
        CGF.EmitVariablyModifiedType(TypeToSize);
      } else {
        // C99 6.5.3.4p2: If the argument is an expression of type
        // VLA, it is evaluated.
      }

      auto VlaSize = CGF.getVLASize(VAT);
      llvm::Value *size = VlaSize.NumElts;

      // Scale the number of non-VLA elements by the non-VLA element size.
      CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
      if (!eltSize.isOne())
        size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);

      return size;
    }
  } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
    // NOTE(review): the extraction appears to have dropped the middle of
    // this call chain — confirm against upstream.
    auto Alignment =
        CGF.getContext()
            .getQuantity();
    return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
  } else if (E->getKind() == UETT_VectorElements) {
    // __builtin_vectorelements: element count of the vector type (handles
    // scalable vectors via CreateElementCount).
    auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));
    return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());
  }

  // If this isn't sizeof(vla), the result must be constant; use the constant
  // folding logic so we don't have to duplicate it here.
  return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
}
3310
3311Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3312 QualType PromotionType) {
3313 QualType promotionTy = PromotionType.isNull()
3314 ? getPromotionType(E->getSubExpr()->getType())
3315 : PromotionType;
3316 Value *result = VisitReal(E, promotionTy);
3317 if (result && !promotionTy.isNull())
3318 result = EmitUnPromotedValue(result, E->getType());
3319 return result;
3320}
3321
// Emit __real on an expression.  For complex operands this projects the real
// component; for scalar operands __real is the identity.
Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
                                    QualType PromotionType) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (E->isGLValue()) {
      if (!PromotionType.isNull()) {
        // NOTE(review): the declaration of 'result' (the complex pair
        // emitted for Op) is elided in this view.
            Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
        // Promote only the real component; the imaginary half was ignored.
        if (result.first)
          result.first = CGF.EmitPromotedValue(result, PromotionType).first;
        return result.first;
      } else {
        return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
            .getScalarVal();
      }
    }
    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, false, true).first;
  }

  // Scalar operand: __real is the identity, emitted (possibly promoted).
  if (!PromotionType.isNull())
    return CGF.EmitPromotedScalarExpr(Op, PromotionType);
  return Visit(Op);
}
3349
3350Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3351 QualType PromotionType) {
3352 QualType promotionTy = PromotionType.isNull()
3353 ? getPromotionType(E->getSubExpr()->getType())
3354 : PromotionType;
3355 Value *result = VisitImag(E, promotionTy);
3356 if (result && !promotionTy.isNull())
3357 result = EmitUnPromotedValue(result, E->getType());
3358 return result;
3359}
3360
// Emit __imag on an expression.  For complex operands this projects the
// imaginary component; for scalar operands the result is zero, but the
// operand is still evaluated for side effects.
Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
                                    QualType PromotionType) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (Op->isGLValue()) {
      if (!PromotionType.isNull()) {
        // NOTE(review): the declaration of 'result' (the complex pair
        // emitted for Op) is elided in this view.
            Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
        // Promote only the imaginary component; the real half was ignored.
        if (result.second)
          result.second = CGF.EmitPromotedValue(result, PromotionType).second;
        return result.second;
      } else {
        return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
            .getScalarVal();
      }
    }
    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, true, false).second;
  }

  // __imag on a scalar returns zero. Emit the subexpr to ensure side
  // effects are evaluated, but not the actual value.
  if (Op->isGLValue())
    CGF.EmitLValue(Op);
  else if (!PromotionType.isNull())
    CGF.EmitPromotedScalarExpr(Op, PromotionType);
  else
    CGF.EmitScalarExpr(Op, true);
  // The zero must be produced in the promoted type if one was requested.
  if (!PromotionType.isNull())
    return llvm::Constant::getNullValue(ConvertType(PromotionType));
  return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
3396
3397//===----------------------------------------------------------------------===//
3398// Binary Operators
3399//===----------------------------------------------------------------------===//
3400
3401Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3402 QualType PromotionType) {
3403 return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");
3404}
3405
3406Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3407 QualType ExprType) {
3408 return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType), "unpromotion");
3409}
3410
3411Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3412 E = E->IgnoreParens();
3413 if (auto BO = dyn_cast<BinaryOperator>(E)) {
3414 switch (BO->getOpcode()) {
3415#define HANDLE_BINOP(OP) \
3416 case BO_##OP: \
3417 return Emit##OP(EmitBinOps(BO, PromotionType));
3418 HANDLE_BINOP(Add)
3419 HANDLE_BINOP(Sub)
3420 HANDLE_BINOP(Mul)
3421 HANDLE_BINOP(Div)
3422#undef HANDLE_BINOP
3423 default:
3424 break;
3425 }
3426 } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
3427 switch (UO->getOpcode()) {
3428 case UO_Imag:
3429 return VisitImag(UO, PromotionType);
3430 case UO_Real:
3431 return VisitReal(UO, PromotionType);
3432 case UO_Minus:
3433 return VisitMinus(UO, PromotionType);
3434 case UO_Plus:
3435 return VisitPlus(UO, PromotionType);
3436 default:
3437 break;
3438 }
3439 }
3440 auto result = Visit(const_cast<Expr *>(E));
3441 if (result) {
3442 if (!PromotionType.isNull())
3443 return EmitPromotedValue(result, PromotionType);
3444 else
3445 return EmitUnPromotedValue(result, E->getType());
3446 }
3447 return result;
3448}
3449
3450BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
3451 QualType PromotionType) {
3452 TestAndClearIgnoreResultAssign();
3453 BinOpInfo Result;
3454 Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);
3455 Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);
3456 if (!PromotionType.isNull())
3457 Result.Ty = PromotionType;
3458 else
3459 Result.Ty = E->getType();
3460 Result.Opcode = E->getOpcode();
3461 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3462 Result.E = E;
3463 return Result;
3464}
3465
// Emit a compound assignment ("a op= b"): evaluate the RHS, load the LHS,
// run *Func (the opcode-specific emitter) in the computation-result type,
// convert back to the LHS type, and store.  For _Atomic LHS types the
// update is done either with a single atomicrmw (when the opcode maps onto
// one and no overflow sanitizer must observe the result) or with a
// compare-exchange retry loop.  On return, Result holds the computed value
// already converted to the LHS type.
LValue ScalarExprEmitter::EmitCompoundAssignLValue(
    const CompoundAssignOperator *E,
    Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
    Value *&Result) {
  QualType LHSTy = E->getLHS()->getType();
  BinOpInfo OpInfo;

  // NOTE(review): a guard that normally precedes this point (two source
  // lines) is elided in this view.

  // Emit the RHS first. __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.

  // The computation runs in the (possibly promoted) computation-result type.
  QualType PromotionTypeCR;
  PromotionTypeCR = getPromotionType(E->getComputationResultType());
  if (PromotionTypeCR.isNull())
    PromotionTypeCR = E->getComputationResultType();
  QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
  QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
  if (!PromotionTypeRHS.isNull())
    OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);
  else
    OpInfo.RHS = Visit(E->getRHS());
  OpInfo.Ty = PromotionTypeCR;
  OpInfo.Opcode = E->getOpcode();
  OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  OpInfo.E = E;
  // Load/convert the LHS.
  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

  llvm::PHINode *atomicPHI = nullptr;
  if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
    QualType type = atomicTy->getValueType();
    // Try a single atomicrmw: only for non-bool integer types where no
    // enabled sanitizer needs to inspect the computed value (atomicrmw gives
    // us no chance to run a check before the store happens).
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
      // NOTE(review): the comparison's RHS and the opening brace are elided
      // in this view.
      llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
      llvm::Instruction::BinaryOps Op;
      switch (OpInfo.Opcode) {
      // We don't have atomicrmw operands for *, %, /, <<, >>
      case BO_MulAssign: case BO_DivAssign:
      case BO_RemAssign:
      case BO_ShlAssign:
      case BO_ShrAssign:
        break;
      case BO_AddAssign:
        AtomicOp = llvm::AtomicRMWInst::Add;
        Op = llvm::Instruction::Add;
        break;
      case BO_SubAssign:
        AtomicOp = llvm::AtomicRMWInst::Sub;
        Op = llvm::Instruction::Sub;
        break;
      case BO_AndAssign:
        AtomicOp = llvm::AtomicRMWInst::And;
        Op = llvm::Instruction::And;
        break;
      case BO_XorAssign:
        AtomicOp = llvm::AtomicRMWInst::Xor;
        Op = llvm::Instruction::Xor;
        break;
      case BO_OrAssign:
        AtomicOp = llvm::AtomicRMWInst::Or;
        Op = llvm::Instruction::Or;
        break;
      default:
        llvm_unreachable("Invalid compound assignment type");
      }
      if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
        // Convert the RHS to the LHS type's memory representation so it can
        // serve directly as the atomicrmw operand.
        llvm::Value *Amt = CGF.EmitToMemory(
            EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
                                 E->getExprLoc()),
            LHSTy);
        Value *OldVal = Builder.CreateAtomicRMW(
            AtomicOp, LHSLV.getAddress(CGF), Amt,
            llvm::AtomicOrdering::SequentiallyConsistent);

        // Since operation is atomic, the result type is guaranteed to be the
        // same as the input in LLVM terms.
        Result = Builder.CreateBinOp(Op, OldVal, Amt);
        return LHSLV;
      }
    }
    // General atomic case: build a compare-exchange loop around the scalar
    // computation below.
    // FIXME: For floating point types, we should be saving and restoring the
    // floating point environment in the loop.
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
    OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    // The PHI carries the "expected" value: initial load first, then each
    // failed compare-exchange's observed value.
    atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
    atomicPHI->addIncoming(OpInfo.LHS, startBB);
    OpInfo.LHS = atomicPHI;
  }
  else
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
  SourceLocation Loc = E->getExprLoc();
  // Convert the loaded LHS into the computation type (promoted if needed).
  if (!PromotionTypeLHS.isNull())
    OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
                                      E->getExprLoc());
  else
    OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
                                      E->getComputationLHSType(), Loc);

  // Expand the binary operator.
  Result = (this->*Func)(OpInfo);

  // Convert the result back to the LHS type,
  // potentially with Implicit Conversion sanitizer check.
  // If LHSLV is a bitfield, use default ScalarConversionOpts
  // to avoid emit any implicit integer checks.
  Value *Previous = nullptr;
  if (LHSLV.isBitField()) {
    Previous = Result;
    Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc);
  } else
    Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
                                  ScalarConversionOpts(CGF.SanOpts));

  if (atomicPHI) {
    // Close the compare-exchange loop: on failure, feed the observed value
    // back into the PHI and retry the computation.
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(old, curBlock);
    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return LHSLV;
  }

  // Store the result value into the LHS lvalue. Bit-fields are handled
  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (LHSLV.isBitField()) {
    Value *Src = Previous ? Previous : Result;
    QualType SrcType = E->getRHS()->getType();
    QualType DstType = E->getLHS()->getType();
    // NOTE(review): the bitfield store preceding this check is elided in
    // this view.
    CGF.EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
                                    LHSLV.getBitFieldInfo(), E->getExprLoc());
  } else
  // NOTE(review): the non-bitfield store statement is elided in this view.

  if (CGF.getLangOpts().OpenMP)
    // NOTE(review): the OpenMP runtime call receiving this argument is
    // elided in this view.
        E->getLHS());
  return LHSLV;
}
3622
3623Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3624 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3625 bool Ignore = TestAndClearIgnoreResultAssign();
3626 Value *RHS = nullptr;
3627 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
3628
3629 // If the result is clearly ignored, return now.
3630 if (Ignore)
3631 return nullptr;
3632
3633 // The result of an assignment in C is the assigned r-value.
3634 if (!CGF.getLangOpts().CPlusPlus)
3635 return RHS;
3636
3637 // If the lvalue is non-volatile, return the computed value of the assignment.
3638 if (!LHS.isVolatileQualified())
3639 return RHS;
3640
3641 // Otherwise, reload the value.
3642 return EmitLoadOfLValue(LHS, E->getExprLoc());
3643}
3644
// Emit the UBSan checks for an integer division or remainder: division by
// zero, and (for signed operands) the INT_MIN / -1 overflow case.
void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
    const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
  // NOTE(review): the declaration of the 'Checks' container (pairs of a
  // "no UB" condition and the sanitizer it serves) is elided in this view.

  // div/rem by zero is UB for both signed and unsigned operands.
  if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
    Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
                                    SanitizerKind::IntegerDivideByZero));
  }

  // INT_MIN / -1 overflows.  Only checked for signed representations that
  // were not widened (a widened op cannot hit the boundary) and that the
  // constant evaluator says may actually overflow.
  const auto *BO = cast<BinaryOperator>(Ops.E);
  if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
      Ops.Ty->hasSignedIntegerRepresentation() &&
      !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
      Ops.mayHaveIntegerOverflow()) {
    llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());

    llvm::Value *IntMin =
        Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
    llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);

    // Safe unless LHS == INT_MIN && RHS == -1.
    llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
    llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
    llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
    Checks.push_back(
        std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
  }

  if (Checks.size() > 0)
    EmitBinOpCheck(Checks, Ops);
}
3675
3676Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
3677 {
3678 CodeGenFunction::SanitizerScope SanScope(&CGF);
3679 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3680 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3681 Ops.Ty->isIntegerType() &&
3682 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3683 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3684 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
3685 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
3686 Ops.Ty->isRealFloatingType() &&
3687 Ops.mayHaveFloatDivisionByZero()) {
3688 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3689 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
3690 EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
3691 Ops);
3692 }
3693 }
3694
3695 if (Ops.Ty->isConstantMatrixType()) {
3696 llvm::MatrixBuilder MB(Builder);
3697 // We need to check the types of the operands of the operator to get the
3698 // correct matrix dimensions.
3699 auto *BO = cast<BinaryOperator>(Ops.E);
3700 (void)BO;
3701 assert(
3702 isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
3703 "first operand must be a matrix");
3704 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
3705 "second operand must be an arithmetic type");
3706 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3707 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
3708 Ops.Ty->hasUnsignedIntegerRepresentation());
3709 }
3710
3711 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
3712 llvm::Value *Val;
3713 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3714 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
3715 CGF.SetDivFPAccuracy(Val);
3716 return Val;
3717 }
3718 else if (Ops.isFixedPointOp())
3719 return EmitFixedPointBinOp(Ops);
3720 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
3721 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
3722 else
3723 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
3724}
3725
3726Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
3727 // Rem in C can't be a floating point type: C99 6.5.5p2.
3728 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3729 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3730 Ops.Ty->isIntegerType() &&
3731 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3732 CodeGenFunction::SanitizerScope SanScope(&CGF);
3733 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3734 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
3735 }
3736
3737 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3738 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
3739 else
3740 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
3741}
3742
// Emit an integer add/sub/mul through the llvm.*.with.overflow intrinsics.
// If no runtime overflow handler is configured, overflow is reported via the
// sanitizer runtime or a trap; otherwise we branch to the handler and use
// its (truncated) return value as the result.
Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  unsigned OpID = 0;
  SanitizerHandler OverflowKind;

  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
  // Select the intrinsic, the handler op-code, and the sanitizer handler.
  switch (Ops.Opcode) {
  case BO_Add:
  case BO_AddAssign:
    OpID = 1;
    IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
                     llvm::Intrinsic::uadd_with_overflow;
    OverflowKind = SanitizerHandler::AddOverflow;
    break;
  case BO_Sub:
  case BO_SubAssign:
    OpID = 2;
    IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
                     llvm::Intrinsic::usub_with_overflow;
    OverflowKind = SanitizerHandler::SubOverflow;
    break;
  case BO_Mul:
  case BO_MulAssign:
    OpID = 3;
    IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
                     llvm::Intrinsic::umul_with_overflow;
    OverflowKind = SanitizerHandler::MulOverflow;
    break;
  default:
    llvm_unreachable("Unsupported operation for overflow detection");
  }
  // Encode the operation for the runtime handler: (op << 1) | isSigned.
  OpID <<= 1;
  if (isSigned)
    OpID |= 1;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);

  // The intrinsic returns {result, did-overflow}.
  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);

  // Handle overflow with llvm.trap if no custom handler has been specified.
  const std::string *handlerName =
  // NOTE(review): the initializer (the configured overflow-handler name) is
  // elided in this view.
  if (handlerName->empty()) {
    // If the signed-integer-overflow sanitizer is enabled, emit a call to its
    // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
    if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
      llvm::Value *NotOverflow = Builder.CreateNot(overflow);
      SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
                                    : SanitizerKind::UnsignedIntegerOverflow;
      EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
    } else
      CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
    return result;
  }

  // Branch in case of overflow.
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *continueBB =
      CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // If an overflow handler is set, then we want to call it and then use its
  // result, if it returns.
  Builder.SetInsertPoint(overflowBB);

  // Get the overflow handler.  It is variadic and returns a 64-bit value.
  llvm::Type *Int8Ty = CGF.Int8Ty;
  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
  llvm::FunctionType *handlerTy =
      llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
  llvm::FunctionCallee handler =
      CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);

  // Sign extend the args to 64-bit, so that we can use the same handler for
  // all types of overflow.
  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);

  // Call the handler with the two arguments, the operation, and the size of
  // the result.
  llvm::Value *handlerArgs[] = {
    lhs,
    rhs,
    Builder.getInt8(OpID),
    Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
  };
  llvm::Value *handlerResult =
      CGF.EmitNounwindRuntimeCall(handler, handlerArgs);

  // Truncate the result back to the desired size.
  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
  Builder.CreateBr(continueBB);

  // Merge: the intrinsic's result on the no-overflow path, the handler's
  // (truncated) return value on the overflow path.
  Builder.SetInsertPoint(continueBB);
  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);

  return phi;
}
3850
/// Emit pointer + index arithmetic.
// NOTE(review): the function signature line (taking CodeGenFunction &CGF as
// the first parameter) is elided in this view.
                                     const BinOpInfo &op,
                                     bool isSubtraction) {
  // Must have binary (not unary) expr here. Unary pointer
  // increment/decrement doesn't use this path.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);

  Value *pointer = op.LHS;
  Expr *pointerOperand = expr->getLHS();
  Value *index = op.RHS;
  Expr *indexOperand = expr->getRHS();

  // In a subtraction, the LHS is always the pointer.
  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
    std::swap(pointer, index);
    std::swap(pointerOperand, indexOperand);
  }

  // The *index* operand's signedness decides zero- vs sign-extension below.
  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();

  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
  auto &DL = CGF.CGM.getDataLayout();
  auto PtrTy = cast<llvm::PointerType>(pointer->getType());

  // Some versions of glibc and gcc use idioms (particularly in their malloc
  // routines) that add a pointer-sized integer (known to be a pointer value)
  // to a null pointer in order to cast the value back to an integer or as
  // part of a pointer alignment algorithm. This is undefined behavior, but
  // we'd like to be able to compile programs that use it.
  //
  // Normally, we'd generate a GEP with a null-pointer base here in response
  // to that code, but it's also UB to dereference a pointer created that
  // way. Instead (as an acknowledged hack to tolerate the idiom) we will
  // generate a direct cast of the integer value to a pointer.
  //
  // The idiom (p = nullptr + N) is not met if any of the following are true:
  //
  // The operation is subtraction.
  // The index is not pointer-sized.
  // The pointer type is not byte-sized.
  //
  // NOTE(review): the start of the idiom-detection call is elided in this
  // view; these are its trailing arguments.
      op.Opcode,
      expr->getLHS(),
      expr->getRHS()))
    return CGF.Builder.CreateIntToPtr(index, pointer->getType());

  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
    // Zero-extend or sign-extend the pointer value according to
    // whether the index is signed or not.
    index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
                                      "idx.ext");
  }

  // If this is subtraction, negate the index.
  if (isSubtraction)
    index = CGF.Builder.CreateNeg(index, "idx.neg");

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
                        /*Accessed*/ false);

  // NOTE(review): the declaration of 'pointerType' is elided in this view.
      = pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
    // Not an ordinary C pointer type: scale the index by the pointee size
    // by hand and do a byte-wise (i8) GEP.  The pointee-type cast between
    // getType() and getPointeeType() is elided in this view — presumably an
    // Objective-C object pointer; TODO confirm.
    QualType objectType = pointerOperand->getType()
                              ->getPointeeType();
    llvm::Value *objectSize
      = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));

    index = CGF.Builder.CreateMul(index, objectSize);

    Value *result =
        CGF.Builder.CreateGEP(CGF.Int8Ty, pointer, index, "add.ptr");
    return CGF.Builder.CreateBitCast(result, pointer->getType());
  }

  QualType elementType = pointerType->getPointeeType();
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = CGF.getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply.  We suppress this if overflow is not undefined behavior.
    llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
    // NOTE(review): the condition selecting this branch is elided in this
    // view.
      index = CGF.Builder.CreateMul(index, numElements, "vla.index");
      pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
    } else {
      // Overflow-checked form: NSW multiply plus a checked inbounds GEP.
      index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
      pointer = CGF.EmitCheckedInBoundsGEP(
          elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
          "add.ptr");
    }
    return pointer;
  }

  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
  // future proof.
  llvm::Type *elemTy;
  if (elementType->isVoidType() || elementType->isFunctionType())
    elemTy = CGF.Int8Ty;
  else
    elemTy = CGF.ConvertTypeForMem(elementType);

  // NOTE(review): the condition guarding this plain (non-checked) GEP is
  // elided in this view.
    return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");

  return CGF.EmitCheckedInBoundsGEP(
      elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
      "add.ptr");
}
3969
3970// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
3971// Addend. Use negMul and negAdd to negate the first operand of the Mul or
3972// the add operand respectively. This allows fmuladd to represent a*b-c, or
3973// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
3974// efficient operations.
3975static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
3976 const CodeGenFunction &CGF, CGBuilderTy &Builder,
3977 bool negMul, bool negAdd) {
3978 Value *MulOp0 = MulOp->getOperand(0);
3979 Value *MulOp1 = MulOp->getOperand(1);
3980 if (negMul)
3981 MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
3982 if (negAdd)
3983 Addend = Builder.CreateFNeg(Addend, "neg");
3984
3985 Value *FMulAdd = nullptr;
3986 if (Builder.getIsFPConstrained()) {
3987 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
3988 "Only constrained operation should be created when Builder is in FP "
3989 "constrained mode");
3990 FMulAdd = Builder.CreateConstrainedFPCall(
3991 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
3992 Addend->getType()),
3993 {MulOp0, MulOp1, Addend});
3994 } else {
3995 FMulAdd = Builder.CreateCall(
3996 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
3997 {MulOp0, MulOp1, Addend});
3998 }
3999 MulOp->eraseFromParent();
4000
4001 return FMulAdd;
4002}
4003
4004// Check whether it would be legal to emit an fmuladd intrinsic call to
4005// represent op and if so, build the fmuladd.
4006//
4007// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
4008// Does NOT check the type of the operation - it's assumed that this function
4009// will be called from contexts where it's known that the type is contractable.
4010static Value* tryEmitFMulAdd(const BinOpInfo &op,
4011 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4012 bool isSub=false) {
4013
4014 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
4015 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
4016 "Only fadd/fsub can be the root of an fmuladd.");
4017
4018 // Check whether this op is marked as fusable.
4019 if (!op.FPFeatures.allowFPContractWithinStatement())
4020 return nullptr;
4021
4022 Value *LHS = op.LHS;
4023 Value *RHS = op.RHS;
4024
4025 // Peek through fneg to look for fmul. Make sure fneg has no users, and that
4026 // it is the only use of its operand.
4027 bool NegLHS = false;
4028 if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
4029 if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
4030 LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
4031 LHS = LHSUnOp->getOperand(0);
4032 NegLHS = true;
4033 }
4034 }
4035
4036 bool NegRHS = false;
4037 if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
4038 if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
4039 RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
4040 RHS = RHSUnOp->getOperand(0);
4041 NegRHS = true;
4042 }
4043 }
4044
4045 // We have a potentially fusable op. Look for a mul on one of the operands.
4046 // Also, make sure that the mul result isn't used directly. In that case,
4047 // there's no point creating a muladd operation.
4048 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
4049 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
4050 (LHSBinOp->use_empty() || NegLHS)) {
4051 // If we looked through fneg, erase it.
4052 if (NegLHS)
4053 cast<llvm::Instruction>(op.LHS)->eraseFromParent();
4054 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
4055 }
4056 }
4057 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
4058 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
4059 (RHSBinOp->use_empty() || NegRHS)) {
4060 // If we looked through fneg, erase it.
4061 if (NegRHS)
4062 cast<llvm::Instruction>(op.RHS)->eraseFromParent();
4063 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
4064 }
4065 }
4066
4067 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
4068 if (LHSBinOp->getIntrinsicID() ==
4069 llvm::Intrinsic::experimental_constrained_fmul &&
4070 (LHSBinOp->use_empty() || NegLHS)) {
4071 // If we looked through fneg, erase it.
4072 if (NegLHS)
4073 cast<llvm::Instruction>(op.LHS)->eraseFromParent();
4074 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
4075 }
4076 }
4077 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
4078 if (RHSBinOp->getIntrinsicID() ==
4079 llvm::Intrinsic::experimental_constrained_fmul &&
4080 (RHSBinOp->use_empty() || NegRHS)) {
4081 // If we looked through fneg, erase it.
4082 if (NegRHS)
4083 cast<llvm::Instruction>(op.RHS)->eraseFromParent();
4084 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
4085 }
4086 }
4087
4088 return nullptr;
4089}
4090
// Emit an addition: pointer arithmetic, overflow-checked / NSW / wrapping
// integer adds (depending on the signed-overflow mode and sanitizers),
// fmuladd-fused or plain FP adds, matrix adds, and fixed-point adds.
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
  // Pointer + integer (either operand order) takes the pointer-arithmetic
  // path.  NOTE(review): that call is elided in this view.
  if (op.LHS->getType()->isPointerTy() ||
      op.RHS->getType()->isPointerTy())

  if (op.Ty->isSignedIntegerOrEnumerationType()) {
    // NOTE(review): the case labels of this switch are elided in this view;
    // the [[fallthrough]]s chain the signed-overflow behaviors from the
    // plainest add down to the overflow-checked one.
    switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
      if (CanElideOverflowCheck(CGF.getContext(), op))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      return EmitOverflowCheckedBinOp(op);
    }
  }

  // For vector and matrix adds, try to fold into a fmuladd.
  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    // Try to form an fmuladd.
    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
      return FMulAdd;
  }

  if (op.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder MB(Builder);
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return MB.CreateAdd(op.LHS, op.RHS);
  }

  // Unsigned wraparound is well-defined, but the sanitizer can still check
  // it when the result provably cannot be elided.
  if (op.Ty->isUnsignedIntegerType() &&
      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
      !CanElideOverflowCheck(CGF.getContext(), op))
    return EmitOverflowCheckedBinOp(op);

  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
  }

  if (op.isFixedPointOp())
    return EmitFixedPointBinOp(op);

  return Builder.CreateAdd(op.LHS, op.RHS, "add");
}
4142
4143/// The resulting value must be calculated with exact precision, so the operands
4144/// may not be the same type.
4145Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
4146 using llvm::APSInt;
4147 using llvm::ConstantInt;
4148
4149 // This is either a binary operation where at least one of the operands is
4150 // a fixed-point type, or a unary operation where the operand is a fixed-point
4151 // type. The result type of a binary operation is determined by
4152 // Sema::handleFixedPointConversions().
4153 QualType ResultTy = op.Ty;
4154 QualType LHSTy, RHSTy;
4155 if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
4156 RHSTy = BinOp->getRHS()->getType();
4157 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
4158 // For compound assignment, the effective type of the LHS at this point
4159 // is the computation LHS type, not the actual LHS type, and the final
4160 // result type is not the type of the expression but rather the
4161 // computation result type.
4162 LHSTy = CAO->getComputationLHSType();
4163 ResultTy = CAO->getComputationResultType();
4164 } else
4165 LHSTy = BinOp->getLHS()->getType();
4166 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
4167 LHSTy = UnOp->getSubExpr()->getType();
4168 RHSTy = UnOp->getSubExpr()->getType();
4169 }
4170 ASTContext &Ctx = CGF.getContext();
4171 Value *LHS = op.LHS;
4172 Value *RHS = op.RHS;
4173
4174 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
4175 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
4176 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
4177 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
4178
4179 // Perform the actual operation.
4180 Value *Result;
4181 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
4182 switch (op.Opcode) {
4183 case BO_AddAssign:
4184 case BO_Add:
4185 Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
4186 break;
4187 case BO_SubAssign:
4188 case BO_Sub:
4189 Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
4190 break;
4191 case BO_MulAssign:
4192 case BO_Mul:
4193 Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
4194 break;
4195 case BO_DivAssign:
4196 case BO_Div:
4197 Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
4198 break;
4199 case BO_ShlAssign:
4200 case BO_Shl:
4201 Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
4202 break;
4203 case BO_ShrAssign:
4204 case BO_Shr:
4205 Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
4206 break;
4207 case BO_LT:
4208 return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
4209 case BO_GT:
4210 return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
4211 case BO_LE:
4212 return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4213 case BO_GE:
4214 return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4215 case BO_EQ:
4216 // For equality operations, we assume any padding bits on unsigned types are
4217 // zero'd out. They could be overwritten through non-saturating operations
4218 // that cause overflow, but this leads to undefined behavior.
4219 return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
4220 case BO_NE:
4221 return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4222 case BO_Cmp:
4223 case BO_LAnd:
4224 case BO_LOr:
4225 llvm_unreachable("Found unimplemented fixed point binary operation");
4226 case BO_PtrMemD:
4227 case BO_PtrMemI:
4228 case BO_Rem:
4229 case BO_Xor:
4230 case BO_And:
4231 case BO_Or:
4232 case BO_Assign:
4233 case BO_RemAssign:
4234 case BO_AndAssign:
4235 case BO_XorAssign:
4236 case BO_OrAssign:
4237 case BO_Comma:
4238 llvm_unreachable("Found unsupported binary operation for fixed point types.");
4239 }
4240
4241 bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
4243 // Convert to the result type.
4244 return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
4245 : CommonFixedSema,
4246 ResultFixedSema);
4247}
4248
4249Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
4250 // The LHS is always a pointer if either side is.
4251 if (!op.LHS->getType()->isPointerTy()) {
4252 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4253 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4255 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4256 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4257 [[fallthrough]];
4259 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4260 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
4261 [[fallthrough]];
4263 if (CanElideOverflowCheck(CGF.getContext(), op))
4264 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
4265 return EmitOverflowCheckedBinOp(op);
4266 }
4267 }
4268
4269 // For vector and matrix subs, try to fold into a fmuladd.
4270 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4271 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4272 // Try to form an fmuladd.
4273 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
4274 return FMulAdd;
4275 }
4276
4277 if (op.Ty->isConstantMatrixType()) {
4278 llvm::MatrixBuilder MB(Builder);
4279 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4280 return MB.CreateSub(op.LHS, op.RHS);
4281 }
4282
4283 if (op.Ty->isUnsignedIntegerType() &&
4284 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
4285 !CanElideOverflowCheck(CGF.getContext(), op))
4286 return EmitOverflowCheckedBinOp(op);
4287
4288 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4289 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4290 return Builder.CreateFSub(op.LHS, op.RHS, "sub");
4291 }
4292
4293 if (op.isFixedPointOp())
4294 return EmitFixedPointBinOp(op);
4295
4296 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4297 }
4298
4299 // If the RHS is not a pointer, then we have normal pointer
4300 // arithmetic.
4301 if (!op.RHS->getType()->isPointerTy())
4303
4304 // Otherwise, this is a pointer subtraction.
4305
4306 // Do the raw subtraction part.
4307 llvm::Value *LHS
4308 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
4309 llvm::Value *RHS
4310 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
4311 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
4312
4313 // Okay, figure out the element size.
4314 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
4315 QualType elementType = expr->getLHS()->getType()->getPointeeType();
4316
4317 llvm::Value *divisor = nullptr;
4318
4319 // For a variable-length array, this is going to be non-constant.
4320 if (const VariableArrayType *vla
4321 = CGF.getContext().getAsVariableArrayType(elementType)) {
4322 auto VlaSize = CGF.getVLASize(vla);
4323 elementType = VlaSize.Type;
4324 divisor = VlaSize.NumElts;
4325
4326 // Scale the number of non-VLA elements by the non-VLA element size.
4327 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
4328 if (!eltSize.isOne())
4329 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
4330
4331 // For everything elese, we can just compute it, safe in the
4332 // assumption that Sema won't let anything through that we can't
4333 // safely compute the size of.
4334 } else {
4335 CharUnits elementSize;
4336 // Handle GCC extension for pointer arithmetic on void* and
4337 // function pointer types.
4338 if (elementType->isVoidType() || elementType->isFunctionType())
4339 elementSize = CharUnits::One();
4340 else
4341 elementSize = CGF.getContext().getTypeSizeInChars(elementType);
4342
4343 // Don't even emit the divide for element size of 1.
4344 if (elementSize.isOne())
4345 return diffInChars;
4346
4347 divisor = CGF.CGM.getSize(elementSize);
4348 }
4349
4350 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
4351 // pointer difference in C is only defined in the case where both operands
4352 // are pointing to elements of an array.
4353 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
4354}
4355
4356Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
4357 bool RHSIsSigned) {
4358 llvm::IntegerType *Ty;
4359 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4360 Ty = cast<llvm::IntegerType>(VT->getElementType());
4361 else
4362 Ty = cast<llvm::IntegerType>(LHS->getType());
4363 // For a given type of LHS the maximum shift amount is width(LHS)-1, however
4364 // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
4365 // this in ConstantInt::get, this results in the value getting truncated.
4366 // Constrain the return value to be max(RHS) in this case.
4367 llvm::Type *RHSTy = RHS->getType();
4368 llvm::APInt RHSMax =
4369 RHSIsSigned ? llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())
4370 : llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
4371 if (RHSMax.ult(Ty->getBitWidth()))
4372 return llvm::ConstantInt::get(RHSTy, RHSMax);
4373 return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
4374}
4375
4376Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
4377 const Twine &Name) {
4378 llvm::IntegerType *Ty;
4379 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4380 Ty = cast<llvm::IntegerType>(VT->getElementType());
4381 else
4382 Ty = cast<llvm::IntegerType>(LHS->getType());
4383
4384 if (llvm::isPowerOf2_64(Ty->getBitWidth()))
4385 return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);
4386
4387 return Builder.CreateURem(
4388 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
4389}
4390
4391Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
4392 // TODO: This misses out on the sanitizer check below.
4393 if (Ops.isFixedPointOp())
4394 return EmitFixedPointBinOp(Ops);
4395
4396 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4397 // RHS to the same size as the LHS.
4398 Value *RHS = Ops.RHS;
4399 if (Ops.LHS->getType() != RHS->getType())
4400 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4401
4402 bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
4403 Ops.Ty->hasSignedIntegerRepresentation() &&
4405 !CGF.getLangOpts().CPlusPlus20;
4406 bool SanitizeUnsignedBase =
4407 CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
4408 Ops.Ty->hasUnsignedIntegerRepresentation();
4409 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
4410 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
4411 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4412 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4413 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
4414 else if ((SanitizeBase || SanitizeExponent) &&
4415 isa<llvm::IntegerType>(Ops.LHS->getType())) {
4416 CodeGenFunction::SanitizerScope SanScope(&CGF);
4418 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4419 llvm::Value *WidthMinusOne =
4420 GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);
4421 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
4422
4423 if (SanitizeExponent) {
4424 Checks.push_back(
4425 std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
4426 }
4427
4428 if (SanitizeBase) {
4429 // Check whether we are shifting any non-zero bits off the top of the
4430 // integer. We only emit this check if exponent is valid - otherwise
4431 // instructions below will have undefined behavior themselves.
4432 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
4433 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4434 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
4435 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
4436 llvm::Value *PromotedWidthMinusOne =
4437 (RHS == Ops.RHS) ? WidthMinusOne
4438 : GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);
4439 CGF.EmitBlock(CheckShiftBase);
4440 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
4441 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
4442 /*NUW*/ true, /*NSW*/ true),
4443 "shl.check");
4444 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
4445 // In C99, we are not permitted to shift a 1 bit into the sign bit.
4446 // Under C++11's rules, shifting a 1 bit into the sign bit is
4447 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
4448 // define signed left shifts, so we use the C99 and C++11 rules there).
4449 // Unsigned shifts can always shift into the top bit.
4450 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
4451 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
4452 }
4453 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
4454 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
4455 CGF.EmitBlock(Cont);
4456 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
4457 BaseCheck->addIncoming(Builder.getTrue(), Orig);
4458 BaseCheck->addIncoming(ValidBase, CheckShiftBase);
4459 Checks.push_back(std::make_pair(
4460 BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase
4461 : SanitizerKind::UnsignedShiftBase));
4462 }
4463
4464 assert(!Checks.empty());
4465 EmitBinOpCheck(Checks, Ops);
4466 }
4467
4468 return Builder.CreateShl(Ops.LHS, RHS, "shl");
4469}
4470
4471Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
4472 // TODO: This misses out on the sanitizer check below.
4473 if (Ops.isFixedPointOp())
4474 return EmitFixedPointBinOp(Ops);
4475
4476 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4477 // RHS to the same size as the LHS.
4478 Value *RHS = Ops.RHS;
4479 if (Ops.LHS->getType() != RHS->getType())
4480 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4481
4482 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4483 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4484 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
4485 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
4486 isa<llvm::IntegerType>(Ops.LHS->getType())) {
4487 CodeGenFunction::SanitizerScope SanScope(&CGF);
4488 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4489 llvm::Value *Valid = Builder.CreateICmpULE(
4490 Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));
4491 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
4492 }
4493
4494 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4495 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
4496 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
4497}
4498
4500// return corresponding comparison intrinsic for given vector type
4501static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
4502 BuiltinType::Kind ElemKind) {
4503 switch (ElemKind) {
4504 default: llvm_unreachable("unexpected element type");
4505 case BuiltinType::Char_U:
4506 case BuiltinType::UChar:
4507 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4508 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
4509 case BuiltinType::Char_S:
4510 case BuiltinType::SChar:
4511 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4512 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
4513 case BuiltinType::UShort:
4514 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4515 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
4516 case BuiltinType::Short:
4517 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4518 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
4519 case BuiltinType::UInt:
4520 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4521 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
4522 case BuiltinType::Int:
4523 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4524 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
4525 case BuiltinType::ULong:
4526 case BuiltinType::ULongLong:
4527 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4528 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
4529 case BuiltinType::Long:
4530 case BuiltinType::LongLong:
4531 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4532 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
4533 case BuiltinType::Float:
4534 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
4535 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
4536 case BuiltinType::Double:
4537 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
4538 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
4539 case BuiltinType::UInt128:
4540 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4541 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
4542 case BuiltinType::Int128:
4543 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4544 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
4545 }
4546}
4547
4548Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
4549 llvm::CmpInst::Predicate UICmpOpc,
4550 llvm::CmpInst::Predicate SICmpOpc,
4551 llvm::CmpInst::Predicate FCmpOpc,
4552 bool IsSignaling) {
4553 TestAndClearIgnoreResultAssign();
4554 Value *Result;
4555 QualType LHSTy = E->getLHS()->getType();
4556 QualType RHSTy = E->getRHS()->getType();
4557 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
4558 assert(E->getOpcode() == BO_EQ ||
4559 E->getOpcode() == BO_NE);
4560 Value *LHS = CGF.EmitScalarExpr(E->getLHS());
4561 Value *RHS = CGF.EmitScalarExpr(E->getRHS());
4563 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
4564 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
4565 BinOpInfo BOInfo = EmitBinOps(E);
4566 Value *LHS = BOInfo.LHS;
4567 Value *RHS = BOInfo.RHS;
4568
4569 // If AltiVec, the comparison results in a numeric type, so we use
4570 // intrinsics comparing vectors and giving 0 or 1 as a result
4571 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
4572 // constants for mapping CR6 register bits to predicate result
4573 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
4574
4575 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
4576
4577 // in several cases vector arguments order will be reversed
4578 Value *FirstVecArg = LHS,
4579 *SecondVecArg = RHS;
4580
4581 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
4582 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
4583
4584 switch(E->getOpcode()) {
4585 default: llvm_unreachable("is not a comparison operation");
4586 case BO_EQ:
4587 CR6 = CR6_LT;
4588 ID = GetIntrinsic(VCMPEQ, ElementKind);
4589 break;
4590 case BO_NE:
4591 CR6 = CR6_EQ;
4592 ID = GetIntrinsic(VCMPEQ, ElementKind);
4593 break;
4594 case BO_LT:
4595 CR6 = CR6_LT;
4596 ID = GetIntrinsic(VCMPGT, ElementKind);
4597 std::swap(FirstVecArg, SecondVecArg);
4598 break;
4599 case BO_GT:
4600 CR6 = CR6_LT;
4601 ID = GetIntrinsic(VCMPGT, ElementKind);
4602 break;
4603 case BO_LE:
4604 if (ElementKind == BuiltinType::Float) {
4605 CR6 = CR6_LT;
4606 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4607 std::swap(FirstVecArg, SecondVecArg);
4608 }
4609 else {
4610 CR6 = CR6_EQ;
4611 ID = GetIntrinsic(VCMPGT, ElementKind);
4612 }
4613 break;
4614 case BO_GE:
4615 if (ElementKind == BuiltinType::Float) {
4616 CR6 = CR6_LT;
4617 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4618 }
4619 else {
4620 CR6 = CR6_EQ;
4621 ID = GetIntrinsic(VCMPGT, ElementKind);
4622 std::swap(FirstVecArg, SecondVecArg);
4623 }
4624 break;
4625 }
4626
4627 Value *CR6Param = Builder.getInt32(CR6);
4628 llvm::Function *F = CGF.CGM.getIntrinsic(ID);
4629 Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
4630
4631 // The result type of intrinsic may not be same as E->getType().
4632 // If E->getType() is not BoolTy, EmitScalarConversion will do the
4633 // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
4634 // do nothing, if ResultTy is not i1 at the same time, it will cause
4635 // crash later.
4636 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
4637 if (ResultTy->getBitWidth() > 1 &&
4638 E->getType() == CGF.getContext().BoolTy)
4639 Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
4640 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
4641 E->getExprLoc());
4642 }
4643
4644 if (BOInfo.isFixedPointOp()) {
4645 Result = EmitFixedPointBinOp(BOInfo);
4646 } else if (LHS->getType()->isFPOrFPVectorTy()) {
4647 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
4648 if (!IsSignaling)
4649 Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
4650 else
4651 Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
4652 } else if (LHSTy->hasSignedIntegerRepresentation()) {
4653 Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
4654 } else {
4655 // Unsigned integers and pointers.
4656
4657 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
4658 !isa<llvm::ConstantPointerNull>(LHS) &&
4659 !isa<llvm::ConstantPointerNull>(RHS)) {
4660
4661 // Dynamic information is required to be stripped for comparisons,
4662 // because it could leak the dynamic information. Based on comparisons
4663 // of pointers to dynamic objects, the optimizer can replace one pointer
4664 // with another, which might be incorrect in presence of invariant
4665 // groups. Comparison with null is safe because null does not carry any
4666 // dynamic information.
4667 if (LHSTy.mayBeDynamicClass())
4668 LHS = Builder.CreateStripInvariantGroup(LHS);
4669 if (RHSTy.mayBeDynamicClass())
4670 RHS = Builder.CreateStripInvariantGroup(RHS);
4671 }
4672
4673 Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
4674 }
4675
4676 // If this is a vector comparison, sign extend the result to the appropriate
4677 // vector integer type and return it (don't convert to bool).
4678 if (LHSTy->isVectorType())
4679 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
4680
4681 } else {
4682 // Complex Comparison: can only be an equality comparison.
4684 QualType CETy;
4685 if (auto *CTy = LHSTy->getAs<ComplexType>()) {
4686 LHS = CGF.EmitComplexExpr(E->getLHS());
4687 CETy = CTy->getElementType();
4688 } else {
4689 LHS.first = Visit(E->getLHS());
4690 LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
4691 CETy = LHSTy;
4692 }
4693 if (auto *CTy = RHSTy->getAs<ComplexType>()) {
4694 RHS = CGF.EmitComplexExpr(E->getRHS());
4695 assert(CGF.getContext().hasSameUnqualifiedType(CETy,
4696 CTy->getElementType()) &&
4697 "The element types must always match.");
4698 (void)CTy;
4699 } else {
4700 RHS.first = Visit(E->getRHS());
4701 RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
4702 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
4703 "The element types must always match.");
4704 }
4705
4706 Value *ResultR, *ResultI;
4707 if (CETy->isRealFloatingType()) {
4708 // As complex comparisons can only be equality comparisons, they
4709 // are never signaling comparisons.
4710 ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
4711 ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
4712 } else {
4713 // Complex comparisons can only be equality comparisons. As such, signed
4714 // and unsigned opcodes are the same.
4715 ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
4716 ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
4717 }
4718
4719 if (E->getOpcode() == BO_EQ) {
4720 Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
4721 } else {
4722 assert(E->getOpcode() == BO_NE &&
4723 "Complex comparison other than == or != ?");
4724 Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
4725 }
4726 }
4727
4728 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
4729 E->getExprLoc());
4730}
4731
4733 const BinaryOperator *E, Value **Previous, QualType *SrcType) {
4734 // In case we have the integer or bitfield sanitizer checks enabled
4735 // we want to get the expression before scalar conversion.
4736 if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
4737 CastKind Kind = ICE->getCastKind();
4738 if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
4739 *SrcType = ICE->getSubExpr()->getType();
4740 *Previous = EmitScalarExpr(ICE->getSubExpr());
4741 // Pass default ScalarConversionOpts to avoid emitting
4742 // integer sanitizer checks as E refers to bitfield.
4743 return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
4744 ICE->getExprLoc());
4745 }
4746 }
4747 return EmitScalarExpr(E->getRHS());
4748}
4749
4750Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
4751 bool Ignore = TestAndClearIgnoreResultAssign();
4752
4753 Value *RHS;
4754 LValue LHS;
4755
4756 switch (E->getLHS()->getType().getObjCLifetime()) {
4758 std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
4759 break;
4760
4762 std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
4763 break;
4764
4766 std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
4767 break;
4768
4770 RHS = Visit(E->getRHS());
4771 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4772 RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore);
4773 break;
4774
4776 // __block variables need to have the rhs evaluated first, plus
4777 // this should improve codegen just a little.
4778 Value *Previous = nullptr;
4779 QualType SrcType = E->getRHS()->getType();
4780 // Check if LHS is a bitfield, if RHS contains an implicit cast expression
4781 // we want to extract that value and potentially (if the bitfield sanitizer
4782 // is enabled) use it to check for an implicit conversion.
4783 if (E->getLHS()->refersToBitField())
4784 RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
4785 else
4786 RHS = Visit(E->getRHS());
4787
4788 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4789
4790 // Store the value into the LHS. Bit-fields are handled specially
4791 // because the result is altered by the store, i.e., [C99 6.5.16p1]
4792 // 'An assignment expression has the value of the left operand after
4793 // the assignment...'.
4794 if (LHS.isBitField()) {
4795 CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
4796 // If the expression contained an implicit conversion, make sure
4797 // to use the value before the scalar conversion.
4798 Value *Src = Previous ? Previous : RHS;
4799 QualType DstType = E->getLHS()->getType();
4800 CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
4801 LHS.getBitFieldInfo(), E->getExprLoc());
4802 } else {
4803 CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
4804 CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
4805 }
4806 }
4807
4808 // If the result is clearly ignored, return now.
4809 if (Ignore)
4810 return nullptr;
4811
4812 // The result of an assignment in C is the assigned r-value.
4813 if (!CGF.getLangOpts().CPlusPlus)
4814 return RHS;
4815
4816 // If the lvalue is non-volatile, return the computed value of the assignment.
4817 if (!LHS.isVolatileQualified())
4818 return RHS;
4819
4820 // Otherwise, reload the value.
4821 return EmitLoadOfLValue(LHS, E->getExprLoc());
4822}
4823
// Emit "&&": elementwise (LHS!=0)&(RHS!=0) for vectors; otherwise constant-fold
// the LHS where possible and fall back to short-circuit control flow with a
// phi merging "false" edges with the evaluated RHS condition.
// NOTE(review): several statements (profile-counter increments and MCDC
// bitmap maintenance calls) appear to have been dropped by extraction
// throughout this function — verify against upstream before relying on the
// exact instrumentation behavior.  The remaining code is annotated as-is.
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      // FCMP_UNE: "unordered or not equal", so NaN compares true (non-zero).
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    // Note: no short-circuiting for vectors; both sides are always evaluated.
    Value *And = Builder.CreateAnd(LHS, RHS);
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
        Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      }

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::Constant::getNullValue(ResTy);
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit line number for unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())

  // Artificial location to preserve the scope information
  {
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}
4963
4964Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
4965 // Perform vector logical or on comparisons with zero vectors.
4966 if (E->getType()->isVectorType()) {
4968
4969 Value *LHS = Visit(E->getLHS());
4970 Value *RHS = Visit(E->getRHS());
4971 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4972 if (LHS->getType()->isFPOrFPVectorTy()) {
4973 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4974 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4975 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4976 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4977 } else {
4978 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4979 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4980 }
4981 Value *Or = Builder.CreateOr(LHS, RHS);
4982 return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
4983 }
4984
4985 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4986 llvm::Type *ResTy = ConvertType(E->getType());
4987
4988 // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
4989 // If we have 0 || X, just emit X without inserting the control flow.
4990 bool LHSCondVal;
4991 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4992 if (!LHSCondVal) { // If we have 0 || X, just emit X.
4994
4995 // If the top of the logical operator nest, reset the MCDC temp to 0.
4996 if (CGF.MCDCLogOpStack.empty())
4998
4999 CGF.MCDCLogOpStack.push_back(E);
5000
5001 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5002
5003 // If we're generating for profiling or coverage, generate a branch to a
5004 // block that increments the RHS counter need to track branch condition
5005 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
5006 // "FalseBlock" after the increment is done.
5007 if (InstrumentRegions &&
5009 CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5010 llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
5011 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
5012 Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
5013 CGF.EmitBlock(RHSBlockCnt);
5015 CGF.EmitBranch(FBlock);
5016 CGF.EmitBlock(FBlock);
5017 }
5018
5019 CGF.MCDCLogOpStack.pop_back();
5020 // If the top of the logical operator nest, update the MCDC bitmap.
5021 if (CGF.MCDCLogOpStack.empty())
5023
5024 // ZExt result to int or bool.
5025 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
5026 }
5027
5028 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
5029 if (!CGF.ContainsLabel(E->getRHS()))
5030 return llvm::ConstantInt::get(ResTy, 1);
5031 }
5032
5033 // If the top of the logical operator nest, reset the MCDC temp to 0.
5034 if (CGF.MCDCLogOpStack.empty())
5036
5037 CGF.MCDCLogOpStack.push_back(E);
5038
5039 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
5040 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
5041
5042 CodeGenFunction::ConditionalEvaluation eval(CGF);
5043
5044 // Branch on the LHS first. If it is true, go to the success (cont) block.
5045 CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
5047 CGF.getProfileCount(E->getRHS()));
5048
5049 // Any edges into the ContBlock are now from an (indeterminate number of)
5050 // edges from this first condition. All of these values will be true. Start
5051 // setting up the PHI node in the Cont Block for this.
5052 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
5053 "", ContBlock);
5054 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
5055 PI != PE; ++PI)
5056 PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
5057
5058 eval.begin(CGF);
5059
5060 // Emit the RHS condition as a bool value.
5061 CGF.EmitBlock(RHSBlock);
5063 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5064
5065 eval.end(CGF);
5066
5067 // Reaquire the RHS block, as there may be subblocks inserted.
5068 RHSBlock = Builder.GetInsertBlock();
5069
5070 // If we're generating for profiling or coverage, generate a branch on the
5071 // RHS to a block that increments the RHS true counter needed to track branch
5072 // condition coverage.
5073 if (InstrumentRegions &&
5075 CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5076 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
5077 Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
5078 CGF.EmitBlock(RHSBlockCnt);
5080 CGF.EmitBranch(ContBlock);
5081 PN->addIncoming(RHSCond, RHSBlockCnt);
5082 }
5083
5084 // Emit an unconditional branch from this block to ContBlock. Insert an entry
5085 // into the phi node for the edge with the value of RHSCond.
5086 CGF.EmitBlock(ContBlock);
5087 PN->addIncoming(RHSCond, RHSBlock);
5088
5089 CGF.MCDCLogOpStack.pop_back();
5090 // If the top of the logical operator nest, update the MCDC bitmap.
5091 if (CGF.MCDCLogOpStack.empty())
5093
5094 // ZExt result to int.
5095 return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
5096}
5097
Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
  // A comma expression evaluates its LHS purely for side effects and yields
  // the value of its RHS.
  CGF.EmitIgnoredExpr(E->getLHS());
  // Emitting the LHS may have left the builder without an insertion point
  // (e.g. after a noreturn construct); make sure one exists before the RHS.
  CGF.EnsureInsertPoint();
  return Visit(E->getRHS());
}
5103
5104//===----------------------------------------------------------------------===//
5105// Other Operators
5106//===----------------------------------------------------------------------===//
5107
5108/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
5109/// expression is cheap enough and side-effect-free enough to evaluate
5110/// unconditionally instead of conditionally. This is used to convert control
5111/// flow into selects in some cases.
5113 CodeGenFunction &CGF) {
5114 // Anything that is an integer or floating point constant is fine.
5115 return E->IgnoreParens()->isEvaluatable(CGF.getContext());
5116
5117 // Even non-volatile automatic variables can't be evaluated unconditionally.
5118 // Referencing a thread_local may cause non-trivial initialization work to
5119 // occur. If we're inside a lambda and one of the variables is from the scope
5120 // outside the lambda, that function may have returned already. Reading its
5121 // locals is a bad idea. Also, these reads may introduce races there didn't
5122 // exist in the source-level program.
5123}
5124
5125
5126Value *ScalarExprEmitter::
5127VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
5128 TestAndClearIgnoreResultAssign();
5129
5130 // Bind the common expression if necessary.
5131 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
5132
5133 Expr *condExpr = E->getCond();
5134 Expr *lhsExpr = E->getTrueExpr();
5135 Expr *rhsExpr = E->getFalseExpr();
5136
5137 // If the condition constant folds and can be elided, try to avoid emitting
5138 // the condition and the dead arm.
5139 bool CondExprBool;
5140 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5141 Expr *live = lhsExpr, *dead = rhsExpr;
5142 if (!CondExprBool) std::swap(live, dead);
5143
5144 // If the dead side doesn't have labels we need, just emit the Live part.
5145 if (!CGF.ContainsLabel(dead)) {
5146 if (CondExprBool) {
5148 CGF.incrementProfileCounter(lhsExpr);
5149 CGF.incrementProfileCounter(rhsExpr);
5150 }
5152 }
5153 Value *Result = Visit(live);
5154
5155 // If the live part is a throw expression, it acts like it has a void
5156 // type, so evaluating it returns a null Value*. However, a conditional
5157 // with non-void type must return a non-null Value*.
5158 if (!Result && !E->getType()->isVoidType())
5159 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
5160
5161 return Result;
5162 }
5163 }
5164
5165 // OpenCL: If the condition is a vector, we can treat this condition like
5166 // the select function.
5167 if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
5168 condExpr->getType()->isExtVectorType()) {
5170
5171 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5172 llvm::Value *LHS = Visit(lhsExpr);
5173 llvm::Value *RHS = Visit(rhsExpr);
5174
5175 llvm::Type *condType = ConvertType(condExpr->getType());
5176 auto *vecTy = cast<llvm::FixedVectorType>(condType);
5177
5178 unsigned numElem = vecTy->getNumElements();
5179 llvm::Type *elemType = vecTy->getElementType();
5180
5181 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
5182 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
5183 llvm::Value *tmp = Builder.CreateSExt(
5184 TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
5185 llvm::Value *tmp2 = Builder.CreateNot(tmp);
5186
5187 // Cast float to int to perform ANDs if necessary.
5188 llvm::Value *RHSTmp = RHS;
5189 llvm::Value *LHSTmp = LHS;
5190 bool wasCast = false;
5191 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
5192 if (rhsVTy->getElementType()->isFloatingPointTy()) {
5193 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
5194 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
5195 wasCast = true;
5196 }
5197
5198 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
5199 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
5200 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
5201 if (wasCast)
5202 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
5203
5204 return tmp5;
5205 }
5206
5207 if (condExpr->getType()->isVectorType() ||
5208 condExpr->getType()->isSveVLSBuiltinType()) {
5210
5211 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5212 llvm::Value *LHS = Visit(lhsExpr);
5213 llvm::Value *RHS = Visit(rhsExpr);
5214
5215 llvm::Type *CondType = ConvertType(condExpr->getType());
5216 auto *VecTy = cast<llvm::VectorType>(CondType);
5217 llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
5218
5219 CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
5220 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
5221 }
5222
5223 // If this is a really simple expression (like x ? 4 : 5), emit this as a
5224 // select instead of as control flow. We can only do this if it is cheap and
5225 // safe to evaluate the LHS and RHS unconditionally.
5226 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
5228 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
5229 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
5230
5232 CGF.incrementProfileCounter(lhsExpr);
5233 CGF.incrementProfileCounter(rhsExpr);
5235 } else
5236 CGF.incrementProfileCounter(E, StepV);
5237
5238 llvm::Value *LHS = Visit(lhsExpr);
5239 llvm::Value *RHS = Visit(rhsExpr);
5240 if (!LHS) {
5241 // If the conditional has void type, make sure we return a null Value*.
5242 assert(!RHS && "LHS and RHS types must match");
5243 return nullptr;
5244 }
5245 return Builder.CreateSelect(CondV, LHS, RHS, "cond");
5246 }
5247
5248 // If the top of the logical operator nest, reset the MCDC temp to 0.
5249 if (CGF.MCDCLogOpStack.empty())
5250 CGF.maybeResetMCDCCondBitmap(condExpr);
5251
5252 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
5253 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
5254 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
5255
5256 CodeGenFunction::ConditionalEvaluation eval(CGF);
5257 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
5258 CGF.getProfileCount(lhsExpr));
5259
5260 CGF.EmitBlock(LHSBlock);
5261
5262 // If the top of the logical operator nest, update the MCDC bitmap for the
5263 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5264 // may also contain a boolean expression.
5265 if (CGF.MCDCLogOpStack.empty())
5266 CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);
5267
5269 CGF.incrementProfileCounter(lhsExpr);
5270 else
5272
5273 eval.begin(CGF);
5274 Value *LHS = Visit(lhsExpr);
5275 eval.end(CGF);
5276
5277 LHSBlock = Builder.GetInsertBlock();
5278 Builder.CreateBr(ContBlock);
5279
5280 CGF.EmitBlock(RHSBlock);
5281
5282 // If the top of the logical operator nest, update the MCDC bitmap for the
5283 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5284 // may also contain a boolean expression.
5285 if (CGF.MCDCLogOpStack.empty())
5286 CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);
5287
5289 CGF.incrementProfileCounter(rhsExpr);
5290
5291 eval.begin(CGF);
5292 Value *RHS = Visit(rhsExpr);
5293 eval.end(CGF);
5294
5295 RHSBlock = Builder.GetInsertBlock();
5296 CGF.EmitBlock(ContBlock);
5297
5298 // If the LHS or RHS is a throw expression, it will be legitimately null.
5299 if (!LHS)
5300 return RHS;
5301 if (!RHS)
5302 return LHS;
5303
5304 // Create a PHI node for the real part.
5305 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
5306 PN->addIncoming(LHS, LHSBlock);
5307 PN->addIncoming(RHS, RHSBlock);
5308
5309 // When single byte coverage mode is enabled, add a counter to continuation
5310 // block.
5313
5314 return PN;
5315}
5316
Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
  // __builtin_choose_expr: the frontend has already determined which
  // sub-expression is chosen; simply emit that one.
  return Visit(E->getChosenSubExpr());
}
5320
5321Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
5322 QualType Ty = VE->getType();
5323
5324 if (Ty->isVariablyModifiedType())
5326
5327 Address ArgValue = Address::invalid();
5328 Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);
5329
5330 llvm::Type *ArgTy = ConvertType(VE->getType());
5331
5332 // If EmitVAArg fails, emit an error.
5333 if (!ArgPtr.isValid()) {
5334 CGF.ErrorUnsupported(VE, "va_arg expression");
5335 return llvm::UndefValue::get(ArgTy);
5336 }
5337
5338 // FIXME Volatility.
5339 llvm::Value *Val = Builder.CreateLoad(ArgPtr);
5340
5341 // If EmitVAArg promoted the type, we must truncate it.
5342 if (ArgTy != Val->getType()) {
5343 if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
5344 Val = Builder.CreateIntToPtr(Val, ArgTy);
5345 else
5346 Val = Builder.CreateTrunc(Val, ArgTy);
5347 }
5348
5349 return Val;
5350}
5351
Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
  // Delegate to CodeGenFunction to build the block literal value.
  return CGF.EmitBlockLiteral(block);
}
5355
5356// Convert a vec3 to vec4, or vice versa.
5358 Value *Src, unsigned NumElementsDst) {
5359 static constexpr int Mask[] = {0, 1, 2, -1};
5360 return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
5361}
5362
5363// Create cast instructions for converting LLVM value \p Src to LLVM type \p
5364// DstTy. \p Src has the same size as \p DstTy. Both are single value types
5365// but could be scalar or vectors of different lengths, and either can be
5366// pointer.
5367// There are 4 cases:
5368// 1. non-pointer -> non-pointer : needs 1 bitcast
5369// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
5370// 3. pointer -> non-pointer
5371// a) pointer -> intptr_t : needs 1 ptrtoint
5372// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
5373// 4. non-pointer -> pointer
5374// a) intptr_t -> pointer : needs 1 inttoptr
5375// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
5376// Note: for cases 3b and 4b two casts are required since LLVM casts do not
5377// allow casting directly between pointer types and non-integer non-pointer
5378// types.
5380 const llvm::DataLayout &DL,
5381 Value *Src, llvm::Type *DstTy,
5382 StringRef Name = "") {
5383 auto SrcTy = Src->getType();
5384
5385 // Case 1.
5386 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
5387 return Builder.CreateBitCast(Src, DstTy, Name);
5388
5389 // Case 2.
5390 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
5391 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
5392
5393 // Case 3.
5394 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
5395 // Case 3b.
5396 if (!DstTy->isIntegerTy())
5397 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
5398 // Cases 3a and 3b.
5399 return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
5400 }
5401
5402 // Case 4b.
5403 if (!SrcTy->isIntegerTy())
5404 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
5405 // Cases 4a and 4b.
5406 return Builder.CreateIntToPtr(Src, DstTy, Name);
5407}
5408
5409Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
5410 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
5411 llvm::Type *DstTy = ConvertType(E->getType());
5412
5413 llvm::Type *SrcTy = Src->getType();
5414 unsigned NumElementsSrc =
5415 isa<llvm::VectorType>(SrcTy)
5416 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
5417 : 0;
5418 unsigned NumElementsDst =
5419 isa<llvm::VectorType>(DstTy)
5420 ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
5421 : 0;
5422
5423 // Use bit vector expansion for ext_vector_type boolean vectors.
5424 if (E->getType()->isExtVectorBoolType())
5425 return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");
5426
5427 // Going from vec3 to non-vec3 is a special case and requires a shuffle
5428 // vector to get a vec4, then a bitcast if the target type is different.
5429 if (NumElementsSrc == 3 && NumElementsDst != 3) {
5430 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
5431 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5432 DstTy);
5433
5434 Src->setName("astype");
5435 return Src;
5436 }
5437
5438 // Going from non-vec3 to vec3 is a special case and requires a bitcast
5439 // to vec4 if the original type is not vec4, then a shuffle vector to
5440 // get a vec3.
5441 if (NumElementsSrc != 3 && NumElementsDst == 3) {
5442 auto *Vec4Ty = llvm::FixedVectorType::get(
5443 cast<llvm::VectorType>(DstTy)->getElementType(), 4);
5444 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5445 Vec4Ty);
5446
5447 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
5448 Src->setName("astype");
5449 return Src;
5450 }
5451
5452 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
5453 Src, DstTy, "astype");
5454}
5455
Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
  // Emit the atomic operation and extract its scalar result value.
  return CGF.EmitAtomicExpr(E).getScalarVal();
}
5459
5460//===----------------------------------------------------------------------===//
5461// Entry Point into this File
5462//===----------------------------------------------------------------------===//
5463
5464/// Emit the computation of the specified expression of scalar type, ignoring
5465/// the result.
5466Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
5467 assert(E && hasScalarEvaluationKind(E->getType()) &&
5468 "Invalid scalar expression to emit");
5469
5470 return ScalarExprEmitter(*this, IgnoreResultAssign)
5471 .Visit(const_cast<Expr *>(E));
5472}
5473
5474/// Emit a conversion from the specified type to the specified destination type,
5475/// both of which are LLVM scalar types.
5477 QualType DstTy,
5478 SourceLocation Loc) {
5479 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
5480 "Invalid scalar expression to emit");
5481 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
5482}
5483
5484/// Emit a conversion from the specified complex type to the specified
5485/// destination type, where the destination type is an LLVM scalar type.
5487 QualType SrcTy,
5488 QualType DstTy,
5489 SourceLocation Loc) {
5490 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
5491 "Invalid complex -> scalar conversion");
5492 return ScalarExprEmitter(*this)
5493 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
5494}
5495
5496
5497Value *
5499 QualType PromotionType) {
5500 if (!PromotionType.isNull())
5501 return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
5502 else
5503 return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
5504}
5505
5506
5507llvm::Value *CodeGenFunction::
5509 bool isInc, bool isPre) {
5510 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
5511}
5512
5514 // object->isa or (*object).isa
5515 // Generate code as for: *(Class*)object
5516
5517 Expr *BaseExpr = E->getBase();
5518 Address Addr = Address::invalid();
5519 if (BaseExpr->isPRValue()) {
5520 llvm::Type *BaseTy =
5522 Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
5523 } else {
5524 Addr = EmitLValue(BaseExpr).getAddress(*this);
5525 }
5526
5527 // Cast the address to Class*.
5528 Addr = Addr.withElementType(ConvertType(E->getType()));
5529 return MakeAddrLValue(Addr, E->getType());
5530}
5531
5532
5534 const CompoundAssignOperator *E) {
5535 ScalarExprEmitter Scalar(*this);
5536 Value *Result = nullptr;
5537 switch (E->getOpcode()) {
5538#define COMPOUND_OP(Op) \
5539 case BO_##Op##Assign: \
5540 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
5541 Result)
5542 COMPOUND_OP(Mul);
5543 COMPOUND_OP(Div);
5544 COMPOUND_OP(Rem);
5545 COMPOUND_OP(Add);
5546 COMPOUND_OP(Sub);
5547 COMPOUND_OP(Shl);
5548 COMPOUND_OP(Shr);
5550 COMPOUND_OP(Xor);
5551 COMPOUND_OP(Or);
5552#undef COMPOUND_OP
5553
5554 case BO_PtrMemD:
5555 case BO_PtrMemI:
5556 case BO_Mul:
5557 case BO_Div:
5558 case BO_Rem:
5559 case BO_Add:
5560 case BO_Sub:
5561 case BO_Shl:
5562 case BO_Shr:
5563 case BO_LT:
5564 case BO_GT:
5565 case BO_LE:
5566 case BO_GE:
5567 case BO_EQ:
5568 case BO_NE:
5569 case BO_Cmp:
5570 case BO_And:
5571 case BO_Xor:
5572 case BO_Or:
5573 case BO_LAnd:
5574 case BO_LOr:
5575 case BO_Assign:
5576 case BO_Comma:
5577 llvm_unreachable("Not valid compound assignment operators");
5578 }
5579
5580 llvm_unreachable("Unhandled compound assignment operator");
5581}
5582
5584 // The total (signed) byte offset for the GEP.
5585 llvm::Value *TotalOffset;
5586 // The offset overflow flag - true if the total offset overflows.
5587 llvm::Value *OffsetOverflows;
5588};
5589
5590/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
5591/// and compute the total offset it applies from it's base pointer BasePtr.
5592/// Returns offset in bytes and a boolean flag whether an overflow happened
5593/// during evaluation.
5595 llvm::LLVMContext &VMContext,
5596 CodeGenModule &CGM,
5597 CGBuilderTy &Builder) {
5598 const auto &DL = CGM.getDataLayout();
5599
5600 // The total (signed) byte offset for the GEP.
5601 llvm::Value *TotalOffset = nullptr;
5602
5603 // Was the GEP already reduced to a constant?
5604 if (isa<llvm::Constant>(GEPVal)) {
5605 // Compute the offset by casting both pointers to integers and subtracting:
5606 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
5607 Value *BasePtr_int =
5608 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
5609 Value *GEPVal_int =
5610 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
5611 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
5612 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
5613 }
5614
5615 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
5616 assert(GEP->getPointerOperand() == BasePtr &&
5617 "BasePtr must be the base of the GEP.");
5618 assert(GEP->isInBounds() && "Expected inbounds GEP");
5619
5620 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
5621
5622 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
5623 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
5624 auto *SAddIntrinsic =
5625 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
5626 auto *SMulIntrinsic =
5627 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
5628
5629 // The offset overflow flag - true if the total offset overflows.
5630 llvm::Value *OffsetOverflows = Builder.getFalse();
5631
5632 /// Return the result of the given binary operation.
5633 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
5634 llvm::Value *RHS) -> llvm::Value * {
5635 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
5636
5637 // If the operands are constants, return a constant result.
5638 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
5639 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
5640 llvm::APInt N;
5641 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
5642 /*Signed=*/true, N);
5643 if (HasOverflow)
5644 OffsetOverflows = Builder.getTrue();
5645 return llvm::ConstantInt::get(VMContext, N);
5646 }
5647 }
5648
5649 // Otherwise, compute the result with checked arithmetic.
5650 auto *ResultAndOverflow = Builder.CreateCall(
5651 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
5652 OffsetOverflows = Builder.CreateOr(
5653 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
5654 return Builder.CreateExtractValue(ResultAndOverflow, 0);
5655 };
5656
5657 // Determine the total byte offset by looking at each GEP operand.
5658 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
5659 GTI != GTE; ++GTI) {
5660 llvm::Value *LocalOffset;
5661 auto *Index = GTI.getOperand();
5662 // Compute the local offset contributed by this indexing step:
5663 if (auto *STy = GTI.getStructTypeOrNull()) {
5664 // For struct indexing, the local offset is the byte position of the
5665 // specified field.
5666 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
5667 LocalOffset = llvm::ConstantInt::get(
5668 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
5669 } else {
5670 // Otherwise this is array-like indexing. The local offset is the index
5671 // multiplied by the element size.
5672 auto *ElementSize =
5673 llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
5674 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
5675 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
5676 }
5677
5678 // If this is the first offset, set it as the total offset. Otherwise, add
5679 // the local offset into the running total.
5680 if (!TotalOffset || TotalOffset == Zero)
5681 TotalOffset = LocalOffset;
5682 else
5683 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
5684 }
5685
5686 return {TotalOffset, OffsetOverflows};
5687}
5688
5689Value *
5690CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
5691 ArrayRef<Value *> IdxList,
5692 bool SignedIndices, bool IsSubtraction,
5693 SourceLocation Loc, const Twine &Name) {
5694 llvm::Type *PtrTy = Ptr->getType();
5695 Value *GEPVal = Builder.CreateInBoundsGEP(ElemTy, Ptr, IdxList, Name);
5696
5697 // If the pointer overflow sanitizer isn't enabled, do nothing.
5698 if (!SanOpts.has(SanitizerKind::PointerOverflow))
5699 return GEPVal;
5700
5701 // Perform nullptr-and-offset check unless the nullptr is defined.
5702 bool PerformNullCheck = !NullPointerIsDefined(
5703 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
5704 // Check for overflows unless the GEP got constant-folded,
5705 // and only in the default address space
5706 bool PerformOverflowCheck =
5707 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
5708
5709 if (!(PerformNullCheck || PerformOverflowCheck))
5710 return GEPVal;
5711
5712 const auto &DL = CGM.getDataLayout();
5713
5714 SanitizerScope SanScope(this);
5715 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
5716
5717 GEPOffsetAndOverflow EvaluatedGEP =
5719
5720 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
5721 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
5722 "If the offset got constant-folded, we don't expect that there was an "
5723 "overflow.");
5724
5725 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
5726
5727 // Common case: if the total offset is zero, and we are using C++ semantics,
5728 // where nullptr+0 is defined, don't emit a check.
5729 if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
5730 return GEPVal;
5731
5732 // Now that we've computed the total offset, add it to the base pointer (with
5733 // wrapping semantics).
5734 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
5735 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
5736
5738
5739 if (PerformNullCheck) {
5740 // In C++, if the base pointer evaluates to a null pointer value,
5741 // the only valid pointer this inbounds GEP can produce is also
5742 // a null pointer, so the offset must also evaluate to zero.
5743 // Likewise, if we have non-zero base pointer, we can not get null pointer
5744 // as a result, so the offset can not be -intptr_t(BasePtr).
5745 // In other words, both pointers are either null, or both are non-null,
5746 // or the behaviour is undefined.
5747 //
5748 // C, however, is more strict in this regard, and gives more
5749 // optimization opportunities: in C, additionally, nullptr+0 is undefined.
5750 // So both the input to the 'gep inbounds' AND the output must not be null.
5751 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
5752 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
5753 auto *Valid =
5754 CGM.getLangOpts().CPlusPlus
5755 ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
5756 : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
5757 Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
5758 }
5759
5760 if (PerformOverflowCheck) {
5761 // The GEP is valid if:
5762 // 1) The total offset doesn't overflow, and
5763 // 2) The sign of the difference between the computed address and the base
5764 // pointer matches the sign of the total offset.
5765 llvm::Value *ValidGEP;
5766 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
5767 if (SignedIndices) {
5768 // GEP is computed as `unsigned base + signed offset`, therefore:
5769 // * If offset was positive, then the computed pointer can not be
5770 // [unsigned] less than the base pointer, unless it overflowed.
5771 // * If offset was negative, then the computed pointer can not be
5772 // [unsigned] greater than the bas pointere, unless it overflowed.
5773 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5774 auto *PosOrZeroOffset =
5775 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
5776 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
5777 ValidGEP =
5778 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
5779 } else if (!IsSubtraction) {
5780 // GEP is computed as `unsigned base + unsigned offset`, therefore the
5781 // computed pointer can not be [unsigned] less than base pointer,
5782 // unless there was an overflow.
5783 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
5784 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
5785 } else {
5786 // GEP is computed as `unsigned base - unsigned offset`, therefore the
5787 // computed pointer can not be [unsigned] greater than base pointer,
5788 // unless there was an overflow.
5789 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
5790 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
5791 }
5792 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
5793 Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
5794 }
5795
5796 assert(!Checks.empty() && "Should have produced some checks.");
5797
5798 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
5799 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
5800 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
5801 EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);
5802
5803 return GEPVal;
5804}
5805
5807 Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
5808 bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
5809 const Twine &Name) {
5810 if (!SanOpts.has(SanitizerKind::PointerOverflow))
5811 return Builder.CreateInBoundsGEP(Addr, IdxList, elementType, Align, Name);
5812
5813 return RawAddress(
5815 IdxList, SignedIndices, IsSubtraction, Loc, Name),
5816 elementType, Align);
5817}
Defines the clang::ASTContext interface.
#define V(N, I)
Definition: ASTContext.h:3284
ASTImporterLookupTable & LT
llvm::APSInt APSInt
static llvm::Value * EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, const BinaryOperator *E, llvm::Value *LHS, llvm::Value *RHS, CompareKind Kind, const char *NameSuffix="")
Definition: CGExprAgg.cpp:992
CodeGenFunction::ComplexPairTy ComplexPairTy
#define HANDLE_BINOP(OP)
#define VISITCOMP(CODE, UI, SI, FP, SIG)
static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty)
static Value * emitPointerArithmetic(CodeGenFunction &CGF, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static llvm::Value * EmitIsNegativeTestHelper(Value *V, QualType VType, const char *Name, CGBuilderTy &Builder)
static Value * createCastsForTypeOfSameSize(CGBuilderTy &Builder, const llvm::DataLayout &DL, Value *Src, llvm::Type *DstTy, StringRef Name="")
IntrinsicType
@ VCMPGT
@ VCMPEQ
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerMask > > EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, BuiltinType::Kind ElemKind)
#define COMPOUND_OP(Op)
#define HANDLEBINOP(OP)
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal, llvm::LLVMContext &VMContext, CodeGenModule &CGM, CGBuilderTy &Builder)
Evaluate given GEPVal, which is either an inbounds GEP, or a constant, and compute the total offset i...
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E, CodeGenFunction &CGF)
isCheapEnoughToEvaluateUnconditionally - Return true if the specified expression is cheap enough and ...
static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(QualType SrcType, QualType DstType)
static Value * buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool negMul, bool negAdd)
static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, unsigned Off)
static Value * ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF, Value *Src, unsigned NumElementsDst)
static Value * tryEmitFMulAdd(const BinOpInfo &op, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool isSub=false)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerMask > > EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerMask > > EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerMask > > EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E, llvm::Value *InVal, bool IsInc, FPOptions FPFeatures)
static Decl::Kind getKind(const Decl *D)
Definition: DeclBase.cpp:1109
StateNode * Previous
llvm::APInt getValue() const
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
APSInt & getInt()
Definition: APValue.h:423
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:182
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
CanQualType FloatTy
Definition: ASTContext.h:1103
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
Definition: ASTContext.h:2574
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
const LangOptions & getLangOpts() const
Definition: ASTContext.h:775
CanQualType BoolTy
Definition: ASTContext.h:1092
unsigned getOpenMPDefaultSimdAlign(QualType T) const
Get default simd alignment of the specified complete type in bits.
llvm::FixedPointSemantics getFixedPointSemantics(QualType Ty) const
bool hasSameUnqualifiedType(QualType T1, QualType T2) const
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
Definition: ASTContext.h:2617
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2340
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getPromotedIntegerType(QualType PromotableType) const
Return the type that PromotableType will promote to: C99 6.3.1.1p2, assuming that PromotableType is a...
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2770
QualType getComplexType(QualType T) const
Return the uniqued reference to the type for a complex number with the specified element type.
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:757
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
bool isPromotableIntegerType(QualType T) const
More type predicates useful for type checking/promotion.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
Definition: ASTContext.h:2344
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
Definition: RecordLayout.h:196
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Definition: RecordLayout.h:200
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Definition: RecordLayout.h:249
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition: Expr.h:4141
Expr * getCond() const
getCond - Return the expression representing the condition for the ?: operator.
Definition: Expr.h:4319
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition: Expr.h:4325
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition: Expr.h:4331
AddrLabelExpr - The GNU address of label extension, representing &&label.
Definition: Expr.h:4338
LabelDecl * getLabel() const
Definition: Expr.h:4361
Represents the index of the current element of an array being initialized by an ArrayInitLoopExpr.
Definition: Expr.h:5564
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition: Expr.h:2664
An Embarcadero array type trait, as used in the implementation of __array_rank and __array_extent.
Definition: ExprCXX.h:2846
uint64_t getValue() const
Definition: ExprCXX.h:2892
QualType getElementType() const
Definition: Type.h:3526
AsTypeExpr - Clang builtin function __builtin_astype [OpenCL 6.2.4.2] This AST node provides support ...
Definition: Expr.h:6234
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition: Expr.h:6253
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition: Expr.h:6437
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3840
Expr * getLHS() const
Definition: Expr.h:3889
static Opcode getOpForCompoundAssignment(Opcode Opc)
Definition: Expr.h:3986
bool isCompoundAssignmentOp() const
Definition: Expr.h:3983
SourceLocation getExprLoc() const
Definition: Expr.h:3880
bool isShiftOp() const
Definition: Expr.h:3928
Expr * getRHS() const
Definition: Expr.h:3891
bool isShiftAssignOp() const
Definition: Expr.h:3997
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition: Expr.h:4039
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, const Expr *LHS, const Expr *RHS)
Return true if a binary operator using the specified opcode and operands would match the 'p = (i8*)nu...
Definition: Expr.cpp:2206
Opcode getOpcode() const
Definition: Expr.h:3884
BlockExpr - Adaptor class for mixing a BlockDecl with expressions.
Definition: Expr.h:6173
This class is used for builtin types like 'int'.
Definition: Type.h:2977
Kind getKind() const
Definition: Type.h:3019
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition: DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition: DeclCXX.h:249
A boolean literal, per ([C++ lex.bool] Boolean literals).
Definition: ExprCXX.h:720
bool getValue() const
Definition: ExprCXX.h:737
A default argument (C++ [dcl.fct.default]).
Definition: ExprCXX.h:1264
A use of a default initializer in a constructor or in aggregate initialization.
Definition: ExprCXX.h:1371
Expr * getExpr()
Get the initialization expression that will be used.
Definition: ExprCXX.cpp:1035
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition: ExprCXX.h:2491
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition: ExprCXX.h:478
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition: ExprCXX.h:2234
Represents a C++11 noexcept expression (C++ [expr.unary.noexcept]).
Definition: ExprCXX.h:4095
bool getValue() const
Definition: ExprCXX.h:4118
The null pointer literal (C++11 [lex.nullptr])
Definition: ExprCXX.h:765
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition: ExprCXX.h:2610
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
A rewritten comparison expression that was originally written using operator syntax.
Definition: ExprCXX.h:283
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition: ExprCXX.h:301
An expression "T()" which creates a value-initialized rvalue of type T, which is a non-class type.
Definition: ExprCXX.h:2175
Represents the this expression in C++.
Definition: ExprCXX.h:1148
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1202
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2820
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition: Expr.cpp:1590
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3483
path_iterator path_begin()
Definition: Expr.h:3553
CastKind getCastKind() const
Definition: Expr.h:3527
bool changesVolatileQualification() const
Return.
Definition: Expr.h:3612
path_iterator path_end()
Definition: Expr.h:3554
Expr * getSubExpr()
Definition: Expr.h:3533
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
bool isOne() const
isOne - Test whether the quantity equals one.
Definition: CharUnits.h:125
unsigned getValue() const
Definition: Expr.h:1610
ChooseExpr - GNU builtin-in function __builtin_choose_expr.
Definition: Expr.h:4558
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition: Expr.h:4594
Represents a 'co_await' expression.
Definition: ExprCXX.h:5151
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:111
static Address invalid()
Definition: Address.h:153
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:220
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:184
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:241
bool isValid() const
Definition: Address.h:154
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:824
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
Definition: CGDebugInfo.h:864
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
Definition: CGDebugInfo.h:881
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:292
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:108
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition: CGBuilder.h:345
virtual llvm::Constant * EmitNullMemberPointer(const MemberPointerType *MPT)
Create a null member pointer of the given type.
Definition: CGCXXABI.cpp:105
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition: CGCXXABI.cpp:97
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition: CGCXXABI.cpp:87
virtual llvm::Value * EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *Src)
Perform a derived-to-base, base-to-derived, or bitcast member pointer conversion.
Definition: CGCXXABI.cpp:74
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
llvm::Value * EmitARCExtendBlockObject(const Expr *expr)
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
CurrentSourceLocExprScope CurSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
SanitizerSet SanOpts
Sanitizers enabled for this function.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E, llvm::Value *&Result)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
static bool hasScalarEvaluationKind(QualType T)
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
const LangOptions & getLangOpts() const
llvm::Value * EmitObjCProtocolExpr(const ObjCProtocolExpr *E)
llvm::Value * EmitObjCStringLiteral(const ObjCStringLiteral *E)
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
llvm::Value * EmitObjCBoxedExpr(const ObjCBoxedExpr *E)
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given division operation based on CodeGenOpts.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
llvm::Type * ConvertTypeForMem(QualType T)
llvm::Value * EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E)
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
llvm::Value * EmitObjCArrayLiteral(const ObjCArrayLiteral *E)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr)
const TargetInfo & getTarget() const
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
std::pair< LValue, llvm::Value * > EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored)
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
void maybeResetMCDCCondBitmap(const Expr *E)
Zero-init the MCDC temp value.
uint64_t getCurrentProfileCount()
Get the profiler's current count.
SmallVector< const BinaryOperator *, 16 > MCDCLogOpStack
Stack to track the Logical Operator recursion nest for MC/DC.
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
llvm::Value * EmitARCReclaimReturnedObject(const Expr *e, bool allowUnsafeClaim)
void EmitCXXDeleteExpr(const CXXDeleteExpr *E)
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
llvm::Value * EmitObjCSelectorExpr(const ObjCSelectorExpr *E)
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitBuiltinAvailable(const VersionTuple &Version)
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
llvm::Value * EmitBlockLiteral(const BlockExpr *)
Emit block literal.
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType)
llvm::Value * EmitPromotedScalarExpr(const Expr *E, QualType PromotionType)
void maybeUpdateMCDCTestVectorBitmap(const Expr *E)
Increment the profiler's counter for the given expression by StepV.
llvm::Type * ConvertType(QualType T)
Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr)
Generate code to get an argument from the passed in pointer and update it accordingly.
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
llvm::Value * EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of 'this'.
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot())
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
llvm::Value * EmitARCRetainScalarExpr(const Expr *expr)
llvm::Value * EmitCXXNewExpr(const CXXNewExpr *E)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
RValue EmitAtomicExpr(AtomicExpr *E)
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition: CGExpr.cpp:1235
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
llvm::Constant * getNullPointer(llvm::PointerType *T, QualType QT)
Get target specific null pointer.
const LangOptions & getLangOpts() const
const TargetInfo & getTarget() const
llvm::Constant * getMemberPointerConstant(const UnaryOperator *e)
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::Value * createOpenCLIntToSamplerConversion(const Expr *E, CodeGenFunction &CGF)
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys=std::nullopt)
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
LValue - This represents an lvalue references.
Definition: CGValue.h:181
bool isBitField() const
Definition: CGValue.h:283
bool isVolatileQualified() const
Definition: CGValue.h:288
void setTBAAInfo(TBAAAccessInfo Info)
Definition: CGValue.h:339
Address getAddress(CodeGenFunction &CGF) const
Definition: CGValue.h:370
const CGBitFieldInfo & getBitFieldInfo() const
Definition: CGValue.h:432
static RValue get(llvm::Value *V)
Definition: CGValue.h:97
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:70
An abstract representation of an aligned address.
Definition: Address.h:41
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
Complex values, per C99 6.2.5p11.
Definition: Type.h:3082
CompoundAssignOperator - For compound assignments (e.g.
Definition: Expr.h:4088
QualType getComputationLHSType() const
Definition: Expr.h:4122
QualType getComputationResultType() const
Definition: Expr.h:4125
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:3413
Represents the specialization of a concept - evaluates to a prvalue of type bool.
Definition: ExprConcepts.h:42
bool isSatisfied() const
Whether or not the concept with the given arguments was satisfied when the expression was created.
Definition: ExprConcepts.h:124
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1072
Represents a concrete matrix type with constant number of rows and columns.
Definition: Type.h:4163
unsigned getNumRows() const
Returns the number of rows in the matrix.
Definition: Type.h:4181
ConvertVectorExpr - Clang builtin function __builtin_convertvector This AST node provides support for...
Definition: Expr.h:4499
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition: Expr.h:4519
Represents a 'co_yield' expression.
Definition: ExprCXX.h:5232
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Definition: DeclBase.h:2342
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1260
T * getAttr() const
Definition: DeclBase.h:579
ExplicitCastExpr - An explicit cast written in the source code.
Definition: Expr.h:3730
Represents an expression – generally a full-expression – that introduces cleanups to be run at the en...
Definition: ExprCXX.h:3443
This represents one expression.
Definition: Expr.h:110
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isGLValue() const
Definition: Expr.h:280
@ SE_AllowSideEffects
Allow any unmodeled side effect.
Definition: Expr.h:671
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx, SmallVectorImpl< PartialDiagnosticAt > *Diag=nullptr) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3055
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool isPRValue() const
Definition: Expr.h:278
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3039
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:277
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition: Expr.h:469
QualType getType() const
Definition: Expr.h:142
An expression trait intrinsic.
Definition: ExprCXX.h:2917
ExtVectorType - Extended vector type.
Definition: Type.h:4057
Represents a member of a struct/union/class.
Definition: Decl.h:3058
llvm::APFloat getValue() const
Definition: Expr.h:1647
const Expr * getSubExpr() const
Definition: Expr.h:1052
GNUNullExpr - Implements the GNU __null extension, which is a name for a null pointer constant that h...
Definition: Expr.h:4633
Represents a C11 generic selection.
Definition: Expr.h:5725
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition: Expr.h:3655
Represents an implicitly-generated value initialization of an object of a given type.
Definition: Expr.h:5600
Describes an C or C++ initializer list.
Definition: Expr.h:4847
unsigned getNumInits() const
Definition: Expr.h:4877
bool hadArrayRangeDesignator() const
Definition: Expr.h:5024
const Expr * getInit(unsigned Init) const
Definition: Expr.h:4893
bool isSignedOverflowDefined() const
Definition: LangOptions.h:616
std::string OverflowHandler
The name of the handler function to be called when -ftrapv is specified.
Definition: LangOptions.h:506
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition: ExprCXX.h:4686
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition: Expr.h:2742
Represents a matrix type, as defined in the Matrix Types clang extensions.
Definition: Type.h:4127
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition: Expr.h:3172
Expr * getBase() const
Definition: Expr.h:3249
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition: Type.h:3456
ObjCArrayLiteral - used for objective-c array containers; as in: @["Hello", NSApp,...
Definition: ExprObjC.h:191
A runtime availability query.
Definition: ExprObjC.h:1696
VersionTuple getVersion() const
Definition: ExprObjC.h:1719
ObjCBoolLiteralExpr - Objective-C Boolean Literal.
Definition: ExprObjC.h:87
ObjCBoxedExpr - used for generalized expression boxing.
Definition: ExprObjC.h:127
ObjCDictionaryLiteral - AST node to represent objective-c dictionary literals; as in:"name" : NSUserN...
Definition: ExprObjC.h:309
ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC 'id' type.
Definition: ExprObjC.h:1491
Expr * getBase() const
Definition: ExprObjC.h:1516
SourceLocation getExprLoc() const LLVM_READONLY
Definition: ExprObjC.h:1539
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition: ExprObjC.h:549
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:945
const ObjCMethodDecl * getMethodDecl() const
Definition: ExprObjC.h:1356
QualType getReturnType() const
Definition: DeclObjC.h:329
Represents a pointer to an Objective C object.
Definition: Type.h:7004
const ObjCObjectType * getObjectType() const
Gets the type pointed to by this ObjC pointer.
Definition: Type.h:7041
ObjCProtocolExpr used for protocol expression in Objective-C.
Definition: ExprObjC.h:505
ObjCSelectorExpr used for @selector in Objective-C.
Definition: ExprObjC.h:455
ObjCStringLiteral, used for Objective-C string literals i.e.
Definition: ExprObjC.h:51
OffsetOfExpr - [C99 7.17] - This represents an expression of the form offsetof(record-type,...
Definition: Expr.h:2465
Expr * getIndexExpr(unsigned Idx)
Definition: Expr.h:2526
const OffsetOfNode & getComponent(unsigned Idx) const
Definition: Expr.h:2512
TypeSourceInfo * getTypeSourceInfo() const
Definition: Expr.h:2505
unsigned getNumComponents() const
Definition: Expr.h:2522
Helper class for OffsetOfExpr.
Definition: Expr.h:2359
unsigned getArrayExprIndex() const
For an array element node, returns the index into the array of expressions.
Definition: Expr.h:2417
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition: Expr.h:2423
@ Array
An index into an array.
Definition: Expr.h:2364
@ Identifier
A field in a dependent type, known only by its name.
Definition: Expr.h:2368
@ Field
A field.
Definition: Expr.h:2366
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition: Expr.h:2371
Kind getKind() const
Determine what kind of offsetof node this is.
Definition: Expr.h:2413
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition: Expr.h:2433
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition: Expr.h:1168
SourceLocation getExprLoc() const LLVM_READONLY
Definition: Expr.h:1198
Expr * getSelectedExpr() const
Definition: ExprCXX.h:4418
ParenExpr - This represents a parethesized expression, e.g.
Definition: Expr.h:2130
const Expr * getSubExpr() const
Definition: Expr.h:2145
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:3135
QualType getPointeeType() const
Definition: Type.h:3145
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition: Expr.h:6305
A (possibly-)qualified type.
Definition: Type.h:940
bool mayBeDynamicClass() const
Returns true if it is a class and it might be dynamic.
Definition: Type.cpp:95
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:1007
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition: Type.h:7355
LangAS getAddressSpace() const
Return the address space of this type.
Definition: Type.h:7481
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition: Type.h:1432
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition: Type.h:7556
QualType getCanonicalType() const
Definition: Type.h:7407
bool UseExcessPrecision(const ASTContext &Ctx)
Definition: Type.cpp:1560
bool mayBeNotDynamicClass() const
Returns true if it is not a class or if the class might not be dynamic.
Definition: Type.cpp:100
bool isCanonical() const
Definition: Type.h:7412
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition: Type.h:347
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition: Type.h:340
@ OCL_None
There is no lifetime qualification on this type.
Definition: Type.h:336
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition: Type.h:350
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition: Type.h:353
Represents a struct/union/class.
Definition: Decl.h:4169
field_iterator field_end() const
Definition: Decl.h:4378
field_iterator field_begin() const
Definition: Decl.cpp:5071
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:5545
C++2a [expr.prim.req]: A requires-expression provides a concise way to express requirements on templa...
Definition: ExprConcepts.h:510
bool isSatisfied() const
Whether or not the requires clause is satisfied.
Definition: ExprConcepts.h:562
std::string ComputeName(ASTContext &Context) const
Definition: Expr.cpp:592
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Sema - This implements semantic analysis and AST building for C.
Definition: Sema.h:457
ShuffleVectorExpr - clang-specific builtin-in function __builtin_shufflevector.
Definition: Expr.h:4431
llvm::APSInt getShuffleMaskIdx(const ASTContext &Ctx, unsigned N) const
Definition: Expr.h:4482
unsigned getNumSubExprs() const
getNumSubExprs - Return the size of the SubExprs array.
Definition: Expr.h:4465
Expr * getExpr(unsigned Index)
getExpr - Return the Expr at the specified index.
Definition: Expr.h:4471
Represents an expression that computes the length of a parameter pack.
Definition: ExprCXX.h:4227
unsigned getPackLength() const
Retrieve the length of the parameter pack.
Definition: ExprCXX.h:4302
Represents a function call to one of __builtin_LINE(), __builtin_COLUMN(), __builtin_FUNCTION(),...
Definition: Expr.h:4727
APValue EvaluateInContext(const ASTContext &Ctx, const Expr *DefaultExpr) const
Return the result of evaluating this SourceLocExpr in the specified (and possibly null) default argum...
Definition: Expr.cpp:2273
SourceLocation getLocation() const
Definition: Expr.h:4771
Encodes a location in the source.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4383
CompoundStmt * getSubStmt()
Definition: Expr.h:4400
RetTy Visit(PTR(Stmt) S, ParamTys... P)
Definition: StmtVisitor.h:44
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
Definition: StmtVisitor.h:185
Stmt - This represents one statement.
Definition: Stmt.h:84
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.cpp:338
Represents a reference to a non-type template parameter that has been substituted with a template arg...
Definition: ExprCXX.h:4442
virtual bool useFP16ConversionIntrinsics() const
Check whether llvm intrinsics such as llvm.convert.to.fp16 should be used to convert to and from __fp...
Definition: TargetInfo.h:972
VersionTuple getPlatformMinVersion() const
Retrieve the minimum desired version of the platform, to which the program should be compiled.
Definition: TargetInfo.h:1628
const llvm::fltSemantics & getHalfFormat() const
Definition: TargetInfo.h:744
const llvm::fltSemantics & getBFloat16Format() const
Definition: TargetInfo.h:754
const llvm::fltSemantics & getLongDoubleFormat() const
Definition: TargetInfo.h:765
const llvm::fltSemantics & getFloat128Format() const
Definition: TargetInfo.h:773
const llvm::fltSemantics & getIbm128Format() const
Definition: TargetInfo.h:781
QualType getType() const
Return the type wrapped by this type source info.
Definition: Type.h:7337
A type trait used in the implementation of various C++11 and Library TR1 trait templates.
Definition: ExprCXX.h:2761
bool getValue() const
Definition: ExprCXX.h:2802
bool isVoidType() const
Definition: Type.h:7901
bool isBooleanType() const
Definition: Type.h:8029
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition: Type.cpp:2155
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition: Type.cpp:2205
bool isArithmeticType() const
Definition: Type.cpp:2269
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:7941
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:8186
bool isReferenceType() const
Definition: Type.h:7620
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition: Type.cpp:1855
bool isSveVLSBuiltinType() const
Determines if this is a sizeless type supported by the 'arm_sve_vector_bits' type attribute,...
Definition: Type.cpp:2487
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:694
bool isExtVectorType() const
Definition: Type.h:7718
bool isExtVectorBoolType() const
Definition: Type.h:7722
bool isOCLIntelSubgroupAVCType() const
Definition: Type.h:7846
bool isAnyComplexType() const
Definition: Type.h:7710
bool isFixedPointType() const
Return true if this is a fixed point type according to ISO/IEC JTC1 SC22 WG14 N1169.
Definition: Type.h:7954
bool isHalfType() const
Definition: Type.h:7905
bool hasSignedIntegerRepresentation() const
Determine whether this type has an signed integer representation of some sort, e.g....
Definition: Type.cpp:2174
bool isQueueT() const
Definition: Type.h:7817
bool isMatrixType() const
Definition: Type.h:7728
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2667
bool isEventT() const
Definition: Type.h:7809
bool isFunctionType() const
Definition: Type.h:7604
bool isVectorType() const
Definition: Type.h:7714
bool isRealFloatingType() const
Floating point categories.
Definition: Type.cpp:2254
bool isFloatingType() const
Definition: Type.cpp:2237
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition: Type.cpp:2184
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:8119
bool isNullPtrType() const
Definition: Type.h:7934
UnaryExprOrTypeTraitExpr - expression with either a type or (unevaluated) expression operand.
Definition: Expr.h:2568
QualType getTypeOfArgument() const
Gets the argument type, or the type of the argument expression, whichever is appropriate.
Definition: Expr.h:2637
bool isArgumentType() const
Definition: Expr.h:2610
UnaryExprOrTypeTrait getKind() const
Definition: Expr.h:2600
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2183
SourceLocation getExprLoc() const
Definition: Expr.h:2311
Expr * getSubExpr() const
Definition: Expr.h:2228
Opcode getOpcode() const
Definition: Expr.h:2223
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition: Expr.h:2338
bool canOverflow() const
Returns true if the unary operator can cause an overflow.
Definition: Expr.h:2241
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4667
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition: Decl.h:706
QualType getType() const
Definition: Decl.h:717
QualType getType() const
Definition: Value.cpp:234
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:3743
Represents a GCC generic vector type.
Definition: Type.h:3965
VectorKind getVectorKind() const
Definition: Type.h:3985
QualType getElementType() const
Definition: Type.h:3979
Defines the clang::TargetInfo interface.
const AstTypeMatcher< PointerType > pointerType
Matches pointer types, but does not match Objective-C object pointer types.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::ArgumentAdaptingMatcherFunc< internal::HasMatcher > has
Matches AST nodes that have child AST nodes that match the provided matcher.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
llvm::APFloat APFloat
Definition: Floating.h:23
llvm::APInt APInt
Definition: Integral.h:29
bool LE(InterpState &S, CodePtr OpPC)
Definition: Interp.h:882
bool Zero(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1873
bool GE(InterpState &S, CodePtr OpPC)
Definition: Interp.h:897
The JSON file list parser is used to communicate input to InstallAPI.
BinaryOperatorKind
@ Result
The result type of a method or function.
CastKind
CastKind - The kind of operation required for a conversion.
const FunctionProtoType * T
@ Generic
not a target-specific vector type
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition: Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
#define true
Definition: stdbool.h:21
#define false
Definition: stdbool.h:22
llvm::Value * TotalOffset
llvm::Value * OffsetOverflows
Structure with information about how a bitfield should be accessed.
unsigned Size
The total size of the bit-field, in bits.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
static TBAAAccessInfo getMayAliasInfo()
Definition: CodeGenTBAA.h:63
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:642
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:644
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:159
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition: Sanitizers.h:165