clang 22.0.0git
CGExprScalar.cpp
Go to the documentation of this file.
1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGHLSLRuntime.h"
17#include "CGObjCRuntime.h"
18#include "CGOpenMPRuntime.h"
19#include "CGRecordLayout.h"
20#include "CodeGenFunction.h"
21#include "CodeGenModule.h"
22#include "ConstantEmitter.h"
23#include "TargetInfo.h"
24#include "TrapReasonBuilder.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/DeclObjC.h"
28#include "clang/AST/Expr.h"
35#include "llvm/ADT/APFixedPoint.h"
36#include "llvm/ADT/ScopeExit.h"
37#include "llvm/IR/Argument.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constants.h"
40#include "llvm/IR/DataLayout.h"
41#include "llvm/IR/DerivedTypes.h"
42#include "llvm/IR/FixedPointBuilder.h"
43#include "llvm/IR/Function.h"
44#include "llvm/IR/GEPNoWrapFlags.h"
45#include "llvm/IR/GetElementPtrTypeIterator.h"
46#include "llvm/IR/GlobalVariable.h"
47#include "llvm/IR/Intrinsics.h"
48#include "llvm/IR/IntrinsicsPowerPC.h"
49#include "llvm/IR/MatrixBuilder.h"
50#include "llvm/IR/Module.h"
51#include "llvm/Support/TypeSize.h"
52#include <cstdarg>
53#include <optional>
54
55using namespace clang;
56using namespace CodeGen;
57using llvm::Value;
58
59//===----------------------------------------------------------------------===//
60// Scalar Expression Emitter
61//===----------------------------------------------------------------------===//
62
63namespace llvm {
64extern cl::opt<bool> EnableSingleByteCoverage;
65} // namespace llvm
66
67namespace {
68
69/// Determine whether the given binary operation may overflow.
70/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
71/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
72/// the returned overflow check is precise. The returned value is 'true' for
73/// all other opcodes, to be conservative.
74bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
75 BinaryOperator::Opcode Opcode, bool Signed,
76 llvm::APInt &Result) {
77 // Assume overflow is possible, unless we can prove otherwise.
78 bool Overflow = true;
79 const auto &LHSAP = LHS->getValue();
80 const auto &RHSAP = RHS->getValue();
81 if (Opcode == BO_Add) {
82 Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
83 : LHSAP.uadd_ov(RHSAP, Overflow);
84 } else if (Opcode == BO_Sub) {
85 Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
86 : LHSAP.usub_ov(RHSAP, Overflow);
87 } else if (Opcode == BO_Mul) {
88 Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
89 : LHSAP.umul_ov(RHSAP, Overflow);
90 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
91 if (Signed && !RHS->isZero())
92 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
93 else
94 return false;
95 }
96 return Overflow;
97}
98
99struct BinOpInfo {
100 Value *LHS;
101 Value *RHS;
102 QualType Ty; // Computation Type.
103 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
104 FPOptions FPFeatures;
105 const Expr *E; // Entire expr, for error unsupported. May not be binop.
106
107 /// Check if the binop can result in integer overflow.
108 bool mayHaveIntegerOverflow() const {
109 // Without constant input, we can't rule out overflow.
110 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
111 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
112 if (!LHSCI || !RHSCI)
113 return true;
114
115 llvm::APInt Result;
116 return ::mayHaveIntegerOverflow(
117 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
118 }
119
120 /// Check if the binop computes a division or a remainder.
121 bool isDivremOp() const {
122 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
123 Opcode == BO_RemAssign;
124 }
125
126 /// Check if the binop can result in an integer division by zero.
127 bool mayHaveIntegerDivisionByZero() const {
128 if (isDivremOp())
129 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
130 return CI->isZero();
131 return true;
132 }
133
134 /// Check if the binop can result in a float division by zero.
135 bool mayHaveFloatDivisionByZero() const {
136 if (isDivremOp())
137 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
138 return CFP->isZero();
139 return true;
140 }
141
142 /// Check if at least one operand is a fixed point type. In such cases, this
143 /// operation did not follow usual arithmetic conversion and both operands
144 /// might not be of the same type.
145 bool isFixedPointOp() const {
146 // We cannot simply check the result type since comparison operations return
147 // an int.
148 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
149 QualType LHSType = BinOp->getLHS()->getType();
150 QualType RHSType = BinOp->getRHS()->getType();
151 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
152 }
153 if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
154 return UnOp->getSubExpr()->getType()->isFixedPointType();
155 return false;
156 }
157
158 /// Check if the RHS has a signed integer representation.
159 bool rhsHasSignedIntegerRepresentation() const {
160 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
161 QualType RHSType = BinOp->getRHS()->getType();
162 return RHSType->hasSignedIntegerRepresentation();
163 }
164 return false;
165 }
166};
167
168static bool MustVisitNullValue(const Expr *E) {
169 // If a null pointer expression's type is the C++0x nullptr_t, then
170 // it's not necessarily a simple constant and it must be evaluated
171 // for its potential side effects.
172 return E->getType()->isNullPtrType();
173}
174
175/// If \p E is a widened promoted integer, get its base (unpromoted) type.
176static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
177 const Expr *E) {
178 const Expr *Base = E->IgnoreImpCasts();
179 if (E == Base)
180 return std::nullopt;
181
182 QualType BaseTy = Base->getType();
183 if (!Ctx.isPromotableIntegerType(BaseTy) ||
184 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
185 return std::nullopt;
186
187 return BaseTy;
188}
189
190/// Check if \p E is a widened promoted integer.
191static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
192 return getUnwidenedIntegerType(Ctx, E).has_value();
193}
194
/// Check if we can skip the overflow check for \p Op.
static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
  assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
         "Expected a unary or binary operator");

  // If the binop has constant inputs and we can prove there is no overflow,
  // we can elide the overflow check.
  if (!Op.mayHaveIntegerOverflow())
    return true;

  // Types the user asked the sanitizer to ignore need no check either.
  if (Op.Ty->isSignedIntegerType() &&
      Ctx.isTypeIgnoredBySanitizer(SanitizerKind::SignedIntegerOverflow,
                                   Op.Ty)) {
    return true;
  }

  if (Op.Ty->isUnsignedIntegerType() &&
      Ctx.isTypeIgnoredBySanitizer(SanitizerKind::UnsignedIntegerOverflow,
                                   Op.Ty)) {
    return true;
  }

  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Op.E);

  // Negation of an integer constant expression.
  // NOTE(review): this guard looks abbreviated — confirm against upstream
  // whether additional conditions belong in this 'if'.
  if (UO && UO->getOpcode() == UO_Minus &&
      UO->isIntegerConstantExpr(Ctx))
    return true;

  // If a unary op has a widened operand, the op cannot overflow.
  if (UO)
    return !UO->canOverflow();

  // We usually don't need overflow checks for binops with widened operands.
  // Multiplication with promoted unsigned operands is a special case.
  const auto *BO = cast<BinaryOperator>(Op.E);
  if (BO->hasExcludedOverflowPattern())
    return true;

  auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
  if (!OptionalLHSTy)
    return false;

  auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
  if (!OptionalRHSTy)
    return false;

  QualType LHSTy = *OptionalLHSTy;
  QualType RHSTy = *OptionalRHSTy;

  // This is the simple case: binops without unsigned multiplication, and with
  // widened operands. No overflow check is needed here.
  if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
      !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
    return true;

  // For unsigned multiplication the overflow check can be elided if either one
  // of the unpromoted types are less than half the size of the promoted type.
  unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
  return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
         (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
}
258
259class ScalarExprEmitter
260 : public StmtVisitor<ScalarExprEmitter, Value*> {
261 CodeGenFunction &CGF;
262 CGBuilderTy &Builder;
263 bool IgnoreResultAssign;
264 llvm::LLVMContext &VMContext;
265public:
266
  // \p ira ("ignore result assign") suppresses use of the result value of a
  // top-level assignment; see TestAndClearIgnoreResultAssign.
  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
      VMContext(cgf.getLLVMContext()) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  // Return the current flag and reset it, so the "ignore" request applies to
  // at most one consumer.
  bool TestAndClearIgnoreResultAssign() {
    bool I = IgnoreResultAssign;
    IgnoreResultAssign = false;
    return I;
  }

  // Thin forwarding helpers into CodeGenFunction.
  llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
  LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
    return CGF.EmitCheckedLValue(E, TCK);
  }

  // Out of line: emit the given sanitizer checks for a binary operation.
  void EmitBinOpCheck(
      ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
      const BinOpInfo &Info);

  // Load the scalar value of an already-computed l-value.
  Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
    return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
  }
295
296 void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
297 const AlignValueAttr *AVAttr = nullptr;
298 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
299 const ValueDecl *VD = DRE->getDecl();
300
301 if (VD->getType()->isReferenceType()) {
302 if (const auto *TTy =
303 VD->getType().getNonReferenceType()->getAs<TypedefType>())
304 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
305 } else {
306 // Assumptions for function parameters are emitted at the start of the
307 // function, so there is no need to repeat that here,
308 // unless the alignment-assumption sanitizer is enabled,
309 // then we prefer the assumption over alignment attribute
310 // on IR function param.
311 if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
312 return;
313
314 AVAttr = VD->getAttr<AlignValueAttr>();
315 }
316 }
317
318 if (!AVAttr)
319 if (const auto *TTy = E->getType()->getAs<TypedefType>())
320 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
321
322 if (!AVAttr)
323 return;
324
325 Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
326 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
327 CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
328 }
329
330 /// EmitLoadOfLValue - Given an expression with complex type that represents a
331 /// value l-value, this method emits the address of the l-value, then loads
332 /// and returns the result.
333 Value *EmitLoadOfLValue(const Expr *E) {
334 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
335 E->getExprLoc());
336
337 EmitLValueAlignmentAssumption(E, V);
338 return V;
339 }
340
  /// EmitConversionToBool - Convert the specified expression value to a
  /// boolean (i1) truth value.  This is equivalent to "Val != 0".
  Value *EmitConversionToBool(Value *Src, QualType DstTy);

  /// Emit a check that a conversion from a floating-point type does not
  /// overflow.
  void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
                                Value *Src, QualType SrcType, QualType DstType,
                                llvm::Type *DstTy, SourceLocation Loc);

  /// Known implicit conversion check kinds.
  /// This is used for bitfield conversion checks as well.
  /// Keep in sync with the enum of the same name in ubsan_handlers.h
  enum ImplicitConversionCheckKind : unsigned char {
    ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
    ICCK_UnsignedIntegerTruncation = 1,
    ICCK_SignedIntegerTruncation = 2,
    ICCK_IntegerSignChange = 3,
    ICCK_SignedIntegerTruncationOrSignChange = 4,
  };

  /// Emit a check that an [implicit] truncation of an integer does not
  /// discard any bits.  It is not UB, so we use the value after truncation.
  void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a check that an [implicit] conversion of an integer does not change
  /// the sign of the value.  It is not UB, so we use the value after
  /// conversion.
  /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
  void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
                                  QualType DstType, SourceLocation Loc);

  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are LLVM scalar types.
  struct ScalarConversionOpts {
    bool TreatBooleanAsSigned;
    bool EmitImplicitIntegerTruncationChecks;
    bool EmitImplicitIntegerSignChangeChecks;

    // Default state: plain (unsigned) bool handling, no implicit-conversion
    // checks.
    ScalarConversionOpts()
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(false),
          EmitImplicitIntegerSignChangeChecks(false) {}

    // Derive the check flags from the currently-enabled sanitizers.
    ScalarConversionOpts(clang::SanitizerSet SanOpts)
        : TreatBooleanAsSigned(false),
          EmitImplicitIntegerTruncationChecks(
              SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
          EmitImplicitIntegerSignChangeChecks(
              SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
  };
  Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
                        llvm::Type *SrcTy, llvm::Type *DstTy,
                        ScalarConversionOpts Opts);
  Value *
  EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
                       SourceLocation Loc,
                       ScalarConversionOpts Opts = ScalarConversionOpts());

  /// Convert between either a fixed point and other fixed point or fixed point
  /// and an integer.
  Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                  SourceLocation Loc);

  /// Emit a conversion from the specified complex type to the specified
  /// destination type, where the destination type is an LLVM scalar type.
  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                                       QualType SrcTy, QualType DstTy,
                                       SourceLocation Loc);

  /// EmitNullValue - Emit a value that corresponds to null for the given type.
  Value *EmitNullValue(QualType Ty);
413
414 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
415 Value *EmitFloatToBoolConversion(Value *V) {
416 // Compare against 0.0 for fp scalars.
417 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
418 return Builder.CreateFCmpUNE(V, Zero, "tobool");
419 }
420
421 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
422 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
423 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
424
425 return Builder.CreateICmpNE(V, Zero, "tobool");
426 }
427
428 Value *EmitIntToBoolConversion(Value *V) {
429 // Because of the type rules of C, we often end up computing a
430 // logical value, then zero extending it to int, then wanting it
431 // as a logical value again. Optimize this common case.
432 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
433 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
434 Value *Result = ZI->getOperand(0);
435 // If there aren't any more uses, zap the instruction to save space.
436 // Note that there can be more uses, for example if this
437 // is the result of an assignment.
438 if (ZI->use_empty())
439 ZI->eraseFromParent();
440 return Result;
441 }
442 }
443
444 return Builder.CreateIsNotNull(V, "tobool");
445 }
446
  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  /// Main dispatch: scope a debug location to \p E, then visit it.
  Value *Visit(Expr *E) {
    ApplyDebugLocation DL(CGF, E);
    return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
  }

  Value *VisitStmt(Stmt *S) {
    // A non-expression statement reached the scalar emitter; dump it to aid
    // debugging before aborting.
    S->dump(llvm::errs(), CGF.getContext());
    llvm_unreachable("Stmt can't have complex result type!");
  }
  Value *VisitExpr(Expr *S);
461
462 Value *VisitConstantExpr(ConstantExpr *E) {
463 // A constant expression of type 'void' generates no code and produces no
464 // value.
465 if (E->getType()->isVoidType())
466 return nullptr;
467
468 if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
469 if (E->isGLValue()) {
470 // This was already converted to an rvalue when it was constant
471 // evaluated.
472 if (E->hasAPValueResult() && !E->getAPValueResult().isLValue())
473 return Result;
474 return CGF.EmitLoadOfScalar(
475 Address(Result, CGF.convertTypeForLoadStore(E->getType()),
477 /*Volatile*/ false, E->getType(), E->getExprLoc());
478 }
479 return Result;
480 }
481 return Visit(E->getSubExpr());
482 }
  // Parentheses contribute nothing; emit the inner expression.
  Value *VisitParenExpr(ParenExpr *PE) {
    return Visit(PE->getSubExpr());
  }
  // Emit the template-argument replacement expression.
  Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }
  // _Generic: only the selected association is emitted.
  Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    return Visit(GE->getResultExpr());
  }
  Value *VisitCoawaitExpr(CoawaitExpr *S) {
    return CGF.EmitCoawaitExpr(*S).getScalarVal();
  }
  Value *VisitCoyieldExpr(CoyieldExpr *S) {
    return CGF.EmitCoyieldExpr(*S).getScalarVal();
  }
  Value *VisitUnaryCoawait(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
501
  // Leaves.
  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
    return Builder.getInt(E->getValue());
  }
  Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
    // Fixed-point literals are emitted via their integral representation.
    return Builder.getInt(E->getValue());
  }
  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
    return llvm::ConstantFP::get(VMContext, E->getValue());
  }
  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }
  Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
    // T() for void T produces no value.
    if (E->getType()->isVoidType())
      return nullptr;

    return EmitNullValue(E->getType());
  }
  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitOffsetOfExpr(OffsetOfExpr *E);
  Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
    // GNU address-of-label: &&label.
    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
    return Builder.CreateBitCast(V, ConvertType(E->getType()));
  }

  Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
  }

  Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    return CGF.EmitPseudoObjectRValue(E).getScalarVal();
  }

  Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
  Value *VisitEmbedExpr(EmbedExpr *E);
547
548 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
549 if (E->isGLValue())
550 return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
551 E->getExprLoc());
552
553 // Otherwise, assume the mapping is the scalar directly.
555 }
556
  Value *VisitOpenACCAsteriskSizeExpr(OpenACCAsteriskSizeExpr *E) {
    llvm_unreachable("Codegen for this isn't defined/implemented");
  }

  // l-values.
  Value *VisitDeclRefExpr(DeclRefExpr *E) {
    // Prefer folding the reference to a constant when possible.
    if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
      return CGF.emitScalarConstant(Constant, E);
    return EmitLoadOfLValue(E);
  }

  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
    return CGF.EmitObjCSelectorExpr(E);
  }
  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
    return CGF.EmitObjCProtocolExpr(E);
  }
  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    return EmitLoadOfLValue(E);
  }
577 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
578 if (E->getMethodDecl() &&
580 return EmitLoadOfLValue(E);
581 return CGF.EmitObjCMessageExpr(E).getScalarVal();
582 }
583
584 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
585 LValue LV = CGF.EmitObjCIsaExpr(E);
587 return V;
588 }
589
590 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
591 VersionTuple Version = E->getVersion();
592
593 // If we're checking for a platform older than our minimum deployment
594 // target, we can fold the check away.
595 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
596 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
597
598 return CGF.EmitBuiltinAvailable(Version);
599 }
600
  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
  Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
  Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
  Value *VisitMemberExpr(MemberExpr *E);
  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
    // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
    // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
    // literals aren't l-values in C++. We do so simply because that's the
    // cleanest way to handle compound literals in C++.
    // See the discussion here: https://reviews.llvm.org/D64464
    return EmitLoadOfLValue(E);
  }

  Value *VisitInitListExpr(InitListExpr *E);

  Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
    // Only meaningful while an enclosing ArrayInitLoopExpr is being emitted.
    assert(CGF.getArrayInitIndex() &&
           "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
    return CGF.getArrayInitIndex();
  }

  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
    return EmitNullValue(E->getType());
  }
  Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
    // Let the module record the explicitly-written cast type, then emit the
    // cast normally.
    CGF.CGM.EmitExplicitCastExprType(E, &CGF);
    return VisitCastExpr(E);
  }
  Value *VisitCastExpr(CastExpr *E);
632
633 Value *VisitCallExpr(const CallExpr *E) {
635 return EmitLoadOfLValue(E);
636
637 Value *V = CGF.EmitCallExpr(E).getScalarVal();
638
639 EmitLValueAlignmentAssumption(E, V);
640 return V;
641 }
642
  Value *VisitStmtExpr(const StmtExpr *E);

  // Unary Operators.
  // The four inc/dec visitors share one worker; the boolean flags select
  // increment-vs-decrement and pre-vs-post forms.
  Value *VisitUnaryPostDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, false);
  }
  Value *VisitUnaryPostInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, false);
  }
  Value *VisitUnaryPreDec(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, false, true);
  }
  Value *VisitUnaryPreInc(const UnaryOperator *E) {
    LValue LV = EmitLValue(E->getSubExpr());
    return EmitScalarPrePostIncDec(E, LV, true, true);
  }

  llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
                                                  llvm::Value *InVal,
                                                  bool IsInc);

  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                       bool isInc, bool isPre);
670
  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
    if (isa<MemberPointerType>(E->getType())) // never sugared
      return CGF.CGM.getMemberPointerConstant(E);

    return EmitLValue(E->getSubExpr()).getPointer(CGF);
  }
  Value *VisitUnaryDeref(const UnaryOperator *E) {
    if (E->getType()->isVoidType())
      return Visit(E->getSubExpr()); // the actual value should be unused
    return EmitLoadOfLValue(E);
  }

  // The unary +/-/real/imag visitors take an optional promotion type for
  // excess-precision arithmetic (see getPromotionType).
  Value *VisitUnaryPlus(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryMinus(const UnaryOperator *E,
                         QualType PromotionType = QualType());
  Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);

  Value *VisitUnaryNot (const UnaryOperator *E);
  Value *VisitUnaryLNot (const UnaryOperator *E);
  Value *VisitUnaryReal(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryImag(const UnaryOperator *E,
                        QualType PromotionType = QualType());
  Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
  Value *VisitUnaryExtension(const UnaryOperator *E) {
    // __extension__ has no codegen effect.
    return Visit(E->getSubExpr());
  }
701
  // C++
  Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
    // Materialize the temporary as an l-value, then load its scalar value.
    return EmitLoadOfLValue(E);
  }
706 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
707 auto &Ctx = CGF.getContext();
710 return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
711 SLE->getType());
712 }
713
  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    // Emit the default argument inside its dedicated scope.
    CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
    return Visit(DAE->getExpr());
  }
  Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    // Emit the default member initializer inside its dedicated scope.
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
    return Visit(DIE->getExpr());
  }
  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
    return CGF.LoadCXXThis();
  }
725
  Value *VisitExprWithCleanups(ExprWithCleanups *E);
  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
    return CGF.EmitCXXNewExpr(E);
  }
  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
    // 'delete' has type void; emit it for its side effects only.
    CGF.EmitCXXDeleteExpr(E);
    return nullptr;
  }
734
735 Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
736 if (E->isStoredAsBoolean())
737 return llvm::ConstantInt::get(ConvertType(E->getType()),
738 E->getBoolValue());
739 assert(E->getAPValue().isInt() && "APValue type not supported");
740 return llvm::ConstantInt::get(ConvertType(E->getType()),
741 E->getAPValue().getInt());
742 }
743
  Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
    // Satisfaction is already resolved; just materialize the boolean.
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitRequiresExpr(const RequiresExpr *E) {
    return Builder.getInt1(E->isSatisfied());
  }

  Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
  }

  Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
    return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
  }

  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
    // C++ [expr.pseudo]p1:
    //   The result shall only be used as the operand for the function call
    //   operator (), and the result of such a call has type void. The only
    //   effect is the evaluation of the postfix-expression before the dot or
    //   arrow.
    CGF.EmitScalarExpr(E->getBase());
    return nullptr;
  }

  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
    return EmitNullValue(E->getType());
  }

  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
    // 'throw' has type void; emit it for its side effects only.
    CGF.EmitCXXThrowExpr(E);
    return nullptr;
  }

  Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
    return Builder.getInt1(E->getValue());
  }
782
  // Binary Operators.
  Value *EmitMul(const BinOpInfo &Ops) {
    if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
      // Each overflow-behavior mode deliberately falls through to the
      // stricter one when the signed-overflow sanitizer is enabled.
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        // Wrapping semantics: a plain 'mul' without the nsw flag.
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        // Overflow is UB: emit 'mul nsw'.
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        // Emit a checked multiply unless analysis proves the check redundant.
        if (CanElideOverflowCheck(CGF.getContext(), Ops))
          return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
        return EmitOverflowCheckedBinOp(Ops);
      }
    }

    if (Ops.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      // We need to check the types of the operands of the operator to get the
      // correct matrix dimensions.
      auto *BO = cast<BinaryOperator>(Ops.E);
      auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getLHS()->getType().getCanonicalType());
      auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
          BO->getRHS()->getType().getCanonicalType());
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      // Matrix * matrix needs both shapes; otherwise one operand is a scalar.
      if (LHSMatTy && RHSMatTy)
        return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
                                       LHSMatTy->getNumColumns(),
                                       RHSMatTy->getNumColumns());
      return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
    }

    if (Ops.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), Ops))
      return EmitOverflowCheckedBinOp(Ops);

    if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
      // Preserve the old values
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
    }
    if (Ops.isFixedPointOp())
      return EmitFixedPointBinOp(Ops);
    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
  }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
                                                  llvm::Value *Zero,bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);

  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
  // non powers of two.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  // The bitwise operators need no overflow handling; emit them directly.
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  // Compound assignment: compute the combined value via member pointer \p F
  // and store it back into the LHS l-value.
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
879
880 QualType getPromotionType(QualType Ty) {
881 const auto &Ctx = CGF.getContext();
882 if (auto *CT = Ty->getAs<ComplexType>()) {
883 QualType ElementType = CT->getElementType();
884 if (ElementType.UseExcessPrecision(Ctx))
885 return Ctx.getComplexType(Ctx.FloatTy);
886 }
887
888 if (Ty.UseExcessPrecision(Ctx)) {
889 if (auto *VT = Ty->getAs<VectorType>()) {
890 unsigned NumElements = VT->getNumElements();
891 return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
892 }
893 return Ctx.FloatTy;
894 }
895
896 return QualType();
897 }
898
899 // Binary operators and binary compound assignment operators.
900#define HANDLEBINOP(OP) \
901 Value *VisitBin##OP(const BinaryOperator *E) { \
902 QualType promotionTy = getPromotionType(E->getType()); \
903 auto result = Emit##OP(EmitBinOps(E, promotionTy)); \
904 if (result && !promotionTy.isNull()) \
905 result = EmitUnPromotedValue(result, E->getType()); \
906 return result; \
907 } \
908 Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) { \
909 ApplyAtomGroup Grp(CGF.getDebugInfo()); \
910 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP); \
911 }
912 HANDLEBINOP(Mul)
913 HANDLEBINOP(Div)
914 HANDLEBINOP(Rem)
915 HANDLEBINOP(Add)
916 HANDLEBINOP(Sub)
917 HANDLEBINOP(Shl)
918 HANDLEBINOP(Shr)
920 HANDLEBINOP(Xor)
922#undef HANDLEBINOP
923
  // Comparisons.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
  // Expand one visitor per comparison operator. Each supplies the unsigned
  // integer, signed integer, and floating-point predicates for that operator.
  // SIG is forwarded as IsSignaling: true for the relational compares
  // (<, >, <=, >=), false for the equality compares (==, !=).
#define VISITCOMP(CODE, UI, SI, FP, SIG) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP
939
  Value *VisitBinAssign (const BinaryOperator *E);

  // Logical && and || short-circuit and therefore need dedicated control-flow
  // emission rather than the generic binary-operator path.
  Value *VisitBinLAnd (const BinaryOperator *E);
  Value *VisitBinLOr (const BinaryOperator *E);
  Value *VisitBinComma (const BinaryOperator *E);

  // Pointer-to-member access (.* and ->*) is emitted as an ordinary scalar
  // load of the resulting l-value.
  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  // A rewritten operator (e.g. synthesized from operator<=>) is emitted via
  // its semantic form.
  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  // Objective-C literal expressions delegate to CodeGenFunction's
  // Objective-C emitters.
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
  // A pack-indexing expression evaluates to exactly its selected element.
  Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
    return Visit(E->getSelectedExpr());
  }
975};
976} // end anonymous namespace.
977
978//===----------------------------------------------------------------------===//
979// Utilities
980//===----------------------------------------------------------------------===//
981
982/// EmitConversionToBool - Convert the specified expression value to a
983/// boolean (i1) truth value. This is equivalent to "Val != 0".
984Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
985 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
986
987 if (SrcType->isRealFloatingType())
988 return EmitFloatToBoolConversion(Src);
989
990 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
991 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
992
993 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
994 "Unknown scalar type to convert");
995
997 return EmitIntToBoolConversion(Src);
998
999 assert(isa<llvm::PointerType>(Src->getType()));
1000 return EmitPointerToBoolConversion(Src, SrcType);
1001}
1002
1003void ScalarExprEmitter::EmitFloatConversionCheck(
1004 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
1005 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
1006 assert(SrcType->isFloatingType() && "not a conversion from floating point");
1007 if (!isa<llvm::IntegerType>(DstTy))
1008 return;
1009
1010 auto CheckOrdinal = SanitizerKind::SO_FloatCastOverflow;
1011 auto CheckHandler = SanitizerHandler::FloatCastOverflow;
1012 SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
1013 using llvm::APFloat;
1014 using llvm::APSInt;
1015
1016 llvm::Value *Check = nullptr;
1017 const llvm::fltSemantics &SrcSema =
1018 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
1019
1020 // Floating-point to integer. This has undefined behavior if the source is
1021 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
1022 // to an integer).
1023 unsigned Width = CGF.getContext().getIntWidth(DstType);
1025
1026 APSInt Min = APSInt::getMinValue(Width, Unsigned);
1027 APFloat MinSrc(SrcSema, APFloat::uninitialized);
1028 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
1029 APFloat::opOverflow)
1030 // Don't need an overflow check for lower bound. Just check for
1031 // -Inf/NaN.
1032 MinSrc = APFloat::getInf(SrcSema, true);
1033 else
1034 // Find the largest value which is too small to represent (before
1035 // truncation toward zero).
1036 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
1037
1038 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
1039 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
1040 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
1041 APFloat::opOverflow)
1042 // Don't need an overflow check for upper bound. Just check for
1043 // +Inf/NaN.
1044 MaxSrc = APFloat::getInf(SrcSema, false);
1045 else
1046 // Find the smallest value which is too large to represent (before
1047 // truncation toward zero).
1048 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
1049
1050 // If we're converting from __half, convert the range to float to match
1051 // the type of src.
1052 if (OrigSrcType->isHalfType()) {
1053 const llvm::fltSemantics &Sema =
1054 CGF.getContext().getFloatTypeSemantics(SrcType);
1055 bool IsInexact;
1056 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1057 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1058 }
1059
1060 llvm::Value *GE =
1061 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
1062 llvm::Value *LE =
1063 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
1064 Check = Builder.CreateAnd(GE, LE);
1065
1066 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
1067 CGF.EmitCheckTypeDescriptor(OrigSrcType),
1068 CGF.EmitCheckTypeDescriptor(DstType)};
1069 CGF.EmitCheck(std::make_pair(Check, CheckOrdinal), CheckHandler, StaticArgs,
1070 OrigSrc);
1071}
1072
1073// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1074// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1075static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1076 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1078 QualType DstType, CGBuilderTy &Builder) {
1079 llvm::Type *SrcTy = Src->getType();
1080 llvm::Type *DstTy = Dst->getType();
1081 (void)DstTy; // Only used in assert()
1082
1083 // This should be truncation of integral types.
1084 assert(Src != Dst);
1085 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
1086 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1087 "non-integer llvm type");
1088
1089 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1090 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1091
1092 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
1093 // Else, it is a signed truncation.
1094 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1096 if (!SrcSigned && !DstSigned) {
1097 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1098 Ordinal = SanitizerKind::SO_ImplicitUnsignedIntegerTruncation;
1099 } else {
1100 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1101 Ordinal = SanitizerKind::SO_ImplicitSignedIntegerTruncation;
1102 }
1103
1104 llvm::Value *Check = nullptr;
1105 // 1. Extend the truncated value back to the same width as the Src.
1106 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
1107 // 2. Equality-compare with the original source value
1108 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1109 // If the comparison result is 'i1 false', then the truncation was lossy.
1110 return std::make_pair(Kind, std::make_pair(Check, Ordinal));
1111}
1112
1114 QualType SrcType, QualType DstType) {
1115 return SrcType->isIntegerType() && DstType->isIntegerType();
1116}
1117
1118void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1119 Value *Dst, QualType DstType,
1120 SourceLocation Loc) {
1121 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
1122 return;
1123
1124 // We only care about int->int conversions here.
1125 // We ignore conversions to/from pointer and/or bool.
1127 DstType))
1128 return;
1129
1130 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1131 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1132 // This must be truncation. Else we do not care.
1133 if (SrcBits <= DstBits)
1134 return;
1135
1136 assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1137
1138 // If the integer sign change sanitizer is enabled,
1139 // and we are truncating from larger unsigned type to smaller signed type,
1140 // let that next sanitizer deal with it.
1141 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1142 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1143 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1144 (!SrcSigned && DstSigned))
1145 return;
1146
1147 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1148 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1149 Check;
1150
1151 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1152 {
1153 // We don't know the check kind until we call
1154 // EmitIntegerTruncationCheckHelper, but we want to annotate
1155 // EmitIntegerTruncationCheckHelper's instructions too.
1156 SanitizerDebugLocation SanScope(
1157 &CGF,
1158 {SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1159 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1160 CheckHandler);
1161 Check =
1162 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1163 // If the comparison result is 'i1 false', then the truncation was lossy.
1164 }
1165
1166 // Do we care about this type of truncation?
1167 if (!CGF.SanOpts.has(Check.second.second))
1168 return;
1169
1170 SanitizerDebugLocation SanScope(&CGF, {Check.second.second}, CheckHandler);
1171
1172 // Does some SSCL ignore this type?
1174 SanitizerMask::bitPosToMask(Check.second.second), DstType))
1175 return;
1176
1177 llvm::Constant *StaticArgs[] = {
1178 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1179 CGF.EmitCheckTypeDescriptor(DstType),
1180 llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
1181 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1182
1183 CGF.EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
1184}
1185
1186static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
1187 const char *Name,
1188 CGBuilderTy &Builder) {
1189 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1190 llvm::Type *VTy = V->getType();
1191 if (!VSigned) {
1192 // If the value is unsigned, then it is never negative.
1193 return llvm::ConstantInt::getFalse(VTy->getContext());
1194 }
1195 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1196 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1197 llvm::Twine(Name) + "." + V->getName() +
1198 ".negativitycheck");
1199}
1200
1201// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1202// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1203static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1204 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1206 QualType DstType, CGBuilderTy &Builder) {
1207 llvm::Type *SrcTy = Src->getType();
1208 llvm::Type *DstTy = Dst->getType();
1209
1210 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1211 "non-integer llvm type");
1212
1213 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1214 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1215 (void)SrcSigned; // Only used in assert()
1216 (void)DstSigned; // Only used in assert()
1217 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1218 unsigned DstBits = DstTy->getScalarSizeInBits();
1219 (void)SrcBits; // Only used in assert()
1220 (void)DstBits; // Only used in assert()
1221
1222 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1223 "either the widths should be different, or the signednesses.");
1224
1225 // 1. Was the old Value negative?
1226 llvm::Value *SrcIsNegative =
1227 EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
1228 // 2. Is the new Value negative?
1229 llvm::Value *DstIsNegative =
1230 EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
1231 // 3. Now, was the 'negativity status' preserved during the conversion?
1232 // NOTE: conversion from negative to zero is considered to change the sign.
1233 // (We want to get 'false' when the conversion changed the sign)
1234 // So we should just equality-compare the negativity statuses.
1235 llvm::Value *Check = nullptr;
1236 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1237 // If the comparison result is 'false', then the conversion changed the sign.
1238 return std::make_pair(
1239 ScalarExprEmitter::ICCK_IntegerSignChange,
1240 std::make_pair(Check, SanitizerKind::SO_ImplicitIntegerSignChange));
1241}
1242
1243void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1244 Value *Dst, QualType DstType,
1245 SourceLocation Loc) {
1246 if (!CGF.SanOpts.has(SanitizerKind::SO_ImplicitIntegerSignChange))
1247 return;
1248
1249 llvm::Type *SrcTy = Src->getType();
1250 llvm::Type *DstTy = Dst->getType();
1251
1252 // We only care about int->int conversions here.
1253 // We ignore conversions to/from pointer and/or bool.
1255 DstType))
1256 return;
1257
1258 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1259 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1260 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1261 unsigned DstBits = DstTy->getScalarSizeInBits();
1262
1263 // Now, we do not need to emit the check in *all* of the cases.
1264 // We can avoid emitting it in some obvious cases where it would have been
1265 // dropped by the opt passes (instcombine) always anyways.
1266 // If it's a cast between effectively the same type, no check.
1267 // NOTE: this is *not* equivalent to checking the canonical types.
1268 if (SrcSigned == DstSigned && SrcBits == DstBits)
1269 return;
1270 // At least one of the values needs to have signed type.
1271 // If both are unsigned, then obviously, neither of them can be negative.
1272 if (!SrcSigned && !DstSigned)
1273 return;
1274 // If the conversion is to *larger* *signed* type, then no check is needed.
1275 // Because either sign-extension happens (so the sign will remain),
1276 // or zero-extension will happen (the sign bit will be zero.)
1277 if ((DstBits > SrcBits) && DstSigned)
1278 return;
1279 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1280 (SrcBits > DstBits) && SrcSigned) {
1281 // If the signed integer truncation sanitizer is enabled,
1282 // and this is a truncation from signed type, then no check is needed.
1283 // Because here sign change check is interchangeable with truncation check.
1284 return;
1285 }
1286 // Does an SSCL have an entry for the DstType under its respective sanitizer
1287 // section?
1288 if (DstSigned && CGF.getContext().isTypeIgnoredBySanitizer(
1289 SanitizerKind::ImplicitSignedIntegerTruncation, DstType))
1290 return;
1291 if (!DstSigned &&
1293 SanitizerKind::ImplicitUnsignedIntegerTruncation, DstType))
1294 return;
1295 // That's it. We can't rule out any more cases with the data we have.
1296
1297 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1298 SanitizerDebugLocation SanScope(
1299 &CGF,
1300 {SanitizerKind::SO_ImplicitIntegerSignChange,
1301 SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1302 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1303 CheckHandler);
1304
1305 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1306 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1307 Check;
1308
1309 // Each of these checks needs to return 'false' when an issue was detected.
1310 ImplicitConversionCheckKind CheckKind;
1311 llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
1312 2>
1313 Checks;
1314 // So we can 'and' all the checks together, and still get 'false',
1315 // if at least one of the checks detected an issue.
1316
1317 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1318 CheckKind = Check.first;
1319 Checks.emplace_back(Check.second);
1320
1321 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1322 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1323 // If the signed integer truncation sanitizer was enabled,
1324 // and we are truncating from larger unsigned type to smaller signed type,
1325 // let's handle the case we skipped in that check.
1326 Check =
1327 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1328 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1329 Checks.emplace_back(Check.second);
1330 // If the comparison result is 'i1 false', then the truncation was lossy.
1331 }
1332
1333 llvm::Constant *StaticArgs[] = {
1334 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1335 CGF.EmitCheckTypeDescriptor(DstType),
1336 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1337 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1338 // EmitCheck() will 'and' all the checks together.
1339 CGF.EmitCheck(Checks, CheckHandler, StaticArgs, {Src, Dst});
1340}
1341
1342// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1343// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1344static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1345 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1347 QualType DstType, CGBuilderTy &Builder) {
1348 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1349 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1350
1351 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1352 if (!SrcSigned && !DstSigned)
1353 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1354 else
1355 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1356
1357 llvm::Value *Check = nullptr;
1358 // 1. Extend the truncated value back to the same width as the Src.
1359 Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
1360 // 2. Equality-compare with the original source value
1361 Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
1362 // If the comparison result is 'i1 false', then the truncation was lossy.
1363
1364 return std::make_pair(
1365 Kind,
1366 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1367}
1368
1369// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1370// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1371static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1372 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1374 QualType DstType, CGBuilderTy &Builder) {
1375 // 1. Was the old Value negative?
1376 llvm::Value *SrcIsNegative =
1377 EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
1378 // 2. Is the new Value negative?
1379 llvm::Value *DstIsNegative =
1380 EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
1381 // 3. Now, was the 'negativity status' preserved during the conversion?
1382 // NOTE: conversion from negative to zero is considered to change the sign.
1383 // (We want to get 'false' when the conversion changed the sign)
1384 // So we should just equality-compare the negativity statuses.
1385 llvm::Value *Check = nullptr;
1386 Check =
1387 Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
1388 // If the comparison result is 'false', then the conversion changed the sign.
1389 return std::make_pair(
1390 ScalarExprEmitter::ICCK_IntegerSignChange,
1391 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1392}
1393
1395 Value *Dst, QualType DstType,
1396 const CGBitFieldInfo &Info,
1397 SourceLocation Loc) {
1398
1399 if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
1400 return;
1401
1402 // We only care about int->int conversions here.
1403 // We ignore conversions to/from pointer and/or bool.
1405 DstType))
1406 return;
1407
1408 if (DstType->isBooleanType() || SrcType->isBooleanType())
1409 return;
1410
1411 // This should be truncation of integral types.
1412 assert(isa<llvm::IntegerType>(Src->getType()) &&
1413 isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");
1414
1415 // TODO: Calculate src width to avoid emitting code
1416 // for unecessary cases.
1417 unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
1418 unsigned DstBits = Info.Size;
1419
1420 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1421 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1422
1423 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1424 SanitizerDebugLocation SanScope(
1425 this, {SanitizerKind::SO_ImplicitBitfieldConversion}, CheckHandler);
1426
1427 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1428 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1429 Check;
1430
1431 // Truncation
1432 bool EmitTruncation = DstBits < SrcBits;
1433 // If Dst is signed and Src unsigned, we want to be more specific
1434 // about the CheckKind we emit, in this case we want to emit
1435 // ICCK_SignedIntegerTruncationOrSignChange.
1436 bool EmitTruncationFromUnsignedToSigned =
1437 EmitTruncation && DstSigned && !SrcSigned;
1438 // Sign change
1439 bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
1440 bool BothUnsigned = !SrcSigned && !DstSigned;
1441 bool LargerSigned = (DstBits > SrcBits) && DstSigned;
1442 // We can avoid emitting sign change checks in some obvious cases
1443 // 1. If Src and Dst have the same signedness and size
1444 // 2. If both are unsigned sign check is unecessary!
1445 // 3. If Dst is signed and bigger than Src, either
1446 // sign-extension or zero-extension will make sure
1447 // the sign remains.
1448 bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;
1449
1450 if (EmitTruncation)
1451 Check =
1452 EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1453 else if (EmitSignChange) {
1454 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1455 "either the widths should be different, or the signednesses.");
1456 Check =
1457 EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1458 } else
1459 return;
1460
1461 ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
1462 if (EmitTruncationFromUnsignedToSigned)
1463 CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;
1464
1465 llvm::Constant *StaticArgs[] = {
1467 EmitCheckTypeDescriptor(DstType),
1468 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1469 llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};
1470
1471 EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
1472}
1473
/// Emit a cast between two scalar (or matrix-of-scalar) values whose kinds
/// may differ: int<->int, int<->FP, or FP<->FP. For matrix types, the cast
/// is chosen from the *element* types but applied to the whole vector value.
Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
                                         QualType DstType, llvm::Type *SrcTy,
                                         llvm::Type *DstTy,
                                         ScalarConversionOpts Opts) {
  // The Element types determine the type of cast to perform.
  llvm::Type *SrcElementTy;
  llvm::Type *DstElementTy;
  QualType SrcElementType;
  QualType DstElementType;
  if (SrcType->isMatrixType() && DstType->isMatrixType()) {
    // Matrices are lowered as flat LLVM vectors; peel off the element types.
    SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
    DstElementType = DstType->castAs<MatrixType>()->getElementType();
  } else {
    assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
           "cannot cast between matrix and non-matrix types");
    SrcElementTy = SrcTy;
    DstElementTy = DstTy;
    SrcElementType = SrcType;
    DstElementType = DstType;
  }

  // Integer source: int->int or int->FP.
  if (isa<llvm::IntegerType>(SrcElementTy)) {
    bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
    if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      // OpenCL-style bool vectors: 'true' is all-ones, so sign-extend.
      InputSigned = true;
    }

    if (isa<llvm::IntegerType>(DstElementTy))
      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    if (InputSigned)
      return Builder.CreateSIToFP(Src, DstTy, "conv");
    return Builder.CreateUIToFP(Src, DstTy, "conv");
  }

  // FP source, integer destination.
  if (isa<llvm::IntegerType>(DstElementTy)) {
    assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
    bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();

    // If we can't recognize overflow as undefined behavior, assume that
    // overflow saturates. This protects against normal optimizations if we are
    // compiling with non-standard FP semantics.
    if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
      llvm::Intrinsic::ID IID =
          IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
      return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
    }

    if (IsSigned)
      return Builder.CreateFPToSI(Src, DstTy, "conv");
    return Builder.CreateFPToUI(Src, DstTy, "conv");
  }

  // FP->FP. A cast between two distinct 16-bit FP types (e.g. half and
  // bfloat) is neither a pure trunc nor a pure ext, so round-trip through
  // float: extend to float, then truncate to the destination.
  if ((DstElementTy->is16bitFPTy() && SrcElementTy->is16bitFPTy())) {
    Value *FloatVal = Builder.CreateFPExt(Src, Builder.getFloatTy(), "fpext");
    return Builder.CreateFPTrunc(FloatVal, DstTy, "fptrunc");
  }
  // Otherwise pick trunc vs ext by comparing TypeIDs; for the FP types this
  // relies on llvm::Type::TypeID being ordered by increasing precision.
  if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
    return Builder.CreateFPTrunc(Src, DstTy, "conv");
  return Builder.CreateFPExt(Src, DstTy, "conv");
}
1536
1537/// Emit a conversion from the specified type to the specified destination type,
1538/// both of which are LLVM scalar types.
1539Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1540 QualType DstType,
1541 SourceLocation Loc,
1542 ScalarConversionOpts Opts) {
1543 // All conversions involving fixed point types should be handled by the
1544 // EmitFixedPoint family functions. This is done to prevent bloating up this
1545 // function more, and although fixed point numbers are represented by
1546 // integers, we do not want to follow any logic that assumes they should be
1547 // treated as integers.
1548 // TODO(leonardchan): When necessary, add another if statement checking for
1549 // conversions to fixed point types from other types.
1550 if (SrcType->isFixedPointType()) {
1551 if (DstType->isBooleanType())
1552 // It is important that we check this before checking if the dest type is
1553 // an integer because booleans are technically integer types.
1554 // We do not need to check the padding bit on unsigned types if unsigned
1555 // padding is enabled because overflow into this bit is undefined
1556 // behavior.
1557 return Builder.CreateIsNotNull(Src, "tobool");
1558 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1559 DstType->isRealFloatingType())
1560 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1561
1562 llvm_unreachable(
1563 "Unhandled scalar conversion from a fixed point type to another type.");
1564 } else if (DstType->isFixedPointType()) {
1565 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1566 // This also includes converting booleans and enums to fixed point types.
1567 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1568
1569 llvm_unreachable(
1570 "Unhandled scalar conversion to a fixed point type from another type.");
1571 }
1572
1573 QualType NoncanonicalSrcType = SrcType;
1574 QualType NoncanonicalDstType = DstType;
1575
1576 SrcType = CGF.getContext().getCanonicalType(SrcType);
1577 DstType = CGF.getContext().getCanonicalType(DstType);
1578 if (SrcType == DstType) return Src;
1579
1580 if (DstType->isVoidType()) return nullptr;
1581
1582 llvm::Value *OrigSrc = Src;
1583 QualType OrigSrcType = SrcType;
1584 llvm::Type *SrcTy = Src->getType();
1585
1586 // Handle conversions to bool first, they are special: comparisons against 0.
1587 if (DstType->isBooleanType())
1588 return EmitConversionToBool(Src, SrcType);
1589
1590 llvm::Type *DstTy = ConvertType(DstType);
1591
1592 // Cast from half through float if half isn't a native type.
1593 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1594 // Cast to FP using the intrinsic if the half type itself isn't supported.
1595 if (DstTy->isFloatingPointTy()) {
1597 return Builder.CreateCall(
1598 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1599 Src);
1600 } else {
1601 // Cast to other types through float, using either the intrinsic or FPExt,
1602 // depending on whether the half type itself is supported
1603 // (as opposed to operations on half, available with NativeHalfType).
1605 Src = Builder.CreateCall(
1606 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1607 CGF.CGM.FloatTy),
1608 Src);
1609 } else {
1610 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1611 }
1612 SrcType = CGF.getContext().FloatTy;
1613 SrcTy = CGF.FloatTy;
1614 }
1615 }
1616
1617 // Ignore conversions like int -> uint.
1618 if (SrcTy == DstTy) {
1619 if (Opts.EmitImplicitIntegerSignChangeChecks)
1620 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1621 NoncanonicalDstType, Loc);
1622
1623 return Src;
1624 }
1625
1626 // Handle pointer conversions next: pointers can only be converted to/from
1627 // other pointers and integers. Check for pointer types in terms of LLVM, as
1628 // some native types (like Obj-C id) may map to a pointer type.
1629 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1630 // The source value may be an integer, or a pointer.
1631 if (isa<llvm::PointerType>(SrcTy))
1632 return Src;
1633
1634 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1635 // First, convert to the correct width so that we control the kind of
1636 // extension.
1637 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1638 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1639 llvm::Value* IntResult =
1640 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1641 // Then, cast to pointer.
1642 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1643 }
1644
1645 if (isa<llvm::PointerType>(SrcTy)) {
1646 // Must be an ptr to int cast.
1647 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1648 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1649 }
1650
1651 // A scalar can be splatted to an extended vector of the same element type
1652 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1653 // Sema should add casts to make sure that the source expression's type is
1654 // the same as the vector's element type (sans qualifiers)
1655 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1656 SrcType.getTypePtr() &&
1657 "Splatted expr doesn't match with vector element type?");
1658
1659 // Splat the element across to all elements
1660 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1661 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1662 }
1663
1664 if (SrcType->isMatrixType() && DstType->isMatrixType())
1665 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1666
1667 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1668 // Allow bitcast from vector to integer/fp of the same size.
1669 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1670 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1671 if (SrcSize == DstSize)
1672 return Builder.CreateBitCast(Src, DstTy, "conv");
1673
1674 // Conversions between vectors of different sizes are not allowed except
1675 // when vectors of half are involved. Operations on storage-only half
1676 // vectors require promoting half vector operands to float vectors and
1677 // truncating the result, which is either an int or float vector, to a
1678 // short or half vector.
1679
1680 // Source and destination are both expected to be vectors.
1681 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1682 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1683 (void)DstElementTy;
1684
1685 assert(((SrcElementTy->isIntegerTy() &&
1686 DstElementTy->isIntegerTy()) ||
1687 (SrcElementTy->isFloatingPointTy() &&
1688 DstElementTy->isFloatingPointTy())) &&
1689 "unexpected conversion between a floating-point vector and an "
1690 "integer vector");
1691
1692 // Truncate an i32 vector to an i16 vector.
1693 if (SrcElementTy->isIntegerTy())
1694 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1695
1696 // Truncate a float vector to a half vector.
1697 if (SrcSize > DstSize)
1698 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1699
1700 // Promote a half vector to a float vector.
1701 return Builder.CreateFPExt(Src, DstTy, "conv");
1702 }
1703
1704 // Finally, we have the arithmetic types: real int/float.
1705 Value *Res = nullptr;
1706 llvm::Type *ResTy = DstTy;
1707
1708 // An overflowing conversion has undefined behavior if either the source type
1709 // or the destination type is a floating-point type. However, we consider the
1710 // range of representable values for all floating-point types to be
1711 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1712 // floating-point type.
1713 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1714 OrigSrcType->isFloatingType())
1715 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1716 Loc);
1717
1718 // Cast to half through float if half isn't a native type.
1719 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1720 // Make sure we cast in a single step if from another FP type.
1721 if (SrcTy->isFloatingPointTy()) {
1722 // Use the intrinsic if the half type itself isn't supported
1723 // (as opposed to operations on half, available with NativeHalfType).
1725 return Builder.CreateCall(
1726 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1727 // If the half type is supported, just use an fptrunc.
1728 return Builder.CreateFPTrunc(Src, DstTy);
1729 }
1730 DstTy = CGF.FloatTy;
1731 }
1732
1733 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1734
1735 if (DstTy != ResTy) {
1737 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1738 Res = Builder.CreateCall(
1739 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1740 Res);
1741 } else {
1742 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1743 }
1744 }
1745
1746 if (Opts.EmitImplicitIntegerTruncationChecks)
1747 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1748 NoncanonicalDstType, Loc);
1749
1750 if (Opts.EmitImplicitIntegerSignChangeChecks)
1751 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1752 NoncanonicalDstType, Loc);
1753
1754 return Res;
1755}
1756
1757Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1758 QualType DstTy,
1759 SourceLocation Loc) {
1760 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1761 llvm::Value *Result;
1762 if (SrcTy->isRealFloatingType())
1763 Result = FPBuilder.CreateFloatingToFixed(Src,
1764 CGF.getContext().getFixedPointSemantics(DstTy));
1765 else if (DstTy->isRealFloatingType())
1766 Result = FPBuilder.CreateFixedToFloating(Src,
1768 ConvertType(DstTy));
1769 else {
1770 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1771 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1772
1773 if (DstTy->isIntegerType())
1774 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1775 DstFPSema.getWidth(),
1776 DstFPSema.isSigned());
1777 else if (SrcTy->isIntegerType())
1778 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1779 DstFPSema);
1780 else
1781 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1782 }
1783 return Result;
1784}
1785
1786/// Emit a conversion from the specified complex type to the specified
1787/// destination type, where the destination type is an LLVM scalar type.
1788Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1789 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1790 SourceLocation Loc) {
1791 // Get the source element type.
1792 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1793
1794 // Handle conversions to bool first, they are special: comparisons against 0.
1795 if (DstTy->isBooleanType()) {
1796 // Complex != 0 -> (Real != 0) | (Imag != 0)
1797 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1798 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1799 return Builder.CreateOr(Src.first, Src.second, "tobool");
1800 }
1801
1802 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1803 // the imaginary part of the complex value is discarded and the value of the
1804 // real part is converted according to the conversion rules for the
1805 // corresponding real type.
1806 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1807}
1808
1809Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1810 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1811}
1812
1813/// Emit a sanitization check for the given "binary" operation (which
1814/// might actually be a unary increment which has been lowered to a binary
1815/// operation). The check passes if all values in \p Checks (which are \c i1),
1816/// are \c true.
1817void ScalarExprEmitter::EmitBinOpCheck(
1818 ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
1819 const BinOpInfo &Info) {
1820 assert(CGF.IsSanitizerScope);
1821 SanitizerHandler Check;
1822 SmallVector<llvm::Constant *, 4> StaticData;
1823 SmallVector<llvm::Value *, 2> DynamicData;
1824 TrapReason TR;
1825
1826 BinaryOperatorKind Opcode = Info.Opcode;
1829
1830 StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
1831 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
1832 if (UO && UO->getOpcode() == UO_Minus) {
1833 Check = SanitizerHandler::NegateOverflow;
1834 StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
1835 DynamicData.push_back(Info.RHS);
1836 } else {
1837 if (BinaryOperator::isShiftOp(Opcode)) {
1838 // Shift LHS negative or too large, or RHS out of bounds.
1839 Check = SanitizerHandler::ShiftOutOfBounds;
1840 const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
1841 StaticData.push_back(
1842 CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
1843 StaticData.push_back(
1844 CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
1845 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
1846 // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
1847 Check = SanitizerHandler::DivremOverflow;
1848 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1849 } else {
1850 // Arithmetic overflow (+, -, *).
1851 int ArithOverflowKind = 0;
1852 switch (Opcode) {
1853 case BO_Add: {
1854 Check = SanitizerHandler::AddOverflow;
1855 ArithOverflowKind = diag::UBSanArithKind::Add;
1856 break;
1857 }
1858 case BO_Sub: {
1859 Check = SanitizerHandler::SubOverflow;
1860 ArithOverflowKind = diag::UBSanArithKind::Sub;
1861 break;
1862 }
1863 case BO_Mul: {
1864 Check = SanitizerHandler::MulOverflow;
1865 ArithOverflowKind = diag::UBSanArithKind::Mul;
1866 break;
1867 }
1868 default:
1869 llvm_unreachable("unexpected opcode for bin op check");
1870 }
1871 StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
1873 SanitizerKind::UnsignedIntegerOverflow) ||
1875 SanitizerKind::SignedIntegerOverflow)) {
1876 // Only pay the cost for constructing the trap diagnostic if they are
1877 // going to be used.
1878 CGF.CGM.BuildTrapReason(diag::trap_ubsan_arith_overflow, TR)
1879 << Info.Ty->isSignedIntegerOrEnumerationType() << ArithOverflowKind
1880 << Info.E;
1881 }
1882 }
1883 DynamicData.push_back(Info.LHS);
1884 DynamicData.push_back(Info.RHS);
1885 }
1886
1887 CGF.EmitCheck(Checks, Check, StaticData, DynamicData, &TR);
1888}
1889
1890//===----------------------------------------------------------------------===//
1891// Visitor Methods
1892//===----------------------------------------------------------------------===//
1893
1894Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1895 CGF.ErrorUnsupported(E, "scalar expression");
1896 if (E->getType()->isVoidType())
1897 return nullptr;
1898 return llvm::PoisonValue::get(CGF.ConvertType(E->getType()));
1899}
1900
1901Value *
1902ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1903 ASTContext &Context = CGF.getContext();
1904 unsigned AddrSpace =
1906 llvm::Constant *GlobalConstStr = Builder.CreateGlobalString(
1907 E->ComputeName(Context), "__usn_str", AddrSpace);
1908
1909 llvm::Type *ExprTy = ConvertType(E->getType());
1910 return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,
1911 "usn_addr_cast");
1912}
1913
1914Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {
1915 assert(E->getDataElementCount() == 1);
1916 auto It = E->begin();
1917 return Builder.getInt((*It)->getValue());
1918}
1919
/// Emit __builtin_shufflevector. Two forms: the two-operand "vector mask"
/// form where the second operand supplies runtime indices, and the
/// constant-mask form where indices are trailing integer-constant operands.
Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
  // Vector Mask Case: indices are not constants, so a shufflevector
  // instruction cannot be used; build the result element by element.
  if (E->getNumSubExprs() == 2) {
    Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
    Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
    Value *Mask;

    auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
    unsigned LHSElts = LTy->getNumElements();

    Mask = RHS;

    auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());

    // Mask off the high bits of each shuffle index.
    // NextPowerOf2(LHSElts - 1) - 1 keeps indices within the source vector
    // (modulo a power of two), so the extracts below cannot go out of range.
    Value *MaskBits =
        llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
    Mask = Builder.CreateAnd(Mask, MaskBits, "mask");

    // newv = undef
    // mask = mask & maskbits
    // for each elt
    //   n = extract mask i
    //   x = extract val n
    //   newv = insert newv, x, i
    auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
                                           MTy->getNumElements());
    Value* NewV = llvm::PoisonValue::get(RTy);
    for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
      Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
      Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");

      Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
      NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
    }
    return NewV;
  }

  // Constant-mask form: lower directly to a shufflevector instruction.
  Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
  Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));

  SmallVector<int, 32> Indices;
  for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
    llvm::APSInt Idx = E->getShuffleMaskIdx(i - 2);
    // Check for -1 and output it as undef in the IR.
    if (Idx.isSigned() && Idx.isAllOnes())
      Indices.push_back(-1);
    else
      Indices.push_back(Idx.getZExtValue());
  }

  return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
}
1973
/// Emit __builtin_convertvector: an element-wise conversion between two
/// vector types of the same element count (int<->int, int<->fp, fp<->fp,
/// or anything-to-bool).
Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
  QualType SrcType = E->getSrcExpr()->getType(),
           DstType = E->getType();

  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  assert(SrcType->isVectorType() &&
         "ConvertVector source type must be a vector");
  assert(DstType->isVectorType() &&
         "ConvertVector destination type must be a vector");

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = ConvertType(DstType);

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy)
    return Src;

  QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
           DstEltType = DstType->castAs<VectorType>()->getElementType();

  assert(SrcTy->isVectorTy() &&
         "ConvertVector source IR type must be a vector");
  assert(DstTy->isVectorTy() &&
         "ConvertVector destination IR type must be a vector");

  llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
             *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();

  // Conversions to bool are comparisons against zero, not truncations.
  if (DstEltType->isBooleanType()) {
    assert((SrcEltTy->isFloatingPointTy() ||
            isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");

    llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
    if (SrcEltTy->isFloatingPointTy()) {
      // FP options (e.g. fast-math flags) must be scoped around the compare.
      CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
      return Builder.CreateFCmpUNE(Src, Zero, "tobool");
    } else {
      return Builder.CreateICmpNE(Src, Zero, "tobool");
    }
  }

  // We have the arithmetic types: real int/float.
  Value *Res = nullptr;

  if (isa<llvm::IntegerType>(SrcEltTy)) {
    // Signedness of the *source* decides sext vs. zext / sitofp vs. uitofp.
    bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
    if (isa<llvm::IntegerType>(DstEltTy))
      Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else {
      CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
      if (InputSigned)
        Res = Builder.CreateSIToFP(Src, DstTy, "conv");
      else
        Res = Builder.CreateUIToFP(Src, DstTy, "conv");
    }
  } else if (isa<llvm::IntegerType>(DstEltTy)) {
    assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
    CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
    if (DstEltType->isSignedIntegerOrEnumerationType())
      Res = Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPToUI(Src, DstTy, "conv");
  } else {
    assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
           "Unknown real conversion");
    CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
    // TypeID order tracks FP width here, so a lower dest ID means truncation.
    if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
      Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPExt(Src, DstTy, "conv");
  }

  return Res;
}
2053
2054Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
2055 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
2056 CGF.EmitIgnoredExpr(E->getBase());
2057 return CGF.emitScalarConstant(Constant, E);
2058 } else {
2059 Expr::EvalResult Result;
2061 llvm::APSInt Value = Result.Val.getInt();
2062 CGF.EmitIgnoredExpr(E->getBase());
2063 return Builder.getInt(Value);
2064 }
2065 }
2066
2067 llvm::Value *Result = EmitLoadOfLValue(E);
2068
2069 // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
2070 // debug info for the pointer, even if there is no variable associated with
2071 // the pointer's expression.
2072 if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
2073 if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Result)) {
2074 if (llvm::GetElementPtrInst *GEP =
2075 dyn_cast<llvm::GetElementPtrInst>(Load->getPointerOperand())) {
2076 if (llvm::Instruction *Pointer =
2077 dyn_cast<llvm::Instruction>(GEP->getPointerOperand())) {
2078 QualType Ty = E->getBase()->getType();
2079 if (!E->isArrow())
2080 Ty = CGF.getContext().getPointerType(Ty);
2081 CGF.getDebugInfo()->EmitPseudoVariable(Builder, Pointer, Ty);
2082 }
2083 }
2084 }
2085 }
2086 return Result;
2087}
2088
2089Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
2090 TestAndClearIgnoreResultAssign();
2091
2092 // Emit subscript expressions in rvalue context's. For most cases, this just
2093 // loads the lvalue formed by the subscript expr. However, we have to be
2094 // careful, because the base of a vector subscript is occasionally an rvalue,
2095 // so we can't get it as an lvalue.
2096 if (!E->getBase()->getType()->isVectorType() &&
2098 return EmitLoadOfLValue(E);
2099
2100 // Handle the vector case. The base must be a vector, the index must be an
2101 // integer value.
2102 Value *Base = Visit(E->getBase());
2103 Value *Idx = Visit(E->getIdx());
2104 QualType IdxTy = E->getIdx()->getType();
2105
2106 if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
2107 CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
2108
2109 return Builder.CreateExtractElement(Base, Idx, "vecext");
2110}
2111
2112Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
2113 TestAndClearIgnoreResultAssign();
2114
2115 // Handle the vector case. The base must be a vector, the index must be an
2116 // integer value.
2117 Value *RowIdx = CGF.EmitMatrixIndexExpr(E->getRowIdx());
2118 Value *ColumnIdx = CGF.EmitMatrixIndexExpr(E->getColumnIdx());
2119
2120 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2121 unsigned NumRows = MatrixTy->getNumRows();
2122 llvm::MatrixBuilder MB(Builder);
2123 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
2124 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2125 MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());
2126
2127 Value *Matrix = Visit(E->getBase());
2128
2129 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
2130 return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
2131}
2132
2133static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
2134 unsigned Off) {
2135 int MV = SVI->getMaskValue(Idx);
2136 if (MV == -1)
2137 return -1;
2138 return Off + MV;
2139}
2140
2141static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
2142 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
2143 "Index operand too large for shufflevector mask!");
2144 return C->getZExtValue();
2145}
2146
2147Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
2148 bool Ignore = TestAndClearIgnoreResultAssign();
2149 (void)Ignore;
2150 unsigned NumInitElements = E->getNumInits();
2151 assert((Ignore == false ||
2152 (NumInitElements == 0 && E->getType()->isVoidType())) &&
2153 "init list ignored");
2154
2155 // HLSL initialization lists in the AST are an expansion which can contain
2156 // side-effecting expressions wrapped in opaque value expressions. To properly
2157 // emit these we need to emit the opaque values before we emit the argument
2158 // expressions themselves. This is a little hacky, but it prevents us needing
2159 // to do a bigger AST-level change for a language feature that we need
2160 // deprecate in the near future. See related HLSL language proposals in the
2161 // proposals (https://github.com/microsoft/hlsl-specs/blob/main/proposals):
2162 // * 0005-strict-initializer-lists.md
2163 // * 0032-constructors.md
2164 if (CGF.getLangOpts().HLSL)
2166
2167 if (E->hadArrayRangeDesignator())
2168 CGF.ErrorUnsupported(E, "GNU array range designator extension");
2169
2170 llvm::VectorType *VType =
2171 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
2172
2173 if (!VType) {
2174 if (NumInitElements == 0) {
2175 // C++11 value-initialization for the scalar.
2176 return EmitNullValue(E->getType());
2177 }
2178 // We have a scalar in braces. Just use the first element.
2179 return Visit(E->getInit(0));
2180 }
2181
2182 if (isa<llvm::ScalableVectorType>(VType)) {
2183 if (NumInitElements == 0) {
2184 // C++11 value-initialization for the vector.
2185 return EmitNullValue(E->getType());
2186 }
2187
2188 if (NumInitElements == 1) {
2189 Expr *InitVector = E->getInit(0);
2190
2191 // Initialize from another scalable vector of the same type.
2192 if (InitVector->getType().getCanonicalType() ==
2194 return Visit(InitVector);
2195 }
2196
2197 llvm_unreachable("Unexpected initialization of a scalable vector!");
2198 }
2199
2200 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
2201
2202 // Loop over initializers collecting the Value for each, and remembering
2203 // whether the source was swizzle (ExtVectorElementExpr). This will allow
2204 // us to fold the shuffle for the swizzle into the shuffle for the vector
2205 // initializer, since LLVM optimizers generally do not want to touch
2206 // shuffles.
2207 unsigned CurIdx = 0;
2208 bool VIsPoisonShuffle = false;
2209 llvm::Value *V = llvm::PoisonValue::get(VType);
2210 for (unsigned i = 0; i != NumInitElements; ++i) {
2211 Expr *IE = E->getInit(i);
2212 Value *Init = Visit(IE);
2213 SmallVector<int, 16> Args;
2214
2215 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
2216
2217 // Handle scalar elements. If the scalar initializer is actually one
2218 // element of a different vector of the same width, use shuffle instead of
2219 // extract+insert.
2220 if (!VVT) {
2221 if (isa<ExtVectorElementExpr>(IE)) {
2222 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
2223
2224 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
2225 ->getNumElements() == ResElts) {
2226 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
2227 Value *LHS = nullptr, *RHS = nullptr;
2228 if (CurIdx == 0) {
2229 // insert into poison -> shuffle (src, poison)
2230 // shufflemask must use an i32
2231 Args.push_back(getAsInt32(C, CGF.Int32Ty));
2232 Args.resize(ResElts, -1);
2233
2234 LHS = EI->getVectorOperand();
2235 RHS = V;
2236 VIsPoisonShuffle = true;
2237 } else if (VIsPoisonShuffle) {
2238 // insert into poison shuffle && size match -> shuffle (v, src)
2239 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
2240 for (unsigned j = 0; j != CurIdx; ++j)
2241 Args.push_back(getMaskElt(SVV, j, 0));
2242 Args.push_back(ResElts + C->getZExtValue());
2243 Args.resize(ResElts, -1);
2244
2245 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
2246 RHS = EI->getVectorOperand();
2247 VIsPoisonShuffle = false;
2248 }
2249 if (!Args.empty()) {
2250 V = Builder.CreateShuffleVector(LHS, RHS, Args);
2251 ++CurIdx;
2252 continue;
2253 }
2254 }
2255 }
2256 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
2257 "vecinit");
2258 VIsPoisonShuffle = false;
2259 ++CurIdx;
2260 continue;
2261 }
2262
2263 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
2264
2265 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
2266 // input is the same width as the vector being constructed, generate an
2267 // optimized shuffle of the swizzle input into the result.
2268 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
2269 if (isa<ExtVectorElementExpr>(IE)) {
2270 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
2271 Value *SVOp = SVI->getOperand(0);
2272 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
2273
2274 if (OpTy->getNumElements() == ResElts) {
2275 for (unsigned j = 0; j != CurIdx; ++j) {
2276 // If the current vector initializer is a shuffle with poison, merge
2277 // this shuffle directly into it.
2278 if (VIsPoisonShuffle) {
2279 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
2280 } else {
2281 Args.push_back(j);
2282 }
2283 }
2284 for (unsigned j = 0, je = InitElts; j != je; ++j)
2285 Args.push_back(getMaskElt(SVI, j, Offset));
2286 Args.resize(ResElts, -1);
2287
2288 if (VIsPoisonShuffle)
2289 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
2290
2291 Init = SVOp;
2292 }
2293 }
2294
2295 // Extend init to result vector length, and then shuffle its contribution
2296 // to the vector initializer into V.
2297 if (Args.empty()) {
2298 for (unsigned j = 0; j != InitElts; ++j)
2299 Args.push_back(j);
2300 Args.resize(ResElts, -1);
2301 Init = Builder.CreateShuffleVector(Init, Args, "vext");
2302
2303 Args.clear();
2304 for (unsigned j = 0; j != CurIdx; ++j)
2305 Args.push_back(j);
2306 for (unsigned j = 0; j != InitElts; ++j)
2307 Args.push_back(j + Offset);
2308 Args.resize(ResElts, -1);
2309 }
2310
2311 // If V is poison, make sure it ends up on the RHS of the shuffle to aid
2312 // merging subsequent shuffles into this one.
2313 if (CurIdx == 0)
2314 std::swap(V, Init);
2315 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
2316 VIsPoisonShuffle = isa<llvm::PoisonValue>(Init);
2317 CurIdx += InitElts;
2318 }
2319
2320 // FIXME: evaluate codegen vs. shuffling against constant null vector.
2321 // Emit remaining default initializers.
2322 llvm::Type *EltTy = VType->getElementType();
2323
2324 // Emit remaining default initializers
2325 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
2326 Value *Idx = Builder.getInt32(CurIdx);
2327 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
2328 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
2329 }
2330 return V;
2331}
2332
2334 return !D->isWeak();
2335}
2336
2337static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E) {
2338 E = E->IgnoreParens();
2339
2340 if (const auto *UO = dyn_cast<UnaryOperator>(E))
2341 if (UO->getOpcode() == UO_Deref)
2342 return CGF.isPointerKnownNonNull(UO->getSubExpr());
2343
2344 if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
2345 return isDeclRefKnownNonNull(CGF, DRE->getDecl());
2346
2347 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
2348 if (isa<FieldDecl>(ME->getMemberDecl()))
2349 return true;
2350 return isDeclRefKnownNonNull(CGF, ME->getMemberDecl());
2351 }
2352
2353 // Array subscripts? Anything else?
2354
2355 return false;
2356}
2357
2359 assert(E->getType()->isSignableType(getContext()));
2360
2361 E = E->IgnoreParens();
2362
2363 if (isa<CXXThisExpr>(E))
2364 return true;
2365
2366 if (const auto *UO = dyn_cast<UnaryOperator>(E))
2367 if (UO->getOpcode() == UO_AddrOf)
2368 return isLValueKnownNonNull(*this, UO->getSubExpr());
2369
2370 if (const auto *CE = dyn_cast<CastExpr>(E))
2371 if (CE->getCastKind() == CK_FunctionToPointerDecay ||
2372 CE->getCastKind() == CK_ArrayToPointerDecay)
2373 return isLValueKnownNonNull(*this, CE->getSubExpr());
2374
2375 // Maybe honor __nonnull?
2376
2377 return false;
2378}
2379
2381 const Expr *E = CE->getSubExpr();
2382
2383 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
2384 return false;
2385
2386 if (isa<CXXThisExpr>(E->IgnoreParens())) {
2387 // We always assume that 'this' is never null.
2388 return false;
2389 }
2390
2391 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2392 // And that glvalue casts are never null.
2393 if (ICE->isGLValue())
2394 return false;
2395 }
2396
2397 return true;
2398}
2399
2400// RHS is an aggregate type
2402 QualType DestTy, SourceLocation Loc) {
2403 SmallVector<LValue, 16> LoadList;
2404 CGF.FlattenAccessAndTypeLValue(SrcVal, LoadList);
2405 // Dest is either a vector or a builtin?
2406 // if its a vector create a temp alloca to store into and return that
2407 if (auto *VecTy = DestTy->getAs<VectorType>()) {
2408 assert(LoadList.size() >= VecTy->getNumElements() &&
2409 "Flattened type on RHS must have the same number or more elements "
2410 "than vector on LHS.");
2411 llvm::Value *V =
2412 CGF.Builder.CreateLoad(CGF.CreateIRTemp(DestTy, "flatcast.tmp"));
2413 // write to V.
2414 for (unsigned I = 0, E = VecTy->getNumElements(); I < E; I++) {
2415 RValue RVal = CGF.EmitLoadOfLValue(LoadList[I], Loc);
2416 assert(RVal.isScalar() &&
2417 "All flattened source values should be scalars.");
2418 llvm::Value *Cast =
2419 CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[I].getType(),
2420 VecTy->getElementType(), Loc);
2421 V = CGF.Builder.CreateInsertElement(V, Cast, I);
2422 }
2423 return V;
2424 }
2425 if (auto *MatTy = DestTy->getAs<ConstantMatrixType>()) {
2426 assert(LoadList.size() >= MatTy->getNumElementsFlattened() &&
2427 "Flattened type on RHS must have the same number or more elements "
2428 "than vector on LHS.");
2429
2430 llvm::Value *V =
2431 CGF.Builder.CreateLoad(CGF.CreateIRTemp(DestTy, "flatcast.tmp"));
2432 // V is an allocated temporary to build the truncated matrix into.
2433 for (unsigned I = 0, E = MatTy->getNumElementsFlattened(); I < E; I++) {
2434 unsigned ColMajorIndex =
2435 (I % MatTy->getNumRows()) * MatTy->getNumColumns() +
2436 (I / MatTy->getNumRows());
2437 RValue RVal = CGF.EmitLoadOfLValue(LoadList[ColMajorIndex], Loc);
2438 assert(RVal.isScalar() &&
2439 "All flattened source values should be scalars.");
2440 llvm::Value *Cast = CGF.EmitScalarConversion(
2441 RVal.getScalarVal(), LoadList[ColMajorIndex].getType(),
2442 MatTy->getElementType(), Loc);
2443 V = CGF.Builder.CreateInsertElement(V, Cast, I);
2444 }
2445 return V;
2446 }
2447 // if its a builtin just do an extract element or load.
2448 assert(DestTy->isBuiltinType() &&
2449 "Destination type must be a vector, matrix, or builtin type.");
2450 RValue RVal = CGF.EmitLoadOfLValue(LoadList[0], Loc);
2451 assert(RVal.isScalar() && "All flattened source values should be scalars.");
2452 return CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[0].getType(),
2453 DestTy, Loc);
2454}
2455
2456// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
2457// have to handle a more broad range of conversions than explicit casts, as they
2458// handle things like function to ptr-to-function decay etc.
Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
  // Record the cast currently being emitted so other emission paths can refer
  // to it; the scope-exit restores the previous value on every return path.
  auto RestoreCurCast =
      llvm::make_scope_exit([this, Prev = CGF.CurCast] { CGF.CurCast = Prev; });
  CGF.CurCast = CE;

  Expr *E = CE->getSubExpr();
  QualType DestTy = CE->getType();
  CastKind Kind = CE->getCastKind();
  CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);

  // These cases are generally not written to ignore the result of
  // evaluating their sub-expressions, so we clear this now.
  bool Ignored = TestAndClearIgnoreResultAssign();

  // Since almost all cast kinds apply to scalars, this switch doesn't have
  // a default case, so the compiler will warn on a missing case. The cases
  // are in the same order as in the CastKind enum.
  switch (Kind) {
  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  case CK_LValueBitCast:
  case CK_ObjCObjectLValueCast: {
    // Reinterpret the lvalue's storage as the destination type, then load.
    Address Addr = EmitLValue(E).getAddress();
    Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
    LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
    return EmitLoadOfLValue(LV, CE->getExprLoc());
  }

  case CK_LValueToRValueBitCast: {
    // Same storage reinterpretation as above, but the resulting load may
    // alias anything, so it is tagged with may-alias TBAA info.
    LValue SourceLVal = CGF.EmitLValue(E);
    Address Addr =
        SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy));
    LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
    DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
    return EmitLoadOfLValue(DestLV, CE->getExprLoc());
  }

  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    Value *Src = Visit(E);
    llvm::Type *SrcTy = Src->getType();
    llvm::Type *DstTy = ConvertType(DestTy);

    // FIXME: this is a gross but seemingly necessary workaround for an issue
    // manifesting when a target uses a non-default AS for indirect sret args,
    // but the source HLL is generic, wherein a valid C-cast or reinterpret_cast
    // on the address of a local struct that gets returned by value yields an
    // invalid bitcast from the a pointer to the IndirectAS to a pointer to the
    // DefaultAS. We can only do this subversive thing because sret args are
    // manufactured and them residing in the IndirectAS is a target specific
    // detail, and doing an AS cast here still retains the semantics the user
    // expects. It is desirable to remove this iff a better solution is found.
    if (auto A = dyn_cast<llvm::Argument>(Src); A && A->hasStructRetAttr())
      return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
          CGF, Src, E->getType().getAddressSpace(), DstTy);

    assert(
        (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
         SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
        "Address-space cast must be used to convert address spaces");

    if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
      // CFI: verify the vtable pointer before a cast between unrelated types.
      if (auto *PT = DestTy->getAs<PointerType>()) {
        CGF.EmitVTablePtrCheckForCast(
            PT->getPointeeType(),
            Address(Src,
                    CGF.ConvertTypeForMem(
                        E->getType()->castAs<PointerType>()->getPointeeType()),
                    CGF.getPointerAlign()),
            /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
            CE->getBeginLoc());
      }
    }

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      const QualType SrcType = E->getType();

      if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
        // Casting to pointer that could carry dynamic information (provided by
        // invariant.group) requires launder.
        Src = Builder.CreateLaunderInvariantGroup(Src);
      } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
        // Casting to pointer that does not carry dynamic information (provided
        // by invariant.group) requires stripping it. Note that we don't do it
        // if the source could not be dynamic type and destination could be
        // dynamic because dynamic information is already laundered. It is
        // because launder(strip(src)) == launder(src), so there is no need to
        // add extra strip before launder.
        Src = Builder.CreateStripInvariantGroup(Src);
      }
    }

    // Update heapallocsite metadata when there is an explicit pointer cast.
    if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
      if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
          !isa<CastExpr>(E)) {
        QualType PointeeType = DestTy->getPointeeType();
        if (!PointeeType.isNull())
          CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
                                                       CE->getExprLoc());
      }
    }

    // If Src is a fixed vector and Dst is a scalable vector, and both have the
    // same element type, use the llvm.vector.insert intrinsic to perform the
    // bitcast.
    if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
        // If we are casting a fixed i8 vector to a scalable i1 predicate
        // vector, use a vector insert and bitcast the result.
        if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
            FixedSrcTy->getElementType()->isIntegerTy(8)) {
          ScalableDstTy = llvm::ScalableVectorType::get(
              FixedSrcTy->getElementType(),
              llvm::divideCeil(
                  ScalableDstTy->getElementCount().getKnownMinValue(), 8));
        }
        if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
          llvm::Value *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
          llvm::Value *Result = Builder.CreateInsertVector(
              ScalableDstTy, PoisonVec, Src, uint64_t(0), "cast.scalable");
          ScalableDstTy = cast<llvm::ScalableVectorType>(
              llvm::VectorType::getWithSizeAndScalar(ScalableDstTy, DstTy));
          if (Result->getType() != ScalableDstTy)
            Result = Builder.CreateBitCast(Result, ScalableDstTy);
          if (Result->getType() != DstTy)
            Result = Builder.CreateExtractVector(DstTy, Result, uint64_t(0));
          return Result;
        }
      }
    }

    // If Src is a scalable vector and Dst is a fixed vector, and both have the
    // same element type, use the llvm.vector.extract intrinsic to perform the
    // bitcast.
    if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
      if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
        // If we are casting a scalable i1 predicate vector to a fixed i8
        // vector, bitcast the source and use a vector extract.
        if (ScalableSrcTy->getElementType()->isIntegerTy(1) &&
            FixedDstTy->getElementType()->isIntegerTy(8)) {
          if (!ScalableSrcTy->getElementCount().isKnownMultipleOf(8)) {
            // Pad the predicate out to a multiple of 8 lanes with zeros so
            // the i1 vector can be reinterpreted as whole i8 elements.
            ScalableSrcTy = llvm::ScalableVectorType::get(
                ScalableSrcTy->getElementType(),
                llvm::alignTo<8>(
                    ScalableSrcTy->getElementCount().getKnownMinValue()));
            llvm::Value *ZeroVec = llvm::Constant::getNullValue(ScalableSrcTy);
            Src = Builder.CreateInsertVector(ScalableSrcTy, ZeroVec, Src,
                                             uint64_t(0));
          }

          ScalableSrcTy = llvm::ScalableVectorType::get(
              FixedDstTy->getElementType(),
              ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
          Src = Builder.CreateBitCast(Src, ScalableSrcTy);
        }
        if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType())
          return Builder.CreateExtractVector(DstTy, Src, uint64_t(0),
                                             "cast.fixed");
      }
    }

    // Perform VLAT <-> VLST bitcast through memory.
    // TODO: since the llvm.vector.{insert,extract} intrinsics
    // require the element types of the vectors to be the same, we
    // need to keep this around for bitcasts between VLAT <-> VLST where
    // the element types of the vectors are not the same, until we figure
    // out a better way of doing these casts.
    if ((isa<llvm::FixedVectorType>(SrcTy) &&
         isa<llvm::ScalableVectorType>(DstTy)) ||
        (isa<llvm::ScalableVectorType>(SrcTy) &&
         isa<llvm::FixedVectorType>(DstTy))) {
      Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
      LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
      CGF.EmitStoreOfScalar(Src, LV);
      Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
      LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
      DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
      return EmitLoadOfLValue(DestLV, CE->getExprLoc());
    }

    llvm::Value *Result = Builder.CreateBitCast(Src, DstTy);
    // Re-sign the pointer if pointer authentication requires it.
    return CGF.authPointerToPointerCast(Result, E->getType(), DestTy);
  }
  case CK_AddressSpaceConversion: {
    Expr::EvalResult Result;
    if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
        Result.Val.isNullPointer()) {
      // If E has side effect, it is emitted even if its final result is a
      // null pointer. In that case, a DCE pass should be able to
      // eliminate the useless instructions emitted during translating E.
      if (Result.HasSideEffects)
        Visit(E);
      return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
          ConvertType(DestTy)), DestTy);
    }
    // Since target may map different address spaces in AST to the same address
    // space, an address space conversion may end up as a bitcast.
    return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
        CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
        ConvertType(DestTy));
  }
  case CK_AtomicToNonAtomic:
  case CK_NonAtomicToAtomic:
  case CK_UserDefinedConversion:
    // No representation change is needed; just emit the operand.
    return Visit(E);

  case CK_NoOp: {
    // A no-op cast can still add/remove volatile; in that case we must go
    // through a real load so the volatility is honored.
    return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE) : Visit(E);
  }

  case CK_BaseToDerived: {
    const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
    assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");

    Address Base = CGF.EmitPointerWithAlignment(E);
    Address Derived =
        CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
                                     CE->path_begin(), CE->path_end(),
                                     CGF.ShouldNullCheckClassCastValue(CE));

    // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    if (CGF.sanitizePerformTypeCheck())
      CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
                        Derived, DestTy->getPointeeType());

    if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
      CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
                                    /*MayBeNull=*/true,
                                    CodeGenFunction::CFITCK_DerivedCast,
                                    CE->getBeginLoc());

    return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType());
  }
  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    // The EmitPointerWithAlignment path does this fine; just discard
    // the alignment.
    return CGF.getAsNaturalPointerTo(CGF.EmitPointerWithAlignment(CE),
                                     CE->getType()->getPointeeType());
  }

  case CK_Dynamic: {
    Address V = CGF.EmitPointerWithAlignment(E);
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
    return CGF.EmitDynamicCast(V, DCE);
  }

  case CK_ArrayToPointerDecay:
    return CGF.getAsNaturalPointerTo(CGF.EmitArrayToPointerDecay(E),
                                     CE->getType()->getPointeeType());
  case CK_FunctionToPointerDecay:
    return EmitLValue(E).getPointer(CGF);

  case CK_NullToPointer:
    // The operand may still need emitting for its side effects.
    if (MustVisitNullValue(E))
      CGF.EmitIgnoredExpr(E);

    return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
                                  DestTy);

  case CK_NullToMemberPointer: {
    if (MustVisitNullValue(E))
      CGF.EmitIgnoredExpr(E);

    const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
  }

  case CK_ReinterpretMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer: {
    Value *Src = Visit(E);

    // Note that the AST doesn't distinguish between checked and
    // unchecked member pointer conversions, so we always have to
    // implement checked conversions here. This is inefficient when
    // actual control flow may be required in order to perform the
    // check, which it is for data member pointers (but not member
    // function pointers on Itanium and ARM).
    return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
  }

  case CK_ARCProduceObject:
    return CGF.EmitARCRetainScalarExpr(E);
  case CK_ARCConsumeObject:
    return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
  case CK_ARCReclaimReturnedObject:
    return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
  case CK_ARCExtendBlockObject:
    return CGF.EmitARCExtendBlockObject(E);

  case CK_CopyAndAutoreleaseBlockObject:
    return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());

  case CK_FloatingRealToComplex:
  case CK_FloatingComplexCast:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_FloatingComplexToIntegralComplex:
  case CK_ConstructorConversion:
  case CK_ToUnion:
  case CK_HLSLArrayRValue:
    llvm_unreachable("scalar cast to non-scalar value");

  case CK_LValueToRValue:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
    assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
    return Visit(E);

  case CK_IntegralToPointer: {
    Value *Src = Visit(E);

    // First, convert to the correct width so that we control the kind of
    // extension.
    auto DestLLVMTy = ConvertType(DestTy);
    llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
    bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
    llvm::Value* IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");

    auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      // Going from integer to pointer that could be dynamic requires reloading
      // dynamic information from invariant.group.
      if (DestTy.mayBeDynamicClass())
        IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
    }

    IntToPtr = CGF.authPointerToPointerCast(IntToPtr, E->getType(), DestTy);
    return IntToPtr;
  }
  case CK_PointerToIntegral: {
    assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
    auto *PtrExpr = Visit(E);

    if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
      const QualType SrcType = E->getType();

      // Casting to integer requires stripping dynamic information as it does
      // not carries it.
      if (SrcType.mayBeDynamicClass())
        PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
    }

    PtrExpr = CGF.authPointerToPointerCast(PtrExpr, E->getType(), DestTy);
    return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
  }
  case CK_ToVoid: {
    // Evaluate for side effects only; a void cast produces no value.
    CGF.EmitIgnoredExpr(E);
    return nullptr;
  }
  case CK_MatrixCast: {
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  // CK_HLSLAggregateSplatCast only handles splatting to vectors from a vec1
  // Casts were inserted in Sema to Cast the Src Expr to a Scalar and
  // To perform any necessary Scalar Cast, so this Cast can be handled
  // by the regular Vector Splat cast code.
  case CK_HLSLAggregateSplatCast:
  case CK_VectorSplat: {
    llvm::Type *DstTy = ConvertType(DestTy);
    Value *Elt = Visit(E);
    // Splat the element across to all elements
    llvm::ElementCount NumElements =
        cast<llvm::VectorType>(DstTy)->getElementCount();
    return Builder.CreateVectorSplat(NumElements, Elt, "splat");
  }

  case CK_FixedPointCast:
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_FixedPointToBoolean:
    assert(E->getType()->isFixedPointType() &&
           "Expected src type to be fixed point type");
    assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_FixedPointToIntegral:
    assert(E->getType()->isFixedPointType() &&
           "Expected src type to be fixed point type");
    assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_IntegralToFixedPoint:
    assert(E->getType()->isIntegerType() &&
           "Expected src type to be an integer");
    assert(DestTy->isFixedPointType() &&
           "Expected dest type to be fixed point type");
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());

  case CK_IntegralCast: {
    if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
      QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
      return Builder.CreateIntCast(Visit(E), ConvertType(DestTy),
                                   SrcElTy->isSignedIntegerOrEnumerationType(),
                                   "conv");
    }
    // Only emit implicit-conversion sanitizer checks for genuinely implicit
    // casts; implicit parts of explicit casts are exempt.
    ScalarConversionOpts Opts;
    if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
      if (!ICE->isPartOfExplicitCast())
        Opts = ScalarConversionOpts(CGF.SanOpts);
    }
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc(), Opts);
  }
  case CK_IntegralToFloating: {
    if (E->getType()->isVectorType() && DestTy->isVectorType()) {
      // TODO: Support constrained FP intrinsics.
      QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
      if (SrcElTy->isSignedIntegerOrEnumerationType())
        return Builder.CreateSIToFP(Visit(E), ConvertType(DestTy), "conv");
      return Builder.CreateUIToFP(Visit(E), ConvertType(DestTy), "conv");
    }
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_FloatingToIntegral: {
    if (E->getType()->isVectorType() && DestTy->isVectorType()) {
      // TODO: Support constrained FP intrinsics.
      QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
      if (DstElTy->isSignedIntegerOrEnumerationType())
        return Builder.CreateFPToSI(Visit(E), ConvertType(DestTy), "conv");
      return Builder.CreateFPToUI(Visit(E), ConvertType(DestTy), "conv");
    }
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_FloatingCast: {
    if (E->getType()->isVectorType() && DestTy->isVectorType()) {
      // TODO: Support constrained FP intrinsics.
      QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
      QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
      // BuiltinType kinds are ordered by increasing FP rank, so a smaller
      // destination kind means this is a truncation.
      if (DstElTy->castAs<BuiltinType>()->getKind() <
          SrcElTy->castAs<BuiltinType>()->getKind())
        return Builder.CreateFPTrunc(Visit(E), ConvertType(DestTy), "conv");
      return Builder.CreateFPExt(Visit(E), ConvertType(DestTy), "conv");
    }
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_FixedPointToFloating:
  case CK_FloatingToFixedPoint: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc());
  }
  case CK_BooleanToSignedIntegral: {
    // e.g. OpenCL-style bool -> -1/0 extension.
    ScalarConversionOpts Opts;
    Opts.TreatBooleanAsSigned = true;
    return EmitScalarConversion(Visit(E), E->getType(), DestTy,
                                CE->getExprLoc(), Opts);
  }
  case CK_IntegralToBoolean:
    return EmitIntToBoolConversion(Visit(E));
  case CK_PointerToBoolean:
    return EmitPointerToBoolConversion(Visit(E), E->getType());
  case CK_FloatingToBoolean: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
    return EmitFloatToBoolConversion(Visit(E));
  }
  case CK_MemberPointerToBoolean: {
    llvm::Value *MemPtr = Visit(E);
    const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
  }

  case CK_FloatingComplexToReal:
  case CK_IntegralComplexToReal:
    // Emit only the real component of the complex operand.
    return CGF.EmitComplexExpr(E, false, true).first;

  case CK_FloatingComplexToBoolean:
  case CK_IntegralComplexToBoolean: {
    CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);

    // TODO: kill this function off, inline appropriate case here
    return EmitComplexToScalarConversion(V, E->getType(), DestTy,
                                         CE->getExprLoc());
  }

  case CK_ZeroToOCLOpaqueType: {
    assert((DestTy->isEventT() || DestTy->isQueueT() ||
            DestTy->isOCLIntelSubgroupAVCType()) &&
           "CK_ZeroToOCLEvent cast on non-event type");
    return llvm::Constant::getNullValue(ConvertType(DestTy));
  }

  case CK_IntToOCLSampler:
    return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);

  case CK_HLSLVectorTruncation: {
    assert((DestTy->isVectorType() || DestTy->isBuiltinType()) &&
           "Destination type must be a vector or builtin type.");
    Value *Vec = Visit(E);
    if (auto *VecTy = DestTy->getAs<VectorType>()) {
      // Keep the leading NumElts lanes via an identity-prefix shuffle.
      SmallVector<int> Mask;
      unsigned NumElts = VecTy->getNumElements();
      for (unsigned I = 0; I != NumElts; ++I)
        Mask.push_back(I);

      return Builder.CreateShuffleVector(Vec, Mask, "trunc");
    }
    // Truncating to a scalar: take element 0.
    llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy);
    return Builder.CreateExtractElement(Vec, Zero, "cast.vtrunc");
  }
  case CK_HLSLMatrixTruncation: {
    assert((DestTy->isMatrixType() || DestTy->isBuiltinType()) &&
           "Destination type must be a matrix or builtin type.");
    Value *Mat = Visit(E);
    if (auto *MatTy = DestTy->getAs<ConstantMatrixType>()) {
      // Select the top-left NumRows x NumCols submatrix, indexing rows by the
      // SOURCE matrix's column count (row-major flattened layout).
      SmallVector<int> Mask;
      unsigned NumCols = MatTy->getNumColumns();
      unsigned NumRows = MatTy->getNumRows();
      unsigned ColOffset = NumCols;
      if (auto *SrcMatTy = E->getType()->getAs<ConstantMatrixType>())
        ColOffset = SrcMatTy->getNumColumns();
      for (unsigned R = 0; R < NumRows; R++) {
        for (unsigned C = 0; C < NumCols; C++) {
          unsigned I = R * ColOffset + C;
          Mask.push_back(I);
        }
      }

      return Builder.CreateShuffleVector(Mat, Mask, "trunc");
    }
    // Truncating to a scalar: take element 0.
    llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy);
    return Builder.CreateExtractElement(Mat, Zero, "cast.mtrunc");
  }
  case CK_HLSLElementwiseCast: {
    RValue RV = CGF.EmitAnyExpr(E);
    SourceLocation Loc = CE->getExprLoc();

    Address SrcAddr = Address::invalid();

    // The elementwise-cast helper works on memory, so spill a scalar source
    // into a temporary when necessary.
    if (RV.isAggregate()) {
      SrcAddr = RV.getAggregateAddress();
    } else {
      SrcAddr = CGF.CreateMemTemp(E->getType(), "hlsl.ewcast.src");
      LValue TmpLV = CGF.MakeAddrLValue(SrcAddr, E->getType());
      CGF.EmitStoreThroughLValue(RV, TmpLV);
    }

    LValue SrcVal = CGF.MakeAddrLValue(SrcAddr, E->getType());
    return EmitHLSLElementwiseCast(CGF, SrcVal, DestTy, Loc);
  }

  } // end of switch

  llvm_unreachable("unknown scalar cast");
}
3024
3025Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
3026 CodeGenFunction::StmtExprEvaluation eval(CGF);
3027 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
3028 !E->getType()->isVoidType());
3029 if (!RetAlloca.isValid())
3030 return nullptr;
3031 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
3032 E->getExprLoc());
3033}
3034
3035Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
3036 CodeGenFunction::RunCleanupsScope Scope(CGF);
3037 Value *V = Visit(E->getSubExpr());
3038 // Defend against dominance problems caused by jumps out of expression
3039 // evaluation through the shared cleanup block.
3040 Scope.ForceCleanup({&V});
3041 return V;
3042}
3043
3044//===----------------------------------------------------------------------===//
3045// Unary Operators
3046//===----------------------------------------------------------------------===//
3047
// Build the BinOpInfo describing the "x +/- 1" operation implied by an
// increment/decrement, so the shared binary-operator emission (including
// overflow checking) can be reused.
// NOTE(review): the leading signature line was missing from this listing and
// was restored from upstream clang — verify against trunk.
static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
                                           llvm::Value *InVal, bool IsInc,
                                           FPOptions FPFeatures) {
  BinOpInfo BinOp;
  BinOp.LHS = InVal;
  // RHS is the constant 1 of the operand's type (unsigned-constructed; the
  // value 1 is identical either way).
  BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
  BinOp.Ty = E->getType();
  // ++ maps to addition, -- to subtraction.
  BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
  BinOp.FPFeatures = FPFeatures;
  BinOp.E = E;
  return BinOp;
}
3060
// Emit a signed integer increment/decrement, choosing the add flavor from the
// -fwrapv / default / -ftrapv signed-overflow mode, and deferring to the
// checked-binop path when the signed-integer-overflow sanitizer is enabled.
// The switch intentionally falls through: each mode only handles the
// "sanitizer off" fast path and otherwise escalates to the stricter case.
llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
    const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
  // Increment is +1, decrement is -1, in the operand's own type.
  llvm::Value *Amount =
      llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
  StringRef Name = IsInc ? "inc" : "dec";
  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
  case LangOptions::SOB_Defined:
    // -fwrapv: wrapping add, unless the sanitizer wants to observe overflow.
    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
      return Builder.CreateAdd(InVal, Amount, Name);
    [[fallthrough]];
  case LangOptions::SOB_Undefined:
    // Default C/C++ semantics: overflow is UB, so emit an nsw add.
    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
      return Builder.CreateNSWAdd(InVal, Amount, Name);
    [[fallthrough]];
  case LangOptions::SOB_Trapping:
    // -ftrapv or sanitizer active: emit the overflow-checked form, unless
    // the operation provably cannot overflow.
    BinOpInfo Info = createBinOpInfoFromIncDec(
        E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts()));
    if (!E->canOverflow() || CanElideOverflowCheck(CGF.getContext(), Info))
      return Builder.CreateNSWAdd(InVal, Amount, Name);
    return EmitOverflowCheckedBinOp(Info);
  }
  llvm_unreachable("Unknown SignedOverflowBehaviorTy");
}
3084
3085/// For the purposes of overflow pattern exclusion, does this match the
3086/// "while(i--)" pattern?
/// For the purposes of overflow pattern exclusion, does this match the
/// "while(i--)" pattern?
static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc,
                                   bool isPre, ASTContext &Ctx) {
  // Only a post-decrement can match "while (i--)".
  if (isInc || isPre)
    return false;

  // -fsanitize-undefined-ignore-overflow-pattern=unsigned-post-decr-while
  // NOTE(review): this guard was missing from the listing and was restored
  // from upstream clang — verify against trunk.
  if (!Ctx.getLangOpts().isOverflowPatternExcluded(
          LangOptions::OverflowPatternExclusionKind::PostDecrInWhile))
    return false;

  // all Parents (usually just one) must be a WhileStmt
  for (const auto &Parent : Ctx.getParentMapContext().getParents(*UO))
    if (!Parent.get<WhileStmt>())
      return false;

  return true;
}
3104
namespace {
/// Handles check and update for lastprivate conditional variables.
class OMPLastprivateConditionalUpdateRAII {
private:
  CodeGenFunction &CGF;
  const UnaryOperator *E;

public:
  OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
                                      const UnaryOperator *E)
      : CGF(CGF), E(E) {}
  ~OMPLastprivateConditionalUpdateRAII() {
    // After the inc/dec has been emitted, let the OpenMP runtime update any
    // lastprivate conditional tracking for the modified operand.
    // NOTE(review): the runtime call line was missing from this listing and
    // was restored from upstream clang — verify against trunk.
    if (CGF.getLangOpts().OpenMP)
      CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(
          CGF, E->getSubExpr());
  }
};
} // namespace
3123
3124llvm::Value *
3125ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
3126 bool isInc, bool isPre) {
3127 ApplyAtomGroup Grp(CGF.getDebugInfo());
3128 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
3129 QualType type = E->getSubExpr()->getType();
3130 llvm::PHINode *atomicPHI = nullptr;
3131 llvm::Value *value;
3132 llvm::Value *input;
3133 llvm::Value *Previous = nullptr;
3134 QualType SrcType = E->getType();
3135
3136 int amount = (isInc ? 1 : -1);
3137 bool isSubtraction = !isInc;
3138
3139 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
3140 type = atomicTy->getValueType();
3141 if (isInc && type->isBooleanType()) {
3142 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
3143 if (isPre) {
3144 Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
3145 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
3146 return Builder.getTrue();
3147 }
3148 // For atomic bool increment, we just store true and return it for
3149 // preincrement, do an atomic swap with true for postincrement
3150 return Builder.CreateAtomicRMW(
3151 llvm::AtomicRMWInst::Xchg, LV.getAddress(), True,
3152 llvm::AtomicOrdering::SequentiallyConsistent);
3153 }
3154 // Special case for atomic increment / decrement on integers, emit
3155 // atomicrmw instructions. We skip this if we want to be doing overflow
3156 // checking, and fall into the slow path with the atomic cmpxchg loop.
3157 if (!type->isBooleanType() && type->isIntegerType() &&
3158 !(type->isUnsignedIntegerType() &&
3159 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3160 CGF.getLangOpts().getSignedOverflowBehavior() !=
3161 LangOptions::SOB_Trapping) {
3162 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
3163 llvm::AtomicRMWInst::Sub;
3164 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
3165 llvm::Instruction::Sub;
3166 llvm::Value *amt = CGF.EmitToMemory(
3167 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
3168 llvm::Value *old =
3169 Builder.CreateAtomicRMW(aop, LV.getAddress(), amt,
3170 llvm::AtomicOrdering::SequentiallyConsistent);
3171 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
3172 }
3173 // Special case for atomic increment/decrement on floats.
3174 // Bail out non-power-of-2-sized floating point types (e.g., x86_fp80).
3175 if (type->isFloatingType()) {
3176 llvm::Type *Ty = ConvertType(type);
3177 if (llvm::has_single_bit(Ty->getScalarSizeInBits())) {
3178 llvm::AtomicRMWInst::BinOp aop =
3179 isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
3180 llvm::Instruction::BinaryOps op =
3181 isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
3182 llvm::Value *amt = llvm::ConstantFP::get(Ty, 1.0);
3183 llvm::AtomicRMWInst *old =
3184 CGF.emitAtomicRMWInst(aop, LV.getAddress(), amt,
3185 llvm::AtomicOrdering::SequentiallyConsistent);
3186
3187 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
3188 }
3189 }
3190 value = EmitLoadOfLValue(LV, E->getExprLoc());
3191 input = value;
3192 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
3193 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3194 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3195 value = CGF.EmitToMemory(value, type);
3196 Builder.CreateBr(opBB);
3197 Builder.SetInsertPoint(opBB);
3198 atomicPHI = Builder.CreatePHI(value->getType(), 2);
3199 atomicPHI->addIncoming(value, startBB);
3200 value = atomicPHI;
3201 } else {
3202 value = EmitLoadOfLValue(LV, E->getExprLoc());
3203 input = value;
3204 }
3205
3206 // Special case of integer increment that we have to check first: bool++.
3207 // Due to promotion rules, we get:
3208 // bool++ -> bool = bool + 1
3209 // -> bool = (int)bool + 1
3210 // -> bool = ((int)bool + 1 != 0)
3211 // An interesting aspect of this is that increment is always true.
3212 // Decrement does not have this property.
3213 if (isInc && type->isBooleanType()) {
3214 value = Builder.getTrue();
3215
3216 // Most common case by far: integer increment.
3217 } else if (type->isIntegerType()) {
3218 QualType promotedType;
3219 bool canPerformLossyDemotionCheck = false;
3220
3221 bool excludeOverflowPattern =
3222 matchesPostDecrInWhile(E, isInc, isPre, CGF.getContext());
3223
3225 promotedType = CGF.getContext().getPromotedIntegerType(type);
3226 assert(promotedType != type && "Shouldn't promote to the same type.");
3227 canPerformLossyDemotionCheck = true;
3228 canPerformLossyDemotionCheck &=
3230 CGF.getContext().getCanonicalType(promotedType);
3231 canPerformLossyDemotionCheck &=
3233 type, promotedType);
3234 assert((!canPerformLossyDemotionCheck ||
3235 type->isSignedIntegerOrEnumerationType() ||
3236 promotedType->isSignedIntegerOrEnumerationType() ||
3237 ConvertType(type)->getScalarSizeInBits() ==
3238 ConvertType(promotedType)->getScalarSizeInBits()) &&
3239 "The following check expects that if we do promotion to different "
3240 "underlying canonical type, at least one of the types (either "
3241 "base or promoted) will be signed, or the bitwidths will match.");
3242 }
3243 if (CGF.SanOpts.hasOneOf(
3244 SanitizerKind::ImplicitIntegerArithmeticValueChange |
3245 SanitizerKind::ImplicitBitfieldConversion) &&
3246 canPerformLossyDemotionCheck) {
3247 // While `x += 1` (for `x` with width less than int) is modeled as
3248 // promotion+arithmetics+demotion, and we can catch lossy demotion with
3249 // ease; inc/dec with width less than int can't overflow because of
3250 // promotion rules, so we omit promotion+demotion, which means that we can
3251 // not catch lossy "demotion". Because we still want to catch these cases
3252 // when the sanitizer is enabled, we perform the promotion, then perform
3253 // the increment/decrement in the wider type, and finally
3254 // perform the demotion. This will catch lossy demotions.
3255
3256 // We have a special case for bitfields defined using all the bits of the
3257 // type. In this case we need to do the same trick as for the integer
3258 // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.
3259
3260 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
3261 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
3262 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3263 // Do pass non-default ScalarConversionOpts so that sanitizer check is
3264 // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
3265 // checks will take care of the conversion.
3266 ScalarConversionOpts Opts;
3267 if (!LV.isBitField())
3268 Opts = ScalarConversionOpts(CGF.SanOpts);
3269 else if (CGF.SanOpts.has(SanitizerKind::ImplicitBitfieldConversion)) {
3270 Previous = value;
3271 SrcType = promotedType;
3272 }
3273
3274 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
3275 Opts);
3276
3277 // Note that signed integer inc/dec with width less than int can't
3278 // overflow because of promotion rules; we're just eliding a few steps
3279 // here.
3280 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
3281 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
3282 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
3283 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3284 !excludeOverflowPattern &&
3286 SanitizerKind::UnsignedIntegerOverflow, E->getType())) {
3287 value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
3288 E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
3289 } else {
3290 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
3291 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3292 }
3293
3294 // Next most common: pointer increment.
3295 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
3296 QualType type = ptr->getPointeeType();
3297
3298 // VLA types don't have constant size.
3299 if (const VariableArrayType *vla
3301 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
3302 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
3303 llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
3304 if (CGF.getLangOpts().PointerOverflowDefined)
3305 value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
3306 else
3307 value = CGF.EmitCheckedInBoundsGEP(
3308 elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
3309 E->getExprLoc(), "vla.inc");
3310
3311 // Arithmetic on function pointers (!) is just +-1.
3312 } else if (type->isFunctionType()) {
3313 llvm::Value *amt = Builder.getInt32(amount);
3314
3315 if (CGF.getLangOpts().PointerOverflowDefined)
3316 value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
3317 else
3318 value =
3319 CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
3320 /*SignedIndices=*/false, isSubtraction,
3321 E->getExprLoc(), "incdec.funcptr");
3322
3323 // For everything else, we can just do a simple increment.
3324 } else {
3325 llvm::Value *amt = Builder.getInt32(amount);
3326 llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
3327 if (CGF.getLangOpts().PointerOverflowDefined)
3328 value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
3329 else
3330 value = CGF.EmitCheckedInBoundsGEP(
3331 elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
3332 E->getExprLoc(), "incdec.ptr");
3333 }
3334
3335 // Vector increment/decrement.
3336 } else if (type->isVectorType()) {
3337 if (type->hasIntegerRepresentation()) {
3338 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
3339
3340 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3341 } else {
3342 value = Builder.CreateFAdd(
3343 value,
3344 llvm::ConstantFP::get(value->getType(), amount),
3345 isInc ? "inc" : "dec");
3346 }
3347
3348 // Floating point.
3349 } else if (type->isRealFloatingType()) {
3350 // Add the inc/dec to the real part.
3351 llvm::Value *amt;
3352 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
3353
3354 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3355 // Another special case: half FP increment should be done via float
3357 value = Builder.CreateCall(
3358 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
3359 CGF.CGM.FloatTy),
3360 input, "incdec.conv");
3361 } else {
3362 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
3363 }
3364 }
3365
3366 if (value->getType()->isFloatTy())
3367 amt = llvm::ConstantFP::get(VMContext,
3368 llvm::APFloat(static_cast<float>(amount)));
3369 else if (value->getType()->isDoubleTy())
3370 amt = llvm::ConstantFP::get(VMContext,
3371 llvm::APFloat(static_cast<double>(amount)));
3372 else {
3373 // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
3374 // Convert from float.
3375 llvm::APFloat F(static_cast<float>(amount));
3376 bool ignored;
3377 const llvm::fltSemantics *FS;
3378 // Don't use getFloatTypeSemantics because Half isn't
3379 // necessarily represented using the "half" LLVM type.
3380 if (value->getType()->isFP128Ty())
3381 FS = &CGF.getTarget().getFloat128Format();
3382 else if (value->getType()->isHalfTy())
3383 FS = &CGF.getTarget().getHalfFormat();
3384 else if (value->getType()->isBFloatTy())
3385 FS = &CGF.getTarget().getBFloat16Format();
3386 else if (value->getType()->isPPC_FP128Ty())
3387 FS = &CGF.getTarget().getIbm128Format();
3388 else
3389 FS = &CGF.getTarget().getLongDoubleFormat();
3390 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
3391 amt = llvm::ConstantFP::get(VMContext, F);
3392 }
3393 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
3394
3395 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3397 value = Builder.CreateCall(
3398 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
3399 CGF.CGM.FloatTy),
3400 value, "incdec.conv");
3401 } else {
3402 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
3403 }
3404 }
3405
3406 // Fixed-point types.
3407 } else if (type->isFixedPointType()) {
3408 // Fixed-point types are tricky. In some cases, it isn't possible to
3409 // represent a 1 or a -1 in the type at all. Piggyback off of
3410 // EmitFixedPointBinOp to avoid having to reimplement saturation.
3411 BinOpInfo Info;
3412 Info.E = E;
3413 Info.Ty = E->getType();
3414 Info.Opcode = isInc ? BO_Add : BO_Sub;
3415 Info.LHS = value;
3416 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
3417 // If the type is signed, it's better to represent this as +(-1) or -(-1),
3418 // since -1 is guaranteed to be representable.
3419 if (type->isSignedFixedPointType()) {
3420 Info.Opcode = isInc ? BO_Sub : BO_Add;
3421 Info.RHS = Builder.CreateNeg(Info.RHS);
3422 }
3423 // Now, convert from our invented integer literal to the type of the unary
3424 // op. This will upscale and saturate if necessary. This value can become
3425 // undef in some cases.
3426 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3427 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
3428 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
3429 value = EmitFixedPointBinOp(Info);
3430
3431 // Objective-C pointer types.
3432 } else {
3433 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
3434
3435 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
3436 if (!isInc) size = -size;
3437 llvm::Value *sizeValue =
3438 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
3439
3440 if (CGF.getLangOpts().PointerOverflowDefined)
3441 value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
3442 else
3443 value = CGF.EmitCheckedInBoundsGEP(
3444 CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
3445 E->getExprLoc(), "incdec.objptr");
3446 value = Builder.CreateBitCast(value, input->getType());
3447 }
3448
3449 if (atomicPHI) {
3450 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3451 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3452 auto Pair = CGF.EmitAtomicCompareExchange(
3453 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
3454 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
3455 llvm::Value *success = Pair.second;
3456 atomicPHI->addIncoming(old, curBlock);
3457 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3458 Builder.SetInsertPoint(contBB);
3459 return isPre ? value : input;
3460 }
3461
3462 // Store the updated result through the lvalue.
3463 if (LV.isBitField()) {
3464 Value *Src = Previous ? Previous : value;
3465 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
3466 CGF.EmitBitfieldConversionCheck(Src, SrcType, value, E->getType(),
3467 LV.getBitFieldInfo(), E->getExprLoc());
3468 } else
3469 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
3470
3471 // If this is a postinc, return the value read from memory, otherwise use the
3472 // updated value.
3473 return isPre ? value : input;
3474}
3475
3476
3477Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3478 QualType PromotionType) {
3479 QualType promotionTy = PromotionType.isNull()
3480 ? getPromotionType(E->getSubExpr()->getType())
3481 : PromotionType;
3482 Value *result = VisitPlus(E, promotionTy);
3483 if (result && !promotionTy.isNull())
3484 result = EmitUnPromotedValue(result, E->getType());
3485 return result;
3486}
3487
3488Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3489 QualType PromotionType) {
3490 // This differs from gcc, though, most likely due to a bug in gcc.
3491 TestAndClearIgnoreResultAssign();
3492 if (!PromotionType.isNull())
3493 return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3494 return Visit(E->getSubExpr());
3495}
3496
3497Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3498 QualType PromotionType) {
3499 QualType promotionTy = PromotionType.isNull()
3500 ? getPromotionType(E->getSubExpr()->getType())
3501 : PromotionType;
3502 Value *result = VisitMinus(E, promotionTy);
3503 if (result && !promotionTy.isNull())
3504 result = EmitUnPromotedValue(result, E->getType());
3505 return result;
3506}
3507
3508Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3509 QualType PromotionType) {
3510 TestAndClearIgnoreResultAssign();
3511 Value *Op;
3512 if (!PromotionType.isNull())
3513 Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3514 else
3515 Op = Visit(E->getSubExpr());
3516
3517 // Generate a unary FNeg for FP ops.
3518 if (Op->getType()->isFPOrFPVectorTy())
3519 return Builder.CreateFNeg(Op, "fneg");
3520
3521 // Emit unary minus with EmitSub so we handle overflow cases etc.
3522 BinOpInfo BinOp;
3523 BinOp.RHS = Op;
3524 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
3525 BinOp.Ty = E->getType();
3526 BinOp.Opcode = BO_Sub;
3527 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3528 BinOp.E = E;
3529 return EmitSub(BinOp);
3530}
3531
3532Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3533 TestAndClearIgnoreResultAssign();
3534 Value *Op = Visit(E->getSubExpr());
3535 return Builder.CreateNot(Op, "not");
3536}
3537
3538Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
3539 // Perform vector logical not on comparison with zero vector.
3540 if (E->getType()->isVectorType() &&
3541 E->getType()->castAs<VectorType>()->getVectorKind() ==
3542 VectorKind::Generic) {
3543 Value *Oper = Visit(E->getSubExpr());
3544 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
3545 Value *Result;
3546 if (Oper->getType()->isFPOrFPVectorTy()) {
3547 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
3548 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
3549 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
3550 } else
3551 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
3552 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
3553 }
3554
3555 // Compare operand to zero.
3556 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
3557
3558 // Invert value.
3559 // TODO: Could dynamically modify easy computations here. For example, if
3560 // the operand is an icmp ne, turn into icmp eq.
3561 BoolVal = Builder.CreateNot(BoolVal, "lnot");
3562
3563 // ZExt result to the expr type.
3564 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
3565}
3566
/// Emit a __builtin_offsetof expression. Constant-folds when possible;
/// otherwise walks the offsetof components, accumulating byte offsets
/// (array components may involve runtime index expressions).
Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
  // Try folding the offsetof to a constant.
  Expr::EvalResult EVResult;
  if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
    llvm::APSInt Value = EVResult.Val.getInt();
    return Builder.getInt(Value);
  }

  // Loop over the components of the offsetof to compute the value.
  // We only reach here when folding failed, i.e. some array index is not a
  // constant expression.
  unsigned n = E->getNumComponents();
  llvm::Type* ResultType = ConvertType(E->getType());
  llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
  // CurrentType tracks the aggregate type we have navigated into so far.
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned i = 0; i != n; ++i) {
    OffsetOfNode ON = E->getComponent(i);
    llvm::Value *Offset = nullptr;
    switch (ON.getKind()) {
    case OffsetOfNode::Array: {
      // Compute the index
      Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
      llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
      bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
      Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");

      // Save the element type
      CurrentType =
          CGF.getContext().getAsArrayType(CurrentType)->getElementType();

      // Compute the element size
      llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
          CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());

      // Multiply out to compute the result
      Offset = Builder.CreateMul(Idx, ElemSize);
      break;
    }

    case OffsetOfNode::Field: {
      FieldDecl *MemberDecl = ON.getField();
      auto *RD = CurrentType->castAsRecordDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Compute the index of the field in its parent.
      unsigned i = 0;
      // FIXME: It would be nice if we didn't have to loop here!
      for (RecordDecl::field_iterator Field = RD->field_begin(),
                                      FieldEnd = RD->field_end();
           Field != FieldEnd; ++Field, ++i) {
        if (*Field == MemberDecl)
          break;
      }
      assert(i < RL.getFieldCount() && "offsetof field in wrong type");

      // Compute the offset to the field
      int64_t OffsetInt = RL.getFieldOffset(i) /
                          CGF.getContext().getCharWidth();
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt);

      // Save the element type.
      CurrentType = MemberDecl->getType();
      break;
    }

      // Dependent offsetof components never reach IR generation.
      // NOTE(review): the case label for this branch appears to be missing
      // in this copy of the file — verify against upstream.
      llvm_unreachable("dependent __builtin_offsetof");

    case OffsetOfNode::Base: {
      // Virtual base offsets are not known statically; reject them.
      if (ON.getBase()->isVirtual()) {
        CGF.ErrorUnsupported(E, "virtual base in offsetof");
        continue;
      }

      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(
          CurrentType->castAsCanonical<RecordType>()->getDecl());

      // Save the element type.
      CurrentType = ON.getBase()->getType();

      // Compute the offset to the base.
      auto *BaseRD = CurrentType->castAsCXXRecordDecl();
      CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
      break;
    }
    }
    Result = Builder.CreateAdd(Result, Offset);
  }
  return Result;
}
3656
3657/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
3658/// argument of the sizeof expression as an integer.
Value *
ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
                              const UnaryExprOrTypeTraitExpr *E) {
  QualType TypeToSize = E->getTypeOfArgument();
  // sizeof/__datasizeof/_Countof of a variable-length array needs runtime
  // evaluation; everything else constant-folds at the bottom.
  if (auto Kind = E->getKind();
      Kind == UETT_SizeOf || Kind == UETT_DataSizeOf || Kind == UETT_CountOf) {
    if (const VariableArrayType *VAT =
            CGF.getContext().getAsVariableArrayType(TypeToSize)) {
      // For _Countof, we only want to evaluate if the extent is actually
      // variable as opposed to a multi-dimensional array whose extent is
      // constant but whose element type is variable.
      bool EvaluateExtent = true;
      if (Kind == UETT_CountOf && VAT->getElementType()->isArrayType()) {
        EvaluateExtent =
            !VAT->getSizeExpr()->isIntegerConstantExpr(CGF.getContext());
      }
      if (EvaluateExtent) {
        if (E->isArgumentType()) {
          // sizeof(type) - make sure to emit the VLA size.
          CGF.EmitVariablyModifiedType(TypeToSize);
        } else {
          // C99 6.5.3.4p2: If the argument is an expression of type
          // VLA, it is evaluated.
          // NOTE(review): the statement evaluating the argument appears to
          // be missing in this copy — verify against upstream.
        }

        // For _Countof, we just want to return the size of a single dimension.
        if (Kind == UETT_CountOf)
          return CGF.getVLAElements1D(VAT).NumElts;

        // For sizeof and __datasizeof, we need to scale the number of elements
        // by the size of the array element type.
        auto VlaSize = CGF.getVLASize(VAT);

        // Scale the number of non-VLA elements by the non-VLA element size.
        CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
        if (!eltSize.isOne())
          return CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize),
                                          VlaSize.NumElts);
        return VlaSize.NumElts;
      }
    }
  } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
    // NOTE(review): the call chain computing the alignment off getContext()
    // looks truncated in this copy — verify against upstream.
    auto Alignment =
        CGF.getContext()
            .getQuantity();
    return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
  } else if (E->getKind() == UETT_VectorElements) {
    // __builtin_vectorelements: the element count of the vector type (a
    // runtime value for scalable vectors).
    auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));
    return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());
  }

  // If this isn't sizeof(vla), the result must be constant; use the constant
  // folding logic so we don't have to duplicate it here.
  return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
}
3717
3718Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3719 QualType PromotionType) {
3720 QualType promotionTy = PromotionType.isNull()
3721 ? getPromotionType(E->getSubExpr()->getType())
3722 : PromotionType;
3723 Value *result = VisitReal(E, promotionTy);
3724 if (result && !promotionTy.isNull())
3725 result = EmitUnPromotedValue(result, E->getType());
3726 return result;
3727}
3728
// Emit __real on an expression: project the real component of a complex
// operand, or pass scalars through unchanged.
Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
                                    QualType PromotionType) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (E->isGLValue()) {
      if (!PromotionType.isNull()) {
        // NOTE(review): the statement initializing `result` (loading the
        // complex value with the imaginary part ignored) appears truncated
        // in this copy — verify against upstream.
            Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
        PromotionType = PromotionType->isAnyComplexType()
                            ? PromotionType
                            : CGF.getContext().getComplexType(PromotionType);
        return result.first ? CGF.EmitPromotedValue(result, PromotionType).first
                            : result.first;
      }

      return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
          .getScalarVal();
    }
    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, false, true).first;
  }

  // A scalar operand: __real is the value itself (promoted if requested).
  if (!PromotionType.isNull())
    return CGF.EmitPromotedScalarExpr(Op, PromotionType);
  return Visit(Op);
}
3758
3759Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3760 QualType PromotionType) {
3761 QualType promotionTy = PromotionType.isNull()
3762 ? getPromotionType(E->getSubExpr()->getType())
3763 : PromotionType;
3764 Value *result = VisitImag(E, promotionTy);
3765 if (result && !promotionTy.isNull())
3766 result = EmitUnPromotedValue(result, E->getType());
3767 return result;
3768}
3769
// Emit __imag on an expression: project the imaginary component of a
// complex operand; on a scalar it evaluates the operand for side effects
// and yields zero.
Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
                                    QualType PromotionType) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (Op->isGLValue()) {
      if (!PromotionType.isNull()) {
        // NOTE(review): the statement initializing `result` (loading the
        // complex value with the real part ignored) appears truncated in
        // this copy — verify against upstream.
            Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
        PromotionType = PromotionType->isAnyComplexType()
                            ? PromotionType
                            : CGF.getContext().getComplexType(PromotionType);
        return result.second
                   ? CGF.EmitPromotedValue(result, PromotionType).second
                   : result.second;
      }

      return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
          .getScalarVal();
    }
    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, true, false).second;
  }

  // __imag on a scalar returns zero. Emit the subexpr to ensure side
  // effects are evaluated, but not the actual value.
  if (Op->isGLValue())
    CGF.EmitLValue(Op);
  else if (!PromotionType.isNull())
    CGF.EmitPromotedScalarExpr(Op, PromotionType);
  else
    CGF.EmitScalarExpr(Op, true);
  if (!PromotionType.isNull())
    return llvm::Constant::getNullValue(ConvertType(PromotionType));
  return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
3808
3809//===----------------------------------------------------------------------===//
3810// Binary Operators
3811//===----------------------------------------------------------------------===//
3812
3813Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3814 QualType PromotionType) {
3815 return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");
3816}
3817
3818Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3819 QualType ExprType) {
3820 return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType), "unpromotion");
3821}
3822
3823Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3824 E = E->IgnoreParens();
3825 if (auto BO = dyn_cast<BinaryOperator>(E)) {
3826 switch (BO->getOpcode()) {
3827#define HANDLE_BINOP(OP) \
3828 case BO_##OP: \
3829 return Emit##OP(EmitBinOps(BO, PromotionType));
3830 HANDLE_BINOP(Add)
3831 HANDLE_BINOP(Sub)
3832 HANDLE_BINOP(Mul)
3833 HANDLE_BINOP(Div)
3834#undef HANDLE_BINOP
3835 default:
3836 break;
3837 }
3838 } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
3839 switch (UO->getOpcode()) {
3840 case UO_Imag:
3841 return VisitImag(UO, PromotionType);
3842 case UO_Real:
3843 return VisitReal(UO, PromotionType);
3844 case UO_Minus:
3845 return VisitMinus(UO, PromotionType);
3846 case UO_Plus:
3847 return VisitPlus(UO, PromotionType);
3848 default:
3849 break;
3850 }
3851 }
3852 auto result = Visit(const_cast<Expr *>(E));
3853 if (result) {
3854 if (!PromotionType.isNull())
3855 return EmitPromotedValue(result, PromotionType);
3856 else
3857 return EmitUnPromotedValue(result, E->getType());
3858 }
3859 return result;
3860}
3861
3862BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
3863 QualType PromotionType) {
3864 TestAndClearIgnoreResultAssign();
3865 BinOpInfo Result;
3866 Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);
3867 Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);
3868 if (!PromotionType.isNull())
3869 Result.Ty = PromotionType;
3870 else
3871 Result.Ty = E->getType();
3872 Result.Opcode = E->getOpcode();
3873 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3874 Result.E = E;
3875 return Result;
3876}
3877
// Emit a compound assignment (e.g. "lhs op= rhs"): the RHS is evaluated
// first, the LHS is loaded (via atomicrmw or a CAS loop when the LHS is
// _Atomic), both are converted to the computation type, combined through
// *Func, and the result converted back. On return, Result holds the value
// of the expression and the returned LValue designates the LHS.
LValue ScalarExprEmitter::EmitCompoundAssignLValue(
    const CompoundAssignOperator *E,
    Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
    Value *&Result) {
  QualType LHSTy = E->getLHS()->getType();
  BinOpInfo OpInfo;

  // Emit the RHS first. __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.

  // Determine the promoted computation-result type (falling back to the
  // computation result type itself) and any promotions for LHS/RHS.
  QualType PromotionTypeCR;
  PromotionTypeCR = getPromotionType(E->getComputationResultType());
  if (PromotionTypeCR.isNull())
    PromotionTypeCR = E->getComputationResultType();
  QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
  QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
  if (!PromotionTypeRHS.isNull())
    OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);
  else
    OpInfo.RHS = Visit(E->getRHS());
  OpInfo.Ty = PromotionTypeCR;
  OpInfo.Opcode = E->getOpcode();
  OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  OpInfo.E = E;
  // Load/convert the LHS.
  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

  llvm::PHINode *atomicPHI = nullptr;
  if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
    QualType type = atomicTy->getValueType();
    // Try to lower to a single atomicrmw instruction when the operation
    // maps onto one and no overflow checking would be defeated by it.
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
            LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
      llvm::Instruction::BinaryOps Op;
      switch (OpInfo.Opcode) {
      // We don't have atomicrmw operands for *, %, /, <<, >>
      case BO_MulAssign: case BO_DivAssign:
      case BO_RemAssign:
      case BO_ShlAssign:
      case BO_ShrAssign:
        break;
      case BO_AddAssign:
        AtomicOp = llvm::AtomicRMWInst::Add;
        Op = llvm::Instruction::Add;
        break;
      case BO_SubAssign:
        AtomicOp = llvm::AtomicRMWInst::Sub;
        Op = llvm::Instruction::Sub;
        break;
      case BO_AndAssign:
        AtomicOp = llvm::AtomicRMWInst::And;
        Op = llvm::Instruction::And;
        break;
      case BO_XorAssign:
        AtomicOp = llvm::AtomicRMWInst::Xor;
        Op = llvm::Instruction::Xor;
        break;
      case BO_OrAssign:
        AtomicOp = llvm::AtomicRMWInst::Or;
        Op = llvm::Instruction::Or;
        break;
      default:
        llvm_unreachable("Invalid compound assignment type");
      }
      if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
        llvm::Value *Amt = CGF.EmitToMemory(
            EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
                                 E->getExprLoc()),
            LHSTy);

        llvm::AtomicRMWInst *OldVal =
            CGF.emitAtomicRMWInst(AtomicOp, LHSLV.getAddress(), Amt);

        // Since operation is atomic, the result type is guaranteed to be the
        // same as the input in LLVM terms.
        Result = Builder.CreateBinOp(Op, OldVal, Amt);
        return LHSLV;
      }
    }
    // FIXME: For floating point types, we should be saving and restoring the
    // floating point environment in the loop.
    // Otherwise set up a compare-exchange loop: atomicPHI carries the
    // current value of the atomic through each retry iteration.
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
    OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
    atomicPHI->addIncoming(OpInfo.LHS, startBB);
    OpInfo.LHS = atomicPHI;
  }
  else
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
  SourceLocation Loc = E->getExprLoc();
  // Convert the loaded LHS into the computation type.
  if (!PromotionTypeLHS.isNull())
    OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
                                      E->getExprLoc());
  else
    OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
                                      E->getComputationLHSType(), Loc);

  // Expand the binary operator.
  Result = (this->*Func)(OpInfo);

  // Convert the result back to the LHS type,
  // potentially with Implicit Conversion sanitizer check.
  // If LHSLV is a bitfield, use default ScalarConversionOpts
  // to avoid emit any implicit integer checks.
  Value *Previous = nullptr;
  if (LHSLV.isBitField()) {
    Previous = Result;
    Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc);
  } else
    Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
                                  ScalarConversionOpts(CGF.SanOpts));

  if (atomicPHI) {
    // Close the CAS loop: retry with the freshly observed value on failure.
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(old, curBlock);
    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return LHSLV;
  }

  // Store the result value into the LHS lvalue. Bit-fields are handled
  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (LHSLV.isBitField()) {
    // NOTE(review): the bitfield store statement appears to be missing here
    // in this copy — verify against upstream.
    Value *Src = Previous ? Previous : Result;
    QualType SrcType = E->getRHS()->getType();
    QualType DstType = E->getLHS()->getType();
    CGF.EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
                                    LHSLV.getBitFieldInfo(), E->getExprLoc());
  } else

  // NOTE(review): the non-bitfield store and the OpenMP lastprivate call
  // appear truncated in this copy — verify against upstream.
  if (CGF.getLangOpts().OpenMP)
        E->getLHS());
  return LHSLV;
}
4034
4035Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
4036 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
4037 bool Ignore = TestAndClearIgnoreResultAssign();
4038 Value *RHS = nullptr;
4039 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
4040
4041 // If the result is clearly ignored, return now.
4042 if (Ignore)
4043 return nullptr;
4044
4045 // The result of an assignment in C is the assigned r-value.
4046 if (!CGF.getLangOpts().CPlusPlus)
4047 return RHS;
4048
4049 // If the lvalue is non-volatile, return the computed value of the assignment.
4050 if (!LHS.isVolatileQualified())
4051 return RHS;
4052
4053 // Otherwise, reload the value.
4054 return EmitLoadOfLValue(LHS, E->getExprLoc());
4055}
4056
4057void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
4058 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
4059 SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 2>
4060 Checks;
4061
4062 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
4063 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
4064 SanitizerKind::SO_IntegerDivideByZero));
4065 }
4066
4067 const auto *BO = cast<BinaryOperator>(Ops.E);
4068 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
4069 Ops.Ty->hasSignedIntegerRepresentation() &&
4070 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
4071 Ops.mayHaveIntegerOverflow()) {
4072 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
4073
4074 llvm::Value *IntMin =
4075 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
4076 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
4077
4078 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
4079 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
4080 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
4081 Checks.push_back(
4082 std::make_pair(NotOverflow, SanitizerKind::SO_SignedIntegerOverflow));
4083 }
4084
4085 if (Checks.size() > 0)
4086 EmitBinOpCheck(Checks, Ops);
4087}
4088
// Emit a division, dispatching on operand kind (matrix, floating point,
// fixed point, unsigned/signed integer) with optional sanitizer checks.
Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
  {
    // Scope the sanitizer debug location to the checks only.
    SanitizerDebugLocation SanScope(&CGF,
                                    {SanitizerKind::SO_IntegerDivideByZero,
                                     SanitizerKind::SO_SignedIntegerOverflow,
                                     SanitizerKind::SO_FloatDivideByZero},
                                    SanitizerHandler::DivremOverflow);
    if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
         CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
        Ops.Ty->isIntegerType() &&
        (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
      EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
    } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
               Ops.Ty->isRealFloatingType() &&
               Ops.mayHaveFloatDivisionByZero()) {
      // Floating-point divide-by-zero check: RHS must compare unordered-or-
      // unequal to zero.
      llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
      llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
      EmitBinOpCheck(
          std::make_pair(NonZero, SanitizerKind::SO_FloatDivideByZero), Ops);
    }
  }

  if (Ops.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder MB(Builder);
    // We need to check the types of the operands of the operator to get the
    // correct matrix dimensions.
    auto *BO = cast<BinaryOperator>(Ops.E);
    (void)BO;
    // NOTE(review): the condition of this assert appears truncated in this
    // copy — verify against upstream.
    assert(
           "first operand must be a matrix");
    assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
           "second operand must be an arithmetic type");
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
    return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
                              Ops.Ty->hasUnsignedIntegerRepresentation());
  }

  if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
    llvm::Value *Val;
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
    Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
    // Attach any fdiv accuracy metadata (e.g. from OpenCL options).
    CGF.SetDivFPAccuracy(Val);
    return Val;
  }
  else if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);
  else if (Ops.Ty->hasUnsignedIntegerRepresentation())
    return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
  else
    return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
}
4142
4143Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
4144 // Rem in C can't be a floating point type: C99 6.5.5p2.
4145 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
4146 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
4147 Ops.Ty->isIntegerType() &&
4148 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4149 SanitizerDebugLocation SanScope(&CGF,
4150 {SanitizerKind::SO_IntegerDivideByZero,
4151 SanitizerKind::SO_SignedIntegerOverflow},
4152 SanitizerHandler::DivremOverflow);
4153 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4154 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
4155 }
4156
4157 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4158 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
4159
4160 if (CGF.getLangOpts().HLSL && Ops.Ty->hasFloatingRepresentation())
4161 return Builder.CreateFRem(Ops.LHS, Ops.RHS, "rem");
4162
4163 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
4164}
4165
// Emit an add/sub/mul via the llvm.{s,u}{add,sub,mul}.with.overflow
// intrinsics. On overflow this either reports through the UBSan runtime,
// traps (the -ftrapv style check), or calls a user-specified overflow
// handler and uses its result.
Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  unsigned OpID = 0;
  SanitizerHandler OverflowKind;

  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
  // Select the overflow intrinsic and the operation code passed to a custom
  // handler (1=add, 2=sub, 3=mul, shifted and signedness-tagged below).
  switch (Ops.Opcode) {
  case BO_Add:
  case BO_AddAssign:
    OpID = 1;
    IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
                     llvm::Intrinsic::uadd_with_overflow;
    OverflowKind = SanitizerHandler::AddOverflow;
    break;
  case BO_Sub:
  case BO_SubAssign:
    OpID = 2;
    IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
                     llvm::Intrinsic::usub_with_overflow;
    OverflowKind = SanitizerHandler::SubOverflow;
    break;
  case BO_Mul:
  case BO_MulAssign:
    OpID = 3;
    IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
                     llvm::Intrinsic::umul_with_overflow;
    OverflowKind = SanitizerHandler::MulOverflow;
    break;
  default:
    llvm_unreachable("Unsupported operation for overflow detection");
  }
  // Encode signedness in the low bit of the handler op code.
  OpID <<= 1;
  if (isSigned)
    OpID |= 1;

  SanitizerDebugLocation SanScope(&CGF,
                                  {SanitizerKind::SO_SignedIntegerOverflow,
                                   SanitizerKind::SO_UnsignedIntegerOverflow},
                                  OverflowKind);
  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);

  // The intrinsic returns {result, i1 overflow-flag}.
  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);

  // Handle overflow with llvm.trap if no custom handler has been specified.
  // NOTE(review): the initializer of handlerName appears to be missing from
  // this extraction (presumably the overflow-handler name from the language
  // options) — verify against upstream.
  const std::string *handlerName =
  if (handlerName->empty()) {
    // If the signed-integer-overflow sanitizer is enabled, emit a call to its
    // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
    if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
      llvm::Value *NotOverflow = Builder.CreateNot(overflow);
      // NOTE(review): the declaration line of `Ordinal` appears to be missing
      // from this extraction — verify against upstream.
          isSigned ? SanitizerKind::SO_SignedIntegerOverflow
                   : SanitizerKind::SO_UnsignedIntegerOverflow;
      EmitBinOpCheck(std::make_pair(NotOverflow, Ordinal), Ops);
    } else
      CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
    return result;
  }

  // Branch in case of overflow.
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *continueBB =
      CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // If an overflow handler is set, then we want to call it and then use its
  // result, if it returns.
  Builder.SetInsertPoint(overflowBB);

  // Get the overflow handler.
  llvm::Type *Int8Ty = CGF.Int8Ty;
  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
  llvm::FunctionType *handlerTy =
      llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
  llvm::FunctionCallee handler =
      CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);

  // Sign extend the args to 64-bit, so that we can use the same handler for
  // all types of overflow.
  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);

  // Call the handler with the two arguments, the operation, and the size of
  // the result.
  llvm::Value *handlerArgs[] = {
    lhs,
    rhs,
    Builder.getInt8(OpID),
    Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
  };
  llvm::Value *handlerResult =
    CGF.EmitNounwindRuntimeCall(handler, handlerArgs);

  // Truncate the result back to the desired size.
  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
  Builder.CreateBr(continueBB);

  // Merge the non-overflow result with the handler's result.
  Builder.SetInsertPoint(continueBB);
  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);

  return phi;
}
4277
/// BO_Add/BO_Sub are handled by EmitPointerWithAlignment to preserve alignment
/// information.
/// This function is used for BO_AddAssign/BO_SubAssign.
static Value *emitPointerArithmetic(CodeGenFunction &CGF, const BinOpInfo &op,
                                    bool isSubtraction) {
  // Must have binary (not unary) expr here. Unary pointer
  // increment/decrement doesn't use this path.
  // NOTE(review): the definition of `expr` appears to be missing from this
  // extraction (presumably a cast of op.E to BinaryOperator) — verify against
  // upstream.

  Value *pointer = op.LHS;
  Expr *pointerOperand = expr->getLHS();
  Value *index = op.RHS;
  Expr *indexOperand = expr->getRHS();

  // In a subtraction, the LHS is always the pointer.
  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
    std::swap(pointer, index);
    std::swap(pointerOperand, indexOperand);
  }

  // Delegate to the member function, which emits the checks and the GEP.
  return CGF.EmitPointerArithmetic(expr, pointerOperand, pointer, indexOperand,
                                   index, isSubtraction);
}
4301
/// Emit pointer + index arithmetic.
// NOTE(review): the declarator line of this definition (return type and
// qualified name, presumably
// `llvm::Value *CodeGenFunction::EmitPointerArithmetic(`) appears to be
// missing from this extraction — verify against upstream.
    const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer,
    Expr *indexOperand, llvm::Value *index, bool isSubtraction) {
  // Whether the index must be sign- or zero-extended to the pointer's
  // index width.
  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();

  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
  auto &DL = CGM.getDataLayout();
  auto *PtrTy = cast<llvm::PointerType>(pointer->getType());

  // Some versions of glibc and gcc use idioms (particularly in their malloc
  // routines) that add a pointer-sized integer (known to be a pointer value)
  // to a null pointer in order to cast the value back to an integer or as
  // part of a pointer alignment algorithm. This is undefined behavior, but
  // we'd like to be able to compile programs that use it.
  //
  // Normally, we'd generate a GEP with a null-pointer base here in response
  // to that code, but it's also UB to dereference a pointer created that
  // way. Instead (as an acknowledged hack to tolerate the idiom) we will
  // generate a direct cast of the integer value to a pointer.
  //
  // The idiom (p = nullptr + N) is not met if any of the following are true:
  //
  //   The operation is subtraction.
  //   The index is not pointer-sized.
  //   The pointer type is not byte-sized.
  //
  // Note that we do not suppress the pointer overflow check in this case.
  // NOTE(review): the `if (` head of this condition (presumably
  // BinaryOperator::isNullPointerArithmeticExtension(...)) appears to be
  // missing from this extraction — verify against upstream.
          getContext(), BO->getOpcode(), pointerOperand, indexOperand)) {
    llvm::Value *Ptr = Builder.CreateIntToPtr(index, pointer->getType());
    if (getLangOpts().PointerOverflowDefined ||
        !SanOpts.has(SanitizerKind::PointerOverflow) ||
        NullPointerIsDefined(Builder.GetInsertBlock()->getParent(),
                             PtrTy->getPointerAddressSpace()))
      return Ptr;
    // The inbounds GEP of null is valid iff the index is zero.
    auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
    auto CheckHandler = SanitizerHandler::PointerOverflow;
    SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
    llvm::Value *IsZeroIndex = Builder.CreateIsNull(index);
    llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(BO->getExprLoc())};
    llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
    llvm::Value *IntPtr = llvm::Constant::getNullValue(IntPtrTy);
    llvm::Value *ComputedGEP = Builder.CreateZExtOrTrunc(index, IntPtrTy);
    llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
    EmitCheck({{IsZeroIndex, CheckOrdinal}}, CheckHandler, StaticArgs,
              DynamicArgs);
    return Ptr;
  }

  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
    // Zero-extend or sign-extend the pointer value according to
    // whether the index is signed or not.
    index = Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
                                  "idx.ext");
  }

  // If this is subtraction, negate the index.
  if (isSubtraction)
    index = Builder.CreateNeg(index, "idx.neg");

  if (SanOpts.has(SanitizerKind::ArrayBounds))
    EmitBoundsCheck(BO, pointerOperand, index, indexOperand->getType(),
                    /*Accessed*/ false);

  const PointerType *pointerType =
      pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
    // Not a C pointer type: arithmetic is done in raw bytes, scaled by the
    // pointee object size.
    // NOTE(review): a cast in this initializer chain appears to be missing
    // from this extraction — verify against upstream.
    QualType objectType = pointerOperand->getType()
                            ->getPointeeType();
    llvm::Value *objectSize =
        CGM.getSize(getContext().getTypeSizeInChars(objectType));

    index = Builder.CreateMul(index, objectSize);

    llvm::Value *result = Builder.CreateGEP(Int8Ty, pointer, index, "add.ptr");
    return Builder.CreateBitCast(result, pointer->getType());
  }

  QualType elementType = pointerType->getPointeeType();
  if (const VariableArrayType *vla =
          getContext().getAsVariableArrayType(elementType)) {
    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
    llvm::Type *elemTy = ConvertTypeForMem(vla->getElementType());
    if (getLangOpts().PointerOverflowDefined) {
      index = Builder.CreateMul(index, numElements, "vla.index");
      pointer = Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
    } else {
      index = Builder.CreateNSWMul(index, numElements, "vla.index");
      pointer =
          EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
                                 isSubtraction, BO->getExprLoc(), "add.ptr");
    }
    return pointer;
  }

  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
  // future proof.
  llvm::Type *elemTy;
  if (elementType->isVoidType() || elementType->isFunctionType())
    elemTy = Int8Ty;
  else
    elemTy = ConvertTypeForMem(elementType);

  if (getLangOpts().PointerOverflowDefined)
    return Builder.CreateGEP(elemTy, pointer, index, "add.ptr");

  return EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned, isSubtraction,
                                BO->getExprLoc(), "add.ptr");
}
4421
4422// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
4423// Addend. Use negMul and negAdd to negate the first operand of the Mul or
4424// the add operand respectively. This allows fmuladd to represent a*b-c, or
4425// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
4426// efficient operations.
4427static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
4428 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4429 bool negMul, bool negAdd) {
4430 Value *MulOp0 = MulOp->getOperand(0);
4431 Value *MulOp1 = MulOp->getOperand(1);
4432 if (negMul)
4433 MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
4434 if (negAdd)
4435 Addend = Builder.CreateFNeg(Addend, "neg");
4436
4437 Value *FMulAdd = nullptr;
4438 if (Builder.getIsFPConstrained()) {
4439 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
4440 "Only constrained operation should be created when Builder is in FP "
4441 "constrained mode");
4442 FMulAdd = Builder.CreateConstrainedFPCall(
4443 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
4444 Addend->getType()),
4445 {MulOp0, MulOp1, Addend});
4446 } else {
4447 FMulAdd = Builder.CreateCall(
4448 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
4449 {MulOp0, MulOp1, Addend});
4450 }
4451 MulOp->eraseFromParent();
4452
4453 return FMulAdd;
4454}
4455
4456// Check whether it would be legal to emit an fmuladd intrinsic call to
4457// represent op and if so, build the fmuladd.
4458//
4459// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
4460// Does NOT check the type of the operation - it's assumed that this function
4461// will be called from contexts where it's known that the type is contractable.
4462static Value* tryEmitFMulAdd(const BinOpInfo &op,
4463 const CodeGenFunction &CGF, CGBuilderTy &Builder,
4464 bool isSub=false) {
4465
4466 assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
4467 op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
4468 "Only fadd/fsub can be the root of an fmuladd.");
4469
4470 // Check whether this op is marked as fusable.
4471 if (!op.FPFeatures.allowFPContractWithinStatement())
4472 return nullptr;
4473
4474 Value *LHS = op.LHS;
4475 Value *RHS = op.RHS;
4476
4477 // Peek through fneg to look for fmul. Make sure fneg has no users, and that
4478 // it is the only use of its operand.
4479 bool NegLHS = false;
4480 if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
4481 if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
4482 LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
4483 LHS = LHSUnOp->getOperand(0);
4484 NegLHS = true;
4485 }
4486 }
4487
4488 bool NegRHS = false;
4489 if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
4490 if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
4491 RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
4492 RHS = RHSUnOp->getOperand(0);
4493 NegRHS = true;
4494 }
4495 }
4496
4497 // We have a potentially fusable op. Look for a mul on one of the operands.
4498 // Also, make sure that the mul result isn't used directly. In that case,
4499 // there's no point creating a muladd operation.
4500 if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
4501 if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
4502 (LHSBinOp->use_empty() || NegLHS)) {
4503 // If we looked through fneg, erase it.
4504 if (NegLHS)
4505 cast<llvm::Instruction>(op.LHS)->eraseFromParent();
4506 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
4507 }
4508 }
4509 if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
4510 if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
4511 (RHSBinOp->use_empty() || NegRHS)) {
4512 // If we looked through fneg, erase it.
4513 if (NegRHS)
4514 cast<llvm::Instruction>(op.RHS)->eraseFromParent();
4515 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
4516 }
4517 }
4518
4519 if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
4520 if (LHSBinOp->getIntrinsicID() ==
4521 llvm::Intrinsic::experimental_constrained_fmul &&
4522 (LHSBinOp->use_empty() || NegLHS)) {
4523 // If we looked through fneg, erase it.
4524 if (NegLHS)
4525 cast<llvm::Instruction>(op.LHS)->eraseFromParent();
4526 return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
4527 }
4528 }
4529 if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
4530 if (RHSBinOp->getIntrinsicID() ==
4531 llvm::Intrinsic::experimental_constrained_fmul &&
4532 (RHSBinOp->use_empty() || NegRHS)) {
4533 // If we looked through fneg, erase it.
4534 if (NegRHS)
4535 cast<llvm::Instruction>(op.RHS)->eraseFromParent();
4536 return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
4537 }
4538 }
4539
4540 return nullptr;
4541}
4542
// Emit addition for the given binary op: pointer arithmetic, signed/unsigned
// integer (with the configured overflow lowering), FP (with fmuladd fusion),
// matrix, and fixed-point cases.
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
  // Pointer + integer (or integer + pointer) takes the pointer-arithmetic
  // path.
  if (op.LHS->getType()->isPointerTy() ||
      op.RHS->getType()->isPointerTy())
  // NOTE(review): the statement of this `if` (presumably a return through
  // emitPointerArithmetic with isSubtraction=false) appears to be missing
  // from this extraction — verify against upstream.

  if (op.Ty->isSignedIntegerOrEnumerationType()) {
    // Signed addition: the lowering depends on the signed-overflow behavior
    // mode, unless the signed-overflow sanitizer forces the checked form.
    switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
    case LangOptions::SOB_Undefined:
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
    case LangOptions::SOB_Trapping:
      // Skip the overflow check when analysis proves it cannot trigger.
      if (CanElideOverflowCheck(CGF.getContext(), op))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      return EmitOverflowCheckedBinOp(op);
    }
  }

  // For vector and matrix adds, try to fold into a fmuladd.
  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    // Try to form an fmuladd.
    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
      return FMulAdd;
  }

  if (op.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder MB(Builder);
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return MB.CreateAdd(op.LHS, op.RHS);
  }

  if (op.Ty->isUnsignedIntegerType() &&
      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
      !CanElideOverflowCheck(CGF.getContext(), op))
    return EmitOverflowCheckedBinOp(op);

  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
  }

  if (op.isFixedPointOp())
    return EmitFixedPointBinOp(op);

  return Builder.CreateAdd(op.LHS, op.RHS, "add");
}
4594
/// The resulting value must be calculated with exact precision, so the operands
/// may not be the same type.
Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
  using llvm::APSInt;
  using llvm::ConstantInt;

  // This is either a binary operation where at least one of the operands is
  // a fixed-point type, or a unary operation where the operand is a fixed-point
  // type. The result type of a binary operation is determined by
  // Sema::handleFixedPointConversions().
  QualType ResultTy = op.Ty;
  QualType LHSTy, RHSTy;
  if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
    RHSTy = BinOp->getRHS()->getType();
    if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
      // For compound assignment, the effective type of the LHS at this point
      // is the computation LHS type, not the actual LHS type, and the final
      // result type is not the type of the expression but rather the
      // computation result type.
      LHSTy = CAO->getComputationLHSType();
      ResultTy = CAO->getComputationResultType();
    } else
      LHSTy = BinOp->getLHS()->getType();
  } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
    // Unary ops (e.g. ++/--) have a single operand type on both sides.
    LHSTy = UnOp->getSubExpr()->getType();
    RHSTy = UnOp->getSubExpr()->getType();
  }
  ASTContext &Ctx = CGF.getContext();
  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  // Fixed-point semantics (width, scale, signedness, saturation) for each
  // side and for the common type the operation is performed in.
  auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
  auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
  auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
  auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);

  // Perform the actual operation.
  Value *Result;
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  switch (op.Opcode) {
  case BO_AddAssign:
  case BO_Add:
    Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_SubAssign:
  case BO_Sub:
    Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_MulAssign:
  case BO_Mul:
    Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_DivAssign:
  case BO_Div:
    Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
    break;
  case BO_ShlAssign:
  case BO_Shl:
    Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
    break;
  case BO_ShrAssign:
  case BO_Shr:
    Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
    break;
  // Comparisons produce an i1 directly; no result-type conversion needed.
  case BO_LT:
    return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GT:
    return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_LE:
    return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_GE:
    return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_EQ:
    // For equality operations, we assume any padding bits on unsigned types are
    // zero'd out. They could be overwritten through non-saturating operations
    // that cause overflow, but this leads to undefined behavior.
    return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_NE:
    return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
  case BO_Cmp:
  case BO_LAnd:
  case BO_LOr:
    llvm_unreachable("Found unimplemented fixed point binary operation");
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Rem:
  case BO_Xor:
  case BO_And:
  case BO_Or:
  case BO_Assign:
  case BO_RemAssign:
  case BO_AndAssign:
  case BO_XorAssign:
  case BO_OrAssign:
  case BO_Comma:
    llvm_unreachable("Found unsupported binary operation for fixed point types.");
  }

  // Shifts keep the LHS semantics; other ops were computed in the common
  // semantics.
  // NOTE(review): the second operand of this `||` (presumably the
  // shift-assignment opcode check) appears to be missing from this
  // extraction — verify against upstream.
  bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
  // Convert to the result type.
  return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
                                                      : CommonFixedSema,
                                      ResultFixedSema);
}
4700
// Emit subtraction: integer/FP/matrix/fixed-point when the LHS is not a
// pointer, pointer - integer, and pointer - pointer (element-count
// difference).
Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
  // The LHS is always a pointer if either side is.
  if (!op.LHS->getType()->isPointerTy()) {
    if (op.Ty->isSignedIntegerOrEnumerationType()) {
      // Signed subtraction: the lowering depends on the signed-overflow
      // behavior mode, unless the sanitizer forces the checked form.
      switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateSub(op.LHS, op.RHS, "sub");
        [[fallthrough]];
      case LangOptions::SOB_Undefined:
        if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (CanElideOverflowCheck(CGF.getContext(), op))
          return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
        return EmitOverflowCheckedBinOp(op);
      }
    }

    // For vector and matrix subs, try to fold into a fmuladd.
    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      // Try to form an fmuladd.
      if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
        return FMulAdd;
    }

    if (op.Ty->isConstantMatrixType()) {
      llvm::MatrixBuilder MB(Builder);
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return MB.CreateSub(op.LHS, op.RHS);
    }

    if (op.Ty->isUnsignedIntegerType() &&
        CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !CanElideOverflowCheck(CGF.getContext(), op))
      return EmitOverflowCheckedBinOp(op);

    if (op.LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
      return Builder.CreateFSub(op.LHS, op.RHS, "sub");
    }

    if (op.isFixedPointOp())
      return EmitFixedPointBinOp(op);

    return Builder.CreateSub(op.LHS, op.RHS, "sub");
  }

  // If the RHS is not a pointer, then we have normal pointer
  // arithmetic.
  if (!op.RHS->getType()->isPointerTy())
  // NOTE(review): the statement of this `if` (presumably a return through
  // emitPointerArithmetic with isSubtraction=true) appears to be missing
  // from this extraction — verify against upstream.

  // Otherwise, this is a pointer subtraction.

  // Do the raw subtraction part.
  llvm::Value *LHS
    = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
  llvm::Value *RHS
    = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
  Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");

  // Okay, figure out the element size.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);
  QualType elementType = expr->getLHS()->getType()->getPointeeType();

  llvm::Value *divisor = nullptr;

  // For a variable-length array, this is going to be non-constant.
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    auto VlaSize = CGF.getVLASize(vla);
    elementType = VlaSize.Type;
    divisor = VlaSize.NumElts;

    // Scale the number of non-VLA elements by the non-VLA element size.
    CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
    if (!eltSize.isOne())
      divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);

  // For everything else, we can just compute it, safe in the
  // assumption that Sema won't let anything through that we can't
  // safely compute the size of.
  } else {
    CharUnits elementSize;
    // Handle GCC extension for pointer arithmetic on void* and
    // function pointer types.
    if (elementType->isVoidType() || elementType->isFunctionType())
      elementSize = CharUnits::One();
    else
      elementSize = CGF.getContext().getTypeSizeInChars(elementType);

    // Don't even emit the divide for element size of 1.
    if (elementSize.isOne())
      return diffInChars;

    divisor = CGF.CGM.getSize(elementSize);
  }

  // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
  // pointer difference in C is only defined in the case where both operands
  // are pointing to elements of an array.
  return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
}
4807
4808Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
4809 bool RHSIsSigned) {
4810 llvm::IntegerType *Ty;
4811 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4812 Ty = cast<llvm::IntegerType>(VT->getElementType());
4813 else
4814 Ty = cast<llvm::IntegerType>(LHS->getType());
4815 // For a given type of LHS the maximum shift amount is width(LHS)-1, however
4816 // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
4817 // this in ConstantInt::get, this results in the value getting truncated.
4818 // Constrain the return value to be max(RHS) in this case.
4819 llvm::Type *RHSTy = RHS->getType();
4820 llvm::APInt RHSMax =
4821 RHSIsSigned ? llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())
4822 : llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
4823 if (RHSMax.ult(Ty->getBitWidth()))
4824 return llvm::ConstantInt::get(RHSTy, RHSMax);
4825 return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
4826}
4827
4828Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
4829 const Twine &Name) {
4830 llvm::IntegerType *Ty;
4831 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4832 Ty = cast<llvm::IntegerType>(VT->getElementType());
4833 else
4834 Ty = cast<llvm::IntegerType>(LHS->getType());
4835
4836 if (llvm::isPowerOf2_64(Ty->getBitWidth()))
4837 return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);
4838
4839 return Builder.CreateURem(
4840 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
4841}
4842
// Emit a left shift, constraining the shift amount for OpenCL/HLSL and
// emitting UBSan checks for out-of-range exponents and (where enabled)
// bases whose set bits would be shifted out.
Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
  // TODO: This misses out on the sanitizer check below.
  if (Ops.isFixedPointOp())
    return EmitFixedPointBinOp(Ops);

  // LLVM requires the LHS and RHS to be the same type: promote or truncate the
  // RHS to the same size as the LHS.
  Value *RHS = Ops.RHS;
  if (Ops.LHS->getType() != RHS->getType())
    RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");

  // Whether to check the signed base (not in C++20, which defines these
  // shifts).
  // NOTE(review): one conjunct of this condition appears to be missing from
  // this extraction — verify against upstream.
  bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
                            Ops.Ty->hasSignedIntegerRepresentation() &&
                            !CGF.getLangOpts().CPlusPlus20;
  bool SanitizeUnsignedBase =
      CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
      Ops.Ty->hasUnsignedIntegerRepresentation();
  bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
  bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
    RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
  else if ((SanitizeBase || SanitizeExponent) &&
           isa<llvm::IntegerType>(Ops.LHS->getType())) {
    SmallVector<SanitizerKind::SanitizerOrdinal, 3> Ordinals;
    if (SanitizeSignedBase)
      Ordinals.push_back(SanitizerKind::SO_ShiftBase);
    if (SanitizeUnsignedBase)
      Ordinals.push_back(SanitizerKind::SO_UnsignedShiftBase);
    if (SanitizeExponent)
      Ordinals.push_back(SanitizerKind::SO_ShiftExponent);

    SanitizerDebugLocation SanScope(&CGF, Ordinals,
                                    SanitizerHandler::ShiftOutOfBounds);
    SmallVector<std::pair<Value *, SanitizerKind::SanitizerOrdinal>, 2> Checks;
    bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
    // The exponent is valid iff RHS <= width(LHS)-1.
    llvm::Value *WidthMinusOne =
        GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);
    llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);

    if (SanitizeExponent) {
      Checks.push_back(
          std::make_pair(ValidExponent, SanitizerKind::SO_ShiftExponent));
    }

    if (SanitizeBase) {
      // Check whether we are shifting any non-zero bits off the top of the
      // integer. We only emit this check if exponent is valid - otherwise
      // instructions below will have undefined behavior themselves.
      llvm::BasicBlock *Orig = Builder.GetInsertBlock();
      llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
      llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
      Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
      llvm::Value *PromotedWidthMinusOne =
          (RHS == Ops.RHS) ? WidthMinusOne
                           : GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);
      CGF.EmitBlock(CheckShiftBase);
      // The bits that would be lost: LHS >> (width-1 - RHS).
      llvm::Value *BitsShiftedOff = Builder.CreateLShr(
          Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
                                     /*NUW*/ true, /*NSW*/ true),
          "shl.check");
      if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
        // In C99, we are not permitted to shift a 1 bit into the sign bit.
        // Under C++11's rules, shifting a 1 bit into the sign bit is
        // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
        // define signed left shifts, so we use the C99 and C++11 rules there).
        // Unsigned shifts can always shift into the top bit.
        llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
        BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
      }
      llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
      llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
      CGF.EmitBlock(Cont);
      // Merge: the base is trivially valid when the exponent check failed
      // (that path reports through the exponent check instead).
      llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
      BaseCheck->addIncoming(Builder.getTrue(), Orig);
      BaseCheck->addIncoming(ValidBase, CheckShiftBase);
      Checks.push_back(std::make_pair(
          BaseCheck, SanitizeSignedBase ? SanitizerKind::SO_ShiftBase
                                        : SanitizerKind::SO_UnsignedShiftBase));
    }

    assert(!Checks.empty());
    EmitBinOpCheck(Checks, Ops);
  }

  return Builder.CreateShl(Ops.LHS, RHS, "shl");
}
4931
4932Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
4933 // TODO: This misses out on the sanitizer check below.
4934 if (Ops.isFixedPointOp())
4935 return EmitFixedPointBinOp(Ops);
4936
4937 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4938 // RHS to the same size as the LHS.
4939 Value *RHS = Ops.RHS;
4940 if (Ops.LHS->getType() != RHS->getType())
4941 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4942
4943 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4944 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4945 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
4946 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
4947 isa<llvm::IntegerType>(Ops.LHS->getType())) {
4948 SanitizerDebugLocation SanScope(&CGF, {SanitizerKind::SO_ShiftExponent},
4949 SanitizerHandler::ShiftOutOfBounds);
4950 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4951 llvm::Value *Valid = Builder.CreateICmpULE(
4952 Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));
4953 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::SO_ShiftExponent), Ops);
4954 }
4955
4956 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4957 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
4958 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
4959}
4960
4962// return corresponding comparison intrinsic for given vector type
4963static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
4964 BuiltinType::Kind ElemKind) {
4965 switch (ElemKind) {
4966 default: llvm_unreachable("unexpected element type");
4967 case BuiltinType::Char_U:
4968 case BuiltinType::UChar:
4969 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4970 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
4971 case BuiltinType::Char_S:
4972 case BuiltinType::SChar:
4973 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4974 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
4975 case BuiltinType::UShort:
4976 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4977 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
4978 case BuiltinType::Short:
4979 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4980 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
4981 case BuiltinType::UInt:
4982 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4983 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
4984 case BuiltinType::Int:
4985 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4986 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
4987 case BuiltinType::ULong:
4988 case BuiltinType::ULongLong:
4989 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4990 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
4991 case BuiltinType::Long:
4992 case BuiltinType::LongLong:
4993 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4994 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
4995 case BuiltinType::Float:
4996 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
4997 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
4998 case BuiltinType::Double:
4999 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
5000 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
5001 case BuiltinType::UInt128:
5002 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
5003 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
5004 case BuiltinType::Int128:
5005 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
5006 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
5007 }
5008}
5009
// Emit a scalar comparison E. UICmpOpc/SICmpOpc/FCmpOpc supply the
// unsigned-integer, signed-integer and floating-point predicates for this
// operator; IsSignaling selects a signaling FP compare. Handles member
// pointers, AltiVec vector comparisons with scalar result (lowered to
// CR6-predicate intrinsics), fixed-point operands, and complex operands
// (equality/inequality only). The scalar result is converted from bool to
// E's type before returning.
5010 Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
5011 llvm::CmpInst::Predicate UICmpOpc,
5012 llvm::CmpInst::Predicate SICmpOpc,
5013 llvm::CmpInst::Predicate FCmpOpc,
5014 bool IsSignaling) {
5015 TestAndClearIgnoreResultAssign();
5016 Value *Result;
5017 QualType LHSTy = E->getLHS()->getType();
5018 QualType RHSTy = E->getRHS()->getType();
5019 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
// Member pointers support only ==/!=; the comparison itself is ABI-specific
// (presumably delegated to the C++ ABI object — confirm against full source).
5020 assert(E->getOpcode() == BO_EQ ||
5021 E->getOpcode() == BO_NE);
5022 Value *LHS = CGF.EmitScalarExpr(E->getLHS());
5023 Value *RHS = CGF.EmitScalarExpr(E->getRHS());
5025 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
5026 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
5027 BinOpInfo BOInfo = EmitBinOps(E);
5028 Value *LHS = BOInfo.LHS;
5029 Value *RHS = BOInfo.RHS;
5030
5031 // If AltiVec, the comparison results in a numeric type, so we use
5032 // intrinsics comparing vectors and giving 0 or 1 as a result
5033 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
5034 // constants for mapping CR6 register bits to predicate result
5035 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
5036
5037 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
5038
5039 // In several cases the vector argument order will be reversed.
5040 Value *FirstVecArg = LHS,
5041 *SecondVecArg = RHS;
5042
5043 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
5044 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
5045
// Map the comparison opcode to a CR6 bit and an AltiVec/VSX predicate
// intrinsic; a<b becomes b>a, and <=/>= of non-float types are expressed as
// "not greater-than" by testing the CR6_EQ bit instead of CR6_LT.
5046 switch(E->getOpcode()) {
5047 default: llvm_unreachable("is not a comparison operation");
5048 case BO_EQ:
5049 CR6 = CR6_LT;
5050 ID = GetIntrinsic(VCMPEQ, ElementKind);
5051 break;
5052 case BO_NE:
5053 CR6 = CR6_EQ;
5054 ID = GetIntrinsic(VCMPEQ, ElementKind);
5055 break;
5056 case BO_LT:
5057 CR6 = CR6_LT;
5058 ID = GetIntrinsic(VCMPGT, ElementKind);
5059 std::swap(FirstVecArg, SecondVecArg);
5060 break;
5061 case BO_GT:
5062 CR6 = CR6_LT;
5063 ID = GetIntrinsic(VCMPGT, ElementKind);
5064 break;
5065 case BO_LE:
5066 if (ElementKind == BuiltinType::Float) {
5067 CR6 = CR6_LT;
5068 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
5069 std::swap(FirstVecArg, SecondVecArg);
5070 }
5071 else {
5072 CR6 = CR6_EQ;
5073 ID = GetIntrinsic(VCMPGT, ElementKind);
5074 }
5075 break;
5076 case BO_GE:
5077 if (ElementKind == BuiltinType::Float) {
5078 CR6 = CR6_LT;
5079 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
5080 }
5081 else {
5082 CR6 = CR6_EQ;
5083 ID = GetIntrinsic(VCMPGT, ElementKind);
5084 std::swap(FirstVecArg, SecondVecArg);
5085 }
5086 break;
5087 }
5088
5089 Value *CR6Param = Builder.getInt32(CR6);
5090 llvm::Function *F = CGF.CGM.getIntrinsic(ID);
5091 Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
5092
5093 // The result type of intrinsic may not be same as E->getType().
5094 // If E->getType() is not BoolTy, EmitScalarConversion will do the
5095 // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
5096 // do nothing, if ResultTy is not i1 at the same time, it will cause
5097 // crash later.
5098 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
5099 if (ResultTy->getBitWidth() > 1 &&
5100 E->getType() == CGF.getContext().BoolTy)
5101 Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
5102 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
5103 E->getExprLoc());
5104 }
5105
// Non-vector scalar compare: fixed-point, FP (possibly signaling), then
// signed vs. unsigned/pointer integer predicates.
5106 if (BOInfo.isFixedPointOp()) {
5107 Result = EmitFixedPointBinOp(BOInfo);
5108 } else if (LHS->getType()->isFPOrFPVectorTy()) {
5109 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
5110 if (!IsSignaling)
5111 Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
5112 else
5113 Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
5114 } else if (LHSTy->hasSignedIntegerRepresentation()) {
5115 Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
5116 } else {
5117 // Unsigned integers and pointers.
5118
5119 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
5122
5123 // Dynamic information is required to be stripped for comparisons,
5124 // because it could leak the dynamic information. Based on comparisons
5125 // of pointers to dynamic objects, the optimizer can replace one pointer
5126 // with another, which might be incorrect in presence of invariant
5127 // groups. Comparison with null is safe because null does not carry any
5128 // dynamic information.
5129 if (LHSTy.mayBeDynamicClass())
5130 LHS = Builder.CreateStripInvariantGroup(LHS);
5131 if (RHSTy.mayBeDynamicClass())
5132 RHS = Builder.CreateStripInvariantGroup(RHS);
5133 }
5134
5135 Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
5136 }
5137
5138 // If this is a vector comparison, sign extend the result to the appropriate
5139 // vector integer type and return it (don't convert to bool).
5140 if (LHSTy->isVectorType())
5141 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
5142
5143 } else {
5144 // Complex Comparison: can only be an equality comparison.
5146 QualType CETy;
// A real operand mixed with a complex one is widened to complex with a zero
// imaginary part before comparing element-wise.
5147 if (auto *CTy = LHSTy->getAs<ComplexType>()) {
5148 LHS = CGF.EmitComplexExpr(E->getLHS());
5149 CETy = CTy->getElementType();
5150 } else {
5151 LHS.first = Visit(E->getLHS());
5152 LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
5153 CETy = LHSTy;
5154 }
5155 if (auto *CTy = RHSTy->getAs<ComplexType>()) {
5156 RHS = CGF.EmitComplexExpr(E->getRHS());
5157 assert(CGF.getContext().hasSameUnqualifiedType(CETy,
5158 CTy->getElementType()) &&
5159 "The element types must always match.");
5160 (void)CTy;
5161 } else {
5162 RHS.first = Visit(E->getRHS());
5163 RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
5164 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
5165 "The element types must always match.");
5166 }
5167
5168 Value *ResultR, *ResultI;
5169 if (CETy->isRealFloatingType()) {
5170 // As complex comparisons can only be equality comparisons, they
5171 // are never signaling comparisons.
5172 ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
5173 ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
5174 } else {
5175 // Complex comparisons can only be equality comparisons. As such, signed
5176 // and unsigned opcodes are the same.
5177 ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
5178 ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
5179 }
5180
// == requires both parts equal (AND); != requires either part unequal (OR).
5181 if (E->getOpcode() == BO_EQ) {
5182 Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
5183 } else {
5184 assert(E->getOpcode() == BO_NE &&
5185 "Complex comparison other than == or != ?");
5186 Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
5187 }
5188 }
5189
5190 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
5191 E->getExprLoc())
5192}
5193
// Emit the RHS of a bit-field assignment. When the RHS is wrapped in an
// integral-cast or lvalue-to-rvalue ImplicitCastExpr, also hand back (via
// *Previous / *SrcType) the pre-conversion value and its type, so the caller
// can check the bit-field store against the original value (used by the
// implicit-conversion/bit-field sanitizers).
5195 const BinaryOperator *E, Value **Previous, QualType *SrcType) {
5196 // In case we have the integer or bitfield sanitizer checks enabled
5197 // we want to get the expression before scalar conversion.
5198 if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
5199 CastKind Kind = ICE->getCastKind();
5200 if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
5201 *SrcType = ICE->getSubExpr()->getType();
5202 *Previous = EmitScalarExpr(ICE->getSubExpr());
5203 // Pass default ScalarConversionOpts to avoid emitting
5204 // integer sanitizer checks as E refers to bitfield.
5205 return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
5206 ICE->getExprLoc());
5207 }
5208 }
// No interesting implicit cast: emit the RHS normally (Previous/SrcType keep
// the values the caller initialized them with).
5209 return EmitScalarExpr(E->getRHS());
5210}
5211
// Emit a scalar assignment "LHS = RHS". Pointer-auth-qualified destinations
// and ObjC ownership-qualified destinations take dedicated paths; bit-field
// stores return the (possibly truncated) stored value and may emit an
// implicit-conversion check. Returns the assignment's value per C/C++
// semantics, or null when the result is ignored.
5212Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
5213 ApplyAtomGroup Grp(CGF.getDebugInfo());
5214 bool Ignore = TestAndClearIgnoreResultAssign();
5215
5216 Value *RHS;
5217 LValue LHS;
5218
// Pointer-authentication-qualified LHS: sign the RHS for the destination
// qualifier, store it, and (if the value is needed) strip the qualifier
// again for the expression result.
5219 if (PointerAuthQualifier PtrAuth = E->getLHS()->getType().getPointerAuth()) {
5222 llvm::Value *RV =
5223 CGF.EmitPointerAuthQualify(PtrAuth, E->getRHS(), LV.getAddress());
5224 CGF.EmitNullabilityCheck(LV, RV, E->getExprLoc());
5226
5227 if (Ignore)
5228 return nullptr;
5229 RV = CGF.EmitPointerAuthUnqualify(PtrAuth, RV, LV.getType(),
5230 LV.getAddress(), /*nonnull*/ false);
5231 return RV;
5232 }
5233
// Dispatch on the ObjC ownership qualifier of the destination (strong,
// autoreleasing, unsafe-unretained, weak, or the plain/none path below).
5234 switch (E->getLHS()->getType().getObjCLifetime()) {
5236 std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
5237 break;
5238
5240 std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
5241 break;
5242
5244 std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
5245 break;
5246
5248 RHS = Visit(E->getRHS());
5249 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
5250 RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
5251 break;
5252
5254 // __block variables need to have the rhs evaluated first, plus
5255 // this should improve codegen just a little.
5256 Value *Previous = nullptr;
5257 QualType SrcType = E->getRHS()->getType();
5258 // Check if LHS is a bitfield, if RHS contains an implicit cast expression
5259 // we want to extract that value and potentially (if the bitfield sanitizer
5260 // is enabled) use it to check for an implicit conversion.
5261 if (E->getLHS()->refersToBitField())
5262 RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
5263 else
5264 RHS = Visit(E->getRHS());
5265
5266 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
5267
5268 // Store the value into the LHS. Bit-fields are handled specially
5269 // because the result is altered by the store, i.e., [C99 6.5.16p1]
5270 // 'An assignment expression has the value of the left operand after
5271 // the assignment...'.
5272 if (LHS.isBitField()) {
5273 CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
5274 // If the expression contained an implicit conversion, make sure
5275 // to use the value before the scalar conversion.
5276 Value *Src = Previous ? Previous : RHS;
5277 QualType DstType = E->getLHS()->getType();
5278 CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
5279 LHS.getBitFieldInfo(), E->getExprLoc());
5280 } else {
5281 CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
5282 CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
5283 }
5284 }
5285 // OpenMP: Handle lastprivate(condition:) in scalar assignment
5286 if (CGF.getLangOpts().OpenMP) {
5288 E->getLHS());
5289 }
5290
5291 // If the result is clearly ignored, return now.
5292 if (Ignore)
5293 return nullptr;
5294
5295 // The result of an assignment in C is the assigned r-value.
5296 if (!CGF.getLangOpts().CPlusPlus)
5297 return RHS;
5298
5299 // If the lvalue is non-volatile, return the computed value of the assignment.
5300 if (!LHS.isVolatileQualified())
5301 return RHS;
5302
5303 // Otherwise, reload the value.
5304 return EmitLoadOfLValue(LHS, E->getExprLoc());
5305}
5306
// Emit "LHS && RHS". Vector operands become an element-wise AND of
// compares-against-zero (no short-circuit). Otherwise, constant-fold the LHS
// when possible to elide control flow, else emit the standard short-circuit
// CFG with an i1 PHI; profiling/coverage (MC/DC) bookkeeping is interleaved
// throughout. The i1 result is zero-extended to the expression type.
5307Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
5308 // Perform vector logical and on comparisons with zero vectors.
5309 if (E->getType()->isVectorType()) {
5311
5312 Value *LHS = Visit(E->getLHS());
5313 Value *RHS = Visit(E->getRHS());
5314 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
5315 if (LHS->getType()->isFPOrFPVectorTy()) {
5316 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
5317 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
5318 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
5319 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
5320 } else {
5321 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
5322 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
5323 }
5324 Value *And = Builder.CreateAnd(LHS, RHS);
5325 return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
5326 }
5327
5328 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
5329 llvm::Type *ResTy = ConvertType(E->getType());
5330
5331 // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
5332 // If we have 1 && X, just emit X without inserting the control flow.
5333 bool LHSCondVal;
5334 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
5335 if (LHSCondVal) { // If we have 1 && X, just emit X.
5337
5338 // If the top of the logical operator nest, reset the MCDC temp to 0.
5339 if (CGF.MCDCLogOpStack.empty())
5341
5342 CGF.MCDCLogOpStack.push_back(E);
5343
5344 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5345
5346 // If we're generating for profiling or coverage, generate a branch to a
5347 // block that increments the RHS counter needed to track branch condition
5348 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
5349 // "FalseBlock" after the increment is done.
5350 if (InstrumentRegions &&
5352 CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5353 llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
5354 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
5355 Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
5356 CGF.EmitBlock(RHSBlockCnt);
5358 CGF.EmitBranch(FBlock);
5359 CGF.EmitBlock(FBlock);
5360 } else
5361 CGF.markStmtMaybeUsed(E->getRHS());
5362
5363 CGF.MCDCLogOpStack.pop_back();
5364 // If the top of the logical operator nest, update the MCDC bitmap.
5365 if (CGF.MCDCLogOpStack.empty())
5367
5368 // ZExt result to int or bool.
5369 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
5370 }
5371
5372 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
5373 if (!CGF.ContainsLabel(E->getRHS())) {
5374 CGF.markStmtMaybeUsed(E->getRHS());
5375 return llvm::Constant::getNullValue(ResTy);
5376 }
5377 }
5378
5379 // If the top of the logical operator nest, reset the MCDC temp to 0.
5380 if (CGF.MCDCLogOpStack.empty())
5382
5383 CGF.MCDCLogOpStack.push_back(E);
5384
5385 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
5386 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
5387
5388 CodeGenFunction::ConditionalEvaluation eval(CGF);
5389
5390 // Branch on the LHS first. If it is false, go to the failure (cont) block.
5391 CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
5392 CGF.getProfileCount(E->getRHS()));
5393
5394 // Any edges into the ContBlock are now from an (indeterminate number of)
5395 // edges from this first condition. All of these values will be false. Start
5396 // setting up the PHI node in the Cont Block for this.
5397 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
5398 "", ContBlock);
5399 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
5400 PI != PE; ++PI)
5401 PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
5402
5403 eval.begin(CGF);
5404 CGF.EmitBlock(RHSBlock);
5406 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5407 eval.end(CGF);
5408
5409 // Reacquire the RHS block, as there may be subblocks inserted.
5410 RHSBlock = Builder.GetInsertBlock();
5411
5412 // If we're generating for profiling or coverage, generate a branch on the
5413 // RHS to a block that increments the RHS true counter needed to track branch
5414 // condition coverage.
5415 if (InstrumentRegions &&
5417 CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5418 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
5419 Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
5420 CGF.EmitBlock(RHSBlockCnt);
5422 CGF.EmitBranch(ContBlock);
5423 PN->addIncoming(RHSCond, RHSBlockCnt);
5424 }
5425
5426 // Emit an unconditional branch from this block to ContBlock.
5427 {
5428 // There is no need to emit line number for unconditional branch.
5429 auto NL = ApplyDebugLocation::CreateEmpty(CGF);
5430 CGF.EmitBlock(ContBlock);
5431 }
5432 // Insert an entry into the phi node for the edge with the value of RHSCond.
5433 PN->addIncoming(RHSCond, RHSBlock);
5434
5435 CGF.MCDCLogOpStack.pop_back();
5436 // If the top of the logical operator nest, update the MCDC bitmap.
5437 if (CGF.MCDCLogOpStack.empty())
5439
5440 // Artificial location to preserve the scope information
5441 {
5443 PN->setDebugLoc(Builder.getCurrentDebugLocation());
5444 }
5445
5446 // ZExt result to int.
5447 return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
5448}
5449
// Emit "LHS || RHS". Mirror image of VisitBinLAnd: vector operands become an
// element-wise OR of compares-against-zero (no short-circuit); otherwise
// constant-fold the LHS to elide control flow when possible, else emit the
// short-circuit CFG with an i1 PHI whose incoming edges from the LHS are all
// true. Profiling/coverage (MC/DC) bookkeeping is interleaved throughout.
5450Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
5451 // Perform vector logical or on comparisons with zero vectors.
5452 if (E->getType()->isVectorType()) {
5454
5455 Value *LHS = Visit(E->getLHS());
5456 Value *RHS = Visit(E->getRHS());
5457 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
5458 if (LHS->getType()->isFPOrFPVectorTy()) {
5459 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
5460 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
5461 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
5462 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
5463 } else {
5464 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
5465 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
5466 }
5467 Value *Or = Builder.CreateOr(LHS, RHS);
5468 return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
5469 }
5470
5471 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
5472 llvm::Type *ResTy = ConvertType(E->getType());
5473
5474 // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
5475 // If we have 0 || X, just emit X without inserting the control flow.
5476 bool LHSCondVal;
5477 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
5478 if (!LHSCondVal) { // If we have 0 || X, just emit X.
5480
5481 // If the top of the logical operator nest, reset the MCDC temp to 0.
5482 if (CGF.MCDCLogOpStack.empty())
5484
5485 CGF.MCDCLogOpStack.push_back(E);
5486
5487 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5488
5489 // If we're generating for profiling or coverage, generate a branch to a
5490 // block that increments the RHS counter needed to track branch condition
5491 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
5492 // "FalseBlock" after the increment is done.
5493 if (InstrumentRegions &&
5495 CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5496 llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
5497 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
5498 Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
5499 CGF.EmitBlock(RHSBlockCnt);
5501 CGF.EmitBranch(FBlock);
5502 CGF.EmitBlock(FBlock);
5503 } else
5504 CGF.markStmtMaybeUsed(E->getRHS());
5505
5506 CGF.MCDCLogOpStack.pop_back();
5507 // If the top of the logical operator nest, update the MCDC bitmap.
5508 if (CGF.MCDCLogOpStack.empty())
5510
5511 // ZExt result to int or bool.
5512 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
5513 }
5514
5515 // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
5516 if (!CGF.ContainsLabel(E->getRHS())) {
5517 CGF.markStmtMaybeUsed(E->getRHS());
5518 return llvm::ConstantInt::get(ResTy, 1);
5519 }
5520 }
5521
5522 // If the top of the logical operator nest, reset the MCDC temp to 0.
5523 if (CGF.MCDCLogOpStack.empty())
5525
5526 CGF.MCDCLogOpStack.push_back(E);
5527
5528 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
5529 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
5530
5531 CodeGenFunction::ConditionalEvaluation eval(CGF);
5532
5533 // Branch on the LHS first. If it is true, go to the success (cont) block.
5534 CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
5536 CGF.getProfileCount(E->getRHS()));
5537
5538 // Any edges into the ContBlock are now from an (indeterminate number of)
5539 // edges from this first condition. All of these values will be true. Start
5540 // setting up the PHI node in the Cont Block for this.
5541 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
5542 "", ContBlock);
5543 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
5544 PI != PE; ++PI)
5545 PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
5546
5547 eval.begin(CGF);
5548
5549 // Emit the RHS condition as a bool value.
5550 CGF.EmitBlock(RHSBlock);
5552 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
5553
5554 eval.end(CGF);
5555
5556 // Reacquire the RHS block, as there may be subblocks inserted.
5557 RHSBlock = Builder.GetInsertBlock();
5558
5559 // If we're generating for profiling or coverage, generate a branch on the
5560 // RHS to a block that increments the RHS true counter needed to track branch
5561 // condition coverage.
5562 if (InstrumentRegions &&
5564 CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
5565 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
5566 Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
5567 CGF.EmitBlock(RHSBlockCnt);
5569 CGF.EmitBranch(ContBlock);
5570 PN->addIncoming(RHSCond, RHSBlockCnt);
5571 }
5572
5573 // Emit an unconditional branch from this block to ContBlock. Insert an entry
5574 // into the phi node for the edge with the value of RHSCond.
5575 CGF.EmitBlock(ContBlock);
5576 PN->addIncoming(RHSCond, RHSBlock);
5577
5578 CGF.MCDCLogOpStack.pop_back();
5579 // If the top of the logical operator nest, update the MCDC bitmap.
5580 if (CGF.MCDCLogOpStack.empty())
5582
5583 // ZExt result to int.
5584 return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
5585}
5586
5587Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
5588 CGF.EmitIgnoredExpr(E->getLHS());
5589 CGF.EnsureInsertPoint();
5590 return Visit(E->getRHS());
5591}
5592
5593//===----------------------------------------------------------------------===//
5594// Other Operators
5595//===----------------------------------------------------------------------===//
5596
5597/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
5598/// expression is cheap enough and side-effect-free enough to evaluate
5599/// unconditionally instead of conditionally. This is used to convert control
5600/// flow into selects in some cases.
5602 CodeGenFunction &CGF) {
5603 // Anything that is an integer or floating point constant is fine.
// Only constant-evaluatable expressions qualify; the comments below explain
// why seemingly-cheap reads (locals, thread_locals) are deliberately excluded.
5604 return E->IgnoreParens()->isEvaluatable(CGF.getContext());
5605
5606 // Even non-volatile automatic variables can't be evaluated unconditionally.
5607 // Referencing a thread_local may cause non-trivial initialization work to
5608 // occur. If we're inside a lambda and one of the variables is from the scope
5609 // outside the lambda, that function may have returned already. Reading its
5610 // locals is a bad idea. Also, these reads may introduce races that didn't
5611 // exist in the source-level program.
5612}
5613
5614
// Emit "cond ? lhs : rhs" (and GNU "?:"). Strategy, in order: constant-fold
// the condition and emit only the live arm; vector/SVE conditions become a
// (masked) select with both arms evaluated; really cheap scalar arms become
// a select without control flow; otherwise emit the full branch/PHI CFG.
5615Value *ScalarExprEmitter::
5616VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
5617 TestAndClearIgnoreResultAssign();
5618
5619 // Bind the common expression if necessary.
5620 CodeGenFunction::OpaqueValueMapping binding(CGF, E);
5621
5622 Expr *condExpr = E->getCond();
5623 Expr *lhsExpr = E->getTrueExpr();
5624 Expr *rhsExpr = E->getFalseExpr();
5625
5626 // If the condition constant folds and can be elided, try to avoid emitting
5627 // the condition and the dead arm.
5628 bool CondExprBool;
5629 if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
5630 Expr *live = lhsExpr, *dead = rhsExpr;
5631 if (!CondExprBool) std::swap(live, dead);
5632
5633 // If the dead side doesn't have labels we need, just emit the Live part.
5634 if (!CGF.ContainsLabel(dead)) {
5635 if (CondExprBool) {
5637 CGF.incrementProfileCounter(lhsExpr);
5638 CGF.incrementProfileCounter(rhsExpr);
5639 }
5641 }
5642 Value *Result = Visit(live);
5643 CGF.markStmtMaybeUsed(dead);
5644
5645 // If the live part is a throw expression, it acts like it has a void
5646 // type, so evaluating it returns a null Value*. However, a conditional
5647 // with non-void type must return a non-null Value*.
5648 if (!Result && !E->getType()->isVoidType())
5649 Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
5650
5651 return Result;
5652 }
5653 }
5654
5655 // OpenCL: If the condition is a vector, we can treat this condition like
5656 // the select function.
5657 if (CGF.getLangOpts().OpenCL && (condExpr->getType()->isVectorType() ||
5658 condExpr->getType()->isExtVectorType())) {
5660
// Both arms are evaluated unconditionally; the result is blended per-lane
// using the sign bit (MSB) of each condition element as the mask.
5661 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5662 llvm::Value *LHS = Visit(lhsExpr);
5663 llvm::Value *RHS = Visit(rhsExpr);
5664
5665 llvm::Type *condType = ConvertType(condExpr->getType());
5666 auto *vecTy = cast<llvm::FixedVectorType>(condType);
5667
5668 unsigned numElem = vecTy->getNumElements();
5669 llvm::Type *elemType = vecTy->getElementType();
5670
5671 llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
5672 llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
5673 llvm::Value *tmp = Builder.CreateSExt(
5674 TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
5675 llvm::Value *tmp2 = Builder.CreateNot(tmp);
5676
5677 // Cast float to int to perform ANDs if necessary.
5678 llvm::Value *RHSTmp = RHS;
5679 llvm::Value *LHSTmp = LHS;
5680 bool wasCast = false;
5681 llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
5682 if (rhsVTy->getElementType()->isFloatingPointTy()) {
5683 RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
5684 LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
5685 wasCast = true;
5686 }
5687
// Blend: (RHS & ~mask) | (LHS & mask), then cast back to FP if needed.
5688 llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
5689 llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
5690 llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
5691 if (wasCast)
5692 tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
5693
5694 return tmp5;
5695 }
5696
5697 if (condExpr->getType()->isVectorType() ||
5698 condExpr->getType()->isSveVLSBuiltinType()) {
5700
// Vector/SVE condition: evaluate both arms and emit an IR select; i1 element
// masks are used directly, otherwise the mask is derived per-lane.
5701 llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
5702 llvm::Value *LHS = Visit(lhsExpr);
5703 llvm::Value *RHS = Visit(rhsExpr);
5704
5705 llvm::Type *CondType = ConvertType(condExpr->getType());
5706 auto *VecTy = cast<llvm::VectorType>(CondType);
5707
5708 if (VecTy->getElementType()->isIntegerTy(1))
5709 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
5710
5711 // OpenCL uses the MSB of the mask vector.
5712 llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
5713 if (condExpr->getType()->isExtVectorType())
5714 CondV = Builder.CreateICmpSLT(CondV, ZeroVec, "vector_cond");
5715 else
5716 CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
5717 return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
5718 }
5719
5720 // If this is a really simple expression (like x ? 4 : 5), emit this as a
5721 // select instead of as control flow. We can only do this if it is cheap and
5722 // safe to evaluate the LHS and RHS unconditionally.
5723 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
5725 llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
5726 llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
5727
5729 CGF.incrementProfileCounter(lhsExpr);
5730 CGF.incrementProfileCounter(rhsExpr);
5732 } else
5733 CGF.incrementProfileCounter(E, StepV);
5734
5735 llvm::Value *LHS = Visit(lhsExpr);
5736 llvm::Value *RHS = Visit(rhsExpr);
5737 if (!LHS) {
5738 // If the conditional has void type, make sure we return a null Value*.
5739 assert(!RHS && "LHS and RHS types must match");
5740 return nullptr;
5741 }
5742 return Builder.CreateSelect(CondV, LHS, RHS, "cond");
5743 }
5744
// General case: branch on the condition, emit each arm in its own block
// under ConditionalEvaluation, and join the values with a PHI.
5745 // If the top of the logical operator nest, reset the MCDC temp to 0.
5746 if (CGF.MCDCLogOpStack.empty())
5747 CGF.maybeResetMCDCCondBitmap(condExpr);
5748
5749 llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
5750 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
5751 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
5752
5753 CodeGenFunction::ConditionalEvaluation eval(CGF);
5754 CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
5755 CGF.getProfileCount(lhsExpr));
5756
5757 CGF.EmitBlock(LHSBlock);
5758
5759 // If the top of the logical operator nest, update the MCDC bitmap for the
5760 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5761 // may also contain a boolean expression.
5762 if (CGF.MCDCLogOpStack.empty())
5763 CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);
5764
5766 CGF.incrementProfileCounter(lhsExpr);
5767 else
5769
5770 eval.begin(CGF);
5771 Value *LHS = Visit(lhsExpr);
5772 eval.end(CGF);
5773
5774 LHSBlock = Builder.GetInsertBlock();
5775 Builder.CreateBr(ContBlock);
5776
5777 CGF.EmitBlock(RHSBlock);
5778
5779 // If the top of the logical operator nest, update the MCDC bitmap for the
5780 // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
5781 // may also contain a boolean expression.
5782 if (CGF.MCDCLogOpStack.empty())
5783 CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);
5784
5786 CGF.incrementProfileCounter(rhsExpr);
5787
5788 eval.begin(CGF);
5789 Value *RHS = Visit(rhsExpr);
5790 eval.end(CGF);
5791
5792 RHSBlock = Builder.GetInsertBlock();
5793 CGF.EmitBlock(ContBlock);
5794
5795 // If the LHS or RHS is a throw expression, it will be legitimately null.
5796 if (!LHS)
5797 return RHS;
5798 if (!RHS)
5799 return LHS;
5800
5801 // Create a PHI node for the real part.
5802 llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
5803 PN->addIncoming(LHS, LHSBlock);
5804 PN->addIncoming(RHS, RHSBlock);
5805
5806 // When single byte coverage mode is enabled, add a counter to continuation
5807 // block.
5810
5811 return PN;
5812}
5813
5814Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
5815 return Visit(E->getChosenSubExpr());
5816}
5817
5818Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
5819 Address ArgValue = Address::invalid();
5820 RValue ArgPtr = CGF.EmitVAArg(VE, ArgValue);
5821
5822 return ArgPtr.getScalarVal();
5823}
5824
5825Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
5826 return CGF.EmitBlockLiteral(block);
5827}
5828
5829// Convert a vec3 to vec4, or vice versa.
5831 Value *Src, unsigned NumElementsDst) {
5832 static constexpr int Mask[] = {0, 1, 2, -1};
5833 return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
5834}
5835
5836// Create cast instructions for converting LLVM value \p Src to LLVM type \p
5837// DstTy. \p Src has the same size as \p DstTy. Both are single value types
5838// but could be scalar or vectors of different lengths, and either can be
5839// pointer.
5840// There are 4 cases:
5841// 1. non-pointer -> non-pointer : needs 1 bitcast
5842// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
5843// 3. pointer -> non-pointer
5844// a) pointer -> intptr_t : needs 1 ptrtoint
5845// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
5846// 4. non-pointer -> pointer
5847// a) intptr_t -> pointer : needs 1 inttoptr
5848// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
5849// Note: for cases 3b and 4b two casts are required since LLVM casts do not
5850// allow casting directly between pointer types and non-integer non-pointer
5851// types.
5853 const llvm::DataLayout &DL,
5854 Value *Src, llvm::Type *DstTy,
5855 StringRef Name = "") {
5856 auto SrcTy = Src->getType();
5857
5858 // Case 1.
5859 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
5860 return Builder.CreateBitCast(Src, DstTy, Name);
5861
5862 // Case 2.
5863 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
5864 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
5865
5866 // Case 3.
5867 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
5868 // Case 3b.
5869 if (!DstTy->isIntegerTy())
5870 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
5871 // Cases 3a and 3b.
5872 return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
5873 }
5874
5875 // Case 4b.
5876 if (!SrcTy->isIntegerTy())
5877 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
5878 // Cases 4a and 4b.
5879 return Builder.CreateIntToPtr(Src, DstTy, Name);
5880}
5881
5882Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
5883 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
5884 llvm::Type *DstTy = ConvertType(E->getType());
5885
5886 llvm::Type *SrcTy = Src->getType();
5887 unsigned NumElementsSrc =
5889 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
5890 : 0;
5891 unsigned NumElementsDst =
5893 ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
5894 : 0;
5895
5896 // Use bit vector expansion for ext_vector_type boolean vectors.
5897 if (E->getType()->isExtVectorBoolType())
5898 return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");
5899
5900 // Going from vec3 to non-vec3 is a special case and requires a shuffle
5901 // vector to get a vec4, then a bitcast if the target type is different.
5902 if (NumElementsSrc == 3 && NumElementsDst != 3) {
5903 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
5904 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5905 DstTy);
5906
5907 Src->setName("astype");
5908 return Src;
5909 }
5910
5911 // Going from non-vec3 to vec3 is a special case and requires a bitcast
5912 // to vec4 if the original type is not vec4, then a shuffle vector to
5913 // get a vec3.
5914 if (NumElementsSrc != 3 && NumElementsDst == 3) {
5915 auto *Vec4Ty = llvm::FixedVectorType::get(
5916 cast<llvm::VectorType>(DstTy)->getElementType(), 4);
5917 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5918 Vec4Ty);
5919
5920 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
5921 Src->setName("astype");
5922 return Src;
5923 }
5924
5925 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
5926 Src, DstTy, "astype");
5927}
5928
5929Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
5930 return CGF.EmitAtomicExpr(E).getScalarVal();
5931}
5932
5933//===----------------------------------------------------------------------===//
5934// Entry Point into this File
5935//===----------------------------------------------------------------------===//
5936
5937/// Emit the computation of the specified expression of scalar type, ignoring
5938/// the result.
5939Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
5940 assert(E && hasScalarEvaluationKind(E->getType()) &&
5941 "Invalid scalar expression to emit");
5942
5943 return ScalarExprEmitter(*this, IgnoreResultAssign)
5944 .Visit(const_cast<Expr *>(E));
5945}
5946
5947/// Emit a conversion from the specified type to the specified destination type,
5948/// both of which are LLVM scalar types.
5950 QualType DstTy,
5951 SourceLocation Loc) {
5952 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
5953 "Invalid scalar expression to emit");
5954 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
5955}
5956
5957/// Emit a conversion from the specified complex type to the specified
5958/// destination type, where the destination type is an LLVM scalar type.
5960 QualType SrcTy,
5961 QualType DstTy,
5962 SourceLocation Loc) {
5963 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
5964 "Invalid complex -> scalar conversion");
5965 return ScalarExprEmitter(*this)
5966 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
5967}
5968
5969
5970Value *
5972 QualType PromotionType) {
5973 if (!PromotionType.isNull())
5974 return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
5975 else
5976 return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
5977}
5978
5979
5982 bool isInc, bool isPre) {
5983 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
5984}
5985
5987 // object->isa or (*object).isa
5988 // Generate code as for: *(Class*)object
5989
5990 Expr *BaseExpr = E->getBase();
5992 if (BaseExpr->isPRValue()) {
5993 llvm::Type *BaseTy =
5995 Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
5996 } else {
5997 Addr = EmitLValue(BaseExpr).getAddress();
5998 }
5999
6000 // Cast the address to Class*.
6001 Addr = Addr.withElementType(ConvertType(E->getType()));
6002 return MakeAddrLValue(Addr, E->getType());
6003}
6004
6005
6007 const CompoundAssignOperator *E) {
6009 ScalarExprEmitter Scalar(*this);
6010 Value *Result = nullptr;
6011 switch (E->getOpcode()) {
6012#define COMPOUND_OP(Op) \
6013 case BO_##Op##Assign: \
6014 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
6015 Result)
6016 COMPOUND_OP(Mul);
6017 COMPOUND_OP(Div);
6018 COMPOUND_OP(Rem);
6019 COMPOUND_OP(Add);
6020 COMPOUND_OP(Sub);
6021 COMPOUND_OP(Shl);
6022 COMPOUND_OP(Shr);
6024 COMPOUND_OP(Xor);
6025 COMPOUND_OP(Or);
6026#undef COMPOUND_OP
6027
6028 case BO_PtrMemD:
6029 case BO_PtrMemI:
6030 case BO_Mul:
6031 case BO_Div:
6032 case BO_Rem:
6033 case BO_Add:
6034 case BO_Sub:
6035 case BO_Shl:
6036 case BO_Shr:
6037 case BO_LT:
6038 case BO_GT:
6039 case BO_LE:
6040 case BO_GE:
6041 case BO_EQ:
6042 case BO_NE:
6043 case BO_Cmp:
6044 case BO_And:
6045 case BO_Xor:
6046 case BO_Or:
6047 case BO_LAnd:
6048 case BO_LOr:
6049 case BO_Assign:
6050 case BO_Comma:
6051 llvm_unreachable("Not valid compound assignment operators");
6052 }
6053
6054 llvm_unreachable("Unhandled compound assignment operator");
6055}
6056
6058 // The total (signed) byte offset for the GEP.
6059 llvm::Value *TotalOffset;
6060 // The offset overflow flag - true if the total offset overflows.
6061 llvm::Value *OffsetOverflows;
6062};
6063
6064/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
6065/// and compute the total offset it applies from it's base pointer BasePtr.
6066/// Returns offset in bytes and a boolean flag whether an overflow happened
6067/// during evaluation.
6069 llvm::LLVMContext &VMContext,
6070 CodeGenModule &CGM,
6071 CGBuilderTy &Builder) {
6072 const auto &DL = CGM.getDataLayout();
6073
6074 // The total (signed) byte offset for the GEP.
6075 llvm::Value *TotalOffset = nullptr;
6076
6077 // Was the GEP already reduced to a constant?
6078 if (isa<llvm::Constant>(GEPVal)) {
6079 // Compute the offset by casting both pointers to integers and subtracting:
6080 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
6081 Value *BasePtr_int =
6082 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
6083 Value *GEPVal_int =
6084 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
6085 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
6086 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
6087 }
6088
6089 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
6090 assert(GEP->getPointerOperand() == BasePtr &&
6091 "BasePtr must be the base of the GEP.");
6092 assert(GEP->isInBounds() && "Expected inbounds GEP");
6093
6094 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
6095
6096 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
6097 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
6098 auto *SAddIntrinsic =
6099 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
6100 auto *SMulIntrinsic =
6101 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
6102
6103 // The offset overflow flag - true if the total offset overflows.
6104 llvm::Value *OffsetOverflows = Builder.getFalse();
6105
6106 /// Return the result of the given binary operation.
6107 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
6108 llvm::Value *RHS) -> llvm::Value * {
6109 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
6110
6111 // If the operands are constants, return a constant result.
6112 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
6113 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
6114 llvm::APInt N;
6115 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
6116 /*Signed=*/true, N);
6117 if (HasOverflow)
6118 OffsetOverflows = Builder.getTrue();
6119 return llvm::ConstantInt::get(VMContext, N);
6120 }
6121 }
6122
6123 // Otherwise, compute the result with checked arithmetic.
6124 auto *ResultAndOverflow = Builder.CreateCall(
6125 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
6126 OffsetOverflows = Builder.CreateOr(
6127 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
6128 return Builder.CreateExtractValue(ResultAndOverflow, 0);
6129 };
6130
6131 // Determine the total byte offset by looking at each GEP operand.
6132 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
6133 GTI != GTE; ++GTI) {
6134 llvm::Value *LocalOffset;
6135 auto *Index = GTI.getOperand();
6136 // Compute the local offset contributed by this indexing step:
6137 if (auto *STy = GTI.getStructTypeOrNull()) {
6138 // For struct indexing, the local offset is the byte position of the
6139 // specified field.
6140 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
6141 LocalOffset = llvm::ConstantInt::get(
6142 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
6143 } else {
6144 // Otherwise this is array-like indexing. The local offset is the index
6145 // multiplied by the element size.
6146 auto *ElementSize =
6147 llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
6148 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
6149 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
6150 }
6151
6152 // If this is the first offset, set it as the total offset. Otherwise, add
6153 // the local offset into the running total.
6154 if (!TotalOffset || TotalOffset == Zero)
6155 TotalOffset = LocalOffset;
6156 else
6157 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
6158 }
6159
6160 return {TotalOffset, OffsetOverflows};
6161}
6162
/// Emit an inbounds GEP of \p Ptr by \p IdxList and, when
/// -fsanitize=pointer-overflow is enabled, emit the dynamic checks that the
/// arithmetic neither overflows nor produces/loses a null pointer.
Value *
CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
                                        ArrayRef<Value *> IdxList,
                                        bool SignedIndices, bool IsSubtraction,
                                        SourceLocation Loc, const Twine &Name) {
  llvm::Type *PtrTy = Ptr->getType();

  // With unsigned indices and no subtraction the offset can also be marked
  // nuw in addition to inbounds.
  llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
  if (!SignedIndices && !IsSubtraction)
    NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();

  Value *GEPVal = Builder.CreateGEP(ElemTy, Ptr, IdxList, Name, NWFlags);

  // If the pointer overflow sanitizer isn't enabled, do nothing.
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return GEPVal;

  // Perform nullptr-and-offset check unless the nullptr is defined.
  bool PerformNullCheck = !NullPointerIsDefined(
      Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
  // Check for overflows unless the GEP got constant-folded,
  // and only in the default address space
  bool PerformOverflowCheck =
      !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;

  if (!(PerformNullCheck || PerformOverflowCheck))
    return GEPVal;

  const auto &DL = CGM.getDataLayout();

  auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
  auto CheckHandler = SanitizerHandler::PointerOverflow;
  SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
  llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);

  // Reduce the GEP to a single signed byte offset from Ptr.
  GEPOffsetAndOverflow EvaluatedGEP =
      EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);

  assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
          EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
         "If the offset got constant-folded, we don't expect that there was an "
         "overflow.");

  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);

  // Common case: if the total offset is zero, don't emit a check.
  if (EvaluatedGEP.TotalOffset == Zero)
    return GEPVal;

  // Now that we've computed the total offset, add it to the base pointer (with
  // wrapping semantics).
  auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
  auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);

  llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
                    2>
      Checks;

  if (PerformNullCheck) {
    // If the base pointer evaluates to a null pointer value,
    // the only valid  pointer this inbounds GEP can produce is also
    // a null pointer, so the offset must also evaluate to zero.
    // Likewise, if we have non-zero base pointer, we can not get null pointer
    // as a result, so the offset can not be -intptr_t(BasePtr).
    // In other words, both pointers are either null, or both are non-null,
    // or the behaviour is undefined.
    auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
    auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
    auto *Valid = Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr);
    Checks.emplace_back(Valid, CheckOrdinal);
  }

  if (PerformOverflowCheck) {
    // The GEP is valid if:
    // 1) The total offset doesn't overflow, and
    // 2) The sign of the difference between the computed address and the base
    // pointer matches the sign of the total offset.
    llvm::Value *ValidGEP;
    auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
    if (SignedIndices) {
      // GEP is computed as `unsigned base + signed offset`, therefore:
      // * If offset was positive, then the computed pointer can not be
      //   [unsigned] less than the base pointer, unless it overflowed.
      // * If offset was negative, then the computed pointer can not be
      //   [unsigned] greater than the base pointer, unless it overflowed.
      auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
      auto *PosOrZeroOffset =
          Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
      llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
      ValidGEP =
          Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
    } else if (!IsSubtraction) {
      // GEP is computed as `unsigned base + unsigned offset`, therefore the
      // computed pointer can not be [unsigned] less than base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
      ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
    } else {
      // GEP is computed as `unsigned base - unsigned offset`, therefore the
      // computed pointer can not be [unsigned] greater than base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
      ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
    }
    ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
    Checks.emplace_back(ValidGEP, CheckOrdinal);
  }

  assert(!Checks.empty() && "Should have produced some checks.");

  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
  EmitCheck(Checks, CheckHandler, StaticArgs, DynamicArgs);

  return GEPVal;
}
6280
6282 Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
6283 bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
6284 const Twine &Name) {
6285 if (!SanOpts.has(SanitizerKind::PointerOverflow)) {
6286 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6287 if (!SignedIndices && !IsSubtraction)
6288 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6289
6290 return Builder.CreateGEP(Addr, IdxList, elementType, Align, Name, NWFlags);
6291 }
6292
6293 return RawAddress(
6294 EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
6295 IdxList, SignedIndices, IsSubtraction, Loc, Name),
6296 elementType, Align);
6297}
Defines the clang::ASTContext interface.
#define V(N, I)
static llvm::Value * EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, const BinaryOperator *E, llvm::Value *LHS, llvm::Value *RHS, CompareKind Kind, const char *NameSuffix="")
static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue DestVal, LValue SrcVal, SourceLocation Loc)
static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty)
static llvm::Value * EmitIsNegativeTestHelper(Value *V, QualType VType, const char *Name, CGBuilderTy &Builder)
static Value * createCastsForTypeOfSameSize(CGBuilderTy &Builder, const llvm::DataLayout &DL, Value *Src, llvm::Type *DstTy, StringRef Name="")
static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E)
static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc, bool isPre, ASTContext &Ctx)
For the purposes of overflow pattern exclusion, does this match the "while(i--)" pattern?
IntrinsicType
@ VCMPGT
@ VCMPEQ
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, BuiltinType::Kind ElemKind)
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal, llvm::LLVMContext &VMContext, CodeGenModule &CGM, CGBuilderTy &Builder)
Evaluate given GEPVal, which is either an inbounds GEP, or a constant, and compute the total offset i...
static bool isDeclRefKnownNonNull(CodeGenFunction &CGF, const ValueDecl *D)
static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(QualType SrcType, QualType DstType)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool negMul, bool negAdd)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, unsigned Off)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF, Value *Src, unsigned NumElementsDst)
static Value * tryEmitFMulAdd(const BinOpInfo &op, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool isSub=false)
static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E, llvm::Value *InVal, bool IsInc, FPOptions FPFeatures)
#define HANDLE_BINOP(OP)
#define COMPOUND_OP(Op)
#define HANDLEBINOP(OP)
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e, CIRGenFunction &cgf)
Return true if the specified expression is cheap enough and side-effect-free enough to evaluate uncon...
static std::optional< QualType > getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e)
If e is a widened promoted integer, get its base (unpromoted) type.
#define VISITCOMP(CODE)
static uint32_t getBitWidth(const Expr *E)
llvm::APSInt APSInt
Definition Compiler.cpp:24
static Decl::Kind getKind(const Decl *D)
FormatToken * Previous
The previous token in the unwrapped line.
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
This file contains the declaration of TrapReasonBuilder and related classes.
llvm::APInt getValue() const
APSInt & getInt()
Definition APValue.h:489
bool isLValue() const
Definition APValue.h:472
bool isInt() const
Definition APValue.h:467
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
ParentMapContext & getParentMapContext()
Returns the dynamic AST node parent map context.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
CanQualType FloatTy
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
const LangOptions & getLangOpts() const
Definition ASTContext.h:930
bool isTypeIgnoredBySanitizer(const SanitizerMask &Mask, const QualType &Ty) const
Check if a type can have its sanitizer instrumentation elided based on its presence within an ignorel...
CanQualType BoolTy
unsigned getOpenMPDefaultSimdAlign(QualType T) const
Get default simd alignment of the specified complete type in bits.
llvm::FixedPointSemantics getFixedPointSemantics(QualType Ty) const
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getPromotedIntegerType(QualType PromotableType) const
Return the type that PromotableType will promote to: C99 6.3.1.1p2, assuming that PromotableType is a...
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getComplexType(QualType T) const
Return the uniqued reference to the type for a complex number with the specified element type.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:895
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
bool isPromotableIntegerType(QualType T) const
More type predicates useful for type checking/promotion.
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4465
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4471
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4477
LabelDecl * getLabel() const
Definition Expr.h:4507
uint64_t getValue() const
Definition ExprCXX.h:3044
QualType getElementType() const
Definition TypeBase.h:3734
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:6638
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:3972
Expr * getLHS() const
Definition Expr.h:4022
static Opcode getOpForCompoundAssignment(Opcode Opc)
Definition Expr.h:4119
bool isCompoundAssignmentOp() const
Definition Expr.h:4116
SourceLocation getExprLoc() const
Definition Expr.h:4013
bool isShiftOp() const
Definition Expr.h:4061
Expr * getRHS() const
Definition Expr.h:4024
bool isShiftAssignOp() const
Definition Expr.h:4130
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:4185
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, const Expr *LHS, const Expr *RHS)
Return true if a binary operator using the specified opcode and operands would match the 'p = (i8*)nu...
Definition Expr.cpp:2201
Opcode getOpcode() const
Definition Expr.h:4017
BinaryOperatorKind Opcode
Definition Expr.h:3977
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition DeclCXX.h:249
bool getValue() const
Definition ExprCXX.h:740
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
bool getValue() const
Definition ExprCXX.h:4332
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition ExprCXX.h:304
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1599
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3610
path_iterator path_begin()
Definition Expr.h:3680
CastKind getCastKind() const
Definition Expr.h:3654
bool changesVolatileQualification() const
Return.
Definition Expr.h:3744
path_iterator path_end()
Definition Expr.h:3681
Expr * getSubExpr()
Definition Expr.h:3660
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
bool isOne() const
isOne - Test whether the quantity equals one.
Definition CharUnits.h:125
unsigned getValue() const
Definition Expr.h:1629
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4818
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
SanitizerSet SanitizeTrap
Set of sanitizer checks that trap rather than diagnose.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
static Address invalid()
Definition Address.h:176
bool isValid() const
Definition Address.h:177
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:112
virtual llvm::Constant * EmitNullMemberPointer(const MemberPointerType *MPT)
Create a null member pointer of the given type.
Definition CGCXXABI.cpp:103
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition CGCXXABI.cpp:95
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition CGCXXABI.cpp:85
virtual llvm::Value * EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *Src)
Perform a derived-to-base, base-to-derived, or bitcast member pointer conversion.
Definition CGCXXABI.cpp:72
void EmitPseudoVariable(CGBuilderTy &Builder, llvm::Instruction *Value, QualType Ty)
Emit a pseudo variable and debug info for an intermediate value if it does not correspond to a variab...
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
void emitInitListOpaqueValues(CodeGenFunction &CGF, InitListExpr *E)
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
Produce the code for a CK_ARCConsumeObject.
Definition CGObjC.cpp:2152
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
Definition CGObjC.cpp:573
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
CurrentSourceLocExprScope CurSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
llvm::Value * EmitARCReclaimReturnedObject(const Expr *e, bool allowUnsafeClaim)
Definition CGObjC.cpp:3089
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
Definition CGObjC.cpp:3679
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:6795
llvm::Value * EmitObjCSelectorExpr(const ObjCSelectorExpr *E)
Emit a selector.
Definition CGObjC.cpp:257
SanitizerSet SanOpts
Sanitizers enabled for this function.
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
Definition CGExpr.cpp:183
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
llvm::Value * EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E)
Definition CGObjC.cpp:251
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
const CastExpr * CurCast
If a cast expression is being visited, this holds the current cast's expression.
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitObjCProtocolExpr(const ObjCProtocolExpr *E)
Definition CGObjC.cpp:265
llvm::Value * EmitPointerAuthQualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType ValueType, Address StorageAddress, bool IsKnownNonNull)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
Definition CGExpr.cpp:2747
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3745
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition CGCall.cpp:6329
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
Definition CGExpr.cpp:1238
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
Definition CGExpr.cpp:6895
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void maybeUpdateMCDCTestVectorBitmap(const Expr *E)
Increment the profiler's counter for the given expression by StepV.
void EmitCXXDeleteExpr(const CXXDeleteExpr *E)
llvm::Value * EmitObjCArrayLiteral(const ObjCArrayLiteral *E)
Definition CGObjC.cpp:247
llvm::Value * EmitPromotedScalarExpr(const Expr *E, QualType PromotionType)
const LangOptions & getLangOpts() const
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
Store into a strong object.
Definition CGObjC.cpp:2545
bool isPointerKnownNonNull(const Expr *E)
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
Definition CGClass.cpp:394
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
Definition CGDecl.cpp:765
llvm::Value * EmitPointerAuthUnqualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType PointerType, Address StorageAddress, bool IsKnownNonNull)
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
Emit a compare-and-exchange op for atomic type.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
Definition CGClass.cpp:2890
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitCXXNewExpr(const CXXNewExpr *E)
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
std::pair< LValue, llvm::Value * > EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored)
Definition CGObjC.cpp:3629
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3635
LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E, llvm::Value *&Result)
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition CGExpr.cpp:176
const TargetInfo & getTarget() const
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
llvm::Value * EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty)
Definition CGObjC.cpp:3953
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:244
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:6041
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2377
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
llvm::Value * EmitObjCBoxedExpr(const ObjCBoxedExpr *E)
EmitObjCBoxedExpr - This routine generates code to call the appropriate expression boxing method.
Definition CGObjC.cpp:64
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:225
void maybeResetMCDCCondBitmap(const Expr *E)
Zero-init the MCDC temp value.
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:3893
SmallVector< const BinaryOperator *, 16 > MCDCLogOpStack
Stack to track the Logical Operator recursion nest for MC/DC.
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:5994
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
Definition CGExpr.cpp:2004
llvm::Value * EmitARCRetainScalarExpr(const Expr *expr)
EmitARCRetainScalarExpr - Semantically equivalent to EmitARCRetainObject(e->getType(),...
Definition CGObjC.cpp:3493
llvm::Value * EmitBlockLiteral(const BlockExpr *)
Emit block literal.
Definition CGBlocks.cpp:764
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2190
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:5980
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2574
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4323
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition CGStmt.cpp:569
llvm::AtomicRMWInst * emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Order=llvm::AtomicOrdering::SequentiallyConsistent, llvm::SyncScope::ID SSID=llvm::SyncScope::System, const AtomicExpr *AE=nullptr)
Emit an atomicrmw instruction, and applying relevant metadata when applicable.
llvm::Value * EmitPointerArithmetic(const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer, Expr *indexOperand, llvm::Value *index, bool isSubtraction)
Emit pointer + index arithmetic.
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:266
uint64_t getCurrentProfileCount()
Get the profiler's current count.
llvm::Type * ConvertTypeForMem(QualType T)
RValue EmitAtomicExpr(AtomicExpr *E)
Definition CGAtomic.cpp:899
void markStmtMaybeUsed(const Stmt *S)
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
void FlattenAccessAndTypeLValue(LValue LVal, SmallVectorImpl< LValue > &AccessList)
Definition CGExpr.cpp:6904
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1552
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition CGStmt.cpp:672
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition CGExpr.cpp:1633
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition CGExpr.cpp:188
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
Definition CGExpr.cpp:736
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitBuiltinAvailable(const VersionTuple &Version)
Definition CGObjC.cpp:4033
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
Definition CGExpr.cpp:4869
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4235
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of 'this'.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2220
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
llvm::Value * EmitObjCStringLiteral(const ObjCStringLiteral *E)
Emits an instance of NSConstantString representing the object.
Definition CGObjC.cpp:51
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr)
Try to emit a reference to the given value without producing it as an l-value.
Definition CGExpr.cpp:1901
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1668
llvm::Value * EmitARCExtendBlockObject(const Expr *expr)
Definition CGObjC.cpp:3524
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
i8* @objc_storeWeak(i8** addr, i8* value) Returns value.
Definition CGObjC.cpp:2651
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType)
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:652
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition CGExpr.cpp:1348
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
TrapReasonBuilder BuildTrapReason(unsigned DiagID, TrapReason &TR)
Helper function to construct a TrapReasonBuilder.
llvm::Constant * getNullPointer(llvm::PointerType *T, QualType QT)
Get target specific null pointer.
const TargetInfo & getTarget() const
llvm::Constant * getMemberPointerConstant(const UnaryOperator *e)
const llvm::DataLayout & getDataLayout() const
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Value * createOpenCLIntToSamplerConversion(const Expr *E, CodeGenFunction &CGF)
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
LValue - This represents an lvalue references.
Definition CGValue.h:183
bool isBitField() const
Definition CGValue.h:281
bool isVolatileQualified() const
Definition CGValue.h:286
const Qualifiers & getQuals() const
Definition CGValue.h:339
Address getAddress() const
Definition CGValue.h:362
QualType getType() const
Definition CGValue.h:292
const CGBitFieldInfo & getBitFieldInfo() const
Definition CGValue.h:425
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:99
bool isAggregate() const
Definition CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:84
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:72
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, llvm::Type *DestTy, bool IsNonNull=false) const
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4234
QualType getComputationLHSType() const
Definition Expr.h:4268
QualType getComputationResultType() const
Definition Expr.h:4271
bool isSatisfied() const
Whether or not the concept with the given arguments was satisfied when the expression was created.
APValue getAPValueResult() const
Definition Expr.cpp:409
bool hasAPValueResult() const
Definition Expr.h:1157
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4373
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:4743
T * getAttr() const
Definition DeclBase.h:573
ChildElementIter< false > begin()
Definition Expr.h:5166
size_t getDataElementCount() const
Definition Expr.h:5082
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isIntegerConstantExpr(const ASTContext &Ctx) const
bool isGLValue() const
Definition Expr.h:287
@ SE_AllowSideEffects
Allow any unmodeled side effect.
Definition Expr.h:674
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3082
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3066
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition Expr.h:476
QualType getType() const
Definition Expr.h:144
llvm::APInt getValue() const
Returns an internal integer representation of the literal.
Definition Expr.h:1575
llvm::APFloat getValue() const
Definition Expr.h:1666
const Expr * getSubExpr() const
Definition Expr.h:1062
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition Expr.h:3787
unsigned getNumInits() const
Definition Expr.h:5263
bool hadArrayRangeDesignator() const
Definition Expr.h:5417
const Expr * getInit(unsigned Init) const
Definition Expr.h:5287
@ PostDecrInWhile
while (count–)
bool isSignedOverflowDefined() const
bool isOverflowPatternExcluded(OverflowPatternExclusionKind Kind) const
std::string OverflowHandler
The name of the handler function to be called when -ftrapv is specified.
Represents a matrix type, as defined in the Matrix Types clang extensions.
Definition TypeBase.h:4337
Expr * getBase() const
Definition Expr.h:3375
bool isArrow() const
Definition Expr.h:3482
VersionTuple getVersion() const
Definition ExprObjC.h:1723
ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC 'id' type.
Definition ExprObjC.h:1495
Expr * getBase() const
Definition ExprObjC.h:1520
SourceLocation getExprLoc() const LLVM_READONLY
Definition ExprObjC.h:1543
const ObjCMethodDecl * getMethodDecl() const
Definition ExprObjC.h:1361
QualType getReturnType() const
Definition DeclObjC.h:329
Represents a pointer to an Objective C object.
Definition TypeBase.h:7896
const ObjCObjectType * getObjectType() const
Gets the type pointed to by this ObjC pointer.
Definition TypeBase.h:7933
Expr * getIndexExpr(unsigned Idx)
Definition Expr.h:2586
const OffsetOfNode & getComponent(unsigned Idx) const
Definition Expr.h:2574
TypeSourceInfo * getTypeSourceInfo() const
Definition Expr.h:2567
unsigned getNumComponents() const
Definition Expr.h:2582
unsigned getArrayExprIndex() const
For an array element node, returns the index into the array of expressions.
Definition Expr.h:2479
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition Expr.h:2485
@ Array
An index into an array.
Definition Expr.h:2426
@ Identifier
A field in a dependent type, known only by its name.
Definition Expr.h:2430
@ Field
A field.
Definition Expr.h:2428
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition Expr.h:2433
Kind getKind() const
Determine what kind of offsetof node this is.
Definition Expr.h:2475
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition Expr.h:2495
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:1208
Expr * getSelectedExpr() const
Definition ExprCXX.h:4639
const Expr * getSubExpr() const
Definition Expr.h:2199
DynTypedNodeList getParents(const NodeT &Node)
Returns the parents of the given node (within the traversal scope).
Pointer-authentication qualifiers.
Definition TypeBase.h:152
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
A (possibly-)qualified type.
Definition TypeBase.h:937
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:1453
bool mayBeDynamicClass() const
Returns true if it is a class and it might be dynamic.
Definition Type.cpp:130
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8278
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8404
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1438
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8463
QualType getCanonicalType() const
Definition TypeBase.h:8330
bool UseExcessPrecision(const ASTContext &Ctx)
Definition Type.cpp:1612
bool mayBeNotDynamicClass() const
Returns true if it is not a class or if the class might not be dynamic.
Definition Type.cpp:135
bool isCanonical() const
Definition TypeBase.h:8335
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
void removePointerAuth()
Definition TypeBase.h:610
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4521
bool isSatisfied() const
Whether or not the requires clause is satisfied.
std::string ComputeName(ASTContext &Context) const
Definition Expr.cpp:583
static constexpr SanitizerMask bitPosToMask(const unsigned Pos)
Create a mask with a bit enabled at position Pos.
Definition Sanitizers.h:59
llvm::APSInt getShuffleMaskIdx(unsigned N) const
Definition Expr.h:4629
unsigned getNumSubExprs() const
getNumSubExprs - Return the size of the SubExprs array.
Definition Expr.h:4610
Expr * getExpr(unsigned Index)
getExpr - Return the Expr at the specified index.
Definition Expr.h:4616
unsigned getPackLength() const
Retrieve the length of the parameter pack.
Definition ExprCXX.h:4515
APValue EvaluateInContext(const ASTContext &Ctx, const Expr *DefaultExpr) const
Return the result of evaluating this SourceLocExpr in the specified (and possibly null) default argum...
Definition Expr.cpp:2278
SourceLocation getLocation() const
Definition Expr.h:4995
Encodes a location in the source.
CompoundStmt * getSubStmt()
Definition Expr.h:4546
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
void dump() const
Dumps the specified AST fragment and all subtrees to llvm::errs().
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:350
virtual bool useFP16ConversionIntrinsics() const
Check whether llvm intrinsics such as llvm.convert.to.fp16 should be used to convert to and from __fp...
VersionTuple getPlatformMinVersion() const
Retrieve the minimum desired version of the platform, to which the program should be compiled.
const llvm::fltSemantics & getHalfFormat() const
Definition TargetInfo.h:786
const llvm::fltSemantics & getBFloat16Format() const
Definition TargetInfo.h:796
const llvm::fltSemantics & getLongDoubleFormat() const
Definition TargetInfo.h:807
const llvm::fltSemantics & getFloat128Format() const
Definition TargetInfo.h:815
const llvm::fltSemantics & getIbm128Format() const
Definition TargetInfo.h:823
QualType getType() const
Return the type wrapped by this type source info.
Definition TypeBase.h:8260
bool getBoolValue() const
Definition ExprCXX.h:2947
const APValue & getAPValue() const
Definition ExprCXX.h:2952
bool isStoredAsBoolean() const
Definition ExprCXX.h:2943
bool isVoidType() const
Definition TypeBase.h:8871
bool isBooleanType() const
Definition TypeBase.h:9001
bool isSignableType(const ASTContext &Ctx) const
Definition TypeBase.h:8527
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2225
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition Type.cpp:2273
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2337
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8915
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9158
bool isReferenceType() const
Definition TypeBase.h:8539
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1909
bool isSveVLSBuiltinType() const
Determines if this is a sizeless type supported by the 'arm_sve_vector_bits' type attribute,...
Definition Type.cpp:2607
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isExtVectorType() const
Definition TypeBase.h:8658
bool isExtVectorBoolType() const
Definition TypeBase.h:8662
bool isOCLIntelSubgroupAVCType() const
Definition TypeBase.h:8790
bool isBuiltinType() const
Helper methods to distinguish type categories.
Definition TypeBase.h:8638
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8650
bool isFixedPointType() const
Return true if this is a fixed point type according to ISO/IEC JTC1 SC22 WG14 N1169.
Definition TypeBase.h:8927
bool isHalfType() const
Definition TypeBase.h:8875
bool hasSignedIntegerRepresentation() const
Determine whether this type has an signed integer representation of some sort, e.g....
Definition Type.cpp:2243
bool isQueueT() const
Definition TypeBase.h:8761
bool isMatrixType() const
Definition TypeBase.h:8672
bool isEventT() const
Definition TypeBase.h:8753
bool isFunctionType() const
Definition TypeBase.h:8511
bool isVectorType() const
Definition TypeBase.h:8654
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2320
bool isFloatingType() const
Definition Type.cpp:2304
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2253
const T * castAsCanonical() const
Return this type's canonical type cast to the specified type.
Definition TypeBase.h:2928
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9091
bool isNullPtrType() const
Definition TypeBase.h:8908
QualType getTypeOfArgument() const
Gets the argument type, or the type of the argument expression, whichever is appropriate.
Definition Expr.h:2694
UnaryExprOrTypeTrait getKind() const
Definition Expr.h:2657
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
SourceLocation getExprLoc() const
Definition Expr.h:2368
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:2400
bool canOverflow() const
Returns true if the unary operator can cause an overflow.
Definition Expr.h:2298
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
bool isWeak() const
Determine whether this symbol is weakly-imported, or declared with the weak or weak-ref attr.
Definition Decl.cpp:5506
QualType getType() const
Definition Value.cpp:237
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3966
Represents a GCC generic vector type.
Definition TypeBase.h:4175
WhileStmt - This represents a 'while' stmt.
Definition Stmt.h:2676
Defines the clang::TargetInfo interface.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::ArgumentAdaptingMatcherFunc< internal::HasMatcher > has
Matches AST nodes that have child AST nodes that match the provided matcher.
const AstTypeMatcher< PointerType > pointerType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
llvm::APFloat APFloat
Definition Floating.h:27
llvm::APInt APInt
Definition FixedPoint.h:19
bool LE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1249
bool Load(InterpState &S, CodePtr OpPC)
Definition Interp.h:1913
bool GE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1264
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
if(T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T -> getSizeExpr())))
@ Result
The result type of a method or function.
Definition TypeBase.h:905
const FunctionProtoType * T
CastKind
CastKind - The kind of operation required for a conversion.
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
#define false
Definition stdbool.h:26
#define true
Definition stdbool.h:25
llvm::Value * TotalOffset
llvm::Value * OffsetOverflows
Structure with information about how a bitfield should be accessed.
unsigned Size
The total size of the bit-field, in bits.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
static TBAAAccessInfo getMayAliasInfo()
Definition CodeGenTBAA.h:63
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition Sanitizers.h:184