clang 22.0.0git
CGExprScalar.cpp
Go to the documentation of this file.
1//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCXXABI.h"
14#include "CGCleanup.h"
15#include "CGDebugInfo.h"
16#include "CGHLSLRuntime.h"
17#include "CGObjCRuntime.h"
18#include "CGOpenMPRuntime.h"
19#include "CGRecordLayout.h"
20#include "CodeGenFunction.h"
21#include "CodeGenModule.h"
22#include "ConstantEmitter.h"
23#include "TargetInfo.h"
24#include "TrapReasonBuilder.h"
26#include "clang/AST/Attr.h"
27#include "clang/AST/DeclObjC.h"
28#include "clang/AST/Expr.h"
35#include "llvm/ADT/APFixedPoint.h"
36#include "llvm/ADT/ScopeExit.h"
37#include "llvm/IR/Argument.h"
38#include "llvm/IR/CFG.h"
39#include "llvm/IR/Constants.h"
40#include "llvm/IR/DataLayout.h"
41#include "llvm/IR/DerivedTypes.h"
42#include "llvm/IR/FixedPointBuilder.h"
43#include "llvm/IR/Function.h"
44#include "llvm/IR/GEPNoWrapFlags.h"
45#include "llvm/IR/GetElementPtrTypeIterator.h"
46#include "llvm/IR/GlobalVariable.h"
47#include "llvm/IR/Intrinsics.h"
48#include "llvm/IR/IntrinsicsPowerPC.h"
49#include "llvm/IR/MatrixBuilder.h"
50#include "llvm/IR/Module.h"
51#include "llvm/Support/TypeSize.h"
52#include <cstdarg>
53#include <optional>
54
55using namespace clang;
56using namespace CodeGen;
57using llvm::Value;
58
59//===----------------------------------------------------------------------===//
60// Scalar Expression Emitter
61//===----------------------------------------------------------------------===//
62
63namespace llvm {
64extern cl::opt<bool> EnableSingleByteCoverage;
65} // namespace llvm
66
67namespace {
68
69/// Determine whether the given binary operation may overflow.
70/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
71/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
72/// the returned overflow check is precise. The returned value is 'true' for
73/// all other opcodes, to be conservative.
74bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
75 BinaryOperator::Opcode Opcode, bool Signed,
76 llvm::APInt &Result) {
77 // Assume overflow is possible, unless we can prove otherwise.
78 bool Overflow = true;
79 const auto &LHSAP = LHS->getValue();
80 const auto &RHSAP = RHS->getValue();
81 if (Opcode == BO_Add) {
82 Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
83 : LHSAP.uadd_ov(RHSAP, Overflow);
84 } else if (Opcode == BO_Sub) {
85 Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
86 : LHSAP.usub_ov(RHSAP, Overflow);
87 } else if (Opcode == BO_Mul) {
88 Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
89 : LHSAP.umul_ov(RHSAP, Overflow);
90 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
91 if (Signed && !RHS->isZero())
92 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
93 else
94 return false;
95 }
96 return Overflow;
97}
98
99struct BinOpInfo {
100 Value *LHS;
101 Value *RHS;
102 QualType Ty; // Computation Type.
103 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
104 FPOptions FPFeatures;
105 const Expr *E; // Entire expr, for error unsupported. May not be binop.
106
107 /// Check if the binop can result in integer overflow.
108 bool mayHaveIntegerOverflow() const {
109 // Without constant input, we can't rule out overflow.
110 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
111 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
112 if (!LHSCI || !RHSCI)
113 return true;
114
115 llvm::APInt Result;
116 return ::mayHaveIntegerOverflow(
117 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
118 }
119
120 /// Check if the binop computes a division or a remainder.
121 bool isDivremOp() const {
122 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
123 Opcode == BO_RemAssign;
124 }
125
126 /// Check if the binop can result in an integer division by zero.
127 bool mayHaveIntegerDivisionByZero() const {
128 if (isDivremOp())
129 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
130 return CI->isZero();
131 return true;
132 }
133
134 /// Check if the binop can result in a float division by zero.
135 bool mayHaveFloatDivisionByZero() const {
136 if (isDivremOp())
137 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
138 return CFP->isZero();
139 return true;
140 }
141
142 /// Check if at least one operand is a fixed point type. In such cases, this
143 /// operation did not follow usual arithmetic conversion and both operands
144 /// might not be of the same type.
145 bool isFixedPointOp() const {
146 // We cannot simply check the result type since comparison operations return
147 // an int.
148 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
149 QualType LHSType = BinOp->getLHS()->getType();
150 QualType RHSType = BinOp->getRHS()->getType();
151 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
152 }
153 if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
154 return UnOp->getSubExpr()->getType()->isFixedPointType();
155 return false;
156 }
157
158 /// Check if the RHS has a signed integer representation.
159 bool rhsHasSignedIntegerRepresentation() const {
160 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
161 QualType RHSType = BinOp->getRHS()->getType();
162 return RHSType->hasSignedIntegerRepresentation();
163 }
164 return false;
165 }
166};
167
168static bool MustVisitNullValue(const Expr *E) {
169 // If a null pointer expression's type is the C++0x nullptr_t, then
170 // it's not necessarily a simple constant and it must be evaluated
171 // for its potential side effects.
172 return E->getType()->isNullPtrType();
173}
174
175/// If \p E is a widened promoted integer, get its base (unpromoted) type.
176static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
177 const Expr *E) {
178 const Expr *Base = E->IgnoreImpCasts();
179 if (E == Base)
180 return std::nullopt;
181
182 QualType BaseTy = Base->getType();
183 if (!Ctx.isPromotableIntegerType(BaseTy) ||
184 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
185 return std::nullopt;
186
187 return BaseTy;
188}
189
190/// Check if \p E is a widened promoted integer.
191static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
192 return getUnwidenedIntegerType(Ctx, E).has_value();
193}
194
195/// Check if we can skip the overflow check for \p Op.
// Returns true when a sanitizer overflow check would be provably redundant:
// constant operands with no overflow, types the sanitizer ignores, unary ops
// that cannot overflow, or binops on widened (promoted) operands — with the
// exception of unsigned multiplication, which can still overflow unless an
// unpromoted operand is under half the promoted width.
196static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
197 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
198 "Expected a unary or binary operator");
199
200 // If the binop has constant inputs and we can prove there is no overflow,
201 // we can elide the overflow check.
202 if (!Op.mayHaveIntegerOverflow())
203 return true;
204
// The sanitizer special-case lists can exclude whole types from checking.
205 if (Op.Ty->isSignedIntegerType() &&
206 Ctx.isTypeIgnoredBySanitizer(SanitizerKind::SignedIntegerOverflow,
207 Op.Ty)) {
208 return true;
209 }
210
211 if (Op.Ty->isUnsignedIntegerType() &&
212 Ctx.isTypeIgnoredBySanitizer(SanitizerKind::UnsignedIntegerOverflow,
213 Op.Ty)) {
214 return true;
215 }
216
217 const UnaryOperator *UO = dyn_cast<UnaryOperator>(Op.E);
218
// NOTE(review): original lines 220-221 are missing from this scrape — the
// middle of this condition (presumably an overflow-pattern-exclusion check
// between the UO_Minus test and isIntegerConstantExpr). Confirm against
// upstream CGExprScalar.cpp before relying on this text.
219 if (UO && UO->getOpcode() == UO_Minus &&
222 UO->isIntegerConstantExpr(Ctx))
223 return true;
224
225 // If a unary op has a widened operand, the op cannot overflow.
226 if (UO)
227 return !UO->canOverflow();
228
229 // We usually don't need overflow checks for binops with widened operands.
230 // Multiplication with promoted unsigned operands is a special case.
231 const auto *BO = cast<BinaryOperator>(Op.E);
232 if (BO->hasExcludedOverflowPattern())
233 return true;
234
// Both operands must be widened promotions for the reasoning below to hold.
235 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
236 if (!OptionalLHSTy)
237 return false;
238
239 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
240 if (!OptionalRHSTy)
241 return false;
242
243 QualType LHSTy = *OptionalLHSTy;
244 QualType RHSTy = *OptionalRHSTy;
245
246 // This is the simple case: binops without unsigned multiplication, and with
247 // widened operands. No overflow check is needed here.
248 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
249 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
250 return true;
251
252 // For unsigned multiplication the overflow check can be elided if either one
253 // of the unpromoted types are less than half the size of the promoted type.
254 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
255 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
256 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
257}
258
259class ScalarExprEmitter
260 : public StmtVisitor<ScalarExprEmitter, Value*> {
261 CodeGenFunction &CGF;
262 CGBuilderTy &Builder;
263 bool IgnoreResultAssign;
264 llvm::LLVMContext &VMContext;
265public:
266
267 ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
268 : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
269 VMContext(cgf.getLLVMContext()) {
270 }
271
272 //===--------------------------------------------------------------------===//
273 // Utilities
274 //===--------------------------------------------------------------------===//
275
276 bool TestAndClearIgnoreResultAssign() {
277 bool I = IgnoreResultAssign;
278 IgnoreResultAssign = false;
279 return I;
280 }
281
282 llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
283 LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
284 LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
285 return CGF.EmitCheckedLValue(E, TCK);
286 }
287
288 void EmitBinOpCheck(
289 ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
290 const BinOpInfo &Info);
291
292 Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
293 return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
294 }
295
296 void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
297 const AlignValueAttr *AVAttr = nullptr;
298 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
299 const ValueDecl *VD = DRE->getDecl();
300
301 if (VD->getType()->isReferenceType()) {
302 if (const auto *TTy =
303 VD->getType().getNonReferenceType()->getAs<TypedefType>())
304 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
305 } else {
306 // Assumptions for function parameters are emitted at the start of the
307 // function, so there is no need to repeat that here,
308 // unless the alignment-assumption sanitizer is enabled,
309 // then we prefer the assumption over alignment attribute
310 // on IR function param.
311 if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
312 return;
313
314 AVAttr = VD->getAttr<AlignValueAttr>();
315 }
316 }
317
318 if (!AVAttr)
319 if (const auto *TTy = E->getType()->getAs<TypedefType>())
320 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
321
322 if (!AVAttr)
323 return;
324
325 Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
326 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
327 CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
328 }
329
330 /// EmitLoadOfLValue - Given an expression with complex type that represents a
331 /// value l-value, this method emits the address of the l-value, then loads
332 /// and returns the result.
333 Value *EmitLoadOfLValue(const Expr *E) {
334 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
335 E->getExprLoc());
336
337 EmitLValueAlignmentAssumption(E, V);
338 return V;
339 }
340
341 /// EmitConversionToBool - Convert the specified expression value to a
342 /// boolean (i1) truth value. This is equivalent to "Val != 0".
343 Value *EmitConversionToBool(Value *Src, QualType DstTy);
344
345 /// Emit a check that a conversion from a floating-point type does not
346 /// overflow.
347 void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
348 Value *Src, QualType SrcType, QualType DstType,
349 llvm::Type *DstTy, SourceLocation Loc);
350
351 /// Known implicit conversion check kinds.
352 /// This is used for bitfield conversion checks as well.
353 /// Keep in sync with the enum of the same name in ubsan_handlers.h
354 enum ImplicitConversionCheckKind : unsigned char {
355 ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
356 ICCK_UnsignedIntegerTruncation = 1,
357 ICCK_SignedIntegerTruncation = 2,
358 ICCK_IntegerSignChange = 3,
359 ICCK_SignedIntegerTruncationOrSignChange = 4,
360 };
361
362 /// Emit a check that an [implicit] truncation of an integer does not
363 /// discard any bits. It is not UB, so we use the value after truncation.
364 void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
365 QualType DstType, SourceLocation Loc);
366
367 /// Emit a check that an [implicit] conversion of an integer does not change
368 /// the sign of the value. It is not UB, so we use the value after conversion.
369 /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
370 void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
371 QualType DstType, SourceLocation Loc);
372
373 /// Emit a conversion from the specified type to the specified destination
374 /// type, both of which are LLVM scalar types.
375 struct ScalarConversionOpts {
376 bool TreatBooleanAsSigned;
377 bool EmitImplicitIntegerTruncationChecks;
378 bool EmitImplicitIntegerSignChangeChecks;
379
380 ScalarConversionOpts()
381 : TreatBooleanAsSigned(false),
382 EmitImplicitIntegerTruncationChecks(false),
383 EmitImplicitIntegerSignChangeChecks(false) {}
384
385 ScalarConversionOpts(clang::SanitizerSet SanOpts)
386 : TreatBooleanAsSigned(false),
387 EmitImplicitIntegerTruncationChecks(
388 SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
389 EmitImplicitIntegerSignChangeChecks(
390 SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
391 };
392 Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
393 llvm::Type *SrcTy, llvm::Type *DstTy,
394 ScalarConversionOpts Opts);
395 Value *
396 EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
397 SourceLocation Loc,
398 ScalarConversionOpts Opts = ScalarConversionOpts());
399
400 /// Convert between either a fixed point and other fixed point or fixed point
401 /// and an integer.
402 Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
403 SourceLocation Loc);
404
405 /// Emit a conversion from the specified complex type to the specified
406 /// destination type, where the destination type is an LLVM scalar type.
407 Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
408 QualType SrcTy, QualType DstTy,
409 SourceLocation Loc);
410
411 /// EmitNullValue - Emit a value that corresponds to null for the given type.
412 Value *EmitNullValue(QualType Ty);
413
414 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
415 Value *EmitFloatToBoolConversion(Value *V) {
416 // Compare against 0.0 for fp scalars.
417 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
418 return Builder.CreateFCmpUNE(V, Zero, "tobool");
419 }
420
421 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
422 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
423 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
424
425 return Builder.CreateICmpNE(V, Zero, "tobool");
426 }
427
428 Value *EmitIntToBoolConversion(Value *V) {
429 // Because of the type rules of C, we often end up computing a
430 // logical value, then zero extending it to int, then wanting it
431 // as a logical value again. Optimize this common case.
432 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
433 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
434 Value *Result = ZI->getOperand(0);
435 // If there aren't any more uses, zap the instruction to save space.
436 // Note that there can be more uses, for example if this
437 // is the result of an assignment.
438 if (ZI->use_empty())
439 ZI->eraseFromParent();
440 return Result;
441 }
442 }
443
444 return Builder.CreateIsNotNull(V, "tobool");
445 }
446
447 //===--------------------------------------------------------------------===//
448 // Visitor Methods
449 //===--------------------------------------------------------------------===//
450
451 Value *Visit(Expr *E) {
452 ApplyDebugLocation DL(CGF, E);
453 return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
454 }
455
456 Value *VisitStmt(Stmt *S) {
457 S->dump(llvm::errs(), CGF.getContext());
458 llvm_unreachable("Stmt can't have complex result type!");
459 }
460 Value *VisitExpr(Expr *S);
461
// Emit a ConstantExpr: void-typed expressions produce no value; otherwise
// prefer the pre-evaluated constant (loading through it when the expression
// is a glvalue whose APValue result is an lvalue), falling back to visiting
// the subexpression when no constant could be emitted.
462 Value *VisitConstantExpr(ConstantExpr *E) {
463 // A constant expression of type 'void' generates no code and produces no
464 // value.
465 if (E->getType()->isVoidType())
466 return nullptr;
467
468 if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
469 if (E->isGLValue()) {
470 // This was already converted to an rvalue when it was constant
471 // evaluated.
472 if (E->hasAPValueResult() && !E->getAPValueResult().isLValue())
473 return Result;
// NOTE(review): original line 476 is missing from this scrape — presumably
// the alignment argument completing this Address(...) constructor call.
// Confirm against upstream CGExprScalar.cpp.
474 return CGF.EmitLoadOfScalar(
475 Address(Result, CGF.convertTypeForLoadStore(E->getType()),
477 /*Volatile*/ false, E->getType(), E->getExprLoc());
478 }
479 return Result;
480 }
481 return Visit(E->getSubExpr());
482 }
483 Value *VisitParenExpr(ParenExpr *PE) {
484 return Visit(PE->getSubExpr());
485 }
486 Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
487 return Visit(E->getReplacement());
488 }
489 Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
490 return Visit(GE->getResultExpr());
491 }
492 Value *VisitCoawaitExpr(CoawaitExpr *S) {
493 return CGF.EmitCoawaitExpr(*S).getScalarVal();
494 }
495 Value *VisitCoyieldExpr(CoyieldExpr *S) {
496 return CGF.EmitCoyieldExpr(*S).getScalarVal();
497 }
498 Value *VisitUnaryCoawait(const UnaryOperator *E) {
499 return Visit(E->getSubExpr());
500 }
501
502 // Leaves.
503 Value *VisitIntegerLiteral(const IntegerLiteral *E) {
504 return Builder.getInt(E->getValue());
505 }
506 Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
507 return Builder.getInt(E->getValue());
508 }
509 Value *VisitFloatingLiteral(const FloatingLiteral *E) {
510 return llvm::ConstantFP::get(VMContext, E->getValue());
511 }
512 Value *VisitCharacterLiteral(const CharacterLiteral *E) {
513 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
514 }
515 Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
516 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
517 }
518 Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
519 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
520 }
521 Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
522 if (E->getType()->isVoidType())
523 return nullptr;
524
525 return EmitNullValue(E->getType());
526 }
527 Value *VisitGNUNullExpr(const GNUNullExpr *E) {
528 return EmitNullValue(E->getType());
529 }
530 Value *VisitOffsetOfExpr(OffsetOfExpr *E);
531 Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
532 Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
533 llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
534 return Builder.CreateBitCast(V, ConvertType(E->getType()));
535 }
536
537 Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
538 return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
539 }
540
541 Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
542 return CGF.EmitPseudoObjectRValue(E).getScalarVal();
543 }
544
545 Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
546 Value *VisitEmbedExpr(EmbedExpr *E);
547
// Emit an OpaqueValueExpr: glvalues load through the opaque lvalue mapping;
// otherwise the mapping is taken to be the scalar value itself.
548 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
549 if (E->isGLValue())
550 return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
551 E->getExprLoc());
552
553 // Otherwise, assume the mapping is the scalar directly.
// NOTE(review): original line 554 (the non-glvalue return statement,
// presumably returning the opaque r-value mapping's scalar value) is missing
// from this scrape; confirm against upstream CGExprScalar.cpp.
555 }
556
557 Value *VisitOpenACCAsteriskSizeExpr(OpenACCAsteriskSizeExpr *E) {
558 llvm_unreachable("Codegen for this isn't defined/implemented");
559 }
560
561 // l-values.
562 Value *VisitDeclRefExpr(DeclRefExpr *E) {
563 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
564 return CGF.emitScalarConstant(Constant, E);
565 return EmitLoadOfLValue(E);
566 }
567
568 Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
569 return CGF.EmitObjCSelectorExpr(E);
570 }
571 Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
572 return CGF.EmitObjCProtocolExpr(E);
573 }
574 Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
575 return EmitLoadOfLValue(E);
576 }
// Emit an ObjC message send, loading through an lvalue when the condition
// below (partially lost in this scrape) holds.
577 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
// NOTE(review): original line 579 — the remainder of this condition after
// the method-decl null check — is missing from this scrape; confirm against
// upstream CGExprScalar.cpp.
578 if (E->getMethodDecl() &&
580 return EmitLoadOfLValue(E);
581 return CGF.EmitObjCMessageExpr(E).getScalarVal();
582 }
583
// Emit the 'isa' member of an ObjC object as an lvalue, then return the
// loaded value.
584 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
585 LValue LV = CGF.EmitObjCIsaExpr(E);
// NOTE(review): original line 586 — the statement producing V (presumably a
// load of LV) — is missing from this scrape; confirm against upstream.
587 return V;
588 }
589
590 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
591 VersionTuple Version = E->getVersion();
592
593 // If we're checking for a platform older than our minimum deployment
594 // target, we can fold the check away.
595 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
596 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
597
598 return CGF.EmitBuiltinAvailable(Version);
599 }
600
601 Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
602 Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
603 Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
604 Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
605 Value *VisitMemberExpr(MemberExpr *E);
606 Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
607 Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
608 // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
609 // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
610 // literals aren't l-values in C++. We do so simply because that's the
611 // cleanest way to handle compound literals in C++.
612 // See the discussion here: https://reviews.llvm.org/D64464
613 return EmitLoadOfLValue(E);
614 }
615
616 Value *VisitInitListExpr(InitListExpr *E);
617
618 Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
619 assert(CGF.getArrayInitIndex() &&
620 "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
621 return CGF.getArrayInitIndex();
622 }
623
624 Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
625 return EmitNullValue(E->getType());
626 }
627 Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
628 CGF.CGM.EmitExplicitCastExprType(E, &CGF);
629 return VisitCastExpr(E);
630 }
631 Value *VisitCastExpr(CastExpr *E);
632
// Emit a call expression as a scalar, attaching any applicable alignment
// assumption to the returned value.
633 Value *VisitCallExpr(const CallExpr *E) {
// NOTE(review): original line 634 — the guard condition for this early
// EmitLoadOfLValue return (presumably testing for a glvalue/reference
// result) — is missing from this scrape; confirm against upstream.
635 return EmitLoadOfLValue(E);
636
637 Value *V = CGF.EmitCallExpr(E).getScalarVal();
638
639 EmitLValueAlignmentAssumption(E, V);
640 return V;
641 }
642
643 Value *VisitStmtExpr(const StmtExpr *E);
644
645 // Unary Operators.
646 Value *VisitUnaryPostDec(const UnaryOperator *E) {
647 LValue LV = EmitLValue(E->getSubExpr());
648 return EmitScalarPrePostIncDec(E, LV, false, false);
649 }
650 Value *VisitUnaryPostInc(const UnaryOperator *E) {
651 LValue LV = EmitLValue(E->getSubExpr());
652 return EmitScalarPrePostIncDec(E, LV, true, false);
653 }
654 Value *VisitUnaryPreDec(const UnaryOperator *E) {
655 LValue LV = EmitLValue(E->getSubExpr());
656 return EmitScalarPrePostIncDec(E, LV, false, true);
657 }
658 Value *VisitUnaryPreInc(const UnaryOperator *E) {
659 LValue LV = EmitLValue(E->getSubExpr());
660 return EmitScalarPrePostIncDec(E, LV, true, true);
661 }
662
663 llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
664 llvm::Value *InVal,
665 bool IsInc);
666
667 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
668 bool isInc, bool isPre);
669
670
671 Value *VisitUnaryAddrOf(const UnaryOperator *E) {
672 if (isa<MemberPointerType>(E->getType())) // never sugared
673 return CGF.CGM.getMemberPointerConstant(E);
674
675 return EmitLValue(E->getSubExpr()).getPointer(CGF);
676 }
677 Value *VisitUnaryDeref(const UnaryOperator *E) {
678 if (E->getType()->isVoidType())
679 return Visit(E->getSubExpr()); // the actual value should be unused
680 return EmitLoadOfLValue(E);
681 }
682
683 Value *VisitUnaryPlus(const UnaryOperator *E,
684 QualType PromotionType = QualType());
685 Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
686 Value *VisitUnaryMinus(const UnaryOperator *E,
687 QualType PromotionType = QualType());
688 Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);
689
690 Value *VisitUnaryNot (const UnaryOperator *E);
691 Value *VisitUnaryLNot (const UnaryOperator *E);
692 Value *VisitUnaryReal(const UnaryOperator *E,
693 QualType PromotionType = QualType());
694 Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
695 Value *VisitUnaryImag(const UnaryOperator *E,
696 QualType PromotionType = QualType());
697 Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
698 Value *VisitUnaryExtension(const UnaryOperator *E) {
699 return Visit(E->getSubExpr());
700 }
701
702 // C++
703 Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
704 return EmitLoadOfLValue(E);
705 }
// Emit a source-location builtin (__builtin_FILE/LINE/etc.) as an abstract
// constant of the expression's type.
706 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
707 auto &Ctx = CGF.getContext();
// NOTE(review): original lines 708-709 — the computation of `Evaluated`
// (presumably evaluating SLE in the current default-arg/init context via
// Ctx) — are missing from this scrape; confirm against upstream.
710 return ConstantEmitter(CGF).emitAbstract(SLE->getLocation(), Evaluated,
711 SLE->getType());
712 }
713
714 Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
715 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
716 return Visit(DAE->getExpr());
717 }
718 Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
719 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
720 return Visit(DIE->getExpr());
721 }
722 Value *VisitCXXThisExpr(CXXThisExpr *TE) {
723 return CGF.LoadCXXThis();
724 }
725
726 Value *VisitExprWithCleanups(ExprWithCleanups *E);
727 Value *VisitCXXNewExpr(const CXXNewExpr *E) {
728 return CGF.EmitCXXNewExpr(E);
729 }
730 Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
731 CGF.EmitCXXDeleteExpr(E);
732 return nullptr;
733 }
734
735 Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
736 if (E->isStoredAsBoolean())
737 return llvm::ConstantInt::get(ConvertType(E->getType()),
738 E->getBoolValue());
739 assert(E->getAPValue().isInt() && "APValue type not supported");
740 return llvm::ConstantInt::get(ConvertType(E->getType()),
741 E->getAPValue().getInt());
742 }
743
744 Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
745 return Builder.getInt1(E->isSatisfied());
746 }
747
748 Value *VisitRequiresExpr(const RequiresExpr *E) {
749 return Builder.getInt1(E->isSatisfied());
750 }
751
752 Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
753 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
754 }
755
756 Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
757 return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
758 }
759
760 Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
761 // C++ [expr.pseudo]p1:
762 // The result shall only be used as the operand for the function call
763 // operator (), and the result of such a call has type void. The only
764 // effect is the evaluation of the postfix-expression before the dot or
765 // arrow.
766 CGF.EmitScalarExpr(E->getBase());
767 return nullptr;
768 }
769
770 Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
771 return EmitNullValue(E->getType());
772 }
773
774 Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
775 CGF.EmitCXXThrowExpr(E);
776 return nullptr;
777 }
778
779 Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
780 return Builder.getInt1(E->getValue());
781 }
782
783 // Binary Operators.
// Emit a multiplication for the computation type in Ops: signed integers
// (honoring the signed-overflow behavior mode and the signed-overflow
// sanitizer), constant matrices, unsigned integers (with the
// unsigned-overflow sanitizer), floating point, and fixed point; plain
// wrapping mul otherwise.
784 Value *EmitMul(const BinOpInfo &Ops) {
785 if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
// Each SOB_* case deliberately falls through to the next stricter one when
// the signed-integer-overflow sanitizer is enabled, ending at the
// checked-binop emission unless the check can be elided.
786 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
787 case LangOptions::SOB_Defined:
788 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
789 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
790 [[fallthrough]];
791 case LangOptions::SOB_Undefined:
792 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
793 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
794 [[fallthrough]];
795 case LangOptions::SOB_Trapping:
796 if (CanElideOverflowCheck(CGF.getContext(), Ops))
797 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
798 return EmitOverflowCheckedBinOp(Ops);
799 }
800 }
801
802 if (Ops.Ty->isConstantMatrixType()) {
803 llvm::MatrixBuilder MB(Builder);
804 // We need to check the types of the operands of the operator to get the
805 // correct matrix dimensions.
806 auto *BO = cast<BinaryOperator>(Ops.E);
807 auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
808 BO->getLHS()->getType().getCanonicalType());
809 auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
810 BO->getRHS()->getType().getCanonicalType());
811 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
// Matrix*matrix uses a real matrix multiply; matrix*scalar (either operand
// not a matrix type) uses an element-wise scalar multiply.
812 if (LHSMatTy && RHSMatTy)
813 return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
814 LHSMatTy->getNumColumns(),
815 RHSMatTy->getNumColumns());
816 return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
817 }
818
819 if (Ops.Ty->isUnsignedIntegerType() &&
820 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
821 !CanElideOverflowCheck(CGF.getContext(), Ops))
822 return EmitOverflowCheckedBinOp(Ops);
823
824 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
825 // Preserve the old values
826 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
827 return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
828 }
829 if (Ops.isFixedPointOp())
830 return EmitFixedPointBinOp(Ops);
831 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
832 }
  /// Create a binary op that checks for overflow.
  /// Currently only supports +, - and *.
  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);

  // Check for undefined division and modulus behaviors.
  void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
      llvm::Value *Zero,bool isDiv);
  // Common helper for getting how wide LHS of shift is.
  static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);

  // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
  // non powers of two.
  Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);

  // Emitters for the individual binary operations; each consumes operands
  // already evaluated and packaged in a BinOpInfo.
  Value *EmitDiv(const BinOpInfo &Ops);
  Value *EmitRem(const BinOpInfo &Ops);
  Value *EmitAdd(const BinOpInfo &Ops);
  Value *EmitSub(const BinOpInfo &Ops);
  Value *EmitShl(const BinOpInfo &Ops);
  Value *EmitShr(const BinOpInfo &Ops);
  // Bitwise ops map directly onto single LLVM instructions.
  Value *EmitAnd(const BinOpInfo &Ops) {
    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
  }
  Value *EmitXor(const BinOpInfo &Ops) {
    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
  }
  Value *EmitOr (const BinOpInfo &Ops) {
    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
  }

  // Helper functions for fixed point binary operations.
  Value *EmitFixedPointBinOp(const BinOpInfo &Ops);

  // Evaluate both operands of \p E, promoting to \p PromotionTy when given.
  BinOpInfo EmitBinOps(const BinaryOperator *E,
                       QualType PromotionTy = QualType());

  // Conversions to and from the excess-precision promotion type.
  Value *EmitPromotedValue(Value *result, QualType PromotionType);
  Value *EmitUnPromotedValue(Value *result, QualType ExprType);
  Value *EmitPromoted(const Expr *E, QualType PromotionType);

  // Emit a compound assignment (e.g. +=) as an lvalue, applying the binary
  // operation \p F; the scalar result is returned through \p Result.
  LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
                                  Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
                                  Value *&Result);

  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
880 QualType getPromotionType(QualType Ty) {
881 const auto &Ctx = CGF.getContext();
882 if (auto *CT = Ty->getAs<ComplexType>()) {
883 QualType ElementType = CT->getElementType();
884 if (ElementType.UseExcessPrecision(Ctx))
885 return Ctx.getComplexType(Ctx.FloatTy);
886 }
887
888 if (Ty.UseExcessPrecision(Ctx)) {
889 if (auto *VT = Ty->getAs<VectorType>()) {
890 unsigned NumElements = VT->getNumElements();
891 return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
892 }
893 return Ctx.FloatTy;
894 }
895
896 return QualType();
897 }
898
899 // Binary operators and binary compound assignment operators.
900#define HANDLEBINOP(OP) \
901 Value *VisitBin##OP(const BinaryOperator *E) { \
902 QualType promotionTy = getPromotionType(E->getType()); \
903 auto result = Emit##OP(EmitBinOps(E, promotionTy)); \
904 if (result && !promotionTy.isNull()) \
905 result = EmitUnPromotedValue(result, E->getType()); \
906 return result; \
907 } \
908 Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) { \
909 ApplyAtomGroup Grp(CGF.getDebugInfo()); \
910 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP); \
911 }
912 HANDLEBINOP(Mul)
913 HANDLEBINOP(Div)
914 HANDLEBINOP(Rem)
915 HANDLEBINOP(Add)
916 HANDLEBINOP(Sub)
917 HANDLEBINOP(Shl)
918 HANDLEBINOP(Shr)
920 HANDLEBINOP(Xor)
922#undef HANDLEBINOP
923
  // Comparisons.
  /// Emit a comparison for \p E, selecting among the unsigned-integer,
  /// signed-integer, and floating-point predicates based on the operand
  /// types. \p IsSignaling selects signaling FP comparison semantics.
  Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
                     llvm::CmpInst::Predicate SICmpOpc,
                     llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
  // Relational comparisons (<, >, <=, >=) pass SIG=true (signaling);
  // (in)equality passes SIG=false (quiet).
#define VISITCOMP(CODE, UI, SI, FP, SIG) \
    Value *VisitBin##CODE(const BinaryOperator *E) { \
      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
                         llvm::FCmpInst::FP, SIG); }
  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
#undef VISITCOMP
939
  Value *VisitBinAssign (const BinaryOperator *E);

  // Logical && and || (short-circuit evaluation).
  Value *VisitBinLAnd (const BinaryOperator *E);
  Value *VisitBinLOr (const BinaryOperator *E);
  // Comma operator: evaluate LHS for side effects, yield RHS.
  Value *VisitBinComma (const BinaryOperator *E);

  // Pointer-to-member access (.* and ->*) is emitted as an lvalue load.
  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }

  // Rewritten C++20 comparison operators are emitted via their semantic form.
  Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
    return Visit(E->getSemanticForm());
  }

  // Other Operators.
  Value *VisitBlockExpr(const BlockExpr *BE);
  Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
  Value *VisitChooseExpr(ChooseExpr *CE);
  Value *VisitVAArgExpr(VAArgExpr *VE);
  // Objective-C literal expressions are delegated to CodeGenFunction.
  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
    return CGF.EmitObjCStringLiteral(E);
  }
  Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
    return CGF.EmitObjCBoxedExpr(E);
  }
  Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
    return CGF.EmitObjCArrayLiteral(E);
  }
  Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
    return CGF.EmitObjCDictionaryLiteral(E);
  }
  Value *VisitAsTypeExpr(AsTypeExpr *CE);
  Value *VisitAtomicExpr(AtomicExpr *AE);
  // A pack-indexing expression just visits the selected pack element.
  Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
    return Visit(E->getSelectedExpr());
  }
975};
976} // end anonymous namespace.
977
978//===----------------------------------------------------------------------===//
979// Utilities
980//===----------------------------------------------------------------------===//
981
982/// EmitConversionToBool - Convert the specified expression value to a
983/// boolean (i1) truth value. This is equivalent to "Val != 0".
984Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
985 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
986
987 if (SrcType->isRealFloatingType())
988 return EmitFloatToBoolConversion(Src);
989
990 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
991 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
992
993 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
994 "Unknown scalar type to convert");
995
997 return EmitIntToBoolConversion(Src);
998
999 assert(isa<llvm::PointerType>(Src->getType()));
1000 return EmitPointerToBoolConversion(Src, SrcType);
1001}
1002
1003void ScalarExprEmitter::EmitFloatConversionCheck(
1004 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
1005 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
1006 assert(SrcType->isFloatingType() && "not a conversion from floating point");
1007 if (!isa<llvm::IntegerType>(DstTy))
1008 return;
1009
1010 auto CheckOrdinal = SanitizerKind::SO_FloatCastOverflow;
1011 auto CheckHandler = SanitizerHandler::FloatCastOverflow;
1012 SanitizerDebugLocation SanScope(&CGF, {CheckOrdinal}, CheckHandler);
1013 using llvm::APFloat;
1014 using llvm::APSInt;
1015
1016 llvm::Value *Check = nullptr;
1017 const llvm::fltSemantics &SrcSema =
1018 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
1019
1020 // Floating-point to integer. This has undefined behavior if the source is
1021 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
1022 // to an integer).
1023 unsigned Width = CGF.getContext().getIntWidth(DstType);
1025
1026 APSInt Min = APSInt::getMinValue(Width, Unsigned);
1027 APFloat MinSrc(SrcSema, APFloat::uninitialized);
1028 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
1029 APFloat::opOverflow)
1030 // Don't need an overflow check for lower bound. Just check for
1031 // -Inf/NaN.
1032 MinSrc = APFloat::getInf(SrcSema, true);
1033 else
1034 // Find the largest value which is too small to represent (before
1035 // truncation toward zero).
1036 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
1037
1038 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
1039 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
1040 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
1041 APFloat::opOverflow)
1042 // Don't need an overflow check for upper bound. Just check for
1043 // +Inf/NaN.
1044 MaxSrc = APFloat::getInf(SrcSema, false);
1045 else
1046 // Find the smallest value which is too large to represent (before
1047 // truncation toward zero).
1048 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
1049
1050 // If we're converting from __half, convert the range to float to match
1051 // the type of src.
1052 if (OrigSrcType->isHalfType()) {
1053 const llvm::fltSemantics &Sema =
1054 CGF.getContext().getFloatTypeSemantics(SrcType);
1055 bool IsInexact;
1056 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1057 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1058 }
1059
1060 llvm::Value *GE =
1061 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
1062 llvm::Value *LE =
1063 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
1064 Check = Builder.CreateAnd(GE, LE);
1065
1066 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
1067 CGF.EmitCheckTypeDescriptor(OrigSrcType),
1068 CGF.EmitCheckTypeDescriptor(DstType)};
1069 CGF.EmitCheck(std::make_pair(Check, CheckOrdinal), CheckHandler, StaticArgs,
1070 OrigSrc);
1071}
1072
1073// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1074// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1075static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1076 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1078 QualType DstType, CGBuilderTy &Builder) {
1079 llvm::Type *SrcTy = Src->getType();
1080 llvm::Type *DstTy = Dst->getType();
1081 (void)DstTy; // Only used in assert()
1082
1083 // This should be truncation of integral types.
1084 assert(Src != Dst);
1085 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
1086 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1087 "non-integer llvm type");
1088
1089 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1090 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1091
1092 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
1093 // Else, it is a signed truncation.
1094 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1096 if (!SrcSigned && !DstSigned) {
1097 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1098 Ordinal = SanitizerKind::SO_ImplicitUnsignedIntegerTruncation;
1099 } else {
1100 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1101 Ordinal = SanitizerKind::SO_ImplicitSignedIntegerTruncation;
1102 }
1103
1104 llvm::Value *Check = nullptr;
1105 // 1. Extend the truncated value back to the same width as the Src.
1106 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
1107 // 2. Equality-compare with the original source value
1108 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1109 // If the comparison result is 'i1 false', then the truncation was lossy.
1110 return std::make_pair(Kind, std::make_pair(Check, Ordinal));
1111}
1112
1114 QualType SrcType, QualType DstType) {
1115 return SrcType->isIntegerType() && DstType->isIntegerType();
1116}
1117
1118void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1119 Value *Dst, QualType DstType,
1120 SourceLocation Loc) {
1121 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
1122 return;
1123
1124 // We only care about int->int conversions here.
1125 // We ignore conversions to/from pointer and/or bool.
1127 DstType))
1128 return;
1129
1130 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1131 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1132 // This must be truncation. Else we do not care.
1133 if (SrcBits <= DstBits)
1134 return;
1135
1136 assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1137
1138 // If the integer sign change sanitizer is enabled,
1139 // and we are truncating from larger unsigned type to smaller signed type,
1140 // let that next sanitizer deal with it.
1141 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1142 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1143 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1144 (!SrcSigned && DstSigned))
1145 return;
1146
1147 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1148 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1149 Check;
1150
1151 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1152 {
1153 // We don't know the check kind until we call
1154 // EmitIntegerTruncationCheckHelper, but we want to annotate
1155 // EmitIntegerTruncationCheckHelper's instructions too.
1156 SanitizerDebugLocation SanScope(
1157 &CGF,
1158 {SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1159 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1160 CheckHandler);
1161 Check =
1162 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1163 // If the comparison result is 'i1 false', then the truncation was lossy.
1164 }
1165
1166 // Do we care about this type of truncation?
1167 if (!CGF.SanOpts.has(Check.second.second))
1168 return;
1169
1170 SanitizerDebugLocation SanScope(&CGF, {Check.second.second}, CheckHandler);
1171
1172 // Does some SSCL ignore this type?
1174 SanitizerMask::bitPosToMask(Check.second.second), DstType))
1175 return;
1176
1177 llvm::Constant *StaticArgs[] = {
1178 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1179 CGF.EmitCheckTypeDescriptor(DstType),
1180 llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
1181 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1182
1183 CGF.EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
1184}
1185
1186static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
1187 const char *Name,
1188 CGBuilderTy &Builder) {
1189 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1190 llvm::Type *VTy = V->getType();
1191 if (!VSigned) {
1192 // If the value is unsigned, then it is never negative.
1193 return llvm::ConstantInt::getFalse(VTy->getContext());
1194 }
1195 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1196 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1197 llvm::Twine(Name) + "." + V->getName() +
1198 ".negativitycheck");
1199}
1200
1201// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1202// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1203static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1204 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1206 QualType DstType, CGBuilderTy &Builder) {
1207 llvm::Type *SrcTy = Src->getType();
1208 llvm::Type *DstTy = Dst->getType();
1209
1210 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1211 "non-integer llvm type");
1212
1213 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1214 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1215 (void)SrcSigned; // Only used in assert()
1216 (void)DstSigned; // Only used in assert()
1217 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1218 unsigned DstBits = DstTy->getScalarSizeInBits();
1219 (void)SrcBits; // Only used in assert()
1220 (void)DstBits; // Only used in assert()
1221
1222 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1223 "either the widths should be different, or the signednesses.");
1224
1225 // 1. Was the old Value negative?
1226 llvm::Value *SrcIsNegative =
1227 EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
1228 // 2. Is the new Value negative?
1229 llvm::Value *DstIsNegative =
1230 EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
1231 // 3. Now, was the 'negativity status' preserved during the conversion?
1232 // NOTE: conversion from negative to zero is considered to change the sign.
1233 // (We want to get 'false' when the conversion changed the sign)
1234 // So we should just equality-compare the negativity statuses.
1235 llvm::Value *Check = nullptr;
1236 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1237 // If the comparison result is 'false', then the conversion changed the sign.
1238 return std::make_pair(
1239 ScalarExprEmitter::ICCK_IntegerSignChange,
1240 std::make_pair(Check, SanitizerKind::SO_ImplicitIntegerSignChange));
1241}
1242
1243void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1244 Value *Dst, QualType DstType,
1245 SourceLocation Loc) {
1246 if (!CGF.SanOpts.has(SanitizerKind::SO_ImplicitIntegerSignChange))
1247 return;
1248
1249 llvm::Type *SrcTy = Src->getType();
1250 llvm::Type *DstTy = Dst->getType();
1251
1252 // We only care about int->int conversions here.
1253 // We ignore conversions to/from pointer and/or bool.
1255 DstType))
1256 return;
1257
1258 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1259 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1260 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1261 unsigned DstBits = DstTy->getScalarSizeInBits();
1262
1263 // Now, we do not need to emit the check in *all* of the cases.
1264 // We can avoid emitting it in some obvious cases where it would have been
1265 // dropped by the opt passes (instcombine) always anyways.
1266 // If it's a cast between effectively the same type, no check.
1267 // NOTE: this is *not* equivalent to checking the canonical types.
1268 if (SrcSigned == DstSigned && SrcBits == DstBits)
1269 return;
1270 // At least one of the values needs to have signed type.
1271 // If both are unsigned, then obviously, neither of them can be negative.
1272 if (!SrcSigned && !DstSigned)
1273 return;
1274 // If the conversion is to *larger* *signed* type, then no check is needed.
1275 // Because either sign-extension happens (so the sign will remain),
1276 // or zero-extension will happen (the sign bit will be zero.)
1277 if ((DstBits > SrcBits) && DstSigned)
1278 return;
1279 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1280 (SrcBits > DstBits) && SrcSigned) {
1281 // If the signed integer truncation sanitizer is enabled,
1282 // and this is a truncation from signed type, then no check is needed.
1283 // Because here sign change check is interchangeable with truncation check.
1284 return;
1285 }
1286 // Does an SSCL have an entry for the DstType under its respective sanitizer
1287 // section?
1288 if (DstSigned && CGF.getContext().isTypeIgnoredBySanitizer(
1289 SanitizerKind::ImplicitSignedIntegerTruncation, DstType))
1290 return;
1291 if (!DstSigned &&
1293 SanitizerKind::ImplicitUnsignedIntegerTruncation, DstType))
1294 return;
1295 // That's it. We can't rule out any more cases with the data we have.
1296
1297 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1298 SanitizerDebugLocation SanScope(
1299 &CGF,
1300 {SanitizerKind::SO_ImplicitIntegerSignChange,
1301 SanitizerKind::SO_ImplicitUnsignedIntegerTruncation,
1302 SanitizerKind::SO_ImplicitSignedIntegerTruncation},
1303 CheckHandler);
1304
1305 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1306 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1307 Check;
1308
1309 // Each of these checks needs to return 'false' when an issue was detected.
1310 ImplicitConversionCheckKind CheckKind;
1311 llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
1312 2>
1313 Checks;
1314 // So we can 'and' all the checks together, and still get 'false',
1315 // if at least one of the checks detected an issue.
1316
1317 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1318 CheckKind = Check.first;
1319 Checks.emplace_back(Check.second);
1320
1321 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1322 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1323 // If the signed integer truncation sanitizer was enabled,
1324 // and we are truncating from larger unsigned type to smaller signed type,
1325 // let's handle the case we skipped in that check.
1326 Check =
1327 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1328 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1329 Checks.emplace_back(Check.second);
1330 // If the comparison result is 'i1 false', then the truncation was lossy.
1331 }
1332
1333 llvm::Constant *StaticArgs[] = {
1334 CGF.EmitCheckSourceLocation(Loc), CGF.EmitCheckTypeDescriptor(SrcType),
1335 CGF.EmitCheckTypeDescriptor(DstType),
1336 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1337 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1338 // EmitCheck() will 'and' all the checks together.
1339 CGF.EmitCheck(Checks, CheckHandler, StaticArgs, {Src, Dst});
1340}
1341
1342// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1343// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1344static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1345 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1347 QualType DstType, CGBuilderTy &Builder) {
1348 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1349 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1350
1351 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1352 if (!SrcSigned && !DstSigned)
1353 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1354 else
1355 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1356
1357 llvm::Value *Check = nullptr;
1358 // 1. Extend the truncated value back to the same width as the Src.
1359 Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
1360 // 2. Equality-compare with the original source value
1361 Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
1362 // If the comparison result is 'i1 false', then the truncation was lossy.
1363
1364 return std::make_pair(
1365 Kind,
1366 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1367}
1368
1369// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1370// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1371static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1372 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1374 QualType DstType, CGBuilderTy &Builder) {
1375 // 1. Was the old Value negative?
1376 llvm::Value *SrcIsNegative =
1377 EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
1378 // 2. Is the new Value negative?
1379 llvm::Value *DstIsNegative =
1380 EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
1381 // 3. Now, was the 'negativity status' preserved during the conversion?
1382 // NOTE: conversion from negative to zero is considered to change the sign.
1383 // (We want to get 'false' when the conversion changed the sign)
1384 // So we should just equality-compare the negativity statuses.
1385 llvm::Value *Check = nullptr;
1386 Check =
1387 Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
1388 // If the comparison result is 'false', then the conversion changed the sign.
1389 return std::make_pair(
1390 ScalarExprEmitter::ICCK_IntegerSignChange,
1391 std::make_pair(Check, SanitizerKind::SO_ImplicitBitfieldConversion));
1392}
1393
1395 Value *Dst, QualType DstType,
1396 const CGBitFieldInfo &Info,
1397 SourceLocation Loc) {
1398
1399 if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
1400 return;
1401
1402 // We only care about int->int conversions here.
1403 // We ignore conversions to/from pointer and/or bool.
1405 DstType))
1406 return;
1407
1408 if (DstType->isBooleanType() || SrcType->isBooleanType())
1409 return;
1410
1411 // This should be truncation of integral types.
1412 assert(isa<llvm::IntegerType>(Src->getType()) &&
1413 isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");
1414
1415 // TODO: Calculate src width to avoid emitting code
1416 // for unecessary cases.
1417 unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
1418 unsigned DstBits = Info.Size;
1419
1420 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1421 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1422
1423 auto CheckHandler = SanitizerHandler::ImplicitConversion;
1424 SanitizerDebugLocation SanScope(
1425 this, {SanitizerKind::SO_ImplicitBitfieldConversion}, CheckHandler);
1426
1427 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1428 std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>>
1429 Check;
1430
1431 // Truncation
1432 bool EmitTruncation = DstBits < SrcBits;
1433 // If Dst is signed and Src unsigned, we want to be more specific
1434 // about the CheckKind we emit, in this case we want to emit
1435 // ICCK_SignedIntegerTruncationOrSignChange.
1436 bool EmitTruncationFromUnsignedToSigned =
1437 EmitTruncation && DstSigned && !SrcSigned;
1438 // Sign change
1439 bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
1440 bool BothUnsigned = !SrcSigned && !DstSigned;
1441 bool LargerSigned = (DstBits > SrcBits) && DstSigned;
1442 // We can avoid emitting sign change checks in some obvious cases
1443 // 1. If Src and Dst have the same signedness and size
1444 // 2. If both are unsigned sign check is unecessary!
1445 // 3. If Dst is signed and bigger than Src, either
1446 // sign-extension or zero-extension will make sure
1447 // the sign remains.
1448 bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;
1449
1450 if (EmitTruncation)
1451 Check =
1452 EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1453 else if (EmitSignChange) {
1454 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1455 "either the widths should be different, or the signednesses.");
1456 Check =
1457 EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1458 } else
1459 return;
1460
1461 ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
1462 if (EmitTruncationFromUnsignedToSigned)
1463 CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;
1464
1465 llvm::Constant *StaticArgs[] = {
1467 EmitCheckTypeDescriptor(DstType),
1468 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1469 llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};
1470
1471 EmitCheck(Check.second, CheckHandler, StaticArgs, {Src, Dst});
1472}
1473
/// Emit a cast between scalar values (or element-wise between two matrix
/// values), selecting the LLVM cast instruction from the source and
/// destination element types: int<->int, int<->fp, or fp<->fp.
Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
                                         QualType DstType, llvm::Type *SrcTy,
                                         llvm::Type *DstTy,
                                         ScalarConversionOpts Opts) {
  // The Element types determine the type of cast to perform.
  llvm::Type *SrcElementTy;
  llvm::Type *DstElementTy;
  QualType SrcElementType;
  QualType DstElementType;
  if (SrcType->isMatrixType() && DstType->isMatrixType()) {
    // Matrices are lowered as LLVM vectors; cast element-wise.
    SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
    DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
    SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
    DstElementType = DstType->castAs<MatrixType>()->getElementType();
  } else {
    assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
           "cannot cast between matrix and non-matrix types");
    SrcElementTy = SrcTy;
    DstElementTy = DstTy;
    SrcElementType = SrcType;
    DstElementType = DstType;
  }

  if (isa<llvm::IntegerType>(SrcElementTy)) {
    bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
    // OpenCL-style bool vectors use all-ones "true"; treat as signed so the
    // extension preserves that representation.
    if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
      InputSigned = true;
    }

    // int -> int: sign/zero-extend or truncate; int -> fp: SI/UI conversion.
    if (isa<llvm::IntegerType>(DstElementTy))
      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    if (InputSigned)
      return Builder.CreateSIToFP(Src, DstTy, "conv");
    return Builder.CreateUIToFP(Src, DstTy, "conv");
  }

  if (isa<llvm::IntegerType>(DstElementTy)) {
    assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
    bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();

    // If we can't recognize overflow as undefined behavior, assume that
    // overflow saturates. This protects against normal optimizations if we are
    // compiling with non-standard FP semantics.
    if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
      llvm::Intrinsic::ID IID =
          IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
      return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
    }

    if (IsSigned)
      return Builder.CreateFPToSI(Src, DstTy, "conv");
    return Builder.CreateFPToUI(Src, DstTy, "conv");
  }

  // Conversions between the two 16-bit FP formats (half <-> bfloat) have no
  // direct cast; round-trip through float.
  if ((DstElementTy->is16bitFPTy() && SrcElementTy->is16bitFPTy())) {
    Value *FloatVal = Builder.CreateFPExt(Src, Builder.getFloatTy(), "fpext");
    return Builder.CreateFPTrunc(FloatVal, DstTy, "fptrunc");
  }
  // fp -> fp: a smaller destination TypeID means a narrowing conversion
  // (relies on llvm::Type::TypeID ordering FP types by increasing width —
  // NOTE(review): confirm this invariant holds for all FP TypeIDs in use).
  if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
    return Builder.CreateFPTrunc(Src, DstTy, "conv");
  return Builder.CreateFPExt(Src, DstTy, "conv");
}
1536
1537/// Emit a conversion from the specified type to the specified destination type,
1538/// both of which are LLVM scalar types.
1539Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1540 QualType DstType,
1541 SourceLocation Loc,
1542 ScalarConversionOpts Opts) {
1543 // All conversions involving fixed point types should be handled by the
1544 // EmitFixedPoint family functions. This is done to prevent bloating up this
1545 // function more, and although fixed point numbers are represented by
1546 // integers, we do not want to follow any logic that assumes they should be
1547 // treated as integers.
1548 // TODO(leonardchan): When necessary, add another if statement checking for
1549 // conversions to fixed point types from other types.
1550 if (SrcType->isFixedPointType()) {
1551 if (DstType->isBooleanType())
1552 // It is important that we check this before checking if the dest type is
1553 // an integer because booleans are technically integer types.
1554 // We do not need to check the padding bit on unsigned types if unsigned
1555 // padding is enabled because overflow into this bit is undefined
1556 // behavior.
1557 return Builder.CreateIsNotNull(Src, "tobool");
1558 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1559 DstType->isRealFloatingType())
1560 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1561
1562 llvm_unreachable(
1563 "Unhandled scalar conversion from a fixed point type to another type.");
1564 } else if (DstType->isFixedPointType()) {
1565 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1566 // This also includes converting booleans and enums to fixed point types.
1567 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1568
1569 llvm_unreachable(
1570 "Unhandled scalar conversion to a fixed point type from another type.");
1571 }
1572
1573 QualType NoncanonicalSrcType = SrcType;
1574 QualType NoncanonicalDstType = DstType;
1575
1576 SrcType = CGF.getContext().getCanonicalType(SrcType);
1577 DstType = CGF.getContext().getCanonicalType(DstType);
1578 if (SrcType == DstType) return Src;
1579
1580 if (DstType->isVoidType()) return nullptr;
1581
1582 llvm::Value *OrigSrc = Src;
1583 QualType OrigSrcType = SrcType;
1584 llvm::Type *SrcTy = Src->getType();
1585
1586 // Handle conversions to bool first, they are special: comparisons against 0.
1587 if (DstType->isBooleanType())
1588 return EmitConversionToBool(Src, SrcType);
1589
1590 llvm::Type *DstTy = ConvertType(DstType);
1591
1592 // Cast from half through float if half isn't a native type.
1593 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1594 // Cast to FP using the intrinsic if the half type itself isn't supported.
1595 if (DstTy->isFloatingPointTy()) {
1597 return Builder.CreateCall(
1598 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1599 Src);
1600 } else {
1601 // Cast to other types through float, using either the intrinsic or FPExt,
1602 // depending on whether the half type itself is supported
1603 // (as opposed to operations on half, available with NativeHalfType).
1605 Src = Builder.CreateCall(
1606 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1607 CGF.CGM.FloatTy),
1608 Src);
1609 } else {
1610 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1611 }
1612 SrcType = CGF.getContext().FloatTy;
1613 SrcTy = CGF.FloatTy;
1614 }
1615 }
1616
1617 // Ignore conversions like int -> uint.
1618 if (SrcTy == DstTy) {
1619 if (Opts.EmitImplicitIntegerSignChangeChecks)
1620 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1621 NoncanonicalDstType, Loc);
1622
1623 return Src;
1624 }
1625
1626 // Handle pointer conversions next: pointers can only be converted to/from
1627 // other pointers and integers. Check for pointer types in terms of LLVM, as
1628 // some native types (like Obj-C id) may map to a pointer type.
1629 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1630 // The source value may be an integer, or a pointer.
1631 if (isa<llvm::PointerType>(SrcTy))
1632 return Src;
1633
1634 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1635 // First, convert to the correct width so that we control the kind of
1636 // extension.
1637 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1638 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1639 llvm::Value* IntResult =
1640 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1641 // Then, cast to pointer.
1642 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1643 }
1644
1645 if (isa<llvm::PointerType>(SrcTy)) {
1646 // Must be an ptr to int cast.
1647 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1648 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1649 }
1650
1651 // A scalar can be splatted to an extended vector of the same element type
1652 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1653 // Sema should add casts to make sure that the source expression's type is
1654 // the same as the vector's element type (sans qualifiers)
1655 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1656 SrcType.getTypePtr() &&
1657 "Splatted expr doesn't match with vector element type?");
1658
1659 // Splat the element across to all elements
1660 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1661 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1662 }
1663
1664 if (SrcType->isMatrixType() && DstType->isMatrixType())
1665 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1666
1667 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1668 // Allow bitcast from vector to integer/fp of the same size.
1669 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1670 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1671 if (SrcSize == DstSize)
1672 return Builder.CreateBitCast(Src, DstTy, "conv");
1673
1674 // Conversions between vectors of different sizes are not allowed except
1675 // when vectors of half are involved. Operations on storage-only half
1676 // vectors require promoting half vector operands to float vectors and
1677 // truncating the result, which is either an int or float vector, to a
1678 // short or half vector.
1679
1680 // Source and destination are both expected to be vectors.
1681 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1682 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1683 (void)DstElementTy;
1684
1685 assert(((SrcElementTy->isIntegerTy() &&
1686 DstElementTy->isIntegerTy()) ||
1687 (SrcElementTy->isFloatingPointTy() &&
1688 DstElementTy->isFloatingPointTy())) &&
1689 "unexpected conversion between a floating-point vector and an "
1690 "integer vector");
1691
1692 // Truncate an i32 vector to an i16 vector.
1693 if (SrcElementTy->isIntegerTy())
1694 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1695
1696 // Truncate a float vector to a half vector.
1697 if (SrcSize > DstSize)
1698 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1699
1700 // Promote a half vector to a float vector.
1701 return Builder.CreateFPExt(Src, DstTy, "conv");
1702 }
1703
1704 // Finally, we have the arithmetic types: real int/float.
1705 Value *Res = nullptr;
1706 llvm::Type *ResTy = DstTy;
1707
1708 // An overflowing conversion has undefined behavior if either the source type
1709 // or the destination type is a floating-point type. However, we consider the
1710 // range of representable values for all floating-point types to be
1711 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1712 // floating-point type.
1713 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1714 OrigSrcType->isFloatingType())
1715 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1716 Loc);
1717
1718 // Cast to half through float if half isn't a native type.
1719 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1720 // Make sure we cast in a single step if from another FP type.
1721 if (SrcTy->isFloatingPointTy()) {
1722 // Use the intrinsic if the half type itself isn't supported
1723 // (as opposed to operations on half, available with NativeHalfType).
1725 return Builder.CreateCall(
1726 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1727 // If the half type is supported, just use an fptrunc.
1728 return Builder.CreateFPTrunc(Src, DstTy);
1729 }
1730 DstTy = CGF.FloatTy;
1731 }
1732
1733 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1734
1735 if (DstTy != ResTy) {
1737 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1738 Res = Builder.CreateCall(
1739 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1740 Res);
1741 } else {
1742 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1743 }
1744 }
1745
1746 if (Opts.EmitImplicitIntegerTruncationChecks)
1747 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1748 NoncanonicalDstType, Loc);
1749
1750 if (Opts.EmitImplicitIntegerSignChangeChecks)
1751 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1752 NoncanonicalDstType, Loc);
1753
1754 return Res;
1755}
1756
1757Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
1758 QualType DstTy,
1759 SourceLocation Loc) {
1760 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
1761 llvm::Value *Result;
1762 if (SrcTy->isRealFloatingType())
1763 Result = FPBuilder.CreateFloatingToFixed(Src,
1764 CGF.getContext().getFixedPointSemantics(DstTy));
1765 else if (DstTy->isRealFloatingType())
1766 Result = FPBuilder.CreateFixedToFloating(Src,
1768 ConvertType(DstTy));
1769 else {
1770 auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
1771 auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
1772
1773 if (DstTy->isIntegerType())
1774 Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
1775 DstFPSema.getWidth(),
1776 DstFPSema.isSigned());
1777 else if (SrcTy->isIntegerType())
1778 Result = FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
1779 DstFPSema);
1780 else
1781 Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
1782 }
1783 return Result;
1784}
1785
1786/// Emit a conversion from the specified complex type to the specified
1787/// destination type, where the destination type is an LLVM scalar type.
1788Value *ScalarExprEmitter::EmitComplexToScalarConversion(
1789 CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy,
1790 SourceLocation Loc) {
1791 // Get the source element type.
1792 SrcTy = SrcTy->castAs<ComplexType>()->getElementType();
1793
1794 // Handle conversions to bool first, they are special: comparisons against 0.
1795 if (DstTy->isBooleanType()) {
1796 // Complex != 0 -> (Real != 0) | (Imag != 0)
1797 Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1798 Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
1799 return Builder.CreateOr(Src.first, Src.second, "tobool");
1800 }
1801
1802 // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
1803 // the imaginary part of the complex value is discarded and the value of the
1804 // real part is converted according to the conversion rules for the
1805 // corresponding real type.
1806 return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
1807}
1808
1809Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1810 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1811}
1812
/// Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
/// operation). The check passes if all values in \p Checks (which are \c i1),
/// are \c true.
void ScalarExprEmitter::EmitBinOpCheck(
    ArrayRef<std::pair<Value *, SanitizerKind::SanitizerOrdinal>> Checks,
    const BinOpInfo &Info) {
  // Sanitizer checks may only be emitted inside a sanitizer scope.
  assert(CGF.IsSanitizerScope);
  SanitizerHandler Check;
  // StaticData: compile-time handler arguments (source location, type
  // descriptors). DynamicData: runtime operand values passed to the handler.
  SmallVector<llvm::Constant *, 4> StaticData;
  SmallVector<llvm::Value *, 2> DynamicData;
  TrapReason TR;

  BinaryOperatorKind Opcode = Info.Opcode;
  // NOTE(review): lines appear elided here in this copy — presumably the
  // compound-assignment case that maps Opcode to its underlying arithmetic
  // op; confirm against upstream before editing this region.

  StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
  if (UO && UO->getOpcode() == UO_Minus) {
    // Unary minus lowered to a binary "0 - RHS": report as negation overflow,
    // with only the (single) operand as dynamic data.
    Check = SanitizerHandler::NegateOverflow;
    StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
    DynamicData.push_back(Info.RHS);
  } else {
    if (BinaryOperator::isShiftOp(Opcode)) {
      // Shift LHS negative or too large, or RHS out of bounds.
      Check = SanitizerHandler::ShiftOutOfBounds;
      const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
      StaticData.push_back(
          CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
    } else if (Opcode == BO_Div || Opcode == BO_Rem) {
      // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
      Check = SanitizerHandler::DivremOverflow;
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    } else {
      // Arithmetic overflow (+, -, *).
      // ArithOverflowKind selects the operator spelling in the trap
      // diagnostic text.
      int ArithOverflowKind = 0;
      switch (Opcode) {
      case BO_Add: {
        Check = SanitizerHandler::AddOverflow;
        ArithOverflowKind = diag::UBSanArithKind::Add;
        break;
      }
      case BO_Sub: {
        Check = SanitizerHandler::SubOverflow;
        ArithOverflowKind = diag::UBSanArithKind::Sub;
        break;
      }
      case BO_Mul: {
        Check = SanitizerHandler::MulOverflow;
        ArithOverflowKind = diag::UBSanArithKind::Mul;
        break;
      }
      default:
        llvm_unreachable("unexpected opcode for bin op check");
      }
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
      // NOTE(review): the guard condition below appears truncated in this
      // copy — presumably it tests whether the unsigned/signed
      // integer-overflow sanitizers are active in trap mode; confirm
      // against upstream.
          SanitizerKind::UnsignedIntegerOverflow) ||
          SanitizerKind::SignedIntegerOverflow)) {
        // Only pay the cost for constructing the trap diagnostic if they are
        // going to be used.
        CGF.CGM.BuildTrapReason(diag::trap_ubsan_arith_overflow, TR)
            << Info.Ty->isSignedIntegerOrEnumerationType() << ArithOverflowKind
            << Info.E;
      }
    }
    // All binary forms report both operands at runtime.
    DynamicData.push_back(Info.LHS);
    DynamicData.push_back(Info.RHS);
  }

  CGF.EmitCheck(Checks, Check, StaticData, DynamicData, &TR);
}
1889
1890//===----------------------------------------------------------------------===//
1891// Visitor Methods
1892//===----------------------------------------------------------------------===//
1893
1894Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1895 CGF.ErrorUnsupported(E, "scalar expression");
1896 if (E->getType()->isVoidType())
1897 return nullptr;
1898 return llvm::PoisonValue::get(CGF.ConvertType(E->getType()));
1899}
1900
1901Value *
1902ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
1903 ASTContext &Context = CGF.getContext();
1904 unsigned AddrSpace =
1906 llvm::Constant *GlobalConstStr = Builder.CreateGlobalString(
1907 E->ComputeName(Context), "__usn_str", AddrSpace);
1908
1909 llvm::Type *ExprTy = ConvertType(E->getType());
1910 return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,
1911 "usn_addr_cast");
1912}
1913
1914Value *ScalarExprEmitter::VisitEmbedExpr(EmbedExpr *E) {
1915 assert(E->getDataElementCount() == 1);
1916 auto It = E->begin();
1917 return Builder.getInt((*It)->getValue());
1918}
1919
1920Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1921 // Vector Mask Case
1922 if (E->getNumSubExprs() == 2) {
1923 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1924 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1925 Value *Mask;
1926
1927 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
1928 unsigned LHSElts = LTy->getNumElements();
1929
1930 Mask = RHS;
1931
1932 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
1933
1934 // Mask off the high bits of each shuffle index.
1935 Value *MaskBits =
1936 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1937 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1938
1939 // newv = undef
1940 // mask = mask & maskbits
1941 // for each elt
1942 // n = extract mask i
1943 // x = extract val n
1944 // newv = insert newv, x, i
1945 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
1946 MTy->getNumElements());
1947 Value* NewV = llvm::PoisonValue::get(RTy);
1948 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1949 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1950 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1951
1952 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1953 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1954 }
1955 return NewV;
1956 }
1957
1958 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1959 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1960
1961 SmallVector<int, 32> Indices;
1962 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1963 llvm::APSInt Idx = E->getShuffleMaskIdx(i - 2);
1964 // Check for -1 and output it as undef in the IR.
1965 if (Idx.isSigned() && Idx.isAllOnes())
1966 Indices.push_back(-1);
1967 else
1968 Indices.push_back(Idx.getZExtValue());
1969 }
1970
1971 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
1972}
1973
1974Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1975 QualType SrcType = E->getSrcExpr()->getType(),
1976 DstType = E->getType();
1977
1978 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1979
1980 SrcType = CGF.getContext().getCanonicalType(SrcType);
1981 DstType = CGF.getContext().getCanonicalType(DstType);
1982 if (SrcType == DstType) return Src;
1983
1984 assert(SrcType->isVectorType() &&
1985 "ConvertVector source type must be a vector");
1986 assert(DstType->isVectorType() &&
1987 "ConvertVector destination type must be a vector");
1988
1989 llvm::Type *SrcTy = Src->getType();
1990 llvm::Type *DstTy = ConvertType(DstType);
1991
1992 // Ignore conversions like int -> uint.
1993 if (SrcTy == DstTy)
1994 return Src;
1995
1996 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1997 DstEltType = DstType->castAs<VectorType>()->getElementType();
1998
1999 assert(SrcTy->isVectorTy() &&
2000 "ConvertVector source IR type must be a vector");
2001 assert(DstTy->isVectorTy() &&
2002 "ConvertVector destination IR type must be a vector");
2003
2004 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
2005 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
2006
2007 if (DstEltType->isBooleanType()) {
2008 assert((SrcEltTy->isFloatingPointTy() ||
2009 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
2010
2011 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
2012 if (SrcEltTy->isFloatingPointTy()) {
2013 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2014 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
2015 } else {
2016 return Builder.CreateICmpNE(Src, Zero, "tobool");
2017 }
2018 }
2019
2020 // We have the arithmetic types: real int/float.
2021 Value *Res = nullptr;
2022
2023 if (isa<llvm::IntegerType>(SrcEltTy)) {
2024 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
2025 if (isa<llvm::IntegerType>(DstEltTy))
2026 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
2027 else {
2028 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2029 if (InputSigned)
2030 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
2031 else
2032 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
2033 }
2034 } else if (isa<llvm::IntegerType>(DstEltTy)) {
2035 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
2036 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2037 if (DstEltType->isSignedIntegerOrEnumerationType())
2038 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
2039 else
2040 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
2041 } else {
2042 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
2043 "Unknown real conversion");
2044 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, E);
2045 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
2046 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
2047 else
2048 Res = Builder.CreateFPExt(Src, DstTy, "conv");
2049 }
2050
2051 return Res;
2052}
2053
Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
  // If the member folds to a constant, emit the constant directly but still
  // evaluate the base expression for its side effects.
  if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
    CGF.EmitIgnoredExpr(E->getBase());
    return CGF.emitScalarConstant(Constant, E);
  } else {
    Expr::EvalResult Result;
    // NOTE(review): a guard populating Result appears elided in this copy —
    // presumably an integer constant-evaluation of E whose success gates
    // this early-return path; confirm against upstream before editing.
    llvm::APSInt Value = Result.Val.getInt();
    CGF.EmitIgnoredExpr(E->getBase());
    return Builder.getInt(Value);
  }
  }

  // Otherwise load the lvalue formed by the member expression.
  llvm::Value *Result = EmitLoadOfLValue(E);

  // If -fdebug-info-for-profiling is specified, emit a pseudo variable and its
  // debug info for the pointer, even if there is no variable associated with
  // the pointer's expression.
  if (CGF.CGM.getCodeGenOpts().DebugInfoForProfiling && CGF.getDebugInfo()) {
    if (llvm::LoadInst *Load = dyn_cast<llvm::LoadInst>(Result)) {
      if (llvm::GetElementPtrInst *GEP =
              dyn_cast<llvm::GetElementPtrInst>(Load->getPointerOperand())) {
        if (llvm::Instruction *Pointer =
                dyn_cast<llvm::Instruction>(GEP->getPointerOperand())) {
          QualType Ty = E->getBase()->getType();
          // For dot accesses the pseudo variable describes a pointer to the
          // base, so wrap the base type in a pointer type.
          if (!E->isArrow())
            Ty = CGF.getContext().getPointerType(Ty);
          CGF.getDebugInfo()->EmitPseudoVariable(Builder, Pointer, Ty);
        }
      }
    }
  }
  return Result;
}
2088
Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Emit subscript expressions in rvalue context's. For most cases, this just
  // loads the lvalue formed by the subscript expr. However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
  if (!E->getBase()->getType()->isVectorType() &&
      // NOTE(review): the second operand of this && appears elided in this
      // copy — presumably a further subscriptable-vector check on the base
      // type; confirm against upstream.
    return EmitLoadOfLValue(E);

  // Handle the vector case. The base must be a vector, the index must be an
  // integer value.
  Value *Base = Visit(E->getBase());
  Value *Idx = Visit(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();

  // Optionally guard the element access with an array-bounds check.
  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);

  return Builder.CreateExtractElement(Base, Idx, "vecext");
}
2111
2112Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
2113 TestAndClearIgnoreResultAssign();
2114
2115 // Handle the vector case. The base must be a vector, the index must be an
2116 // integer value.
2117 Value *RowIdx = CGF.EmitMatrixIndexExpr(E->getRowIdx());
2118 Value *ColumnIdx = CGF.EmitMatrixIndexExpr(E->getColumnIdx());
2119
2120 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
2121 unsigned NumRows = MatrixTy->getNumRows();
2122 llvm::MatrixBuilder MB(Builder);
2123 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
2124 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
2125 MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());
2126
2127 Value *Matrix = Visit(E->getBase());
2128
2129 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
2130 return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
2131}
2132
2133static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
2134 unsigned Off) {
2135 int MV = SVI->getMaskValue(Idx);
2136 if (MV == -1)
2137 return -1;
2138 return Off + MV;
2139}
2140
2141static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
2142 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
2143 "Index operand too large for shufflevector mask!");
2144 return C->getZExtValue();
2145}
2146
2147Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
2148 bool Ignore = TestAndClearIgnoreResultAssign();
2149 (void)Ignore;
2150 unsigned NumInitElements = E->getNumInits();
2151 assert((Ignore == false ||
2152 (NumInitElements == 0 && E->getType()->isVoidType())) &&
2153 "init list ignored");
2154
2155 // HLSL initialization lists in the AST are an expansion which can contain
2156 // side-effecting expressions wrapped in opaque value expressions. To properly
2157 // emit these we need to emit the opaque values before we emit the argument
2158 // expressions themselves. This is a little hacky, but it prevents us needing
2159 // to do a bigger AST-level change for a language feature that we need
2160 // deprecate in the near future. See related HLSL language proposals in the
2161 // proposals (https://github.com/microsoft/hlsl-specs/blob/main/proposals):
2162 // * 0005-strict-initializer-lists.md
2163 // * 0032-constructors.md
2164 if (CGF.getLangOpts().HLSL)
2166
2167 if (E->hadArrayRangeDesignator())
2168 CGF.ErrorUnsupported(E, "GNU array range designator extension");
2169
2170 llvm::VectorType *VType =
2171 dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
2172
2173 if (!VType) {
2174 if (NumInitElements == 0) {
2175 // C++11 value-initialization for the scalar.
2176 return EmitNullValue(E->getType());
2177 }
2178 // We have a scalar in braces. Just use the first element.
2179 return Visit(E->getInit(0));
2180 }
2181
2182 if (isa<llvm::ScalableVectorType>(VType)) {
2183 if (NumInitElements == 0) {
2184 // C++11 value-initialization for the vector.
2185 return EmitNullValue(E->getType());
2186 }
2187
2188 if (NumInitElements == 1) {
2189 Expr *InitVector = E->getInit(0);
2190
2191 // Initialize from another scalable vector of the same type.
2192 if (InitVector->getType().getCanonicalType() ==
2194 return Visit(InitVector);
2195 }
2196
2197 llvm_unreachable("Unexpected initialization of a scalable vector!");
2198 }
2199
2200 unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
2201
2202 // Loop over initializers collecting the Value for each, and remembering
2203 // whether the source was swizzle (ExtVectorElementExpr). This will allow
2204 // us to fold the shuffle for the swizzle into the shuffle for the vector
2205 // initializer, since LLVM optimizers generally do not want to touch
2206 // shuffles.
2207 unsigned CurIdx = 0;
2208 bool VIsPoisonShuffle = false;
2209 llvm::Value *V = llvm::PoisonValue::get(VType);
2210 for (unsigned i = 0; i != NumInitElements; ++i) {
2211 Expr *IE = E->getInit(i);
2212 Value *Init = Visit(IE);
2213 SmallVector<int, 16> Args;
2214
2215 llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
2216
2217 // Handle scalar elements. If the scalar initializer is actually one
2218 // element of a different vector of the same width, use shuffle instead of
2219 // extract+insert.
2220 if (!VVT) {
2221 if (isa<ExtVectorElementExpr>(IE)) {
2222 llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
2223
2224 if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
2225 ->getNumElements() == ResElts) {
2226 llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
2227 Value *LHS = nullptr, *RHS = nullptr;
2228 if (CurIdx == 0) {
2229 // insert into poison -> shuffle (src, poison)
2230 // shufflemask must use an i32
2231 Args.push_back(getAsInt32(C, CGF.Int32Ty));
2232 Args.resize(ResElts, -1);
2233
2234 LHS = EI->getVectorOperand();
2235 RHS = V;
2236 VIsPoisonShuffle = true;
2237 } else if (VIsPoisonShuffle) {
2238 // insert into poison shuffle && size match -> shuffle (v, src)
2239 llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
2240 for (unsigned j = 0; j != CurIdx; ++j)
2241 Args.push_back(getMaskElt(SVV, j, 0));
2242 Args.push_back(ResElts + C->getZExtValue());
2243 Args.resize(ResElts, -1);
2244
2245 LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
2246 RHS = EI->getVectorOperand();
2247 VIsPoisonShuffle = false;
2248 }
2249 if (!Args.empty()) {
2250 V = Builder.CreateShuffleVector(LHS, RHS, Args);
2251 ++CurIdx;
2252 continue;
2253 }
2254 }
2255 }
2256 V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
2257 "vecinit");
2258 VIsPoisonShuffle = false;
2259 ++CurIdx;
2260 continue;
2261 }
2262
2263 unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();
2264
2265 // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
2266 // input is the same width as the vector being constructed, generate an
2267 // optimized shuffle of the swizzle input into the result.
2268 unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
2269 if (isa<ExtVectorElementExpr>(IE)) {
2270 llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
2271 Value *SVOp = SVI->getOperand(0);
2272 auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());
2273
2274 if (OpTy->getNumElements() == ResElts) {
2275 for (unsigned j = 0; j != CurIdx; ++j) {
2276 // If the current vector initializer is a shuffle with poison, merge
2277 // this shuffle directly into it.
2278 if (VIsPoisonShuffle) {
2279 Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
2280 } else {
2281 Args.push_back(j);
2282 }
2283 }
2284 for (unsigned j = 0, je = InitElts; j != je; ++j)
2285 Args.push_back(getMaskElt(SVI, j, Offset));
2286 Args.resize(ResElts, -1);
2287
2288 if (VIsPoisonShuffle)
2289 V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
2290
2291 Init = SVOp;
2292 }
2293 }
2294
2295 // Extend init to result vector length, and then shuffle its contribution
2296 // to the vector initializer into V.
2297 if (Args.empty()) {
2298 for (unsigned j = 0; j != InitElts; ++j)
2299 Args.push_back(j);
2300 Args.resize(ResElts, -1);
2301 Init = Builder.CreateShuffleVector(Init, Args, "vext");
2302
2303 Args.clear();
2304 for (unsigned j = 0; j != CurIdx; ++j)
2305 Args.push_back(j);
2306 for (unsigned j = 0; j != InitElts; ++j)
2307 Args.push_back(j + Offset);
2308 Args.resize(ResElts, -1);
2309 }
2310
2311 // If V is poison, make sure it ends up on the RHS of the shuffle to aid
2312 // merging subsequent shuffles into this one.
2313 if (CurIdx == 0)
2314 std::swap(V, Init);
2315 V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
2316 VIsPoisonShuffle = isa<llvm::PoisonValue>(Init);
2317 CurIdx += InitElts;
2318 }
2319
2320 // FIXME: evaluate codegen vs. shuffling against constant null vector.
2321 // Emit remaining default initializers.
2322 llvm::Type *EltTy = VType->getElementType();
2323
2324 // Emit remaining default initializers
2325 for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
2326 Value *Idx = Builder.getInt32(CurIdx);
2327 llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
2328 V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
2329 }
2330 return V;
2331}
2332
2334 return !D->isWeak();
2335}
2336
2337static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E) {
2338 E = E->IgnoreParens();
2339
2340 if (const auto *UO = dyn_cast<UnaryOperator>(E))
2341 if (UO->getOpcode() == UO_Deref)
2342 return CGF.isPointerKnownNonNull(UO->getSubExpr());
2343
2344 if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
2345 return isDeclRefKnownNonNull(CGF, DRE->getDecl());
2346
2347 if (const auto *ME = dyn_cast<MemberExpr>(E)) {
2348 if (isa<FieldDecl>(ME->getMemberDecl()))
2349 return true;
2350 return isDeclRefKnownNonNull(CGF, ME->getMemberDecl());
2351 }
2352
2353 // Array subscripts? Anything else?
2354
2355 return false;
2356}
2357
2359 assert(E->getType()->isSignableType(getContext()));
2360
2361 E = E->IgnoreParens();
2362
2363 if (isa<CXXThisExpr>(E))
2364 return true;
2365
2366 if (const auto *UO = dyn_cast<UnaryOperator>(E))
2367 if (UO->getOpcode() == UO_AddrOf)
2368 return isLValueKnownNonNull(*this, UO->getSubExpr());
2369
2370 if (const auto *CE = dyn_cast<CastExpr>(E))
2371 if (CE->getCastKind() == CK_FunctionToPointerDecay ||
2372 CE->getCastKind() == CK_ArrayToPointerDecay)
2373 return isLValueKnownNonNull(*this, CE->getSubExpr());
2374
2375 // Maybe honor __nonnull?
2376
2377 return false;
2378}
2379
2381 const Expr *E = CE->getSubExpr();
2382
2383 if (CE->getCastKind() == CK_UncheckedDerivedToBase)
2384 return false;
2385
2386 if (isa<CXXThisExpr>(E->IgnoreParens())) {
2387 // We always assume that 'this' is never null.
2388 return false;
2389 }
2390
2391 if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2392 // And that glvalue casts are never null.
2393 if (ICE->isGLValue())
2394 return false;
2395 }
2396
2397 return true;
2398}
2399
2400// RHS is an aggregate type
2402 QualType DestTy, SourceLocation Loc) {
2403 SmallVector<LValue, 16> LoadList;
2404 CGF.FlattenAccessAndTypeLValue(SrcVal, LoadList);
2405 // Dest is either a vector or a builtin?
2406 // if its a vector create a temp alloca to store into and return that
2407 if (auto *VecTy = DestTy->getAs<VectorType>()) {
2408 assert(LoadList.size() >= VecTy->getNumElements() &&
2409 "Flattened type on RHS must have the same number or more elements "
2410 "than vector on LHS.");
2411 llvm::Value *V =
2412 CGF.Builder.CreateLoad(CGF.CreateIRTemp(DestTy, "flatcast.tmp"));
2413 // write to V.
2414 for (unsigned I = 0, E = VecTy->getNumElements(); I < E; I++) {
2415 RValue RVal = CGF.EmitLoadOfLValue(LoadList[I], Loc);
2416 assert(RVal.isScalar() &&
2417 "All flattened source values should be scalars.");
2418 llvm::Value *Cast =
2419 CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[I].getType(),
2420 VecTy->getElementType(), Loc);
2421 V = CGF.Builder.CreateInsertElement(V, Cast, I);
2422 }
2423 return V;
2424 }
2425 // if its a builtin just do an extract element or load.
2426 assert(DestTy->isBuiltinType() &&
2427 "Destination type must be a vector or builtin type.");
2428 RValue RVal = CGF.EmitLoadOfLValue(LoadList[0], Loc);
2429 assert(RVal.isScalar() && "All flattened source values should be scalars.");
2430 return CGF.EmitScalarConversion(RVal.getScalarVal(), LoadList[0].getType(),
2431 DestTy, Loc);
2432}
2433
2434// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
2435// have to handle a more broad range of conversions than explicit casts, as they
2436// handle things like function to ptr-to-function decay etc.
2437Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
2438 auto RestoreCurCast =
2439 llvm::make_scope_exit([this, Prev = CGF.CurCast] { CGF.CurCast = Prev; });
2440 CGF.CurCast = CE;
2441
2442 Expr *E = CE->getSubExpr();
2443 QualType DestTy = CE->getType();
2444 CastKind Kind = CE->getCastKind();
2445 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);
2446
2447 // These cases are generally not written to ignore the result of
2448 // evaluating their sub-expressions, so we clear this now.
2449 bool Ignored = TestAndClearIgnoreResultAssign();
2450
2451 // Since almost all cast kinds apply to scalars, this switch doesn't have
2452 // a default case, so the compiler will warn on a missing case. The cases
2453 // are in the same order as in the CastKind enum.
2454 switch (Kind) {
2455 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2456 case CK_BuiltinFnToFnPtr:
2457 llvm_unreachable("builtin functions are handled elsewhere");
2458
2459 case CK_LValueBitCast:
2460 case CK_ObjCObjectLValueCast: {
2461 Address Addr = EmitLValue(E).getAddress();
2462 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2463 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
2464 return EmitLoadOfLValue(LV, CE->getExprLoc());
2465 }
2466
2467 case CK_LValueToRValueBitCast: {
2468 LValue SourceLVal = CGF.EmitLValue(E);
2469 Address Addr =
2470 SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy));
2471 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2472 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2473 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2474 }
2475
2476 case CK_CPointerToObjCPointerCast:
2477 case CK_BlockPointerToObjCPointerCast:
2478 case CK_AnyPointerToBlockPointerCast:
2479 case CK_BitCast: {
2480 Value *Src = Visit(E);
2481 llvm::Type *SrcTy = Src->getType();
2482 llvm::Type *DstTy = ConvertType(DestTy);
2483
2484 // FIXME: this is a gross but seemingly necessary workaround for an issue
2485 // manifesting when a target uses a non-default AS for indirect sret args,
2486 // but the source HLL is generic, wherein a valid C-cast or reinterpret_cast
2487 // on the address of a local struct that gets returned by value yields an
2488 // invalid bitcast from the a pointer to the IndirectAS to a pointer to the
2489 // DefaultAS. We can only do this subversive thing because sret args are
2490 // manufactured and them residing in the IndirectAS is a target specific
2491 // detail, and doing an AS cast here still retains the semantics the user
2492 // expects. It is desirable to remove this iff a better solution is found.
2493 if (auto A = dyn_cast<llvm::Argument>(Src); A && A->hasStructRetAttr())
2495 CGF, Src, E->getType().getAddressSpace(), DstTy);
2496
2497 assert(
2498 (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
2499 SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
2500 "Address-space cast must be used to convert address spaces");
2501
2502 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2503 if (auto *PT = DestTy->getAs<PointerType>()) {
2505 PT->getPointeeType(),
2506 Address(Src,
2508 E->getType()->castAs<PointerType>()->getPointeeType()),
2509 CGF.getPointerAlign()),
2510 /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
2511 CE->getBeginLoc());
2512 }
2513 }
2514
2515 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2516 const QualType SrcType = E->getType();
2517
2518 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2519 // Casting to pointer that could carry dynamic information (provided by
2520 // invariant.group) requires launder.
2521 Src = Builder.CreateLaunderInvariantGroup(Src);
2522 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2523 // Casting to pointer that does not carry dynamic information (provided
2524 // by invariant.group) requires stripping it. Note that we don't do it
2525 // if the source could not be dynamic type and destination could be
2526 // dynamic because dynamic information is already laundered. It is
2527 // because launder(strip(src)) == launder(src), so there is no need to
2528 // add extra strip before launder.
2529 Src = Builder.CreateStripInvariantGroup(Src);
2530 }
2531 }
2532
2533 // Update heapallocsite metadata when there is an explicit pointer cast.
2534 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
2535 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
2536 !isa<CastExpr>(E)) {
2537 QualType PointeeType = DestTy->getPointeeType();
2538 if (!PointeeType.isNull())
2539 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
2540 CE->getExprLoc());
2541 }
2542 }
2543
2544 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2545 // same element type, use the llvm.vector.insert intrinsic to perform the
2546 // bitcast.
2547 if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2548 if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2549 // If we are casting a fixed i8 vector to a scalable i1 predicate
2550 // vector, use a vector insert and bitcast the result.
2551 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
2552 FixedSrcTy->getElementType()->isIntegerTy(8)) {
2553 ScalableDstTy = llvm::ScalableVectorType::get(
2554 FixedSrcTy->getElementType(),
2555 llvm::divideCeil(
2556 ScalableDstTy->getElementCount().getKnownMinValue(), 8));
2557 }
2558 if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
2559 llvm::Value *PoisonVec = llvm::PoisonValue::get(ScalableDstTy);
2560 llvm::Value *Result = Builder.CreateInsertVector(
2561 ScalableDstTy, PoisonVec, Src, uint64_t(0), "cast.scalable");
2562 ScalableDstTy = cast<llvm::ScalableVectorType>(
2563 llvm::VectorType::getWithSizeAndScalar(ScalableDstTy, DstTy));
2564 if (Result->getType() != ScalableDstTy)
2565 Result = Builder.CreateBitCast(Result, ScalableDstTy);
2566 if (Result->getType() != DstTy)
2567 Result = Builder.CreateExtractVector(DstTy, Result, uint64_t(0));
2568 return Result;
2569 }
2570 }
2571 }
2572
2573 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2574 // same element type, use the llvm.vector.extract intrinsic to perform the
2575 // bitcast.
2576 if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
2577 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2578 // If we are casting a scalable i1 predicate vector to a fixed i8
2579 // vector, bitcast the source and use a vector extract.
2580 if (ScalableSrcTy->getElementType()->isIntegerTy(1) &&
2581 FixedDstTy->getElementType()->isIntegerTy(8)) {
2582 if (!ScalableSrcTy->getElementCount().isKnownMultipleOf(8)) {
2583 ScalableSrcTy = llvm::ScalableVectorType::get(
2584 ScalableSrcTy->getElementType(),
2585 llvm::alignTo<8>(
2586 ScalableSrcTy->getElementCount().getKnownMinValue()));
2587 llvm::Value *ZeroVec = llvm::Constant::getNullValue(ScalableSrcTy);
2588 Src = Builder.CreateInsertVector(ScalableSrcTy, ZeroVec, Src,
2589 uint64_t(0));
2590 }
2591
2592 ScalableSrcTy = llvm::ScalableVectorType::get(
2593 FixedDstTy->getElementType(),
2594 ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
2595 Src = Builder.CreateBitCast(Src, ScalableSrcTy);
2596 }
2597 if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType())
2598 return Builder.CreateExtractVector(DstTy, Src, uint64_t(0),
2599 "cast.fixed");
2600 }
2601 }
2602
2603 // Perform VLAT <-> VLST bitcast through memory.
2604 // TODO: since the llvm.vector.{insert,extract} intrinsics
2605 // require the element types of the vectors to be the same, we
2606 // need to keep this around for bitcasts between VLAT <-> VLST where
2607 // the element types of the vectors are not the same, until we figure
2608 // out a better way of doing these casts.
2609 if ((isa<llvm::FixedVectorType>(SrcTy) &&
2613 Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
2614 LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
2615 CGF.EmitStoreOfScalar(Src, LV);
2616 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2617 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2618 DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
2619 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2620 }
2621
2622 llvm::Value *Result = Builder.CreateBitCast(Src, DstTy);
2623 return CGF.authPointerToPointerCast(Result, E->getType(), DestTy);
2624 }
2625 case CK_AddressSpaceConversion: {
2626 Expr::EvalResult Result;
2627 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2628 Result.Val.isNullPointer()) {
2629 // If E has side effect, it is emitted even if its final result is a
2630 // null pointer. In that case, a DCE pass should be able to
2631 // eliminate the useless instructions emitted during translating E.
2632 if (Result.HasSideEffects)
2633 Visit(E);
2635 ConvertType(DestTy)), DestTy);
2636 }
2637 // Since target may map different address spaces in AST to the same address
2638 // space, an address space conversion may end up as a bitcast.
2640 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2641 ConvertType(DestTy));
2642 }
2643 case CK_AtomicToNonAtomic:
2644 case CK_NonAtomicToAtomic:
2645 case CK_UserDefinedConversion:
2646 return Visit(E);
2647
2648 case CK_NoOp: {
2649 return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE) : Visit(E);
2650 }
2651
2652 case CK_BaseToDerived: {
2653 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2654 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2655
2656 Address Base = CGF.EmitPointerWithAlignment(E);
2657 Address Derived =
2658 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2659 CE->path_begin(), CE->path_end(),
2661
2662 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2663 // performed and the object is not of the derived type.
2664 if (CGF.sanitizePerformTypeCheck())
2666 Derived, DestTy->getPointeeType());
2667
2668 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2669 CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
2670 /*MayBeNull=*/true,
2672 CE->getBeginLoc());
2673
2674 return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType());
2675 }
2676 case CK_UncheckedDerivedToBase:
2677 case CK_DerivedToBase: {
2678 // The EmitPointerWithAlignment path does this fine; just discard
2679 // the alignment.
2681 CE->getType()->getPointeeType());
2682 }
2683
2684 case CK_Dynamic: {
2685 Address V = CGF.EmitPointerWithAlignment(E);
2686 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2687 return CGF.EmitDynamicCast(V, DCE);
2688 }
2689
2690 case CK_ArrayToPointerDecay:
2692 CE->getType()->getPointeeType());
2693 case CK_FunctionToPointerDecay:
2694 return EmitLValue(E).getPointer(CGF);
2695
2696 case CK_NullToPointer:
2697 if (MustVisitNullValue(E))
2698 CGF.EmitIgnoredExpr(E);
2699
2700 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2701 DestTy);
2702
2703 case CK_NullToMemberPointer: {
2704 if (MustVisitNullValue(E))
2705 CGF.EmitIgnoredExpr(E);
2706
2707 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2708 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2709 }
2710
2711 case CK_ReinterpretMemberPointer:
2712 case CK_BaseToDerivedMemberPointer:
2713 case CK_DerivedToBaseMemberPointer: {
2714 Value *Src = Visit(E);
2715
2716 // Note that the AST doesn't distinguish between checked and
2717 // unchecked member pointer conversions, so we always have to
2718 // implement checked conversions here. This is inefficient when
2719 // actual control flow may be required in order to perform the
2720 // check, which it is for data member pointers (but not member
2721 // function pointers on Itanium and ARM).
2722 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2723 }
2724
2725 case CK_ARCProduceObject:
2726 return CGF.EmitARCRetainScalarExpr(E);
2727 case CK_ARCConsumeObject:
2728 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2729 case CK_ARCReclaimReturnedObject:
2730 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2731 case CK_ARCExtendBlockObject:
2732 return CGF.EmitARCExtendBlockObject(E);
2733
2734 case CK_CopyAndAutoreleaseBlockObject:
2735 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2736
2737 case CK_FloatingRealToComplex:
2738 case CK_FloatingComplexCast:
2739 case CK_IntegralRealToComplex:
2740 case CK_IntegralComplexCast:
2741 case CK_IntegralComplexToFloatingComplex:
2742 case CK_FloatingComplexToIntegralComplex:
2743 case CK_ConstructorConversion:
2744 case CK_ToUnion:
2745 case CK_HLSLArrayRValue:
2746 llvm_unreachable("scalar cast to non-scalar value");
2747
2748 case CK_LValueToRValue:
2749 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2750 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2751 return Visit(E);
2752
2753 case CK_IntegralToPointer: {
2754 Value *Src = Visit(E);
2755
2756 // First, convert to the correct width so that we control the kind of
2757 // extension.
2758 auto DestLLVMTy = ConvertType(DestTy);
2759 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2760 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2761 llvm::Value* IntResult =
2762 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2763
2764 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2765
2766 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2767 // Going from integer to pointer that could be dynamic requires reloading
2768 // dynamic information from invariant.group.
2769 if (DestTy.mayBeDynamicClass())
2770 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2771 }
2772
2773 IntToPtr = CGF.authPointerToPointerCast(IntToPtr, E->getType(), DestTy);
2774 return IntToPtr;
2775 }
2776 case CK_PointerToIntegral: {
2777 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2778 auto *PtrExpr = Visit(E);
2779
2780 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2781 const QualType SrcType = E->getType();
2782
2783 // Casting to integer requires stripping dynamic information as it does
2784 // not carries it.
2785 if (SrcType.mayBeDynamicClass())
2786 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2787 }
2788
2789 PtrExpr = CGF.authPointerToPointerCast(PtrExpr, E->getType(), DestTy);
2790 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2791 }
2792 case CK_ToVoid: {
2793 CGF.EmitIgnoredExpr(E);
2794 return nullptr;
2795 }
2796 case CK_MatrixCast: {
2797 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2798 CE->getExprLoc());
2799 }
2800 // CK_HLSLAggregateSplatCast only handles splatting to vectors from a vec1
2801 // Casts were inserted in Sema to Cast the Src Expr to a Scalar and
2802 // To perform any necessary Scalar Cast, so this Cast can be handled
2803 // by the regular Vector Splat cast code.
2804 case CK_HLSLAggregateSplatCast:
2805 case CK_VectorSplat: {
2806 llvm::Type *DstTy = ConvertType(DestTy);
2807 Value *Elt = Visit(E);
2808 // Splat the element across to all elements
2809 llvm::ElementCount NumElements =
2810 cast<llvm::VectorType>(DstTy)->getElementCount();
2811 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2812 }
2813
2814 case CK_FixedPointCast:
2815 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2816 CE->getExprLoc());
2817
2818 case CK_FixedPointToBoolean:
2819 assert(E->getType()->isFixedPointType() &&
2820 "Expected src type to be fixed point type");
2821 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2822 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2823 CE->getExprLoc());
2824
2825 case CK_FixedPointToIntegral:
2826 assert(E->getType()->isFixedPointType() &&
2827 "Expected src type to be fixed point type");
2828 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2829 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2830 CE->getExprLoc());
2831
2832 case CK_IntegralToFixedPoint:
2833 assert(E->getType()->isIntegerType() &&
2834 "Expected src type to be an integer");
2835 assert(DestTy->isFixedPointType() &&
2836 "Expected dest type to be fixed point type");
2837 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2838 CE->getExprLoc());
2839
2840 case CK_IntegralCast: {
2841 if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
2842 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2843 return Builder.CreateIntCast(Visit(E), ConvertType(DestTy),
2845 "conv");
2846 }
2847 ScalarConversionOpts Opts;
2848 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2849 if (!ICE->isPartOfExplicitCast())
2850 Opts = ScalarConversionOpts(CGF.SanOpts);
2851 }
2852 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2853 CE->getExprLoc(), Opts);
2854 }
2855 case CK_IntegralToFloating: {
2856 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2857 // TODO: Support constrained FP intrinsics.
2858 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2859 if (SrcElTy->isSignedIntegerOrEnumerationType())
2860 return Builder.CreateSIToFP(Visit(E), ConvertType(DestTy), "conv");
2861 return Builder.CreateUIToFP(Visit(E), ConvertType(DestTy), "conv");
2862 }
2863 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2864 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2865 CE->getExprLoc());
2866 }
2867 case CK_FloatingToIntegral: {
2868 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2869 // TODO: Support constrained FP intrinsics.
2870 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2871 if (DstElTy->isSignedIntegerOrEnumerationType())
2872 return Builder.CreateFPToSI(Visit(E), ConvertType(DestTy), "conv");
2873 return Builder.CreateFPToUI(Visit(E), ConvertType(DestTy), "conv");
2874 }
2875 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2876 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2877 CE->getExprLoc());
2878 }
2879 case CK_FloatingCast: {
2880 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2881 // TODO: Support constrained FP intrinsics.
2882 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2883 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2884 if (DstElTy->castAs<BuiltinType>()->getKind() <
2885 SrcElTy->castAs<BuiltinType>()->getKind())
2886 return Builder.CreateFPTrunc(Visit(E), ConvertType(DestTy), "conv");
2887 return Builder.CreateFPExt(Visit(E), ConvertType(DestTy), "conv");
2888 }
2889 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2890 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2891 CE->getExprLoc());
2892 }
2893 case CK_FixedPointToFloating:
2894 case CK_FloatingToFixedPoint: {
2895 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2896 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2897 CE->getExprLoc());
2898 }
2899 case CK_BooleanToSignedIntegral: {
2900 ScalarConversionOpts Opts;
2901 Opts.TreatBooleanAsSigned = true;
2902 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2903 CE->getExprLoc(), Opts);
2904 }
2905 case CK_IntegralToBoolean:
2906 return EmitIntToBoolConversion(Visit(E));
2907 case CK_PointerToBoolean:
2908 return EmitPointerToBoolConversion(Visit(E), E->getType());
2909 case CK_FloatingToBoolean: {
2910 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2911 return EmitFloatToBoolConversion(Visit(E));
2912 }
2913 case CK_MemberPointerToBoolean: {
2914 llvm::Value *MemPtr = Visit(E);
2915 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2916 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2917 }
2918
2919 case CK_FloatingComplexToReal:
2920 case CK_IntegralComplexToReal:
2921 return CGF.EmitComplexExpr(E, false, true).first;
2922
2923 case CK_FloatingComplexToBoolean:
2924 case CK_IntegralComplexToBoolean: {
2926
2927 // TODO: kill this function off, inline appropriate case here
2928 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2929 CE->getExprLoc());
2930 }
2931
2932 case CK_ZeroToOCLOpaqueType: {
2933 assert((DestTy->isEventT() || DestTy->isQueueT() ||
2934 DestTy->isOCLIntelSubgroupAVCType()) &&
2935 "CK_ZeroToOCLEvent cast on non-event type");
2936 return llvm::Constant::getNullValue(ConvertType(DestTy));
2937 }
2938
2939 case CK_IntToOCLSampler:
2940 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2941
2942 case CK_HLSLVectorTruncation: {
2943 assert((DestTy->isVectorType() || DestTy->isBuiltinType()) &&
2944 "Destination type must be a vector or builtin type.");
2945 Value *Vec = Visit(E);
2946 if (auto *VecTy = DestTy->getAs<VectorType>()) {
2947 SmallVector<int> Mask;
2948 unsigned NumElts = VecTy->getNumElements();
2949 for (unsigned I = 0; I != NumElts; ++I)
2950 Mask.push_back(I);
2951
2952 return Builder.CreateShuffleVector(Vec, Mask, "trunc");
2953 }
2954 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.SizeTy);
2955 return Builder.CreateExtractElement(Vec, Zero, "cast.vtrunc");
2956 }
2957 case CK_HLSLElementwiseCast: {
2958 RValue RV = CGF.EmitAnyExpr(E);
2959 SourceLocation Loc = CE->getExprLoc();
2960
2961 assert(RV.isAggregate() && "Not a valid HLSL Elementwise Cast.");
2962 // RHS is an aggregate
2963 LValue SrcVal = CGF.MakeAddrLValue(RV.getAggregateAddress(), E->getType());
2964 return EmitHLSLElementwiseCast(CGF, SrcVal, DestTy, Loc);
2965 }
2966 } // end of switch
2967
2968 llvm_unreachable("unknown scalar cast");
2969}
2970
2971Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2972 CodeGenFunction::StmtExprEvaluation eval(CGF);
2973 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2974 !E->getType()->isVoidType());
2975 if (!RetAlloca.isValid())
2976 return nullptr;
2977 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2978 E->getExprLoc());
2979}
2980
2981Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2982 CodeGenFunction::RunCleanupsScope Scope(CGF);
2983 Value *V = Visit(E->getSubExpr());
2984 // Defend against dominance problems caused by jumps out of expression
2985 // evaluation through the shared cleanup block.
2986 Scope.ForceCleanup({&V});
2987 return V;
2988}
2989
2990//===----------------------------------------------------------------------===//
2991// Unary Operators
2992//===----------------------------------------------------------------------===//
2993
2995 llvm::Value *InVal, bool IsInc,
2996 FPOptions FPFeatures) {
2997 BinOpInfo BinOp;
2998 BinOp.LHS = InVal;
2999 BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
3000 BinOp.Ty = E->getType();
3001 BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
3002 BinOp.FPFeatures = FPFeatures;
3003 BinOp.E = E;
3004 return BinOp;
3005}
3006
3007llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
3008 const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
3009 llvm::Value *Amount =
3010 llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
3011 StringRef Name = IsInc ? "inc" : "dec";
3012 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
3013 case LangOptions::SOB_Defined:
3014 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3015 return Builder.CreateAdd(InVal, Amount, Name);
3016 [[fallthrough]];
3017 case LangOptions::SOB_Undefined:
3018 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
3019 return Builder.CreateNSWAdd(InVal, Amount, Name);
3020 [[fallthrough]];
3021 case LangOptions::SOB_Trapping:
3022 BinOpInfo Info = createBinOpInfoFromIncDec(
3023 E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts()));
3024 if (!E->canOverflow() || CanElideOverflowCheck(CGF.getContext(), Info))
3025 return Builder.CreateNSWAdd(InVal, Amount, Name);
3026 return EmitOverflowCheckedBinOp(Info);
3027 }
3028 llvm_unreachable("Unknown SignedOverflowBehaviorTy");
3029}
3030
3031/// For the purposes of overflow pattern exclusion, does this match the
3032/// "while(i--)" pattern?
3033static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc,
3034 bool isPre, ASTContext &Ctx) {
3035 if (isInc || isPre)
3036 return false;
3037
3038 // -fsanitize-undefined-ignore-overflow-pattern=unsigned-post-decr-while
3041 return false;
3042
3043 // all Parents (usually just one) must be a WhileStmt
3044 for (const auto &Parent : Ctx.getParentMapContext().getParents(*UO))
3045 if (!Parent.get<WhileStmt>())
3046 return false;
3047
3048 return true;
3049}
3050
3051namespace {
3052/// Handles check and update for lastprivate conditional variables.
3053class OMPLastprivateConditionalUpdateRAII {
3054private:
3055 CodeGenFunction &CGF;
3056 const UnaryOperator *E;
3057
3058public:
3059 OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
3060 const UnaryOperator *E)
3061 : CGF(CGF), E(E) {}
3062 ~OMPLastprivateConditionalUpdateRAII() {
3063 if (CGF.getLangOpts().OpenMP)
3065 CGF, E->getSubExpr());
3066 }
3067};
3068} // namespace
3069
3070llvm::Value *
3071ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
3072 bool isInc, bool isPre) {
3073 ApplyAtomGroup Grp(CGF.getDebugInfo());
3074 OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
3075 QualType type = E->getSubExpr()->getType();
3076 llvm::PHINode *atomicPHI = nullptr;
3077 llvm::Value *value;
3078 llvm::Value *input;
3079 llvm::Value *Previous = nullptr;
3080 QualType SrcType = E->getType();
3081
3082 int amount = (isInc ? 1 : -1);
3083 bool isSubtraction = !isInc;
3084
3085 if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
3086 type = atomicTy->getValueType();
3087 if (isInc && type->isBooleanType()) {
3088 llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
3089 if (isPre) {
3090 Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
3091 ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
3092 return Builder.getTrue();
3093 }
3094 // For atomic bool increment, we just store true and return it for
3095 // preincrement, do an atomic swap with true for postincrement
3096 return Builder.CreateAtomicRMW(
3097 llvm::AtomicRMWInst::Xchg, LV.getAddress(), True,
3098 llvm::AtomicOrdering::SequentiallyConsistent);
3099 }
3100 // Special case for atomic increment / decrement on integers, emit
3101 // atomicrmw instructions. We skip this if we want to be doing overflow
3102 // checking, and fall into the slow path with the atomic cmpxchg loop.
3103 if (!type->isBooleanType() && type->isIntegerType() &&
3104 !(type->isUnsignedIntegerType() &&
3105 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3106 CGF.getLangOpts().getSignedOverflowBehavior() !=
3107 LangOptions::SOB_Trapping) {
3108 llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
3109 llvm::AtomicRMWInst::Sub;
3110 llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
3111 llvm::Instruction::Sub;
3112 llvm::Value *amt = CGF.EmitToMemory(
3113 llvm::ConstantInt::get(ConvertType(type), 1, true), type);
3114 llvm::Value *old =
3115 Builder.CreateAtomicRMW(aop, LV.getAddress(), amt,
3116 llvm::AtomicOrdering::SequentiallyConsistent);
3117 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
3118 }
3119 // Special case for atomic increment/decrement on floats.
3120 // Bail out non-power-of-2-sized floating point types (e.g., x86_fp80).
3121 if (type->isFloatingType()) {
3122 llvm::Type *Ty = ConvertType(type);
3123 if (llvm::has_single_bit(Ty->getScalarSizeInBits())) {
3124 llvm::AtomicRMWInst::BinOp aop =
3125 isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
3126 llvm::Instruction::BinaryOps op =
3127 isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
3128 llvm::Value *amt = llvm::ConstantFP::get(Ty, 1.0);
3129 llvm::AtomicRMWInst *old =
3130 CGF.emitAtomicRMWInst(aop, LV.getAddress(), amt,
3131 llvm::AtomicOrdering::SequentiallyConsistent);
3132
3133 return isPre ? Builder.CreateBinOp(op, old, amt) : old;
3134 }
3135 }
3136 value = EmitLoadOfLValue(LV, E->getExprLoc());
3137 input = value;
3138 // For every other atomic operation, we need to emit a load-op-cmpxchg loop
3139 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3140 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3141 value = CGF.EmitToMemory(value, type);
3142 Builder.CreateBr(opBB);
3143 Builder.SetInsertPoint(opBB);
3144 atomicPHI = Builder.CreatePHI(value->getType(), 2);
3145 atomicPHI->addIncoming(value, startBB);
3146 value = atomicPHI;
3147 } else {
3148 value = EmitLoadOfLValue(LV, E->getExprLoc());
3149 input = value;
3150 }
3151
3152 // Special case of integer increment that we have to check first: bool++.
3153 // Due to promotion rules, we get:
3154 // bool++ -> bool = bool + 1
3155 // -> bool = (int)bool + 1
3156 // -> bool = ((int)bool + 1 != 0)
3157 // An interesting aspect of this is that increment is always true.
3158 // Decrement does not have this property.
3159 if (isInc && type->isBooleanType()) {
3160 value = Builder.getTrue();
3161
3162 // Most common case by far: integer increment.
3163 } else if (type->isIntegerType()) {
3164 QualType promotedType;
3165 bool canPerformLossyDemotionCheck = false;
3166
3167 bool excludeOverflowPattern =
3168 matchesPostDecrInWhile(E, isInc, isPre, CGF.getContext());
3169
3171 promotedType = CGF.getContext().getPromotedIntegerType(type);
3172 assert(promotedType != type && "Shouldn't promote to the same type.");
3173 canPerformLossyDemotionCheck = true;
3174 canPerformLossyDemotionCheck &=
3176 CGF.getContext().getCanonicalType(promotedType);
3177 canPerformLossyDemotionCheck &=
3179 type, promotedType);
3180 assert((!canPerformLossyDemotionCheck ||
3181 type->isSignedIntegerOrEnumerationType() ||
3182 promotedType->isSignedIntegerOrEnumerationType() ||
3183 ConvertType(type)->getScalarSizeInBits() ==
3184 ConvertType(promotedType)->getScalarSizeInBits()) &&
3185 "The following check expects that if we do promotion to different "
3186 "underlying canonical type, at least one of the types (either "
3187 "base or promoted) will be signed, or the bitwidths will match.");
3188 }
3189 if (CGF.SanOpts.hasOneOf(
3190 SanitizerKind::ImplicitIntegerArithmeticValueChange |
3191 SanitizerKind::ImplicitBitfieldConversion) &&
3192 canPerformLossyDemotionCheck) {
3193 // While `x += 1` (for `x` with width less than int) is modeled as
3194 // promotion+arithmetics+demotion, and we can catch lossy demotion with
3195 // ease; inc/dec with width less than int can't overflow because of
3196 // promotion rules, so we omit promotion+demotion, which means that we can
3197 // not catch lossy "demotion". Because we still want to catch these cases
3198 // when the sanitizer is enabled, we perform the promotion, then perform
3199 // the increment/decrement in the wider type, and finally
3200 // perform the demotion. This will catch lossy demotions.
3201
3202 // We have a special case for bitfields defined using all the bits of the
3203 // type. In this case we need to do the same trick as for the integer
3204 // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.
3205
3206 value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
3207 Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
3208 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3209 // Do pass non-default ScalarConversionOpts so that sanitizer check is
3210 // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
3211 // checks will take care of the conversion.
3212 ScalarConversionOpts Opts;
3213 if (!LV.isBitField())
3214 Opts = ScalarConversionOpts(CGF.SanOpts);
3215 else if (CGF.SanOpts.has(SanitizerKind::ImplicitBitfieldConversion)) {
3216 Previous = value;
3217 SrcType = promotedType;
3218 }
3219
3220 value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
3221 Opts);
3222
3223 // Note that signed integer inc/dec with width less than int can't
3224 // overflow because of promotion rules; we're just eliding a few steps
3225 // here.
3226 } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
3227 value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
3228 } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
3229 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
3230 !excludeOverflowPattern &&
3232 SanitizerKind::UnsignedIntegerOverflow, E->getType())) {
3233 value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
3234 E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
3235 } else {
3236 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
3237 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3238 }
3239
3240 // Next most common: pointer increment.
3241 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
3242 QualType type = ptr->getPointeeType();
3243
3244 // VLA types don't have constant size.
3245 if (const VariableArrayType *vla
3247 llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
3248 if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
3249 llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
3250 if (CGF.getLangOpts().PointerOverflowDefined)
3251 value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
3252 else
3253 value = CGF.EmitCheckedInBoundsGEP(
3254 elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
3255 E->getExprLoc(), "vla.inc");
3256
3257 // Arithmetic on function pointers (!) is just +-1.
3258 } else if (type->isFunctionType()) {
3259 llvm::Value *amt = Builder.getInt32(amount);
3260
3261 if (CGF.getLangOpts().PointerOverflowDefined)
3262 value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
3263 else
3264 value =
3265 CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
3266 /*SignedIndices=*/false, isSubtraction,
3267 E->getExprLoc(), "incdec.funcptr");
3268
3269 // For everything else, we can just do a simple increment.
3270 } else {
3271 llvm::Value *amt = Builder.getInt32(amount);
3272 llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
3273 if (CGF.getLangOpts().PointerOverflowDefined)
3274 value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
3275 else
3276 value = CGF.EmitCheckedInBoundsGEP(
3277 elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
3278 E->getExprLoc(), "incdec.ptr");
3279 }
3280
3281 // Vector increment/decrement.
3282 } else if (type->isVectorType()) {
3283 if (type->hasIntegerRepresentation()) {
3284 llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
3285
3286 value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
3287 } else {
3288 value = Builder.CreateFAdd(
3289 value,
3290 llvm::ConstantFP::get(value->getType(), amount),
3291 isInc ? "inc" : "dec");
3292 }
3293
3294 // Floating point.
3295 } else if (type->isRealFloatingType()) {
3296 // Add the inc/dec to the real part.
3297 llvm::Value *amt;
3298 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
3299
3300 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3301 // Another special case: half FP increment should be done via float
3303 value = Builder.CreateCall(
3304 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
3305 CGF.CGM.FloatTy),
3306 input, "incdec.conv");
3307 } else {
3308 value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
3309 }
3310 }
3311
3312 if (value->getType()->isFloatTy())
3313 amt = llvm::ConstantFP::get(VMContext,
3314 llvm::APFloat(static_cast<float>(amount)));
3315 else if (value->getType()->isDoubleTy())
3316 amt = llvm::ConstantFP::get(VMContext,
3317 llvm::APFloat(static_cast<double>(amount)));
3318 else {
3319 // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
3320 // Convert from float.
3321 llvm::APFloat F(static_cast<float>(amount));
3322 bool ignored;
3323 const llvm::fltSemantics *FS;
3324 // Don't use getFloatTypeSemantics because Half isn't
3325 // necessarily represented using the "half" LLVM type.
3326 if (value->getType()->isFP128Ty())
3327 FS = &CGF.getTarget().getFloat128Format();
3328 else if (value->getType()->isHalfTy())
3329 FS = &CGF.getTarget().getHalfFormat();
3330 else if (value->getType()->isBFloatTy())
3331 FS = &CGF.getTarget().getBFloat16Format();
3332 else if (value->getType()->isPPC_FP128Ty())
3333 FS = &CGF.getTarget().getIbm128Format();
3334 else
3335 FS = &CGF.getTarget().getLongDoubleFormat();
3336 F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
3337 amt = llvm::ConstantFP::get(VMContext, F);
3338 }
3339 value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
3340
3341 if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3343 value = Builder.CreateCall(
3344 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
3345 CGF.CGM.FloatTy),
3346 value, "incdec.conv");
3347 } else {
3348 value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
3349 }
3350 }
3351
3352 // Fixed-point types.
3353 } else if (type->isFixedPointType()) {
3354 // Fixed-point types are tricky. In some cases, it isn't possible to
3355 // represent a 1 or a -1 in the type at all. Piggyback off of
3356 // EmitFixedPointBinOp to avoid having to reimplement saturation.
3357 BinOpInfo Info;
3358 Info.E = E;
3359 Info.Ty = E->getType();
3360 Info.Opcode = isInc ? BO_Add : BO_Sub;
3361 Info.LHS = value;
3362 Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
3363 // If the type is signed, it's better to represent this as +(-1) or -(-1),
3364 // since -1 is guaranteed to be representable.
3365 if (type->isSignedFixedPointType()) {
3366 Info.Opcode = isInc ? BO_Sub : BO_Add;
3367 Info.RHS = Builder.CreateNeg(Info.RHS);
3368 }
3369 // Now, convert from our invented integer literal to the type of the unary
3370 // op. This will upscale and saturate if necessary. This value can become
3371 // undef in some cases.
3372 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3373 auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
3374 Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
3375 value = EmitFixedPointBinOp(Info);
3376
3377 // Objective-C pointer types.
3378 } else {
3379 const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
3380
3381 CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
3382 if (!isInc) size = -size;
3383 llvm::Value *sizeValue =
3384 llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
3385
3386 if (CGF.getLangOpts().PointerOverflowDefined)
3387 value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
3388 else
3389 value = CGF.EmitCheckedInBoundsGEP(
3390 CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
3391 E->getExprLoc(), "incdec.objptr");
3392 value = Builder.CreateBitCast(value, input->getType());
3393 }
3394
3395 if (atomicPHI) {
3396 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3397 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3398 auto Pair = CGF.EmitAtomicCompareExchange(
3399 LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
3400 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
3401 llvm::Value *success = Pair.second;
3402 atomicPHI->addIncoming(old, curBlock);
3403 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3404 Builder.SetInsertPoint(contBB);
3405 return isPre ? value : input;
3406 }
3407
3408 // Store the updated result through the lvalue.
3409 if (LV.isBitField()) {
3410 Value *Src = Previous ? Previous : value;
3411 CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
3412 CGF.EmitBitfieldConversionCheck(Src, SrcType, value, E->getType(),
3413 LV.getBitFieldInfo(), E->getExprLoc());
3414 } else
3415 CGF.EmitStoreThroughLValue(RValue::get(value), LV);
3416
3417 // If this is a postinc, return the value read from memory, otherwise use the
3418 // updated value.
3419 return isPre ? value : input;
3420}
3421
3422
3423Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3424 QualType PromotionType) {
3425 QualType promotionTy = PromotionType.isNull()
3426 ? getPromotionType(E->getSubExpr()->getType())
3427 : PromotionType;
3428 Value *result = VisitPlus(E, promotionTy);
3429 if (result && !promotionTy.isNull())
3430 result = EmitUnPromotedValue(result, E->getType());
3431 return result;
3432}
3433
3434Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3435 QualType PromotionType) {
3436 // This differs from gcc, though, most likely due to a bug in gcc.
3437 TestAndClearIgnoreResultAssign();
3438 if (!PromotionType.isNull())
3439 return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3440 return Visit(E->getSubExpr());
3441}
3442
3443Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3444 QualType PromotionType) {
3445 QualType promotionTy = PromotionType.isNull()
3446 ? getPromotionType(E->getSubExpr()->getType())
3447 : PromotionType;
3448 Value *result = VisitMinus(E, promotionTy);
3449 if (result && !promotionTy.isNull())
3450 result = EmitUnPromotedValue(result, E->getType());
3451 return result;
3452}
3453
3454Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3455 QualType PromotionType) {
3456 TestAndClearIgnoreResultAssign();
3457 Value *Op;
3458 if (!PromotionType.isNull())
3459 Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3460 else
3461 Op = Visit(E->getSubExpr());
3462
3463 // Generate a unary FNeg for FP ops.
3464 if (Op->getType()->isFPOrFPVectorTy())
3465 return Builder.CreateFNeg(Op, "fneg");
3466
3467 // Emit unary minus with EmitSub so we handle overflow cases etc.
3468 BinOpInfo BinOp;
3469 BinOp.RHS = Op;
3470 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
3471 BinOp.Ty = E->getType();
3472 BinOp.Opcode = BO_Sub;
3473 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3474 BinOp.E = E;
3475 return EmitSub(BinOp);
3476}
3477
3478Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3479 TestAndClearIgnoreResultAssign();
3480 Value *Op = Visit(E->getSubExpr());
3481 return Builder.CreateNot(Op, "not");
3482}
3483
3484Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
3485 // Perform vector logical not on comparison with zero vector.
3486 if (E->getType()->isVectorType() &&
3487 E->getType()->castAs<VectorType>()->getVectorKind() ==
3488 VectorKind::Generic) {
3489 Value *Oper = Visit(E->getSubExpr());
3490 Value *Zero = llvm::Constant::getNullValue(Oper->getType());
3491 Value *Result;
3492 if (Oper->getType()->isFPOrFPVectorTy()) {
3493 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
3494 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
3495 Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
3496 } else
3497 Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
3498 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
3499 }
3500
3501 // Compare operand to zero.
3502 Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
3503
3504 // Invert value.
3505 // TODO: Could dynamically modify easy computations here. For example, if
3506 // the operand is an icmp ne, turn into icmp eq.
3507 BoolVal = Builder.CreateNot(BoolVal, "lnot");
3508
3509 // ZExt result to the expr type.
3510 return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
3511}
3512
3513Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
3514 // Try folding the offsetof to a constant.
3515 Expr::EvalResult EVResult;
3516 if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
3517 llvm::APSInt Value = EVResult.Val.getInt();
3518 return Builder.getInt(Value);
3519 }
3520
3521 // Loop over the components of the offsetof to compute the value.
3522 unsigned n = E->getNumComponents();
3523 llvm::Type* ResultType = ConvertType(E->getType());
3524 llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
3525 QualType CurrentType = E->getTypeSourceInfo()->getType();
3526 for (unsigned i = 0; i != n; ++i) {
3527 OffsetOfNode ON = E->getComponent(i);
3528 llvm::Value *Offset = nullptr;
3529 switch (ON.getKind()) {
3530 case OffsetOfNode::Array: {
3531 // Compute the index
3532 Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
3533 llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
3534 bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
3535 Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
3536
3537 // Save the element type
3538 CurrentType =
3539 CGF.getContext().getAsArrayType(CurrentType)->getElementType();
3540
3541 // Compute the element size
3542 llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
3543 CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
3544
3545 // Multiply out to compute the result
3546 Offset = Builder.CreateMul(Idx, ElemSize);
3547 break;
3548 }
3549
3550 case OffsetOfNode::Field: {
3551 FieldDecl *MemberDecl = ON.getField();
3552 auto *RD = CurrentType->castAsRecordDecl();
3553 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
3554
3555 // Compute the index of the field in its parent.
3556 unsigned i = 0;
3557 // FIXME: It would be nice if we didn't have to loop here!
3558 for (RecordDecl::field_iterator Field = RD->field_begin(),
3559 FieldEnd = RD->field_end();
3560 Field != FieldEnd; ++Field, ++i) {
3561 if (*Field == MemberDecl)
3562 break;
3563 }
3564 assert(i < RL.getFieldCount() && "offsetof field in wrong type");
3565
3566 // Compute the offset to the field
3567 int64_t OffsetInt = RL.getFieldOffset(i) /
3568 CGF.getContext().getCharWidth();
3569 Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
3570
3571 // Save the element type.
3572 CurrentType = MemberDecl->getType();
3573 break;
3574 }
3575
3577 llvm_unreachable("dependent __builtin_offsetof");
3578
3579 case OffsetOfNode::Base: {
3580 if (ON.getBase()->isVirtual()) {
3581 CGF.ErrorUnsupported(E, "virtual base in offsetof");
3582 continue;
3583 }
3584
3585 const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(
3586 CurrentType->castAsCanonical<RecordType>()->getDecl());
3587
3588 // Save the element type.
3589 CurrentType = ON.getBase()->getType();
3590
3591 // Compute the offset to the base.
3592 auto *BaseRD = CurrentType->castAsCXXRecordDecl();
3593 CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
3594 Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
3595 break;
3596 }
3597 }
3598 Result = Builder.CreateAdd(Result, Offset);
3599 }
3600 return Result;
3601}
3602
3603/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
3604/// argument of the sizeof expression as an integer.
3605Value *
3606ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
3607 const UnaryExprOrTypeTraitExpr *E) {
3608 QualType TypeToSize = E->getTypeOfArgument();
3609 if (auto Kind = E->getKind();
3610 Kind == UETT_SizeOf || Kind == UETT_DataSizeOf || Kind == UETT_CountOf) {
3611 if (const VariableArrayType *VAT =
3612 CGF.getContext().getAsVariableArrayType(TypeToSize)) {
3613 // For _Countof, we only want to evaluate if the extent is actually
3614 // variable as opposed to a multi-dimensional array whose extent is
3615 // constant but whose element type is variable.
3616 bool EvaluateExtent = true;
3617 if (Kind == UETT_CountOf && VAT->getElementType()->isArrayType()) {
3618 EvaluateExtent =
3619 !VAT->getSizeExpr()->isIntegerConstantExpr(CGF.getContext());
3620 }
3621 if (EvaluateExtent) {
3622 if (E->isArgumentType()) {
3623 // sizeof(type) - make sure to emit the VLA size.
3624 CGF.EmitVariablyModifiedType(TypeToSize);
3625 } else {
3626 // C99 6.5.3.4p2: If the argument is an expression of type
3627 // VLA, it is evaluated.
3629 }
3630
3631 // For _Countof, we just want to return the size of a single dimension.
3632 if (Kind == UETT_CountOf)
3633 return CGF.getVLAElements1D(VAT).NumElts;
3634
3635 // For sizeof and __datasizeof, we need to scale the number of elements
3636 // by the size of the array element type.
3637 auto VlaSize = CGF.getVLASize(VAT);
3638
3639 // Scale the number of non-VLA elements by the non-VLA element size.
3640 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
3641 if (!eltSize.isOne())
3642 return CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize),
3643 VlaSize.NumElts);
3644 return VlaSize.NumElts;
3645 }
3646 }
3647 } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
3648 auto Alignment =
3649 CGF.getContext()
3652 .getQuantity();
3653 return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
3654 } else if (E->getKind() == UETT_VectorElements) {
3655 auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));
3656 return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());
3657 }
3658
3659 // If this isn't sizeof(vla), the result must be constant; use the constant
3660 // folding logic so we don't have to duplicate it here.
3661 return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
3662}
3663
3664Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3665 QualType PromotionType) {
3666 QualType promotionTy = PromotionType.isNull()
3667 ? getPromotionType(E->getSubExpr()->getType())
3668 : PromotionType;
3669 Value *result = VisitReal(E, promotionTy);
3670 if (result && !promotionTy.isNull())
3671 result = EmitUnPromotedValue(result, E->getType());
3672 return result;
3673}
3674
3675Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
3676 QualType PromotionType) {
3677 Expr *Op = E->getSubExpr();
3678 if (Op->getType()->isAnyComplexType()) {
3679 // If it's an l-value, load through the appropriate subobject l-value.
3680 // Note that we have to ask E because Op might be an l-value that
3681 // this won't work for, e.g. an Obj-C property.
3682 if (E->isGLValue()) {
3683 if (!PromotionType.isNull()) {
3685 Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
3686 PromotionType = PromotionType->isAnyComplexType()
3687 ? PromotionType
3688 : CGF.getContext().getComplexType(PromotionType);
3689 return result.first ? CGF.EmitPromotedValue(result, PromotionType).first
3690 : result.first;
3691 }
3692
3693 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3694 .getScalarVal();
3695 }
3696 // Otherwise, calculate and project.
3697 return CGF.EmitComplexExpr(Op, false, true).first;
3698 }
3699
3700 if (!PromotionType.isNull())
3701 return CGF.EmitPromotedScalarExpr(Op, PromotionType);
3702 return Visit(Op);
3703}
3704
3705Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3706 QualType PromotionType) {
3707 QualType promotionTy = PromotionType.isNull()
3708 ? getPromotionType(E->getSubExpr()->getType())
3709 : PromotionType;
3710 Value *result = VisitImag(E, promotionTy);
3711 if (result && !promotionTy.isNull())
3712 result = EmitUnPromotedValue(result, E->getType());
3713 return result;
3714}
3715
3716Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
3717 QualType PromotionType) {
3718 Expr *Op = E->getSubExpr();
3719 if (Op->getType()->isAnyComplexType()) {
3720 // If it's an l-value, load through the appropriate subobject l-value.
3721 // Note that we have to ask E because Op might be an l-value that
3722 // this won't work for, e.g. an Obj-C property.
3723 if (Op->isGLValue()) {
3724 if (!PromotionType.isNull()) {
3726 Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
3727 PromotionType = PromotionType->isAnyComplexType()
3728 ? PromotionType
3729 : CGF.getContext().getComplexType(PromotionType);
3730 return result.second
3731 ? CGF.EmitPromotedValue(result, PromotionType).second
3732 : result.second;
3733 }
3734
3735 return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
3736 .getScalarVal();
3737 }
3738 // Otherwise, calculate and project.
3739 return CGF.EmitComplexExpr(Op, true, false).second;
3740 }
3741
3742 // __imag on a scalar returns zero. Emit the subexpr to ensure side
3743 // effects are evaluated, but not the actual value.
3744 if (Op->isGLValue())
3745 CGF.EmitLValue(Op);
3746 else if (!PromotionType.isNull())
3747 CGF.EmitPromotedScalarExpr(Op, PromotionType);
3748 else
3749 CGF.EmitScalarExpr(Op, true);
3750 if (!PromotionType.isNull())
3751 return llvm::Constant::getNullValue(ConvertType(PromotionType));
3752 return llvm::Constant::getNullValue(ConvertType(E->getType()));
3753}
3754
3755//===----------------------------------------------------------------------===//
3756// Binary Operators
3757//===----------------------------------------------------------------------===//
3758
3759Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3760 QualType PromotionType) {
3761 return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");
3762}
3763
3764Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3765 QualType ExprType) {
3766 return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType), "unpromotion");
3767}
3768
3769Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3770 E = E->IgnoreParens();
3771 if (auto BO = dyn_cast<BinaryOperator>(E)) {
3772 switch (BO->getOpcode()) {
3773#define HANDLE_BINOP(OP) \
3774 case BO_##OP: \
3775 return Emit##OP(EmitBinOps(BO, PromotionType));
3776 HANDLE_BINOP(Add)
3777 HANDLE_BINOP(Sub)
3778 HANDLE_BINOP(Mul)
3779 HANDLE_BINOP(Div)
3780#undef HANDLE_BINOP
3781 default:
3782 break;
3783 }
3784 } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
3785 switch (UO->getOpcode()) {
3786 case UO_Imag:
3787 return VisitImag(UO, PromotionType);
3788 case UO_Real:
3789 return VisitReal(UO, PromotionType);
3790 case UO_Minus:
3791 return VisitMinus(UO, PromotionType);
3792 case UO_Plus:
3793 return VisitPlus(UO, PromotionType);
3794 default:
3795 break;
3796 }
3797 }
3798 auto result = Visit(const_cast<Expr *>(E));
3799 if (result) {
3800 if (!PromotionType.isNull())
3801 return EmitPromotedValue(result, PromotionType);
3802 else
3803 return EmitUnPromotedValue(result, E->getType());
3804 }
3805 return result;
3806}
3807
3808BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
3809 QualType PromotionType) {
3810 TestAndClearIgnoreResultAssign();
3811 BinOpInfo Result;
3812 Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);
3813 Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);
3814 if (!PromotionType.isNull())
3815 Result.Ty = PromotionType;
3816 else
3817 Result.Ty = E->getType();
3818 Result.Opcode = E->getOpcode();
3819 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3820 Result.E = E;
3821 return Result;
3822}
3823
3824LValue ScalarExprEmitter::EmitCompoundAssignLValue(
3825 const CompoundAssignOperator *E,
3826 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
3827 Value *&Result) {
3828 QualType LHSTy = E->getLHS()->getType();
3829 BinOpInfo OpInfo;
3830
3833
3834 // Emit the RHS first. __block variables need to have the rhs evaluated
3835 // first, plus this should improve codegen a little.
3836
3837 QualType PromotionTypeCR;
3838 PromotionTypeCR = getPromotionType(E->getComputationResultType());
3839 if (PromotionTypeCR.isNull())
3840 PromotionTypeCR = E->getComputationResultType();
3841 QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
3842 QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
3843 if (!PromotionTypeRHS.isNull())
3844 OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);
3845 else
3846 OpInfo.RHS = Visit(E->getRHS());
3847 OpInfo.Ty = PromotionTypeCR;
3848 OpInfo.Opcode = E->getOpcode();
3849 OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3850 OpInfo.E = E;
3851 // Load/convert the LHS.
3852 LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
3853
3854 llvm::PHINode *atomicPHI = nullptr;
3855 if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
3856 QualType type = atomicTy->getValueType();
3857 if (!type->isBooleanType() && type->isIntegerType() &&
3858 !(type->isUnsignedIntegerType() &&
3859 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
3860 CGF.getLangOpts().getSignedOverflowBehavior() !=
3861 LangOptions::SOB_Trapping) {
3862 llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
3863 llvm::Instruction::BinaryOps Op;
3864 switch (OpInfo.Opcode) {
3865 // We don't have atomicrmw operands for *, %, /, <<, >>
3866 case BO_MulAssign: case BO_DivAssign:
3867 case BO_RemAssign:
3868 case BO_ShlAssign:
3869 case BO_ShrAssign:
3870 break;
3871 case BO_AddAssign:
3872 AtomicOp = llvm::AtomicRMWInst::Add;
3873 Op = llvm::Instruction::Add;
3874 break;
3875 case BO_SubAssign:
3876 AtomicOp = llvm::AtomicRMWInst::Sub;
3877 Op = llvm::Instruction::Sub;
3878 break;
3879 case BO_AndAssign:
3880 AtomicOp = llvm::AtomicRMWInst::And;
3881 Op = llvm::Instruction::And;
3882 break;
3883 case BO_XorAssign:
3884 AtomicOp = llvm::AtomicRMWInst::Xor;
3885 Op = llvm::Instruction::Xor;
3886 break;
3887 case BO_OrAssign:
3888 AtomicOp = llvm::AtomicRMWInst::Or;
3889 Op = llvm::Instruction::Or;
3890 break;
3891 default:
3892 llvm_unreachable("Invalid compound assignment type");
3893 }
3894 if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
3895 llvm::Value *Amt = CGF.EmitToMemory(
3896 EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
3897 E->getExprLoc()),
3898 LHSTy);
3899
3900 llvm::AtomicRMWInst *OldVal =
3901 CGF.emitAtomicRMWInst(AtomicOp, LHSLV.getAddress(), Amt);
3902
3903 // Since operation is atomic, the result type is guaranteed to be the
3904 // same as the input in LLVM terms.
3905 Result = Builder.CreateBinOp(Op, OldVal, Amt);
3906 return LHSLV;
3907 }
3908 }
3909 // FIXME: For floating point types, we should be saving and restoring the
3910 // floating point environment in the loop.
3911 llvm::BasicBlock *startBB = Builder.GetInsertBlock();
3912 llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
3913 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3914 OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
3915 Builder.CreateBr(opBB);
3916 Builder.SetInsertPoint(opBB);
3917 atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
3918 atomicPHI->addIncoming(OpInfo.LHS, startBB);
3919 OpInfo.LHS = atomicPHI;
3920 }
3921 else
3922 OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
3923
3924 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
3925 SourceLocation Loc = E->getExprLoc();
3926 if (!PromotionTypeLHS.isNull())
3927 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
3928 E->getExprLoc());
3929 else
3930 OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
3931 E->getComputationLHSType(), Loc);
3932
3933 // Expand the binary operator.
3934 Result = (this->*Func)(OpInfo);
3935
3936 // Convert the result back to the LHS type,
3937 // potentially with Implicit Conversion sanitizer check.
3938 // If LHSLV is a bitfield, use default ScalarConversionOpts
3939 // to avoid emit any implicit integer checks.
3940 Value *Previous = nullptr;
3941 if (LHSLV.isBitField()) {
3942 Previous = Result;
3943 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc);
3944 } else
3945 Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
3946 ScalarConversionOpts(CGF.SanOpts));
3947
3948 if (atomicPHI) {
3949 llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3950 llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3951 auto Pair = CGF.EmitAtomicCompareExchange(
3952 LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
3953 llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
3954 llvm::Value *success = Pair.second;
3955 atomicPHI->addIncoming(old, curBlock);
3956 Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3957 Builder.SetInsertPoint(contBB);
3958 return LHSLV;
3959 }
3960
3961 // Store the result value into the LHS lvalue. Bit-fields are handled
3962 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
3963 // 'An assignment expression has the value of the left operand after the
3964 // assignment...'.
3965 if (LHSLV.isBitField()) {
3966 Value *Src = Previous ? Previous : Result;
3967 QualType SrcType = E->getRHS()->getType();
3968 QualType DstType = E->getLHS()->getType();
3970 CGF.EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
3971 LHSLV.getBitFieldInfo(), E->getExprLoc());
3972 } else
3974
3975 if (CGF.getLangOpts().OpenMP)
3977 E->getLHS());
3978 return LHSLV;
3979}
3980
3981Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3982 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3983 bool Ignore = TestAndClearIgnoreResultAssign();
3984 Value *RHS = nullptr;
3985 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
3986
3987 // If the result is clearly ignored, return now.
3988 if (Ignore)
3989 return nullptr;
3990
3991 // The result of an assignment in C is the assigned r-value.
3992 if (!CGF.getLangOpts().CPlusPlus)
3993 return RHS;
3994
3995 // If the lvalue is non-volatile, return the computed value of the assignment.
3996 if (!LHS.isVolatileQualified())
3997 return RHS;
3998
3999 // Otherwise, reload the value.
4000 return EmitLoadOfLValue(LHS, E->getExprLoc());
4001}
4002
4003void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
4004 const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
4005 SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>, 2>
4006 Checks;
4007
4008 if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
4009 Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
4010 SanitizerKind::SO_IntegerDivideByZero));
4011 }
4012
4013 const auto *BO = cast<BinaryOperator>(Ops.E);
4014 if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
4015 Ops.Ty->hasSignedIntegerRepresentation() &&
4016 !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
4017 Ops.mayHaveIntegerOverflow()) {
4018 llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
4019
4020 llvm::Value *IntMin =
4021 Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
4022 llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
4023
4024 llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
4025 llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
4026 llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
4027 Checks.push_back(
4028 std::make_pair(NotOverflow, SanitizerKind::SO_SignedIntegerOverflow));
4029 }
4030
4031 if (Checks.size() > 0)
4032 EmitBinOpCheck(Checks, Ops);
4033}
4034
4035Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
4036 {
4037 SanitizerDebugLocation SanScope(&CGF,
4038 {SanitizerKind::SO_IntegerDivideByZero,
4039 SanitizerKind::SO_SignedIntegerOverflow,
4040 SanitizerKind::SO_FloatDivideByZero},
4041 SanitizerHandler::DivremOverflow);
4042 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
4043 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
4044 Ops.Ty->isIntegerType() &&
4045 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4046 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4047 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
4048 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
4049 Ops.Ty->isRealFloatingType() &&
4050 Ops.mayHaveFloatDivisionByZero()) {
4051 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4052 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
4053 EmitBinOpCheck(
4054 std::make_pair(NonZero, SanitizerKind::SO_FloatDivideByZero), Ops);
4055 }
4056 }
4057
4058 if (Ops.Ty->isConstantMatrixType()) {
4059 llvm::MatrixBuilder MB(Builder);
4060 // We need to check the types of the operands of the operator to get the
4061 // correct matrix dimensions.
4062 auto *BO = cast<BinaryOperator>(Ops.E);
4063 (void)BO;
4064 assert(
4066 "first operand must be a matrix");
4067 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
4068 "second operand must be an arithmetic type");
4069 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
4070 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
4071 Ops.Ty->hasUnsignedIntegerRepresentation());
4072 }
4073
4074 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
4075 llvm::Value *Val;
4076 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
4077 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
4078 CGF.SetDivFPAccuracy(Val);
4079 return Val;
4080 }
4081 else if (Ops.isFixedPointOp())
4082 return EmitFixedPointBinOp(Ops);
4083 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
4084 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
4085 else
4086 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
4087}
4088
4089Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
4090 // Rem in C can't be a floating point type: C99 6.5.5p2.
4091 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
4092 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
4093 Ops.Ty->isIntegerType() &&
4094 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
4095 SanitizerDebugLocation SanScope(&CGF,
4096 {SanitizerKind::SO_IntegerDivideByZero,
4097 SanitizerKind::SO_SignedIntegerOverflow},
4098 SanitizerHandler::DivremOverflow);
4099 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
4100 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
4101 }
4102
4103 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4104 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
4105
4106 if (CGF.getLangOpts().HLSL && Ops.Ty->hasFloatingRepresentation())
4107 return Builder.CreateFRem(Ops.LHS, Ops.RHS, "rem");
4108
4109 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
4110}
4111
4112Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
4113 unsigned IID;
4114 unsigned OpID = 0;
4115 SanitizerHandler OverflowKind;
4116
4117 bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
4118 switch (Ops.Opcode) {
4119 case BO_Add:
4120 case BO_AddAssign:
4121 OpID = 1;
4122 IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
4123 llvm::Intrinsic::uadd_with_overflow;
4124 OverflowKind = SanitizerHandler::AddOverflow;
4125 break;
4126 case BO_Sub:
4127 case BO_SubAssign:
4128 OpID = 2;
4129 IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
4130 llvm::Intrinsic::usub_with_overflow;
4131 OverflowKind = SanitizerHandler::SubOverflow;
4132 break;
4133 case BO_Mul:
4134 case BO_MulAssign:
4135 OpID = 3;
4136 IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
4137 llvm::Intrinsic::umul_with_overflow;
4138 OverflowKind = SanitizerHandler::MulOverflow;
4139 break;
4140 default:
4141 llvm_unreachable("Unsupported operation for overflow detection");
4142 }
4143 OpID <<= 1;
4144 if (isSigned)
4145 OpID |= 1;
4146
4147 SanitizerDebugLocation SanScope(&CGF,
4148 {SanitizerKind::SO_SignedIntegerOverflow,
4149 SanitizerKind::SO_UnsignedIntegerOverflow},
4150 OverflowKind);
4151 llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
4152
4153 llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
4154
4155 Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
4156 Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
4157 Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
4158
4159 // Handle overflow with llvm.trap if no custom handler has been specified.
4160 const std::string *handlerName =
4162 if (handlerName->empty()) {
4163 // If the signed-integer-overflow sanitizer is enabled, emit a call to its
4164 // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
4165 if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
4166 llvm::Value *NotOverflow = Builder.CreateNot(overflow);
4168 isSigned ? SanitizerKind::SO_SignedIntegerOverflow
4169 : SanitizerKind::SO_UnsignedIntegerOverflow;
4170 EmitBinOpCheck(std::make_pair(NotOverflow, Ordinal), Ops);
4171 } else
4172 CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
4173 return result;
4174 }
4175
4176 // Branch in case of overflow.
4177 llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
4178 llvm::BasicBlock *continueBB =
4179 CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
4180 llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
4181
4182 Builder.CreateCondBr(overflow, overflowBB, continueBB);
4183
4184 // If an overflow handler is set, then we want to call it and then use its
4185 // result, if it returns.
4186 Builder.SetInsertPoint(overflowBB);
4187
4188 // Get the overflow handler.
4189 llvm::Type *Int8Ty = CGF.Int8Ty;
4190 llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
4191 llvm::FunctionType *handlerTy =
4192 llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
4193 llvm::FunctionCallee handler =
4194 CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
4195
4196 // Sign extend the args to 64-bit, so that we can use the same handler for
4197 // all types of overflow.
4198 llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
4199 llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
4200
4201 // Call the handler with the two arguments, the operation, and the size of
4202 // the result.
4203 llvm::Value *handlerArgs[] = {
4204 lhs,
4205 rhs,
4206 Builder.getInt8(OpID),
4207 Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
4208 };
4209 llvm::Value *handlerResult =
4210 CGF.EmitNounwindRuntimeCall(handler, handlerArgs);
4211
4212 // Truncate the result back to the desired size.
4213 handlerResult = Builder.CreateTrunc(handlerResult, opTy);
4214 Builder.CreateBr(continueBB);
4215
4216 Builder.SetInsertPoint(continueBB);
4217 llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
4218 phi->addIncoming(result, initialBB);
4219 phi->addIncoming(handlerResult, overflowBB);
4220
4221 return phi;
4222}
4223
4224/// BO_Add/BO_Sub are handled by EmitPointerWithAlignment to preserve alignment
4225/// information.
4226/// This function is used for BO_AddAssign/BO_SubAssign.
4227static Value *emitPointerArithmetic(CodeGenFunction &CGF, const BinOpInfo &op,
4228 bool isSubtraction) {
4229 // Must have binary (not unary) expr here. Unary pointer
4230 // increment/decrement doesn't use this path.
4232
4233 Value *pointer = op.LHS;
4234 Expr *pointerOperand = expr->getLHS();
4235 Value *index = op.RHS;
4236 Expr *indexOperand = expr->getRHS();
4237
4238 // In a subtraction, the LHS is always the pointer.
4239 if (!isSubtraction && !pointer->getType()->isPointerTy()) {
4240 std::swap(pointer, index);
4241 std::swap(pointerOperand, indexOperand);
4242 }
4243
4244 return CGF.EmitPointerArithmetic(expr, pointerOperand, pointer, indexOperand,
4245 index, isSubtraction);
4246}
4247
4248/// Emit pointer + index arithmetic.
4250 const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer,
4251 Expr *indexOperand, llvm::Value *index, bool isSubtraction) {
4252 bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
4253
4254 unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
4255 auto &DL = CGM.getDataLayout();
4256 auto *PtrTy = cast<llvm::PointerType>(pointer->getType());
4257
4258 // Some versions of glibc and gcc use idioms (particularly in their malloc
4259 // routines) that add a pointer-sized integer (known to be a pointer value)
4260 // to a null pointer in order to cast the value back to an integer or as
4261 // part of a pointer alignment algorithm. This is undefined behavior, but
4262 // we'd like to be able to compile programs that use it.
4263 //
4264 // Normally, we'd generate a GEP with a null-pointer base here in response
4265 // to that code, but it's also UB to dereference a pointer created that
4266 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
4267 // generate a direct cast of the integer value to a pointer.
4268 //
4269 // The idiom (p = nullptr + N) is not met if any of the following are true:
4270 //
4271 // The operation is subtraction.
4272 // The index is not pointer-sized.
4273 // The pointer type is not byte-sized.
4274 //
4275 // Note that we do not suppress the pointer overflow check in this case.
4277 getContext(), BO->getOpcode(), pointerOperand, indexOperand)) {
4278 llvm::Value *Ptr = Builder.CreateIntToPtr(index, pointer->getType());
4279 if (getLangOpts().PointerOverflowDefined ||
4280 !SanOpts.has(SanitizerKind::PointerOverflow) ||
4281 NullPointerIsDefined(Builder.GetInsertBlock()->getParent(),
4282 PtrTy->getPointerAddressSpace()))
4283 return Ptr;
4284 // The inbounds GEP of null is valid iff the index is zero.
4285 auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
4286 auto CheckHandler = SanitizerHandler::PointerOverflow;
4287 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
4288 llvm::Value *IsZeroIndex = Builder.CreateIsNull(index);
4289 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(BO->getExprLoc())};
4290 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
4291 llvm::Value *IntPtr = llvm::Constant::getNullValue(IntPtrTy);
4292 llvm::Value *ComputedGEP = Builder.CreateZExtOrTrunc(index, IntPtrTy);
4293 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
4294 EmitCheck({{IsZeroIndex, CheckOrdinal}}, CheckHandler, StaticArgs,
4295 DynamicArgs);
4296 return Ptr;
4297 }
4298
4299 if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
4300 // Zero-extend or sign-extend the pointer value according to
4301 // whether the index is signed or not.
4302 index = Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
4303 "idx.ext");
4304 }
4305
4306 // If this is subtraction, negate the index.
4307 if (isSubtraction)
4308 index = Builder.CreateNeg(index, "idx.neg");
4309
4310 if (SanOpts.has(SanitizerKind::ArrayBounds))
4311 EmitBoundsCheck(BO, pointerOperand, index, indexOperand->getType(),
4312 /*Accessed*/ false);
4313
4314 const PointerType *pointerType =
4315 pointerOperand->getType()->getAs<PointerType>();
4316 if (!pointerType) {
4317 QualType objectType = pointerOperand->getType()
4319 ->getPointeeType();
4320 llvm::Value *objectSize =
4321 CGM.getSize(getContext().getTypeSizeInChars(objectType));
4322
4323 index = Builder.CreateMul(index, objectSize);
4324
4325 llvm::Value *result = Builder.CreateGEP(Int8Ty, pointer, index, "add.ptr");
4326 return Builder.CreateBitCast(result, pointer->getType());
4327 }
4328
4329 QualType elementType = pointerType->getPointeeType();
4330 if (const VariableArrayType *vla =
4331 getContext().getAsVariableArrayType(elementType)) {
4332 // The element count here is the total number of non-VLA elements.
4333 llvm::Value *numElements = getVLASize(vla).NumElts;
4334
4335 // Effectively, the multiply by the VLA size is part of the GEP.
4336 // GEP indexes are signed, and scaling an index isn't permitted to
4337 // signed-overflow, so we use the same semantics for our explicit
4338 // multiply. We suppress this if overflow is not undefined behavior.
4339 llvm::Type *elemTy = ConvertTypeForMem(vla->getElementType());
4340 if (getLangOpts().PointerOverflowDefined) {
4341 index = Builder.CreateMul(index, numElements, "vla.index");
4342 pointer = Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
4343 } else {
4344 index = Builder.CreateNSWMul(index, numElements, "vla.index");
4345 pointer =
4346 EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned,
4347 isSubtraction, BO->getExprLoc(), "add.ptr");
4348 }
4349 return pointer;
4350 }
4351
4352 // Explicitly handle GNU void* and function pointer arithmetic extensions. The
4353 // GNU void* casts amount to no-ops since our void* type is i8*, but this is
4354 // future proof.
4355 llvm::Type *elemTy;
4356 if (elementType->isVoidType() || elementType->isFunctionType())
4357 elemTy = Int8Ty;
4358 else
4359 elemTy = ConvertTypeForMem(elementType);
4360
4361 if (getLangOpts().PointerOverflowDefined)
4362 return Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
4363
4364 return EmitCheckedInBoundsGEP(elemTy, pointer, index, isSigned, isSubtraction,
4365 BO->getExprLoc(), "add.ptr");
4366}
4367
4368// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
4369// Addend. Use negMul and negAdd to negate the first operand of the Mul or
4370// the add operand respectively. This allows fmuladd to represent a*b-c, or
4371// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
4372// efficient operations.
static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
                           const CodeGenFunction &CGF, CGBuilderTy &Builder,
                           bool negMul, bool negAdd) {
  // Pull the two multiplicands out of the fmul (or constrained-fmul call)
  // that is being fused into the fmuladd.
  Value *MulOp0 = MulOp->getOperand(0);
  Value *MulOp1 = MulOp->getOperand(1);
  // Negate the first mul operand and/or the addend so a single fmuladd can
  // also represent a*b-c and c-a*b (see the comment above this function).
  if (negMul)
    MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
  if (negAdd)
    Addend = Builder.CreateFNeg(Addend, "neg");

  Value *FMulAdd = nullptr;
  if (Builder.getIsFPConstrained()) {
    assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
           "Only constrained operation should be created when Builder is in FP "
           "constrained mode");
    // Strict FP mode: emit the constrained variant of fmuladd so rounding
    // mode / exception behavior is preserved.
    FMulAdd = Builder.CreateConstrainedFPCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
                             Addend->getType()),
        {MulOp0, MulOp1, Addend});
  } else {
    FMulAdd = Builder.CreateCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
        {MulOp0, MulOp1, Addend});
  }
  // The fmul has been folded into the fmuladd, so delete it; the caller
  // (tryEmitFMulAdd) only passes muls whose result is otherwise unused.
  MulOp->eraseFromParent();

  return FMulAdd;
}
4401
4402// Check whether it would be legal to emit an fmuladd intrinsic call to
4403// represent op and if so, build the fmuladd.
4404//
4405// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
4406// Does NOT check the type of the operation - it's assumed that this function
4407// will be called from contexts where it's known that the type is contractable.
static Value* tryEmitFMulAdd(const BinOpInfo &op,
                             const CodeGenFunction &CGF, CGBuilderTy &Builder,
                             bool isSub=false) {

  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
         "Only fadd/fsub can be the root of an fmuladd.");

  // Check whether this op is marked as fusable.
  if (!op.FPFeatures.allowFPContractWithinStatement())
    return nullptr;

  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  // Peek through fneg to look for fmul. Make sure fneg has no users, and that
  // it is the only use of its operand.
  bool NegLHS = false;
  if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
    if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
      // Remember the negation; it is folded into the fmuladd operands below
      // instead of staying as a separate instruction.
      LHS = LHSUnOp->getOperand(0);
      NegLHS = true;
    }
  }

  bool NegRHS = false;
  if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
    if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
      RHS = RHSUnOp->getOperand(0);
      NegRHS = true;
    }
  }

  // We have a potentially fusable op. Look for a mul on one of the operands.
  // Also, make sure that the mul result isn't used directly. In that case,
  // there's no point creating a muladd operation.
  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      // (-a*b)+c or (-a*b)-c: negate the mul, and let isSub negate the addend.
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      // c+(a*b), c-(a*b), or c+(-a*b): a subtraction and an fneg on the RHS
      // mul cancel, hence the XOR for the mul negation.
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
    }
  }

  // Same matching as above, but for the strict-FP form where the multiply is
  // an experimental.constrained.fmul intrinsic call rather than an fmul.
  if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
    if (LHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
    if (RHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
    }
  }

  // No fusable multiply found; caller emits a plain fadd/fsub.
  return nullptr;
}
4488
4489Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
4490 if (op.LHS->getType()->isPointerTy() ||
4491 op.RHS->getType()->isPointerTy())
4493
4494 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4495 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4496 case LangOptions::SOB_Defined:
4497 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4498 return Builder.CreateAdd(op.LHS, op.RHS, "add");
4499 [[fallthrough]];
4500 case LangOptions::SOB_Undefined:
4501 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4502 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
4503 [[fallthrough]];
4504 case LangOptions::SOB_Trapping:
4505 if (CanElideOverflowCheck(CGF.getContext(), op))
4506 return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
4507 return EmitOverflowCheckedBinOp(op);
4508 }
4509 }
4510
4511 // For vector and matrix adds, try to fold into a fmuladd.
4512 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4513 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4514 // Try to form an fmuladd.
4515 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
4516 return FMulAdd;
4517 }
4518
4519 if (op.Ty->isConstantMatrixType()) {
4520 llvm::MatrixBuilder MB(Builder);
4521 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4522 return MB.CreateAdd(op.LHS, op.RHS);
4523 }
4524
4525 if (op.Ty->isUnsignedIntegerType() &&
4526 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
4527 !CanElideOverflowCheck(CGF.getContext(), op))
4528 return EmitOverflowCheckedBinOp(op);
4529
4530 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4531 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4532 return Builder.CreateFAdd(op.LHS, op.RHS, "add");
4533 }
4534
4535 if (op.isFixedPointOp())
4536 return EmitFixedPointBinOp(op);
4537
4538 return Builder.CreateAdd(op.LHS, op.RHS, "add");
4539}
4540
4541/// The resulting value must be calculated with exact precision, so the operands
4542/// may not be the same type.
4543Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
4544 using llvm::APSInt;
4545 using llvm::ConstantInt;
4546
4547 // This is either a binary operation where at least one of the operands is
4548 // a fixed-point type, or a unary operation where the operand is a fixed-point
4549 // type. The result type of a binary operation is determined by
4550 // Sema::handleFixedPointConversions().
4551 QualType ResultTy = op.Ty;
4552 QualType LHSTy, RHSTy;
4553 if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
4554 RHSTy = BinOp->getRHS()->getType();
4555 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
4556 // For compound assignment, the effective type of the LHS at this point
4557 // is the computation LHS type, not the actual LHS type, and the final
4558 // result type is not the type of the expression but rather the
4559 // computation result type.
4560 LHSTy = CAO->getComputationLHSType();
4561 ResultTy = CAO->getComputationResultType();
4562 } else
4563 LHSTy = BinOp->getLHS()->getType();
4564 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
4565 LHSTy = UnOp->getSubExpr()->getType();
4566 RHSTy = UnOp->getSubExpr()->getType();
4567 }
4568 ASTContext &Ctx = CGF.getContext();
4569 Value *LHS = op.LHS;
4570 Value *RHS = op.RHS;
4571
4572 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
4573 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
4574 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
4575 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
4576
4577 // Perform the actual operation.
4578 Value *Result;
4579 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
4580 switch (op.Opcode) {
4581 case BO_AddAssign:
4582 case BO_Add:
4583 Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
4584 break;
4585 case BO_SubAssign:
4586 case BO_Sub:
4587 Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
4588 break;
4589 case BO_MulAssign:
4590 case BO_Mul:
4591 Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
4592 break;
4593 case BO_DivAssign:
4594 case BO_Div:
4595 Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
4596 break;
4597 case BO_ShlAssign:
4598 case BO_Shl:
4599 Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
4600 break;
4601 case BO_ShrAssign:
4602 case BO_Shr:
4603 Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
4604 break;
4605 case BO_LT:
4606 return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
4607 case BO_GT:
4608 return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
4609 case BO_LE:
4610 return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4611 case BO_GE:
4612 return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4613 case BO_EQ:
4614 // For equality operations, we assume any padding bits on unsigned types are
4615 // zero'd out. They could be overwritten through non-saturating operations
4616 // that cause overflow, but this leads to undefined behavior.
4617 return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
4618 case BO_NE:
4619 return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4620 case BO_Cmp:
4621 case BO_LAnd:
4622 case BO_LOr:
4623 llvm_unreachable("Found unimplemented fixed point binary operation");
4624 case BO_PtrMemD:
4625 case BO_PtrMemI:
4626 case BO_Rem:
4627 case BO_Xor:
4628 case BO_And:
4629 case BO_Or:
4630 case BO_Assign:
4631 case BO_RemAssign:
4632 case BO_AndAssign:
4633 case BO_XorAssign:
4634 case BO_OrAssign:
4635 case BO_Comma:
4636 llvm_unreachable("Found unsupported binary operation for fixed point types.");
4637 }
4638
4639 bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
4641 // Convert to the result type.
4642 return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
4643 : CommonFixedSema,
4644 ResultFixedSema);
4645}
4646
4647Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
4648 // The LHS is always a pointer if either side is.
4649 if (!op.LHS->getType()->isPointerTy()) {
4650 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4651 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4652 case LangOptions::SOB_Defined:
4653 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4654 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4655 [[fallthrough]];
4656 case LangOptions::SOB_Undefined:
4657 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4658 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
4659 [[fallthrough]];
4660 case LangOptions::SOB_Trapping:
4661 if (CanElideOverflowCheck(CGF.getContext(), op))
4662 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
4663 return EmitOverflowCheckedBinOp(op);
4664 }
4665 }
4666
4667 // For vector and matrix subs, try to fold into a fmuladd.
4668 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4669 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4670 // Try to form an fmuladd.
4671 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
4672 return FMulAdd;
4673 }
4674
4675 if (op.Ty->isConstantMatrixType()) {
4676 llvm::MatrixBuilder MB(Builder);
4677 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4678 return MB.CreateSub(op.LHS, op.RHS);
4679 }
4680
4681 if (op.Ty->isUnsignedIntegerType() &&
4682 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
4683 !CanElideOverflowCheck(CGF.getContext(), op))
4684 return EmitOverflowCheckedBinOp(op);
4685
4686 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4687 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4688 return Builder.CreateFSub(op.LHS, op.RHS, "sub");
4689 }
4690
4691 if (op.isFixedPointOp())
4692 return EmitFixedPointBinOp(op);
4693
4694 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4695 }
4696
4697 // If the RHS is not a pointer, then we have normal pointer
4698 // arithmetic.
4699 if (!op.RHS->getType()->isPointerTy())
4701
4702 // Otherwise, this is a pointer subtraction.
4703
4704 // Do the raw subtraction part.
4705 llvm::Value *LHS
4706 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
4707 llvm::Value *RHS
4708 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
4709 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
4710
4711 // Okay, figure out the element size.
4712 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
4713 QualType elementType = expr->getLHS()->getType()->getPointeeType();
4714
4715 llvm::Value *divisor = nullptr;
4716
4717 // For a variable-length array, this is going to be non-constant.
4718 if (const VariableArrayType *vla
4719 = CGF.getContext().getAsVariableArrayType(elementType)) {
4720 auto VlaSize = CGF.getVLASize(vla);
4721 elementType = VlaSize.Type;
4722 divisor = VlaSize.NumElts;
4723
4724 // Scale the number of non-VLA elements by the non-VLA element size.
4725 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
4726 if (!eltSize.isOne())
4727 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
4728
4729 // For everything elese, we can just compute it, safe in the
4730 // assumption that Sema won't let anything through that we can't
4731 // safely compute the size of.
4732 } else {
4733 CharUnits elementSize;
4734 // Handle GCC extension for pointer arithmetic on void* and
4735 // function pointer types.
4736 if (elementType->isVoidType() || elementType->isFunctionType())
4737 elementSize = CharUnits::One();
4738 else
4739 elementSize = CGF.getContext().getTypeSizeInChars(elementType);
4740
4741 // Don't even emit the divide for element size of 1.
4742 if (elementSize.isOne())
4743 return diffInChars;
4744
4745 divisor = CGF.CGM.getSize(elementSize);
4746 }
4747
4748 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
4749 // pointer difference in C is only defined in the case where both operands
4750 // are pointing to elements of an array.
4751 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
4752}
4753
4754Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
4755 bool RHSIsSigned) {
4756 llvm::IntegerType *Ty;
4757 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4758 Ty = cast<llvm::IntegerType>(VT->getElementType());
4759 else
4760 Ty = cast<llvm::IntegerType>(LHS->getType());
4761 // For a given type of LHS the maximum shift amount is width(LHS)-1, however
4762 // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
4763 // this in ConstantInt::get, this results in the value getting truncated.
4764 // Constrain the return value to be max(RHS) in this case.
4765 llvm::Type *RHSTy = RHS->getType();
4766 llvm::APInt RHSMax =
4767 RHSIsSigned ? llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())
4768 : llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
4769 if (RHSMax.ult(Ty->getBitWidth()))
4770 return llvm::ConstantInt::get(RHSTy, RHSMax);
4771 return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
4772}
4773
4774Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
4775 const Twine &Name) {
4776 llvm::IntegerType *Ty;
4777 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4778 Ty = cast<llvm::IntegerType>(VT->getElementType());
4779 else
4780 Ty = cast<llvm::IntegerType>(LHS->getType());
4781
4782 if (llvm::isPowerOf2_64(Ty->getBitWidth()))
4783 return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);
4784
4785 return Builder.CreateURem(
4786 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
4787}
4788
4789Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
4790 // TODO: This misses out on the sanitizer check below.
4791 if (Ops.isFixedPointOp())
4792 return EmitFixedPointBinOp(Ops);
4793
4794 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4795 // RHS to the same size as the LHS.
4796 Value *RHS = Ops.RHS;
4797 if (Ops.LHS->getType() != RHS->getType())
4798 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4799
4800 bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
4801 Ops.Ty->hasSignedIntegerRepresentation() &&
4803 !CGF.getLangOpts().CPlusPlus20;
4804 bool SanitizeUnsignedBase =
4805 CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
4806 Ops.Ty->hasUnsignedIntegerRepresentation();
4807 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
4808 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
4809 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4810 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4811 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
4812 else if ((SanitizeBase || SanitizeExponent) &&
4813 isa<llvm::IntegerType>(Ops.LHS->getType())) {
4814 SmallVector<SanitizerKind::SanitizerOrdinal, 3> Ordinals;
4815 if (SanitizeSignedBase)
4816 Ordinals.push_back(SanitizerKind::SO_ShiftBase);
4817 if (SanitizeUnsignedBase)
4818 Ordinals.push_back(SanitizerKind::SO_UnsignedShiftBase);
4819 if (SanitizeExponent)
4820 Ordinals.push_back(SanitizerKind::SO_ShiftExponent);
4821
4822 SanitizerDebugLocation SanScope(&CGF, Ordinals,
4823 SanitizerHandler::ShiftOutOfBounds);
4824 SmallVector<std::pair<Value *, SanitizerKind::SanitizerOrdinal>, 2> Checks;
4825 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4826 llvm::Value *WidthMinusOne =
4827 GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);
4828 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
4829
4830 if (SanitizeExponent) {
4831 Checks.push_back(
4832 std::make_pair(ValidExponent, SanitizerKind::SO_ShiftExponent));
4833 }
4834
4835 if (SanitizeBase) {
4836 // Check whether we are shifting any non-zero bits off the top of the
4837 // integer. We only emit this check if exponent is valid - otherwise
4838 // instructions below will have undefined behavior themselves.
4839 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
4840 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4841 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
4842 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
4843 llvm::Value *PromotedWidthMinusOne =
4844 (RHS == Ops.RHS) ? WidthMinusOne
4845 : GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);
4846 CGF.EmitBlock(CheckShiftBase);
4847 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
4848 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
4849 /*NUW*/ true, /*NSW*/ true),
4850 "shl.check");
4851 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
4852 // In C99, we are not permitted to shift a 1 bit into the sign bit.
4853 // Under C++11's rules, shifting a 1 bit into the sign bit is
4854 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
4855 // define signed left shifts, so we use the C99 and C++11 rules there).
4856 // Unsigned shifts can always shift into the top bit.
4857 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
4858 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
4859 }
4860 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
4861 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
4862 CGF.EmitBlock(Cont);
4863 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
4864 BaseCheck->addIncoming(Builder.getTrue(), Orig);
4865 BaseCheck->addIncoming(ValidBase, CheckShiftBase);
4866 Checks.push_back(std::make_pair(
4867 BaseCheck, SanitizeSignedBase ? SanitizerKind::SO_ShiftBase
4868 : SanitizerKind::SO_UnsignedShiftBase));
4869 }
4870
4871 assert(!Checks.empty());
4872 EmitBinOpCheck(Checks, Ops);
4873 }
4874
4875 return Builder.CreateShl(Ops.LHS, RHS, "shl");
4876}
4877
4878Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
4879 // TODO: This misses out on the sanitizer check below.
4880 if (Ops.isFixedPointOp())
4881 return EmitFixedPointBinOp(Ops);
4882
4883 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4884 // RHS to the same size as the LHS.
4885 Value *RHS = Ops.RHS;
4886 if (Ops.LHS->getType() != RHS->getType())
4887 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4888
4889 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4890 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4891 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
4892 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
4893 isa<llvm::IntegerType>(Ops.LHS->getType())) {
4894 SanitizerDebugLocation SanScope(&CGF, {SanitizerKind::SO_ShiftExponent},
4895 SanitizerHandler::ShiftOutOfBounds);
4896 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4897 llvm::Value *Valid = Builder.CreateICmpULE(
4898 Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));
4899 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::SO_ShiftExponent), Ops);
4900 }
4901
4902 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4903 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
4904 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
4905}
4906
4908// return corresponding comparison intrinsic for given vector type
4909static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
4910 BuiltinType::Kind ElemKind) {
4911 switch (ElemKind) {
4912 default: llvm_unreachable("unexpected element type");
4913 case BuiltinType::Char_U:
4914 case BuiltinType::UChar:
4915 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4916 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
4917 case BuiltinType::Char_S:
4918 case BuiltinType::SChar:
4919 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4920 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
4921 case BuiltinType::UShort:
4922 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4923 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
4924 case BuiltinType::Short:
4925 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4926 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
4927 case BuiltinType::UInt:
4928 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4929 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
4930 case BuiltinType::Int:
4931 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4932 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
4933 case BuiltinType::ULong:
4934 case BuiltinType::ULongLong:
4935 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4936 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
4937 case BuiltinType::Long:
4938 case BuiltinType::LongLong:
4939 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4940 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
4941 case BuiltinType::Float:
4942 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
4943 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
4944 case BuiltinType::Double:
4945 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
4946 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
4947 case BuiltinType::UInt128:
4948 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4949 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
4950 case BuiltinType::Int128:
4951 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4952 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
4953 }
4954}
4955
/// Emit a scalar comparison for \p E, choosing between the unsigned-integer
/// predicate \p UICmpOpc, the signed-integer predicate \p SICmpOpc, and the
/// floating-point predicate \p FCmpOpc based on the operand types.
/// \p IsSignaling selects a signaling FP compare (CreateFCmpS).
/// Handles member pointers, AltiVec predicate comparisons, fixed-point
/// values, integer/pointer/FP scalars and vectors, and _Complex operands.
/// NOTE(review): this listing elides a few statements (e.g. the C++-ABI
/// member-pointer comparison call head and part of the StrictVTablePointers
/// condition) — confirm against the full file.
Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
                                      llvm::CmpInst::Predicate UICmpOpc,
                                      llvm::CmpInst::Predicate SICmpOpc,
                                      llvm::CmpInst::Predicate FCmpOpc,
                                      bool IsSignaling) {
  TestAndClearIgnoreResultAssign();
  Value *Result;
  QualType LHSTy = E->getLHS()->getType();
  QualType RHSTy = E->getRHS()->getType();
  if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
    // Member pointers only support equality comparisons; lowering is
    // delegated to the C++ ABI (call head elided in this listing).
    assert(E->getOpcode() == BO_EQ ||
           E->getOpcode() == BO_NE);
    Value *LHS = CGF.EmitScalarExpr(E->getLHS());
    Value *RHS = CGF.EmitScalarExpr(E->getRHS());
        CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
  } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
    BinOpInfo BOInfo = EmitBinOps(E);
    Value *LHS = BOInfo.LHS;
    Value *RHS = BOInfo.RHS;

    // If AltiVec, the comparison results in a numeric type, so we use
    // intrinsics comparing vectors and giving 0 or 1 as a result
    if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
      // constants for mapping CR6 register bits to predicate result
      enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;

      llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;

      // in several cases vector arguments order will be reversed
      Value *FirstVecArg = LHS,
            *SecondVecArg = RHS;

      QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
      BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();

      // Map the source-level comparison onto a predicate intrinsic plus the
      // CR6 encoding to test. <, <= and non-float >= swap the operands so a
      // greater-than(-or-equal) intrinsic can be reused.
      switch(E->getOpcode()) {
      default: llvm_unreachable("is not a comparison operation");
      case BO_EQ:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_NE:
        CR6 = CR6_EQ;
        ID = GetIntrinsic(VCMPEQ, ElementKind);
        break;
      case BO_LT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        std::swap(FirstVecArg, SecondVecArg);
        break;
      case BO_GT:
        CR6 = CR6_LT;
        ID = GetIntrinsic(VCMPGT, ElementKind);
        break;
      case BO_LE:
        if (ElementKind == BuiltinType::Float) {
          // Floats have a dedicated >= intrinsic; swap operands for <=.
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
          std::swap(FirstVecArg, SecondVecArg);
        }
        else {
          // Integers: a <= b  <=>  !(a > b).
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
        }
        break;
      case BO_GE:
        if (ElementKind == BuiltinType::Float) {
          CR6 = CR6_LT;
          ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
        }
        else {
          // Integers: a >= b  <=>  !(b > a).
          CR6 = CR6_EQ;
          ID = GetIntrinsic(VCMPGT, ElementKind);
          std::swap(FirstVecArg, SecondVecArg);
        }
        break;
      }

      // Predicate intrinsics take the CR6 selector as their first operand.
      Value *CR6Param = Builder.getInt32(CR6);
      llvm::Function *F = CGF.CGM.getIntrinsic(ID);
      Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});

      // The result type of intrinsic may not be same as E->getType().
      // If E->getType() is not BoolTy, EmitScalarConversion will do the
      // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
      // do nothing, if ResultTy is not i1 at the same time, it will cause
      // crash later.
      llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
      if (ResultTy->getBitWidth() > 1 &&
          E->getType() == CGF.getContext().BoolTy)
        Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
      return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                                  E->getExprLoc());
    }

    if (BOInfo.isFixedPointOp()) {
      // Fixed-point comparisons are lowered by the fixed-point helper.
      Result = EmitFixedPointBinOp(BOInfo);
    } else if (LHS->getType()->isFPOrFPVectorTy()) {
      // Honor the FP environment (exceptions/rounding) in effect for E.
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
      if (!IsSignaling)
        Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
      else
        Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
    } else if (LHSTy->hasSignedIntegerRepresentation()) {
      Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
    } else {
      // Unsigned integers and pointers.

      if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&

          // Dynamic information is required to be stripped for comparisons,
          // because it could leak the dynamic information. Based on comparisons
          // of pointers to dynamic objects, the optimizer can replace one pointer
          // with another, which might be incorrect in presence of invariant
          // groups. Comparison with null is safe because null does not carry any
          // dynamic information.
          if (LHSTy.mayBeDynamicClass())
            LHS = Builder.CreateStripInvariantGroup(LHS);
          if (RHSTy.mayBeDynamicClass())
            RHS = Builder.CreateStripInvariantGroup(RHS);
      }

      Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
    }

    // If this is a vector comparison, sign extend the result to the appropriate
    // vector integer type and return it (don't convert to bool).
    if (LHSTy->isVectorType())
      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");

  } else {
    // Complex Comparison: can only be an equality comparison.
    QualType CETy;
    if (auto *CTy = LHSTy->getAs<ComplexType>()) {
      LHS = CGF.EmitComplexExpr(E->getLHS());
      CETy = CTy->getElementType();
    } else {
      // Promote a real operand to a (value, 0) pair so it can be compared
      // componentwise against the complex operand.
      LHS.first = Visit(E->getLHS());
      LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
      CETy = LHSTy;
    }
    if (auto *CTy = RHSTy->getAs<ComplexType>()) {
      RHS = CGF.EmitComplexExpr(E->getRHS());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy,
                                                     CTy->getElementType()) &&
             "The element types must always match.");
      (void)CTy;
    } else {
      RHS.first = Visit(E->getRHS());
      RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
      assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
             "The element types must always match.");
    }

    Value *ResultR, *ResultI;
    if (CETy->isRealFloatingType()) {
      // As complex comparisons can only be equality comparisons, they
      // are never signaling comparisons.
      ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
    } else {
      // Complex comparisons can only be equality comparisons. As such, signed
      // and unsigned opcodes are the same.
      ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
      ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
    }

    // Combine the per-component results: == needs both components equal,
    // != needs either component to differ.
    if (E->getOpcode() == BO_EQ) {
      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
    } else {
      assert(E->getOpcode() == BO_NE &&
             "Complex comparison other than == or != ?");
      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
    }
  }

  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
                              E->getExprLoc());
}
5139
// Emits the RHS of a bitfield assignment. When the RHS is an implicit
// integral cast or an lvalue load, additionally returns the value *before*
// the scalar conversion in *Previous and its type in *SrcType, so the
// bitfield/implicit-conversion sanitizers can compare against the original.
// NOTE(review): the first line of this definition (return type and function
// name) is elided in this listing — confirm against the full file.
    const BinaryOperator *E, Value **Previous, QualType *SrcType) {
  // In case we have the integer or bitfield sanitizer checks enabled
  // we want to get the expression before scalar conversion.
  if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
    CastKind Kind = ICE->getCastKind();
    if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
      *SrcType = ICE->getSubExpr()->getType();
      *Previous = EmitScalarExpr(ICE->getSubExpr());
      // Pass default ScalarConversionOpts to avoid emitting
      // integer sanitizer checks as E refers to bitfield.
      return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
                                  ICE->getExprLoc());
    }
  }
  // Otherwise emit the RHS normally; *Previous and *SrcType are not written.
  return EmitScalarExpr(E->getRHS());
}
5157
/// Emit a scalar assignment `LHS = RHS`, handling pointer-auth-qualified
/// LHS, ARC ownership qualifiers, and bitfield stores (including the
/// bitfield-conversion sanitizer check). Returns the assigned r-value, or
/// nullptr when the result is ignored, per C/C++ semantics below.
/// NOTE(review): this listing elides several lines (the pointer-auth LV
/// declaration and the ObjC lifetime `case` labels) — confirm against the
/// full file.
Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  ApplyAtomGroup Grp(CGF.getDebugInfo());
  bool Ignore = TestAndClearIgnoreResultAssign();

  Value *RHS;
  LValue LHS;

  if (PointerAuthQualifier PtrAuth = E->getLHS()->getType().getPointerAuth()) {
    // Sign the RHS with the LHS's pointer-auth schema, store it, then
    // strip the qualification again to produce the expression result.
    llvm::Value *RV =
        CGF.EmitPointerAuthQualify(PtrAuth, E->getRHS(), LV.getAddress());
    CGF.EmitNullabilityCheck(LV, RV, E->getExprLoc());

    if (Ignore)
      return nullptr;
    RV = CGF.EmitPointerAuthUnqualify(PtrAuth, RV, LV.getType(),
                                      LV.getAddress(), /*nonnull*/ false);
    return RV;
  }

  // Dispatch on the ARC ownership qualifier of the LHS (case labels elided
  // in this listing; the callee names identify each lifetime).
  switch (E->getLHS()->getType().getObjCLifetime()) {
    // __strong assignment.
    std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
    break;

    // __autoreleasing assignment.
    std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
    break;

    // __unsafe_unretained assignment.
    std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
    break;

    // __weak assignment: evaluate the RHS first, then store via the runtime.
    RHS = Visit(E->getRHS());
    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
    RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
    break;

    // __block variables need to have the rhs evaluated first, plus
    // this should improve codegen just a little.
    Value *Previous = nullptr;
    QualType SrcType = E->getRHS()->getType();
    // Check if LHS is a bitfield, if RHS contains an implicit cast expression
    // we want to extract that value and potentially (if the bitfield sanitizer
    // is enabled) use it to check for an implicit conversion.
    if (E->getLHS()->refersToBitField())
      RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
    else
      RHS = Visit(E->getRHS());

    LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // Store the value into the LHS. Bit-fields are handled specially
    // because the result is altered by the store, i.e., [C99 6.5.16p1]
    // 'An assignment expression has the value of the left operand after
    // the assignment...'.
    if (LHS.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
      // If the expression contained an implicit conversion, make sure
      // to use the value before the scalar conversion.
      Value *Src = Previous ? Previous : RHS;
      QualType DstType = E->getLHS()->getType();
      CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
                                      LHS.getBitFieldInfo(), E->getExprLoc());
    } else {
      CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
      CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
    }
  }
  // OpenMP: Handle lastprivate(condition:) in scalar assignment
  if (CGF.getLangOpts().OpenMP) {
        E->getLHS());
  }

  // If the result is clearly ignored, return now.
  if (Ignore)
    return nullptr;

  // The result of an assignment in C is the assigned r-value.
  if (!CGF.getLangOpts().CPlusPlus)
    return RHS;

  // If the lvalue is non-volatile, return the computed value of the assignment.
  if (!LHS.isVolatileQualified())
    return RHS;

  // Otherwise, reload the value.
  return EmitLoadOfLValue(LHS, E->getExprLoc());
}
5252
/// Emit `LHS && RHS`. Vectors are lowered to an element-wise AND of
/// compare-against-zero results; scalars get short-circuit control flow
/// (with constant folding to elide a known LHS), plus MC/DC bookkeeping and
/// profile/coverage instrumentation when enabled.
/// NOTE(review): this listing elides some statements (profile-counter and
/// MCDC-bitmap calls, and parts of two `if` conditions) — confirm against
/// the full file.
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
  // Perform vector logical and on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    // Each operand becomes a boolean vector via comparison against zero.
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *And = Builder.CreateAnd(LHS, RHS);
    // Sign-extend the i1 vector back to the expression's vector type.
    return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
  // If we have 1 && X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (LHSCondVal) { // If we have 1 && X, just emit X.

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter needed to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
        Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      } else
        CGF.markStmtMaybeUsed(E->getRHS());

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
    }

    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
    if (!CGF.ContainsLabel(E->getRHS())) {
      CGF.markStmtMaybeUsed(E->getRHS());
      return llvm::Constant::getNullValue(ResTy);
    }
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is false, go to the failure (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be false. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);

  // Evaluate the RHS conditionally (it only runs when the LHS was true).
  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
  eval.end(CGF);

  // Reaquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
    Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock.
  {
    // There is no need to emit line number for unconditional branch.
    auto NL = ApplyDebugLocation::CreateEmpty(CGF);
    CGF.EmitBlock(ContBlock);
  }
  // Insert an entry into the phi node for the edge with the value of RHSCond.
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())

  // Artificial location to preserve the scope information
  {
    PN->setDebugLoc(Builder.getCurrentDebugLocation());
  }

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
}
5395
/// Emit `LHS || RHS`. Mirrors VisitBinLAnd: vectors become an element-wise
/// OR of compare-against-zero results; scalars get short-circuit control
/// flow with constant folding, MC/DC bookkeeping, and profile/coverage
/// instrumentation when enabled.
/// NOTE(review): this listing elides some statements (profile-counter and
/// MCDC-bitmap calls, and parts of two `if` conditions) — confirm against
/// the full file.
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    // Each operand becomes a boolean vector via comparison against zero.
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    Value *Or = Builder.CreateOr(LHS, RHS);
    // Sign-extend the i1 vector back to the expression's vector type.
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter need to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
        CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
        llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
        llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
        Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
        CGF.EmitBlock(RHSBlockCnt);
        CGF.EmitBranch(FBlock);
        CGF.EmitBlock(FBlock);
      } else
        CGF.markStmtMaybeUsed(E->getRHS());

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    if (!CGF.ContainsLabel(E->getRHS())) {
      CGF.markStmtMaybeUsed(E->getRHS());
      return llvm::ConstantInt::get(ResTy, 1);
    }
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be true. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reaquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
    CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
    llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
    Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
    CGF.EmitBlock(RHSBlockCnt);
    CGF.EmitBranch(ContBlock);
    PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}
5532
5533Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
5534 CGF.EmitIgnoredExpr(E->getLHS());
5535 CGF.EnsureInsertPoint();
5536 return Visit(E->getRHS());
5537}
5538
5539//===----------------------------------------------------------------------===//
5540// Other Operators
5541//===----------------------------------------------------------------------===//
5542
/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
/// NOTE(review): the first line of this definition (return type, name, and
/// first parameter) is elided in this listing — confirm against the full
/// file.
                                           CodeGenFunction &CGF) {
  // Anything that is an integer or floating point constant is fine.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());

  // The commentary below is explanation for why the check above is the ONLY
  // case accepted — everything after the return is intentionally unreachable.
  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the scope
  // outside the lambda, that function may have returned already. Reading its
  // locals is a bad idea. Also, these reads may introduce races there didn't
  // exist in the source-level program.
}
5559
5560
/// Emit `cond ? lhs : rhs` (and GNU `?:`). Strategy, in order: fold a
/// constant condition and emit only the live arm; lower vector conditions
/// to masked blends or selects; emit a plain `select` when both arms are
/// cheap and side-effect free; otherwise emit full branch/phi control flow
/// with MC/DC and profile bookkeeping.
/// NOTE(review): this listing elides several statements (coverage-mode
/// branches, profile-counter calls, and parts of two conditions) — confirm
/// against the full file.
Value *ScalarExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  TestAndClearIgnoreResultAssign();

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  Expr *condExpr = E->getCond();
  Expr *lhsExpr = E->getTrueExpr();
  Expr *rhsExpr = E->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!CondExprBool) std::swap(live, dead);

    // If the dead side doesn't have labels we need, just emit the Live part.
    if (!CGF.ContainsLabel(dead)) {
      if (CondExprBool) {
        CGF.incrementProfileCounter(lhsExpr);
        CGF.incrementProfileCounter(rhsExpr);
      }
      }
      Value *Result = Visit(live);
      CGF.markStmtMaybeUsed(dead);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value*. However, a conditional
      // with non-void type must return a non-null Value*.
      if (!Result && !E->getType()->isVoidType())
        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));

      return Result;
    }
  }

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
  if (CGF.getLangOpts().OpenCL && (condExpr->getType()->isVectorType() ||
                                   condExpr->getType()->isExtVectorType())) {

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *condType = ConvertType(condExpr->getType());
    auto *vecTy = cast<llvm::FixedVectorType>(condType);

    unsigned numElem = vecTy->getNumElements();
    llvm::Type *elemType = vecTy->getElementType();

    // Build a full-width mask from the sign bit (MSB) of each condition lane.
    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
    llvm::Value *tmp = Builder.CreateSExt(
        TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
    llvm::Value *tmp2 = Builder.CreateNot(tmp);

    // Cast float to int to perform ANDs if necessary.
    llvm::Value *RHSTmp = RHS;
    llvm::Value *LHSTmp = LHS;
    bool wasCast = false;
    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
    if (rhsVTy->getElementType()->isFloatingPointTy()) {
      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
      wasCast = true;
    }

    // Blend: (RHS & ~mask) | (LHS & mask).
    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
    if (wasCast)
      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());

    return tmp5;
  }

  if (condExpr->getType()->isVectorType() ||
      condExpr->getType()->isSveVLSBuiltinType()) {

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *CondType = ConvertType(condExpr->getType());
    auto *VecTy = cast<llvm::VectorType>(CondType);

    // An i1 mask vector can feed a select directly.
    if (VecTy->getElementType()->isIntegerTy(1))
      return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");

    // OpenCL uses the MSB of the mask vector.
    llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);
    if (condExpr->getType()->isExtVectorType())
      CondV = Builder.CreateICmpSLT(CondV, ZeroVec, "vector_cond");
    else
      CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
    return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
    llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
    llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);

      CGF.incrementProfileCounter(lhsExpr);
      CGF.incrementProfileCounter(rhsExpr);
    } else
      CGF.incrementProfileCounter(E, StepV);

    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);
    if (!LHS) {
      // If the conditional has void type, make sure we return a null Value*.
      assert(!RHS && "LHS and RHS types must match");
      return nullptr;
    }
    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(condExpr);

  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
                           CGF.getProfileCount(lhsExpr));

  CGF.EmitBlock(LHSBlock);

  // If the top of the logical operator nest, update the MCDC bitmap for the
  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
  // may also contain a boolean expression.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);

    CGF.incrementProfileCounter(lhsExpr);
  else

  // Conditionally evaluate the true arm.
  eval.begin(CGF);
  Value *LHS = Visit(lhsExpr);
  eval.end(CGF);

  LHSBlock = Builder.GetInsertBlock();
  Builder.CreateBr(ContBlock);

  CGF.EmitBlock(RHSBlock);

  // If the top of the logical operator nest, update the MCDC bitmap for the
  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
  // may also contain a boolean expression.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);

    CGF.incrementProfileCounter(rhsExpr);

  // Conditionally evaluate the false arm.
  eval.begin(CGF);
  Value *RHS = Visit(rhsExpr);
  eval.end(CGF);

  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBlock(ContBlock);

  // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;

  // Create a PHI node for the real part.
  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
  PN->addIncoming(LHS, LHSBlock);
  PN->addIncoming(RHS, RHSBlock);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.

  return PN;
}
5759
5760Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
5761 return Visit(E->getChosenSubExpr());
5762}
5763
5764Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
5765 Address ArgValue = Address::invalid();
5766 RValue ArgPtr = CGF.EmitVAArg(VE, ArgValue);
5767
5768 return ArgPtr.getScalarVal();
5769}
5770
5771Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
5772 return CGF.EmitBlockLiteral(block);
5773}
5774
// Convert a vec3 to vec4, or vice versa.
// NOTE(review): the first line of this definition (return type, name, and
// first parameter) is elided in this listing — confirm against the full
// file.
                                 Value *Src, unsigned NumElementsDst) {
  // Shuffle mask <0, 1, 2, -1>: the first three lanes are carried over, and
  // when widening to vec4 the fourth lane is undefined (-1). Only the first
  // NumElementsDst entries of the mask are used, so the same mask serves
  // both the 3->4 and 4->3 directions.
  static constexpr int Mask[] = {0, 1, 2, -1};
  return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
}
5781
5782// Create cast instructions for converting LLVM value \p Src to LLVM type \p
5783// DstTy. \p Src has the same size as \p DstTy. Both are single value types
5784// but could be scalar or vectors of different lengths, and either can be
5785// pointer.
5786// There are 4 cases:
5787// 1. non-pointer -> non-pointer : needs 1 bitcast
5788// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
5789// 3. pointer -> non-pointer
5790// a) pointer -> intptr_t : needs 1 ptrtoint
5791// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
5792// 4. non-pointer -> pointer
5793// a) intptr_t -> pointer : needs 1 inttoptr
5794// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
5795// Note: for cases 3b and 4b two casts are required since LLVM casts do not
5796// allow casting directly between pointer types and non-integer non-pointer
5797// types.
5799 const llvm::DataLayout &DL,
5800 Value *Src, llvm::Type *DstTy,
5801 StringRef Name = "") {
5802 auto SrcTy = Src->getType();
5803
5804 // Case 1.
5805 if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
5806 return Builder.CreateBitCast(Src, DstTy, Name);
5807
5808 // Case 2.
5809 if (SrcTy->isPointerTy() && DstTy->isPointerTy())
5810 return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
5811
5812 // Case 3.
5813 if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
5814 // Case 3b.
5815 if (!DstTy->isIntegerTy())
5816 Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
5817 // Cases 3a and 3b.
5818 return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
5819 }
5820
5821 // Case 4b.
5822 if (!SrcTy->isIntegerTy())
5823 Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
5824 // Cases 4a and 4b.
5825 return Builder.CreateIntToPtr(Src, DstTy, Name);
5826}
5827
5828Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
5829 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
5830 llvm::Type *DstTy = ConvertType(E->getType());
5831
5832 llvm::Type *SrcTy = Src->getType();
5833 unsigned NumElementsSrc =
5835 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
5836 : 0;
5837 unsigned NumElementsDst =
5839 ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
5840 : 0;
5841
5842 // Use bit vector expansion for ext_vector_type boolean vectors.
5843 if (E->getType()->isExtVectorBoolType())
5844 return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");
5845
5846 // Going from vec3 to non-vec3 is a special case and requires a shuffle
5847 // vector to get a vec4, then a bitcast if the target type is different.
5848 if (NumElementsSrc == 3 && NumElementsDst != 3) {
5849 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
5850 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5851 DstTy);
5852
5853 Src->setName("astype");
5854 return Src;
5855 }
5856
5857 // Going from non-vec3 to vec3 is a special case and requires a bitcast
5858 // to vec4 if the original type is not vec4, then a shuffle vector to
5859 // get a vec3.
5860 if (NumElementsSrc != 3 && NumElementsDst == 3) {
5861 auto *Vec4Ty = llvm::FixedVectorType::get(
5862 cast<llvm::VectorType>(DstTy)->getElementType(), 4);
5863 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5864 Vec4Ty);
5865
5866 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
5867 Src->setName("astype");
5868 return Src;
5869 }
5870
5871 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
5872 Src, DstTy, "astype");
5873}
5874
5875Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
5876 return CGF.EmitAtomicExpr(E).getScalarVal();
5877}
5878
5879//===----------------------------------------------------------------------===//
5880// Entry Point into this File
5881//===----------------------------------------------------------------------===//
5882
5883/// Emit the computation of the specified expression of scalar type, ignoring
5884/// the result.
5885Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
5886 assert(E && hasScalarEvaluationKind(E->getType()) &&
5887 "Invalid scalar expression to emit");
5888
5889 return ScalarExprEmitter(*this, IgnoreResultAssign)
5890 .Visit(const_cast<Expr *>(E));
5891}
5892
5893/// Emit a conversion from the specified type to the specified destination type,
5894/// both of which are LLVM scalar types.
5896 QualType DstTy,
5897 SourceLocation Loc) {
5898 assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
5899 "Invalid scalar expression to emit");
5900 return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
5901}
5902
5903/// Emit a conversion from the specified complex type to the specified
5904/// destination type, where the destination type is an LLVM scalar type.
5906 QualType SrcTy,
5907 QualType DstTy,
5908 SourceLocation Loc) {
5909 assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
5910 "Invalid complex -> scalar conversion");
5911 return ScalarExprEmitter(*this)
5912 .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
5913}
5914
5915
5916Value *
5918 QualType PromotionType) {
5919 if (!PromotionType.isNull())
5920 return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
5921 else
5922 return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
5923}
5924
5925
5928 bool isInc, bool isPre) {
5929 return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
5930}
5931
5933 // object->isa or (*object).isa
5934 // Generate code as for: *(Class*)object
5935
5936 Expr *BaseExpr = E->getBase();
5938 if (BaseExpr->isPRValue()) {
5939 llvm::Type *BaseTy =
5941 Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
5942 } else {
5943 Addr = EmitLValue(BaseExpr).getAddress();
5944 }
5945
5946 // Cast the address to Class*.
5947 Addr = Addr.withElementType(ConvertType(E->getType()));
5948 return MakeAddrLValue(Addr, E->getType());
5949}
5950
5951
5953 const CompoundAssignOperator *E) {
5955 ScalarExprEmitter Scalar(*this);
5956 Value *Result = nullptr;
5957 switch (E->getOpcode()) {
5958#define COMPOUND_OP(Op) \
5959 case BO_##Op##Assign: \
5960 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
5961 Result)
5962 COMPOUND_OP(Mul);
5963 COMPOUND_OP(Div);
5964 COMPOUND_OP(Rem);
5965 COMPOUND_OP(Add);
5966 COMPOUND_OP(Sub);
5967 COMPOUND_OP(Shl);
5968 COMPOUND_OP(Shr);
5970 COMPOUND_OP(Xor);
5971 COMPOUND_OP(Or);
5972#undef COMPOUND_OP
5973
5974 case BO_PtrMemD:
5975 case BO_PtrMemI:
5976 case BO_Mul:
5977 case BO_Div:
5978 case BO_Rem:
5979 case BO_Add:
5980 case BO_Sub:
5981 case BO_Shl:
5982 case BO_Shr:
5983 case BO_LT:
5984 case BO_GT:
5985 case BO_LE:
5986 case BO_GE:
5987 case BO_EQ:
5988 case BO_NE:
5989 case BO_Cmp:
5990 case BO_And:
5991 case BO_Xor:
5992 case BO_Or:
5993 case BO_LAnd:
5994 case BO_LOr:
5995 case BO_Assign:
5996 case BO_Comma:
5997 llvm_unreachable("Not valid compound assignment operators");
5998 }
5999
6000 llvm_unreachable("Unhandled compound assignment operator");
6001}
6002
6004 // The total (signed) byte offset for the GEP.
6005 llvm::Value *TotalOffset;
6006 // The offset overflow flag - true if the total offset overflows.
6007 llvm::Value *OffsetOverflows;
6008};
6009
6010/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
6011/// and compute the total offset it applies from it's base pointer BasePtr.
6012/// Returns offset in bytes and a boolean flag whether an overflow happened
6013/// during evaluation.
6015 llvm::LLVMContext &VMContext,
6016 CodeGenModule &CGM,
6017 CGBuilderTy &Builder) {
6018 const auto &DL = CGM.getDataLayout();
6019
6020 // The total (signed) byte offset for the GEP.
6021 llvm::Value *TotalOffset = nullptr;
6022
6023 // Was the GEP already reduced to a constant?
6024 if (isa<llvm::Constant>(GEPVal)) {
6025 // Compute the offset by casting both pointers to integers and subtracting:
6026 // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
6027 Value *BasePtr_int =
6028 Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
6029 Value *GEPVal_int =
6030 Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
6031 TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
6032 return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
6033 }
6034
6035 auto *GEP = cast<llvm::GEPOperator>(GEPVal);
6036 assert(GEP->getPointerOperand() == BasePtr &&
6037 "BasePtr must be the base of the GEP.");
6038 assert(GEP->isInBounds() && "Expected inbounds GEP");
6039
6040 auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());
6041
6042 // Grab references to the signed add/mul overflow intrinsics for intptr_t.
6043 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
6044 auto *SAddIntrinsic =
6045 CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
6046 auto *SMulIntrinsic =
6047 CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);
6048
6049 // The offset overflow flag - true if the total offset overflows.
6050 llvm::Value *OffsetOverflows = Builder.getFalse();
6051
6052 /// Return the result of the given binary operation.
6053 auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
6054 llvm::Value *RHS) -> llvm::Value * {
6055 assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");
6056
6057 // If the operands are constants, return a constant result.
6058 if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
6059 if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
6060 llvm::APInt N;
6061 bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
6062 /*Signed=*/true, N);
6063 if (HasOverflow)
6064 OffsetOverflows = Builder.getTrue();
6065 return llvm::ConstantInt::get(VMContext, N);
6066 }
6067 }
6068
6069 // Otherwise, compute the result with checked arithmetic.
6070 auto *ResultAndOverflow = Builder.CreateCall(
6071 (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
6072 OffsetOverflows = Builder.CreateOr(
6073 Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
6074 return Builder.CreateExtractValue(ResultAndOverflow, 0);
6075 };
6076
6077 // Determine the total byte offset by looking at each GEP operand.
6078 for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
6079 GTI != GTE; ++GTI) {
6080 llvm::Value *LocalOffset;
6081 auto *Index = GTI.getOperand();
6082 // Compute the local offset contributed by this indexing step:
6083 if (auto *STy = GTI.getStructTypeOrNull()) {
6084 // For struct indexing, the local offset is the byte position of the
6085 // specified field.
6086 unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
6087 LocalOffset = llvm::ConstantInt::get(
6088 IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
6089 } else {
6090 // Otherwise this is array-like indexing. The local offset is the index
6091 // multiplied by the element size.
6092 auto *ElementSize =
6093 llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
6094 auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
6095 LocalOffset = eval(BO_Mul, ElementSize, IndexS);
6096 }
6097
6098 // If this is the first offset, set it as the total offset. Otherwise, add
6099 // the local offset into the running total.
6100 if (!TotalOffset || TotalOffset == Zero)
6101 TotalOffset = LocalOffset;
6102 else
6103 TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
6104 }
6105
6106 return {TotalOffset, OffsetOverflows};
6107}
6108
6109Value *
6110CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
6111 ArrayRef<Value *> IdxList,
6112 bool SignedIndices, bool IsSubtraction,
6113 SourceLocation Loc, const Twine &Name) {
6114 llvm::Type *PtrTy = Ptr->getType();
6115
6116 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6117 if (!SignedIndices && !IsSubtraction)
6118 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6119
6120 Value *GEPVal = Builder.CreateGEP(ElemTy, Ptr, IdxList, Name, NWFlags);
6121
6122 // If the pointer overflow sanitizer isn't enabled, do nothing.
6123 if (!SanOpts.has(SanitizerKind::PointerOverflow))
6124 return GEPVal;
6125
6126 // Perform nullptr-and-offset check unless the nullptr is defined.
6127 bool PerformNullCheck = !NullPointerIsDefined(
6128 Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
6129 // Check for overflows unless the GEP got constant-folded,
6130 // and only in the default address space
6131 bool PerformOverflowCheck =
6132 !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;
6133
6134 if (!(PerformNullCheck || PerformOverflowCheck))
6135 return GEPVal;
6136
6137 const auto &DL = CGM.getDataLayout();
6138
6139 auto CheckOrdinal = SanitizerKind::SO_PointerOverflow;
6140 auto CheckHandler = SanitizerHandler::PointerOverflow;
6141 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
6142 llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);
6143
6144 GEPOffsetAndOverflow EvaluatedGEP =
6145 EmitGEPOffsetInBytes(Ptr, GEPVal, getLLVMContext(), CGM, Builder);
6146
6147 assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
6148 EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
6149 "If the offset got constant-folded, we don't expect that there was an "
6150 "overflow.");
6151
6152 auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
6153
6154 // Common case: if the total offset is zero, don't emit a check.
6155 if (EvaluatedGEP.TotalOffset == Zero)
6156 return GEPVal;
6157
6158 // Now that we've computed the total offset, add it to the base pointer (with
6159 // wrapping semantics).
6160 auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
6161 auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);
6162
6163 llvm::SmallVector<std::pair<llvm::Value *, SanitizerKind::SanitizerOrdinal>,
6164 2>
6165 Checks;
6166
6167 if (PerformNullCheck) {
6168 // If the base pointer evaluates to a null pointer value,
6169 // the only valid pointer this inbounds GEP can produce is also
6170 // a null pointer, so the offset must also evaluate to zero.
6171 // Likewise, if we have non-zero base pointer, we can not get null pointer
6172 // as a result, so the offset can not be -intptr_t(BasePtr).
6173 // In other words, both pointers are either null, or both are non-null,
6174 // or the behaviour is undefined.
6175 auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
6176 auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
6177 auto *Valid = Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr);
6178 Checks.emplace_back(Valid, CheckOrdinal);
6179 }
6180
6181 if (PerformOverflowCheck) {
6182 // The GEP is valid if:
6183 // 1) The total offset doesn't overflow, and
6184 // 2) The sign of the difference between the computed address and the base
6185 // pointer matches the sign of the total offset.
6186 llvm::Value *ValidGEP;
6187 auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
6188 if (SignedIndices) {
6189 // GEP is computed as `unsigned base + signed offset`, therefore:
6190 // * If offset was positive, then the computed pointer can not be
6191 // [unsigned] less than the base pointer, unless it overflowed.
6192 // * If offset was negative, then the computed pointer can not be
6193 // [unsigned] greater than the bas pointere, unless it overflowed.
6194 auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
6195 auto *PosOrZeroOffset =
6196 Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
6197 llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
6198 ValidGEP =
6199 Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
6200 } else if (!IsSubtraction) {
6201 // GEP is computed as `unsigned base + unsigned offset`, therefore the
6202 // computed pointer can not be [unsigned] less than base pointer,
6203 // unless there was an overflow.
6204 // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
6205 ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
6206 } else {
6207 // GEP is computed as `unsigned base - unsigned offset`, therefore the
6208 // computed pointer can not be [unsigned] greater than base pointer,
6209 // unless there was an overflow.
6210 // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
6211 ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
6212 }
6213 ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
6214 Checks.emplace_back(ValidGEP, CheckOrdinal);
6215 }
6216
6217 assert(!Checks.empty() && "Should have produced some checks.");
6218
6219 llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
6220 // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
6221 llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
6222 EmitCheck(Checks, CheckHandler, StaticArgs, DynamicArgs);
6223
6224 return GEPVal;
6225}
6226
6228 Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
6229 bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
6230 const Twine &Name) {
6231 if (!SanOpts.has(SanitizerKind::PointerOverflow)) {
6232 llvm::GEPNoWrapFlags NWFlags = llvm::GEPNoWrapFlags::inBounds();
6233 if (!SignedIndices && !IsSubtraction)
6234 NWFlags |= llvm::GEPNoWrapFlags::noUnsignedWrap();
6235
6236 return Builder.CreateGEP(Addr, IdxList, elementType, Align, Name, NWFlags);
6237 }
6238
6239 return RawAddress(
6240 EmitCheckedInBoundsGEP(Addr.getElementType(), Addr.emitRawPointer(*this),
6241 IdxList, SignedIndices, IsSubtraction, Loc, Name),
6242 elementType, Align);
6243}
Defines the clang::ASTContext interface.
#define V(N, I)
static llvm::Value * EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, const BinaryOperator *E, llvm::Value *LHS, llvm::Value *RHS, CompareKind Kind, const char *NameSuffix="")
static void EmitHLSLElementwiseCast(CodeGenFunction &CGF, LValue DestVal, LValue SrcVal, SourceLocation Loc)
static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty)
static llvm::Value * EmitIsNegativeTestHelper(Value *V, QualType VType, const char *Name, CGBuilderTy &Builder)
static Value * createCastsForTypeOfSameSize(CGBuilderTy &Builder, const llvm::DataLayout &DL, Value *Src, llvm::Type *DstTy, StringRef Name="")
static bool isLValueKnownNonNull(CodeGenFunction &CGF, const Expr *E)
static bool matchesPostDecrInWhile(const UnaryOperator *UO, bool isInc, bool isPre, ASTContext &Ctx)
For the purposes of overflow pattern exclusion, does this match the "while(i--)" pattern?
IntrinsicType
@ VCMPGT
@ VCMPEQ
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, BuiltinType::Kind ElemKind)
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal, llvm::LLVMContext &VMContext, CodeGenModule &CGM, CGBuilderTy &Builder)
Evaluate given GEPVal, which is either an inbounds GEP, or a constant, and compute the total offset i...
static bool isDeclRefKnownNonNull(CodeGenFunction &CGF, const ValueDecl *D)
static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(QualType SrcType, QualType DstType)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool negMul, bool negAdd)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, unsigned Off)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static Value * ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF, Value *Src, unsigned NumElementsDst)
static Value * tryEmitFMulAdd(const BinOpInfo &op, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool isSub=false)
static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E, llvm::Value *InVal, bool IsInc, FPOptions FPFeatures)
#define HANDLE_BINOP(OP)
#define COMPOUND_OP(Op)
#define HANDLEBINOP(OP)
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e, CIRGenFunction &cgf)
Return true if the specified expression is cheap enough and side-effect-free enough to evaluate uncon...
static std::optional< QualType > getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e)
If e is a widened promoted integer, get its base (unpromoted) type.
#define VISITCOMP(CODE)
static uint32_t getBitWidth(const Expr *E)
llvm::APSInt APSInt
Definition Compiler.cpp:23
static Decl::Kind getKind(const Decl *D)
FormatToken * Previous
The previous token in the unwrapped line.
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
This file contains the declaration of TrapReasonBuilder and related classes.
llvm::APInt getValue() const
APSInt & getInt()
Definition APValue.h:489
bool isLValue() const
Definition APValue.h:472
bool isInt() const
Definition APValue.h:467
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
ParentMapContext & getParentMapContext()
Returns the dynamic AST node parent map context.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
CanQualType FloatTy
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
const LangOptions & getLangOpts() const
Definition ASTContext.h:926
bool isTypeIgnoredBySanitizer(const SanitizerMask &Mask, const QualType &Ty) const
Check if a type can have its sanitizer instrumentation elided based on its presence within an ignorel...
CanQualType BoolTy
unsigned getOpenMPDefaultSimdAlign(QualType T) const
Get default simd alignment of the specified complete type in bits.
llvm::FixedPointSemantics getFixedPointSemantics(QualType Ty) const
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getPromotedIntegerType(QualType PromotableType) const
Return the type that PromotableType will promote to: C99 6.3.1.1p2, assuming that PromotableType is a...
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getComplexType(QualType T) const
Return the uniqued reference to the type for a complex number with the specified element type.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:891
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
bool isPromotableIntegerType(QualType T) const
More type predicates useful for type checking/promotion.
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4465
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4471
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4477
LabelDecl * getLabel() const
Definition Expr.h:4507
uint64_t getValue() const
Definition ExprCXX.h:3046
QualType getElementType() const
Definition TypeBase.h:3734
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:6638
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:3972
Expr * getLHS() const
Definition Expr.h:4022
static Opcode getOpForCompoundAssignment(Opcode Opc)
Definition Expr.h:4119
bool isCompoundAssignmentOp() const
Definition Expr.h:4116
SourceLocation getExprLoc() const
Definition Expr.h:4013
bool isShiftOp() const
Definition Expr.h:4061
Expr * getRHS() const
Definition Expr.h:4024
bool isShiftAssignOp() const
Definition Expr.h:4130
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:4185
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, const Expr *LHS, const Expr *RHS)
Return true if a binary operator using the specified opcode and operands would match the 'p = (i8*)nu...
Definition Expr.cpp:2200
Opcode getOpcode() const
Definition Expr.h:4017
BinaryOperatorKind Opcode
Definition Expr.h:3977
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition DeclCXX.h:249
bool getValue() const
Definition ExprCXX.h:740
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
bool getValue() const
Definition ExprCXX.h:4334
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition ExprCXX.h:304
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1599
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3610
path_iterator path_begin()
Definition Expr.h:3680
CastKind getCastKind() const
Definition Expr.h:3654
bool changesVolatileQualification() const
Return.
Definition Expr.h:3744
path_iterator path_end()
Definition Expr.h:3681
Expr * getSubExpr()
Definition Expr.h:3660
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
bool isOne() const
isOne - Test whether the quantity equals one.
Definition CharUnits.h:125
unsigned getValue() const
Definition Expr.h:1629
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4818
bool hasProfileClangInstr() const
Check if Clang profile instrumenation is on.
SanitizerSet SanitizeTrap
Set of sanitizer checks that trap rather than diagnose.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
static Address invalid()
Definition Address.h:176
bool isValid() const
Definition Address.h:177
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:112
virtual llvm::Constant * EmitNullMemberPointer(const MemberPointerType *MPT)
Create a null member pointer of the given type.
Definition CGCXXABI.cpp:103
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition CGCXXABI.cpp:95
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition CGCXXABI.cpp:85
virtual llvm::Value * EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *Src)
Perform a derived-to-base, base-to-derived, or bitcast member pointer conversion.
Definition CGCXXABI.cpp:72
void EmitPseudoVariable(CGBuilderTy &Builder, llvm::Instruction *Value, QualType Ty)
Emit a pseudo variable and debug info for an intermediate value if it does not correspond to a variab...
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
void emitInitListOpaqueValues(CodeGenFunction &CGF, InitListExpr *E)
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
Produce the code for a CK_ARCConsumeObject.
Definition CGObjC.cpp:2152
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr, const VarDecl *ConditionalDecl=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
Definition CGObjC.cpp:573
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
CurrentSourceLocExprScope CurSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
llvm::Value * EmitARCReclaimReturnedObject(const Expr *e, bool allowUnsafeClaim)
Definition CGObjC.cpp:3089
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
Definition CGObjC.cpp:3679
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:6726
llvm::Value * EmitObjCSelectorExpr(const ObjCSelectorExpr *E)
Emit a selector.
Definition CGObjC.cpp:257
SanitizerSet SanOpts
Sanitizers enabled for this function.
RawAddress CreateIRTemp(QualType T, const Twine &Name="tmp")
CreateIRTemp - Create a temporary IR object of the given type, with appropriate alignment.
Definition CGExpr.cpp:183
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
llvm::Value * EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E)
Definition CGObjC.cpp:251
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
const CastExpr * CurCast
If a cast expression is being visited, this holds the current cast's expression.
static bool hasScalarEvaluationKind(QualType T)
llvm::Type * ConvertType(QualType T)
llvm::Value * EmitObjCProtocolExpr(const ObjCProtocolExpr *E)
Definition CGObjC.cpp:265
llvm::Value * EmitPointerAuthQualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType ValueType, Address StorageAddress, bool IsKnownNonNull)
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
Definition CGExpr.cpp:2717
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3685
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
RValue EmitVAArg(VAArgExpr *VE, Address &VAListAddr, AggValueSlot Slot=AggValueSlot::ignored())
Generate code to get an argument from the passed in pointer and update it accordingly.
Definition CGCall.cpp:6296
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
Definition CGExpr.cpp:1238
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
Definition CGExpr.cpp:6826
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void maybeUpdateMCDCTestVectorBitmap(const Expr *E)
Increment the profiler's counter for the given expression by StepV.
void EmitCXXDeleteExpr(const CXXDeleteExpr *E)
llvm::Value * EmitObjCArrayLiteral(const ObjCArrayLiteral *E)
Definition CGObjC.cpp:247
llvm::Value * EmitPromotedScalarExpr(const Expr *E, QualType PromotionType)
const LangOptions & getLangOpts() const
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
Store into a strong object.
Definition CGObjC.cpp:2545
bool isPointerKnownNonNull(const Expr *E)
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
Definition CGClass.cpp:394
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
Definition CGDecl.cpp:765
llvm::Value * EmitPointerAuthUnqualify(PointerAuthQualifier Qualifier, llvm::Value *Pointer, QualType PointerType, Address StorageAddress, bool IsKnownNonNull)
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
Emit a compare-and-exchange op for atomic type.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
Definition CGClass.cpp:2890
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitCXXNewExpr(const CXXNewExpr *E)
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
std::pair< LValue, llvm::Value * > EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored)
Definition CGObjC.cpp:3629
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3575
LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E, llvm::Value *&Result)
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition CGExpr.cpp:176
const TargetInfo & getTarget() const
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
llvm::Value * EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty)
Definition CGObjC.cpp:3953
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
Definition CGExpr.cpp:244
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot(), llvm::CallBase **CallOrInvoke=nullptr)
Definition CGExpr.cpp:5971
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition CGExpr.cpp:2373
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
llvm::Value * EmitObjCBoxedExpr(const ObjCBoxedExpr *E)
EmitObjCBoxedExpr - This routine generates code to call the appropriate expression boxing method.
Definition CGObjC.cpp:64
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:225
void maybeResetMCDCCondBitmap(const Expr *E)
Zero-init the MCDC temp value.
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:3825
SmallVector< const BinaryOperator *, 16 > MCDCLogOpStack
Stack to track the Logical Operator recursion nest for MC/DC.
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:5924
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
Definition CGExpr.cpp:2004
llvm::Value * EmitARCRetainScalarExpr(const Expr *expr)
EmitARCRetainScalarExpr - Semantically equivalent to EmitARCRetainObject(e->getType(),...
Definition CGObjC.cpp:3493
llvm::Value * EmitBlockLiteral(const BlockExpr *)
Emit block literal.
Definition CGBlocks.cpp:764
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2190
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
Definition CGExpr.cpp:5910
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition CGExpr.cpp:2570
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4265
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
EmitCompoundStmt - Emit a compound statement {..} node.
Definition CGStmt.cpp:569
llvm::AtomicRMWInst * emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Order=llvm::AtomicOrdering::SequentiallyConsistent, llvm::SyncScope::ID SSID=llvm::SyncScope::System, const AtomicExpr *AE=nullptr)
Emit an atomicrmw instruction, and applying relevant metadata when applicable.
llvm::Value * EmitPointerArithmetic(const BinaryOperator *BO, Expr *pointerOperand, llvm::Value *pointer, Expr *indexOperand, llvm::Value *index, bool isSubtraction)
Emit pointer + index arithmetic.
RValue EmitAnyExpr(const Expr *E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
EmitAnyExpr - Emit code to compute the specified expression which can have any type.
Definition CGExpr.cpp:266
uint64_t getCurrentProfileCount()
Get the profiler's current count.
llvm::Type * ConvertTypeForMem(QualType T)
RValue EmitAtomicExpr(AtomicExpr *E)
Definition CGAtomic.cpp:892
void markStmtMaybeUsed(const Stmt *S)
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
void FlattenAccessAndTypeLValue(LValue LVal, SmallVectorImpl< LValue > &AccessList)
Definition CGExpr.cpp:6835
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
llvm::Value * authPointerToPointerCast(llvm::Value *ResultPtr, QualType SourceType, QualType DestType)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1552
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
Definition CGStmt.cpp:675
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
Definition CGExpr.cpp:1633
llvm::Type * convertTypeForLoadStore(QualType ASTTy, llvm::Type *LLVMTy=nullptr)
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
Definition CGExpr.cpp:736
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitBuiltinAvailable(const VersionTuple &Version)
Definition CGObjC.cpp:4033
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::Value * EmitMatrixIndexExpr(const Expr *E)
Definition CGExpr.cpp:4805
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4177
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of 'this'.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2220
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
llvm::Value * EmitObjCStringLiteral(const ObjCStringLiteral *E)
Emits an instance of NSConstantString representing the object.
Definition CGObjC.cpp:51
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
ConstantEmission tryEmitAsConstant(const DeclRefExpr *RefExpr)
Try to emit a reference to the given value without producing it as an l-value.
Definition CGExpr.cpp:1901
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1668
llvm::Value * EmitARCExtendBlockObject(const Expr *expr)
Definition CGObjC.cpp:3524
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
i8* @objc_storeWeak(i8** addr, i8* value) Returns value.
Definition CGObjC.cpp:2651
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType)
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:655
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition CGExpr.cpp:1348
CGHLSLRuntime & getHLSLRuntime()
Return a reference to the configured HLSL runtime.
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
TrapReasonBuilder BuildTrapReason(unsigned DiagID, TrapReason &TR)
Helper function to construct a TrapReasonBuilder.
llvm::Constant * getNullPointer(llvm::PointerType *T, QualType QT)
Get target specific null pointer.
const TargetInfo & getTarget() const
llvm::Constant * getMemberPointerConstant(const UnaryOperator *e)
const llvm::DataLayout & getDataLayout() const
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Value * createOpenCLIntToSamplerConversion(const Expr *E, CodeGenFunction &CGF)
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
LValue - This represents an lvalue references.
Definition CGValue.h:182
bool isBitField() const
Definition CGValue.h:280
bool isVolatileQualified() const
Definition CGValue.h:285
const Qualifiers & getQuals() const
Definition CGValue.h:338
Address getAddress() const
Definition CGValue.h:361
QualType getType() const
Definition CGValue.h:291
const CGBitFieldInfo & getBitFieldInfo() const
Definition CGValue.h:424
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
bool isScalar() const
Definition CGValue.h:64
static RValue get(llvm::Value *V)
Definition CGValue.h:98
bool isAggregate() const
Definition CGValue.h:66
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
Definition CGValue.h:83
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition CGValue.h:71
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, llvm::Type *DestTy, bool IsNonNull=false) const
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4234
QualType getComputationLHSType() const
Definition Expr.h:4268
QualType getComputationResultType() const
Definition Expr.h:4271
bool isSatisfied() const
Whether or not the concept with the given arguments was satisfied when the expression was created.
APValue getAPValueResult() const
Definition Expr.cpp:409
bool hasAPValueResult() const
Definition Expr.h:1157
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:4743
T * getAttr() const
Definition DeclBase.h:573
ChildElementIter< false > begin()
Definition Expr.h:5166
size_t getDataElementCount() const
Definition Expr.h:5082
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isIntegerConstantExpr(const ASTContext &Ctx) const
bool isGLValue() const
Definition Expr.h:287
@ SE_AllowSideEffects
Allow any unmodeled side effect.
Definition Expr.h:674
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3081
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3065
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition Expr.h:476
QualType getType() const
Definition Expr.h:144
llvm::APInt getValue() const
Returns an internal integer representation of the literal.
Definition Expr.h:1575
llvm::APFloat getValue() const
Definition Expr.h:1666
const Expr * getSubExpr() const
Definition Expr.h:1062
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition Expr.h:3787
unsigned getNumInits() const
Definition Expr.h:5263
bool hadArrayRangeDesignator() const
Definition Expr.h:5417
const Expr * getInit(unsigned Init) const
Definition Expr.h:5287
@ PostDecrInWhile
while (count–)
bool isSignedOverflowDefined() const
bool isOverflowPatternExcluded(OverflowPatternExclusionKind Kind) const
std::string OverflowHandler
The name of the handler function to be called when -ftrapv is specified.
Represents a matrix type, as defined in the Matrix Types clang extensions.
Definition TypeBase.h:4337
Expr * getBase() const
Definition Expr.h:3375
bool isArrow() const
Definition Expr.h:3482
VersionTuple getVersion() const
Definition ExprObjC.h:1726
ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC 'id' type.
Definition ExprObjC.h:1498
Expr * getBase() const
Definition ExprObjC.h:1523
SourceLocation getExprLoc() const LLVM_READONLY
Definition ExprObjC.h:1546
const ObjCMethodDecl * getMethodDecl() const
Definition ExprObjC.h:1364
QualType getReturnType() const
Definition DeclObjC.h:329
Represents a pointer to an Objective C object.
Definition TypeBase.h:7896
const ObjCObjectType * getObjectType() const
Gets the type pointed to by this ObjC pointer.
Definition TypeBase.h:7933
Expr * getIndexExpr(unsigned Idx)
Definition Expr.h:2586
const OffsetOfNode & getComponent(unsigned Idx) const
Definition Expr.h:2574
TypeSourceInfo * getTypeSourceInfo() const
Definition Expr.h:2567
unsigned getNumComponents() const
Definition Expr.h:2582
unsigned getArrayExprIndex() const
For an array element node, returns the index into the array of expressions.
Definition Expr.h:2479
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition Expr.h:2485
@ Array
An index into an array.
Definition Expr.h:2426
@ Identifier
A field in a dependent type, known only by its name.
Definition Expr.h:2430
@ Field
A field.
Definition Expr.h:2428
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition Expr.h:2433
Kind getKind() const
Determine what kind of offsetof node this is.
Definition Expr.h:2475
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition Expr.h:2495
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:1208
Expr * getSelectedExpr() const
Definition ExprCXX.h:4641
const Expr * getSubExpr() const
Definition Expr.h:2199
DynTypedNodeList getParents(const NodeT &Node)
Returns the parents of the given node (within the traversal scope).
Pointer-authentication qualifiers.
Definition TypeBase.h:152
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
A (possibly-)qualified type.
Definition TypeBase.h:937
PointerAuthQualifier getPointerAuth() const
Definition TypeBase.h:1453
bool mayBeDynamicClass() const
Returns true if it is a class and it might be dynamic.
Definition Type.cpp:130
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8278
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8404
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1438
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8463
QualType getCanonicalType() const
Definition TypeBase.h:8330
bool UseExcessPrecision(const ASTContext &Ctx)
Definition Type.cpp:1612
bool mayBeNotDynamicClass() const
Returns true if it is not a class or if the class might not be dynamic.
Definition Type.cpp:135
bool isCanonical() const
Definition TypeBase.h:8335
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
void removePointerAuth()
Definition TypeBase.h:610
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4512
bool isSatisfied() const
Whether or not the requires clause is satisfied.
std::string ComputeName(ASTContext &Context) const
Definition Expr.cpp:583
static constexpr SanitizerMask bitPosToMask(const unsigned Pos)
Create a mask with a bit enabled at position Pos.
Definition Sanitizers.h:59
llvm::APSInt getShuffleMaskIdx(unsigned N) const
Definition Expr.h:4629
unsigned getNumSubExprs() const
getNumSubExprs - Return the size of the SubExprs array.
Definition Expr.h:4610
Expr * getExpr(unsigned Index)
getExpr - Return the Expr at the specified index.
Definition Expr.h:4616
unsigned getPackLength() const
Retrieve the length of the parameter pack.
Definition ExprCXX.h:4517
APValue EvaluateInContext(const ASTContext &Ctx, const Expr *DefaultExpr) const
Return the result of evaluating this SourceLocExpr in the specified (and possibly null) default argum...
Definition Expr.cpp:2277
SourceLocation getLocation() const
Definition Expr.h:4995
Encodes a location in the source.
CompoundStmt * getSubStmt()
Definition Expr.h:4546
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
void dump() const
Dumps the specified AST fragment and all subtrees to llvm::errs().
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:350
virtual bool useFP16ConversionIntrinsics() const
Check whether llvm intrinsics such as llvm.convert.to.fp16 should be used to convert to and from __fp...
VersionTuple getPlatformMinVersion() const
Retrieve the minimum desired version of the platform, to which the program should be compiled.
const llvm::fltSemantics & getHalfFormat() const
Definition TargetInfo.h:783
const llvm::fltSemantics & getBFloat16Format() const
Definition TargetInfo.h:793
const llvm::fltSemantics & getLongDoubleFormat() const
Definition TargetInfo.h:804
const llvm::fltSemantics & getFloat128Format() const
Definition TargetInfo.h:812
const llvm::fltSemantics & getIbm128Format() const
Definition TargetInfo.h:820
QualType getType() const
Return the type wrapped by this type source info.
Definition TypeBase.h:8260
bool getBoolValue() const
Definition ExprCXX.h:2949
const APValue & getAPValue() const
Definition ExprCXX.h:2954
bool isStoredAsBoolean() const
Definition ExprCXX.h:2945
bool isVoidType() const
Definition TypeBase.h:8871
bool isBooleanType() const
Definition TypeBase.h:9001
bool isSignableType(const ASTContext &Ctx) const
Definition TypeBase.h:8527
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2225
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition Type.cpp:2273
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2337
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8915
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9158
bool isReferenceType() const
Definition TypeBase.h:8539
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1909
bool isSveVLSBuiltinType() const
Determines if this is a sizeless type supported by the 'arm_sve_vector_bits' type attribute,...
Definition Type.cpp:2607
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isExtVectorType() const
Definition TypeBase.h:8658
bool isExtVectorBoolType() const
Definition TypeBase.h:8662
bool isOCLIntelSubgroupAVCType() const
Definition TypeBase.h:8790
bool isBuiltinType() const
Helper methods to distinguish type categories.
Definition TypeBase.h:8638
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8650
bool isFixedPointType() const
Return true if this is a fixed point type according to ISO/IEC JTC1 SC22 WG14 N1169.
Definition TypeBase.h:8927
bool isHalfType() const
Definition TypeBase.h:8875
bool hasSignedIntegerRepresentation() const
Determine whether this type has an signed integer representation of some sort, e.g....
Definition Type.cpp:2243
bool isQueueT() const
Definition TypeBase.h:8761
bool isMatrixType() const
Definition TypeBase.h:8672
bool isEventT() const
Definition TypeBase.h:8753
bool isFunctionType() const
Definition TypeBase.h:8511
bool isVectorType() const
Definition TypeBase.h:8654
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2320
bool isFloatingType() const
Definition Type.cpp:2304
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2253
const T * castAsCanonical() const
Return this type's canonical type cast to the specified type.
Definition TypeBase.h:2928
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9091
bool isNullPtrType() const
Definition TypeBase.h:8908
QualType getTypeOfArgument() const
Gets the argument type, or the type of the argument expression, whichever is appropriate.
Definition Expr.h:2694
UnaryExprOrTypeTrait getKind() const
Definition Expr.h:2657
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
SourceLocation getExprLoc() const
Definition Expr.h:2368
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:2400
bool canOverflow() const
Returns true if the unary operator can cause an overflow.
Definition Expr.h:2298
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
bool isWeak() const
Determine whether this symbol is weakly-imported, or declared with the weak or weak-ref attr.
Definition Decl.cpp:5501
QualType getType() const
Definition Value.cpp:237
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3966
Represents a GCC generic vector type.
Definition TypeBase.h:4175
WhileStmt - This represents a 'while' stmt.
Definition Stmt.h:2697
Defines the clang::TargetInfo interface.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::ArgumentAdaptingMatcherFunc< internal::HasMatcher > has
Matches AST nodes that have child AST nodes that match the provided matcher.
const AstTypeMatcher< PointerType > pointerType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
llvm::APFloat APFloat
Definition Floating.h:27
llvm::APInt APInt
Definition FixedPoint.h:19
bool LE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1247
bool Load(InterpState &S, CodePtr OpPC)
Definition Interp.h:1911
bool GE(InterpState &S, CodePtr OpPC)
Definition Interp.h:1262
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
if(T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T -> getSizeExpr())))
@ Result
The result type of a method or function.
Definition TypeBase.h:905
const FunctionProtoType * T
CastKind
CastKind - The kind of operation required for a conversion.
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
#define false
Definition stdbool.h:26
#define true
Definition stdbool.h:25
llvm::Value * TotalOffset
llvm::Value * OffsetOverflows
Structure with information about how a bitfield should be accessed.
unsigned Size
The total size of the bit-field, in bits.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
static TBAAAccessInfo getMayAliasInfo()
Definition CodeGenTBAA.h:63
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition Sanitizers.h:184