CIRGenExprScalar.cpp
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Emit Expr nodes with scalar CIR types as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
14#include "CIRGenValue.h"
15
16#include "clang/AST/Expr.h"
19
20#include "mlir/IR/Location.h"
21#include "mlir/IR/Value.h"
22
23#include <cassert>
24#include <utility>
25
26using namespace clang;
27using namespace clang::CIRGen;
28
29namespace {
30
31struct BinOpInfo {
32 mlir::Value lhs;
33 mlir::Value rhs;
34 SourceRange loc;
35 QualType fullType; // Type of operands and result
36 QualType compType; // Type used for computations. Element type
37 // for vectors, otherwise same as FullType.
38 BinaryOperator::Opcode opcode; // Opcode of BinOp to perform
39 FPOptions fpfeatures;
 40 const Expr *e; // Entire expr, used for diagnostics on unsupported cases. May not be a binop.
41
42 /// Check if the binop computes a division or a remainder.
43 bool isDivRemOp() const {
44 return opcode == BO_Div || opcode == BO_Rem || opcode == BO_DivAssign ||
45 opcode == BO_RemAssign;
46 }
47
48 /// Check if the binop can result in integer overflow.
49 bool mayHaveIntegerOverflow() const {
50 // Without constant input, we can't rule out overflow.
51 auto lhsci = lhs.getDefiningOp<cir::ConstantOp>();
52 auto rhsci = rhs.getDefiningOp<cir::ConstantOp>();
53 if (!lhsci || !rhsci)
54 return true;
55
57 // TODO(cir): For now we just assume that we might overflow
58 return true;
59 }
60
61 /// Check if at least one operand is a fixed point type. In such cases,
62 /// this operation did not follow usual arithmetic conversion and both
63 /// operands might not be of the same type.
64 bool isFixedPointOp() const {
65 // We cannot simply check the result type since comparison operations
66 // return an int.
67 if (const auto *binOp = llvm::dyn_cast<BinaryOperator>(e)) {
68 QualType lhstype = binOp->getLHS()->getType();
69 QualType rhstype = binOp->getRHS()->getType();
70 return lhstype->isFixedPointType() || rhstype->isFixedPointType();
71 }
72 if (const auto *unop = llvm::dyn_cast<UnaryOperator>(e))
73 return unop->getSubExpr()->getType()->isFixedPointType();
74 return false;
75 }
76};
77
78class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
79 CIRGenFunction &cgf;
80 CIRGenBuilderTy &builder;
 81 // Unlike classic codegen, we set this to false or use std::exchange to read
 82 // the value, instead of calling TestAndClearIgnoreResultAssign, to make it
 83 // explicit when the value is used.
84 bool ignoreResultAssign;
85
86public:
87 ScalarExprEmitter(CIRGenFunction &cgf, CIRGenBuilderTy &builder,
88 bool ignoreResultAssign = false)
89 : cgf(cgf), builder(builder), ignoreResultAssign(ignoreResultAssign) {}
90
91 //===--------------------------------------------------------------------===//
92 // Utilities
93 //===--------------------------------------------------------------------===//
94
95 mlir::Value emitComplexToScalarConversion(mlir::Location loc,
96 mlir::Value value, CastKind kind,
97 QualType destTy);
98
99 mlir::Value emitNullValue(QualType ty, mlir::Location loc) {
100 return cgf.cgm.emitNullConstant(ty, loc);
101 }
102
103 mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType) {
104 return builder.createFloatingCast(result, cgf.convertType(promotionType));
105 }
106
107 mlir::Value emitUnPromotedValue(mlir::Value result, QualType exprType) {
108 return builder.createFloatingCast(result, cgf.convertType(exprType));
109 }
110
111 mlir::Value emitPromoted(const Expr *e, QualType promotionType);
112
113 mlir::Value maybePromoteBoolResult(mlir::Value value,
114 mlir::Type dstTy) const {
115 if (mlir::isa<cir::IntType>(dstTy))
116 return builder.createBoolToInt(value, dstTy);
117 if (mlir::isa<cir::BoolType>(dstTy))
118 return value;
119 llvm_unreachable("Can only promote integer or boolean types");
120 }
121
122 //===--------------------------------------------------------------------===//
123 // Visitor Methods
124 //===--------------------------------------------------------------------===//
125
126 mlir::Value Visit(Expr *e) {
127 return StmtVisitor<ScalarExprEmitter, mlir::Value>::Visit(e);
128 }
129
130 mlir::Value VisitStmt(Stmt *s) {
131 llvm_unreachable("Statement passed to ScalarExprEmitter");
132 }
133
134 mlir::Value VisitExpr(Expr *e) {
135 cgf.getCIRGenModule().errorNYI(
136 e->getSourceRange(), "scalar expression kind: ", e->getStmtClassName());
137 return {};
138 }
139
140 mlir::Value VisitPackIndexingExpr(PackIndexingExpr *e) {
141 return Visit(e->getSelectedExpr());
142 }
143
144 mlir::Value VisitParenExpr(ParenExpr *pe) { return Visit(pe->getSubExpr()); }
145
146 mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
147 return Visit(ge->getResultExpr());
148 }
149
150 /// Emits the address of the l-value, then loads and returns the result.
151 mlir::Value emitLoadOfLValue(const Expr *e) {
152 LValue lv = cgf.emitLValue(e);
153 // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V);
154 return cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
155 }
156
157 mlir::Value emitLoadOfLValue(LValue lv, SourceLocation loc) {
158 return cgf.emitLoadOfLValue(lv, loc).getValue();
159 }
160
161 // l-values
162 mlir::Value VisitDeclRefExpr(DeclRefExpr *e) {
163 if (CIRGenFunction::ConstantEmission constant = cgf.tryEmitAsConstant(e))
164 return cgf.emitScalarConstant(constant, e);
165
166 return emitLoadOfLValue(e);
167 }
168
169 mlir::Value VisitIntegerLiteral(const IntegerLiteral *e) {
170 mlir::Type type = cgf.convertType(e->getType());
171 return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()),
172 cir::IntAttr::get(type, e->getValue()));
173 }
174
175 mlir::Value VisitFloatingLiteral(const FloatingLiteral *e) {
176 mlir::Type type = cgf.convertType(e->getType());
177 assert(mlir::isa<cir::FPTypeInterface>(type) &&
178 "expect floating-point type");
179 return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()),
180 cir::FPAttr::get(type, e->getValue()));
181 }
182
183 mlir::Value VisitCharacterLiteral(const CharacterLiteral *e) {
184 mlir::Type ty = cgf.convertType(e->getType());
185 auto init = cir::IntAttr::get(ty, e->getValue());
186 return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()), init);
187 }
188
189 mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *e) {
190 return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
191 }
192
193 mlir::Value VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *e) {
194 if (e->getType()->isVoidType())
195 return {};
196
197 return emitNullValue(e->getType(), cgf.getLoc(e->getSourceRange()));
198 }
199
200 mlir::Value VisitOpaqueValueExpr(OpaqueValueExpr *e) {
201 if (e->isGLValue())
202 return emitLoadOfLValue(cgf.getOrCreateOpaqueLValueMapping(e),
203 e->getExprLoc());
204
205 // Otherwise, assume the mapping is the scalar directly.
206 return cgf.getOrCreateOpaqueRValueMapping(e).getValue();
207 }
208
209 mlir::Value VisitCastExpr(CastExpr *e);
210 mlir::Value VisitCallExpr(const CallExpr *e);
211
212 mlir::Value VisitStmtExpr(StmtExpr *e) {
213 CIRGenFunction::StmtExprEvaluation eval(cgf);
214 if (e->getType()->isVoidType()) {
215 (void)cgf.emitCompoundStmt(*e->getSubStmt());
216 return {};
217 }
218
219 Address retAlloca =
220 cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
221 (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca);
222
223 return cgf.emitLoadOfScalar(cgf.makeAddrLValue(retAlloca, e->getType()),
224 e->getExprLoc());
225 }
226
227 mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
228 ignoreResultAssign = false;
229
230 if (e->getBase()->getType()->isVectorType()) {
232
233 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
234 const mlir::Value vecValue = Visit(e->getBase());
235 const mlir::Value indexValue = Visit(e->getIdx());
236 return cir::VecExtractOp::create(cgf.builder, loc, vecValue, indexValue);
237 }
238 // Just load the lvalue formed by the subscript expression.
239 return emitLoadOfLValue(e);
240 }
241
242 mlir::Value VisitShuffleVectorExpr(ShuffleVectorExpr *e) {
243 if (e->getNumSubExprs() == 2) {
244 // The undocumented form of __builtin_shufflevector.
245 mlir::Value inputVec = Visit(e->getExpr(0));
246 mlir::Value indexVec = Visit(e->getExpr(1));
247 return cir::VecShuffleDynamicOp::create(
248 cgf.builder, cgf.getLoc(e->getSourceRange()), inputVec, indexVec);
249 }
250
251 mlir::Value vec1 = Visit(e->getExpr(0));
252 mlir::Value vec2 = Visit(e->getExpr(1));
253
254 // The documented form of __builtin_shufflevector, where the indices are
255 // a variable number of integer constants. The constants will be stored
256 // in an ArrayAttr.
257 SmallVector<mlir::Attribute, 8> indices;
258 for (unsigned i = 2; i < e->getNumSubExprs(); ++i) {
259 indices.push_back(
260 cir::IntAttr::get(cgf.builder.getSInt64Ty(),
261 e->getExpr(i)
262 ->EvaluateKnownConstInt(cgf.getContext())
263 .getSExtValue()));
264 }
265
266 return cir::VecShuffleOp::create(cgf.builder,
267 cgf.getLoc(e->getSourceRange()),
268 cgf.convertType(e->getType()), vec1, vec2,
269 cgf.builder.getArrayAttr(indices));
270 }
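  // Illustrative example (not from the source): the two forms handled above,
  // assuming a vector type such as
  //   typedef int v4si __attribute__((vector_size(16)));
  //
  //   v4si a, b, idx;
  //   v4si r1 = __builtin_shufflevector(a, idx);           // dynamic indices ->
  //                                                        // cir::VecShuffleDynamicOp
  //   v4si r2 = __builtin_shufflevector(a, b, 0, 4, 1, 5); // constant indices ->
  //                                                        // cir::VecShuffleOp with an
  //                                                        // ArrayAttr of {0, 4, 1, 5}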
271
272 mlir::Value VisitConvertVectorExpr(ConvertVectorExpr *e) {
273 // __builtin_convertvector is an element-wise cast, and is implemented as a
274 // regular cast. The back end handles casts of vectors correctly.
275 return emitScalarConversion(Visit(e->getSrcExpr()),
276 e->getSrcExpr()->getType(), e->getType(),
277 e->getSourceRange().getBegin());
278 }
279
280 mlir::Value VisitMemberExpr(MemberExpr *e);
281
282 mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
283 return emitLoadOfLValue(e);
284 }
285
286 mlir::Value VisitInitListExpr(InitListExpr *e);
287
288 mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *e) {
289 return VisitCastExpr(e);
290 }
291
292 mlir::Value VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *e) {
293 return cgf.cgm.emitNullConstant(e->getType(),
294 cgf.getLoc(e->getSourceRange()));
295 }
296
297 /// Perform a pointer to boolean conversion.
298 mlir::Value emitPointerToBoolConversion(mlir::Value v, QualType qt) {
299 // TODO(cir): comparing the ptr to null is done when lowering CIR to LLVM.
300 // We might want to have a separate pass for these types of conversions.
301 return cgf.getBuilder().createPtrToBoolCast(v);
302 }
303
304 mlir::Value emitFloatToBoolConversion(mlir::Value src, mlir::Location loc) {
305 cir::BoolType boolTy = builder.getBoolTy();
306 return cir::CastOp::create(builder, loc, boolTy,
307 cir::CastKind::float_to_bool, src);
308 }
309
310 mlir::Value emitIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) {
311 // Because of the type rules of C, we often end up computing a
312 // logical value, then zero extending it to int, then wanting it
313 // as a logical value again.
314 // TODO: optimize this common case here or leave it for later
315 // CIR passes?
316 cir::BoolType boolTy = builder.getBoolTy();
317 return cir::CastOp::create(builder, loc, boolTy, cir::CastKind::int_to_bool,
318 srcVal);
319 }
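  // Illustrative example (not from the source) of the C pattern described above:
  //
  //   int x = ...;
  //   _Bool b = x;   // int -> bool: emitted here as a cast with int_to_bool
  //   int y = b;     // bool -> int: widened again
  //   if (y) ...     // and converted back to a logical value once more
  //
  // For now each int -> bool step simply produces the cast; folding the chain
  // is left to later CIR passes (see the TODO above).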
320
321 /// Convert the specified expression value to a boolean (!cir.bool) truth
322 /// value. This is equivalent to "Val != 0".
323 mlir::Value emitConversionToBool(mlir::Value src, QualType srcType,
324 mlir::Location loc) {
325 assert(srcType.isCanonical() && "EmitScalarConversion strips typedefs");
326
327 if (srcType->isRealFloatingType())
328 return emitFloatToBoolConversion(src, loc);
329
330 if (llvm::isa<MemberPointerType>(srcType)) {
331 cgf.getCIRGenModule().errorNYI(loc, "member pointer to bool conversion");
332 return builder.getFalse(loc);
333 }
334
335 if (srcType->isIntegerType())
336 return emitIntToBoolConversion(src, loc);
337
338 assert(::mlir::isa<cir::PointerType>(src.getType()));
339 return emitPointerToBoolConversion(src, srcType);
340 }
341
342 // Emit a conversion from the specified type to the specified destination
343 // type, both of which are CIR scalar types.
344 struct ScalarConversionOpts {
345 bool treatBooleanAsSigned;
346 bool emitImplicitIntegerTruncationChecks;
347 bool emitImplicitIntegerSignChangeChecks;
348
349 ScalarConversionOpts()
350 : treatBooleanAsSigned(false),
351 emitImplicitIntegerTruncationChecks(false),
352 emitImplicitIntegerSignChangeChecks(false) {}
353
354 ScalarConversionOpts(clang::SanitizerSet sanOpts)
355 : treatBooleanAsSigned(false),
356 emitImplicitIntegerTruncationChecks(
357 sanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
358 emitImplicitIntegerSignChangeChecks(
359 sanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
360 };
361
362 // Conversion from bool, integral, or floating-point to integral or
363 // floating-point. Conversions involving other types are handled elsewhere.
364 // Conversion to bool is handled elsewhere because that's a comparison against
365 // zero, not a simple cast. This handles both individual scalars and vectors.
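  // A rough summary of the CastKind selection below (illustrative, derived from
  // the branches in this function):
  //
  //   bool  -> int    : cir::CastKind::bool_to_int
  //   bool  -> float  : cir::CastKind::bool_to_float
  //   int   -> int    : cir::CastKind::integral
  //   int   -> float  : cir::CastKind::int_to_float
  //   float -> int    : cir::CastKind::float_to_int
  //   float -> float  : createFloatingCast (FPExt/FPTrunc split is still a TODO)
  //
  // For vector operands the same element-wise kinds are applied to the full
  // vector type.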
366 mlir::Value emitScalarCast(mlir::Value src, QualType srcType,
367 QualType dstType, mlir::Type srcTy,
368 mlir::Type dstTy, ScalarConversionOpts opts) {
369 assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
370 "Internal error: matrix types not handled by this function.");
371 assert(!(mlir::isa<mlir::IntegerType>(srcTy) ||
372 mlir::isa<mlir::IntegerType>(dstTy)) &&
373 "Obsolete code. Don't use mlir::IntegerType with CIR.");
374
375 mlir::Type fullDstTy = dstTy;
376 if (mlir::isa<cir::VectorType>(srcTy) &&
377 mlir::isa<cir::VectorType>(dstTy)) {
378 // Use the element types of the vectors to figure out the CastKind.
379 srcTy = mlir::dyn_cast<cir::VectorType>(srcTy).getElementType();
380 dstTy = mlir::dyn_cast<cir::VectorType>(dstTy).getElementType();
381 }
382
383 std::optional<cir::CastKind> castKind;
384
385 if (mlir::isa<cir::BoolType>(srcTy)) {
386 if (opts.treatBooleanAsSigned)
387 cgf.getCIRGenModule().errorNYI("signed bool");
388 if (cgf.getBuilder().isInt(dstTy))
389 castKind = cir::CastKind::bool_to_int;
390 else if (mlir::isa<cir::FPTypeInterface>(dstTy))
391 castKind = cir::CastKind::bool_to_float;
392 else
393 llvm_unreachable("Internal error: Cast to unexpected type");
394 } else if (cgf.getBuilder().isInt(srcTy)) {
395 if (cgf.getBuilder().isInt(dstTy))
396 castKind = cir::CastKind::integral;
397 else if (mlir::isa<cir::FPTypeInterface>(dstTy))
398 castKind = cir::CastKind::int_to_float;
399 else
400 llvm_unreachable("Internal error: Cast to unexpected type");
401 } else if (mlir::isa<cir::FPTypeInterface>(srcTy)) {
402 if (cgf.getBuilder().isInt(dstTy)) {
403 // If we can't recognize overflow as undefined behavior, assume that
404 // overflow saturates. This protects against normal optimizations if we
405 // are compiling with non-standard FP semantics.
406 if (!cgf.cgm.getCodeGenOpts().StrictFloatCastOverflow)
407 cgf.getCIRGenModule().errorNYI("strict float cast overflow");
409 castKind = cir::CastKind::float_to_int;
410 } else if (mlir::isa<cir::FPTypeInterface>(dstTy)) {
411 // TODO: split this to createFPExt/createFPTrunc
412 return builder.createFloatingCast(src, fullDstTy);
413 } else {
414 llvm_unreachable("Internal error: Cast to unexpected type");
415 }
416 } else {
417 llvm_unreachable("Internal error: Cast from unexpected type");
418 }
419
420 assert(castKind.has_value() && "Internal error: CastKind not set.");
421 return cir::CastOp::create(builder, src.getLoc(), fullDstTy, *castKind,
422 src);
423 }
424
425 mlir::Value
426 VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
427 return Visit(e->getReplacement());
428 }
429
430 mlir::Value VisitVAArgExpr(VAArgExpr *ve) {
431 QualType ty = ve->getType();
432
433 if (ty->isVariablyModifiedType()) {
434 cgf.cgm.errorNYI(ve->getSourceRange(),
435 "variably modified types in varargs");
436 }
437
438 return cgf.emitVAArg(ve);
439 }
440
441 mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *e);
442 mlir::Value
443 VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);
444
445 // Unary Operators.
446 mlir::Value VisitUnaryPostDec(const UnaryOperator *e) {
447 LValue lv = cgf.emitLValue(e->getSubExpr());
448 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, false);
449 }
450 mlir::Value VisitUnaryPostInc(const UnaryOperator *e) {
451 LValue lv = cgf.emitLValue(e->getSubExpr());
452 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, false);
453 }
454 mlir::Value VisitUnaryPreDec(const UnaryOperator *e) {
455 LValue lv = cgf.emitLValue(e->getSubExpr());
456 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, true);
457 }
458 mlir::Value VisitUnaryPreInc(const UnaryOperator *e) {
459 LValue lv = cgf.emitLValue(e->getSubExpr());
460 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, true);
461 }
462 mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
463 cir::UnaryOpKind kind, bool isPre) {
464 if (cgf.getLangOpts().OpenMP)
465 cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec OpenMP");
466
467 QualType type = e->getSubExpr()->getType();
468
469 mlir::Value value;
470 mlir::Value input;
471
472 if (type->getAs<AtomicType>()) {
473 cgf.cgm.errorNYI(e->getSourceRange(), "Atomic inc/dec");
474 // TODO(cir): This is not correct, but it will produce reasonable code
475 // until atomic operations are implemented.
476 value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
477 input = value;
478 } else {
479 value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
480 input = value;
481 }
482
483 // NOTE: When possible, more frequent cases are handled first.
484
485 // Special case of integer increment that we have to check first: bool++.
486 // Due to promotion rules, we get:
487 // bool++ -> bool = bool + 1
488 // -> bool = (int)bool + 1
489 // -> bool = ((int)bool + 1 != 0)
490 // An interesting aspect of this is that increment is always true.
491 // Decrement does not have this property.
492 if (kind == cir::UnaryOpKind::Inc && type->isBooleanType()) {
493 value = builder.getTrue(cgf.getLoc(e->getExprLoc()));
494 } else if (type->isIntegerType()) {
495 QualType promotedType;
496 [[maybe_unused]] bool canPerformLossyDemotionCheck = false;
497 if (cgf.getContext().isPromotableIntegerType(type)) {
498 promotedType = cgf.getContext().getPromotedIntegerType(type);
499 assert(promotedType != type && "Shouldn't promote to the same type.");
500 canPerformLossyDemotionCheck = true;
501 canPerformLossyDemotionCheck &=
502 cgf.getContext().getCanonicalType(type) !=
503 cgf.getContext().getCanonicalType(promotedType);
504 canPerformLossyDemotionCheck &=
505 type->isIntegerType() && promotedType->isIntegerType();
506
507 // TODO(cir): Currently, we store bitwidths in CIR types only for
508 // integers. This might also be required for other types.
509
510 assert(
511 (!canPerformLossyDemotionCheck ||
512 type->isSignedIntegerOrEnumerationType() ||
513 promotedType->isSignedIntegerOrEnumerationType() ||
514 mlir::cast<cir::IntType>(cgf.convertType(type)).getWidth() ==
 515 mlir::cast<cir::IntType>(cgf.convertType(promotedType)).getWidth()) &&
516 "The following check expects that if we do promotion to different "
517 "underlying canonical type, at least one of the types (either "
518 "base or promoted) will be signed, or the bitwidths will match.");
519 }
520
522 if (e->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
523 value = emitIncDecConsiderOverflowBehavior(e, value, kind);
524 } else {
525 cir::UnaryOpKind kind =
526 e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
527 // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
528 value = emitUnaryOp(e, kind, input, /*nsw=*/false);
529 }
530 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
531 QualType type = ptr->getPointeeType();
532 if (cgf.getContext().getAsVariableArrayType(type)) {
533 // VLA types don't have constant size.
534 cgf.cgm.errorNYI(e->getSourceRange(), "Pointer arithmetic on VLA");
535 return {};
536 } else if (type->isFunctionType()) {
537 // Arithmetic on function pointers (!) is just +-1.
538 cgf.cgm.errorNYI(e->getSourceRange(),
539 "Pointer arithmetic on function pointer");
540 return {};
541 } else {
542 // For everything else, we can just do a simple increment.
543 mlir::Location loc = cgf.getLoc(e->getSourceRange());
544 CIRGenBuilderTy &builder = cgf.getBuilder();
545 int amount = kind == cir::UnaryOpKind::Inc ? 1 : -1;
546 mlir::Value amt = builder.getSInt32(amount, loc);
548 value = builder.createPtrStride(loc, value, amt);
549 }
550 } else if (type->isVectorType()) {
551 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec vector");
552 return {};
553 } else if (type->isRealFloatingType()) {
555
556 if (type->isHalfType() &&
557 !cgf.getContext().getLangOpts().NativeHalfType) {
558 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec half");
559 return {};
560 }
561
562 if (mlir::isa<cir::SingleType, cir::DoubleType>(value.getType())) {
563 // Create the inc/dec operation.
564 // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
 565 assert((kind == cir::UnaryOpKind::Inc ||
 566 kind == cir::UnaryOpKind::Dec) && "Invalid UnaryOp kind");
567 value = emitUnaryOp(e, kind, value);
568 } else {
569 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fp type");
570 return {};
571 }
572 } else if (type->isFixedPointType()) {
573 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fixed point");
574 return {};
575 } else {
576 assert(type->castAs<ObjCObjectPointerType>());
577 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec ObjectiveC pointer");
578 return {};
579 }
580
581 CIRGenFunction::SourceLocRAIIObject sourceloc{
582 cgf, cgf.getLoc(e->getSourceRange())};
583
584 // Store the updated result through the lvalue
585 if (lv.isBitField())
586 return cgf.emitStoreThroughBitfieldLValue(RValue::get(value), lv);
587 else
588 cgf.emitStoreThroughLValue(RValue::get(value), lv);
589
590 // If this is a postinc, return the value read from memory, otherwise use
591 // the updated value.
592 return isPre ? value : input;
593 }
594
595 mlir::Value emitIncDecConsiderOverflowBehavior(const UnaryOperator *e,
596 mlir::Value inVal,
597 cir::UnaryOpKind kind) {
 598 assert((kind == cir::UnaryOpKind::Inc ||
 599 kind == cir::UnaryOpKind::Dec) && "Invalid UnaryOp kind");
600 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
601 case LangOptions::SOB_Defined:
602 return emitUnaryOp(e, kind, inVal, /*nsw=*/false);
603 case LangOptions::SOB_Undefined:
605 return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
606 case LangOptions::SOB_Trapping:
607 if (!e->canOverflow())
608 return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
 609 cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec overflow SOB_Trapping");
610 return {};
611 }
612 llvm_unreachable("Unexpected signed overflow behavior kind");
613 }
614
615 mlir::Value VisitUnaryAddrOf(const UnaryOperator *e) {
616 if (llvm::isa<MemberPointerType>(e->getType())) {
617 cgf.cgm.errorNYI(e->getSourceRange(), "Address of member pointer");
618 return builder.getNullPtr(cgf.convertType(e->getType()),
619 cgf.getLoc(e->getExprLoc()));
620 }
621
622 return cgf.emitLValue(e->getSubExpr()).getPointer();
623 }
624
625 mlir::Value VisitUnaryDeref(const UnaryOperator *e) {
626 if (e->getType()->isVoidType())
627 return Visit(e->getSubExpr()); // the actual value should be unused
628 return emitLoadOfLValue(e);
629 }
630
631 mlir::Value VisitUnaryPlus(const UnaryOperator *e) {
632 QualType promotionType = getPromotionType(e->getSubExpr()->getType());
633 mlir::Value result =
634 emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Plus, promotionType);
635 if (result && !promotionType.isNull())
636 return emitUnPromotedValue(result, e->getType());
637 return result;
638 }
639
640 mlir::Value VisitUnaryMinus(const UnaryOperator *e) {
641 QualType promotionType = getPromotionType(e->getSubExpr()->getType());
642 mlir::Value result =
643 emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Minus, promotionType);
644 if (result && !promotionType.isNull())
645 return emitUnPromotedValue(result, e->getType());
646 return result;
647 }
648
649 mlir::Value emitUnaryPlusOrMinus(const UnaryOperator *e,
650 cir::UnaryOpKind kind,
651 QualType promotionType) {
652 ignoreResultAssign = false;
653 mlir::Value operand;
654 if (!promotionType.isNull())
655 operand = cgf.emitPromotedScalarExpr(e->getSubExpr(), promotionType);
656 else
657 operand = Visit(e->getSubExpr());
658
659 bool nsw =
660 kind == cir::UnaryOpKind::Minus && e->getType()->isSignedIntegerType();
661
662 // NOTE: LLVM codegen will lower this directly to either a FNeg
663 // or a Sub instruction. In CIR this will be handled later in LowerToLLVM.
664 return emitUnaryOp(e, kind, operand, nsw);
665 }
666
667 mlir::Value emitUnaryOp(const UnaryOperator *e, cir::UnaryOpKind kind,
668 mlir::Value input, bool nsw = false) {
669 return cir::UnaryOp::create(builder,
670 cgf.getLoc(e->getSourceRange().getBegin()),
671 input.getType(), kind, input, nsw);
672 }
673
674 mlir::Value VisitUnaryNot(const UnaryOperator *e) {
675 ignoreResultAssign = false;
676 mlir::Value op = Visit(e->getSubExpr());
677 return emitUnaryOp(e, cir::UnaryOpKind::Not, op);
678 }
679
680 mlir::Value VisitUnaryLNot(const UnaryOperator *e);
681
682 mlir::Value VisitUnaryReal(const UnaryOperator *e);
683 mlir::Value VisitUnaryImag(const UnaryOperator *e);
684 mlir::Value VisitRealImag(const UnaryOperator *e,
685 QualType promotionType = QualType());
686
687 mlir::Value VisitUnaryExtension(const UnaryOperator *e) {
688 return Visit(e->getSubExpr());
689 }
690
691 mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
692 CIRGenFunction::CXXDefaultInitExprScope scope(cgf, die);
693 return Visit(die->getExpr());
694 }
695
696 mlir::Value VisitCXXThisExpr(CXXThisExpr *te) { return cgf.loadCXXThis(); }
697
698 mlir::Value VisitExprWithCleanups(ExprWithCleanups *e);
699 mlir::Value VisitCXXNewExpr(const CXXNewExpr *e) {
700 return cgf.emitCXXNewExpr(e);
701 }
702 mlir::Value VisitCXXDeleteExpr(const CXXDeleteExpr *e) {
703 cgf.emitCXXDeleteExpr(e);
704 return {};
705 }
706
707 mlir::Value VisitCXXThrowExpr(const CXXThrowExpr *e) {
708 cgf.emitCXXThrowExpr(e);
709 return {};
710 }
711
712 /// Emit a conversion from the specified type to the specified destination
713 /// type, both of which are CIR scalar types.
714 /// TODO: do we need ScalarConversionOpts here? Should be done in another
715 /// pass.
716 mlir::Value
717 emitScalarConversion(mlir::Value src, QualType srcType, QualType dstType,
718 SourceLocation loc,
719 ScalarConversionOpts opts = ScalarConversionOpts()) {
720 // All conversions involving fixed point types should be handled by the
721 // emitFixedPoint family functions. This is done to prevent bloating up
722 // this function more, and although fixed point numbers are represented by
723 // integers, we do not want to follow any logic that assumes they should be
724 // treated as integers.
725 // TODO(leonardchan): When necessary, add another if statement checking for
726 // conversions to fixed point types from other types.
728 if (srcType->isFixedPointType() || dstType->isFixedPointType()) {
729 cgf.getCIRGenModule().errorNYI(loc, "fixed point conversions");
730 return {};
731 }
732
733 srcType = srcType.getCanonicalType();
734 dstType = dstType.getCanonicalType();
735 if (srcType == dstType) {
736 if (opts.emitImplicitIntegerSignChangeChecks)
737 cgf.getCIRGenModule().errorNYI(loc,
738 "implicit integer sign change checks");
739 return src;
740 }
741
742 if (dstType->isVoidType())
743 return {};
744
745 mlir::Type mlirSrcType = src.getType();
746
747 // Handle conversions to bool first, they are special: comparisons against
748 // 0.
749 if (dstType->isBooleanType())
750 return emitConversionToBool(src, srcType, cgf.getLoc(loc));
751
752 mlir::Type mlirDstType = cgf.convertType(dstType);
753
754 if (srcType->isHalfType() &&
755 !cgf.getContext().getLangOpts().NativeHalfType) {
756 // Cast to FP using the intrinsic if the half type itself isn't supported.
757 if (mlir::isa<cir::FPTypeInterface>(mlirDstType)) {
758 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
759 cgf.getCIRGenModule().errorNYI(loc,
760 "cast via llvm.convert.from.fp16");
761 } else {
762 // Cast to other types through float, using either the intrinsic or
763 // FPExt, depending on whether the half type itself is supported (as
764 // opposed to operations on half, available with NativeHalfType).
765 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
766 cgf.getCIRGenModule().errorNYI(loc,
767 "cast via llvm.convert.from.fp16");
768 // FIXME(cir): For now lets pretend we shouldn't use the conversion
769 // intrinsics and insert a cast here unconditionally.
770 src = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, src,
771 cgf.floatTy);
772 srcType = cgf.getContext().FloatTy;
773 mlirSrcType = cgf.floatTy;
774 }
775 }
776
 777 // TODO(cir): LLVM codegen ignores conversions like int -> uint;
778 // is there anything to be done for CIR here?
779 if (mlirSrcType == mlirDstType) {
780 if (opts.emitImplicitIntegerSignChangeChecks)
781 cgf.getCIRGenModule().errorNYI(loc,
782 "implicit integer sign change checks");
783 return src;
784 }
785
786 // Handle pointer conversions next: pointers can only be converted to/from
 787 // other pointers and integers. Check for pointer types in terms of CIR, as
788 // some native types (like Obj-C id) may map to a pointer type.
789 if (auto dstPT = dyn_cast<cir::PointerType>(mlirDstType)) {
790 cgf.getCIRGenModule().errorNYI(loc, "pointer casts");
791 return builder.getNullPtr(dstPT, src.getLoc());
792 }
793
794 if (isa<cir::PointerType>(mlirSrcType)) {
 795 // Must be a ptr-to-int cast.
796 assert(isa<cir::IntType>(mlirDstType) && "not ptr->int?");
797 return builder.createPtrToInt(src, mlirDstType);
798 }
799
800 // A scalar can be splatted to an extended vector of the same element type
801 if (dstType->isExtVectorType() && !srcType->isVectorType()) {
802 // Sema should add casts to make sure that the source expression's type
803 // is the same as the vector's element type (sans qualifiers)
804 assert(dstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
805 srcType.getTypePtr() &&
806 "Splatted expr doesn't match with vector element type?");
807
808 cgf.getCIRGenModule().errorNYI(loc, "vector splatting");
809 return {};
810 }
811
812 if (srcType->isMatrixType() && dstType->isMatrixType()) {
813 cgf.getCIRGenModule().errorNYI(loc,
814 "matrix type to matrix type conversion");
815 return {};
816 }
817 assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
818 "Internal error: conversion between matrix type and scalar type");
819
820 // Finally, we have the arithmetic types or vectors of arithmetic types.
821 mlir::Value res = nullptr;
822 mlir::Type resTy = mlirDstType;
823
824 res = emitScalarCast(src, srcType, dstType, mlirSrcType, mlirDstType, opts);
825
826 if (mlirDstType != resTy) {
827 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
828 cgf.getCIRGenModule().errorNYI(loc, "cast via llvm.convert.to.fp16");
829 }
830 // FIXME(cir): For now we never use FP16 conversion intrinsics even if
831 // required by the target. Change that once this is implemented
832 res = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, res,
833 resTy);
834 }
835
836 if (opts.emitImplicitIntegerTruncationChecks)
837 cgf.getCIRGenModule().errorNYI(loc, "implicit integer truncation checks");
838
839 if (opts.emitImplicitIntegerSignChangeChecks)
840 cgf.getCIRGenModule().errorNYI(loc,
841 "implicit integer sign change checks");
842
843 return res;
844 }
845
846 BinOpInfo emitBinOps(const BinaryOperator *e,
847 QualType promotionType = QualType()) {
848 ignoreResultAssign = false;
849 BinOpInfo result;
850 result.lhs = cgf.emitPromotedScalarExpr(e->getLHS(), promotionType);
851 result.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionType);
852 if (!promotionType.isNull())
853 result.fullType = promotionType;
854 else
855 result.fullType = e->getType();
856 result.compType = result.fullType;
857 if (const auto *vecType = dyn_cast_or_null<VectorType>(result.fullType)) {
858 result.compType = vecType->getElementType();
859 }
860 result.opcode = e->getOpcode();
861 result.loc = e->getSourceRange();
862 // TODO(cir): Result.FPFeatures
864 result.e = e;
865 return result;
866 }
867
868 mlir::Value emitMul(const BinOpInfo &ops);
869 mlir::Value emitDiv(const BinOpInfo &ops);
870 mlir::Value emitRem(const BinOpInfo &ops);
871 mlir::Value emitAdd(const BinOpInfo &ops);
872 mlir::Value emitSub(const BinOpInfo &ops);
873 mlir::Value emitShl(const BinOpInfo &ops);
874 mlir::Value emitShr(const BinOpInfo &ops);
875 mlir::Value emitAnd(const BinOpInfo &ops);
876 mlir::Value emitXor(const BinOpInfo &ops);
877 mlir::Value emitOr(const BinOpInfo &ops);
878
879 LValue emitCompoundAssignLValue(
880 const CompoundAssignOperator *e,
881 mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &),
882 mlir::Value &result);
883 mlir::Value
884 emitCompoundAssign(const CompoundAssignOperator *e,
885 mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &));
886
887 // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM
888 // codegen.
889 QualType getPromotionType(QualType ty) {
890 const clang::ASTContext &ctx = cgf.getContext();
891 if (auto *complexTy = ty->getAs<ComplexType>()) {
892 QualType elementTy = complexTy->getElementType();
893 if (elementTy.UseExcessPrecision(ctx))
894 return ctx.getComplexType(ctx.FloatTy);
895 }
896
897 if (ty.UseExcessPrecision(cgf.getContext())) {
898 if (auto *vt = ty->getAs<VectorType>()) {
899 unsigned numElements = vt->getNumElements();
900 return ctx.getVectorType(ctx.FloatTy, numElements, vt->getVectorKind());
901 }
902 return cgf.getContext().FloatTy;
903 }
904
905 return QualType();
906 }
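  // Illustrative example (not from the source): with excess precision in effect
  // for _Float16 on a target without native half arithmetic, an expression like
  //
  //   _Float16 a, b, c;
  //   c = a + b;
  //
  // gets a promotion type of 'float' here, so the add is emitted on promoted
  // float operands and the result is narrowed back via emitUnPromotedValue.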
907
908// Binary operators and binary compound assignment operators.
909#define HANDLEBINOP(OP) \
910 mlir::Value VisitBin##OP(const BinaryOperator *e) { \
911 QualType promotionTy = getPromotionType(e->getType()); \
912 auto result = emit##OP(emitBinOps(e, promotionTy)); \
913 if (result && !promotionTy.isNull()) \
914 result = emitUnPromotedValue(result, e->getType()); \
915 return result; \
916 } \
917 mlir::Value VisitBin##OP##Assign(const CompoundAssignOperator *e) { \
918 return emitCompoundAssign(e, &ScalarExprEmitter::emit##OP); \
919 }
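// For reference, HANDLEBINOP(Add) expands to roughly the following (shown here
// only as an illustration of the macro above):
//
//   mlir::Value VisitBinAdd(const BinaryOperator *e) {
//     QualType promotionTy = getPromotionType(e->getType());
//     auto result = emitAdd(emitBinOps(e, promotionTy));
//     if (result && !promotionTy.isNull())
//       result = emitUnPromotedValue(result, e->getType());
//     return result;
//   }
//   mlir::Value VisitBinAddAssign(const CompoundAssignOperator *e) {
//     return emitCompoundAssign(e, &ScalarExprEmitter::emitAdd);
//   }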
920
921 HANDLEBINOP(Mul)
922 HANDLEBINOP(Div)
923 HANDLEBINOP(Rem)
924 HANDLEBINOP(Add)
925 HANDLEBINOP(Sub)
926 HANDLEBINOP(Shl)
 927 HANDLEBINOP(Shr)
 928 HANDLEBINOP(And)
 929 HANDLEBINOP(Xor)
 930 HANDLEBINOP(Or)
 931#undef HANDLEBINOP
932
933 mlir::Value emitCmp(const BinaryOperator *e) {
934 ignoreResultAssign = false;
935 const mlir::Location loc = cgf.getLoc(e->getExprLoc());
936 mlir::Value result;
937 QualType lhsTy = e->getLHS()->getType();
938 QualType rhsTy = e->getRHS()->getType();
939
940 auto clangCmpToCIRCmp =
941 [](clang::BinaryOperatorKind clangCmp) -> cir::CmpOpKind {
942 switch (clangCmp) {
943 case BO_LT:
944 return cir::CmpOpKind::lt;
945 case BO_GT:
946 return cir::CmpOpKind::gt;
947 case BO_LE:
948 return cir::CmpOpKind::le;
949 case BO_GE:
950 return cir::CmpOpKind::ge;
951 case BO_EQ:
952 return cir::CmpOpKind::eq;
953 case BO_NE:
954 return cir::CmpOpKind::ne;
955 default:
956 llvm_unreachable("unsupported comparison kind for cir.cmp");
957 }
958 };
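    // Illustrative example (not from the source): for 'int a, b; int r = a < b;'
    // BO_LT maps to cir::CmpOpKind::lt, the comparison is emitted below as a
    // cir.cmp producing !cir.bool, and the trailing emitScalarConversion turns
    // that bool into the expression's type (int in C, bool in C++).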
959
960 cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
961 if (lhsTy->getAs<MemberPointerType>()) {
963 assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
964 mlir::Value lhs = cgf.emitScalarExpr(e->getLHS());
965 mlir::Value rhs = cgf.emitScalarExpr(e->getRHS());
966 result = builder.createCompare(loc, kind, lhs, rhs);
967 } else if (!lhsTy->isAnyComplexType() && !rhsTy->isAnyComplexType()) {
968 BinOpInfo boInfo = emitBinOps(e);
969 mlir::Value lhs = boInfo.lhs;
970 mlir::Value rhs = boInfo.rhs;
971
972 if (lhsTy->isVectorType()) {
973 if (!e->getType()->isVectorType()) {
974 // If AltiVec, the comparison results in a numeric type, so we use
975 // intrinsics comparing vectors and giving 0 or 1 as a result
976 cgf.cgm.errorNYI(loc, "AltiVec comparison");
977 } else {
978 // Other kinds of vectors. Element-wise comparison returning
979 // a vector.
980 result = cir::VecCmpOp::create(builder, cgf.getLoc(boInfo.loc),
981 cgf.convertType(boInfo.fullType), kind,
982 boInfo.lhs, boInfo.rhs);
983 }
984 } else if (boInfo.isFixedPointOp()) {
986 cgf.cgm.errorNYI(loc, "fixed point comparisons");
987 result = builder.getBool(false, loc);
988 } else {
989 // integers and pointers
990 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers &&
991 mlir::isa<cir::PointerType>(lhs.getType()) &&
992 mlir::isa<cir::PointerType>(rhs.getType())) {
993 cgf.cgm.errorNYI(loc, "strict vtable pointer comparisons");
994 }
995
996 cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
997 result = builder.createCompare(loc, kind, lhs, rhs);
998 }
999 } else {
1000 // Complex Comparison: can only be an equality comparison.
1001 assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
1002
1003 BinOpInfo boInfo = emitBinOps(e);
1004 result = cir::CmpOp::create(builder, loc, kind, boInfo.lhs, boInfo.rhs);
1005 }
1006
1007 return emitScalarConversion(result, cgf.getContext().BoolTy, e->getType(),
1008 e->getExprLoc());
1009 }
1010
1011// Comparisons.
1012#define VISITCOMP(CODE) \
1013 mlir::Value VisitBin##CODE(const BinaryOperator *E) { return emitCmp(E); }
1014 VISITCOMP(LT)
1015 VISITCOMP(GT)
1016 VISITCOMP(LE)
1017 VISITCOMP(GE)
1018 VISITCOMP(EQ)
1019 VISITCOMP(NE)
1020#undef VISITCOMP
1021
1022 mlir::Value VisitBinAssign(const BinaryOperator *e) {
1023 const bool ignore = std::exchange(ignoreResultAssign, false);
1024
1025 mlir::Value rhs;
1026 LValue lhs;
1027
1028 switch (e->getLHS()->getType().getObjCLifetime()) {
1034 break;
1036 // __block variables need to have the rhs evaluated first, plus this
1037 // should improve codegen just a little.
1038 rhs = Visit(e->getRHS());
1040 // TODO(cir): This needs to be emitCheckedLValue() once we support
1041 // sanitizers
1042 lhs = cgf.emitLValue(e->getLHS());
1043
1044 // Store the value into the LHS. Bit-fields are handled specially because
1045 // the result is altered by the store, i.e., [C99 6.5.16p1]
1046 // 'An assignment expression has the value of the left operand after the
1047 // assignment...'.
1048 if (lhs.isBitField()) {
1049 rhs = cgf.emitStoreThroughBitfieldLValue(RValue::get(rhs), lhs);
1050 } else {
 1051 cgf.emitNullabilityCheck(lhs, rhs, e->getExprLoc());
 1052 CIRGenFunction::SourceLocRAIIObject sourceloc{
 1053 cgf, cgf.getLoc(e->getSourceRange())};
1054 cgf.emitStoreThroughLValue(RValue::get(rhs), lhs);
1055 }
1056 }
1057
1058 // If the result is clearly ignored, return now.
1059 if (ignore)
1060 return nullptr;
1061
1062 // The result of an assignment in C is the assigned r-value.
1063 if (!cgf.getLangOpts().CPlusPlus)
1064 return rhs;
1065
1066 // If the lvalue is non-volatile, return the computed value of the
1067 // assignment.
1068 if (!lhs.isVolatile())
1069 return rhs;
1070
1071 // Otherwise, reload the value.
1072 return emitLoadOfLValue(lhs, e->getExprLoc());
1073 }
1074
1075 mlir::Value VisitBinComma(const BinaryOperator *e) {
1076 cgf.emitIgnoredExpr(e->getLHS());
1077 // NOTE: We don't need to EnsureInsertPoint() like LLVM codegen.
1078 return Visit(e->getRHS());
1079 }
1080
1081 mlir::Value VisitBinLAnd(const clang::BinaryOperator *e) {
1082 if (e->getType()->isVectorType()) {
1083 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1084 auto vecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
1085 mlir::Value zeroValue = builder.getNullValue(vecTy.getElementType(), loc);
1086 SmallVector<mlir::Value, 16> elements(vecTy.getSize(), zeroValue);
1087 auto zeroVec = cir::VecCreateOp::create(builder, loc, vecTy, elements);
1088
1089 mlir::Value lhs = Visit(e->getLHS());
1090 mlir::Value rhs = Visit(e->getRHS());
1091
1092 auto cmpOpKind = cir::CmpOpKind::ne;
1093 lhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, lhs, zeroVec);
1094 rhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, rhs, zeroVec);
 1095 mlir::Value vecAnd = builder.createAnd(loc, lhs, rhs);
 1096 return builder.createIntCast(vecAnd, vecTy);
1097 }
1098
1100 mlir::Type resTy = cgf.convertType(e->getType());
1101 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1102
1103 CIRGenFunction::ConditionalEvaluation eval(cgf);
1104
1105 mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
1106 auto resOp = cir::TernaryOp::create(
1107 builder, loc, lhsCondV, /*trueBuilder=*/
1108 [&](mlir::OpBuilder &b, mlir::Location loc) {
1109 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1110 b.getInsertionBlock()};
1111 cgf.curLexScope->setAsTernary();
1112 mlir::Value res = cgf.evaluateExprAsBool(e->getRHS());
1113 lexScope.forceCleanup();
1114 cir::YieldOp::create(b, loc, res);
1115 },
1116 /*falseBuilder*/
1117 [&](mlir::OpBuilder &b, mlir::Location loc) {
1118 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1119 b.getInsertionBlock()};
1120 cgf.curLexScope->setAsTernary();
1121 auto res = cir::ConstantOp::create(b, loc, builder.getFalseAttr());
1122 cir::YieldOp::create(b, loc, res.getRes());
1123 });
1124 return maybePromoteBoolResult(resOp.getResult(), resTy);
1125 }
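  // Illustrative sketch (not from the source, CIR syntax approximate) of the
  // scalar '&&' lowering above for 'bool r = a && b;':
  //
  //   %lhs = <evaluate 'a' as !cir.bool>
  //   %r   = cir.ternary (%lhs) {
  //            %t = <evaluate 'b' as !cir.bool>
  //            cir.yield %t
  //          } false {
  //            %f = cir.const #false
  //            cir.yield %f
  //          }
  //
  // The !cir.bool result is then widened to the expression type by
  // maybePromoteBoolResult when needed; VisitBinLOr below mirrors this with the
  // branches swapped.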
1126
1127 mlir::Value VisitBinLOr(const clang::BinaryOperator *e) {
1128 if (e->getType()->isVectorType()) {
1129 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1130 auto vecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
1131 mlir::Value zeroValue = builder.getNullValue(vecTy.getElementType(), loc);
1132 SmallVector<mlir::Value, 16> elements(vecTy.getSize(), zeroValue);
1133 auto zeroVec = cir::VecCreateOp::create(builder, loc, vecTy, elements);
1134
1135 mlir::Value lhs = Visit(e->getLHS());
1136 mlir::Value rhs = Visit(e->getRHS());
1137
1138 auto cmpOpKind = cir::CmpOpKind::ne;
1139 lhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, lhs, zeroVec);
1140 rhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, rhs, zeroVec);
1141 mlir::Value vecOr = builder.createOr(loc, lhs, rhs);
1142 return builder.createIntCast(vecOr, vecTy);
1143 }
1144
1146 mlir::Type resTy = cgf.convertType(e->getType());
1147 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1148
1149 CIRGenFunction::ConditionalEvaluation eval(cgf);
1150
1151 mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
1152 auto resOp = cir::TernaryOp::create(
1153 builder, loc, lhsCondV, /*trueBuilder=*/
1154 [&](mlir::OpBuilder &b, mlir::Location loc) {
1155 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1156 b.getInsertionBlock()};
1157 cgf.curLexScope->setAsTernary();
1158 auto res = cir::ConstantOp::create(b, loc, builder.getTrueAttr());
1159 cir::YieldOp::create(b, loc, res.getRes());
1160 },
1161 /*falseBuilder*/
1162 [&](mlir::OpBuilder &b, mlir::Location loc) {
1163 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1164 b.getInsertionBlock()};
1165 cgf.curLexScope->setAsTernary();
1166 mlir::Value res = cgf.evaluateExprAsBool(e->getRHS());
1167 lexScope.forceCleanup();
1168 cir::YieldOp::create(b, loc, res);
1169 });
1170
1171 return maybePromoteBoolResult(resOp.getResult(), resTy);
1172 }
1173
1174 mlir::Value VisitAtomicExpr(AtomicExpr *e) {
1175 return cgf.emitAtomicExpr(e).getValue();
1176 }
1177};
1178
1179LValue ScalarExprEmitter::emitCompoundAssignLValue(
1180 const CompoundAssignOperator *e,
1181 mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &),
1182 mlir::Value &result) {
 1183 if (e->getComputationResultType()->isAnyComplexType())
 1184 return cgf.emitScalarCompoundAssignWithComplex(e, result);
1185
1186 QualType lhsTy = e->getLHS()->getType();
1187 BinOpInfo opInfo;
1188
1189 // Emit the RHS first. __block variables need to have the rhs evaluated
1190 // first, plus this should improve codegen a little.
1191
1192 QualType promotionTypeCR = getPromotionType(e->getComputationResultType());
1193 if (promotionTypeCR.isNull())
1194 promotionTypeCR = e->getComputationResultType();
1195
1196 QualType promotionTypeLHS = getPromotionType(e->getComputationLHSType());
1197 QualType promotionTypeRHS = getPromotionType(e->getRHS()->getType());
1198
1199 if (!promotionTypeRHS.isNull())
1200 opInfo.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS);
1201 else
1202 opInfo.rhs = Visit(e->getRHS());
1203
1204 opInfo.fullType = promotionTypeCR;
1205 opInfo.compType = opInfo.fullType;
1206 if (const auto *vecType = dyn_cast_or_null<VectorType>(opInfo.fullType))
1207 opInfo.compType = vecType->getElementType();
1208 opInfo.opcode = e->getOpcode();
1209 opInfo.fpfeatures = e->getFPFeaturesInEffect(cgf.getLangOpts());
1210 opInfo.e = e;
1211 opInfo.loc = e->getSourceRange();
1212
1213 // Load/convert the LHS
1214 LValue lhsLV = cgf.emitLValue(e->getLHS());
1215
1216 if (lhsTy->getAs<AtomicType>()) {
1217 cgf.cgm.errorNYI(result.getLoc(), "atomic lvalue assign");
1218 return LValue();
1219 }
1220
1221 opInfo.lhs = emitLoadOfLValue(lhsLV, e->getExprLoc());
1222
1223 CIRGenFunction::SourceLocRAIIObject sourceloc{
1224 cgf, cgf.getLoc(e->getSourceRange())};
1225 SourceLocation loc = e->getExprLoc();
1226 if (!promotionTypeLHS.isNull())
1227 opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy, promotionTypeLHS, loc);
1228 else
1229 opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy,
1230 e->getComputationLHSType(), loc);
1231
1232 // Expand the binary operator.
1233 result = (this->*func)(opInfo);
1234
1235 // Convert the result back to the LHS type,
1236 // potentially with Implicit Conversion sanitizer check.
1237 result = emitScalarConversion(result, promotionTypeCR, lhsTy, loc,
1238 ScalarConversionOpts(cgf.sanOpts));
1239
1240 // Store the result value into the LHS lvalue. Bit-fields are handled
1241 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
1242 // 'An assignment expression has the value of the left operand after the
1243 // assignment...'.
1244 if (lhsLV.isBitField())
1245 cgf.emitStoreThroughBitfieldLValue(RValue::get(result), lhsLV);
1246 else
1247 cgf.emitStoreThroughLValue(RValue::get(result), lhsLV);
1248
1249 if (cgf.getLangOpts().OpenMP)
1250 cgf.cgm.errorNYI(e->getSourceRange(), "openmp");
1251
1252 return lhsLV;
1253}
1254
1255mlir::Value ScalarExprEmitter::emitComplexToScalarConversion(mlir::Location loc,
1256 mlir::Value value,
1257 CastKind kind,
1258 QualType destTy) {
1259 cir::CastKind castOpKind;
1260 switch (kind) {
1261 case CK_FloatingComplexToReal:
1262 castOpKind = cir::CastKind::float_complex_to_real;
1263 break;
1264 case CK_IntegralComplexToReal:
1265 castOpKind = cir::CastKind::int_complex_to_real;
1266 break;
1267 case CK_FloatingComplexToBoolean:
1268 castOpKind = cir::CastKind::float_complex_to_bool;
1269 break;
1270 case CK_IntegralComplexToBoolean:
1271 castOpKind = cir::CastKind::int_complex_to_bool;
1272 break;
1273 default:
1274 llvm_unreachable("invalid complex-to-scalar cast kind");
1275 }
1276
 1277 return builder.createCast(loc, castOpKind, value, cgf.convertType(destTy));
1278}
1279
1280mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e,
1281 QualType promotionType) {
1282 e = e->IgnoreParens();
1283 if (const auto *bo = dyn_cast<BinaryOperator>(e)) {
1284 switch (bo->getOpcode()) {
1285#define HANDLE_BINOP(OP) \
1286 case BO_##OP: \
1287 return emit##OP(emitBinOps(bo, promotionType));
1288 HANDLE_BINOP(Add)
1289 HANDLE_BINOP(Sub)
1290 HANDLE_BINOP(Mul)
1291 HANDLE_BINOP(Div)
1292#undef HANDLE_BINOP
1293 default:
1294 break;
1295 }
1296 } else if (const auto *uo = dyn_cast<UnaryOperator>(e)) {
1297 switch (uo->getOpcode()) {
1298 case UO_Imag:
1299 case UO_Real:
1300 return VisitRealImag(uo, promotionType);
1301 case UO_Minus:
1302 return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Minus, promotionType);
1303 case UO_Plus:
1304 return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Plus, promotionType);
1305 default:
1306 break;
1307 }
1308 }
1309 mlir::Value result = Visit(const_cast<Expr *>(e));
1310 if (result) {
1311 if (!promotionType.isNull())
1312 return emitPromotedValue(result, promotionType);
1313 return emitUnPromotedValue(result, e->getType());
1314 }
1315 return result;
1316}
1317
1318mlir::Value ScalarExprEmitter::emitCompoundAssign(
1319 const CompoundAssignOperator *e,
1320 mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &)) {
1321
1322 bool ignore = std::exchange(ignoreResultAssign, false);
1323 mlir::Value rhs;
1324 LValue lhs = emitCompoundAssignLValue(e, func, rhs);
1325
1326 // If the result is clearly ignored, return now.
1327 if (ignore)
1328 return {};
1329
1330 // The result of an assignment in C is the assigned r-value.
1331 if (!cgf.getLangOpts().CPlusPlus)
1332 return rhs;
1333
1334 // If the lvalue is non-volatile, return the computed value of the assignment.
1335 if (!lhs.isVolatile())
1336 return rhs;
1337
1338 // Otherwise, reload the value.
1339 return emitLoadOfLValue(lhs, e->getExprLoc());
1340}
1341
1342mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *e) {
1343 mlir::Location scopeLoc = cgf.getLoc(e->getSourceRange());
1344 mlir::OpBuilder &builder = cgf.builder;
1345
1346 auto scope = cir::ScopeOp::create(
1347 builder, scopeLoc,
1348 /*scopeBuilder=*/
1349 [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) {
1350 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1351 builder.getInsertionBlock()};
1352 mlir::Value scopeYieldVal = Visit(e->getSubExpr());
1353 if (scopeYieldVal) {
1354 // Defend against dominance problems caused by jumps out of expression
1355 // evaluation through the shared cleanup block.
1356 lexScope.forceCleanup();
1357 cir::YieldOp::create(builder, loc, scopeYieldVal);
1358 yieldTy = scopeYieldVal.getType();
1359 }
1360 });
1361
1362 return scope.getNumResults() > 0 ? scope->getResult(0) : nullptr;
1363}
1364
1365} // namespace
1366
1367LValue
1368CIRGenFunction::emitCompoundAssignmentLValue(const CompoundAssignOperator *e) {
 1369 ScalarExprEmitter emitter(*this, builder);
1370 mlir::Value result;
1371 switch (e->getOpcode()) {
1372#define COMPOUND_OP(Op) \
1373 case BO_##Op##Assign: \
1374 return emitter.emitCompoundAssignLValue(e, &ScalarExprEmitter::emit##Op, \
1375 result)
1376 COMPOUND_OP(Mul);
1377 COMPOUND_OP(Div);
1378 COMPOUND_OP(Rem);
1379 COMPOUND_OP(Add);
1380 COMPOUND_OP(Sub);
1381 COMPOUND_OP(Shl);
 1382 COMPOUND_OP(Shr);
 1383 COMPOUND_OP(And);
 1384 COMPOUND_OP(Xor);
1385 COMPOUND_OP(Or);
1386#undef COMPOUND_OP
1387
1388 case BO_PtrMemD:
1389 case BO_PtrMemI:
1390 case BO_Mul:
1391 case BO_Div:
1392 case BO_Rem:
1393 case BO_Add:
1394 case BO_Sub:
1395 case BO_Shl:
1396 case BO_Shr:
1397 case BO_LT:
1398 case BO_GT:
1399 case BO_LE:
1400 case BO_GE:
1401 case BO_EQ:
1402 case BO_NE:
1403 case BO_Cmp:
1404 case BO_And:
1405 case BO_Xor:
1406 case BO_Or:
1407 case BO_LAnd:
1408 case BO_LOr:
1409 case BO_Assign:
1410 case BO_Comma:
1411 llvm_unreachable("Not valid compound assignment operators");
1412 }
1413 llvm_unreachable("Unhandled compound assignment operator");
1414}
1415
1416/// Emit the computation of the specified expression of scalar type.
1417mlir::Value CIRGenFunction::emitScalarExpr(const Expr *e,
 1418 bool ignoreResultAssign) {
1419 assert(e && hasScalarEvaluationKind(e->getType()) &&
1420 "Invalid scalar expression to emit");
1421
1422 return ScalarExprEmitter(*this, builder, ignoreResultAssign)
1423 .Visit(const_cast<Expr *>(e));
1424}
1425
1426mlir::Value CIRGenFunction::emitPromotedScalarExpr(const Expr *e,
 1427 QualType promotionType) {
1428 if (!promotionType.isNull())
1429 return ScalarExprEmitter(*this, builder).emitPromoted(e, promotionType);
1430 return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
1431}
1432
1433[[maybe_unused]] static bool mustVisitNullValue(const Expr *e) {
1434 // If a null pointer expression's type is the C++0x nullptr_t and
1435 // the expression is not a simple literal, it must be evaluated
1436 // for its potential side effects.
 1437 if (isa<CXXNullPtrLiteralExpr>(e))
 1438 return false;
1439 return e->getType()->isNullPtrType();
1440}
1441
1442/// If \p e is a widened promoted integer, get its base (unpromoted) type.
1443static std::optional<QualType>
1444getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e) {
1445 const Expr *base = e->IgnoreImpCasts();
1446 if (e == base)
1447 return std::nullopt;
1448
1449 QualType baseTy = base->getType();
1450 if (!astContext.isPromotableIntegerType(baseTy) ||
1451 astContext.getTypeSize(baseTy) >= astContext.getTypeSize(e->getType()))
1452 return std::nullopt;
1453
1454 return baseTy;
1455}
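// Illustrative example (not from the source): in 'short s; int r = s + 1;' the
// LHS of the '+' is an implicit cast of 's' to 'int'. IgnoreImpCasts() exposes
// the 'short' base, which is promotable and narrower than 'int', so this
// returns 'short'; for an operand that was already 'int' it returns
// std::nullopt.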
1456
1457/// Check if \p e is a widened promoted integer.
1458[[maybe_unused]] static bool isWidenedIntegerOp(const ASTContext &astContext,
1459 const Expr *e) {
1460 return getUnwidenedIntegerType(astContext, e).has_value();
1461}
1462
1463/// Check if we can skip the overflow check for \p Op.
1464[[maybe_unused]] static bool canElideOverflowCheck(const ASTContext &astContext,
1465 const BinOpInfo &op) {
1466 assert((isa<UnaryOperator>(op.e) || isa<BinaryOperator>(op.e)) &&
1467 "Expected a unary or binary operator");
1468
1469 // If the binop has constant inputs and we can prove there is no overflow,
1470 // we can elide the overflow check.
1471 if (!op.mayHaveIntegerOverflow())
1472 return true;
1473
1474 // If a unary op has a widened operand, the op cannot overflow.
1475 if (const auto *uo = dyn_cast<UnaryOperator>(op.e))
1476 return !uo->canOverflow();
1477
1478 // We usually don't need overflow checks for binops with widened operands.
1479 // Multiplication with promoted unsigned operands is a special case.
1480 const auto *bo = cast<BinaryOperator>(op.e);
1481 std::optional<QualType> optionalLHSTy =
1482 getUnwidenedIntegerType(astContext, bo->getLHS());
1483 if (!optionalLHSTy)
1484 return false;
1485
1486 std::optional<QualType> optionalRHSTy =
1487 getUnwidenedIntegerType(astContext, bo->getRHS());
1488 if (!optionalRHSTy)
1489 return false;
1490
1491 QualType lhsTy = *optionalLHSTy;
1492 QualType rhsTy = *optionalRHSTy;
1493
1494 // This is the simple case: binops without unsigned multiplication, and with
1495 // widened operands. No overflow check is needed here.
1496 if ((op.opcode != BO_Mul && op.opcode != BO_MulAssign) ||
1497 !lhsTy->isUnsignedIntegerType() || !rhsTy->isUnsignedIntegerType())
1498 return true;
1499
1500 // For unsigned multiplication the overflow check can be elided if either one
1501 // of the unpromoted types are less than half the size of the promoted type.
1502 unsigned promotedSize = astContext.getTypeSize(op.e->getType());
1503 return (2 * astContext.getTypeSize(lhsTy)) < promotedSize ||
1504 (2 * astContext.getTypeSize(rhsTy)) < promotedSize;
1505}
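// Worked example for the unsigned-multiplication rule above (illustrative,
// assuming 32-bit int): two promoted 'unsigned char' operands satisfy
// 2 * 8 < 32, so their product fits in 16 bits and the check is elided; two
// promoted 'unsigned short' operands give 2 * 16 < 32 == false, so the
// overflow check is kept.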
1506
1507/// Emit pointer + index arithmetic.
1508static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf,
 1509 const BinOpInfo &op,
1510 bool isSubtraction) {
1511 // Must have binary (not unary) expr here. Unary pointer
 1512 // increment/decrement doesn't use this path.
 1513 const BinaryOperator *expr = cast<BinaryOperator>(op.e);
 1514
1515 mlir::Value pointer = op.lhs;
1516 Expr *pointerOperand = expr->getLHS();
1517 mlir::Value index = op.rhs;
1518 Expr *indexOperand = expr->getRHS();
1519
1520 // In the case of subtraction, the FE has ensured that the LHS is always the
1521 // pointer. However, addition can have the pointer on either side. We will
1522 // always have a pointer operand and an integer operand, so if the LHS wasn't
1523 // a pointer, we need to swap our values.
1524 if (!isSubtraction && !mlir::isa<cir::PointerType>(pointer.getType())) {
1525 std::swap(pointer, index);
1526 std::swap(pointerOperand, indexOperand);
1527 }
1528 assert(mlir::isa<cir::PointerType>(pointer.getType()) &&
1529 "Need a pointer operand");
1530 assert(mlir::isa<cir::IntType>(index.getType()) && "Need an integer operand");
1531
1532 // Some versions of glibc and gcc use idioms (particularly in their malloc
1533 // routines) that add a pointer-sized integer (known to be a pointer value)
1534 // to a null pointer in order to cast the value back to an integer or as
1535 // part of a pointer alignment algorithm. This is undefined behavior, but
1536 // we'd like to be able to compile programs that use it.
1537 //
1538 // Normally, we'd generate a GEP with a null-pointer base here in response
1539 // to that code, but it's also UB to dereference a pointer created that
1540 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
1541 // generate a direct cast of the integer value to a pointer.
1542 //
1543 // The idiom (p = nullptr + N) is not met if any of the following are true:
1544 //
1545 // The operation is subtraction.
1546 // The index is not pointer-sized.
1547 // The pointer type is not byte-sized.
1548 //
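  // Illustrative example of the tolerated idiom (not from the source):
  //
  //   uintptr_t bits = ...;
  //   char *p = (char *)0 + bits;  // UB, but emitted as an int-to-pointer cast
  //
  // When the guard below matches, the addition is emitted as createIntToPtr on
  // the index instead of a pointer-stride operation on a null base.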
 1549 if (BinaryOperator::isNullPointerArithmeticExtension(
 1550 cgf.getContext(), op.opcode, expr->getLHS(), expr->getRHS()))
1551 return cgf.getBuilder().createIntToPtr(index, pointer.getType());
1552
1553 // Unlike classic LLVM codegen, ABI adjustments for the index size are
1554 // deferred to LLVM lowering.
1555
1556 // If this is subtraction, negate the index.
1557 if (isSubtraction)
1558 index = cgf.getBuilder().createNeg(index);
1559
1561
1562 const PointerType *pointerType =
1563 pointerOperand->getType()->getAs<PointerType>();
1564 if (!pointerType) {
1565 cgf.cgm.errorNYI("Objective-C: pointer arithmetic with non-pointer type");
1566 return nullptr;
1567 }
1568
1569 QualType elementType = pointerType->getPointeeType();
1570 if (cgf.getContext().getAsVariableArrayType(elementType)) {
1571 cgf.cgm.errorNYI("variable array type");
1572 return nullptr;
1573 }
1574
1575 if (elementType->isVoidType() || elementType->isFunctionType()) {
1576 cgf.cgm.errorNYI("void* or function pointer arithmetic");
1577 return nullptr;
1578 }
1579
1581 return cir::PtrStrideOp::create(cgf.getBuilder(),
1582 cgf.getLoc(op.e->getExprLoc()),
1583 pointer.getType(), pointer, index);
1584}
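A user-level sketch of the two paths above (hypothetical code; the ordinary case goes through the PtrStrideOp::create call, the idiom through createIntToPtr):

  #include <cstddef>
  #include <cstdint>
  char *stride(char *p, std::ptrdiff_t n) {
    return p + n;         // ordinary case: a pointer stride over the char pointee
  }
  char *nullIdiom(std::uintptr_t n) {
    return (char *)0 + n; // the nullptr + N idiom: UB in ISO C++, but tolerated and
                          // emitted as a plain integer-to-pointer cast
  }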
1585
1586mlir::Value ScalarExprEmitter::emitMul(const BinOpInfo &ops) {
1587 const mlir::Location loc = cgf.getLoc(ops.loc);
1588 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1589 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1590 case LangOptions::SOB_Defined:
1591 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1592 return builder.createMul(loc, ops.lhs, ops.rhs);
1593 [[fallthrough]];
1594 case LangOptions::SOB_Undefined:
1595 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1596 return builder.createNSWMul(loc, ops.lhs, ops.rhs);
1597 [[fallthrough]];
1598 case LangOptions::SOB_Trapping:
1599 if (canElideOverflowCheck(cgf.getContext(), ops))
1600 return builder.createNSWMul(loc, ops.lhs, ops.rhs);
1601 cgf.cgm.errorNYI("sanitizers");
1602 }
1603 }
1604 if (ops.fullType->isConstantMatrixType()) {
1606 cgf.cgm.errorNYI("matrix types");
1607 return nullptr;
1608 }
1609 if (ops.compType->isUnsignedIntegerType() &&
1610 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1611 !canElideOverflowCheck(cgf.getContext(), ops))
1612 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1613
1614 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1616 return builder.createFMul(loc, ops.lhs, ops.rhs);
1617 }
1618
1619 if (ops.isFixedPointOp()) {
1621 cgf.cgm.errorNYI("fixed point");
1622 return nullptr;
1623 }
1624
1625 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1626 cgf.convertType(ops.fullType), cir::BinOpKind::Mul,
1627 ops.lhs, ops.rhs);
1628}
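The switch above keys off the signed-overflow mode; as a sketch of the usual flag mapping (standard Clang behavior, stated here as an assumption): -fwrapv selects SOB_Defined, the default is SOB_Undefined, and -ftrapv selects SOB_Trapping.

  int mul(int a, int b) {
    return a * b;
    // -fwrapv (SOB_Defined):   plain mul; wraparound is well defined
    // default (SOB_Undefined): createNSWMul, i.e. mul with no-signed-wrap semantics
    // -ftrapv (SOB_Trapping):  an overflow check, unless canElideOverflowCheck()
    //                          applies (the check itself is still errorNYI above)
  }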
1629mlir::Value ScalarExprEmitter::emitDiv(const BinOpInfo &ops) {
1630 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1631 cgf.convertType(ops.fullType), cir::BinOpKind::Div,
1632 ops.lhs, ops.rhs);
1633}
1634mlir::Value ScalarExprEmitter::emitRem(const BinOpInfo &ops) {
1635 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1636 cgf.convertType(ops.fullType), cir::BinOpKind::Rem,
1637 ops.lhs, ops.rhs);
1638}
1639
1640mlir::Value ScalarExprEmitter::emitAdd(const BinOpInfo &ops) {
1641 if (mlir::isa<cir::PointerType>(ops.lhs.getType()) ||
1642 mlir::isa<cir::PointerType>(ops.rhs.getType()))
1643 return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/false);
1644
1645 const mlir::Location loc = cgf.getLoc(ops.loc);
1646 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1647 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1648 case LangOptions::SOB_Defined:
1649 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1650 return builder.createAdd(loc, ops.lhs, ops.rhs);
1651 [[fallthrough]];
1652 case LangOptions::SOB_Undefined:
1653 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1654 return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
1655 [[fallthrough]];
1656 case LangOptions::SOB_Trapping:
1657 if (canElideOverflowCheck(cgf.getContext(), ops))
1658 return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
1659 cgf.cgm.errorNYI("sanitizers");
1660 }
1661 }
1662 if (ops.fullType->isConstantMatrixType()) {
1664 cgf.cgm.errorNYI("matrix types");
1665 return nullptr;
1666 }
1667
1668 if (ops.compType->isUnsignedIntegerType() &&
1669 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1670 !canElideOverflowCheck(cgf.getContext(), ops))
1671 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1672
1673 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1675 return builder.createFAdd(loc, ops.lhs, ops.rhs);
1676 }
1677
1678 if (ops.isFixedPointOp()) {
1680 cgf.cgm.errorNYI("fixed point");
1681 return {};
1682 }
1683
1684 return cir::BinOp::create(builder, loc, cgf.convertType(ops.fullType),
1685 cir::BinOpKind::Add, ops.lhs, ops.rhs);
1686}
1687
1688mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &ops) {
1689 const mlir::Location loc = cgf.getLoc(ops.loc);
1690 // The LHS is always a pointer if either side is.
1691 if (!mlir::isa<cir::PointerType>(ops.lhs.getType())) {
1692 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1693 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1694 case LangOptions::SOB_Defined: {
1695 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1696 return builder.createSub(loc, ops.lhs, ops.rhs);
1697 [[fallthrough]];
1698 }
1699 case LangOptions::SOB_Undefined:
1700 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1701 return builder.createNSWSub(loc, ops.lhs, ops.rhs);
1702 [[fallthrough]];
1703 case LangOptions::SOB_Trapping:
1704 if (canElideOverflowCheck(cgf.getContext(), ops))
1705 return builder.createNSWSub(loc, ops.lhs, ops.rhs);
1706 cgf.cgm.errorNYI("sanitizers");
1707 }
1708 }
1709
1710 if (ops.fullType->isConstantMatrixType()) {
1712 cgf.cgm.errorNYI("matrix types");
1713 return nullptr;
1714 }
1715
1716 if (ops.compType->isUnsignedIntegerType() &&
1717 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1718 !canElideOverflowCheck(cgf.getContext(), ops))
1719 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1720
1721 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1723 return builder.createFSub(loc, ops.lhs, ops.rhs);
1724 }
1725
1726 if (ops.isFixedPointOp()) {
1728 cgf.cgm.errorNYI("fixed point");
1729 return {};
1730 }
1731
1732 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1733 cgf.convertType(ops.fullType),
1734 cir::BinOpKind::Sub, ops.lhs, ops.rhs);
1735 }
1736
1737 // If the RHS is not a pointer, then we have normal pointer
1738 // arithmetic.
1739 if (!mlir::isa<cir::PointerType>(ops.rhs.getType()))
1740 return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/true);
1741
1742 // Otherwise, this is a pointer subtraction
1743
1744 // Do the raw subtraction part.
1745 //
1746 // TODO(cir): note for the LLVM lowering of this op; when expanding this into
1747 // LLVM we will need to handle VLAs, division by the element size, etc.
1748 //
1749 // See more in `EmitSub` in CGExprScalar.cpp.
1751 return cir::PtrDiffOp::create(builder, cgf.getLoc(ops.loc), cgf.ptrDiffTy,
1752 ops.lhs, ops.rhs);
1753}
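A short sketch of the three subtraction paths handled above (hypothetical user code):

  #include <cstddef>
  long scalarSub(long a, long b) { return a - b; }          // non-pointer LHS: scalar path
  int *indexSub(int *p, std::ptrdiff_t n) { return p - n; } // pointer - integer:
                                                            // emitPointerArithmetic
  std::ptrdiff_t ptrSub(int *a, int *b) { return a - b; }   // pointer - pointer:
                                                            // PtrDiffOp; dividing by
                                                            // sizeof(int) is left to
                                                            // LLVM lowering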
1754
1755mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &ops) {
1756 // TODO: This misses out on the sanitizer check below.
1757 if (ops.isFixedPointOp()) {
1759 cgf.cgm.errorNYI("fixed point");
1760 return {};
1761 }
1762
1763 // CIR accepts shifts between operands of different types, so nothing special
1764 // needs to be done here. LLVM, however, requires the LHS and RHS to have the
1765 // same type, so the RHS is promoted or truncated to the LHS size during lowering.
1766
1767 bool sanitizeSignedBase = cgf.sanOpts.has(SanitizerKind::ShiftBase) &&
1768 ops.compType->hasSignedIntegerRepresentation() &&
1770 !cgf.getLangOpts().CPlusPlus20;
1771 bool sanitizeUnsignedBase =
1772 cgf.sanOpts.has(SanitizerKind::UnsignedShiftBase) &&
1773 ops.compType->hasUnsignedIntegerRepresentation();
1774 bool sanitizeBase = sanitizeSignedBase || sanitizeUnsignedBase;
1775 bool sanitizeExponent = cgf.sanOpts.has(SanitizerKind::ShiftExponent);
1776
1777 // OpenCL 6.3j: shift values are effectively % word size of LHS.
1778 if (cgf.getLangOpts().OpenCL)
1779 cgf.cgm.errorNYI("opencl");
1780 else if ((sanitizeBase || sanitizeExponent) &&
1781 mlir::isa<cir::IntType>(ops.lhs.getType()))
1782 cgf.cgm.errorNYI("sanitizers");
1783
1784 return builder.createShiftLeft(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
1785}
1786
1787mlir::Value ScalarExprEmitter::emitShr(const BinOpInfo &ops) {
1788 // TODO: This misses out on the sanitizer check below.
1789 if (ops.isFixedPointOp()) {
1791 cgf.cgm.errorNYI("fixed point");
1792 return {};
1793 }
1794
1795 // CIR accepts shifts between operands of different types, so nothing special
1796 // needs to be done here. LLVM, however, requires the LHS and RHS to have the
1797 // same type, so the RHS is promoted or truncated to the LHS size during lowering.
1798
1799 // OpenCL 6.3j: shift values are effectively % word size of LHS.
1800 if (cgf.getLangOpts().OpenCL)
1801 cgf.cgm.errorNYI("opencl");
1802 else if (cgf.sanOpts.has(SanitizerKind::ShiftExponent) &&
1803 mlir::isa<cir::IntType>(ops.lhs.getType()))
1804 cgf.cgm.errorNYI("sanitizers");
1805
1806 // Note that we don't need to distinguish signed from unsigned right shifts at
1807 // this point, since that is handled later by LLVM lowering.
1808 return builder.createShiftRight(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
1809}
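For example, CIR keeps mismatched shift operand widths exactly as written; reconciling them with LLVM's same-type requirement happens at lowering time (a sketch, not part of this file):

  unsigned long long shifts(unsigned long long v, unsigned char amount) {
    // The LHS is 64 bits wide while the promoted RHS is an int; both shift
    // operations keep those operand types in CIR.
    return (v << amount) >> (amount / 2);
  }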
1810
1811mlir::Value ScalarExprEmitter::emitAnd(const BinOpInfo &ops) {
1812 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1813 cgf.convertType(ops.fullType), cir::BinOpKind::And,
1814 ops.lhs, ops.rhs);
1815}
1816mlir::Value ScalarExprEmitter::emitXor(const BinOpInfo &ops) {
1817 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1818 cgf.convertType(ops.fullType), cir::BinOpKind::Xor,
1819 ops.lhs, ops.rhs);
1820}
1821mlir::Value ScalarExprEmitter::emitOr(const BinOpInfo &ops) {
1822 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1823 cgf.convertType(ops.fullType), cir::BinOpKind::Or,
1824 ops.lhs, ops.rhs);
1825}
1826
1827// Emit code for an explicit or implicit cast. Implicit
1828// casts have to handle a broader range of conversions than explicit
1829// casts, as they handle things like function to ptr-to-function decay
1830// etc.
1831mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
1832 Expr *subExpr = ce->getSubExpr();
1833 QualType destTy = ce->getType();
1834 CastKind kind = ce->getCastKind();
1835
1836 // These cases are generally not written to ignore the result of evaluating
1837 // their sub-expressions, so we clear this now.
1838 ignoreResultAssign = false;
1839
1840 switch (kind) {
1841 case clang::CK_Dependent:
1842 llvm_unreachable("dependent cast kind in CIR gen!");
1843 case clang::CK_BuiltinFnToFnPtr:
1844 llvm_unreachable("builtin functions are handled elsewhere");
1845
1846 case CK_CPointerToObjCPointerCast:
1847 case CK_BlockPointerToObjCPointerCast:
1848 case CK_AnyPointerToBlockPointerCast:
1849 case CK_BitCast: {
1850 mlir::Value src = Visit(const_cast<Expr *>(subExpr));
1851 mlir::Type dstTy = cgf.convertType(destTy);
1852
1854
1855 if (cgf.sanOpts.has(SanitizerKind::CFIUnrelatedCast))
1856 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1857 "sanitizer support");
1858
1859 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
1860 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1861 "strict vtable pointers");
1862
1863 // Update heapallocsite metadata when there is an explicit pointer cast.
1865
1866 // If Src is a fixed vector and Dst is a scalable vector, and both have the
1867 // same element type, use the llvm.vector.insert intrinsic to perform the
1868 // bitcast.
1870
1871 // If Src is a scalable vector and Dst is a fixed vector, and both have the
1872 // same element type, use the llvm.vector.extract intrinsic to perform the
1873 // bitcast.
1875
1876 // Perform VLAT <-> VLST bitcast through memory.
1877 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
1878 // require the element types of the vectors to be the same, we
1879 // need to keep this around for bitcasts between VLAT <-> VLST where
1880 // the element types of the vectors are not the same, until we figure
1881 // out a better way of doing these casts.
1883
1884 return cgf.getBuilder().createBitcast(cgf.getLoc(subExpr->getSourceRange()),
1885 src, dstTy);
1886 }
1887
1888 case CK_AtomicToNonAtomic: {
1889 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1890 "CastExpr: ", ce->getCastKindName());
1891 mlir::Location loc = cgf.getLoc(subExpr->getSourceRange());
1892 return cgf.createDummyValue(loc, destTy);
1893 }
1894 case CK_NonAtomicToAtomic:
1895 case CK_UserDefinedConversion:
1896 return Visit(const_cast<Expr *>(subExpr));
1897 case CK_NoOp: {
1898 auto v = Visit(const_cast<Expr *>(subExpr));
1899 if (v) {
1900 // CK_NoOp can model a pointer qualification conversion, which can remove
1901 // an array bound and change the IR type.
1902 // FIXME: Once pointee types are removed from IR, remove this.
1903 mlir::Type t = cgf.convertType(destTy);
1904 if (t != v.getType())
1905 cgf.getCIRGenModule().errorNYI("pointer qualification conversion");
1906 }
1907 return v;
1908 }
1909 case CK_IntegralToPointer: {
1910 mlir::Type destCIRTy = cgf.convertType(destTy);
1911 mlir::Value src = Visit(const_cast<Expr *>(subExpr));
1912
1913 // Properly resize by casting to an int of the same size as the pointer.
1914 // Clang's IntegralToPointer includes 'bool' as the source, but in CIR
1915 // 'bool' is not an integral type. So check the source type to get the
1916 // correct CIR conversion.
1917 mlir::Type middleTy = cgf.cgm.getDataLayout().getIntPtrType(destCIRTy);
1918 mlir::Value middleVal = builder.createCast(
1919 subExpr->getType()->isBooleanType() ? cir::CastKind::bool_to_int
1920 : cir::CastKind::integral,
1921 src, middleTy);
1922
1923 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers) {
1924 cgf.cgm.errorNYI(subExpr->getSourceRange(),
1925 "IntegralToPointer: strict vtable pointers");
1926 return {};
1927 }
1928
1929 return builder.createIntToPtr(middleVal, destCIRTy);
1930 }
1931
1932 case CK_Dynamic: {
1933 Address v = cgf.emitPointerWithAlignment(subExpr);
1934 const auto *dce = cast<CXXDynamicCastExpr>(ce);
1935 return cgf.emitDynamicCast(v, dce);
1936 }
1937 case CK_ArrayToPointerDecay:
1938 return cgf.emitArrayToPointerDecay(subExpr).getPointer();
1939
1940 case CK_NullToPointer: {
1941 if (mustVisitNullValue(subExpr))
1942 cgf.emitIgnoredExpr(subExpr);
1943
1944 // Note that DestTy is used as the MLIR type instead of a custom
1945 // nullptr type.
1946 mlir::Type ty = cgf.convertType(destTy);
1947 return builder.getNullPtr(ty, cgf.getLoc(subExpr->getExprLoc()));
1948 }
1949
1950 case CK_LValueToRValue:
1951 assert(cgf.getContext().hasSameUnqualifiedType(subExpr->getType(), destTy));
1952 assert(subExpr->isGLValue() && "lvalue-to-rvalue applied to r-value!");
1953 return Visit(const_cast<Expr *>(subExpr));
1954
1955 case CK_IntegralCast: {
1956 ScalarConversionOpts opts;
1957 if (auto *ice = dyn_cast<ImplicitCastExpr>(ce)) {
1958 if (!ice->isPartOfExplicitCast())
1959 opts = ScalarConversionOpts(cgf.sanOpts);
1960 }
1961 return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
1962 ce->getExprLoc(), opts);
1963 }
1964
1965 case CK_FloatingComplexToReal:
1966 case CK_IntegralComplexToReal:
1967 case CK_FloatingComplexToBoolean:
1968 case CK_IntegralComplexToBoolean: {
1969 mlir::Value value = cgf.emitComplexExpr(subExpr);
1970 return emitComplexToScalarConversion(cgf.getLoc(ce->getExprLoc()), value,
1971 kind, destTy);
1972 }
1973
1974 case CK_FloatingRealToComplex:
1975 case CK_FloatingComplexCast:
1976 case CK_IntegralRealToComplex:
1977 case CK_IntegralComplexCast:
1978 case CK_IntegralComplexToFloatingComplex:
1979 case CK_FloatingComplexToIntegralComplex:
1980 llvm_unreachable("scalar cast to non-scalar value");
1981
1982 case CK_PointerToIntegral: {
1983 assert(!destTy->isBooleanType() && "bool should use PointerToBool");
1984 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
1985 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1986 "strict vtable pointers");
1987 return builder.createPtrToInt(Visit(subExpr), cgf.convertType(destTy));
1988 }
1989 case CK_ToVoid:
1990 cgf.emitIgnoredExpr(subExpr);
1991 return {};
1992
1993 case CK_IntegralToFloating:
1994 case CK_FloatingToIntegral:
1995 case CK_FloatingCast:
1996 case CK_FixedPointToFloating:
1997 case CK_FloatingToFixedPoint: {
1998 if (kind == CK_FixedPointToFloating || kind == CK_FloatingToFixedPoint) {
1999 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2000 "fixed point casts");
2001 return {};
2002 }
2004 return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
2005 ce->getExprLoc());
2006 }
2007
2008 case CK_IntegralToBoolean:
2009 return emitIntToBoolConversion(Visit(subExpr),
2010 cgf.getLoc(ce->getSourceRange()));
2011
2012 case CK_PointerToBoolean:
2013 return emitPointerToBoolConversion(Visit(subExpr), subExpr->getType());
2014 case CK_FloatingToBoolean:
2015 return emitFloatToBoolConversion(Visit(subExpr),
2016 cgf.getLoc(subExpr->getExprLoc()));
2017 case CK_MemberPointerToBoolean: {
2018 mlir::Value memPtr = Visit(subExpr);
2019 return builder.createCast(cgf.getLoc(ce->getSourceRange()),
2020 cir::CastKind::member_ptr_to_bool, memPtr,
2021 cgf.convertType(destTy));
2022 }
2023
2024 case CK_VectorSplat: {
2025 // Create a vector object and fill all elements with the same scalar value.
2026 assert(destTy->isVectorType() && "CK_VectorSplat to non-vector type");
2027 return cir::VecSplatOp::create(builder,
2028 cgf.getLoc(subExpr->getSourceRange()),
2029 cgf.convertType(destTy), Visit(subExpr));
2030 }
2031 case CK_FunctionToPointerDecay:
2032 return cgf.emitLValue(subExpr).getPointer();
2033
2034 default:
2035 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2036 "CastExpr: ", ce->getCastKindName());
2037 }
2038 return {};
2039}
2040
2041mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *e) {
2043 return emitLoadOfLValue(e);
2044
2045 auto v = cgf.emitCallExpr(e).getValue();
2047 return v;
2048}
2049
2050mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *e) {
2051 // TODO(cir): The classic codegen calls tryEmitAsConstant() here. Folding
2052 // constants sounds like work for MLIR optimizers, but we'll keep an assertion
2053 // for now.
2055 Expr::EvalResult result;
2056 if (e->EvaluateAsInt(result, cgf.getContext(), Expr::SE_AllowSideEffects)) {
2057 llvm::APSInt value = result.Val.getInt();
2058 cgf.emitIgnoredExpr(e->getBase());
2059 return builder.getConstInt(cgf.getLoc(e->getExprLoc()), value);
2060 }
2061 return emitLoadOfLValue(e);
2062}
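A sketch of the fold-vs-load split above (hypothetical user code; whether a given access folds depends entirely on the constant evaluator):

  struct S { static const int k = 42; int m; };
  S g;
  int folded() { return g.k; }       // expected to fold via EvaluateAsInt: the base g
                                     // is emitted only for side effects, result is 42
  int loaded(S *s) { return s->m; }  // not foldable: ordinary load of the member lvalue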
2063
2064mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *e) {
2065 const unsigned numInitElements = e->getNumInits();
2066
2067 [[maybe_unused]] const bool ignore = std::exchange(ignoreResultAssign, false);
2068 assert((ignore == false ||
2069 (numInitElements == 0 && e->getType()->isVoidType())) &&
2070 "init list ignored");
2071
2072 if (e->hadArrayRangeDesignator()) {
2073 cgf.cgm.errorNYI(e->getSourceRange(), "ArrayRangeDesignator");
2074 return {};
2075 }
2076
2077 if (e->getType()->isVectorType()) {
2078 const auto vectorType =
2079 mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
2080
2081 SmallVector<mlir::Value, 16> elements;
2082 for (Expr *init : e->inits()) {
2083 elements.push_back(Visit(init));
2084 }
2085
2086 // Zero-initialize any remaining values.
2087 if (numInitElements < vectorType.getSize()) {
2088 const mlir::Value zeroValue = cgf.getBuilder().getNullValue(
2089 vectorType.getElementType(), cgf.getLoc(e->getSourceRange()));
2090 std::fill_n(std::back_inserter(elements),
2091 vectorType.getSize() - numInitElements, zeroValue);
2092 }
2093
2094 return cir::VecCreateOp::create(cgf.getBuilder(),
2095 cgf.getLoc(e->getSourceRange()), vectorType,
2096 elements);
2097 }
2098
2099 // C++11 value-initialization for the scalar.
2100 if (numInitElements == 0)
2101 return emitNullValue(e->getType(), cgf.getLoc(e->getExprLoc()));
2102
2103 return Visit(e->getInit(0));
2104}
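A sketch of the vector and scalar branches above (assuming the GCC/Clang vector_size extension; hypothetical code):

  typedef int v4i __attribute__((vector_size(16)));
  v4i makeVec() {
    v4i v = {1, 2}; // elements 2 and 3 get the zero value, matching the fill_n above
    return v;
  }
  int valueInit() {
    int x = {};     // empty init list on a scalar: the numInitElements == 0 case,
    return x;       // i.e. emitNullValue
  }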
2105
2106mlir::Value CIRGenFunction::emitScalarConversion(mlir::Value src,
2107 QualType srcTy, QualType dstTy,
2108 SourceLocation loc) {
2111 "Invalid scalar expression to emit");
2112 return ScalarExprEmitter(*this, builder)
2113 .emitScalarConversion(src, srcTy, dstTy, loc);
2114}
2115
2116mlir::Value CIRGenFunction::emitComplexToScalarConversion(mlir::Value src,
2117 QualType srcTy,
2118 QualType dstTy,
2119 SourceLocation loc) {
2120 assert(srcTy->isAnyComplexType() && hasScalarEvaluationKind(dstTy) &&
2121 "Invalid complex -> scalar conversion");
2122
2123 QualType complexElemTy = srcTy->castAs<ComplexType>()->getElementType();
2124 if (dstTy->isBooleanType()) {
2125 auto kind = complexElemTy->isFloatingType()
2126 ? cir::CastKind::float_complex_to_bool
2127 : cir::CastKind::int_complex_to_bool;
2128 return builder.createCast(getLoc(loc), kind, src, convertType(dstTy));
2129 }
2130
2131 auto kind = complexElemTy->isFloatingType()
2132 ? cir::CastKind::float_complex_to_real
2133 : cir::CastKind::int_complex_to_real;
2134 mlir::Value real =
2135 builder.createCast(getLoc(loc), kind, src, convertType(complexElemTy));
2136 return emitScalarConversion(real, complexElemTy, dstTy, loc);
2137}
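A user-level sketch of the two conversions handled above (using Clang's C-style _Complex extension in C++; hypothetical code):

  bool nonzero(_Complex double z) { return (bool)z; }    // *_complex_to_bool: true
                                                         // if either part is nonzero
  float realPart(_Complex double z) { return (float)z; } // *_complex_to_real, then an
                                                         // ordinary double -> float
                                                         // scalar conversion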
2138
2139mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) {
2140 // Perform vector logical not on comparison with zero vector.
2141 if (e->getType()->isVectorType() &&
2142 e->getType()->castAs<VectorType>()->getVectorKind() ==
2143 VectorKind::Generic) {
2144 mlir::Value oper = Visit(e->getSubExpr());
2145 mlir::Location loc = cgf.getLoc(e->getExprLoc());
2146 auto operVecTy = mlir::cast<cir::VectorType>(oper.getType());
2147 auto exprVecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
2148 mlir::Value zeroVec = builder.getNullValue(operVecTy, loc);
2149 return cir::VecCmpOp::create(builder, loc, exprVecTy, cir::CmpOpKind::eq,
2150 oper, zeroVec);
2151 }
2152
2153 // Compare operand to zero.
2154 mlir::Value boolVal = cgf.evaluateExprAsBool(e->getSubExpr());
2155
2156 // Invert value.
2157 boolVal = builder.createNot(boolVal);
2158
2159 // ZExt result to the expr type.
2160 return maybePromoteBoolResult(boolVal, cgf.convertType(e->getType()));
2161}
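A sketch of the vector and scalar paths above (assuming the GCC/Clang vector_size extension; hypothetical code):

  typedef int v4i __attribute__((vector_size(16)));
  v4i vecNot(v4i v) {
    return !v;  // emitted as (v == zero vector); lanes that were 0 become the vector
                // 'true' value (all-ones in the GCC vector model), the rest become 0
  }
  int scalarNot(int x) {
    return !x;  // scalar path: compare to zero, invert, then widen the bool result
  }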
2162
2163mlir::Value ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *e) {
2164 QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
2165 mlir::Value result = VisitRealImag(e, promotionTy);
2166 if (result && !promotionTy.isNull())
2167 result = emitUnPromotedValue(result, e->getType());
2168 return result;
2169}
2170
2171mlir::Value ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *e) {
2172 QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
2173 mlir::Value result = VisitRealImag(e, promotionTy);
2174 if (result && !promotionTy.isNull())
2175 result = emitUnPromotedValue(result, e->getType());
2176 return result;
2177}
2178
2179mlir::Value ScalarExprEmitter::VisitRealImag(const UnaryOperator *e,
2180 QualType promotionTy) {
2181 assert(e->getOpcode() == clang::UO_Real ||
2182 e->getOpcode() == clang::UO_Imag &&
2183 "Invalid UnaryOp kind for ComplexType Real or Imag");
2184
2185 Expr *op = e->getSubExpr();
2186 mlir::Location loc = cgf.getLoc(e->getExprLoc());
2187 if (op->getType()->isAnyComplexType()) {
2188 // If it's an l-value, load through the appropriate subobject l-value.
2189 // Note that we have to ask `e` because `op` might be an l-value that
2190 // this won't work for, e.g. an Obj-C property
2191 mlir::Value complex = cgf.emitComplexExpr(op);
2192 if (e->isGLValue() && !promotionTy.isNull()) {
2193 promotionTy = promotionTy->isAnyComplexType()
2194 ? promotionTy
2195 : cgf.getContext().getComplexType(promotionTy);
2196 complex = cgf.emitPromotedValue(complex, promotionTy);
2197 }
2198
2199 return e->getOpcode() == clang::UO_Real
2200 ? builder.createComplexReal(loc, complex)
2201 : builder.createComplexImag(loc, complex);
2202 }
2203
2204 if (e->getOpcode() == UO_Real) {
2205 mlir::Value operand = promotionTy.isNull()
2206 ? Visit(op)
2207 : cgf.emitPromotedScalarExpr(op, promotionTy);
2208 return builder.createComplexReal(loc, operand);
2209 }
2210
2211 // __imag on a scalar returns zero. Emit the subexpr to ensure side
2212 // effects are evaluated, but not the actual value.
2213 mlir::Value operand;
2214 if (op->isGLValue()) {
2215 operand = cgf.emitLValue(op).getPointer();
2216 operand = cir::LoadOp::create(builder, loc, operand);
2217 } else if (!promotionTy.isNull()) {
2218 operand = cgf.emitPromotedScalarExpr(op, promotionTy);
2219 } else {
2220 operand = cgf.emitScalarExpr(op);
2221 }
2222 return builder.createComplexImag(loc, operand);
2223}
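A small sketch of the complex and scalar cases above (GNU __real__ / __imag__ extensions; hypothetical code):

  double re(_Complex double z) { return __real__ z; } // extracts the real part
  double im0(double x) { return __imag__ x; }         // __imag__ on a scalar: x is
                                                      // still evaluated, result is 0.0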
2224
2225/// Return the size or alignment of the type of argument of the sizeof
2226/// expression as an integer.
2227mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2228 const UnaryExprOrTypeTraitExpr *e) {
2229 const QualType typeToSize = e->getTypeOfArgument();
2230 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
2231 if (auto kind = e->getKind();
2232 kind == UETT_SizeOf || kind == UETT_DataSizeOf) {
2233 if (cgf.getContext().getAsVariableArrayType(typeToSize)) {
2235 "sizeof operator for VariableArrayType",
2236 e->getStmtClassName());
2237 return builder.getConstant(
2238 loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
2239 llvm::APSInt(llvm::APInt(64, 1), true)));
2240 }
2241 } else if (e->getKind() == UETT_OpenMPRequiredSimdAlign) {
2243 e->getSourceRange(), "sizeof operator for OpenMpRequiredSimdAlign",
2244 e->getStmtClassName());
2245 return builder.getConstant(
2246 loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
2247 llvm::APSInt(llvm::APInt(64, 1), true)));
2248 }
2249
2250 return builder.getConstant(
2251 loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
2252 e->EvaluateKnownConstInt(cgf.getContext())));
2253}
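Apart from the not-yet-implemented cases above, the trait simply folds to a constant (a hypothetical user-level sketch):

  #include <cstddef>
  std::size_t bytes() { return sizeof(int[8]); }  // folded via EvaluateKnownConstInt
  std::size_t align() { return alignof(double); } // alignof takes the same final path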
2254
2255/// Return true if the specified expression is cheap enough and side-effect-free
2256/// enough to evaluate unconditionally instead of conditionally. This is used
2257/// to convert control flow into selects in some cases.
2258/// TODO(cir): can be shared with LLVM codegen.
2259static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e,
2260 CIRGenFunction &cgf) {
2261 // Anything that is an integer or floating point constant is fine.
2262 return e->IgnoreParens()->isEvaluatable(cgf.getContext());
2263
2264 // Even non-volatile automatic variables can't be evaluated unconditionally.
2265 // Referencing a thread_local may cause non-trivial initialization work to
2266 // occur. If we're inside a lambda and one of the variables is from the scope
2267 // outside the lambda, that function may have returned already. Reading its
2268 // locals is a bad idea. Also, these reads may introduce races that didn't
2269 // exist in the source-level program.
2270}
2271
2272mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator(
2273 const AbstractConditionalOperator *e) {
2274 CIRGenBuilderTy &builder = cgf.getBuilder();
2275 mlir::Location loc = cgf.getLoc(e->getSourceRange());
2276 ignoreResultAssign = false;
2277
2278 // Bind the common expression if necessary.
2279 CIRGenFunction::OpaqueValueMapping binding(cgf, e);
2280
2281 Expr *condExpr = e->getCond();
2282 Expr *lhsExpr = e->getTrueExpr();
2283 Expr *rhsExpr = e->getFalseExpr();
2284
2285 // If the condition constant folds and can be elided, try to avoid emitting
2286 // the condition and the dead arm.
2287 bool condExprBool;
2288 if (cgf.constantFoldsToBool(condExpr, condExprBool)) {
2289 Expr *live = lhsExpr, *dead = rhsExpr;
2290 if (!condExprBool)
2291 std::swap(live, dead);
2292
2293 // If the dead side doesn't have labels we need, just emit the Live part.
2294 if (!cgf.containsLabel(dead)) {
2295 if (condExprBool)
2296 assert(!cir::MissingFeatures::incrementProfileCounter());
2297 mlir::Value result = Visit(live);
2298
2299 // If the live part is a throw expression, it acts like it has a void
2300 // type, so evaluating it returns a null Value. However, a conditional
2301 // with non-void type must return a non-null Value.
2302 if (!result && !e->getType()->isVoidType()) {
2303 cgf.cgm.errorNYI(e->getSourceRange(),
2304 "throw expression in conditional operator");
2305 result = {};
2306 }
2307
2308 return result;
2309 }
2310 }
2311
2312 QualType condType = condExpr->getType();
2313
2314 // OpenCL: If the condition is a vector, we can treat this condition like
2315 // the select function.
2316 if ((cgf.getLangOpts().OpenCL && condType->isVectorType()) ||
2317 condType->isExtVectorType()) {
2319 cgf.cgm.errorNYI(e->getSourceRange(), "vector ternary op");
2320 }
2321
2322 if (condType->isVectorType() || condType->isSveVLSBuiltinType()) {
2323 if (!condType->isVectorType()) {
2325 cgf.cgm.errorNYI(loc, "TernaryOp for SVE vector");
2326 return {};
2327 }
2328
2329 mlir::Value condValue = Visit(condExpr);
2330 mlir::Value lhsValue = Visit(lhsExpr);
2331 mlir::Value rhsValue = Visit(rhsExpr);
2332 return cir::VecTernaryOp::create(builder, loc, condValue, lhsValue,
2333 rhsValue);
2334 }
2335
2336 // If this is a really simple expression (like x ? 4 : 5), emit this as a
2337 // select instead of as control flow. We can only do this if it is cheap
2338 // and safe to evaluate the LHS and RHS unconditionally.
2339 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, cgf) &&
2340 isCheapEnoughToEvaluateUnconditionally(rhsExpr, cgf)) {
2341 bool lhsIsVoid = false;
2342 mlir::Value condV = cgf.evaluateExprAsBool(condExpr);
2344
2345 mlir::Value lhs = Visit(lhsExpr);
2346 if (!lhs) {
2347 lhs = builder.getNullValue(cgf.voidTy, loc);
2348 lhsIsVoid = true;
2349 }
2350
2351 mlir::Value rhs = Visit(rhsExpr);
2352 if (lhsIsVoid) {
2353 assert(!rhs && "lhs and rhs types must match");
2354 rhs = builder.getNullValue(cgf.voidTy, loc);
2355 }
2356
2357 return builder.createSelect(loc, condV, lhs, rhs);
2358 }
2359
2360 mlir::Value condV = cgf.emitOpOnBoolExpr(loc, condExpr);
2361 CIRGenFunction::ConditionalEvaluation eval(cgf);
2362 SmallVector<mlir::OpBuilder::InsertPoint, 2> insertPoints{};
2363 mlir::Type yieldTy{};
2364
2365 auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc, Expr *expr) {
2366 CIRGenFunction::LexicalScope lexScope{cgf, loc, b.getInsertionBlock()};
2368
2370 eval.beginEvaluation();
2371 mlir::Value branch = Visit(expr);
2372 eval.endEvaluation();
2373
2374 if (branch) {
2375 yieldTy = branch.getType();
2376 cir::YieldOp::create(b, loc, branch);
2377 } else {
2378 // If the LHS or RHS is a throw or void expression, we need to patch up
2379 // the arms so that their yield types match.
2380 insertPoints.push_back(b.saveInsertionPoint());
2381 }
2382 };
2383
2384 mlir::Value result = cir::TernaryOp::create(
2385 builder, loc, condV,
2386 /*trueBuilder=*/
2387 [&](mlir::OpBuilder &b, mlir::Location loc) {
2388 emitBranch(b, loc, lhsExpr);
2389 },
2390 /*falseBuilder=*/
2391 [&](mlir::OpBuilder &b, mlir::Location loc) {
2392 emitBranch(b, loc, rhsExpr);
2393 })
2394 .getResult();
2395
2396 if (!insertPoints.empty()) {
2397 // If both arms are void, so be it.
2398 if (!yieldTy)
2399 yieldTy = cgf.voidTy;
2400
2401 // Insert required yields.
2402 for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
2403 mlir::OpBuilder::InsertionGuard guard(builder);
2404 builder.restoreInsertionPoint(toInsert);
2405
2406 // Block does not return: build empty yield.
2407 if (mlir::isa<cir::VoidType>(yieldTy)) {
2408 cir::YieldOp::create(builder, loc);
2409 } else { // Block returns: set null yield value.
2410 mlir::Value op0 = builder.getNullValue(yieldTy, loc);
2411 cir::YieldOp::create(builder, loc, op0);
2412 }
2413 }
2414 }
2415
2416 return result;
2417}
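To summarize the three strategies above with a tiny example (hypothetical user code; "select" and "TernaryOp" refer to the builder calls used in this function):

  int f1(int);
  int f2(int);
  int pick(bool c, int x, int y) {
    int a = 1 ? x : y;         // condition folds to true: only the live arm is emitted
    int b = c ? 4 : 5;         // both arms cheap and side-effect free: a single select
    int d = c ? f1(x) : f2(y); // general case: a TernaryOp with one region per arm,
                               // each ending in a yield of its value
    return a + b + d;
  }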
2418
2419mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *e,
2420 LValue lv,
2421 cir::UnaryOpKind kind,
2422 bool isPre) {
2423 return ScalarExprEmitter(*this, builder)
2424 .emitScalarPrePostIncDec(e, lv, kind, isPre);
2425}
#define HANDLE_BINOP(OP)
static bool mustVisitNullValue(const Expr *e)
#define COMPOUND_OP(Op)
#define HANDLEBINOP(OP)
static bool isWidenedIntegerOp(const ASTContext &astContext, const Expr *e)
Check if e is a widened promoted integer.
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e, CIRGenFunction &cgf)
Return true if the specified expression is cheap enough and side-effect-free enough to evaluate uncon...
static bool canElideOverflowCheck(const ASTContext &astContext, const BinOpInfo &op)
Check if we can skip the overflow check for Op.
static std::optional< QualType > getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e)
If e is a widened promoted integer, get its base (unpromoted) type.
#define VISITCOMP(CODE)
cir::ConstantOp getBool(bool state, mlir::Location loc)
cir::ConstantOp getNullValue(mlir::Type ty, mlir::Location loc)
mlir::Value createCast(mlir::Location loc, cir::CastKind kind, mlir::Value src, mlir::Type newTy)
mlir::Value createIntToPtr(mlir::Value src, mlir::Type newTy)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
cir::CmpOp createCompare(mlir::Location loc, cir::CmpOpKind kind, mlir::Value lhs, mlir::Value rhs)
mlir::Value createSelect(mlir::Location loc, mlir::Value condition, mlir::Value trueValue, mlir::Value falseValue)
mlir::Type getIntPtrType(mlir::Type ty) const
llvm::APInt getValue() const
APSInt & getInt()
Definition APValue.h:489
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CanQualType FloatTy
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
CanQualType BoolTy
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getComplexType(QualType T) const
Return the uniqued reference to the type for a complex number with the specified element type.
bool isPromotableIntegerType(QualType T) const
More type predicates useful for type checking/promotion.
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4465
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4471
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4477
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:3972
Expr * getLHS() const
Definition Expr.h:4022
SourceLocation getExprLoc() const
Definition Expr.h:4013
Expr * getRHS() const
Definition Expr.h:4024
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:4185
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, const Expr *LHS, const Expr *RHS)
Return true if a binary operator using the specified opcode and operands would match the 'p = (i8*)nu...
Definition Expr.cpp:2200
Opcode getOpcode() const
Definition Expr.h:4017
BinaryOperatorKind Opcode
Definition Expr.h:3977
mlir::Value getPointer() const
Definition Address.h:82
mlir::Value createNeg(mlir::Value value)
void forceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
static bool hasScalarEvaluationKind(clang::QualType type)
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
mlir::Type convertType(clang::QualType t)
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType)
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
const clang::LangOptions & getLangOpts() const
LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e, mlir::Value &result)
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind kind, bool isPre)
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType)
CIRGenBuilderTy & getBuilder()
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
clang::ASTContext & getContext() const
void emitNullabilityCheck(LValue lhs, mlir::Value rhs, clang::SourceLocation loc)
Given an assignment *lhs = rhs, emit a test that checks if rhs is nonnull, if 1LHS is marked _Nonnull...
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
const cir::CIRDataLayout getDataLayout() const
const clang::CodeGenOptions & getCodeGenOpts() const
mlir::Value getPointer() const
static RValue get(mlir::Value v)
Definition CIRGenValue.h:82
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:56
bool getValue() const
Definition ExprCXX.h:740
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1599
CastKind getCastKind() const
Definition Expr.h:3654
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1946
Expr * getSubExpr()
Definition Expr.h:3660
unsigned getValue() const
Definition Expr.h:1629
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3275
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4234
QualType getComputationLHSType() const
Definition Expr.h:4268
QualType getComputationResultType() const
Definition Expr.h:4271
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:4743
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isGLValue() const
Definition Expr.h:287
@ SE_AllowSideEffects
Allow any unmodeled side effect.
Definition Expr.h:674
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3081
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3065
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
llvm::APFloat getValue() const
Definition Expr.h:1666
const Expr * getSubExpr() const
Definition Expr.h:1062
Expr * getResultExpr()
Return the result expression of this controlling expression.
Definition Expr.h:6396
unsigned getNumInits() const
Definition Expr.h:5263
bool hadArrayRangeDesignator() const
Definition Expr.h:5417
const Expr * getInit(unsigned Init) const
Definition Expr.h:5287
ArrayRef< Expr * > inits()
Definition Expr.h:5283
bool isSignedOverflowDefined() const
Expr * getBase() const
Definition Expr.h:3375
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3493
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3653
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:1208
Expr * getSelectedExpr() const
Definition ExprCXX.h:4641
const Expr * getSubExpr() const
Definition Expr.h:2199
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8278
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1438
QualType getCanonicalType() const
Definition TypeBase.h:8330
bool UseExcessPrecision(const ASTContext &Ctx)
Definition Type.cpp:1612
bool isCanonical() const
Definition TypeBase.h:8335
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
unsigned getNumSubExprs() const
getNumSubExprs - Return the size of the SubExprs array.
Definition Expr.h:4610
Expr * getExpr(unsigned Index)
getExpr - Return the Expr at the specified index.
Definition Expr.h:4616
Encodes a location in the source.
SourceLocation getBegin() const
CompoundStmt * getSubStmt()
Definition Expr.h:4546
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
const char * getStmtClassName() const
Definition Stmt.cpp:87
bool isVoidType() const
Definition TypeBase.h:8871
bool isBooleanType() const
Definition TypeBase.h:9001
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2225
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2205
bool isConstantMatrixType() const
Definition TypeBase.h:8676
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8915
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9158
bool isReferenceType() const
Definition TypeBase.h:8539
bool isSveVLSBuiltinType() const
Determines if this is a sizeless type supported by the 'arm_sve_vector_bits' type attribute,...
Definition Type.cpp:2607
bool hasUnsignedIntegerRepresentation() const
Determine whether this type has an unsigned integer representation of some sort, e....
Definition Type.cpp:2291
bool isExtVectorType() const
Definition TypeBase.h:8658
bool isAnyComplexType() const
Definition TypeBase.h:8650
bool isFixedPointType() const
Return true if this is a fixed point type according to ISO/IEC JTC1 SC22 WG14 N1169.
Definition TypeBase.h:8927
bool isHalfType() const
Definition TypeBase.h:8875
bool hasSignedIntegerRepresentation() const
Determine whether this type has an signed integer representation of some sort, e.g....
Definition Type.cpp:2243
bool isMatrixType() const
Definition TypeBase.h:8672
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2800
bool isFunctionType() const
Definition TypeBase.h:8511
bool isVectorType() const
Definition TypeBase.h:8654
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2320
bool isFloatingType() const
Definition Type.cpp:2304
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2253
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9091
bool isNullPtrType() const
Definition TypeBase.h:8908
QualType getTypeOfArgument() const
Gets the argument type, or the type of the argument expression, whichever is appropriate.
Definition Expr.h:2694
UnaryExprOrTypeTrait getKind() const
Definition Expr.h:2657
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
SourceLocation getExprLoc() const
Definition Expr.h:2368
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
static bool isIncrementOp(Opcode Op)
Definition Expr.h:2326
bool canOverflow() const
Returns true if the unary operator can cause an overflow.
Definition Expr.h:2298
Represents a GCC generic vector type.
Definition TypeBase.h:4175
VectorKind getVectorKind() const
Definition TypeBase.h:4195
All of the diagnostics that can be emitted by the frontend.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
CastKind
CastKind - The kind of operation required for a conversion.
@ Generic
not a target-specific vector type
Definition TypeBase.h:4136
U cast(CodeGen::Address addr)
Definition Address.h:327
static bool instrumentation()
static bool dataMemberType()
static bool objCLifetime()
static bool addressSpace()
static bool fixedPointType()
static bool vecTernaryOp()
static bool cgFPOptionsRAII()
static bool fpConstraints()
static bool addHeapAllocSiteMetadata()
static bool mayHaveIntegerOverflow()
static bool tryEmitAsConstant()
static bool llvmLoweringPtrDiffConsidersPointee()
static bool scalableVectors()
static bool emitLValueAlignmentAssumption()
static bool incrementProfileCounter()
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174