clang 22.0.0git
CIRGenExprScalar.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Emit Expr nodes with scalar CIR types as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
14#include "CIRGenValue.h"
15
16#include "clang/AST/Expr.h"
19
20#include "mlir/IR/Location.h"
21#include "mlir/IR/Value.h"
22
23#include <cassert>
24#include <utility>
25
26using namespace clang;
27using namespace clang::CIRGen;
28
29namespace {
30
31struct BinOpInfo {
32 mlir::Value lhs;
33 mlir::Value rhs;
34 SourceRange loc;
35 QualType fullType; // Type of operands and result
36 QualType compType; // Type used for computations. Element type
37 // for vectors, otherwise same as FullType.
38 BinaryOperator::Opcode opcode; // Opcode of BinOp to perform
39 FPOptions fpfeatures;
40 const Expr *e; // Entire expr, for error unsupported. May not be binop.
41
42 /// Check if the binop computes a division or a remainder.
43 bool isDivRemOp() const {
44 return opcode == BO_Div || opcode == BO_Rem || opcode == BO_DivAssign ||
45 opcode == BO_RemAssign;
46 }
47
48 /// Check if the binop can result in integer overflow.
49 bool mayHaveIntegerOverflow() const {
50 // Without constant input, we can't rule out overflow.
51 auto lhsci = lhs.getDefiningOp<cir::ConstantOp>();
52 auto rhsci = rhs.getDefiningOp<cir::ConstantOp>();
53 if (!lhsci || !rhsci)
54 return true;
55
57 // TODO(cir): For now we just assume that we might overflow
58 return true;
59 }
60
61 /// Check if at least one operand is a fixed point type. In such cases,
62 /// this operation did not follow usual arithmetic conversion and both
63 /// operands might not be of the same type.
64 bool isFixedPointOp() const {
65 // We cannot simply check the result type since comparison operations
66 // return an int.
67 if (const auto *binOp = llvm::dyn_cast<BinaryOperator>(e)) {
68 QualType lhstype = binOp->getLHS()->getType();
69 QualType rhstype = binOp->getRHS()->getType();
70 return lhstype->isFixedPointType() || rhstype->isFixedPointType();
71 }
72 if (const auto *unop = llvm::dyn_cast<UnaryOperator>(e))
73 return unop->getSubExpr()->getType()->isFixedPointType();
74 return false;
75 }
76};
77
78class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
79 CIRGenFunction &cgf;
80 CIRGenBuilderTy &builder;
81 bool ignoreResultAssign;
82
83public:
84 ScalarExprEmitter(CIRGenFunction &cgf, CIRGenBuilderTy &builder)
85 : cgf(cgf), builder(builder) {}
86
87 //===--------------------------------------------------------------------===//
88 // Utilities
89 //===--------------------------------------------------------------------===//
90
91 mlir::Value emitComplexToScalarConversion(mlir::Location loc,
92 mlir::Value value, CastKind kind,
93 QualType destTy);
94
95 mlir::Value emitNullValue(QualType ty, mlir::Location loc) {
96 return cgf.cgm.emitNullConstant(ty, loc);
97 }
98
99 mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType) {
100 return builder.createFloatingCast(result, cgf.convertType(promotionType));
101 }
102
103 mlir::Value emitUnPromotedValue(mlir::Value result, QualType exprType) {
104 return builder.createFloatingCast(result, cgf.convertType(exprType));
105 }
106
107 mlir::Value emitPromoted(const Expr *e, QualType promotionType);
108
109 mlir::Value maybePromoteBoolResult(mlir::Value value,
110 mlir::Type dstTy) const {
111 if (mlir::isa<cir::IntType>(dstTy))
112 return builder.createBoolToInt(value, dstTy);
113 if (mlir::isa<cir::BoolType>(dstTy))
114 return value;
115 llvm_unreachable("Can only promote integer or boolean types");
116 }
117
118 //===--------------------------------------------------------------------===//
119 // Visitor Methods
120 //===--------------------------------------------------------------------===//
121
122 mlir::Value Visit(Expr *e) {
123 return StmtVisitor<ScalarExprEmitter, mlir::Value>::Visit(e);
124 }
125
126 mlir::Value VisitStmt(Stmt *s) {
127 llvm_unreachable("Statement passed to ScalarExprEmitter");
128 }
129
130 mlir::Value VisitExpr(Expr *e) {
131 cgf.getCIRGenModule().errorNYI(
132 e->getSourceRange(), "scalar expression kind: ", e->getStmtClassName());
133 return {};
134 }
135
136 mlir::Value VisitPackIndexingExpr(PackIndexingExpr *e) {
137 return Visit(e->getSelectedExpr());
138 }
139
140 mlir::Value VisitParenExpr(ParenExpr *pe) { return Visit(pe->getSubExpr()); }
141
142 mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
143 return Visit(ge->getResultExpr());
144 }
145
146 /// Emits the address of the l-value, then loads and returns the result.
147 mlir::Value emitLoadOfLValue(const Expr *e) {
148 LValue lv = cgf.emitLValue(e);
149 // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V);
150 return cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
151 }
152
153 mlir::Value emitLoadOfLValue(LValue lv, SourceLocation loc) {
154 return cgf.emitLoadOfLValue(lv, loc).getValue();
155 }
156
157 // l-values
158 mlir::Value VisitDeclRefExpr(DeclRefExpr *e) {
159 if (CIRGenFunction::ConstantEmission constant = cgf.tryEmitAsConstant(e))
160 return cgf.emitScalarConstant(constant, e);
161
162 return emitLoadOfLValue(e);
163 }
164
165 mlir::Value VisitIntegerLiteral(const IntegerLiteral *e) {
166 mlir::Type type = cgf.convertType(e->getType());
167 return builder.create<cir::ConstantOp>(
168 cgf.getLoc(e->getExprLoc()), cir::IntAttr::get(type, e->getValue()));
169 }
170
171 mlir::Value VisitFloatingLiteral(const FloatingLiteral *e) {
172 mlir::Type type = cgf.convertType(e->getType());
173 assert(mlir::isa<cir::FPTypeInterface>(type) &&
174 "expect floating-point type");
175 return builder.create<cir::ConstantOp>(
176 cgf.getLoc(e->getExprLoc()), cir::FPAttr::get(type, e->getValue()));
177 }
178
179 mlir::Value VisitCharacterLiteral(const CharacterLiteral *e) {
180 mlir::Type ty = cgf.convertType(e->getType());
181 auto init = cir::IntAttr::get(ty, e->getValue());
182 return builder.create<cir::ConstantOp>(cgf.getLoc(e->getExprLoc()), init);
183 }
184
185 mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *e) {
186 return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
187 }
188
189 mlir::Value VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *e) {
190 if (e->getType()->isVoidType())
191 return {};
192
193 return emitNullValue(e->getType(), cgf.getLoc(e->getSourceRange()));
194 }
195
196 mlir::Value VisitOpaqueValueExpr(OpaqueValueExpr *e) {
197 if (e->isGLValue())
198 return emitLoadOfLValue(cgf.getOrCreateOpaqueLValueMapping(e),
199 e->getExprLoc());
200
201 // Otherwise, assume the mapping is the scalar directly.
202 return cgf.getOrCreateOpaqueRValueMapping(e).getValue();
203 }
204
205 mlir::Value VisitCastExpr(CastExpr *e);
206 mlir::Value VisitCallExpr(const CallExpr *e);
207
208 mlir::Value VisitStmtExpr(StmtExpr *e) {
209 CIRGenFunction::StmtExprEvaluation eval(cgf);
210 if (e->getType()->isVoidType()) {
211 (void)cgf.emitCompoundStmt(*e->getSubStmt());
212 return {};
213 }
214
215 Address retAlloca =
216 cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
217 (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca);
218
219 return cgf.emitLoadOfScalar(cgf.makeAddrLValue(retAlloca, e->getType()),
220 e->getExprLoc());
221 }
222
223 mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
224 if (e->getBase()->getType()->isVectorType()) {
226
227 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
228 const mlir::Value vecValue = Visit(e->getBase());
229 const mlir::Value indexValue = Visit(e->getIdx());
230 return cgf.builder.create<cir::VecExtractOp>(loc, vecValue, indexValue);
231 }
232 // Just load the lvalue formed by the subscript expression.
233 return emitLoadOfLValue(e);
234 }
235
236 mlir::Value VisitShuffleVectorExpr(ShuffleVectorExpr *e) {
237 if (e->getNumSubExprs() == 2) {
238 // The undocumented form of __builtin_shufflevector.
239 mlir::Value inputVec = Visit(e->getExpr(0));
240 mlir::Value indexVec = Visit(e->getExpr(1));
241 return cgf.builder.create<cir::VecShuffleDynamicOp>(
242 cgf.getLoc(e->getSourceRange()), inputVec, indexVec);
243 }
244
245 mlir::Value vec1 = Visit(e->getExpr(0));
246 mlir::Value vec2 = Visit(e->getExpr(1));
247
248 // The documented form of __builtin_shufflevector, where the indices are
249 // a variable number of integer constants. The constants will be stored
250 // in an ArrayAttr.
251 SmallVector<mlir::Attribute, 8> indices;
252 for (unsigned i = 2; i < e->getNumSubExprs(); ++i) {
253 indices.push_back(
254 cir::IntAttr::get(cgf.builder.getSInt64Ty(),
255 e->getExpr(i)
256 ->EvaluateKnownConstInt(cgf.getContext())
257 .getSExtValue()));
258 }
259
260 return cgf.builder.create<cir::VecShuffleOp>(
261 cgf.getLoc(e->getSourceRange()), cgf.convertType(e->getType()), vec1,
262 vec2, cgf.builder.getArrayAttr(indices));
263 }
264
265 mlir::Value VisitConvertVectorExpr(ConvertVectorExpr *e) {
266 // __builtin_convertvector is an element-wise cast, and is implemented as a
267 // regular cast. The back end handles casts of vectors correctly.
268 return emitScalarConversion(Visit(e->getSrcExpr()),
269 e->getSrcExpr()->getType(), e->getType(),
270 e->getSourceRange().getBegin());
271 }
272
273 mlir::Value VisitMemberExpr(MemberExpr *e);
274
275 mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
276 return emitLoadOfLValue(e);
277 }
278
279 mlir::Value VisitInitListExpr(InitListExpr *e);
280
281 mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *e) {
282 return VisitCastExpr(e);
283 }
284
285 mlir::Value VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *e) {
286 return cgf.cgm.emitNullConstant(e->getType(),
287 cgf.getLoc(e->getSourceRange()));
288 }
289
290 /// Perform a pointer to boolean conversion.
291 mlir::Value emitPointerToBoolConversion(mlir::Value v, QualType qt) {
292 // TODO(cir): comparing the ptr to null is done when lowering CIR to LLVM.
293 // We might want to have a separate pass for these types of conversions.
294 return cgf.getBuilder().createPtrToBoolCast(v);
295 }
296
297 mlir::Value emitFloatToBoolConversion(mlir::Value src, mlir::Location loc) {
298 cir::BoolType boolTy = builder.getBoolTy();
299 return builder.create<cir::CastOp>(loc, boolTy,
300 cir::CastKind::float_to_bool, src);
301 }
302
303 mlir::Value emitIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) {
304 // Because of the type rules of C, we often end up computing a
305 // logical value, then zero extending it to int, then wanting it
306 // as a logical value again.
307 // TODO: optimize this common case here or leave it for later
308 // CIR passes?
309 cir::BoolType boolTy = builder.getBoolTy();
310 return builder.create<cir::CastOp>(loc, boolTy, cir::CastKind::int_to_bool,
311 srcVal);
312 }
313
314 /// Convert the specified expression value to a boolean (!cir.bool) truth
315 /// value. This is equivalent to "Val != 0".
316 mlir::Value emitConversionToBool(mlir::Value src, QualType srcType,
317 mlir::Location loc) {
318 assert(srcType.isCanonical() && "EmitScalarConversion strips typedefs");
319
320 if (srcType->isRealFloatingType())
321 return emitFloatToBoolConversion(src, loc);
322
323 if (llvm::isa<MemberPointerType>(srcType)) {
324 cgf.getCIRGenModule().errorNYI(loc, "member pointer to bool conversion");
325 return builder.getFalse(loc);
326 }
327
328 if (srcType->isIntegerType())
329 return emitIntToBoolConversion(src, loc);
330
331 assert(::mlir::isa<cir::PointerType>(src.getType()));
332 return emitPointerToBoolConversion(src, srcType);
333 }
334
335 // Emit a conversion from the specified type to the specified destination
336 // type, both of which are CIR scalar types.
337 struct ScalarConversionOpts {
338 bool treatBooleanAsSigned;
339 bool emitImplicitIntegerTruncationChecks;
340 bool emitImplicitIntegerSignChangeChecks;
341
342 ScalarConversionOpts()
343 : treatBooleanAsSigned(false),
344 emitImplicitIntegerTruncationChecks(false),
345 emitImplicitIntegerSignChangeChecks(false) {}
346
347 ScalarConversionOpts(clang::SanitizerSet sanOpts)
348 : treatBooleanAsSigned(false),
349 emitImplicitIntegerTruncationChecks(
350 sanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
351 emitImplicitIntegerSignChangeChecks(
352 sanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
353 };
354
355 // Conversion from bool, integral, or floating-point to integral or
356 // floating-point. Conversions involving other types are handled elsewhere.
357 // Conversion to bool is handled elsewhere because that's a comparison against
358 // zero, not a simple cast. This handles both individual scalars and vectors.
359 mlir::Value emitScalarCast(mlir::Value src, QualType srcType,
360 QualType dstType, mlir::Type srcTy,
361 mlir::Type dstTy, ScalarConversionOpts opts) {
362 assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
363 "Internal error: matrix types not handled by this function.");
364 assert(!(mlir::isa<mlir::IntegerType>(srcTy) ||
365 mlir::isa<mlir::IntegerType>(dstTy)) &&
366 "Obsolete code. Don't use mlir::IntegerType with CIR.");
367
368 mlir::Type fullDstTy = dstTy;
369 if (mlir::isa<cir::VectorType>(srcTy) &&
370 mlir::isa<cir::VectorType>(dstTy)) {
371 // Use the element types of the vectors to figure out the CastKind.
372 srcTy = mlir::dyn_cast<cir::VectorType>(srcTy).getElementType();
373 dstTy = mlir::dyn_cast<cir::VectorType>(dstTy).getElementType();
374 }
375
376 std::optional<cir::CastKind> castKind;
377
378 if (mlir::isa<cir::BoolType>(srcTy)) {
379 if (opts.treatBooleanAsSigned)
380 cgf.getCIRGenModule().errorNYI("signed bool");
381 if (cgf.getBuilder().isInt(dstTy))
382 castKind = cir::CastKind::bool_to_int;
383 else if (mlir::isa<cir::FPTypeInterface>(dstTy))
384 castKind = cir::CastKind::bool_to_float;
385 else
386 llvm_unreachable("Internal error: Cast to unexpected type");
387 } else if (cgf.getBuilder().isInt(srcTy)) {
388 if (cgf.getBuilder().isInt(dstTy))
389 castKind = cir::CastKind::integral;
390 else if (mlir::isa<cir::FPTypeInterface>(dstTy))
391 castKind = cir::CastKind::int_to_float;
392 else
393 llvm_unreachable("Internal error: Cast to unexpected type");
394 } else if (mlir::isa<cir::FPTypeInterface>(srcTy)) {
395 if (cgf.getBuilder().isInt(dstTy)) {
396 // If we can't recognize overflow as undefined behavior, assume that
397 // overflow saturates. This protects against normal optimizations if we
398 // are compiling with non-standard FP semantics.
399 if (!cgf.cgm.getCodeGenOpts().StrictFloatCastOverflow)
400 cgf.getCIRGenModule().errorNYI("strict float cast overflow");
402 castKind = cir::CastKind::float_to_int;
403 } else if (mlir::isa<cir::FPTypeInterface>(dstTy)) {
404 // TODO: split this to createFPExt/createFPTrunc
405 return builder.createFloatingCast(src, fullDstTy);
406 } else {
407 llvm_unreachable("Internal error: Cast to unexpected type");
408 }
409 } else {
410 llvm_unreachable("Internal error: Cast from unexpected type");
411 }
412
413 assert(castKind.has_value() && "Internal error: CastKind not set.");
414 return builder.create<cir::CastOp>(src.getLoc(), fullDstTy, *castKind, src);
415 }
416
417 mlir::Value
418 VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
419 return Visit(e->getReplacement());
420 }
421
422 mlir::Value VisitVAArgExpr(VAArgExpr *ve) {
423 QualType ty = ve->getType();
424
425 if (ty->isVariablyModifiedType()) {
426 cgf.cgm.errorNYI(ve->getSourceRange(),
427 "variably modified types in varargs");
428 }
429
430 return cgf.emitVAArg(ve);
431 }
432
433 mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *e);
434 mlir::Value
435 VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);
436
437 // Unary Operators.
438 mlir::Value VisitUnaryPostDec(const UnaryOperator *e) {
439 LValue lv = cgf.emitLValue(e->getSubExpr());
440 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, false);
441 }
442 mlir::Value VisitUnaryPostInc(const UnaryOperator *e) {
443 LValue lv = cgf.emitLValue(e->getSubExpr());
444 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, false);
445 }
446 mlir::Value VisitUnaryPreDec(const UnaryOperator *e) {
447 LValue lv = cgf.emitLValue(e->getSubExpr());
448 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, true);
449 }
450 mlir::Value VisitUnaryPreInc(const UnaryOperator *e) {
451 LValue lv = cgf.emitLValue(e->getSubExpr());
452 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, true);
453 }
454 mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
455 cir::UnaryOpKind kind, bool isPre) {
456 if (cgf.getLangOpts().OpenMP)
457 cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec OpenMP");
458
459 QualType type = e->getSubExpr()->getType();
460
461 mlir::Value value;
462 mlir::Value input;
463
464 if (type->getAs<AtomicType>()) {
465 cgf.cgm.errorNYI(e->getSourceRange(), "Atomic inc/dec");
466 // TODO(cir): This is not correct, but it will produce reasonable code
467 // until atomic operations are implemented.
468 value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
469 input = value;
470 } else {
471 value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
472 input = value;
473 }
474
475 // NOTE: When possible, more frequent cases are handled first.
476
477 // Special case of integer increment that we have to check first: bool++.
478 // Due to promotion rules, we get:
479 // bool++ -> bool = bool + 1
480 // -> bool = (int)bool + 1
481 // -> bool = ((int)bool + 1 != 0)
482 // An interesting aspect of this is that increment is always true.
483 // Decrement does not have this property.
484 if (kind == cir::UnaryOpKind::Inc && type->isBooleanType()) {
485 value = builder.getTrue(cgf.getLoc(e->getExprLoc()));
486 } else if (type->isIntegerType()) {
487 QualType promotedType;
488 [[maybe_unused]] bool canPerformLossyDemotionCheck = false;
489 if (cgf.getContext().isPromotableIntegerType(type)) {
490 promotedType = cgf.getContext().getPromotedIntegerType(type);
491 assert(promotedType != type && "Shouldn't promote to the same type.");
492 canPerformLossyDemotionCheck = true;
493 canPerformLossyDemotionCheck &=
494 cgf.getContext().getCanonicalType(type) !=
495 cgf.getContext().getCanonicalType(promotedType);
496 canPerformLossyDemotionCheck &=
497 type->isIntegerType() && promotedType->isIntegerType();
498
499 // TODO(cir): Currently, we store bitwidths in CIR types only for
500 // integers. This might also be required for other types.
501
502 assert(
503 (!canPerformLossyDemotionCheck ||
504 type->isSignedIntegerOrEnumerationType() ||
505 promotedType->isSignedIntegerOrEnumerationType() ||
506 mlir::cast<cir::IntType>(cgf.convertType(type)).getWidth() ==
507 mlir::cast<cir::IntType>(cgf.convertType(type)).getWidth()) &&
508 "The following check expects that if we do promotion to different "
509 "underlying canonical type, at least one of the types (either "
510 "base or promoted) will be signed, or the bitwidths will match.");
511 }
512
514 if (e->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
515 value = emitIncDecConsiderOverflowBehavior(e, value, kind);
516 } else {
517 cir::UnaryOpKind kind =
518 e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
519 // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
520 value = emitUnaryOp(e, kind, input, /*nsw=*/false);
521 }
522 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
523 QualType type = ptr->getPointeeType();
524 if (cgf.getContext().getAsVariableArrayType(type)) {
525 // VLA types don't have constant size.
526 cgf.cgm.errorNYI(e->getSourceRange(), "Pointer arithmetic on VLA");
527 return {};
528 } else if (type->isFunctionType()) {
529 // Arithmetic on function pointers (!) is just +-1.
530 cgf.cgm.errorNYI(e->getSourceRange(),
531 "Pointer arithmetic on function pointer");
532 return {};
533 } else {
534 // For everything else, we can just do a simple increment.
535 mlir::Location loc = cgf.getLoc(e->getSourceRange());
536 CIRGenBuilderTy &builder = cgf.getBuilder();
537 int amount = kind == cir::UnaryOpKind::Inc ? 1 : -1;
538 mlir::Value amt = builder.getSInt32(amount, loc);
540 value = builder.createPtrStride(loc, value, amt);
541 }
542 } else if (type->isVectorType()) {
543 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec vector");
544 return {};
545 } else if (type->isRealFloatingType()) {
547
548 if (type->isHalfType() &&
549 !cgf.getContext().getLangOpts().NativeHalfType) {
550 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec half");
551 return {};
552 }
553
554 if (mlir::isa<cir::SingleType, cir::DoubleType>(value.getType())) {
555 // Create the inc/dec operation.
556 // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
557 assert(kind == cir::UnaryOpKind::Inc ||
558 kind == cir::UnaryOpKind::Dec && "Invalid UnaryOp kind");
559 value = emitUnaryOp(e, kind, value);
560 } else {
561 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fp type");
562 return {};
563 }
564 } else if (type->isFixedPointType()) {
565 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fixed point");
566 return {};
567 } else {
568 assert(type->castAs<ObjCObjectPointerType>());
569 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec ObjectiveC pointer");
570 return {};
571 }
572
573 CIRGenFunction::SourceLocRAIIObject sourceloc{
574 cgf, cgf.getLoc(e->getSourceRange())};
575
576 // Store the updated result through the lvalue
577 if (lv.isBitField())
578 return cgf.emitStoreThroughBitfieldLValue(RValue::get(value), lv);
579 else
580 cgf.emitStoreThroughLValue(RValue::get(value), lv);
581
582 // If this is a postinc, return the value read from memory, otherwise use
583 // the updated value.
584 return isPre ? value : input;
585 }
586
587 mlir::Value emitIncDecConsiderOverflowBehavior(const UnaryOperator *e,
588 mlir::Value inVal,
589 cir::UnaryOpKind kind) {
590 assert(kind == cir::UnaryOpKind::Inc ||
591 kind == cir::UnaryOpKind::Dec && "Invalid UnaryOp kind");
592 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
593 case LangOptions::SOB_Defined:
594 return emitUnaryOp(e, kind, inVal, /*nsw=*/false);
595 case LangOptions::SOB_Undefined:
597 return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
598 case LangOptions::SOB_Trapping:
599 if (!e->canOverflow())
600 return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
601 cgf.cgm.errorNYI(e->getSourceRange(), "inc/def overflow SOB_Trapping");
602 return {};
603 }
604 llvm_unreachable("Unexpected signed overflow behavior kind");
605 }
606
607 mlir::Value VisitUnaryAddrOf(const UnaryOperator *e) {
608 if (llvm::isa<MemberPointerType>(e->getType())) {
609 cgf.cgm.errorNYI(e->getSourceRange(), "Address of member pointer");
610 return builder.getNullPtr(cgf.convertType(e->getType()),
611 cgf.getLoc(e->getExprLoc()));
612 }
613
614 return cgf.emitLValue(e->getSubExpr()).getPointer();
615 }
616
617 mlir::Value VisitUnaryDeref(const UnaryOperator *e) {
618 if (e->getType()->isVoidType())
619 return Visit(e->getSubExpr()); // the actual value should be unused
620 return emitLoadOfLValue(e);
621 }
622
623 mlir::Value VisitUnaryPlus(const UnaryOperator *e) {
624 QualType promotionType = getPromotionType(e->getSubExpr()->getType());
625 mlir::Value result =
626 emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Plus, promotionType);
627 if (result && !promotionType.isNull())
628 return emitUnPromotedValue(result, e->getType());
629 return result;
630 }
631
632 mlir::Value VisitUnaryMinus(const UnaryOperator *e) {
633 QualType promotionType = getPromotionType(e->getSubExpr()->getType());
634 mlir::Value result =
635 emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Minus, promotionType);
636 if (result && !promotionType.isNull())
637 return emitUnPromotedValue(result, e->getType());
638 return result;
639 }
640
641 mlir::Value emitUnaryPlusOrMinus(const UnaryOperator *e,
642 cir::UnaryOpKind kind,
643 QualType promotionType) {
644 ignoreResultAssign = false;
645 mlir::Value operand;
646 if (!promotionType.isNull())
647 operand = cgf.emitPromotedScalarExpr(e->getSubExpr(), promotionType);
648 else
649 operand = Visit(e->getSubExpr());
650
651 bool nsw =
652 kind == cir::UnaryOpKind::Minus && e->getType()->isSignedIntegerType();
653
654 // NOTE: LLVM codegen will lower this directly to either a FNeg
655 // or a Sub instruction. In CIR this will be handled later in LowerToLLVM.
656 return emitUnaryOp(e, kind, operand, nsw);
657 }
658
659 mlir::Value emitUnaryOp(const UnaryOperator *e, cir::UnaryOpKind kind,
660 mlir::Value input, bool nsw = false) {
661 return builder.create<cir::UnaryOp>(
662 cgf.getLoc(e->getSourceRange().getBegin()), input.getType(), kind,
663 input, nsw);
664 }
665
666 mlir::Value VisitUnaryNot(const UnaryOperator *e) {
667 ignoreResultAssign = false;
668 mlir::Value op = Visit(e->getSubExpr());
669 return emitUnaryOp(e, cir::UnaryOpKind::Not, op);
670 }
671
672 mlir::Value VisitUnaryLNot(const UnaryOperator *e);
673
674 mlir::Value VisitUnaryReal(const UnaryOperator *e);
675 mlir::Value VisitUnaryImag(const UnaryOperator *e);
676 mlir::Value VisitRealImag(const UnaryOperator *e,
677 QualType promotionType = QualType());
678
679 mlir::Value VisitUnaryExtension(const UnaryOperator *e) {
680 return Visit(e->getSubExpr());
681 }
682
683 mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
684 CIRGenFunction::CXXDefaultInitExprScope scope(cgf, die);
685 return Visit(die->getExpr());
686 }
687
688 mlir::Value VisitCXXThisExpr(CXXThisExpr *te) { return cgf.loadCXXThis(); }
689
690 mlir::Value VisitExprWithCleanups(ExprWithCleanups *e);
691 mlir::Value VisitCXXNewExpr(const CXXNewExpr *e) {
692 return cgf.emitCXXNewExpr(e);
693 }
694 mlir::Value VisitCXXDeleteExpr(const CXXDeleteExpr *e) {
695 cgf.emitCXXDeleteExpr(e);
696 return {};
697 }
698
699 mlir::Value VisitCXXThrowExpr(const CXXThrowExpr *e) {
700 cgf.emitCXXThrowExpr(e);
701 return {};
702 }
703
704 /// Emit a conversion from the specified type to the specified destination
705 /// type, both of which are CIR scalar types.
706 /// TODO: do we need ScalarConversionOpts here? Should be done in another
707 /// pass.
708 mlir::Value
709 emitScalarConversion(mlir::Value src, QualType srcType, QualType dstType,
710 SourceLocation loc,
711 ScalarConversionOpts opts = ScalarConversionOpts()) {
712 // All conversions involving fixed point types should be handled by the
713 // emitFixedPoint family functions. This is done to prevent bloating up
714 // this function more, and although fixed point numbers are represented by
715 // integers, we do not want to follow any logic that assumes they should be
716 // treated as integers.
717 // TODO(leonardchan): When necessary, add another if statement checking for
718 // conversions to fixed point types from other types.
719 // conversions to fixed point types from other types.
720 if (srcType->isFixedPointType() || dstType->isFixedPointType()) {
721 cgf.getCIRGenModule().errorNYI(loc, "fixed point conversions");
722 return {};
723 }
724
725 srcType = srcType.getCanonicalType();
726 dstType = dstType.getCanonicalType();
727 if (srcType == dstType) {
728 if (opts.emitImplicitIntegerSignChangeChecks)
729 cgf.getCIRGenModule().errorNYI(loc,
730 "implicit integer sign change checks");
731 return src;
732 }
733
734 if (dstType->isVoidType())
735 return {};
736
737 mlir::Type mlirSrcType = src.getType();
738
739 // Handle conversions to bool first, they are special: comparisons against
740 // 0.
741 if (dstType->isBooleanType())
742 return emitConversionToBool(src, srcType, cgf.getLoc(loc));
743
744 mlir::Type mlirDstType = cgf.convertType(dstType);
745
746 if (srcType->isHalfType() &&
747 !cgf.getContext().getLangOpts().NativeHalfType) {
748 // Cast to FP using the intrinsic if the half type itself isn't supported.
749 if (mlir::isa<cir::FPTypeInterface>(mlirDstType)) {
750 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
751 cgf.getCIRGenModule().errorNYI(loc,
752 "cast via llvm.convert.from.fp16");
753 } else {
754 // Cast to other types through float, using either the intrinsic or
755 // FPExt, depending on whether the half type itself is supported (as
756 // opposed to operations on half, available with NativeHalfType).
757 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
758 cgf.getCIRGenModule().errorNYI(loc,
759 "cast via llvm.convert.from.fp16");
760 // FIXME(cir): For now lets pretend we shouldn't use the conversion
761 // intrinsics and insert a cast here unconditionally.
762 src = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, src,
763 cgf.FloatTy);
764 srcType = cgf.getContext().FloatTy;
765 mlirSrcType = cgf.FloatTy;
766 }
767 }
768
769 // TODO(cir): LLVM codegen ignore conversions like int -> uint,
770 // is there anything to be done for CIR here?
771 if (mlirSrcType == mlirDstType) {
772 if (opts.emitImplicitIntegerSignChangeChecks)
773 cgf.getCIRGenModule().errorNYI(loc,
774 "implicit integer sign change checks");
775 return src;
776 }
777
778 // Handle pointer conversions next: pointers can only be converted to/from
779 // other pointers and integers. Check for pointer types in terms of LLVM, as
780 // some native types (like Obj-C id) may map to a pointer type.
781 if (auto dstPT = dyn_cast<cir::PointerType>(mlirDstType)) {
782 cgf.getCIRGenModule().errorNYI(loc, "pointer casts");
783 return builder.getNullPtr(dstPT, src.getLoc());
784 }
785
786 if (isa<cir::PointerType>(mlirSrcType)) {
787 // Must be an ptr to int cast.
788 assert(isa<cir::IntType>(mlirDstType) && "not ptr->int?");
789 return builder.createPtrToInt(src, mlirDstType);
790 }
791
792 // A scalar can be splatted to an extended vector of the same element type
793 if (dstType->isExtVectorType() && !srcType->isVectorType()) {
794 // Sema should add casts to make sure that the source expression's type
795 // is the same as the vector's element type (sans qualifiers)
796 assert(dstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
797 srcType.getTypePtr() &&
798 "Splatted expr doesn't match with vector element type?");
799
800 cgf.getCIRGenModule().errorNYI(loc, "vector splatting");
801 return {};
802 }
803
804 if (srcType->isMatrixType() && dstType->isMatrixType()) {
805 cgf.getCIRGenModule().errorNYI(loc,
806 "matrix type to matrix type conversion");
807 return {};
808 }
809 assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
810 "Internal error: conversion between matrix type and scalar type");
811
812 // Finally, we have the arithmetic types or vectors of arithmetic types.
813 mlir::Value res = nullptr;
814 mlir::Type resTy = mlirDstType;
815
816 res = emitScalarCast(src, srcType, dstType, mlirSrcType, mlirDstType, opts);
817
818 if (mlirDstType != resTy) {
819 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
820 cgf.getCIRGenModule().errorNYI(loc, "cast via llvm.convert.to.fp16");
821 }
822 // FIXME(cir): For now we never use FP16 conversion intrinsics even if
823 // required by the target. Change that once this is implemented
824 res = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, res,
825 resTy);
826 }
827
828 if (opts.emitImplicitIntegerTruncationChecks)
829 cgf.getCIRGenModule().errorNYI(loc, "implicit integer truncation checks");
830
831 if (opts.emitImplicitIntegerSignChangeChecks)
832 cgf.getCIRGenModule().errorNYI(loc,
833 "implicit integer sign change checks");
834
835 return res;
836 }
837
  /// Gather operands, types, opcode and location for a binary operator into
  /// a BinOpInfo. When \p promotionType is non-null, both operands are
  /// emitted at that promoted type and it becomes the result's fullType.
  BinOpInfo emitBinOps(const BinaryOperator *e,
                       QualType promotionType = QualType()) {
    BinOpInfo result;
    result.lhs = cgf.emitPromotedScalarExpr(e->getLHS(), promotionType);
    result.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionType);
    if (!promotionType.isNull())
      result.fullType = promotionType;
    else
      result.fullType = e->getType();
    result.compType = result.fullType;
    if (const auto *vecType = dyn_cast_or_null<VectorType>(result.fullType)) {
      // Computations on vectors are performed on the element type.
      result.compType = vecType->getElementType();
    }
    result.opcode = e->getOpcode();
    result.loc = e->getSourceRange();
    // TODO(cir): Result.FPFeatures
    result.e = e;
    return result;
  }
858
  // Emitters for the individual binary operations. Each consumes a BinOpInfo
  // prepared by emitBinOps and returns the resulting scalar value.
  mlir::Value emitMul(const BinOpInfo &ops);
  mlir::Value emitDiv(const BinOpInfo &ops);
  mlir::Value emitRem(const BinOpInfo &ops);
  mlir::Value emitAdd(const BinOpInfo &ops);
  mlir::Value emitSub(const BinOpInfo &ops);
  mlir::Value emitShl(const BinOpInfo &ops);
  mlir::Value emitShr(const BinOpInfo &ops);
  mlir::Value emitAnd(const BinOpInfo &ops);
  mlir::Value emitXor(const BinOpInfo &ops);
  mlir::Value emitOr(const BinOpInfo &ops);

  // Compound-assignment support: compute and store through the LHS lvalue
  // (the updated r-value is passed back through \p result).
  LValue emitCompoundAssignLValue(
      const CompoundAssignOperator *e,
      mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &),
      mlir::Value &result);
  mlir::Value
  emitCompoundAssign(const CompoundAssignOperator *e,
                     mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &));
877
878 // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM
879 // codegen.
880 QualType getPromotionType(QualType ty) {
881 const clang::ASTContext &ctx = cgf.getContext();
882 if (auto *complexTy = ty->getAs<ComplexType>()) {
883 QualType elementTy = complexTy->getElementType();
884 if (elementTy.UseExcessPrecision(ctx))
885 return ctx.getComplexType(ctx.FloatTy);
886 }
887
888 if (ty.UseExcessPrecision(cgf.getContext())) {
889 if (auto *vt = ty->getAs<VectorType>()) {
890 unsigned numElements = vt->getNumElements();
891 return ctx.getVectorType(ctx.FloatTy, numElements, vt->getVectorKind());
892 }
893 return cgf.getContext().FloatTy;
894 }
895
896 return QualType();
897 }
898
// Binary operators and binary compound assignment operators.
// For each opcode OP this expands to:
//  - VisitBinOP: emit at the promoted (excess-precision) type when one
//    applies, then un-promote the result back to the expression type.
//  - VisitBinOPAssign: delegate to emitCompoundAssign with the emitter.
#define HANDLEBINOP(OP)                                                        \
  mlir::Value VisitBin##OP(const BinaryOperator *e) {                          \
    QualType promotionTy = getPromotionType(e->getType());                     \
    auto result = emit##OP(emitBinOps(e, promotionTy));                        \
    if (result && !promotionTy.isNull())                                       \
      result = emitUnPromotedValue(result, e->getType());                      \
    return result;                                                             \
  }                                                                            \
  mlir::Value VisitBin##OP##Assign(const CompoundAssignOperator *e) {          \
    return emitCompoundAssign(e, &ScalarExprEmitter::emit##OP);                \
  }

  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(Xor)
#undef HANDLEBINOP
923
  /// Emit a comparison (<, >, <=, >=, ==, !=) as cir.cmp / cir.vec.cmp and
  /// convert the boolean result back to the expression's result type.
  mlir::Value emitCmp(const BinaryOperator *e) {
    const mlir::Location loc = cgf.getLoc(e->getExprLoc());
    mlir::Value result;
    QualType lhsTy = e->getLHS()->getType();
    QualType rhsTy = e->getRHS()->getType();

    // Map the Clang comparison opcode to the CIR comparison kind.
    auto clangCmpToCIRCmp =
        [](clang::BinaryOperatorKind clangCmp) -> cir::CmpOpKind {
      switch (clangCmp) {
      case BO_LT:
        return cir::CmpOpKind::lt;
      case BO_GT:
        return cir::CmpOpKind::gt;
      case BO_LE:
        return cir::CmpOpKind::le;
      case BO_GE:
        return cir::CmpOpKind::ge;
      case BO_EQ:
        return cir::CmpOpKind::eq;
      case BO_NE:
        return cir::CmpOpKind::ne;
      default:
        llvm_unreachable("unsupported comparison kind for cir.cmp");
      }
    };

    cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
    if (lhsTy->getAs<MemberPointerType>()) {
      // Member pointers only support equality comparisons.
      assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
      mlir::Value lhs = cgf.emitScalarExpr(e->getLHS());
      mlir::Value rhs = cgf.emitScalarExpr(e->getRHS());
      result = builder.createCompare(loc, kind, lhs, rhs);
    } else if (!lhsTy->isAnyComplexType() && !rhsTy->isAnyComplexType()) {
      BinOpInfo boInfo = emitBinOps(e);
      mlir::Value lhs = boInfo.lhs;
      mlir::Value rhs = boInfo.rhs;

      if (lhsTy->isVectorType()) {
        if (!e->getType()->isVectorType()) {
          // If AltiVec, the comparison results in a numeric type, so we use
          // intrinsics comparing vectors and giving 0 or 1 as a result
          cgf.cgm.errorNYI(loc, "AltiVec comparison");
        } else {
          // Other kinds of vectors. Element-wise comparison returning
          // a vector.
          result = builder.create<cir::VecCmpOp>(
              cgf.getLoc(boInfo.loc), cgf.convertType(boInfo.fullType), kind,
              boInfo.lhs, boInfo.rhs);
        }
      } else if (boInfo.isFixedPointOp()) {
        cgf.cgm.errorNYI(loc, "fixed point comparisons");
        result = builder.getBool(false, loc);
      } else {
        // integers and pointers
        if (cgf.cgm.getCodeGenOpts().StrictVTablePointers &&
            mlir::isa<cir::PointerType>(lhs.getType()) &&
            mlir::isa<cir::PointerType>(rhs.getType())) {
          cgf.cgm.errorNYI(loc, "strict vtable pointer comparisons");
        }

        // NOTE(review): this shadows the outer 'kind' and recomputes the
        // same value via clangCmpToCIRCmp(e->getOpcode()).
        cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
        result = builder.createCompare(loc, kind, lhs, rhs);
      }
    } else {
      // Complex Comparison: can only be an equality comparison.
      assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);

      BinOpInfo boInfo = emitBinOps(e);
      result = builder.create<cir::CmpOp>(loc, kind, boInfo.lhs, boInfo.rhs);
    }

    // Comparisons produce BoolTy; convert to the expression's actual result
    // type (e.g. int in C).
    return emitScalarConversion(result, cgf.getContext().BoolTy, e->getType(),
                                e->getExprLoc());
  }
1000
// Comparisons.
// Every relational/equality operator funnels into emitCmp above.
#define VISITCOMP(CODE)                                                        \
  mlir::Value VisitBin##CODE(const BinaryOperator *E) { return emitCmp(E); }
  VISITCOMP(LT)
  VISITCOMP(GT)
  VISITCOMP(LE)
  VISITCOMP(GE)
  VISITCOMP(EQ)
  VISITCOMP(NE)
#undef VISITCOMP
1011
  /// Emit a simple assignment: evaluate the RHS, store it through the LHS
  /// lvalue, and produce the assigned value (reloading only for volatile
  /// lvalues in C++).
  mlir::Value VisitBinAssign(const BinaryOperator *e) {
    const bool ignore = std::exchange(ignoreResultAssign, false);

    mlir::Value rhs;
    LValue lhs;

    // Dispatch on the ObjC ownership qualifier of the LHS type.
    // NOTE(review): the case labels of this switch are not visible in this
    // extract; the statements below belong to the default/none path.
    switch (e->getLHS()->getType().getObjCLifetime()) {
      break;
      // __block variables need to have the rhs evaluated first, plus this
      // should improve codegen just a little.
      rhs = Visit(e->getRHS());
      // TODO(cir): This needs to be emitCheckedLValue() once we support
      // sanitizers
      lhs = cgf.emitLValue(e->getLHS());

      // Store the value into the LHS. Bit-fields are handled specially because
      // the result is altered by the store, i.e., [C99 6.5.16p1]
      // 'An assignment expression has the value of the left operand after the
      // assignment...'.
      if (lhs.isBitField()) {
        rhs = cgf.emitStoreThroughBitfieldLValue(RValue::get(rhs), lhs);
      } else {
        cgf.emitNullabilityCheck(lhs, rhs, e->getExprLoc());
            cgf, cgf.getLoc(e->getSourceRange())};
        cgf.emitStoreThroughLValue(RValue::get(rhs), lhs);
      }
    }

    // If the result is clearly ignored, return now.
    if (ignore)
      return nullptr;

    // The result of an assignment in C is the assigned r-value.
    if (!cgf.getLangOpts().CPlusPlus)
      return rhs;

    // If the lvalue is non-volatile, return the computed value of the
    // assignment.
    if (!lhs.isVolatile())
      return rhs;

    // Otherwise, reload the value.
    return emitLoadOfLValue(lhs, e->getExprLoc());
  }
1064
1065 mlir::Value VisitBinComma(const BinaryOperator *e) {
1066 cgf.emitIgnoredExpr(e->getLHS());
1067 // NOTE: We don't need to EnsureInsertPoint() like LLVM codegen.
1068 return Visit(e->getRHS());
1069 }
1070
  /// Emit logical AND. For vector operands there is no short-circuiting:
  /// both sides are compared against zero and the boolean vectors are ANDed.
  /// For scalars, a cir.ternary implements short-circuit evaluation.
  mlir::Value VisitBinLAnd(const clang::BinaryOperator *e) {
    if (e->getType()->isVectorType()) {
      mlir::Location loc = cgf.getLoc(e->getExprLoc());
      auto vecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
      // Build an all-zero vector to compare each operand against.
      mlir::Value zeroValue = builder.getNullValue(vecTy.getElementType(), loc);
      SmallVector<mlir::Value, 16> elements(vecTy.getSize(), zeroValue);
      auto zeroVec = cir::VecCreateOp::create(builder, loc, vecTy, elements);

      mlir::Value lhs = Visit(e->getLHS());
      mlir::Value rhs = Visit(e->getRHS());

      // (lhs != 0) & (rhs != 0), element-wise, then cast back to vecTy.
      auto cmpOpKind = cir::CmpOpKind::ne;
      lhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, lhs, zeroVec);
      rhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, rhs, zeroVec);
      mlir::Value vecOr = builder.createAnd(loc, lhs, rhs);
      return builder.createIntCast(vecOr, vecTy);
    }

    mlir::Type resTy = cgf.convertType(e->getType());
    mlir::Location loc = cgf.getLoc(e->getExprLoc());

    CIRGenFunction::ConditionalEvaluation eval(cgf);

    // Short-circuit: the RHS is only evaluated in the true branch; the
    // false branch yields constant false.
    mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
    auto resOp = builder.create<cir::TernaryOp>(
        loc, lhsCondV, /*trueBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          CIRGenFunction::LexicalScope lexScope{cgf, loc,
                                                b.getInsertionBlock()};
          cgf.curLexScope->setAsTernary();
          mlir::Value res = cgf.evaluateExprAsBool(e->getRHS());
          lexScope.forceCleanup();
          cir::YieldOp::create(b, loc, res);
        },
        /*falseBuilder*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          CIRGenFunction::LexicalScope lexScope{cgf, loc,
                                                b.getInsertionBlock()};
          cgf.curLexScope->setAsTernary();
          auto res = cir::ConstantOp::create(b, loc, builder.getFalseAttr());
          cir::YieldOp::create(b, loc, res.getRes());
        });
    return maybePromoteBoolResult(resOp.getResult(), resTy);
  }
1116
  /// Emit logical OR. Mirrors VisitBinLAnd: no short-circuit for vectors
  /// (element-wise compare + OR); a cir.ternary short-circuits scalars.
  mlir::Value VisitBinLOr(const clang::BinaryOperator *e) {
    if (e->getType()->isVectorType()) {
      mlir::Location loc = cgf.getLoc(e->getExprLoc());
      auto vecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
      // Build an all-zero vector to compare each operand against.
      mlir::Value zeroValue = builder.getNullValue(vecTy.getElementType(), loc);
      SmallVector<mlir::Value, 16> elements(vecTy.getSize(), zeroValue);
      auto zeroVec = cir::VecCreateOp::create(builder, loc, vecTy, elements);

      mlir::Value lhs = Visit(e->getLHS());
      mlir::Value rhs = Visit(e->getRHS());

      // (lhs != 0) | (rhs != 0), element-wise, then cast back to vecTy.
      auto cmpOpKind = cir::CmpOpKind::ne;
      lhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, lhs, zeroVec);
      rhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, rhs, zeroVec);
      mlir::Value vecOr = builder.createOr(loc, lhs, rhs);
      return builder.createIntCast(vecOr, vecTy);
    }

    mlir::Type resTy = cgf.convertType(e->getType());
    mlir::Location loc = cgf.getLoc(e->getExprLoc());

    CIRGenFunction::ConditionalEvaluation eval(cgf);

    // Short-circuit: the true branch yields constant true; the RHS is only
    // evaluated in the false branch.
    mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
    auto resOp = builder.create<cir::TernaryOp>(
        loc, lhsCondV, /*trueBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          CIRGenFunction::LexicalScope lexScope{cgf, loc,
                                                b.getInsertionBlock()};
          cgf.curLexScope->setAsTernary();
          auto res = cir::ConstantOp::create(b, loc, builder.getTrueAttr());
          cir::YieldOp::create(b, loc, res.getRes());
        },
        /*falseBuilder*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          CIRGenFunction::LexicalScope lexScope{cgf, loc,
                                                b.getInsertionBlock()};
          cgf.curLexScope->setAsTernary();
          mlir::Value res = cgf.evaluateExprAsBool(e->getRHS());
          lexScope.forceCleanup();
          cir::YieldOp::create(b, loc, res);
        });

    return maybePromoteBoolResult(resOp.getResult(), resTy);
  }
1163
  /// Atomic expressions are handled by the atomic machinery; unwrap the
  /// scalar result via getValue().
  mlir::Value VisitAtomicExpr(AtomicExpr *e) {
    return cgf.emitAtomicExpr(e).getValue();
  }
1167};
1168
/// Emit a compound assignment (e.g. +=): load the LHS, convert it to the
/// computation type, apply \p func, convert back, and store through the LHS
/// lvalue. The computed r-value is returned through \p result.
LValue ScalarExprEmitter::emitCompoundAssignLValue(
    const CompoundAssignOperator *e,
    mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &),
    mlir::Value &result) {
  // NOTE(review): the guard for this delegation (presumably a complex-type
  // check) is not visible in this extract — verify against the full file.
  return cgf.emitScalarCompoundAssignWithComplex(e, result);

  QualType lhsTy = e->getLHS()->getType();
  BinOpInfo opInfo;

  // Emit the RHS first. __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.

  QualType promotionTypeCR = getPromotionType(e->getComputationResultType());
  if (promotionTypeCR.isNull())
    promotionTypeCR = e->getComputationResultType();

  QualType promotionTypeLHS = getPromotionType(e->getComputationLHSType());
  QualType promotionTypeRHS = getPromotionType(e->getRHS()->getType());

  if (!promotionTypeRHS.isNull())
    opInfo.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS);
  else
    opInfo.rhs = Visit(e->getRHS());

  // The computation runs at the (possibly promoted) computation-result type.
  opInfo.fullType = promotionTypeCR;
  opInfo.compType = opInfo.fullType;
  if (const auto *vecType = dyn_cast_or_null<VectorType>(opInfo.fullType))
    opInfo.compType = vecType->getElementType();
  opInfo.opcode = e->getOpcode();
  opInfo.fpfeatures = e->getFPFeaturesInEffect(cgf.getLangOpts());
  opInfo.e = e;
  opInfo.loc = e->getSourceRange();

  // Load/convert the LHS
  LValue lhsLV = cgf.emitLValue(e->getLHS());

  if (lhsTy->getAs<AtomicType>()) {
    cgf.cgm.errorNYI(result.getLoc(), "atomic lvalue assign");
    return LValue();
  }

  opInfo.lhs = emitLoadOfLValue(lhsLV, e->getExprLoc());

  CIRGenFunction::SourceLocRAIIObject sourceloc{
      cgf, cgf.getLoc(e->getSourceRange())};
  SourceLocation loc = e->getExprLoc();
  if (!promotionTypeLHS.isNull())
    opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy, promotionTypeLHS, loc);
  else
    opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy,
                                      e->getComputationLHSType(), loc);

  // Expand the binary operator.
  result = (this->*func)(opInfo);

  // Convert the result back to the LHS type,
  // potentially with Implicit Conversion sanitizer check.
  result = emitScalarConversion(result, promotionTypeCR, lhsTy, loc,
                                ScalarConversionOpts(cgf.sanOpts));

  // Store the result value into the LHS lvalue. Bit-fields are handled
  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (lhsLV.isBitField())
    cgf.emitStoreThroughBitfieldLValue(RValue::get(result), lhsLV);
  else
    cgf.emitStoreThroughLValue(RValue::get(result), lhsLV);

  if (cgf.getLangOpts().OpenMP)
    cgf.cgm.errorNYI(e->getSourceRange(), "openmp");

  return lhsLV;
}
1244
1245mlir::Value ScalarExprEmitter::emitComplexToScalarConversion(mlir::Location lov,
1246 mlir::Value value,
1247 CastKind kind,
1248 QualType destTy) {
1249 cir::CastKind castOpKind;
1250 switch (kind) {
1251 case CK_FloatingComplexToReal:
1252 castOpKind = cir::CastKind::float_complex_to_real;
1253 break;
1254 case CK_IntegralComplexToReal:
1255 castOpKind = cir::CastKind::int_complex_to_real;
1256 break;
1257 case CK_FloatingComplexToBoolean:
1258 castOpKind = cir::CastKind::float_complex_to_bool;
1259 break;
1260 case CK_IntegralComplexToBoolean:
1261 castOpKind = cir::CastKind::int_complex_to_bool;
1262 break;
1263 default:
1264 llvm_unreachable("invalid complex-to-scalar cast kind");
1265 }
1266
1267 return builder.createCast(lov, castOpKind, value, cgf.convertType(destTy));
1268}
1269
1270mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e,
1271 QualType promotionType) {
1272 e = e->IgnoreParens();
1273 if (const auto *bo = dyn_cast<BinaryOperator>(e)) {
1274 switch (bo->getOpcode()) {
1275#define HANDLE_BINOP(OP) \
1276 case BO_##OP: \
1277 return emit##OP(emitBinOps(bo, promotionType));
1278 HANDLE_BINOP(Add)
1279 HANDLE_BINOP(Sub)
1280 HANDLE_BINOP(Mul)
1281 HANDLE_BINOP(Div)
1282#undef HANDLE_BINOP
1283 default:
1284 break;
1285 }
1286 } else if (const auto *uo = dyn_cast<UnaryOperator>(e)) {
1287 switch (uo->getOpcode()) {
1288 case UO_Imag:
1289 case UO_Real:
1290 return VisitRealImag(uo, promotionType);
1291 case UO_Minus:
1292 return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Minus, promotionType);
1293 case UO_Plus:
1294 return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Plus, promotionType);
1295 default:
1296 break;
1297 }
1298 }
1299 mlir::Value result = Visit(const_cast<Expr *>(e));
1300 if (result) {
1301 if (!promotionType.isNull())
1302 return emitPromotedValue(result, promotionType);
1303 return emitUnPromotedValue(result, e->getType());
1304 }
1305 return result;
1306}
1307
1308mlir::Value ScalarExprEmitter::emitCompoundAssign(
1309 const CompoundAssignOperator *e,
1310 mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &)) {
1311
1312 bool ignore = std::exchange(ignoreResultAssign, false);
1313 mlir::Value rhs;
1314 LValue lhs = emitCompoundAssignLValue(e, func, rhs);
1315
1316 // If the result is clearly ignored, return now.
1317 if (ignore)
1318 return {};
1319
1320 // The result of an assignment in C is the assigned r-value.
1321 if (!cgf.getLangOpts().CPlusPlus)
1322 return rhs;
1323
1324 // If the lvalue is non-volatile, return the computed value of the assignment.
1325 if (!lhs.isVolatile())
1326 return rhs;
1327
1328 // Otherwise, reload the value.
1329 return emitLoadOfLValue(lhs, e->getExprLoc());
1330}
1331
1332mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *e) {
1333 mlir::Location scopeLoc = cgf.getLoc(e->getSourceRange());
1334 mlir::OpBuilder &builder = cgf.builder;
1335
1336 auto scope = cir::ScopeOp::create(
1337 builder, scopeLoc,
1338 /*scopeBuilder=*/
1339 [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) {
1340 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1341 builder.getInsertionBlock()};
1342 mlir::Value scopeYieldVal = Visit(e->getSubExpr());
1343 if (scopeYieldVal) {
1344 // Defend against dominance problems caused by jumps out of expression
1345 // evaluation through the shared cleanup block.
1346 lexScope.forceCleanup();
1347 cir::YieldOp::create(builder, loc, scopeYieldVal);
1348 yieldTy = scopeYieldVal.getType();
1349 }
1350 });
1351
1352 return scope.getNumResults() > 0 ? scope->getResult(0) : nullptr;
1353}
1354
1355} // namespace
1356
/// Dispatch a compound assignment to the matching ScalarExprEmitter emitter
/// and return the updated lvalue.
/// NOTE(review): the signature line of this definition is not visible in
/// this extract.
LValue
  ScalarExprEmitter emitter(*this, builder);
  mlir::Value result;
  switch (e->getOpcode()) {
// Expands to one case per compound-assignable opcode, delegating to
// emitCompoundAssignLValue with the corresponding binop emitter.
#define COMPOUND_OP(Op)                                                        \
  case BO_##Op##Assign:                                                        \
    return emitter.emitCompoundAssignLValue(e, &ScalarExprEmitter::emit##Op,   \
                                            result)
    COMPOUND_OP(Mul);
    COMPOUND_OP(Div);
    COMPOUND_OP(Rem);
    COMPOUND_OP(Add);
    COMPOUND_OP(Sub);
    COMPOUND_OP(Shl);
    COMPOUND_OP(Shr);
    COMPOUND_OP(Xor);
    COMPOUND_OP(Or);
#undef COMPOUND_OP

  // Every remaining opcode is not a compound assignment.
  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }
  llvm_unreachable("Unhandled compound assignment operator");
}
1405
/// Emit the computation of the specified expression of scalar type.
/// Asserts that \p e is non-null and has scalar evaluation kind, then
/// delegates to the scalar emitter's visitor dispatch.
  assert(e && hasScalarEvaluationKind(e->getType()) &&
         "Invalid scalar expression to emit");

  return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
}
1413
// Emit \p e as a scalar, at \p promotionType when one is provided; otherwise
// visit it normally. (The signature start is not visible in this extract.)
                                                 QualType promotionType) {
  if (!promotionType.isNull())
    return ScalarExprEmitter(*this, builder).emitPromoted(e, promotionType);
  return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
}
1420
/// Whether a null-pointer expression must still be visited for its side
/// effects. NOTE(review): the guard condition before the first return is not
/// visible in this extract.
[[maybe_unused]] static bool mustVisitNullValue(const Expr *e) {
  // If a null pointer expression's type is the C++0x nullptr_t and
  // the expression is not a simple literal, it must be evaluated
  // for its potential side effects.
  return false;
  return e->getType()->isNullPtrType();
}
1429
1430/// If \p e is a widened promoted integer, get its base (unpromoted) type.
1431static std::optional<QualType>
1432getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e) {
1433 const Expr *base = e->IgnoreImpCasts();
1434 if (e == base)
1435 return std::nullopt;
1436
1437 QualType baseTy = base->getType();
1438 if (!astContext.isPromotableIntegerType(baseTy) ||
1439 astContext.getTypeSize(baseTy) >= astContext.getTypeSize(e->getType()))
1440 return std::nullopt;
1441
1442 return baseTy;
1443}
1444
1445/// Check if \p e is a widened promoted integer.
1446[[maybe_unused]] static bool isWidenedIntegerOp(const ASTContext &astContext,
1447 const Expr *e) {
1448 return getUnwidenedIntegerType(astContext, e).has_value();
1449}
1450
1451/// Check if we can skip the overflow check for \p Op.
1452[[maybe_unused]] static bool canElideOverflowCheck(const ASTContext &astContext,
1453 const BinOpInfo &op) {
1454 assert((isa<UnaryOperator>(op.e) || isa<BinaryOperator>(op.e)) &&
1455 "Expected a unary or binary operator");
1456
1457 // If the binop has constant inputs and we can prove there is no overflow,
1458 // we can elide the overflow check.
1459 if (!op.mayHaveIntegerOverflow())
1460 return true;
1461
1462 // If a unary op has a widened operand, the op cannot overflow.
1463 if (const auto *uo = dyn_cast<UnaryOperator>(op.e))
1464 return !uo->canOverflow();
1465
1466 // We usually don't need overflow checks for binops with widened operands.
1467 // Multiplication with promoted unsigned operands is a special case.
1468 const auto *bo = cast<BinaryOperator>(op.e);
1469 std::optional<QualType> optionalLHSTy =
1470 getUnwidenedIntegerType(astContext, bo->getLHS());
1471 if (!optionalLHSTy)
1472 return false;
1473
1474 std::optional<QualType> optionalRHSTy =
1475 getUnwidenedIntegerType(astContext, bo->getRHS());
1476 if (!optionalRHSTy)
1477 return false;
1478
1479 QualType lhsTy = *optionalLHSTy;
1480 QualType rhsTy = *optionalRHSTy;
1481
1482 // This is the simple case: binops without unsigned multiplication, and with
1483 // widened operands. No overflow check is needed here.
1484 if ((op.opcode != BO_Mul && op.opcode != BO_MulAssign) ||
1485 !lhsTy->isUnsignedIntegerType() || !rhsTy->isUnsignedIntegerType())
1486 return true;
1487
1488 // For unsigned multiplication the overflow check can be elided if either one
1489 // of the unpromoted types are less than half the size of the promoted type.
1490 unsigned promotedSize = astContext.getTypeSize(op.e->getType());
1491 return (2 * astContext.getTypeSize(lhsTy)) < promotedSize ||
1492 (2 * astContext.getTypeSize(rhsTy)) < promotedSize;
1493}
1494
/// Emit pointer + index arithmetic.
/// NOTE(review): the opening signature line, the definition of 'expr', and
/// the index negation for subtraction are not visible in this extract.
                                            const BinOpInfo &op,
                                            bool isSubtraction) {
  // Must have binary (not unary) expr here. Unary pointer
  // increment/decrement doesn't use this path.

  mlir::Value pointer = op.lhs;
  Expr *pointerOperand = expr->getLHS();
  mlir::Value index = op.rhs;
  Expr *indexOperand = expr->getRHS();

  // In the case of subtraction, the FE has ensured that the LHS is always the
  // pointer. However, addition can have the pointer on either side. We will
  // always have a pointer operand and an integer operand, so if the LHS wasn't
  // a pointer, we need to swap our values.
  if (!isSubtraction && !mlir::isa<cir::PointerType>(pointer.getType())) {
    std::swap(pointer, index);
    std::swap(pointerOperand, indexOperand);
  }
  assert(mlir::isa<cir::PointerType>(pointer.getType()) &&
         "Need a pointer operand");
  assert(mlir::isa<cir::IntType>(index.getType()) && "Need an integer operand");

  // Some versions of glibc and gcc use idioms (particularly in their malloc
  // routines) that add a pointer-sized integer (known to be a pointer value)
  // to a null pointer in order to cast the value back to an integer or as
  // part of a pointer alignment algorithm. This is undefined behavior, but
  // we'd like to be able to compile programs that use it.
  //
  // Normally, we'd generate a GEP with a null-pointer base here in response
  // to that code, but it's also UB to dereference a pointer created that
  // way. Instead (as an acknowledged hack to tolerate the idiom) we will
  // generate a direct cast of the integer value to a pointer.
  //
  // The idiom (p = nullptr + N) is not met if any of the following are true:
  //
  // The operation is subtraction.
  // The index is not pointer-sized.
  // The pointer type is not byte-sized.
  //
      cgf.getContext(), op.opcode, expr->getLHS(), expr->getRHS()))
    return cgf.getBuilder().createIntToPtr(index, pointer.getType());

  // Differently from LLVM codegen, ABI bits for index sizes is handled during
  // LLVM lowering.

  // If this is subtraction, negate the index.
  if (isSubtraction)

  const PointerType *pointerType =
      pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
    cgf.cgm.errorNYI("Objective-C:pointer arithmetic with non-pointer type");
    return nullptr;
  }

  QualType elementType = pointerType->getPointeeType();
  if (cgf.getContext().getAsVariableArrayType(elementType)) {
    cgf.cgm.errorNYI("variable array type");
    return nullptr;
  }

  if (elementType->isVoidType() || elementType->isFunctionType()) {
    cgf.cgm.errorNYI("void* or function pointer arithmetic");
    return nullptr;
  }

  // Advance the pointer by 'index' elements; element-size scaling is applied
  // during LLVM lowering (see comment above).
  return cgf.getBuilder().create<cir::PtrStrideOp>(
      cgf.getLoc(op.e->getExprLoc()), pointer.getType(), pointer, index);
}
1572
/// Emit multiplication. Signed integers pick wrapping vs. NSW semantics from
/// the signed-overflow behavior mode; FP uses cir fmul; everything else falls
/// through to a generic cir.binop.
mlir::Value ScalarExprEmitter::emitMul(const BinOpInfo &ops) {
  const mlir::Location loc = cgf.getLoc(ops.loc);
  if (ops.compType->isSignedIntegerOrEnumerationType()) {
    switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return builder.createMul(loc, ops.lhs, ops.rhs);
      [[fallthrough]];
    case LangOptions::SOB_Undefined:
      if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return builder.createNSWMul(loc, ops.lhs, ops.rhs);
      [[fallthrough]];
    case LangOptions::SOB_Trapping:
      if (canElideOverflowCheck(cgf.getContext(), ops))
        return builder.createNSWMul(loc, ops.lhs, ops.rhs);
      cgf.cgm.errorNYI("sanitizers");
    }
  }
  if (ops.fullType->isConstantMatrixType()) {
    cgf.cgm.errorNYI("matrix types");
    return nullptr;
  }
  if (ops.compType->isUnsignedIntegerType() &&
      cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
      !canElideOverflowCheck(cgf.getContext(), ops))
    cgf.cgm.errorNYI("unsigned int overflow sanitizer");

  if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
    return builder.createFMul(loc, ops.lhs, ops.rhs);
  }

  if (ops.isFixedPointOp()) {
    cgf.cgm.errorNYI("fixed point");
    return nullptr;
  }

  return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
                                    cgf.convertType(ops.fullType),
                                    cir::BinOpKind::Mul, ops.lhs, ops.rhs);
}
1616mlir::Value ScalarExprEmitter::emitDiv(const BinOpInfo &ops) {
1617 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1618 cgf.convertType(ops.fullType),
1619 cir::BinOpKind::Div, ops.lhs, ops.rhs);
1620}
1621mlir::Value ScalarExprEmitter::emitRem(const BinOpInfo &ops) {
1622 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1623 cgf.convertType(ops.fullType),
1624 cir::BinOpKind::Rem, ops.lhs, ops.rhs);
1625}
1626
/// Emit addition. Pointer + integer (either side) goes to pointer
/// arithmetic; signed integers pick wrapping vs. NSW from the
/// signed-overflow behavior mode; FP uses cir fadd.
mlir::Value ScalarExprEmitter::emitAdd(const BinOpInfo &ops) {
  if (mlir::isa<cir::PointerType>(ops.lhs.getType()) ||
      mlir::isa<cir::PointerType>(ops.rhs.getType()))
    return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/false);

  const mlir::Location loc = cgf.getLoc(ops.loc);
  if (ops.compType->isSignedIntegerOrEnumerationType()) {
    switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return builder.createAdd(loc, ops.lhs, ops.rhs);
      [[fallthrough]];
    case LangOptions::SOB_Undefined:
      if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
      [[fallthrough]];
    case LangOptions::SOB_Trapping:
      if (canElideOverflowCheck(cgf.getContext(), ops))
        return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
      cgf.cgm.errorNYI("sanitizers");
    }
  }
  if (ops.fullType->isConstantMatrixType()) {
    cgf.cgm.errorNYI("matrix types");
    return nullptr;
  }

  if (ops.compType->isUnsignedIntegerType() &&
      cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
      !canElideOverflowCheck(cgf.getContext(), ops))
    cgf.cgm.errorNYI("unsigned int overflow sanitizer");

  if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
    return builder.createFAdd(loc, ops.lhs, ops.rhs);
  }

  if (ops.isFixedPointOp()) {
    cgf.cgm.errorNYI("fixed point");
    return {};
  }

  return builder.create<cir::BinOp>(loc, cgf.convertType(ops.fullType),
                                    cir::BinOpKind::Add, ops.lhs, ops.rhs);
}
1674
/// Emit subtraction. Three shapes: non-pointer LHS (plain arithmetic),
/// pointer - integer (pointer arithmetic), and pointer - pointer (ptrdiff,
/// not yet implemented).
mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &ops) {
  const mlir::Location loc = cgf.getLoc(ops.loc);
  // The LHS is always a pointer if either side is.
  if (!mlir::isa<cir::PointerType>(ops.lhs.getType())) {
    if (ops.compType->isSignedIntegerOrEnumerationType()) {
      // Wrapping vs. NSW, picked from the signed-overflow behavior mode.
      switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined: {
        if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return builder.createSub(loc, ops.lhs, ops.rhs);
        [[fallthrough]];
      }
      case LangOptions::SOB_Undefined:
        if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return builder.createNSWSub(loc, ops.lhs, ops.rhs);
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (canElideOverflowCheck(cgf.getContext(), ops))
          return builder.createNSWSub(loc, ops.lhs, ops.rhs);
        cgf.cgm.errorNYI("sanitizers");
      }
    }

    if (ops.fullType->isConstantMatrixType()) {
      cgf.cgm.errorNYI("matrix types");
      return nullptr;
    }

    if (ops.compType->isUnsignedIntegerType() &&
        cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !canElideOverflowCheck(cgf.getContext(), ops))
      cgf.cgm.errorNYI("unsigned int overflow sanitizer");

    if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
      return builder.createFSub(loc, ops.lhs, ops.rhs);
    }

    if (ops.isFixedPointOp()) {
      cgf.cgm.errorNYI("fixed point");
      return {};
    }

    return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
                                      cgf.convertType(ops.fullType),
                                      cir::BinOpKind::Sub, ops.lhs, ops.rhs);
  }

  // If the RHS is not a pointer, then we have normal pointer
  // arithmetic.
  if (!mlir::isa<cir::PointerType>(ops.rhs.getType()))
    return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/true);

  // Otherwise, this is a pointer subtraction

  // Do the raw subtraction part.
  //
  // TODO(cir): note for LLVM lowering out of this; when expanding this into
  // LLVM we shall take VLA's, division by element size, etc.
  //
  // See more in `EmitSub` in CGExprScalar.cpp.
  cgf.cgm.errorNYI("ptrdiff");
  return {};
}
1741
/// Emit left shift as cir.shift; sanitizer and OpenCL handling are NYI.
mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &ops) {
  // TODO: This misses out on the sanitizer check below.
  if (ops.isFixedPointOp()) {
    cgf.cgm.errorNYI("fixed point");
    return {};
  }

  // CIR accepts shift between different types, meaning nothing special
  // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type:
  // promote or truncate the RHS to the same size as the LHS.

  bool sanitizeSignedBase = cgf.sanOpts.has(SanitizerKind::ShiftBase) &&
                            ops.compType->hasSignedIntegerRepresentation() &&
                            !cgf.getLangOpts().CPlusPlus20;
  bool sanitizeUnsignedBase =
      cgf.sanOpts.has(SanitizerKind::UnsignedShiftBase) &&
      ops.compType->hasUnsignedIntegerRepresentation();
  bool sanitizeBase = sanitizeSignedBase || sanitizeUnsignedBase;
  bool sanitizeExponent = cgf.sanOpts.has(SanitizerKind::ShiftExponent);

  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (cgf.getLangOpts().OpenCL)
    cgf.cgm.errorNYI("opencl");
  else if ((sanitizeBase || sanitizeExponent) &&
           mlir::isa<cir::IntType>(ops.lhs.getType()))
    cgf.cgm.errorNYI("sanitizers");

  return builder.createShiftLeft(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
}
1773
// Emit a right-shift (`>>` / `>>=`) for the operands in `ops`.
// As with emitShl, CIR permits differing LHS/RHS types, and signed vs.
// unsigned (arithmetic vs. logical) treatment is deferred to LLVM lowering.
// NOTE(review): this doxygen extraction elides original line 1777 (gap in
// the embedded numbering) — confirm its contents against upstream.
1774mlir::Value ScalarExprEmitter::emitShr(const BinOpInfo &ops) {
1775 // TODO: This misses out on the sanitizer check below.
1776 if (ops.isFixedPointOp()) {
1778 cgf.cgm.errorNYI("fixed point");
1779 return {};
1780 }
1781
1782 // CIR accepts shift between different types, meaning nothing special
1783 // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type:
1784 // promote or truncate the RHS to the same size as the LHS.
1785
1786 // OpenCL 6.3j: shift values are effectively % word size of LHS.
1787 if (cgf.getLangOpts().OpenCL)
1788 cgf.cgm.errorNYI("opencl");
1789 else if (cgf.sanOpts.has(SanitizerKind::ShiftExponent) &&
1790 mlir::isa<cir::IntType>(ops.lhs.getType()))
1791 cgf.cgm.errorNYI("sanitizers");
1792
1793 // Note that we don't need to distinguish unsigned treatment at this
1794 // point since it will be handled later by LLVM lowering.
1795 return builder.createShiftRight(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
1796}
1797
1798mlir::Value ScalarExprEmitter::emitAnd(const BinOpInfo &ops) {
1799 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1800 cgf.convertType(ops.fullType),
1801 cir::BinOpKind::And, ops.lhs, ops.rhs);
1802}
1803mlir::Value ScalarExprEmitter::emitXor(const BinOpInfo &ops) {
1804 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1805 cgf.convertType(ops.fullType),
1806 cir::BinOpKind::Xor, ops.lhs, ops.rhs);
1807}
1808mlir::Value ScalarExprEmitter::emitOr(const BinOpInfo &ops) {
1809 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1810 cgf.convertType(ops.fullType),
1811 cir::BinOpKind::Or, ops.lhs, ops.rhs);
1812}
1813
1814// Emit code for an explicit or implicit cast. Implicit
1815// casts have to handle a more broad range of conversions than explicit
1816// casts, as they handle things like function to ptr-to-function decay
1817// etc.
// Emit the scalar result of an explicit or implicit cast `ce`.
// Dispatches on the CastKind: bitcast-like kinds go through createBitcast,
// integral/floating conversions go through emitScalarConversion, pointer
// decay kinds produce the decayed pointer, boolean kinds use the dedicated
// *ToBoolConversion helpers, and unimplemented kinds report errorNYI.
// NOTE(review): this doxygen extraction elides original lines 1840, 1851,
// 1856, 1861, 1869 and 1990 (gaps in the embedded numbering) — confirm
// their contents against upstream before editing this switch.
1818mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
1819 Expr *subExpr = ce->getSubExpr();
1820 QualType destTy = ce->getType();
1821 CastKind kind = ce->getCastKind();
1822
1823 // These cases are generally not written to ignore the result of evaluating
1824 // their sub-expressions, so we clear this now.
1825 ignoreResultAssign = false;
1826
1827 switch (kind) {
1828 case clang::CK_Dependent:
1829 llvm_unreachable("dependent cast kind in CIR gen!");
1830 case clang::CK_BuiltinFnToFnPtr:
1831 llvm_unreachable("builtin functions are handled elsewhere");
1832
1833 case CK_CPointerToObjCPointerCast:
1834 case CK_BlockPointerToObjCPointerCast:
1835 case CK_AnyPointerToBlockPointerCast:
1836 case CK_BitCast: {
1837 mlir::Value src = Visit(const_cast<Expr *>(subExpr));
1838 mlir::Type dstTy = cgf.convertType(destTy);
1839
1841
1842 if (cgf.sanOpts.has(SanitizerKind::CFIUnrelatedCast))
1843 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1844 "sanitizer support")
1845
1846 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
1847 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1848 "strict vtable pointers");
1849
1850 // Update heapallocsite metadata when there is an explicit pointer cast.
1852
1853 // If Src is a fixed vector and Dst is a scalable vector, and both have the
1854 // same element type, use the llvm.vector.insert intrinsic to perform the
1855 // bitcast.
1857
1858 // If Src is a scalable vector and Dst is a fixed vector, and both have the
1859 // same element type, use the llvm.vector.extract intrinsic to perform the
1860 // bitcast.
1862
1863 // Perform VLAT <-> VLST bitcast through memory.
1864 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
1865 // require the element types of the vectors to be the same, we
1866 // need to keep this around for bitcasts between VLAT <-> VLST where
1867 // the element types of the vectors are not the same, until we figure
1868 // out a better way of doing these casts.
1870
1871 return cgf.getBuilder().createBitcast(cgf.getLoc(subExpr->getSourceRange()),
1872 src, dstTy);
1873 }
1874
1875 case CK_AtomicToNonAtomic: {
1876 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1877 "CastExpr: ", ce->getCastKindName());
1878 mlir::Location loc = cgf.getLoc(subExpr->getSourceRange());
1879 return cgf.createDummyValue(loc, destTy);
1880 }
1881 case CK_NonAtomicToAtomic:
1882 case CK_UserDefinedConversion:
1883 return Visit(const_cast<Expr *>(subExpr));
1884 case CK_NoOp: {
1885 auto v = Visit(const_cast<Expr *>(subExpr));
1886 if (v) {
1887 // CK_NoOp can model a pointer qualification conversion, which can remove
1888 // an array bound and change the IR type.
1889 // FIXME: Once pointee types are removed from IR, remove this.
1890 mlir::Type t = cgf.convertType(destTy);
1891 if (t != v.getType())
1892 cgf.getCIRGenModule().errorNYI("pointer qualification conversion");
1893 }
1894 return v;
1895 }
1896 case CK_IntegralToPointer: {
1897 mlir::Type destCIRTy = cgf.convertType(destTy);
1898 mlir::Value src = Visit(const_cast<Expr *>(subExpr));
1899
1900 // Properly resize by casting to an int of the same size as the pointer.
1901 // Clang's IntegralToPointer includes 'bool' as the source, but in CIR
1902 // 'bool' is not an integral type. So check the source type to get the
1903 // correct CIR conversion.
1904 mlir::Type middleTy = cgf.cgm.getDataLayout().getIntPtrType(destCIRTy);
1905 mlir::Value middleVal = builder.createCast(
1906 subExpr->getType()->isBooleanType() ? cir::CastKind::bool_to_int
1907 : cir::CastKind::integral,
1908 src, middleTy);
1909
1910 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers) {
1911 cgf.cgm.errorNYI(subExpr->getSourceRange(),
1912 "IntegralToPointer: strict vtable pointers");
1913 return {};
1914 }
1915
1916 return builder.createIntToPtr(middleVal, destCIRTy);
1917 }
1918
1919 case CK_Dynamic: {
1920 Address v = cgf.emitPointerWithAlignment(subExpr);
1921 const auto *dce = cast<CXXDynamicCastExpr>(ce);
1922 return cgf.emitDynamicCast(v, dce);
1923 }
1924 case CK_ArrayToPointerDecay:
1925 return cgf.emitArrayToPointerDecay(subExpr).getPointer();
1926
1927 case CK_NullToPointer: {
1928 if (mustVisitNullValue(subExpr))
1929 cgf.emitIgnoredExpr(subExpr);
1930
1931 // Note that DestTy is used as the MLIR type instead of a custom
1932 // nullptr type.
1933 mlir::Type ty = cgf.convertType(destTy);
1934 return builder.getNullPtr(ty, cgf.getLoc(subExpr->getExprLoc()));
1935 }
1936
1937 case CK_LValueToRValue:
1938 assert(cgf.getContext().hasSameUnqualifiedType(subExpr->getType(), destTy));
1939 assert(subExpr->isGLValue() && "lvalue-to-rvalue applied to r-value!");
1940 return Visit(const_cast<Expr *>(subExpr));
1941
1942 case CK_IntegralCast: {
1943 ScalarConversionOpts opts;
1944 if (auto *ice = dyn_cast<ImplicitCastExpr>(ce)) {
1945 if (!ice->isPartOfExplicitCast())
1946 opts = ScalarConversionOpts(cgf.sanOpts);
1947 }
1948 return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
1949 ce->getExprLoc(), opts);
1950 }
1951
1952 case CK_FloatingComplexToReal:
1953 case CK_IntegralComplexToReal:
1954 case CK_FloatingComplexToBoolean:
1955 case CK_IntegralComplexToBoolean: {
1956 mlir::Value value = cgf.emitComplexExpr(subExpr);
1957 return emitComplexToScalarConversion(cgf.getLoc(ce->getExprLoc()), value,
1958 kind, destTy);
1959 }
1960
1961 case CK_FloatingRealToComplex:
1962 case CK_FloatingComplexCast:
1963 case CK_IntegralRealToComplex:
1964 case CK_IntegralComplexCast:
1965 case CK_IntegralComplexToFloatingComplex:
1966 case CK_FloatingComplexToIntegralComplex:
1967 llvm_unreachable("scalar cast to non-scalar value");
1968
1969 case CK_PointerToIntegral: {
1970 assert(!destTy->isBooleanType() && "bool should use PointerToBool");
1971 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
1972 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1973 "strict vtable pointers");
1974 return builder.createPtrToInt(Visit(subExpr), cgf.convertType(destTy));
1975 }
1976 case CK_ToVoid:
1977 cgf.emitIgnoredExpr(subExpr);
1978 return {};
1979
1980 case CK_IntegralToFloating:
1981 case CK_FloatingToIntegral:
1982 case CK_FloatingCast:
1983 case CK_FixedPointToFloating:
1984 case CK_FloatingToFixedPoint: {
1985 if (kind == CK_FixedPointToFloating || kind == CK_FloatingToFixedPoint) {
1986 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1987 "fixed point casts");
1988 return {};
1989 }
1991 return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
1992 ce->getExprLoc());
1993 }
1994
1995 case CK_IntegralToBoolean:
1996 return emitIntToBoolConversion(Visit(subExpr),
1997 cgf.getLoc(ce->getSourceRange()));
1998
1999 case CK_PointerToBoolean:
2000 return emitPointerToBoolConversion(Visit(subExpr), subExpr->getType());
2001 case CK_FloatingToBoolean:
2002 return emitFloatToBoolConversion(Visit(subExpr),
2003 cgf.getLoc(subExpr->getExprLoc()));
2004 case CK_MemberPointerToBoolean: {
2005 mlir::Value memPtr = Visit(subExpr);
2006 return builder.createCast(cgf.getLoc(ce->getSourceRange()),
2007 cir::CastKind::member_ptr_to_bool, memPtr,
2008 cgf.convertType(destTy));
2009 }
2010
2011 case CK_VectorSplat: {
2012 // Create a vector object and fill all elements with the same scalar value.
2013 assert(destTy->isVectorType() && "CK_VectorSplat to non-vector type");
2014 return builder.create<cir::VecSplatOp>(
2015 cgf.getLoc(subExpr->getSourceRange()), cgf.convertType(destTy),
2016 Visit(subExpr));
2017 }
2018 case CK_FunctionToPointerDecay:
2019 return cgf.emitLValue(subExpr).getPointer();
2020
2021 default:
2022 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2023 "CastExpr: ", ce->getCastKindName());
2024 }
2025 return {};
2026}
2027
// Emit a scalar call expression and return its value.
// NOTE(review): this doxygen extraction elides original lines 2029 and 2033.
// Line 2029 is the *condition* guarding the emitLoadOfLValue return —
// presumably a check that the call returns a reference type — and 2033 is
// likely an assertion; confirm both against upstream before editing.
2028mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *e) {
2030 return emitLoadOfLValue(e);
2031
2032 auto v = cgf.emitCallExpr(e).getValue();
2034 return v;
2035}
2036
// Emit a member access as a scalar value. If the member expression folds to
// an integer constant, classic codegen would emit the constant directly;
// here that path is still errorNYI, after which we fall through and emit a
// normal load of the member lvalue.
// NOTE(review): the diagnostic text "Constant interger member expr" has a
// typo ("interger" -> "integer"); left untouched here since this is a
// documentation-only edit. Original line 2041 is elided in this extraction
// (likely an assertion) — confirm against upstream.
2037mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *e) {
2038 // TODO(cir): The classic codegen calls tryEmitAsConstant() here. Folding
2039 // constants sound like work for MLIR optimizers, but we'll keep an assertion
2040 // for now.
2042 Expr::EvalResult result;
2043 if (e->EvaluateAsInt(result, cgf.getContext(), Expr::SE_AllowSideEffects)) {
2044 cgf.cgm.errorNYI(e->getSourceRange(), "Constant interger member expr");
2045 // Fall through to emit this as a non-constant access.
2046 }
2047 return emitLoadOfLValue(e);
2048}
2049
2050mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *e) {
2051 const unsigned numInitElements = e->getNumInits();
2052
2053 if (e->hadArrayRangeDesignator()) {
2054 cgf.cgm.errorNYI(e->getSourceRange(), "ArrayRangeDesignator");
2055 return {};
2056 }
2057
2058 if (e->getType()->isVectorType()) {
2059 const auto vectorType =
2060 mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
2061
2062 SmallVector<mlir::Value, 16> elements;
2063 for (Expr *init : e->inits()) {
2064 elements.push_back(Visit(init));
2065 }
2066
2067 // Zero-initialize any remaining values.
2068 if (numInitElements < vectorType.getSize()) {
2069 const mlir::Value zeroValue = cgf.getBuilder().getNullValue(
2070 vectorType.getElementType(), cgf.getLoc(e->getSourceRange()));
2071 std::fill_n(std::back_inserter(elements),
2072 vectorType.getSize() - numInitElements, zeroValue);
2073 }
2074
2075 return cgf.getBuilder().create<cir::VecCreateOp>(
2076 cgf.getLoc(e->getSourceRange()), vectorType, elements);
2077 }
2078
2079 // C++11 value-initialization for the scalar.
2080 if (numInitElements == 0)
2081 return emitNullValue(e->getType(), cgf.getLoc(e->getExprLoc()));
2082
2083 return Visit(e->getInit(0));
2084}
2085
// CIRGenFunction entry point: convert `src` from `srcTy` to `dstTy` by
// delegating to a fresh ScalarExprEmitter.
// NOTE(review): original lines 2089-2090 are elided in this extraction —
// from the dangling string on the next line they were the head of an
// assert(... && "Invalid scalar expression to emit"); confirm upstream.
2086mlir::Value CIRGenFunction::emitScalarConversion(mlir::Value src,
2087 QualType srcTy, QualType dstTy,
2088 SourceLocation loc) {
2091 "Invalid scalar expression to emit");
2092 return ScalarExprEmitter(*this, builder)
2093 .emitScalarConversion(src, srcTy, dstTy, loc);
2094}
2095
// Convert a complex value `src` of type `srcTy` to the scalar type `dstTy`:
// to bool via a *_complex_to_bool cast, otherwise extract the real part via
// a *_complex_to_real cast and then run the ordinary scalar conversion.
// NOTE(review): the first line of this signature (original line 2096, the
// function name and `mlir::Value src` parameter) is elided in this
// extraction — confirm against upstream.
2097 QualType srcTy,
2098 QualType dstTy,
2099 SourceLocation loc) {
2100 assert(srcTy->isAnyComplexType() && hasScalarEvaluationKind(dstTy) &&
2101 "Invalid complex -> scalar conversion");
2102
2103 QualType complexElemTy = srcTy->castAs<ComplexType>()->getElementType();
// Complex -> bool compares both components against zero in one CIR cast.
2104 if (dstTy->isBooleanType()) {
2105 auto kind = complexElemTy->isFloatingType()
2106 ? cir::CastKind::float_complex_to_bool
2107 : cir::CastKind::int_complex_to_bool;
2108 return builder.createCast(getLoc(loc), kind, src, convertType(dstTy));
2109 }
2110
// Otherwise take the real component, then scalar-convert it to dstTy.
2111 auto kind = complexElemTy->isFloatingType()
2112 ? cir::CastKind::float_complex_to_real
2113 : cir::CastKind::int_complex_to_real;
2114 mlir::Value real =
2115 builder.createCast(getLoc(loc), kind, src, convertType(complexElemTy));
2116 return emitScalarConversion(real, complexElemTy, dstTy, loc);
2117}
2118
// Emit logical not (`!`). Vector operands become an element-wise equality
// comparison against a zero vector; scalar operands are converted to bool,
// inverted, and then zero-extended back to the expression's CIR type.
// NOTE(review): original line 2123 — the right-hand side of the
// getVectorKind() comparison, presumably `VectorKind::Generic) {` — is
// elided in this extraction; confirm against upstream.
2119mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) {
2120 // Perform vector logical not on comparison with zero vector.
2121 if (e->getType()->isVectorType() &&
2122 e->getType()->castAs<VectorType>()->getVectorKind() ==
2124 mlir::Value oper = Visit(e->getSubExpr());
2125 mlir::Location loc = cgf.getLoc(e->getExprLoc());
2126 auto operVecTy = mlir::cast<cir::VectorType>(oper.getType());
2127 auto exprVecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
2128 mlir::Value zeroVec = builder.getNullValue(operVecTy, loc);
2129 return cir::VecCmpOp::create(builder, loc, exprVecTy, cir::CmpOpKind::eq,
2130 oper, zeroVec);
2131 }
2132
2133 // Compare operand to zero.
2134 mlir::Value boolVal = cgf.evaluateExprAsBool(e->getSubExpr());
2135
2136 // Invert value.
2137 boolVal = builder.createNot(boolVal);
2138
2139 // ZExt result to the expr type.
2140 return maybePromoteBoolResult(boolVal, cgf.convertType(e->getType()));
2141}
2142
2143mlir::Value ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *e) {
2144 QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
2145 mlir::Value result = VisitRealImag(e, promotionTy);
2146 if (result && !promotionTy.isNull())
2147 result = emitUnPromotedValue(result, e->getType());
2148 return result;
2149}
2150
2151mlir::Value ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *e) {
2152 QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
2153 mlir::Value result = VisitRealImag(e, promotionTy);
2154 if (result && !promotionTy.isNull())
2155 result = emitUnPromotedValue(result, e->getType());
2156 return result;
2157}
2158
2159mlir::Value ScalarExprEmitter::VisitRealImag(const UnaryOperator *e,
2160 QualType promotionTy) {
2161 assert(e->getOpcode() == clang::UO_Real ||
2162 e->getOpcode() == clang::UO_Imag &&
2163 "Invalid UnaryOp kind for ComplexType Real or Imag");
2164
2165 Expr *op = e->getSubExpr();
2166 mlir::Location loc = cgf.getLoc(e->getExprLoc());
2167 if (op->getType()->isAnyComplexType()) {
2168 // If it's an l-value, load through the appropriate subobject l-value.
2169 // Note that we have to ask `e` because `op` might be an l-value that
2170 // this won't work for, e.g. an Obj-C property
2171 mlir::Value complex = cgf.emitComplexExpr(op);
2172 if (e->isGLValue() && !promotionTy.isNull()) {
2173 promotionTy = promotionTy->isAnyComplexType()
2174 ? promotionTy
2175 : cgf.getContext().getComplexType(promotionTy);
2176 complex = cgf.emitPromotedValue(complex, promotionTy);
2177 }
2178
2179 return e->getOpcode() == clang::UO_Real
2180 ? builder.createComplexReal(loc, complex)
2181 : builder.createComplexImag(loc, complex);
2182 }
2183
2184 if (e->getOpcode() == UO_Real) {
2185 mlir::Value operand = promotionTy.isNull()
2186 ? Visit(op)
2187 : cgf.emitPromotedScalarExpr(op, promotionTy);
2188 return builder.createComplexReal(loc, operand);
2189 }
2190
2191 // __imag on a scalar returns zero. Emit the subexpr to ensure side
2192 // effects are evaluated, but not the actual value.
2193 mlir::Value operand;
2194 if (op->isGLValue()) {
2195 operand = cgf.emitLValue(op).getPointer();
2196 operand = cir::LoadOp::create(builder, loc, operand);
2197 } else if (!promotionTy.isNull()) {
2198 operand = cgf.emitPromotedScalarExpr(op, promotionTy);
2199 } else {
2200 operand = cgf.emitScalarExpr(op);
2201 }
2202 return builder.createComplexImag(loc, operand);
2203}
2204
2205/// Return the size or alignment of the type of argument of the sizeof
2206/// expression as an integer.
// Emit sizeof/alignof-style traits as a u64 CIR constant. VLA sizeof and
// OpenMP required-simd-align are still errorNYI and currently yield a
// placeholder constant 1.
// NOTE(review): this extraction elides original lines 2214, 2222 and —
// critically — 2232, the value operand of the final getConstant (upstream
// this is the type's size from the ASTContext); confirm before editing.
2207mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2208 const UnaryExprOrTypeTraitExpr *e) {
2209 const QualType typeToSize = e->getTypeOfArgument();
2210 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
2211 if (auto kind = e->getKind();
2212 kind == UETT_SizeOf || kind == UETT_DataSizeOf) {
2213 if (cgf.getContext().getAsVariableArrayType(typeToSize)) {
2215 "sizeof operator for VariableArrayType",
2216 e->getStmtClassName());
2217 return builder.getConstant(
2218 loc, cir::IntAttr::get(cgf.cgm.UInt64Ty,
2219 llvm::APSInt(llvm::APInt(64, 1), true)));
2220 }
2221 } else if (e->getKind() == UETT_OpenMPRequiredSimdAlign) {
2223 e->getSourceRange(), "sizeof operator for OpenMpRequiredSimdAlign",
2224 e->getStmtClassName());
2225 return builder.getConstant(
2226 loc, cir::IntAttr::get(cgf.cgm.UInt64Ty,
2227 llvm::APSInt(llvm::APInt(64, 1), true)));
2228 }
2229
2230 return builder.getConstant(
2231 loc, cir::IntAttr::get(cgf.cgm.UInt64Ty,
2233}
2234
2235/// Return true if the specified expression is cheap enough and side-effect-free
2236/// enough to evaluate unconditionally instead of conditionally. This is used
2237/// to convert control flow into selects in some cases.
2238/// TODO(cir): can be shared with LLVM codegen.
// Returns true when `e` is cheap and side-effect-free enough to evaluate
// unconditionally (used to turn ternaries into selects). Currently: only
// expressions that constant-fold. The comment after the return records the
// cases that deliberately keep this conservative.
// NOTE(review): the first line of this signature (original line 2239, the
// `static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e,` part)
// is elided in this extraction — confirm against upstream.
2240 CIRGenFunction &cgf) {
2241 // Anything that is an integer or floating point constant is fine.
2242 return e->IgnoreParens()->isEvaluatable(cgf.getContext());
2243
2244 // Even non-volatile automatic variables can't be evaluated unconditionally.
2245 // Referencing a thread_local may cause non-trivial initialization work to
2246 // occur. If we're inside a lambda and one of the variables is from the scope
2247 // outside the lambda, that function may have returned already. Reading its
2248 // locals is a bad idea. Also, these reads may introduce races there didn't
2249 // exist in the source-level program.
2250}
2251
// Emit `cond ? lhs : rhs` as a scalar. Strategy, in order:
//   1. If the condition constant-folds, emit only the live arm (unless the
//      dead arm contains labels we still need).
//   2. Vector conditions become a cir.vec.ternary (element-wise select).
//   3. If both arms are cheap and side-effect-free, emit a flat cir.select.
//   4. Otherwise emit a full cir.ternary with one region per arm, patching
//      in yields for arms that produced no value (throw/void).
// NOTE(review): this extraction elides original lines 2276, 2298, 2304,
// 2320, 2323, 2347 and 2349 (gaps in the embedded numbering); in particular
// 2320 appears to be the second conjunct of the "cheap enough" test (the
// RHS check) — confirm all of them against upstream before editing.
2252mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator(
2253 const AbstractConditionalOperator *e) {
2254 CIRGenBuilderTy &builder = cgf.getBuilder();
2255 mlir::Location loc = cgf.getLoc(e->getSourceRange());
2256 ignoreResultAssign = false;
2257
2258 // Bind the common expression if necessary.
2259 CIRGenFunction::OpaqueValueMapping binding(cgf, e);
2260
2261 Expr *condExpr = e->getCond();
2262 Expr *lhsExpr = e->getTrueExpr();
2263 Expr *rhsExpr = e->getFalseExpr();
2264
2265 // If the condition constant folds and can be elided, try to avoid emitting
2266 // the condition and the dead arm.
2267 bool condExprBool;
2268 if (cgf.constantFoldsToBool(condExpr, condExprBool)) {
2269 Expr *live = lhsExpr, *dead = rhsExpr;
2270 if (!condExprBool)
2271 std::swap(live, dead);
2272
2273 // If the dead side doesn't have labels we need, just emit the Live part.
2274 if (!cgf.containsLabel(dead)) {
2275 if (condExprBool)
2277 mlir::Value result = Visit(live);
2278
2279 // If the live part is a throw expression, it acts like it has a void
2280 // type, so evaluating it returns a null Value. However, a conditional
2281 // with non-void type must return a non-null Value.
2282 if (!result && !e->getType()->isVoidType()) {
2283 cgf.cgm.errorNYI(e->getSourceRange(),
2284 "throw expression in conditional operator");
2285 result = {};
2286 }
2287
2288 return result;
2289 }
2290 }
2291
2292 QualType condType = condExpr->getType();
2293
2294 // OpenCL: If the condition is a vector, we can treat this condition like
2295 // the select function.
2296 if ((cgf.getLangOpts().OpenCL && condType->isVectorType()) ||
2297 condType->isExtVectorType()) {
2299 cgf.cgm.errorNYI(e->getSourceRange(), "vector ternary op");
2300 }
2301
2302 if (condType->isVectorType() || condType->isSveVLSBuiltinType()) {
2303 if (!condType->isVectorType()) {
2305 cgf.cgm.errorNYI(loc, "TernaryOp for SVE vector");
2306 return {};
2307 }
2308
2309 mlir::Value condValue = Visit(condExpr);
2310 mlir::Value lhsValue = Visit(lhsExpr);
2311 mlir::Value rhsValue = Visit(rhsExpr);
2312 return builder.create<cir::VecTernaryOp>(loc, condValue, lhsValue,
2313 rhsValue);
2314 }
2315
2316 // If this is a really simple expression (like x ? 4 : 5), emit this as a
2317 // select instead of as control flow. We can only do this if it is cheap
2318 // and safe to evaluate the LHS and RHS unconditionally.
2319 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, cgf) &&
2321 bool lhsIsVoid = false;
2322 mlir::Value condV = cgf.evaluateExprAsBool(condExpr);
2324
2325 mlir::Value lhs = Visit(lhsExpr);
2326 if (!lhs) {
2327 lhs = builder.getNullValue(cgf.VoidTy, loc);
2328 lhsIsVoid = true;
2329 }
2330
2331 mlir::Value rhs = Visit(rhsExpr);
2332 if (lhsIsVoid) {
2333 assert(!rhs && "lhs and rhs types must match");
2334 rhs = builder.getNullValue(cgf.VoidTy, loc);
2335 }
2336
2337 return builder.createSelect(loc, condV, lhs, rhs);
2338 }
2339
2340 mlir::Value condV = cgf.emitOpOnBoolExpr(loc, condExpr);
2341 CIRGenFunction::ConditionalEvaluation eval(cgf);
2342 SmallVector<mlir::OpBuilder::InsertPoint, 2> insertPoints{};
2343 mlir::Type yieldTy{};
2344
2345 auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc, Expr *expr) {
2346 CIRGenFunction::LexicalScope lexScope{cgf, loc, b.getInsertionBlock()};
2348
2350 eval.beginEvaluation();
2351 mlir::Value branch = Visit(expr);
2352 eval.endEvaluation();
2353
2354 if (branch) {
2355 yieldTy = branch.getType();
2356 b.create<cir::YieldOp>(loc, branch);
2357 } else {
2358 // If LHS or RHS is a throw or void expression we need to patch
2359 // arms as to properly match yield types.
2360 insertPoints.push_back(b.saveInsertionPoint());
2361 }
2362 };
2363
2364 mlir::Value result = builder
2365 .create<cir::TernaryOp>(
2366 loc, condV,
2367 /*trueBuilder=*/
2368 [&](mlir::OpBuilder &b, mlir::Location loc) {
2369 emitBranch(b, loc, lhsExpr);
2370 },
2371 /*falseBuilder=*/
2372 [&](mlir::OpBuilder &b, mlir::Location loc) {
2373 emitBranch(b, loc, rhsExpr);
2374 })
2375 .getResult();
2376
2377 if (!insertPoints.empty()) {
2378 // If both arms are void, so be it.
2379 if (!yieldTy)
2380 yieldTy = cgf.VoidTy;
2381
2382 // Insert required yields.
2383 for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
2384 mlir::OpBuilder::InsertionGuard guard(builder);
2385 builder.restoreInsertionPoint(toInsert);
2386
2387 // Block does not return: build empty yield.
2388 if (mlir::isa<cir::VoidType>(yieldTy)) {
2389 builder.create<cir::YieldOp>(loc);
2390 } else { // Block returns: set null yield value.
2391 mlir::Value op0 = builder.getNullValue(yieldTy, loc);
2392 builder.create<cir::YieldOp>(loc, op0);
2393 }
2394 }
2395 }
2396
2397 return result;
2398}
2399
// CIRGenFunction entry point for ++/-- on scalars: delegates to the
// ScalarExprEmitter implementation.
// NOTE(review): the first line of this signature (original line 2400, the
// `mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator
// *e,` part) is elided in this extraction — confirm against upstream.
2401 LValue lv,
2402 cir::UnaryOpKind kind,
2403 bool isPre) {
2404 return ScalarExprEmitter(*this, builder)
2405 .emitScalarPrePostIncDec(e, lv, kind, isPre);
2406}
#define HANDLE_BINOP(OP)
static bool mustVisitNullValue(const Expr *e)
#define COMPOUND_OP(Op)
#define HANDLEBINOP(OP)
static bool isWidenedIntegerOp(const ASTContext &astContext, const Expr *e)
Check if e is a widened promoted integer.
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e, CIRGenFunction &cgf)
Return true if the specified expression is cheap enough and side-effect-free enough to evaluate uncon...
static bool canElideOverflowCheck(const ASTContext &astContext, const BinOpInfo &op)
Check if we can skip the overflow check for Op.
static std::optional< QualType > getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e)
If e is a widened promoted integer, get its base (unpromoted) type.
#define VISITCOMP(CODE)
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
cir::ConstantOp getBool(bool state, mlir::Location loc)
cir::ConstantOp getNullValue(mlir::Type ty, mlir::Location loc)
mlir::Value createCast(mlir::Location loc, cir::CastKind kind, mlir::Value src, mlir::Type newTy)
mlir::Value createIntToPtr(mlir::Value src, mlir::Type newTy)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
cir::CmpOp createCompare(mlir::Location loc, cir::CmpOpKind kind, mlir::Value lhs, mlir::Value rhs)
mlir::Value createSelect(mlir::Location loc, mlir::Value condition, mlir::Value trueValue, mlir::Value falseValue)
mlir::Type getIntPtrType(mlir::Type ty) const
llvm::APInt getValue() const
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CanQualType FloatTy
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
CanQualType BoolTy
bool hasSameUnqualifiedType(QualType T1, QualType T2) const
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getComplexType(QualType T) const
Return the uniqued reference to the type for a complex number with the specified element type.
bool isPromotableIntegerType(QualType T) const
More type predicates useful for type checking/promotion.
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4465
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4471
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4477
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:3972
Expr * getLHS() const
Definition Expr.h:4022
SourceLocation getExprLoc() const
Definition Expr.h:4013
Expr * getRHS() const
Definition Expr.h:4024
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:4185
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, const Expr *LHS, const Expr *RHS)
Return true if a binary operator using the specified opcode and operands would match the 'p = (i8*)nu...
Definition Expr.cpp:2200
Opcode getOpcode() const
Definition Expr.h:4017
BinaryOperatorKind Opcode
Definition Expr.h:3977
mlir::Value getPointer() const
Definition Address.h:82
mlir::Value createNeg(mlir::Value value)
void forceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
static bool hasScalarEvaluationKind(clang::QualType type)
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
mlir::Type convertType(clang::QualType t)
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType)
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
const clang::LangOptions & getLangOpts() const
LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e, mlir::Value &result)
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind kind, bool isPre)
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
mlir::Value emitScalarExpr(const clang::Expr *e)
Emit the computation of the specified expression of scalar type.
mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType)
CIRGenBuilderTy & getBuilder()
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
clang::ASTContext & getContext() const
void emitNullabilityCheck(LValue lhs, mlir::Value rhs, clang::SourceLocation loc)
Given an assignment *lhs = rhs, emit a test that checks if rhs is nonnull, if LHS is marked _Nonnull...
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
const cir::CIRDataLayout getDataLayout() const
const clang::CodeGenOptions & getCodeGenOpts() const
mlir::Value getPointer() const
static RValue get(mlir::Value v)
Definition CIRGenValue.h:82
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:56
bool getValue() const
Definition ExprCXX.h:740
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1599
CastKind getCastKind() const
Definition Expr.h:3654
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1946
Expr * getSubExpr()
Definition Expr.h:3660
unsigned getValue() const
Definition Expr.h:1629
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3275
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4234
QualType getComputationLHSType() const
Definition Expr.h:4268
QualType getComputationResultType() const
Definition Expr.h:4271
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:4743
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isGLValue() const
Definition Expr.h:287
@ SE_AllowSideEffects
Allow any unmodeled side effect.
Definition Expr.h:674
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3081
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3065
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
llvm::APFloat getValue() const
Definition Expr.h:1666
const Expr * getSubExpr() const
Definition Expr.h:1062
Expr * getResultExpr()
Return the result expression of this controlling expression.
Definition Expr.h:6396
unsigned getNumInits() const
Definition Expr.h:5263
bool hadArrayRangeDesignator() const
Definition Expr.h:5417
const Expr * getInit(unsigned Init) const
Definition Expr.h:5287
ArrayRef< Expr * > inits()
Definition Expr.h:5283
bool isSignedOverflowDefined() const
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3653
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:1208
Expr * getSelectedExpr() const
Definition ExprCXX.h:4641
const Expr * getSubExpr() const
Definition Expr.h:2199
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8290
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1438
QualType getCanonicalType() const
Definition TypeBase.h:8342
bool UseExcessPrecision(const ASTContext &Ctx)
Definition Type.cpp:1612
bool isCanonical() const
Definition TypeBase.h:8347
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
unsigned getNumSubExprs() const
getNumSubExprs - Return the size of the SubExprs array.
Definition Expr.h:4610
Expr * getExpr(unsigned Index)
getExpr - Return the Expr at the specified index.
Definition Expr.h:4616
Encodes a location in the source.
SourceLocation getBegin() const
CompoundStmt * getSubStmt()
Definition Expr.h:4546
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:334
const char * getStmtClassName() const
Definition Stmt.cpp:87
bool isVoidType() const
Definition TypeBase.h:8883
bool isBooleanType() const
Definition TypeBase.h:9013
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2225
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2205
bool isConstantMatrixType() const
Definition TypeBase.h:8688
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8927
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9170
bool isReferenceType() const
Definition TypeBase.h:8551
bool isSveVLSBuiltinType() const
Determines if this is a sizeless type supported by the 'arm_sve_vector_bits' type attribute,...
Definition Type.cpp:2608
bool hasUnsignedIntegerRepresentation() const
Determine whether this type has an unsigned integer representation of some sort, e....
Definition Type.cpp:2291
bool isExtVectorType() const
Definition TypeBase.h:8670
bool isAnyComplexType() const
Definition TypeBase.h:8662
bool isFixedPointType() const
Return true if this is a fixed point type according to ISO/IEC JTC1 SC22 WG14 N1169.
Definition TypeBase.h:8939
bool isHalfType() const
Definition TypeBase.h:8887
bool hasSignedIntegerRepresentation() const
Determine whether this type has an signed integer representation of some sort, e.g....
Definition Type.cpp:2243
bool isMatrixType() const
Definition TypeBase.h:8684
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2800
bool isFunctionType() const
Definition TypeBase.h:8523
bool isVectorType() const
Definition TypeBase.h:8666
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2320
bool isFloatingType() const
Definition Type.cpp:2304
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2253
const T * getAs() const
Member-template getAs<specific type>.
Definition TypeBase.h:9103
bool isNullPtrType() const
Definition TypeBase.h:8920
QualType getTypeOfArgument() const
Gets the argument type, or the type of the argument expression, whichever is appropriate.
Definition Expr.h:2694
UnaryExprOrTypeTrait getKind() const
Definition Expr.h:2657
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
SourceLocation getExprLoc() const
Definition Expr.h:2368
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
static bool isIncrementOp(Opcode Op)
Definition Expr.h:2326
bool canOverflow() const
Returns true if the unary operator can cause an overflow.
Definition Expr.h:2298
Represents a GCC generic vector type.
Definition TypeBase.h:4175
VectorKind getVectorKind() const
Definition TypeBase.h:4195
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::ArgumentAdaptingMatcherFunc< internal::HasMatcher > has
Matches AST nodes that have child AST nodes that match the provided matcher.
const AstTypeMatcher< PointerType > pointerType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
unsigned kind
All of the diagnostics that can be emitted by the frontend.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
CastKind
CastKind - The kind of operation required for a conversion.
@ Generic
not a target-specific vector type
Definition TypeBase.h:4136
U cast(CodeGen::Address addr)
Definition Address.h:327
#define false
Definition stdbool.h:26
static bool instrumentation()
static bool dataMemberType()
static bool objCLifetime()
static bool addressSpace()
static bool fixedPointType()
static bool vecTernaryOp()
static bool cgFPOptionsRAII()
static bool fpConstraints()
static bool addHeapAllocSiteMetadata()
static bool mayHaveIntegerOverflow()
static bool tryEmitAsConstant()
static bool scalableVectors()
static bool emitLValueAlignmentAssumption()
static bool incrementProfileCounter()
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174