1//===--- CIRGenExprScalar.cpp ---------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Emit Expr nodes with scalar CIR types as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
14#include "CIRGenValue.h"
15
16#include "clang/AST/Expr.h"
19
20#include "mlir/IR/Location.h"
21#include "mlir/IR/Value.h"
22
23#include <cassert>
24#include <utility>
25
26using namespace clang;
27using namespace clang::CIRGen;
28
29namespace {
30
31struct BinOpInfo {
32 mlir::Value lhs;
33 mlir::Value rhs;
34 SourceRange loc;
35 QualType fullType; // Type of operands and result
36 QualType compType; // Type used for computations. Element type
37 // for vectors, otherwise same as FullType.
38 BinaryOperator::Opcode opcode; // Opcode of BinOp to perform
39 FPOptions fpfeatures;
40 const Expr *e; // Entire expr, used when reporting unsupported cases. May not be a binop.
41
42 /// Check if the binop computes a division or a remainder.
43 bool isDivRemOp() const {
44 return opcode == BO_Div || opcode == BO_Rem || opcode == BO_DivAssign ||
45 opcode == BO_RemAssign;
46 }
47
48 /// Check if the binop can result in integer overflow.
49 bool mayHaveIntegerOverflow() const {
50 // Without constant input, we can't rule out overflow.
51 auto lhsci = lhs.getDefiningOp<cir::ConstantOp>();
52 auto rhsci = rhs.getDefiningOp<cir::ConstantOp>();
53 if (!lhsci || !rhsci)
54 return true;
55
57 // TODO(cir): For now we just assume that we might overflow
58 return true;
59 }
60
61 /// Check if at least one operand is a fixed point type. In such cases,
62 /// this operation did not follow usual arithmetic conversion and both
63 /// operands might not be of the same type.
64 bool isFixedPointOp() const {
65 // We cannot simply check the result type since comparison operations
66 // return an int.
67 if (const auto *binOp = llvm::dyn_cast<BinaryOperator>(e)) {
68 QualType lhstype = binOp->getLHS()->getType();
69 QualType rhstype = binOp->getRHS()->getType();
70 return lhstype->isFixedPointType() || rhstype->isFixedPointType();
71 }
72 if (const auto *unop = llvm::dyn_cast<UnaryOperator>(e))
73 return unop->getSubExpr()->getType()->isFixedPointType();
74 return false;
75 }
76};
77
78class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
79 CIRGenFunction &cgf;
80 CIRGenBuilderTy &builder;
81 bool ignoreResultAssign = false;
82
83public:
84 ScalarExprEmitter(CIRGenFunction &cgf, CIRGenBuilderTy &builder)
85 : cgf(cgf), builder(builder) {}
86
87 //===--------------------------------------------------------------------===//
88 // Utilities
89 //===--------------------------------------------------------------------===//
90
91 mlir::Value emitComplexToScalarConversion(mlir::Location loc,
92 mlir::Value value, CastKind kind,
93 QualType destTy);
94
95 mlir::Value emitNullValue(QualType ty, mlir::Location loc) {
96 return cgf.cgm.emitNullConstant(ty, loc);
97 }
98
99 mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType) {
100 return builder.createFloatingCast(result, cgf.convertType(promotionType));
101 }
102
103 mlir::Value emitUnPromotedValue(mlir::Value result, QualType exprType) {
104 return builder.createFloatingCast(result, cgf.convertType(exprType));
105 }
106
107 mlir::Value emitPromoted(const Expr *e, QualType promotionType);
108
109 mlir::Value maybePromoteBoolResult(mlir::Value value,
110 mlir::Type dstTy) const {
111 if (mlir::isa<cir::IntType>(dstTy))
112 return builder.createBoolToInt(value, dstTy);
113 if (mlir::isa<cir::BoolType>(dstTy))
114 return value;
115 llvm_unreachable("Can only promote integer or boolean types");
116 }
117
118 //===--------------------------------------------------------------------===//
119 // Visitor Methods
120 //===--------------------------------------------------------------------===//
121
122 mlir::Value Visit(Expr *e) {
123 return StmtVisitor<ScalarExprEmitter, mlir::Value>::Visit(e);
124 }
125
126 mlir::Value VisitStmt(Stmt *s) {
127 llvm_unreachable("Statement passed to ScalarExprEmitter");
128 }
129
130 mlir::Value VisitExpr(Expr *e) {
131 cgf.getCIRGenModule().errorNYI(
132 e->getSourceRange(), "scalar expression kind: ", e->getStmtClassName());
133 return {};
134 }
135
136 mlir::Value VisitPackIndexingExpr(PackIndexingExpr *e) {
137 return Visit(e->getSelectedExpr());
138 }
139
140 mlir::Value VisitParenExpr(ParenExpr *pe) { return Visit(pe->getSubExpr()); }
141
142 mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
143 return Visit(ge->getResultExpr());
144 }
145
146 /// Emits the address of the l-value, then loads and returns the result.
147 mlir::Value emitLoadOfLValue(const Expr *e) {
148 LValue lv = cgf.emitLValue(e);
149 // FIXME: add something akin to EmitLValueAlignmentAssumption(E, V);
150 return cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
151 }
152
153 mlir::Value emitLoadOfLValue(LValue lv, SourceLocation loc) {
154 return cgf.emitLoadOfLValue(lv, loc).getValue();
155 }
156
157 // l-values
158 mlir::Value VisitDeclRefExpr(DeclRefExpr *e) {
159 if (CIRGenFunction::ConstantEmission constant = cgf.tryEmitAsConstant(e))
160 return cgf.emitScalarConstant(constant, e);
161
162 return emitLoadOfLValue(e);
163 }
164
165 mlir::Value VisitIntegerLiteral(const IntegerLiteral *e) {
166 mlir::Type type = cgf.convertType(e->getType());
167 return builder.create<cir::ConstantOp>(
168 cgf.getLoc(e->getExprLoc()), cir::IntAttr::get(type, e->getValue()));
169 }
170
171 mlir::Value VisitFloatingLiteral(const FloatingLiteral *e) {
172 mlir::Type type = cgf.convertType(e->getType());
173 assert(mlir::isa<cir::FPTypeInterface>(type) &&
174 "expect floating-point type");
175 return builder.create<cir::ConstantOp>(
176 cgf.getLoc(e->getExprLoc()), cir::FPAttr::get(type, e->getValue()));
177 }
178
179 mlir::Value VisitCharacterLiteral(const CharacterLiteral *e) {
180 mlir::Type ty = cgf.convertType(e->getType());
181 auto init = cir::IntAttr::get(ty, e->getValue());
182 return builder.create<cir::ConstantOp>(cgf.getLoc(e->getExprLoc()), init);
183 }
184
185 mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *e) {
186 return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
187 }
188
189 mlir::Value VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *e) {
190 if (e->getType()->isVoidType())
191 return {};
192
193 return emitNullValue(e->getType(), cgf.getLoc(e->getSourceRange()));
194 }
195
196 mlir::Value VisitOpaqueValueExpr(OpaqueValueExpr *e) {
197 if (e->isGLValue())
198 return emitLoadOfLValue(cgf.getOrCreateOpaqueLValueMapping(e),
199 e->getExprLoc());
200
201 // Otherwise, assume the mapping is the scalar directly.
202 return cgf.getOrCreateOpaqueRValueMapping(e).getValue();
203 }
204
205 mlir::Value VisitCastExpr(CastExpr *e);
206 mlir::Value VisitCallExpr(const CallExpr *e);
207
208 mlir::Value VisitStmtExpr(StmtExpr *e) {
209 CIRGenFunction::StmtExprEvaluation eval(cgf);
210 if (e->getType()->isVoidType()) {
211 (void)cgf.emitCompoundStmt(*e->getSubStmt());
212 return {};
213 }
214
215 Address retAlloca =
216 cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
217 (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca);
218
219 return cgf.emitLoadOfScalar(cgf.makeAddrLValue(retAlloca, e->getType()),
220 e->getExprLoc());
221 }
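  // For example (illustrative C snippet), a GNU statement expression such as
  //   int x = ({ int t = f(); t + 1; });
  // emits the compound statement into a temporary alloca and then loads that
  // temporary back as the scalar value of the whole expression; `f` here is
  // just a placeholder for any callable.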
222
223 mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
224 if (e->getBase()->getType()->isVectorType()) {
226
227 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
228 const mlir::Value vecValue = Visit(e->getBase());
229 const mlir::Value indexValue = Visit(e->getIdx());
230 return cgf.builder.create<cir::VecExtractOp>(loc, vecValue, indexValue);
231 }
232 // Just load the lvalue formed by the subscript expression.
233 return emitLoadOfLValue(e);
234 }
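  // For example (illustrative), with
  //   typedef int vi4 __attribute__((vector_size(16)));
  //   vi4 v; int i;
  // the expression `v[i]` takes the vector path above and becomes a
  // cir::VecExtractOp, while ordinary array subscripts fall through to the
  // lvalue load.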
235
236 mlir::Value VisitShuffleVectorExpr(ShuffleVectorExpr *e) {
237 if (e->getNumSubExprs() == 2) {
238 // The undocumented form of __builtin_shufflevector.
239 mlir::Value inputVec = Visit(e->getExpr(0));
240 mlir::Value indexVec = Visit(e->getExpr(1));
241 return cgf.builder.create<cir::VecShuffleDynamicOp>(
242 cgf.getLoc(e->getSourceRange()), inputVec, indexVec);
243 }
244
245 mlir::Value vec1 = Visit(e->getExpr(0));
246 mlir::Value vec2 = Visit(e->getExpr(1));
247
248 // The documented form of __builtin_shufflevector, where the indices are
249 // a variable number of integer constants. The constants will be stored
250 // in an ArrayAttr.
251 SmallVector<mlir::Attribute, 8> indices;
252 for (unsigned i = 2; i < e->getNumSubExprs(); ++i) {
253 indices.push_back(
254 cir::IntAttr::get(cgf.builder.getSInt64Ty(),
255 e->getExpr(i)
256 ->EvaluateKnownConstInt(cgf.getContext())
257 .getSExtValue()));
258 }
259
260 return cgf.builder.create<cir::VecShuffleOp>(
261 cgf.getLoc(e->getSourceRange()), cgf.convertType(e->getType()), vec1,
262 vec2, cgf.builder.getArrayAttr(indices));
263 }
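  // The two source forms handled above look like this (illustrative):
  //   __builtin_shufflevector(v, mask);           // 2 operands: dynamic mask
  //   __builtin_shufflevector(a, b, 0, 4, 1, 5);  // constant indices into a/b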
264
265 mlir::Value VisitConvertVectorExpr(ConvertVectorExpr *e) {
266 // __builtin_convertvector is an element-wise cast, and is implemented as a
267 // regular cast. The back end handles casts of vectors correctly.
268 return emitScalarConversion(Visit(e->getSrcExpr()),
269 e->getSrcExpr()->getType(), e->getType(),
270 e->getSourceRange().getBegin());
271 }
272
273 mlir::Value VisitMemberExpr(MemberExpr *e);
274
275 mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
276 return emitLoadOfLValue(e);
277 }
278
279 mlir::Value VisitInitListExpr(InitListExpr *e);
280
281 mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *e) {
282 return VisitCastExpr(e);
283 }
284
285 mlir::Value VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *e) {
286 return cgf.cgm.emitNullConstant(e->getType(),
287 cgf.getLoc(e->getSourceRange()));
288 }
289
290 /// Perform a pointer to boolean conversion.
291 mlir::Value emitPointerToBoolConversion(mlir::Value v, QualType qt) {
292 // TODO(cir): comparing the ptr to null is done when lowering CIR to LLVM.
293 // We might want to have a separate pass for these types of conversions.
294 return cgf.getBuilder().createPtrToBoolCast(v);
295 }
296
297 mlir::Value emitFloatToBoolConversion(mlir::Value src, mlir::Location loc) {
298 cir::BoolType boolTy = builder.getBoolTy();
299 return builder.create<cir::CastOp>(loc, boolTy,
300 cir::CastKind::float_to_bool, src);
301 }
302
303 mlir::Value emitIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) {
304 // Because of the type rules of C, we often end up computing a
305 // logical value, then zero extending it to int, then wanting it
306 // as a logical value again.
307 // TODO: optimize this common case here or leave it for later
308 // CIR passes?
309 cir::BoolType boolTy = builder.getBoolTy();
310 return builder.create<cir::CastOp>(loc, boolTy, cir::CastKind::int_to_bool,
311 srcVal);
312 }
313
314 /// Convert the specified expression value to a boolean (!cir.bool) truth
315 /// value. This is equivalent to "Val != 0".
316 mlir::Value emitConversionToBool(mlir::Value src, QualType srcType,
317 mlir::Location loc) {
318 assert(srcType.isCanonical() && "EmitScalarConversion strips typedefs");
319
320 if (srcType->isRealFloatingType())
321 return emitFloatToBoolConversion(src, loc);
322
323 if (llvm::isa<MemberPointerType>(srcType)) {
324 cgf.getCIRGenModule().errorNYI(loc, "member pointer to bool conversion");
325 return builder.getFalse(loc);
326 }
327
328 if (srcType->isIntegerType())
329 return emitIntToBoolConversion(src, loc);
330
331 assert(::mlir::isa<cir::PointerType>(src.getType()));
332 return emitPointerToBoolConversion(src, srcType);
333 }
334
335 // Emit a conversion from the specified type to the specified destination
336 // type, both of which are CIR scalar types.
337 struct ScalarConversionOpts {
338 bool treatBooleanAsSigned;
339 bool emitImplicitIntegerTruncationChecks;
340 bool emitImplicitIntegerSignChangeChecks;
341
342 ScalarConversionOpts()
343 : treatBooleanAsSigned(false),
344 emitImplicitIntegerTruncationChecks(false),
345 emitImplicitIntegerSignChangeChecks(false) {}
346
347 ScalarConversionOpts(clang::SanitizerSet sanOpts)
348 : treatBooleanAsSigned(false),
349 emitImplicitIntegerTruncationChecks(
350 sanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
351 emitImplicitIntegerSignChangeChecks(
352 sanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
353 };
354
355 // Conversion from bool, integral, or floating-point to integral or
356 // floating-point. Conversions involving other types are handled elsewhere.
357 // Conversion to bool is handled elsewhere because that's a comparison against
358 // zero, not a simple cast. This handles both individual scalars and vectors.
359 mlir::Value emitScalarCast(mlir::Value src, QualType srcType,
360 QualType dstType, mlir::Type srcTy,
361 mlir::Type dstTy, ScalarConversionOpts opts) {
362 assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
363 "Internal error: matrix types not handled by this function.");
364 assert(!(mlir::isa<mlir::IntegerType>(srcTy) ||
365 mlir::isa<mlir::IntegerType>(dstTy)) &&
366 "Obsolete code. Don't use mlir::IntegerType with CIR.");
367
368 mlir::Type fullDstTy = dstTy;
369 if (mlir::isa<cir::VectorType>(srcTy) &&
370 mlir::isa<cir::VectorType>(dstTy)) {
371 // Use the element types of the vectors to figure out the CastKind.
372 srcTy = mlir::dyn_cast<cir::VectorType>(srcTy).getElementType();
373 dstTy = mlir::dyn_cast<cir::VectorType>(dstTy).getElementType();
374 }
375
376 std::optional<cir::CastKind> castKind;
377
378 if (mlir::isa<cir::BoolType>(srcTy)) {
379 if (opts.treatBooleanAsSigned)
380 cgf.getCIRGenModule().errorNYI("signed bool");
381 if (cgf.getBuilder().isInt(dstTy))
382 castKind = cir::CastKind::bool_to_int;
383 else if (mlir::isa<cir::FPTypeInterface>(dstTy))
384 castKind = cir::CastKind::bool_to_float;
385 else
386 llvm_unreachable("Internal error: Cast to unexpected type");
387 } else if (cgf.getBuilder().isInt(srcTy)) {
388 if (cgf.getBuilder().isInt(dstTy))
389 castKind = cir::CastKind::integral;
390 else if (mlir::isa<cir::FPTypeInterface>(dstTy))
391 castKind = cir::CastKind::int_to_float;
392 else
393 llvm_unreachable("Internal error: Cast to unexpected type");
394 } else if (mlir::isa<cir::FPTypeInterface>(srcTy)) {
395 if (cgf.getBuilder().isInt(dstTy)) {
396 // If we can't recognize overflow as undefined behavior, assume that
397 // overflow saturates. This protects against normal optimizations if we
398 // are compiling with non-standard FP semantics.
399 if (!cgf.cgm.getCodeGenOpts().StrictFloatCastOverflow)
400 cgf.getCIRGenModule().errorNYI("strict float cast overflow");
402 castKind = cir::CastKind::float_to_int;
403 } else if (mlir::isa<cir::FPTypeInterface>(dstTy)) {
404 // TODO: split this to createFPExt/createFPTrunc
405 return builder.createFloatingCast(src, fullDstTy);
406 } else {
407 llvm_unreachable("Internal error: Cast to unexpected type");
408 }
409 } else {
410 llvm_unreachable("Internal error: Cast from unexpected type");
411 }
412
413 assert(castKind.has_value() && "Internal error: CastKind not set.");
414 return builder.create<cir::CastOp>(src.getLoc(), fullDstTy, *castKind, src);
415 }
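  // For example, the logic above selects cir::CastKind::int_to_float for an
  // int -> double conversion, cir::CastKind::bool_to_int for bool -> unsigned,
  // and falls back to createFloatingCast for float -> double.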
416
417 mlir::Value
418 VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
419 return Visit(e->getReplacement());
420 }
421
422 mlir::Value VisitVAArgExpr(VAArgExpr *ve) {
423 QualType ty = ve->getType();
424
425 if (ty->isVariablyModifiedType()) {
426 cgf.cgm.errorNYI(ve->getSourceRange(),
427 "variably modified types in varargs");
428 }
429
430 return cgf.emitVAArg(ve);
431 }
432
433 mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *e);
434 mlir::Value
435 VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);
436
437 // Unary Operators.
438 mlir::Value VisitUnaryPostDec(const UnaryOperator *e) {
439 LValue lv = cgf.emitLValue(e->getSubExpr());
440 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, false);
441 }
442 mlir::Value VisitUnaryPostInc(const UnaryOperator *e) {
443 LValue lv = cgf.emitLValue(e->getSubExpr());
444 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, false);
445 }
446 mlir::Value VisitUnaryPreDec(const UnaryOperator *e) {
447 LValue lv = cgf.emitLValue(e->getSubExpr());
448 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, true);
449 }
450 mlir::Value VisitUnaryPreInc(const UnaryOperator *e) {
451 LValue lv = cgf.emitLValue(e->getSubExpr());
452 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, true);
453 }
454 mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
455 cir::UnaryOpKind kind, bool isPre) {
456 if (cgf.getLangOpts().OpenMP)
457 cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec OpenMP");
458
459 QualType type = e->getSubExpr()->getType();
460
461 mlir::Value value;
462 mlir::Value input;
463
464 if (type->getAs<AtomicType>()) {
465 cgf.cgm.errorNYI(e->getSourceRange(), "Atomic inc/dec");
466 // TODO(cir): This is not correct, but it will produce reasonable code
467 // until atomic operations are implemented.
468 value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
469 input = value;
470 } else {
471 value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
472 input = value;
473 }
474
475 // NOTE: When possible, more frequent cases are handled first.
476
477 // Special case of integer increment that we have to check first: bool++.
478 // Due to promotion rules, we get:
479 // bool++ -> bool = bool + 1
480 // -> bool = (int)bool + 1
481 // -> bool = ((int)bool + 1 != 0)
482 // An interesting aspect of this is that increment is always true.
483 // Decrement does not have this property.
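    // For example, for `_Bool b; b++;` the stored value is simply the
    // constant true, whereas `b--` falls through to the general integer
    // handling below because its result depends on the original value.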
484 if (kind == cir::UnaryOpKind::Inc && type->isBooleanType()) {
485 value = builder.getTrue(cgf.getLoc(e->getExprLoc()));
486 } else if (type->isIntegerType()) {
487 QualType promotedType;
488 [[maybe_unused]] bool canPerformLossyDemotionCheck = false;
489 if (cgf.getContext().isPromotableIntegerType(type)) {
490 promotedType = cgf.getContext().getPromotedIntegerType(type);
491 assert(promotedType != type && "Shouldn't promote to the same type.");
492 canPerformLossyDemotionCheck = true;
493 canPerformLossyDemotionCheck &=
494 cgf.getContext().getCanonicalType(type) !=
495 cgf.getContext().getCanonicalType(promotedType);
496 canPerformLossyDemotionCheck &=
497 type->isIntegerType() && promotedType->isIntegerType();
498
499 // TODO(cir): Currently, we store bitwidths in CIR types only for
500 // integers. This might also be required for other types.
501
502 assert(
503 (!canPerformLossyDemotionCheck ||
504 type->isSignedIntegerOrEnumerationType() ||
505 promotedType->isSignedIntegerOrEnumerationType() ||
506 mlir::cast<cir::IntType>(cgf.convertType(type)).getWidth() ==
507 mlir::cast<cir::IntType>(cgf.convertType(promotedType)).getWidth()) &&
508 "The following check expects that if we do promotion to different "
509 "underlying canonical type, at least one of the types (either "
510 "base or promoted) will be signed, or the bitwidths will match.");
511 }
512
514 if (e->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
515 value = emitIncDecConsiderOverflowBehavior(e, value, kind);
516 } else {
517 cir::UnaryOpKind kind =
518 e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
519 // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
520 value = emitUnaryOp(e, kind, input, /*nsw=*/false);
521 }
522 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
523 QualType type = ptr->getPointeeType();
524 if (cgf.getContext().getAsVariableArrayType(type)) {
525 // VLA types don't have constant size.
526 cgf.cgm.errorNYI(e->getSourceRange(), "Pointer arithmetic on VLA");
527 return {};
528 } else if (type->isFunctionType()) {
529 // Arithmetic on function pointers (!) is just +-1.
530 cgf.cgm.errorNYI(e->getSourceRange(),
531 "Pointer arithmetic on function pointer");
532 return {};
533 } else {
534 // For everything else, we can just do a simple increment.
535 mlir::Location loc = cgf.getLoc(e->getSourceRange());
536 CIRGenBuilderTy &builder = cgf.getBuilder();
537 int amount = kind == cir::UnaryOpKind::Inc ? 1 : -1;
538 mlir::Value amt = builder.getSInt32(amount, loc);
540 value = builder.createPtrStride(loc, value, amt);
541 }
542 } else if (type->isVectorType()) {
543 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec vector");
544 return {};
545 } else if (type->isRealFloatingType()) {
547
548 if (type->isHalfType() &&
549 !cgf.getContext().getLangOpts().NativeHalfType) {
550 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec half");
551 return {};
552 }
553
554 if (mlir::isa<cir::SingleType, cir::DoubleType>(value.getType())) {
555 // Create the inc/dec operation.
556 // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
557 assert((kind == cir::UnaryOpKind::Inc ||
558 kind == cir::UnaryOpKind::Dec) && "Invalid UnaryOp kind");
559 value = emitUnaryOp(e, kind, value);
560 } else {
561 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fp type");
562 return {};
563 }
564 } else if (type->isFixedPointType()) {
565 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fixed point");
566 return {};
567 } else {
568 assert(type->castAs<ObjCObjectPointerType>());
569 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec ObjectiveC pointer");
570 return {};
571 }
572
573 CIRGenFunction::SourceLocRAIIObject sourceloc{
574 cgf, cgf.getLoc(e->getSourceRange())};
575
576 // Store the updated result through the lvalue
577 if (lv.isBitField())
578 return cgf.emitStoreThroughBitfieldLValue(RValue::get(value), lv);
579 else
580 cgf.emitStoreThroughLValue(RValue::get(value), lv);
581
582 // If this is a postinc, return the value read from memory, otherwise use
583 // the updated value.
584 return isPre ? value : input;
585 }
586
587 mlir::Value emitIncDecConsiderOverflowBehavior(const UnaryOperator *e,
588 mlir::Value inVal,
589 cir::UnaryOpKind kind) {
590 assert((kind == cir::UnaryOpKind::Inc ||
591 kind == cir::UnaryOpKind::Dec) && "Invalid UnaryOp kind");
592 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
593 case LangOptions::SOB_Defined:
594 return emitUnaryOp(e, kind, inVal, /*nsw=*/false);
595 case LangOptions::SOB_Undefined:
597 return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
598 case LangOptions::SOB_Trapping:
599 if (!e->canOverflow())
600 return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
601 cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec overflow SOB_Trapping");
602 return {};
603 }
604 llvm_unreachable("Unexpected signed overflow behavior kind");
605 }
606
607 mlir::Value VisitUnaryAddrOf(const UnaryOperator *e) {
608 if (llvm::isa<MemberPointerType>(e->getType())) {
609 cgf.cgm.errorNYI(e->getSourceRange(), "Address of member pointer");
610 return builder.getNullPtr(cgf.convertType(e->getType()),
611 cgf.getLoc(e->getExprLoc()));
612 }
613
614 return cgf.emitLValue(e->getSubExpr()).getPointer();
615 }
616
617 mlir::Value VisitUnaryDeref(const UnaryOperator *e) {
618 if (e->getType()->isVoidType())
619 return Visit(e->getSubExpr()); // the actual value should be unused
620 return emitLoadOfLValue(e);
621 }
622
623 mlir::Value VisitUnaryPlus(const UnaryOperator *e) {
624 QualType promotionType = getPromotionType(e->getSubExpr()->getType());
625 mlir::Value result =
626 emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Plus, promotionType);
627 if (result && !promotionType.isNull())
628 return emitUnPromotedValue(result, e->getType());
629 return result;
630 }
631
632 mlir::Value VisitUnaryMinus(const UnaryOperator *e) {
633 QualType promotionType = getPromotionType(e->getSubExpr()->getType());
634 mlir::Value result =
635 emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Minus, promotionType);
636 if (result && !promotionType.isNull())
637 return emitUnPromotedValue(result, e->getType());
638 return result;
639 }
640
641 mlir::Value emitUnaryPlusOrMinus(const UnaryOperator *e,
642 cir::UnaryOpKind kind,
643 QualType promotionType) {
644 ignoreResultAssign = false;
645 mlir::Value operand;
646 if (!promotionType.isNull())
647 operand = cgf.emitPromotedScalarExpr(e->getSubExpr(), promotionType);
648 else
649 operand = Visit(e->getSubExpr());
650
651 bool nsw =
652 kind == cir::UnaryOpKind::Minus && e->getType()->isSignedIntegerType();
653
654 // NOTE: LLVM codegen will lower this directly to either a FNeg
655 // or a Sub instruction. In CIR this will be handled later in LowerToLLVM.
656 return emitUnaryOp(e, kind, operand, nsw);
657 }
658
659 mlir::Value emitUnaryOp(const UnaryOperator *e, cir::UnaryOpKind kind,
660 mlir::Value input, bool nsw = false) {
661 return builder.create<cir::UnaryOp>(
662 cgf.getLoc(e->getSourceRange().getBegin()), input.getType(), kind,
663 input, nsw);
664 }
665
666 mlir::Value VisitUnaryNot(const UnaryOperator *e) {
667 ignoreResultAssign = false;
668 mlir::Value op = Visit(e->getSubExpr());
669 return emitUnaryOp(e, cir::UnaryOpKind::Not, op);
670 }
671
672 mlir::Value VisitUnaryLNot(const UnaryOperator *e);
673
674 mlir::Value VisitUnaryReal(const UnaryOperator *e);
675 mlir::Value VisitUnaryImag(const UnaryOperator *e);
676 mlir::Value VisitRealImag(const UnaryOperator *e,
677 QualType promotionType = QualType());
678
679 mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
680 CIRGenFunction::CXXDefaultInitExprScope scope(cgf, die);
681 return Visit(die->getExpr());
682 }
683
684 mlir::Value VisitCXXThisExpr(CXXThisExpr *te) { return cgf.loadCXXThis(); }
685
686 mlir::Value VisitExprWithCleanups(ExprWithCleanups *e);
687 mlir::Value VisitCXXNewExpr(const CXXNewExpr *e) {
688 return cgf.emitCXXNewExpr(e);
689 }
690
691 mlir::Value VisitCXXThrowExpr(const CXXThrowExpr *e) {
692 cgf.emitCXXThrowExpr(e);
693 return {};
694 }
695
696 /// Emit a conversion from the specified type to the specified destination
697 /// type, both of which are CIR scalar types.
698 /// TODO: do we need ScalarConversionOpts here? Should be done in another
699 /// pass.
700 mlir::Value
701 emitScalarConversion(mlir::Value src, QualType srcType, QualType dstType,
702 SourceLocation loc,
703 ScalarConversionOpts opts = ScalarConversionOpts()) {
704 // All conversions involving fixed point types should be handled by the
705 // emitFixedPoint family functions. This is done to prevent bloating up
706 // this function more, and although fixed point numbers are represented by
707 // integers, we do not want to follow any logic that assumes they should be
708 // treated as integers.
709 // TODO(leonardchan): When necessary, add another if statement checking for
710 // conversions to fixed point types from other types.
712 if (srcType->isFixedPointType() || dstType->isFixedPointType()) {
713 cgf.getCIRGenModule().errorNYI(loc, "fixed point conversions");
714 return {};
715 }
716
717 srcType = srcType.getCanonicalType();
718 dstType = dstType.getCanonicalType();
719 if (srcType == dstType) {
720 if (opts.emitImplicitIntegerSignChangeChecks)
721 cgf.getCIRGenModule().errorNYI(loc,
722 "implicit integer sign change checks");
723 return src;
724 }
725
726 if (dstType->isVoidType())
727 return {};
728
729 mlir::Type mlirSrcType = src.getType();
730
731 // Handle conversions to bool first, they are special: comparisons against
732 // 0.
733 if (dstType->isBooleanType())
734 return emitConversionToBool(src, srcType, cgf.getLoc(loc));
735
736 mlir::Type mlirDstType = cgf.convertType(dstType);
737
738 if (srcType->isHalfType() &&
739 !cgf.getContext().getLangOpts().NativeHalfType) {
740 // Cast to FP using the intrinsic if the half type itself isn't supported.
741 if (mlir::isa<cir::FPTypeInterface>(mlirDstType)) {
742 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
743 cgf.getCIRGenModule().errorNYI(loc,
744 "cast via llvm.convert.from.fp16");
745 } else {
746 // Cast to other types through float, using either the intrinsic or
747 // FPExt, depending on whether the half type itself is supported (as
748 // opposed to operations on half, available with NativeHalfType).
749 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
750 cgf.getCIRGenModule().errorNYI(loc,
751 "cast via llvm.convert.from.fp16");
752 // FIXME(cir): For now lets pretend we shouldn't use the conversion
753 // intrinsics and insert a cast here unconditionally.
754 src = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, src,
755 cgf.FloatTy);
756 srcType = cgf.getContext().FloatTy;
757 mlirSrcType = cgf.FloatTy;
758 }
759 }
760
761 // TODO(cir): LLVM codegen ignores conversions like int -> uint,
762 // is there anything to be done for CIR here?
763 if (mlirSrcType == mlirDstType) {
764 if (opts.emitImplicitIntegerSignChangeChecks)
765 cgf.getCIRGenModule().errorNYI(loc,
766 "implicit integer sign change checks");
767 return src;
768 }
769
770 // Handle pointer conversions next: pointers can only be converted to/from
771 // other pointers and integers. Check for pointer types in terms of LLVM, as
772 // some native types (like Obj-C id) may map to a pointer type.
773 if (auto dstPT = dyn_cast<cir::PointerType>(mlirDstType)) {
774 cgf.getCIRGenModule().errorNYI(loc, "pointer casts");
775 return builder.getNullPtr(dstPT, src.getLoc());
776 }
777
778 if (isa<cir::PointerType>(mlirSrcType)) {
779 // Must be a ptr to int cast.
780 assert(isa<cir::IntType>(mlirDstType) && "not ptr->int?");
781 return builder.createPtrToInt(src, mlirDstType);
782 }
783
784 // A scalar can be splatted to an extended vector of the same element type
785 if (dstType->isExtVectorType() && !srcType->isVectorType()) {
786 // Sema should add casts to make sure that the source expression's type
787 // is the same as the vector's element type (sans qualifiers)
788 assert(dstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
789 srcType.getTypePtr() &&
790 "Splatted expr doesn't match with vector element type?");
791
792 cgf.getCIRGenModule().errorNYI(loc, "vector splatting");
793 return {};
794 }
795
796 if (srcType->isMatrixType() && dstType->isMatrixType()) {
797 cgf.getCIRGenModule().errorNYI(loc,
798 "matrix type to matrix type conversion");
799 return {};
800 }
801 assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
802 "Internal error: conversion between matrix type and scalar type");
803
804 // Finally, we have the arithmetic types or vectors of arithmetic types.
805 mlir::Value res = nullptr;
806 mlir::Type resTy = mlirDstType;
807
808 res = emitScalarCast(src, srcType, dstType, mlirSrcType, mlirDstType, opts);
809
810 if (mlirDstType != resTy) {
811 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
812 cgf.getCIRGenModule().errorNYI(loc, "cast via llvm.convert.to.fp16");
813 }
814 // FIXME(cir): For now we never use FP16 conversion intrinsics even if
815 // required by the target. Change that once this is implemented
816 res = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, res,
817 resTy);
818 }
819
820 if (opts.emitImplicitIntegerTruncationChecks)
821 cgf.getCIRGenModule().errorNYI(loc, "implicit integer truncation checks");
822
823 if (opts.emitImplicitIntegerSignChangeChecks)
824 cgf.getCIRGenModule().errorNYI(loc,
825 "implicit integer sign change checks");
826
827 return res;
828 }
829
830 BinOpInfo emitBinOps(const BinaryOperator *e,
831 QualType promotionType = QualType()) {
832 BinOpInfo result;
833 result.lhs = cgf.emitPromotedScalarExpr(e->getLHS(), promotionType);
834 result.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionType);
835 if (!promotionType.isNull())
836 result.fullType = promotionType;
837 else
838 result.fullType = e->getType();
839 result.compType = result.fullType;
840 if (const auto *vecType = dyn_cast_or_null<VectorType>(result.fullType)) {
841 result.compType = vecType->getElementType();
842 }
843 result.opcode = e->getOpcode();
844 result.loc = e->getSourceRange();
845 // TODO(cir): Result.FPFeatures
847 result.e = e;
848 return result;
849 }
850
851 mlir::Value emitMul(const BinOpInfo &ops);
852 mlir::Value emitDiv(const BinOpInfo &ops);
853 mlir::Value emitRem(const BinOpInfo &ops);
854 mlir::Value emitAdd(const BinOpInfo &ops);
855 mlir::Value emitSub(const BinOpInfo &ops);
856 mlir::Value emitShl(const BinOpInfo &ops);
857 mlir::Value emitShr(const BinOpInfo &ops);
858 mlir::Value emitAnd(const BinOpInfo &ops);
859 mlir::Value emitXor(const BinOpInfo &ops);
860 mlir::Value emitOr(const BinOpInfo &ops);
861
862 LValue emitCompoundAssignLValue(
863 const CompoundAssignOperator *e,
864 mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &),
865 mlir::Value &result);
866 mlir::Value
867 emitCompoundAssign(const CompoundAssignOperator *e,
868 mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &));
869
870 // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM
871 // codegen.
872 QualType getPromotionType(QualType ty) {
873 const clang::ASTContext &ctx = cgf.getContext();
874 if (auto *complexTy = ty->getAs<ComplexType>()) {
875 QualType elementTy = complexTy->getElementType();
876 if (elementTy.UseExcessPrecision(ctx))
877 return ctx.getComplexType(ctx.FloatTy);
878 }
879
880 if (ty.UseExcessPrecision(cgf.getContext())) {
881 if (auto *vt = ty->getAs<VectorType>()) {
882 unsigned numElements = vt->getNumElements();
883 return ctx.getVectorType(ctx.FloatTy, numElements, vt->getVectorKind());
884 }
885 return cgf.getContext().FloatTy;
886 }
887
888 return QualType();
889 }
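  // For example, when _Float16 is evaluated with excess precision
  // (UseExcessPrecision returns true), `_Float16 a, b; a + b` is promoted to
  // float here; callers such as the HANDLEBINOP visitors below then truncate
  // the result back via emitUnPromotedValue.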
890
891// Binary operators and binary compound assignment operators.
892#define HANDLEBINOP(OP) \
893 mlir::Value VisitBin##OP(const BinaryOperator *e) { \
894 QualType promotionTy = getPromotionType(e->getType()); \
895 auto result = emit##OP(emitBinOps(e, promotionTy)); \
896 if (result && !promotionTy.isNull()) \
897 result = emitUnPromotedValue(result, e->getType()); \
898 return result; \
899 } \
900 mlir::Value VisitBin##OP##Assign(const CompoundAssignOperator *e) { \
901 return emitCompoundAssign(e, &ScalarExprEmitter::emit##OP); \
902 }
903
904 HANDLEBINOP(Mul)
905 HANDLEBINOP(Div)
906 HANDLEBINOP(Rem)
907 HANDLEBINOP(Add)
908 HANDLEBINOP(Sub)
909 HANDLEBINOP(Shl)
910 HANDLEBINOP(Shr)
911 HANDLEBINOP(And)
912 HANDLEBINOP(Xor)
913 HANDLEBINOP(Or)
914#undef HANDLEBINOP
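  // As a concrete illustration, HANDLEBINOP(Mul) expands to roughly:
  //
  //   mlir::Value VisitBinMul(const BinaryOperator *e) {
  //     QualType promotionTy = getPromotionType(e->getType());
  //     auto result = emitMul(emitBinOps(e, promotionTy));
  //     if (result && !promotionTy.isNull())
  //       result = emitUnPromotedValue(result, e->getType());
  //     return result;
  //   }
  //   mlir::Value VisitBinMulAssign(const CompoundAssignOperator *e) {
  //     return emitCompoundAssign(e, &ScalarExprEmitter::emitMul);
  //   }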
915
916 mlir::Value emitCmp(const BinaryOperator *e) {
917 const mlir::Location loc = cgf.getLoc(e->getExprLoc());
918 mlir::Value result;
919 QualType lhsTy = e->getLHS()->getType();
920 QualType rhsTy = e->getRHS()->getType();
921
922 auto clangCmpToCIRCmp =
923 [](clang::BinaryOperatorKind clangCmp) -> cir::CmpOpKind {
924 switch (clangCmp) {
925 case BO_LT:
926 return cir::CmpOpKind::lt;
927 case BO_GT:
928 return cir::CmpOpKind::gt;
929 case BO_LE:
930 return cir::CmpOpKind::le;
931 case BO_GE:
932 return cir::CmpOpKind::ge;
933 case BO_EQ:
934 return cir::CmpOpKind::eq;
935 case BO_NE:
936 return cir::CmpOpKind::ne;
937 default:
938 llvm_unreachable("unsupported comparison kind for cir.cmp");
939 }
940 };
941
942 cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
943 if (lhsTy->getAs<MemberPointerType>()) {
945 assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
946 mlir::Value lhs = cgf.emitScalarExpr(e->getLHS());
947 mlir::Value rhs = cgf.emitScalarExpr(e->getRHS());
948 result = builder.createCompare(loc, kind, lhs, rhs);
949 } else if (!lhsTy->isAnyComplexType() && !rhsTy->isAnyComplexType()) {
950 BinOpInfo boInfo = emitBinOps(e);
951 mlir::Value lhs = boInfo.lhs;
952 mlir::Value rhs = boInfo.rhs;
953
954 if (lhsTy->isVectorType()) {
955 if (!e->getType()->isVectorType()) {
956 // If AltiVec, the comparison results in a numeric type, so we use
957 // intrinsics comparing vectors and giving 0 or 1 as a result
958 cgf.cgm.errorNYI(loc, "AltiVec comparison");
959 } else {
960 // Other kinds of vectors. Element-wise comparison returning
961 // a vector.
962 result = builder.create<cir::VecCmpOp>(
963 cgf.getLoc(boInfo.loc), cgf.convertType(boInfo.fullType), kind,
964 boInfo.lhs, boInfo.rhs);
965 }
966 } else if (boInfo.isFixedPointOp()) {
968 cgf.cgm.errorNYI(loc, "fixed point comparisons");
969 result = builder.getBool(false, loc);
970 } else {
971 // integers and pointers
972 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers &&
973 mlir::isa<cir::PointerType>(lhs.getType()) &&
974 mlir::isa<cir::PointerType>(rhs.getType())) {
975 cgf.cgm.errorNYI(loc, "strict vtable pointer comparisons");
976 }
977
978 cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
979 result = builder.createCompare(loc, kind, lhs, rhs);
980 }
981 } else {
982 // Complex Comparison: can only be an equality comparison.
983 assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
984
985 BinOpInfo boInfo = emitBinOps(e);
986 result = builder.create<cir::CmpOp>(loc, kind, boInfo.lhs, boInfo.rhs);
987 }
988
989 return emitScalarConversion(result, cgf.getContext().BoolTy, e->getType(),
990 e->getExprLoc());
991 }
992
993// Comparisons.
994#define VISITCOMP(CODE) \
995 mlir::Value VisitBin##CODE(const BinaryOperator *E) { return emitCmp(E); }
996 VISITCOMP(LT)
997 VISITCOMP(GT)
998 VISITCOMP(LE)
999 VISITCOMP(GE)
1000 VISITCOMP(EQ)
1001 VISITCOMP(NE)
1002#undef VISITCOMP
1003
1004 mlir::Value VisitBinAssign(const BinaryOperator *e) {
1005 const bool ignore = std::exchange(ignoreResultAssign, false);
1006
1007 mlir::Value rhs;
1008 LValue lhs;
1009
1010 switch (e->getLHS()->getType().getObjCLifetime()) {
1016 break;
1018 // __block variables need to have the rhs evaluated first, plus this
1019 // should improve codegen just a little.
1020 rhs = Visit(e->getRHS());
1022 // TODO(cir): This needs to be emitCheckedLValue() once we support
1023 // sanitizers
1024 lhs = cgf.emitLValue(e->getLHS());
1025
1026 // Store the value into the LHS. Bit-fields are handled specially because
1027 // the result is altered by the store, i.e., [C99 6.5.16p1]
1028 // 'An assignment expression has the value of the left operand after the
1029 // assignment...'.
1030 if (lhs.isBitField()) {
1031 rhs = cgf.emitStoreThroughBitfieldLValue(RValue::get(rhs), lhs);
1032 } else {
1033 cgf.emitNullabilityCheck(lhs, rhs, e->getExprLoc());
1034 CIRGenFunction::SourceLocRAIIObject sourceloc{
1035 cgf, cgf.getLoc(e->getSourceRange())};
1036 cgf.emitStoreThroughLValue(RValue::get(rhs), lhs);
1037 }
1038 }
1039
1040 // If the result is clearly ignored, return now.
1041 if (ignore)
1042 return nullptr;
1043
1044 // The result of an assignment in C is the assigned r-value.
1045 if (!cgf.getLangOpts().CPlusPlus)
1046 return rhs;
1047
1048 // If the lvalue is non-volatile, return the computed value of the
1049 // assignment.
1050 if (!lhs.isVolatile())
1051 return rhs;
1052
1053 // Otherwise, reload the value.
1054 return emitLoadOfLValue(lhs, e->getExprLoc());
1055 }
1056
1057 mlir::Value VisitBinComma(const BinaryOperator *e) {
1058 cgf.emitIgnoredExpr(e->getLHS());
1059 // NOTE: We don't need to EnsureInsertPoint() like LLVM codegen.
1060 return Visit(e->getRHS());
1061 }
1062
1063 mlir::Value VisitBinLAnd(const clang::BinaryOperator *e) {
1064 if (e->getType()->isVectorType()) {
1065 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1066 auto vecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
1067 mlir::Value zeroValue = builder.getNullValue(vecTy.getElementType(), loc);
1068 SmallVector<mlir::Value, 16> elements(vecTy.getSize(), zeroValue);
1069 auto zeroVec = cir::VecCreateOp::create(builder, loc, vecTy, elements);
1070
1071 mlir::Value lhs = Visit(e->getLHS());
1072 mlir::Value rhs = Visit(e->getRHS());
1073
1074 auto cmpOpKind = cir::CmpOpKind::ne;
1075 lhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, lhs, zeroVec);
1076 rhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, rhs, zeroVec);
1077 mlir::Value vecAnd = builder.createAnd(loc, lhs, rhs);
1078 return builder.createIntCast(vecAnd, vecTy);
1079 }
1080
1082 mlir::Type resTy = cgf.convertType(e->getType());
1083 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1084
1085 CIRGenFunction::ConditionalEvaluation eval(cgf);
1086
1087 mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
1088 auto resOp = builder.create<cir::TernaryOp>(
1089 loc, lhsCondV, /*trueBuilder=*/
1090 [&](mlir::OpBuilder &b, mlir::Location loc) {
1091 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1092 b.getInsertionBlock()};
1093 cgf.curLexScope->setAsTernary();
1094 b.create<cir::YieldOp>(loc, cgf.evaluateExprAsBool(e->getRHS()));
1095 },
1096 /*falseBuilder*/
1097 [&](mlir::OpBuilder &b, mlir::Location loc) {
1098 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1099 b.getInsertionBlock()};
1100 cgf.curLexScope->setAsTernary();
1101 auto res = b.create<cir::ConstantOp>(loc, builder.getFalseAttr());
1102 b.create<cir::YieldOp>(loc, res.getRes());
1103 });
1104 return maybePromoteBoolResult(resOp.getResult(), resTy);
1105 }
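  // Note: the scalar path of VisitBinLAnd above emits `a && b` as the ternary
  // `a ? (b != 0) : false`, with each operand evaluated in its own region so
  // the RHS only runs when the LHS is true.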
1106
1107 mlir::Value VisitBinLOr(const clang::BinaryOperator *e) {
1108 if (e->getType()->isVectorType()) {
1109 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1110 auto vecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
1111 mlir::Value zeroValue = builder.getNullValue(vecTy.getElementType(), loc);
1112 SmallVector<mlir::Value, 16> elements(vecTy.getSize(), zeroValue);
1113 auto zeroVec = cir::VecCreateOp::create(builder, loc, vecTy, elements);
1114
1115 mlir::Value lhs = Visit(e->getLHS());
1116 mlir::Value rhs = Visit(e->getRHS());
1117
1118 auto cmpOpKind = cir::CmpOpKind::ne;
1119 lhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, lhs, zeroVec);
1120 rhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, rhs, zeroVec);
1121 mlir::Value vecOr = builder.createOr(loc, lhs, rhs);
1122 return builder.createIntCast(vecOr, vecTy);
1123 }
1124
1126 mlir::Type resTy = cgf.convertType(e->getType());
1127 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1128
1129 CIRGenFunction::ConditionalEvaluation eval(cgf);
1130
1131 mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
1132 auto resOp = builder.create<cir::TernaryOp>(
1133 loc, lhsCondV, /*trueBuilder=*/
1134 [&](mlir::OpBuilder &b, mlir::Location loc) {
1135 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1136 b.getInsertionBlock()};
1137 cgf.curLexScope->setAsTernary();
1138 auto res = b.create<cir::ConstantOp>(loc, builder.getTrueAttr());
1139 b.create<cir::YieldOp>(loc, res.getRes());
1140 },
1141 /*falseBuilder*/
1142 [&](mlir::OpBuilder &b, mlir::Location loc) {
1143 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1144 b.getInsertionBlock()};
1145 cgf.curLexScope->setAsTernary();
1146 b.create<cir::YieldOp>(loc, cgf.evaluateExprAsBool(e->getRHS()));
1147 });
1148
1149 return maybePromoteBoolResult(resOp.getResult(), resTy);
1150 }
1151
1152 mlir::Value VisitAtomicExpr(AtomicExpr *e) {
1153 return cgf.emitAtomicExpr(e).getValue();
1154 }
1155};
1156
1157LValue ScalarExprEmitter::emitCompoundAssignLValue(
1158 const CompoundAssignOperator *e,
1159 mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &),
1160 mlir::Value &result) {
1161 if (e->getComputationResultType()->isAnyComplexType())
1162 return cgf.emitScalarCompoundAssignWithComplex(e, result);
1163
1164 QualType lhsTy = e->getLHS()->getType();
1165 BinOpInfo opInfo;
1166
1167 // Emit the RHS first. __block variables need to have the rhs evaluated
1168 // first, plus this should improve codegen a little.
1169
1170 QualType promotionTypeCR = getPromotionType(e->getComputationResultType());
1171 if (promotionTypeCR.isNull())
1172 promotionTypeCR = e->getComputationResultType();
1173
1174 QualType promotionTypeLHS = getPromotionType(e->getComputationLHSType());
1175 QualType promotionTypeRHS = getPromotionType(e->getRHS()->getType());
1176
1177 if (!promotionTypeRHS.isNull())
1178 opInfo.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS);
1179 else
1180 opInfo.rhs = Visit(e->getRHS());
1181
1182 opInfo.fullType = promotionTypeCR;
1183 opInfo.compType = opInfo.fullType;
1184 if (const auto *vecType = dyn_cast_or_null<VectorType>(opInfo.fullType))
1185 opInfo.compType = vecType->getElementType();
1186 opInfo.opcode = e->getOpcode();
1187 opInfo.fpfeatures = e->getFPFeaturesInEffect(cgf.getLangOpts());
1188 opInfo.e = e;
1189 opInfo.loc = e->getSourceRange();
1190
1191 // Load/convert the LHS
1192 LValue lhsLV = cgf.emitLValue(e->getLHS());
1193
1194 if (lhsTy->getAs<AtomicType>()) {
1195 cgf.cgm.errorNYI(result.getLoc(), "atomic lvalue assign");
1196 return LValue();
1197 }
1198
1199 opInfo.lhs = emitLoadOfLValue(lhsLV, e->getExprLoc());
1200
1201 CIRGenFunction::SourceLocRAIIObject sourceloc{
1202 cgf, cgf.getLoc(e->getSourceRange())};
1203 SourceLocation loc = e->getExprLoc();
1204 if (!promotionTypeLHS.isNull())
1205 opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy, promotionTypeLHS, loc);
1206 else
1207 opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy,
1208 e->getComputationLHSType(), loc);
1209
1210 // Expand the binary operator.
1211 result = (this->*func)(opInfo);
1212
1213 // Convert the result back to the LHS type,
1214 // potentially with Implicit Conversion sanitizer check.
1215 result = emitScalarConversion(result, promotionTypeCR, lhsTy, loc,
1216 ScalarConversionOpts(cgf.sanOpts));
1217
1218 // Store the result value into the LHS lvalue. Bit-fields are handled
1219 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
1220 // 'An assignment expression has the value of the left operand after the
1221 // assignment...'.
1222 if (lhsLV.isBitField())
1223 cgf.emitStoreThroughBitfieldLValue(RValue::get(result), lhsLV);
1224 else
1225 cgf.emitStoreThroughLValue(RValue::get(result), lhsLV);
1226
1227 if (cgf.getLangOpts().OpenMP)
1228 cgf.cgm.errorNYI(e->getSourceRange(), "openmp");
1229
1230 return lhsLV;
1231}
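// For example (illustrative), for `short s; float f; s += f;` the sequence
// above loads s, converts it to the computation type (float), performs the
// addition in float, converts the result back to short using
// ScalarConversionOpts(cgf.sanOpts), and stores it through the original
// lvalue.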
1232
1233mlir::Value ScalarExprEmitter::emitComplexToScalarConversion(mlir::Location loc,
1234 mlir::Value value,
1235 CastKind kind,
1236 QualType destTy) {
1237 cir::CastKind castOpKind;
1238 switch (kind) {
1239 case CK_FloatingComplexToReal:
1240 castOpKind = cir::CastKind::float_complex_to_real;
1241 break;
1242 case CK_IntegralComplexToReal:
1243 castOpKind = cir::CastKind::int_complex_to_real;
1244 break;
1245 case CK_FloatingComplexToBoolean:
1246 castOpKind = cir::CastKind::float_complex_to_bool;
1247 break;
1248 case CK_IntegralComplexToBoolean:
1249 castOpKind = cir::CastKind::int_complex_to_bool;
1250 break;
1251 default:
1252 llvm_unreachable("invalid complex-to-scalar cast kind");
1253 }
1254
1255 return builder.createCast(loc, castOpKind, value, cgf.convertType(destTy));
1256}
1257
1258mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e,
1259 QualType promotionType) {
1260 e = e->IgnoreParens();
1261 if (const auto *bo = dyn_cast<BinaryOperator>(e)) {
1262 switch (bo->getOpcode()) {
1263#define HANDLE_BINOP(OP) \
1264 case BO_##OP: \
1265 return emit##OP(emitBinOps(bo, promotionType));
1266 HANDLE_BINOP(Add)
1267 HANDLE_BINOP(Sub)
1268 HANDLE_BINOP(Mul)
1269 HANDLE_BINOP(Div)
1270#undef HANDLE_BINOP
1271 default:
1272 break;
1273 }
1274 } else if (const auto *uo = dyn_cast<UnaryOperator>(e)) {
1275 switch (uo->getOpcode()) {
1276 case UO_Imag:
1277 cgf.cgm.errorNYI(e->getSourceRange(),
1278 "ScalarExprEmitter::emitPromoted unary imag");
1279 return {};
1280 case UO_Real:
1281 cgf.cgm.errorNYI(e->getSourceRange(),
1282 "ScalarExprEmitter::emitPromoted unary real");
1283 return {};
1284 case UO_Minus:
1285 return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Minus, promotionType);
1286 case UO_Plus:
1287 return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Plus, promotionType);
1288 default:
1289 break;
1290 }
1291 }
1292 mlir::Value result = Visit(const_cast<Expr *>(e));
1293 if (result) {
1294 if (!promotionType.isNull())
1295 return emitPromotedValue(result, promotionType);
1296 return emitUnPromotedValue(result, e->getType());
1297 }
1298 return result;
1299}
1300
1301mlir::Value ScalarExprEmitter::emitCompoundAssign(
1302 const CompoundAssignOperator *e,
1303 mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &)) {
1304
1305 bool ignore = std::exchange(ignoreResultAssign, false);
1306 mlir::Value rhs;
1307 LValue lhs = emitCompoundAssignLValue(e, func, rhs);
1308
1309 // If the result is clearly ignored, return now.
1310 if (ignore)
1311 return {};
1312
1313 // The result of an assignment in C is the assigned r-value.
1314 if (!cgf.getLangOpts().CPlusPlus)
1315 return rhs;
1316
1317 // If the lvalue is non-volatile, return the computed value of the assignment.
1318 if (!lhs.isVolatile())
1319 return rhs;
1320
1321 // Otherwise, reload the value.
1322 return emitLoadOfLValue(lhs, e->getExprLoc());
1323}
1324
1325mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *e) {
1326 mlir::Location scopeLoc = cgf.getLoc(e->getSourceRange());
1327 mlir::OpBuilder &builder = cgf.builder;
1328
1329 auto scope = cir::ScopeOp::create(
1330 builder, scopeLoc,
1331 /*scopeBuilder=*/
1332 [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) {
1333 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1334 builder.getInsertionBlock()};
1335 mlir::Value scopeYieldVal = Visit(e->getSubExpr());
1336 if (scopeYieldVal) {
1337 // Defend against dominance problems caused by jumps out of expression
1338 // evaluation through the shared cleanup block.
1339 lexScope.forceCleanup();
1340 cir::YieldOp::create(builder, loc, scopeYieldVal);
1341 yieldTy = scopeYieldVal.getType();
1342 }
1343 });
1344
1345 return scope.getNumResults() > 0 ? scope->getResult(0) : nullptr;
1346}
1347
1348} // namespace
1349
1350LValue
1351CIRGenFunction::emitCompoundAssignmentLValue(const CompoundAssignOperator *e) {
1352 ScalarExprEmitter emitter(*this, builder);
1353 mlir::Value result;
1354 switch (e->getOpcode()) {
1355#define COMPOUND_OP(Op) \
1356 case BO_##Op##Assign: \
1357 return emitter.emitCompoundAssignLValue(e, &ScalarExprEmitter::emit##Op, \
1358 result)
1359 COMPOUND_OP(Mul);
1360 COMPOUND_OP(Div);
1361 COMPOUND_OP(Rem);
1362 COMPOUND_OP(Add);
1363 COMPOUND_OP(Sub);
1364 COMPOUND_OP(Shl);
1365 COMPOUND_OP(Shr);
1366 COMPOUND_OP(And);
1367 COMPOUND_OP(Xor);
1368 COMPOUND_OP(Or);
1369#undef COMPOUND_OP
1370
1371 case BO_PtrMemD:
1372 case BO_PtrMemI:
1373 case BO_Mul:
1374 case BO_Div:
1375 case BO_Rem:
1376 case BO_Add:
1377 case BO_Sub:
1378 case BO_Shl:
1379 case BO_Shr:
1380 case BO_LT:
1381 case BO_GT:
1382 case BO_LE:
1383 case BO_GE:
1384 case BO_EQ:
1385 case BO_NE:
1386 case BO_Cmp:
1387 case BO_And:
1388 case BO_Xor:
1389 case BO_Or:
1390 case BO_LAnd:
1391 case BO_LOr:
1392 case BO_Assign:
1393 case BO_Comma:
1394 llvm_unreachable("Not valid compound assignment operators");
1395 }
1396 llvm_unreachable("Unhandled compound assignment operator");
1397}
1398
1399/// Emit the computation of the specified expression of scalar type.
1400mlir::Value CIRGenFunction::emitScalarExpr(const Expr *e) {
1401 assert(e && hasScalarEvaluationKind(e->getType()) &&
1402 "Invalid scalar expression to emit");
1403
1404 return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
1405}
1406
1407mlir::Value CIRGenFunction::emitPromotedScalarExpr(const Expr *e,
1408 QualType promotionType) {
1409 if (!promotionType.isNull())
1410 return ScalarExprEmitter(*this, builder).emitPromoted(e, promotionType);
1411 return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
1412}
1413
1414[[maybe_unused]] static bool mustVisitNullValue(const Expr *e) {
1415 // If a null pointer expression's type is the C++0x nullptr_t and
1416 // the expression is not a simple literal, it must be evaluated
1417 // for its potential side effects.
1419 return false;
1420 return e->getType()->isNullPtrType();
1421}
1422
1423/// If \p e is a widened promoted integer, get its base (unpromoted) type.
1424static std::optional<QualType>
1425getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e) {
1426 const Expr *base = e->IgnoreImpCasts();
1427 if (e == base)
1428 return std::nullopt;
1429
1430 QualType baseTy = base->getType();
1431 if (!astContext.isPromotableIntegerType(baseTy) ||
1432 astContext.getTypeSize(baseTy) >= astContext.getTypeSize(e->getType()))
1433 return std::nullopt;
1434
1435 return baseTy;
1436}
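// For example, in `short a, b; a + b` each operand reaches the binop wrapped
// in an implicit promotion to int; this helper looks through that cast and
// returns the unpromoted base type (short), and returns std::nullopt when the
// operand was not widened.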
1437
1438/// Check if \p e is a widened promoted integer.
1439[[maybe_unused]] static bool isWidenedIntegerOp(const ASTContext &astContext,
1440 const Expr *e) {
1441 return getUnwidenedIntegerType(astContext, e).has_value();
1442}
1443
1444/// Check if we can skip the overflow check for \p Op.
1445[[maybe_unused]] static bool canElideOverflowCheck(const ASTContext &astContext,
1446 const BinOpInfo &op) {
1447 assert((isa<UnaryOperator>(op.e) || isa<BinaryOperator>(op.e)) &&
1448 "Expected a unary or binary operator");
1449
1450 // If the binop has constant inputs and we can prove there is no overflow,
1451 // we can elide the overflow check.
1452 if (!op.mayHaveIntegerOverflow())
1453 return true;
1454
1455 // If a unary op has a widened operand, the op cannot overflow.
1456 if (const auto *uo = dyn_cast<UnaryOperator>(op.e))
1457 return !uo->canOverflow();
1458
1459 // We usually don't need overflow checks for binops with widened operands.
1460 // Multiplication with promoted unsigned operands is a special case.
1461 const auto *bo = cast<BinaryOperator>(op.e);
1462 std::optional<QualType> optionalLHSTy =
1463 getUnwidenedIntegerType(astContext, bo->getLHS());
1464 if (!optionalLHSTy)
1465 return false;
1466
1467 std::optional<QualType> optionalRHSTy =
1468 getUnwidenedIntegerType(astContext, bo->getRHS());
1469 if (!optionalRHSTy)
1470 return false;
1471
1472 QualType lhsTy = *optionalLHSTy;
1473 QualType rhsTy = *optionalRHSTy;
1474
1475 // This is the simple case: binops without unsigned multiplication, and with
1476 // widened operands. No overflow check is needed here.
1477 if ((op.opcode != BO_Mul && op.opcode != BO_MulAssign) ||
1478 !lhsTy->isUnsignedIntegerType() || !rhsTy->isUnsignedIntegerType())
1479 return true;
1480
1481 // For unsigned multiplication the overflow check can be elided if either one
1482 // of the unpromoted types are less than half the size of the promoted type.
1483 unsigned promotedSize = astContext.getTypeSize(op.e->getType());
1484 return (2 * astContext.getTypeSize(lhsTy)) < promotedSize ||
1485 (2 * astContext.getTypeSize(rhsTy)) < promotedSize;
1486}
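// Worked example (assuming 32-bit int): for `unsigned char x, y; x * y` both
// operands are promoted to int, and 2 * 8 == 16 < 32, so unsigned-overflow
// checks can be elided. For `unsigned short x, y; x * y`, 2 * 16 == 32 is not
// less than 32, so the check must be kept.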
1487
1488/// Emit pointer + index arithmetic.
1489static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf,
1490 const BinOpInfo &op,
1491 bool isSubtraction) {
1492 // Must have binary (not unary) expr here. Unary pointer
1493 // increment/decrement doesn't use this path.
1494 const BinaryOperator *expr = cast<BinaryOperator>(op.e);
1495
1496 mlir::Value pointer = op.lhs;
1497 Expr *pointerOperand = expr->getLHS();
1498 mlir::Value index = op.rhs;
1499 Expr *indexOperand = expr->getRHS();
1500
1501 // In the case of subtraction, the FE has ensured that the LHS is always the
1502 // pointer. However, addition can have the pointer on either side. We will
1503 // always have a pointer operand and an integer operand, so if the LHS wasn't
1504 // a pointer, we need to swap our values.
1505 if (!isSubtraction && !mlir::isa<cir::PointerType>(pointer.getType())) {
1506 std::swap(pointer, index);
1507 std::swap(pointerOperand, indexOperand);
1508 }
1509 assert(mlir::isa<cir::PointerType>(pointer.getType()) &&
1510 "Need a pointer operand");
1511 assert(mlir::isa<cir::IntType>(index.getType()) && "Need an integer operand");
1512
1513 // Some versions of glibc and gcc use idioms (particularly in their malloc
1514 // routines) that add a pointer-sized integer (known to be a pointer value)
1515 // to a null pointer in order to cast the value back to an integer or as
1516 // part of a pointer alignment algorithm. This is undefined behavior, but
1517 // we'd like to be able to compile programs that use it.
1518 //
1519 // Normally, we'd generate a GEP with a null-pointer base here in response
1520 // to that code, but it's also UB to dereference a pointer created that
1521 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
1522 // generate a direct cast of the integer value to a pointer.
1523 //
1524 // The idiom (p = nullptr + N) is not met if any of the following are true:
1525 //
1526 // The operation is subtraction.
1527 // The index is not pointer-sized.
1528 // The pointer type is not byte-sized.
1529 //
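  // For example (illustrative), the tolerated idiom is code like
  //   char *p = (char *)0 + n;   // with n pointer-sized; UB, but emitted as
  //                              // a direct int-to-ptr cast of n.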
1530 if (BinaryOperator::isNullPointerArithmeticExtension(
1531 cgf.getContext(), op.opcode, expr->getLHS(), expr->getRHS()))
1532 return cgf.getBuilder().createIntToPtr(index, pointer.getType());
1533
1534 // Unlike LLVM codegen, the ABI bits for index sizes are handled during
1535 // LLVM lowering.
1536
1537 // If this is subtraction, negate the index.
1538 if (isSubtraction)
1540
1542
1543 const PointerType *pointerType =
1544 pointerOperand->getType()->getAs<PointerType>();
1545 if (!pointerType) {
1546 cgf.cgm.errorNYI("Objective-C:pointer arithmetic with non-pointer type");
1547 return nullptr;
1548 }
1549
1550 QualType elementType = pointerType->getPointeeType();
1551 if (cgf.getContext().getAsVariableArrayType(elementType)) {
1552 cgf.cgm.errorNYI("variable array type");
1553 return nullptr;
1554 }
1555
1556 if (elementType->isVoidType() || elementType->isFunctionType()) {
1557 cgf.cgm.errorNYI("void* or function pointer arithmetic");
1558 return nullptr;
1559 }
1560
1562 return cgf.getBuilder().create<cir::PtrStrideOp>(
1563 cgf.getLoc(op.e->getExprLoc()), pointer.getType(), pointer, index);
1564}
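// Informal sketch of the two paths above (CIR spelled loosely, not exact
// assembly syntax; 'p', 'i' and 'n' are hypothetical variables):
//
//   char *q = p + i;          // emits cir::PtrStrideOp of 'p' by 'i';
//                             // element-size scaling is applied later,
//                             // during LLVM lowering.
//   void *r = (char *)0 + n;  // with a pointer-sized 'n' this matches the
//                             // null-pointer idiom and is emitted as a
//                             // direct int-to-pointer cast instead of a
//                             // stride off a null base.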
1565
1566mlir::Value ScalarExprEmitter::emitMul(const BinOpInfo &ops) {
1567 const mlir::Location loc = cgf.getLoc(ops.loc);
1568 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1569 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1570 case LangOptions::SOB_Defined:
1571 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1572 return builder.createMul(loc, ops.lhs, ops.rhs);
1573 [[fallthrough]];
1574 case LangOptions::SOB_Undefined:
1575 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1576 return builder.createNSWMul(loc, ops.lhs, ops.rhs);
1577 [[fallthrough]];
1578 case LangOptions::SOB_Trapping:
1579 if (canElideOverflowCheck(cgf.getContext(), ops))
1580 return builder.createNSWMul(loc, ops.lhs, ops.rhs);
1581 cgf.cgm.errorNYI("sanitizers");
1582 }
1583 }
1584 if (ops.fullType->isConstantMatrixType()) {
1586 cgf.cgm.errorNYI("matrix types");
1587 return nullptr;
1588 }
1589 if (ops.compType->isUnsignedIntegerType() &&
1590 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1591 !canElideOverflowCheck(cgf.getContext(), ops))
1592 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1593
1594 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1596 return builder.createFMul(loc, ops.lhs, ops.rhs);
1597 }
1598
1599 if (ops.isFixedPointOp()) {
1601 cgf.cgm.errorNYI("fixed point");
1602 return nullptr;
1603 }
1604
1605 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1606 cgf.convertType(ops.fullType),
1607 cir::BinOpKind::Mul, ops.lhs, ops.rhs);
1608}
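// Illustrative mapping of the signed-overflow modes above, assuming no
// sanitizers are enabled:
//   -fwrapv  (SOB_Defined)   -> plain (wrapping) multiplication.
//   default  (SOB_Undefined) -> no-signed-wrap (NSW) multiplication.
//   -ftrapv  (SOB_Trapping)  -> NSW multiplication only when the overflow
//                               check can be elided; otherwise NYI.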
1609mlir::Value ScalarExprEmitter::emitDiv(const BinOpInfo &ops) {
1610 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1611 cgf.convertType(ops.fullType),
1612 cir::BinOpKind::Div, ops.lhs, ops.rhs);
1613}
1614mlir::Value ScalarExprEmitter::emitRem(const BinOpInfo &ops) {
1615 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1616 cgf.convertType(ops.fullType),
1617 cir::BinOpKind::Rem, ops.lhs, ops.rhs);
1618}
1619
1620mlir::Value ScalarExprEmitter::emitAdd(const BinOpInfo &ops) {
1621 if (mlir::isa<cir::PointerType>(ops.lhs.getType()) ||
1622 mlir::isa<cir::PointerType>(ops.rhs.getType()))
1623 return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/false);
1624
1625 const mlir::Location loc = cgf.getLoc(ops.loc);
1626 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1627 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1628 case LangOptions::SOB_Defined:
1629 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1630 return builder.createAdd(loc, ops.lhs, ops.rhs);
1631 [[fallthrough]];
1632 case LangOptions::SOB_Undefined:
1633 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1634 return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
1635 [[fallthrough]];
1636 case LangOptions::SOB_Trapping:
1637 if (canElideOverflowCheck(cgf.getContext(), ops))
1638 return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
1639 cgf.cgm.errorNYI("sanitizers");
1640 }
1641 }
1642 if (ops.fullType->isConstantMatrixType()) {
1644 cgf.cgm.errorNYI("matrix types");
1645 return nullptr;
1646 }
1647
1648 if (ops.compType->isUnsignedIntegerType() &&
1649 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1650 !canElideOverflowCheck(cgf.getContext(), ops))
1651 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1652
1653 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1655 return builder.createFAdd(loc, ops.lhs, ops.rhs);
1656 }
1657
1658 if (ops.isFixedPointOp()) {
1660 cgf.cgm.errorNYI("fixed point");
1661 return {};
1662 }
1663
1664 return builder.create<cir::BinOp>(loc, cgf.convertType(ops.fullType),
1665 cir::BinOpKind::Add, ops.lhs, ops.rhs);
1666}
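// Note that addition is symmetric in the pointer operand: both 'p + i' and
// 'i + p' (hypothetical pointer 'p', integer 'i') reach emitPointerArithmetic
// above, which swaps the operands when the pointer is on the right-hand side.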
1667
1668mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &ops) {
1669 const mlir::Location loc = cgf.getLoc(ops.loc);
1670 // The LHS is always a pointer if either side is.
1671 if (!mlir::isa<cir::PointerType>(ops.lhs.getType())) {
1672 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1673 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1674 case LangOptions::SOB_Defined: {
1675 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1676 return builder.createSub(loc, ops.lhs, ops.rhs);
1677 [[fallthrough]];
1678 }
1679 case LangOptions::SOB_Undefined:
1680 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1681 return builder.createNSWSub(loc, ops.lhs, ops.rhs);
1682 [[fallthrough]];
1683 case LangOptions::SOB_Trapping:
1684 if (canElideOverflowCheck(cgf.getContext(), ops))
1685 return builder.createNSWSub(loc, ops.lhs, ops.rhs);
1686 cgf.cgm.errorNYI("sanitizers");
1687 }
1688 }
1689
1690 if (ops.fullType->isConstantMatrixType()) {
1692 cgf.cgm.errorNYI("matrix types");
1693 return nullptr;
1694 }
1695
1696 if (ops.compType->isUnsignedIntegerType() &&
1697 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1698 !canElideOverflowCheck(cgf.getContext(), ops))
1699 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1700
1701 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1703 return builder.createFSub(loc, ops.lhs, ops.rhs);
1704 }
1705
1706 if (ops.isFixedPointOp()) {
1708 cgf.cgm.errorNYI("fixed point");
1709 return {};
1710 }
1711
1712 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1713 cgf.convertType(ops.fullType),
1714 cir::BinOpKind::Sub, ops.lhs, ops.rhs);
1715 }
1716
1717 // If the RHS is not a pointer, then we have normal pointer
1718 // arithmetic.
1719 if (!mlir::isa<cir::PointerType>(ops.rhs.getType()))
1720 return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/true);
1721
1722 // Otherwise, this is a pointer subtraction.
1723
1724 // Do the raw subtraction part.
1725 //
1726 // TODO(cir): when lowering this to LLVM, handle VLAs, division by the
1727 // element size, etc.
1728 //
1729 // See more in `EmitSub` in CGExprScalar.cpp.
1731 cgf.cgm.errorNYI("ptrdiff");
1732 return {};
1733}
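// Summary of the subtraction shapes handled above (illustrative):
//   int - int -> integer path, with the same overflow handling as emitAdd.
//   ptr - int -> emitPointerArithmetic with a negated index.
//   ptr - ptr -> raw pointer difference; currently reported as NYI
//                ("ptrdiff") until division by the element size is handled.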
1734
1735mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &ops) {
1736 // TODO: This misses out on the sanitizer check below.
1737 if (ops.isFixedPointOp()) {
1739 cgf.cgm.errorNYI("fixed point");
1740 return {};
1741 }
1742
1743 // CIR accepts shifts between operands of different types, so nothing special
1744 // is needed here. LLVM, however, requires the LHS and RHS to have the same
1745 // type, so the RHS is promoted or truncated to the LHS's size in lowering.
1746
1747 bool sanitizeSignedBase = cgf.sanOpts.has(SanitizerKind::ShiftBase) &&
1748 ops.compType->hasSignedIntegerRepresentation() &&
1749 !cgf.getLangOpts().isSignedOverflowDefined() &&
1750 !cgf.getLangOpts().CPlusPlus20;
1751 bool sanitizeUnsignedBase =
1752 cgf.sanOpts.has(SanitizerKind::UnsignedShiftBase) &&
1753 ops.compType->hasUnsignedIntegerRepresentation();
1754 bool sanitizeBase = sanitizeSignedBase || sanitizeUnsignedBase;
1755 bool sanitizeExponent = cgf.sanOpts.has(SanitizerKind::ShiftExponent);
1756
1757 // OpenCL 6.3j: shift values are effectively % word size of LHS.
1758 if (cgf.getLangOpts().OpenCL)
1759 cgf.cgm.errorNYI("opencl");
1760 else if ((sanitizeBase || sanitizeExponent) &&
1761 mlir::isa<cir::IntType>(ops.lhs.getType()))
1762 cgf.cgm.errorNYI("sanitizers");
1763
1764 return builder.createShiftLeft(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
1765}
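// For example, 'x << c' with a 64-bit 'x' and a 32-bit 'c' (hypothetical
// variables) keeps both operand types in CIR; making the shift amount match
// the LHS width is left to LLVM lowering, as noted above.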
1766
1767mlir::Value ScalarExprEmitter::emitShr(const BinOpInfo &ops) {
1768 // TODO: This misses out on the sanitizer check below.
1769 if (ops.isFixedPointOp()) {
1771 cgf.cgm.errorNYI("fixed point");
1772 return {};
1773 }
1774
1775 // CIR accepts shifts between operands of different types, so nothing special
1776 // is needed here. LLVM, however, requires the LHS and RHS to have the same
1777 // type, so the RHS is promoted or truncated to the LHS's size in lowering.
1778
1779 // OpenCL 6.3j: shift values are effectively % word size of LHS.
1780 if (cgf.getLangOpts().OpenCL)
1781 cgf.cgm.errorNYI("opencl");
1782 else if (cgf.sanOpts.has(SanitizerKind::ShiftExponent) &&
1783 mlir::isa<cir::IntType>(ops.lhs.getType()))
1784 cgf.cgm.errorNYI("sanitizers");
1785
1786 // Note that we don't need to distinguish unsigned treatment at this
1787 // point since it will be handled later by LLVM lowering.
1788 return builder.createShiftRight(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
1789}
1790
1791mlir::Value ScalarExprEmitter::emitAnd(const BinOpInfo &ops) {
1792 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1793 cgf.convertType(ops.fullType),
1794 cir::BinOpKind::And, ops.lhs, ops.rhs);
1795}
1796mlir::Value ScalarExprEmitter::emitXor(const BinOpInfo &ops) {
1797 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1798 cgf.convertType(ops.fullType),
1799 cir::BinOpKind::Xor, ops.lhs, ops.rhs);
1800}
1801mlir::Value ScalarExprEmitter::emitOr(const BinOpInfo &ops) {
1802 return builder.create<cir::BinOp>(cgf.getLoc(ops.loc),
1803 cgf.convertType(ops.fullType),
1804 cir::BinOpKind::Or, ops.lhs, ops.rhs);
1805}
1806
1807// Emit code for an explicit or implicit cast. Implicit
1808// casts have to handle a broader range of conversions than explicit
1809// casts, as they cover things like function to ptr-to-function decay,
1810// etc.
1811mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
1812 Expr *subExpr = ce->getSubExpr();
1813 QualType destTy = ce->getType();
1814 CastKind kind = ce->getCastKind();
1815
1816 // These cases are generally not written to ignore the result of evaluating
1817 // their sub-expressions, so we clear this now.
1818 ignoreResultAssign = false;
1819
1820 switch (kind) {
1821 case clang::CK_Dependent:
1822 llvm_unreachable("dependent cast kind in CIR gen!");
1823 case clang::CK_BuiltinFnToFnPtr:
1824 llvm_unreachable("builtin functions are handled elsewhere");
1825
1826 case CK_CPointerToObjCPointerCast:
1827 case CK_BlockPointerToObjCPointerCast:
1828 case CK_AnyPointerToBlockPointerCast:
1829 case CK_BitCast: {
1830 mlir::Value src = Visit(const_cast<Expr *>(subExpr));
1831 mlir::Type dstTy = cgf.convertType(destTy);
1832
1834
1835 if (cgf.sanOpts.has(SanitizerKind::CFIUnrelatedCast))
1836 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1837 "sanitizer support");
1838
1839 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
1840 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1841 "strict vtable pointers");
1842
1843 // Update heapallocsite metadata when there is an explicit pointer cast.
1845
1846 // If Src is a fixed vector and Dst is a scalable vector, and both have the
1847 // same element type, use the llvm.vector.insert intrinsic to perform the
1848 // bitcast.
1850
1851 // If Src is a scalable vector and Dst is a fixed vector, and both have the
1852 // same element type, use the llvm.vector.extract intrinsic to perform the
1853 // bitcast.
1855
1856 // Perform VLAT <-> VLST bitcast through memory.
1857 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
1858 // require the element types of the vectors to be the same, we
1859 // need to keep this around for bitcasts between VLAT <-> VLST where
1860 // the element types of the vectors are not the same, until we figure
1861 // out a better way of doing these casts.
1863
1864 return cgf.getBuilder().createBitcast(cgf.getLoc(subExpr->getSourceRange()),
1865 src, dstTy);
1866 }
1867
1868 case CK_AtomicToNonAtomic: {
1869 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1870 "CastExpr: ", ce->getCastKindName());
1871 mlir::Location loc = cgf.getLoc(subExpr->getSourceRange());
1872 return cgf.createDummyValue(loc, destTy);
1873 }
1874 case CK_NonAtomicToAtomic:
1875 case CK_UserDefinedConversion:
1876 return Visit(const_cast<Expr *>(subExpr));
1877 case CK_NoOp: {
1878 auto v = Visit(const_cast<Expr *>(subExpr));
1879 if (v) {
1880 // CK_NoOp can model a pointer qualification conversion, which can remove
1881 // an array bound and change the IR type.
1882 // FIXME: Once pointee types are removed from IR, remove this.
1883 mlir::Type t = cgf.convertType(destTy);
1884 if (t != v.getType())
1885 cgf.getCIRGenModule().errorNYI("pointer qualification conversion");
1886 }
1887 return v;
1888 }
1889
1890 case CK_ArrayToPointerDecay:
1891 return cgf.emitArrayToPointerDecay(subExpr).getPointer();
1892
1893 case CK_NullToPointer: {
1894 if (mustVisitNullValue(subExpr))
1895 cgf.emitIgnoredExpr(subExpr);
1896
1897 // Note that DestTy is used as the MLIR type instead of a custom
1898 // nullptr type.
1899 mlir::Type ty = cgf.convertType(destTy);
1900 return builder.getNullPtr(ty, cgf.getLoc(subExpr->getExprLoc()));
1901 }
1902
1903 case CK_LValueToRValue:
1904 assert(cgf.getContext().hasSameUnqualifiedType(subExpr->getType(), destTy));
1905 assert(subExpr->isGLValue() && "lvalue-to-rvalue applied to r-value!");
1906 return Visit(const_cast<Expr *>(subExpr));
1907
1908 case CK_IntegralCast: {
1909 ScalarConversionOpts opts;
1910 if (auto *ice = dyn_cast<ImplicitCastExpr>(ce)) {
1911 if (!ice->isPartOfExplicitCast())
1912 opts = ScalarConversionOpts(cgf.sanOpts);
1913 }
1914 return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
1915 ce->getExprLoc(), opts);
1916 }
1917
1918 case CK_FloatingComplexToReal:
1919 case CK_IntegralComplexToReal:
1920 case CK_FloatingComplexToBoolean:
1921 case CK_IntegralComplexToBoolean: {
1922 mlir::Value value = cgf.emitComplexExpr(subExpr);
1923 return emitComplexToScalarConversion(cgf.getLoc(ce->getExprLoc()), value,
1924 kind, destTy);
1925 }
1926
1927 case CK_FloatingRealToComplex:
1928 case CK_FloatingComplexCast:
1929 case CK_IntegralRealToComplex:
1930 case CK_IntegralComplexCast:
1931 case CK_IntegralComplexToFloatingComplex:
1932 case CK_FloatingComplexToIntegralComplex:
1933 llvm_unreachable("scalar cast to non-scalar value");
1934
1935 case CK_PointerToIntegral: {
1936 assert(!destTy->isBooleanType() && "bool should use PointerToBool");
1937 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
1938 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1939 "strict vtable pointers");
1940 return builder.createPtrToInt(Visit(subExpr), cgf.convertType(destTy));
1941 }
1942 case CK_ToVoid:
1943 cgf.emitIgnoredExpr(subExpr);
1944 return {};
1945
1946 case CK_IntegralToFloating:
1947 case CK_FloatingToIntegral:
1948 case CK_FloatingCast:
1949 case CK_FixedPointToFloating:
1950 case CK_FloatingToFixedPoint: {
1951 if (kind == CK_FixedPointToFloating || kind == CK_FloatingToFixedPoint) {
1952 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1953 "fixed point casts");
1954 return {};
1955 }
1957 return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
1958 ce->getExprLoc());
1959 }
1960
1961 case CK_IntegralToBoolean:
1962 return emitIntToBoolConversion(Visit(subExpr),
1963 cgf.getLoc(ce->getSourceRange()));
1964
1965 case CK_PointerToBoolean:
1966 return emitPointerToBoolConversion(Visit(subExpr), subExpr->getType());
1967 case CK_FloatingToBoolean:
1968 return emitFloatToBoolConversion(Visit(subExpr),
1969 cgf.getLoc(subExpr->getExprLoc()));
1970 case CK_MemberPointerToBoolean: {
1971 mlir::Value memPtr = Visit(subExpr);
1972 return builder.createCast(cgf.getLoc(ce->getSourceRange()),
1973 cir::CastKind::member_ptr_to_bool, memPtr,
1974 cgf.convertType(destTy));
1975 }
1976
1977 case CK_VectorSplat: {
1978 // Create a vector object and fill all elements with the same scalar value.
1979 assert(destTy->isVectorType() && "CK_VectorSplat to non-vector type");
1980 return builder.create<cir::VecSplatOp>(
1981 cgf.getLoc(subExpr->getSourceRange()), cgf.convertType(destTy),
1982 Visit(subExpr));
1983 }
1984 case CK_FunctionToPointerDecay:
1985 return cgf.emitLValue(subExpr).getPointer();
1986
1987 default:
1988 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1989 "CastExpr: ", ce->getCastKindName());
1990 }
1991 return {};
1992}
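// A few illustrative inputs for the cast kinds handled above (variable names
// are hypothetical):
//   int *p = nullptr;   // CK_NullToPointer: emits a null pointer of the
//                       // converted pointer type.
//   long l = i;         // CK_IntegralCast on an implicit cast: goes through
//                       // emitScalarConversion with sanitizer-driven options.
//   float4 v = 1.0f;    // CK_VectorSplat (ext_vector_type): builds a
//                       // cir::VecSplatOp from the scalar.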
1993
1994mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *e) {
1995 if (e->getCallReturnType(cgf.getContext())->isReferenceType())
1996 return emitLoadOfLValue(e);
1997
1998 auto v = cgf.emitCallExpr(e).getValue();
2000 return v;
2001}
2002
2003mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *e) {
2004 // TODO(cir): The classic codegen calls tryEmitAsConstant() here. Folding
2005 // constants sounds like work for MLIR optimizers, but we'll keep an
2006 // assertion for now.
2007 assert(!cir::MissingFeatures::tryEmitAsConstant());
2008 Expr::EvalResult result;
2009 if (e->EvaluateAsInt(result, cgf.getContext(), Expr::SE_AllowSideEffects)) {
2010 cgf.cgm.errorNYI(e->getSourceRange(), "Constant integer member expr");
2011 // Fall through to emit this as a non-constant access.
2012 }
2013 return emitLoadOfLValue(e);
2014}
2015
2016mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *e) {
2017 const unsigned numInitElements = e->getNumInits();
2018
2019 if (e->hadArrayRangeDesignator()) {
2020 cgf.cgm.errorNYI(e->getSourceRange(), "ArrayRangeDesignator");
2021 return {};
2022 }
2023
2024 if (e->getType()->isVectorType()) {
2025 const auto vectorType =
2026 mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
2027
2028 SmallVector<mlir::Value, 16> elements;
2029 for (Expr *init : e->inits()) {
2030 elements.push_back(Visit(init));
2031 }
2032
2033 // Zero-initialize any remaining values.
2034 if (numInitElements < vectorType.getSize()) {
2035 const mlir::Value zeroValue = cgf.getBuilder().getNullValue(
2036 vectorType.getElementType(), cgf.getLoc(e->getSourceRange()));
2037 std::fill_n(std::back_inserter(elements),
2038 vectorType.getSize() - numInitElements, zeroValue);
2039 }
2040
2041 return cgf.getBuilder().create<cir::VecCreateOp>(
2042 cgf.getLoc(e->getSourceRange()), vectorType, elements);
2043 }
2044
2045 // C++11 value-initialization for the scalar.
2046 if (numInitElements == 0)
2047 return emitNullValue(e->getType(), cgf.getLoc(e->getExprLoc()));
2048
2049 return Visit(e->getInit(0));
2050}
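// Illustrative example for the vector branch above: initializing a 4-element
// vector from '{a, b}' (hypothetical scalars) visits the two initializers and
// appends two zero elements before building the cir::VecCreateOp.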
2051
2052mlir::Value CIRGenFunction::emitScalarConversion(mlir::Value src,
2053 QualType srcTy, QualType dstTy,
2054 SourceLocation loc) {
2055 assert(hasScalarEvaluationKind(srcTy) &&
2056 hasScalarEvaluationKind(dstTy) &&
2057 "Invalid scalar expression to emit");
2058 return ScalarExprEmitter(*this, builder)
2059 .emitScalarConversion(src, srcTy, dstTy, loc);
2060}
2061
2062mlir::Value CIRGenFunction::emitComplexToScalarConversion(mlir::Value src,
2063 QualType srcTy,
2064 QualType dstTy,
2065 SourceLocation loc) {
2066 assert(srcTy->isAnyComplexType() && hasScalarEvaluationKind(dstTy) &&
2067 "Invalid complex -> scalar conversion");
2068
2069 QualType complexElemTy = srcTy->castAs<ComplexType>()->getElementType();
2070 if (dstTy->isBooleanType()) {
2071 auto kind = complexElemTy->isFloatingType()
2072 ? cir::CastKind::float_complex_to_bool
2073 : cir::CastKind::int_complex_to_bool;
2074 return builder.createCast(getLoc(loc), kind, src, convertType(dstTy));
2075 }
2076
2077 auto kind = complexElemTy->isFloatingType()
2078 ? cir::CastKind::float_complex_to_real
2079 : cir::CastKind::int_complex_to_real;
2080 mlir::Value real =
2081 builder.createCast(getLoc(loc), kind, src, convertType(complexElemTy));
2082 return emitScalarConversion(real, complexElemTy, dstTy, loc);
2083}
2084
2085mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) {
2086 // Perform vector logical not on comparison with zero vector.
2087 if (e->getType()->isVectorType() &&
2088 e->getType()->castAs<VectorType>()->getVectorKind() ==
2089 VectorKind::Generic) {
2091 cgf.cgm.errorNYI(e->getSourceRange(), "vector logical not");
2092 return {};
2093 }
2094
2095 // Compare operand to zero.
2096 mlir::Value boolVal = cgf.evaluateExprAsBool(e->getSubExpr());
2097
2098 // Invert value.
2099 boolVal = builder.createNot(boolVal);
2100
2101 // ZExt result to the expr type.
2102 return maybePromoteBoolResult(boolVal, cgf.convertType(e->getType()));
2103}
2104
2105mlir::Value ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *e) {
2106 QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
2107 mlir::Value result = VisitRealImag(e, promotionTy);
2108 if (result && !promotionTy.isNull())
2109 result = emitUnPromotedValue(result, e->getType());
2110 return result;
2111}
2112
2113mlir::Value ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *e) {
2114 QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
2115 mlir::Value result = VisitRealImag(e, promotionTy);
2116 if (result && !promotionTy.isNull())
2117 result = emitUnPromotedValue(result, e->getType());
2118 return result;
2119}
2120
2121mlir::Value ScalarExprEmitter::VisitRealImag(const UnaryOperator *e,
2122 QualType promotionTy) {
2123 assert((e->getOpcode() == clang::UO_Real ||
2124 e->getOpcode() == clang::UO_Imag) &&
2125 "Invalid UnaryOp kind for ComplexType Real or Imag");
2126
2127 Expr *op = e->getSubExpr();
2128 mlir::Location loc = cgf.getLoc(e->getExprLoc());
2129 if (op->getType()->isAnyComplexType()) {
2130 // If it's an l-value, load through the appropriate subobject l-value.
2131 // Note that we have to ask `e` because `op` might be an l-value that
2132 // this won't work for, e.g. an Obj-C property
2133 mlir::Value complex = cgf.emitComplexExpr(op);
2134 if (e->isGLValue() && !promotionTy.isNull()) {
2135 complex = cgf.emitPromotedValue(complex, promotionTy);
2136 }
2137
2138 return e->getOpcode() == clang::UO_Real
2139 ? builder.createComplexReal(loc, complex)
2140 : builder.createComplexImag(loc, complex);
2141 }
2142
2143 // __real or __imag on a scalar returns zero. Emit the subexpr to ensure side
2144 // effects are evaluated, but not the actual value.
2145 cgf.cgm.errorNYI(e->getSourceRange(),
2146 "VisitRealImag __real or __imag on a scalar");
2147 return {};
2148}
2149
2150/// Return the size or alignment of the type of argument of the sizeof
2151/// expression as an integer.
2152mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2153 const UnaryExprOrTypeTraitExpr *e) {
2154 const QualType typeToSize = e->getTypeOfArgument();
2155 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
2156 if (auto kind = e->getKind();
2157 kind == UETT_SizeOf || kind == UETT_DataSizeOf) {
2158 if (cgf.getContext().getAsVariableArrayType(typeToSize)) {
2159 cgf.getCIRGenModule().errorNYI(e->getSourceRange(),
2160 "sizeof operator for VariableArrayType",
2161 e->getStmtClassName());
2162 return builder.getConstant(
2163 loc, cir::IntAttr::get(cgf.cgm.UInt64Ty,
2164 llvm::APSInt(llvm::APInt(64, 1), true)));
2165 }
2166 } else if (e->getKind() == UETT_OpenMPRequiredSimdAlign) {
2167 cgf.getCIRGenModule().errorNYI(
2168 e->getSourceRange(), "sizeof operator for OpenMpRequiredSimdAlign",
2169 e->getStmtClassName());
2170 return builder.getConstant(
2171 loc, cir::IntAttr::get(cgf.cgm.UInt64Ty,
2172 llvm::APSInt(llvm::APInt(64, 1), true)));
2173 }
2174
2175 return builder.getConstant(
2176 loc, cir::IntAttr::get(cgf.cgm.UInt64Ty,
2177 e->EvaluateKnownConstInt(cgf.getContext())));
2178}
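// For a complete, non-variably-modified type, e.g. 'sizeof(int)', the operand
// folds to a known constant and is emitted as a single 64-bit constant; the
// VLA and OpenMP-required-simd-align cases above currently report NYI and
// return a placeholder value of 1.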
2179
2180/// Return true if the specified expression is cheap enough and side-effect-free
2181/// enough to evaluate unconditionally instead of conditionally. This is used
2182/// to convert control flow into selects in some cases.
2183/// TODO(cir): can be shared with LLVM codegen.
2184static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e,
2185 CIRGenFunction &cgf) {
2186 // Anything that is an integer or floating point constant is fine.
2187 return e->IgnoreParens()->isEvaluatable(cgf.getContext());
2188
2189 // Even non-volatile automatic variables can't be evaluated unconditionally.
2190 // Referencing a thread_local may cause non-trivial initialization work to
2191 // occur. If we're inside a lambda and one of the variables is from the scope
2192 // outside the lambda, that function may have returned already. Reading its
2193 // locals is a bad idea. Also, these reads may introduce races that didn't
2194 // exist in the source-level program.
2195}
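// For example, in 'cond ? 4 : 5' both arms are evaluatable constants, so the
// caller below may emit a select; 'cond ? x : y' with automatic variables
// (hypothetical 'x' and 'y') is not considered cheap, for the reasons above.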
2196
2197mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator(
2198 const AbstractConditionalOperator *e) {
2199 CIRGenBuilderTy &builder = cgf.getBuilder();
2200 mlir::Location loc = cgf.getLoc(e->getSourceRange());
2201 ignoreResultAssign = false;
2202
2203 // Bind the common expression if necessary.
2204 CIRGenFunction::OpaqueValueMapping binding(cgf, e);
2205
2206 Expr *condExpr = e->getCond();
2207 Expr *lhsExpr = e->getTrueExpr();
2208 Expr *rhsExpr = e->getFalseExpr();
2209
2210 // If the condition constant folds and can be elided, try to avoid emitting
2211 // the condition and the dead arm.
2212 bool condExprBool;
2213 if (cgf.constantFoldsToBool(condExpr, condExprBool)) {
2214 Expr *live = lhsExpr, *dead = rhsExpr;
2215 if (!condExprBool)
2216 std::swap(live, dead);
2217
2218 // If the dead side doesn't have labels we need, just emit the Live part.
2219 if (!cgf.containsLabel(dead)) {
2220 if (condExprBool)
2221 assert(!cir::MissingFeatures::incrementProfileCounter());
2222 mlir::Value result = Visit(live);
2223
2224 // If the live part is a throw expression, it acts like it has a void
2225 // type, so evaluating it returns a null Value. However, a conditional
2226 // with non-void type must return a non-null Value.
2227 if (!result && !e->getType()->isVoidType()) {
2228 cgf.cgm.errorNYI(e->getSourceRange(),
2229 "throw expression in conditional operator");
2230 result = {};
2231 }
2232
2233 return result;
2234 }
2235 }
2236
2237 QualType condType = condExpr->getType();
2238
2239 // OpenCL: If the condition is a vector, we can treat this condition like
2240 // the select function.
2241 if ((cgf.getLangOpts().OpenCL && condType->isVectorType()) ||
2242 condType->isExtVectorType()) {
2244 cgf.cgm.errorNYI(e->getSourceRange(), "vector ternary op");
2245 }
2246
2247 if (condType->isVectorType() || condType->isSveVLSBuiltinType()) {
2248 if (!condType->isVectorType()) {
2250 cgf.cgm.errorNYI(loc, "TernaryOp for SVE vector");
2251 return {};
2252 }
2253
2254 mlir::Value condValue = Visit(condExpr);
2255 mlir::Value lhsValue = Visit(lhsExpr);
2256 mlir::Value rhsValue = Visit(rhsExpr);
2257 return builder.create<cir::VecTernaryOp>(loc, condValue, lhsValue,
2258 rhsValue);
2259 }
2260
2261 // If this is a really simple expression (like x ? 4 : 5), emit this as a
2262 // select instead of as control flow. We can only do this if it is cheap
2263 // and safe to evaluate the LHS and RHS unconditionally.
2264 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, cgf) &&
2265 isCheapEnoughToEvaluateUnconditionally(rhsExpr, cgf)) {
2266 bool lhsIsVoid = false;
2267 mlir::Value condV = cgf.evaluateExprAsBool(condExpr);
2269
2270 mlir::Value lhs = Visit(lhsExpr);
2271 if (!lhs) {
2272 lhs = builder.getNullValue(cgf.VoidTy, loc);
2273 lhsIsVoid = true;
2274 }
2275
2276 mlir::Value rhs = Visit(rhsExpr);
2277 if (lhsIsVoid) {
2278 assert(!rhs && "lhs and rhs types must match");
2279 rhs = builder.getNullValue(cgf.VoidTy, loc);
2280 }
2281
2282 return builder.createSelect(loc, condV, lhs, rhs);
2283 }
2284
2285 mlir::Value condV = cgf.emitOpOnBoolExpr(loc, condExpr);
2286 CIRGenFunction::ConditionalEvaluation eval(cgf);
2287 SmallVector<mlir::OpBuilder::InsertPoint, 2> insertPoints{};
2288 mlir::Type yieldTy{};
2289
2290 auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc, Expr *expr) {
2291 CIRGenFunction::LexicalScope lexScope{cgf, loc, b.getInsertionBlock()};
2293
2295 eval.beginEvaluation();
2296 mlir::Value branch = Visit(expr);
2297 eval.endEvaluation();
2298
2299 if (branch) {
2300 yieldTy = branch.getType();
2301 b.create<cir::YieldOp>(loc, branch);
2302 } else {
2303 // If LHS or RHS is a throw or void expression we need to patch the
2304 // arms so that their yield types match.
2305 insertPoints.push_back(b.saveInsertionPoint());
2306 }
2307 };
2308
2309 mlir::Value result = builder
2310 .create<cir::TernaryOp>(
2311 loc, condV,
2312 /*trueBuilder=*/
2313 [&](mlir::OpBuilder &b, mlir::Location loc) {
2314 emitBranch(b, loc, lhsExpr);
2315 },
2316 /*falseBuilder=*/
2317 [&](mlir::OpBuilder &b, mlir::Location loc) {
2318 emitBranch(b, loc, rhsExpr);
2319 })
2320 .getResult();
2321
2322 if (!insertPoints.empty()) {
2323 // If both arms are void, so be it.
2324 if (!yieldTy)
2325 yieldTy = cgf.VoidTy;
2326
2327 // Insert required yields.
2328 for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
2329 mlir::OpBuilder::InsertionGuard guard(builder);
2330 builder.restoreInsertionPoint(toInsert);
2331
2332 // Block does not return: build empty yield.
2333 if (mlir::isa<cir::VoidType>(yieldTy)) {
2334 builder.create<cir::YieldOp>(loc);
2335 } else { // Block returns: set null yield value.
2336 mlir::Value op0 = builder.getNullValue(yieldTy, loc);
2337 builder.create<cir::YieldOp>(loc, op0);
2338 }
2339 }
2340 }
2341
2342 return result;
2343}
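// Rough decision tree for the conditional operator above (illustrative):
//   constant-foldable condition  -> emit only the live arm (unless the dead
//                                   arm contains a label we need).
//   vector condition             -> cir::VecTernaryOp over the three values.
//   cheap, side-effect-free arms -> evaluate both and emit a single select.
//   otherwise                    -> cir::TernaryOp with one region per arm,
//                                   patching void/throw arms with explicit
//                                   yields.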
2344
2345mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *e,
2346 LValue lv,
2347 cir::UnaryOpKind kind,
2348 bool isPre) {
2349 return ScalarExprEmitter(*this, builder)
2350 .emitScalarPrePostIncDec(e, lv, kind, isPre);
2351}