CIRGenExprScalar.cpp
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Emit Expr nodes with scalar CIR types as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
14#include "CIRGenFunction.h"
15#include "CIRGenValue.h"
16
17#include "clang/AST/Expr.h"
21
22#include "mlir/IR/Location.h"
23#include "mlir/IR/Value.h"
24
25#include <cassert>
26#include <utility>
27
28using namespace clang;
29using namespace clang::CIRGen;
30
31namespace {
32
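// Bundles the state needed to emit a binary operator: the emitted operand
// values, the operand and computation types, the opcode, floating-point
// options, and the originating expression (used for diagnostics).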
33struct BinOpInfo {
34 mlir::Value lhs;
35 mlir::Value rhs;
36 SourceRange loc;
37 QualType fullType; // Type of operands and result
38 QualType compType; // Type used for computations. Element type
39 // for vectors, otherwise same as FullType.
40 BinaryOperator::Opcode opcode; // Opcode of BinOp to perform
41 FPOptions fpfeatures;
42 const Expr *e; // Entire expr, used for reporting unsupported cases. May not be a binop.
43
44 /// Check if the binop computes a division or a remainder.
45 bool isDivRemOp() const {
46 return opcode == BO_Div || opcode == BO_Rem || opcode == BO_DivAssign ||
47 opcode == BO_RemAssign;
48 }
49
50 /// Check if the binop can result in integer overflow.
51 bool mayHaveIntegerOverflow() const {
52 // Without constant input, we can't rule out overflow.
53 auto lhsci = lhs.getDefiningOp<cir::ConstantOp>();
54 auto rhsci = rhs.getDefiningOp<cir::ConstantOp>();
55 if (!lhsci || !rhsci)
56 return true;
57
59 // TODO(cir): For now we just assume that we might overflow
60 return true;
61 }
62
63 /// Check if at least one operand is a fixed point type. In such cases,
64 /// this operation did not follow usual arithmetic conversion and both
65 /// operands might not be of the same type.
66 bool isFixedPointOp() const {
67 // We cannot simply check the result type since comparison operations
68 // return an int.
69 if (const auto *binOp = llvm::dyn_cast<BinaryOperator>(e)) {
70 QualType lhstype = binOp->getLHS()->getType();
71 QualType rhstype = binOp->getRHS()->getType();
72 return lhstype->isFixedPointType() || rhstype->isFixedPointType();
73 }
74 if (const auto *unop = llvm::dyn_cast<UnaryOperator>(e))
75 return unop->getSubExpr()->getType()->isFixedPointType();
76 return false;
77 }
78};
79
80class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
81 CIRGenFunction &cgf;
82 CIRGenBuilderTy &builder;
83 // Unlike classic codegen, we set this to false or use std::exchange to read
84 // the value, instead of calling TestAndClearIgnoreResultAssign, to make it
85 // explicit when the value is used.
86 bool ignoreResultAssign;
87
88public:
89 ScalarExprEmitter(CIRGenFunction &cgf, CIRGenBuilderTy &builder,
90 bool ignoreResultAssign = false)
91 : cgf(cgf), builder(builder), ignoreResultAssign(ignoreResultAssign) {}
92
93 //===--------------------------------------------------------------------===//
94 // Utilities
95 //===--------------------------------------------------------------------===//
96 mlir::Type convertType(QualType ty) { return cgf.convertType(ty); }
97
98 mlir::Value emitComplexToScalarConversion(mlir::Location loc,
99 mlir::Value value, CastKind kind,
100 QualType destTy);
101
102 mlir::Value emitNullValue(QualType ty, mlir::Location loc) {
103 return cgf.cgm.emitNullConstant(ty, loc);
104 }
105
106 mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType) {
107 return builder.createFloatingCast(result, cgf.convertType(promotionType));
108 }
109
110 mlir::Value emitUnPromotedValue(mlir::Value result, QualType exprType) {
111 return builder.createFloatingCast(result, cgf.convertType(exprType));
112 }
113
114 mlir::Value emitPromoted(const Expr *e, QualType promotionType);
115
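  // Comparisons and logical operators produce !cir.bool, but in C their result
  // type is int; widen the bool to the destination integer type when needed.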
116 mlir::Value maybePromoteBoolResult(mlir::Value value,
117 mlir::Type dstTy) const {
118 if (mlir::isa<cir::IntType>(dstTy))
119 return builder.createBoolToInt(value, dstTy);
120 if (mlir::isa<cir::BoolType>(dstTy))
121 return value;
122 llvm_unreachable("Can only promote integer or boolean types");
123 }
124
125 //===--------------------------------------------------------------------===//
126 // Visitor Methods
127 //===--------------------------------------------------------------------===//
128
129 mlir::Value Visit(Expr *e) {
130 return StmtVisitor<ScalarExprEmitter, mlir::Value>::Visit(e);
131 }
132
133 mlir::Value VisitStmt(Stmt *s) {
134 llvm_unreachable("Statement passed to ScalarExprEmitter");
135 }
136
137 mlir::Value VisitExpr(Expr *e) {
138 cgf.getCIRGenModule().errorNYI(
139 e->getSourceRange(), "scalar expression kind: ", e->getStmtClassName());
140 return {};
141 }
142
143 mlir::Value VisitConstantExpr(ConstantExpr *e) {
144 // A constant expression of type 'void' generates no code and produces no
145 // value.
146 if (e->getType()->isVoidType())
147 return {};
148
149 if (mlir::Attribute result = ConstantEmitter(cgf).tryEmitConstantExpr(e)) {
150 if (e->isGLValue()) {
151 cgf.cgm.errorNYI(e->getSourceRange(),
152 "ScalarExprEmitter: constant expr GL Value");
153 return {};
154 }
155
156 return builder.getConstant(cgf.getLoc(e->getSourceRange()),
157 mlir::cast<mlir::TypedAttr>(result));
158 }
159
160 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: constant expr");
161 return {};
162 }
163
164 mlir::Value VisitPackIndexingExpr(PackIndexingExpr *e) {
165 return Visit(e->getSelectedExpr());
166 }
167
168 mlir::Value VisitParenExpr(ParenExpr *pe) { return Visit(pe->getSubExpr()); }
169
170 mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
171 return Visit(ge->getResultExpr());
172 }
173
174 /// Emits the address of the l-value, then loads and returns the result.
175 mlir::Value emitLoadOfLValue(const Expr *e) {
176 LValue lv = cgf.emitLValue(e);
177 // FIXME: add something akin to EmitLValueAlignmentAssumption(E, V);
178 return cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
179 }
180
181 mlir::Value VisitCoawaitExpr(CoawaitExpr *s) {
182 return cgf.emitCoawaitExpr(*s).getValue();
183 }
184 mlir::Value VisitCoyieldExpr(CoyieldExpr *e) {
185 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: coyield");
186 return {};
187 }
188 mlir::Value VisitUnaryCoawait(const UnaryOperator *e) {
189 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: unary coawait");
190 return {};
191 }
192
193 mlir::Value emitLoadOfLValue(LValue lv, SourceLocation loc) {
194 return cgf.emitLoadOfLValue(lv, loc).getValue();
195 }
196
197 // l-values
198 mlir::Value VisitDeclRefExpr(DeclRefExpr *e) {
199 if (CIRGenFunction::ConstantEmission constant = cgf.tryEmitAsConstant(e))
200 return cgf.emitScalarConstant(constant, e);
201
202 return emitLoadOfLValue(e);
203 }
204
205 mlir::Value VisitAddrLabelExpr(const AddrLabelExpr *e) {
206 auto func = cast<cir::FuncOp>(cgf.curFn);
207 cir::BlockAddrInfoAttr blockInfoAttr = cir::BlockAddrInfoAttr::get(
208 &cgf.getMLIRContext(), func.getSymName(), e->getLabel()->getName());
209 cir::BlockAddressOp blockAddressOp = cir::BlockAddressOp::create(
210 builder, cgf.getLoc(e->getSourceRange()), cgf.convertType(e->getType()),
211 blockInfoAttr);
212 cir::LabelOp resolvedLabel = cgf.cgm.lookupBlockAddressInfo(blockInfoAttr);
213 if (!resolvedLabel) {
214 cgf.cgm.mapUnresolvedBlockAddress(blockAddressOp);
215 // Still add the op to maintain insertion order; it will be resolved in
216 // resolveBlockAddresses.
217 cgf.cgm.mapResolvedBlockAddress(blockAddressOp, nullptr);
218 } else {
219 cgf.cgm.mapResolvedBlockAddress(blockAddressOp, resolvedLabel);
220 }
221 cgf.instantiateIndirectGotoBlock();
222 return blockAddressOp;
223 }
224
225 mlir::Value VisitIntegerLiteral(const IntegerLiteral *e) {
226 mlir::Type type = cgf.convertType(e->getType());
227 return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()),
228 cir::IntAttr::get(type, e->getValue()));
229 }
230
231 mlir::Value VisitFixedPointLiteral(const FixedPointLiteral *e) {
232 cgf.cgm.errorNYI(e->getSourceRange(),
233 "ScalarExprEmitter: fixed point literal");
234 return {};
235 }
236
237 mlir::Value VisitFloatingLiteral(const FloatingLiteral *e) {
238 mlir::Type type = cgf.convertType(e->getType());
239 assert(mlir::isa<cir::FPTypeInterface>(type) &&
240 "expect floating-point type");
241 return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()),
242 cir::FPAttr::get(type, e->getValue()));
243 }
244
245 mlir::Value VisitCharacterLiteral(const CharacterLiteral *e) {
246 mlir::Type ty = cgf.convertType(e->getType());
247 auto init = cir::IntAttr::get(ty, e->getValue());
248 return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()), init);
249 }
250
251 mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *e) {
252 return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
253 }
254
255 mlir::Value VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *e) {
256 if (e->getType()->isVoidType())
257 return {};
258
259 return emitNullValue(e->getType(), cgf.getLoc(e->getSourceRange()));
260 }
261
262 mlir::Value VisitGNUNullExpr(const GNUNullExpr *e) {
263 return emitNullValue(e->getType(), cgf.getLoc(e->getSourceRange()));
264 }
265
266 mlir::Value VisitOffsetOfExpr(OffsetOfExpr *e);
267
268 mlir::Value VisitSizeOfPackExpr(SizeOfPackExpr *e) {
269 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: size of pack");
270 return {};
271 }
272 mlir::Value VisitPseudoObjectExpr(PseudoObjectExpr *e) {
273 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: pseudo object");
274 return {};
275 }
276 mlir::Value VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *e) {
277 cgf.cgm.errorNYI(e->getSourceRange(),
278 "ScalarExprEmitter: sycl unique stable name");
279 return {};
280 }
281 mlir::Value VisitEmbedExpr(EmbedExpr *e) {
282 assert(e->getDataElementCount() == 1);
283 auto it = e->begin();
284 llvm::APInt value = (*it)->getValue();
285 return builder.getConstInt(cgf.getLoc(e->getExprLoc()), value,
287 }
288 mlir::Value VisitOpaqueValueExpr(OpaqueValueExpr *e) {
289 if (e->isGLValue())
290 return emitLoadOfLValue(cgf.getOrCreateOpaqueLValueMapping(e),
291 e->getExprLoc());
292
293 // Otherwise, assume the mapping is the scalar directly.
294 return cgf.getOrCreateOpaqueRValueMapping(e).getValue();
295 }
296
297 mlir::Value VisitObjCSelectorExpr(ObjCSelectorExpr *e) {
298 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc selector");
299 return {};
300 }
301 mlir::Value VisitObjCProtocolExpr(ObjCProtocolExpr *e) {
302 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc protocol");
303 return {};
304 }
305 mlir::Value VisitObjCIVarRefExpr(ObjCIvarRefExpr *e) {
306 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc ivar ref");
307 return {};
308 }
309 mlir::Value VisitObjCMessageExpr(ObjCMessageExpr *e) {
310 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc message");
311 return {};
312 }
313 mlir::Value VisitObjCIsaExpr(ObjCIsaExpr *e) {
314 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc isa");
315 return {};
316 }
317 mlir::Value VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *e) {
318 cgf.cgm.errorNYI(e->getSourceRange(),
319 "ScalarExprEmitter: objc availability check");
320 return {};
321 }
322
323 mlir::Value VisitMatrixSubscriptExpr(MatrixSubscriptExpr *e) {
324 cgf.cgm.errorNYI(e->getSourceRange(),
325 "ScalarExprEmitter: matrix subscript");
326 return {};
327 }
328
329 mlir::Value VisitCastExpr(CastExpr *e);
330 mlir::Value VisitCallExpr(const CallExpr *e);
331
332 mlir::Value VisitStmtExpr(StmtExpr *e) {
333 CIRGenFunction::StmtExprEvaluation eval(cgf);
334 if (e->getType()->isVoidType()) {
335 (void)cgf.emitCompoundStmt(*e->getSubStmt());
336 return {};
337 }
338
339 Address retAlloca =
340 cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
341 (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca);
342
343 return cgf.emitLoadOfScalar(cgf.makeAddrLValue(retAlloca, e->getType()),
344 e->getExprLoc());
345 }
346
347 mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
348 ignoreResultAssign = false;
349
350 if (e->getBase()->getType()->isVectorType()) {
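      // Subscripting a vector produces the element value directly via
      // cir.vec.extract; no lvalue is formed or loaded.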
352
353 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
354 const mlir::Value vecValue = Visit(e->getBase());
355 const mlir::Value indexValue = Visit(e->getIdx());
356 return cir::VecExtractOp::create(cgf.builder, loc, vecValue, indexValue);
357 }
358 // Just load the lvalue formed by the subscript expression.
359 return emitLoadOfLValue(e);
360 }
361
362 mlir::Value VisitShuffleVectorExpr(ShuffleVectorExpr *e) {
363 if (e->getNumSubExprs() == 2) {
364 // The undocumented form of __builtin_shufflevector.
365 mlir::Value inputVec = Visit(e->getExpr(0));
366 mlir::Value indexVec = Visit(e->getExpr(1));
367 return cir::VecShuffleDynamicOp::create(
368 cgf.builder, cgf.getLoc(e->getSourceRange()), inputVec, indexVec);
369 }
370
371 mlir::Value vec1 = Visit(e->getExpr(0));
372 mlir::Value vec2 = Visit(e->getExpr(1));
373
374 // The documented form of __builtin_shufflevector, where the indices are
375 // a variable number of integer constants. The constants will be stored
376 // in an ArrayAttr.
377 SmallVector<mlir::Attribute, 8> indices;
378 for (unsigned i = 2; i < e->getNumSubExprs(); ++i) {
379 indices.push_back(
380 cir::IntAttr::get(cgf.builder.getSInt64Ty(),
381 e->getExpr(i)
382 ->EvaluateKnownConstInt(cgf.getContext())
383 .getSExtValue()));
384 }
385
386 return cir::VecShuffleOp::create(cgf.builder,
387 cgf.getLoc(e->getSourceRange()),
388 cgf.convertType(e->getType()), vec1, vec2,
389 cgf.builder.getArrayAttr(indices));
390 }
391
392 mlir::Value VisitConvertVectorExpr(ConvertVectorExpr *e) {
393 // __builtin_convertvector is an element-wise cast, and is implemented as a
394 // regular cast. The back end handles casts of vectors correctly.
395 return emitScalarConversion(Visit(e->getSrcExpr()),
396 e->getSrcExpr()->getType(), e->getType(),
397 e->getSourceRange().getBegin());
398 }
399
400 mlir::Value VisitExtVectorElementExpr(Expr *e) { return emitLoadOfLValue(e); }
401
402 mlir::Value VisitMemberExpr(MemberExpr *e);
403
404 mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
405 return emitLoadOfLValue(e);
406 }
407
408 mlir::Value VisitInitListExpr(InitListExpr *e);
409
410 mlir::Value VisitArrayInitIndexExpr(ArrayInitIndexExpr *e) {
411 cgf.cgm.errorNYI(e->getSourceRange(),
412 "ScalarExprEmitter: array init index");
413 return {};
414 }
415
416 mlir::Value VisitImplicitValueInitExpr(const ImplicitValueInitExpr *e) {
417 cgf.cgm.errorNYI(e->getSourceRange(),
418 "ScalarExprEmitter: implicit value init");
419 return {};
420 }
421
422 mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *e) {
423 return VisitCastExpr(e);
424 }
425
426 mlir::Value VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *e) {
427 return cgf.cgm.emitNullConstant(e->getType(),
428 cgf.getLoc(e->getSourceRange()));
429 }
430
431 /// Perform a pointer to boolean conversion.
432 mlir::Value emitPointerToBoolConversion(mlir::Value v, QualType qt) {
433 // TODO(cir): comparing the ptr to null is done when lowering CIR to LLVM.
434 // We might want to have a separate pass for these types of conversions.
435 return cgf.getBuilder().createPtrToBoolCast(v);
436 }
437
438 mlir::Value emitFloatToBoolConversion(mlir::Value src, mlir::Location loc) {
439 cir::BoolType boolTy = builder.getBoolTy();
440 return cir::CastOp::create(builder, loc, boolTy,
441 cir::CastKind::float_to_bool, src);
442 }
443
444 mlir::Value emitIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) {
445 // Because of the type rules of C, we often end up computing a
446 // logical value, then zero extending it to int, then wanting it
447 // as a logical value again.
448 // TODO: optimize this common case here or leave it for later
449 // CIR passes?
450 cir::BoolType boolTy = builder.getBoolTy();
451 return cir::CastOp::create(builder, loc, boolTy, cir::CastKind::int_to_bool,
452 srcVal);
453 }
454
455 /// Convert the specified expression value to a boolean (!cir.bool) truth
456 /// value. This is equivalent to "Val != 0".
457 mlir::Value emitConversionToBool(mlir::Value src, QualType srcType,
458 mlir::Location loc) {
459 assert(srcType.isCanonical() && "EmitScalarConversion strips typedefs");
460
461 if (srcType->isRealFloatingType())
462 return emitFloatToBoolConversion(src, loc);
463
464 if (llvm::isa<MemberPointerType>(srcType)) {
465 cgf.getCIRGenModule().errorNYI(loc, "member pointer to bool conversion");
466 return builder.getFalse(loc);
467 }
468
469 if (srcType->isIntegerType())
470 return emitIntToBoolConversion(src, loc);
471
472 assert(::mlir::isa<cir::PointerType>(src.getType()));
473 return emitPointerToBoolConversion(src, srcType);
474 }
475
476 // Emit a conversion from the specified type to the specified destination
477 // type, both of which are CIR scalar types.
478 struct ScalarConversionOpts {
479 bool treatBooleanAsSigned;
480 bool emitImplicitIntegerTruncationChecks;
481 bool emitImplicitIntegerSignChangeChecks;
482
483 ScalarConversionOpts()
484 : treatBooleanAsSigned(false),
485 emitImplicitIntegerTruncationChecks(false),
486 emitImplicitIntegerSignChangeChecks(false) {}
487
488 ScalarConversionOpts(clang::SanitizerSet sanOpts)
489 : treatBooleanAsSigned(false),
490 emitImplicitIntegerTruncationChecks(
491 sanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
492 emitImplicitIntegerSignChangeChecks(
493 sanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
494 };
495
496 // Conversion from bool, integral, or floating-point to integral or
497 // floating-point. Conversions involving other types are handled elsewhere.
498 // Conversion to bool is handled elsewhere because that's a comparison against
499 // zero, not a simple cast. This handles both individual scalars and vectors.
500 mlir::Value emitScalarCast(mlir::Value src, QualType srcType,
501 QualType dstType, mlir::Type srcTy,
502 mlir::Type dstTy, ScalarConversionOpts opts) {
503 assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
504 "Internal error: matrix types not handled by this function.");
505 assert(!(mlir::isa<mlir::IntegerType>(srcTy) ||
506 mlir::isa<mlir::IntegerType>(dstTy)) &&
507 "Obsolete code. Don't use mlir::IntegerType with CIR.");
508
509 mlir::Type fullDstTy = dstTy;
510 if (mlir::isa<cir::VectorType>(srcTy) &&
511 mlir::isa<cir::VectorType>(dstTy)) {
512 // Use the element types of the vectors to figure out the CastKind.
513 srcTy = mlir::dyn_cast<cir::VectorType>(srcTy).getElementType();
514 dstTy = mlir::dyn_cast<cir::VectorType>(dstTy).getElementType();
515 }
516
517 std::optional<cir::CastKind> castKind;
518
519 if (mlir::isa<cir::BoolType>(srcTy)) {
520 if (opts.treatBooleanAsSigned)
521 cgf.getCIRGenModule().errorNYI("signed bool");
522 if (cgf.getBuilder().isInt(dstTy))
523 castKind = cir::CastKind::bool_to_int;
524 else if (mlir::isa<cir::FPTypeInterface>(dstTy))
525 castKind = cir::CastKind::bool_to_float;
526 else
527 llvm_unreachable("Internal error: Cast to unexpected type");
528 } else if (cgf.getBuilder().isInt(srcTy)) {
529 if (cgf.getBuilder().isInt(dstTy))
530 castKind = cir::CastKind::integral;
531 else if (mlir::isa<cir::FPTypeInterface>(dstTy))
532 castKind = cir::CastKind::int_to_float;
533 else
534 llvm_unreachable("Internal error: Cast to unexpected type");
535 } else if (mlir::isa<cir::FPTypeInterface>(srcTy)) {
536 if (cgf.getBuilder().isInt(dstTy)) {
537 // If we can't recognize overflow as undefined behavior, assume that
538 // overflow saturates. This protects against normal optimizations if we
539 // are compiling with non-standard FP semantics.
540 if (!cgf.cgm.getCodeGenOpts().StrictFloatCastOverflow)
541 cgf.getCIRGenModule().errorNYI("strict float cast overflow");
543 castKind = cir::CastKind::float_to_int;
544 } else if (mlir::isa<cir::FPTypeInterface>(dstTy)) {
545 // TODO: split this to createFPExt/createFPTrunc
546 return builder.createFloatingCast(src, fullDstTy);
547 } else {
548 llvm_unreachable("Internal error: Cast to unexpected type");
549 }
550 } else {
551 llvm_unreachable("Internal error: Cast from unexpected type");
552 }
553
554 assert(castKind.has_value() && "Internal error: CastKind not set.");
555 return cir::CastOp::create(builder, src.getLoc(), fullDstTy, *castKind,
556 src);
557 }
558
559 mlir::Value
560 VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
561 return Visit(e->getReplacement());
562 }
563
564 mlir::Value VisitVAArgExpr(VAArgExpr *ve) {
565 QualType ty = ve->getType();
566
567 if (ty->isVariablyModifiedType()) {
568 cgf.cgm.errorNYI(ve->getSourceRange(),
569 "variably modified types in varargs");
570 }
571
572 return cgf.emitVAArg(ve);
573 }
574
575 mlir::Value VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *e) {
576 return Visit(e->getSemanticForm());
577 }
578
579 mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *e);
580 mlir::Value
581 VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);
582
583 // Unary Operators.
584 mlir::Value VisitUnaryPostDec(const UnaryOperator *e) {
585 LValue lv = cgf.emitLValue(e->getSubExpr());
586 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, false);
587 }
588 mlir::Value VisitUnaryPostInc(const UnaryOperator *e) {
589 LValue lv = cgf.emitLValue(e->getSubExpr());
590 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, false);
591 }
592 mlir::Value VisitUnaryPreDec(const UnaryOperator *e) {
593 LValue lv = cgf.emitLValue(e->getSubExpr());
594 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, true);
595 }
596 mlir::Value VisitUnaryPreInc(const UnaryOperator *e) {
597 LValue lv = cgf.emitLValue(e->getSubExpr());
598 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, true);
599 }
600 mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
601 cir::UnaryOpKind kind, bool isPre) {
602 if (cgf.getLangOpts().OpenMP)
603 cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec OpenMP");
604
605 QualType type = e->getSubExpr()->getType();
606
607 mlir::Value value;
608 mlir::Value input;
609
610 if (type->getAs<AtomicType>()) {
611 cgf.cgm.errorNYI(e->getSourceRange(), "Atomic inc/dec");
612 // TODO(cir): This is not correct, but it will produce reasonable code
613 // until atomic operations are implemented.
614 value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
615 input = value;
616 } else {
617 value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
618 input = value;
619 }
620
621 // NOTE: When possible, more frequent cases are handled first.
622
623 // Special case of integer increment that we have to check first: bool++.
624 // Due to promotion rules, we get:
625 // bool++ -> bool = bool + 1
626 // -> bool = (int)bool + 1
627 // -> bool = ((int)bool + 1 != 0)
628 // An interesting aspect of this is that increment is always true.
629 // Decrement does not have this property.
630 if (kind == cir::UnaryOpKind::Inc && type->isBooleanType()) {
631 value = builder.getTrue(cgf.getLoc(e->getExprLoc()));
632 } else if (type->isIntegerType()) {
633 QualType promotedType;
634 [[maybe_unused]] bool canPerformLossyDemotionCheck = false;
635 if (cgf.getContext().isPromotableIntegerType(type)) {
636 promotedType = cgf.getContext().getPromotedIntegerType(type);
637 assert(promotedType != type && "Shouldn't promote to the same type.");
638 canPerformLossyDemotionCheck = true;
639 canPerformLossyDemotionCheck &=
640 cgf.getContext().getCanonicalType(type) !=
641 cgf.getContext().getCanonicalType(promotedType);
642 canPerformLossyDemotionCheck &=
643 type->isIntegerType() && promotedType->isIntegerType();
644
645 // TODO(cir): Currently, we store bitwidths in CIR types only for
646 // integers. This might also be required for other types.
647
648 assert(
649 (!canPerformLossyDemotionCheck ||
650 type->isSignedIntegerOrEnumerationType() ||
651 promotedType->isSignedIntegerOrEnumerationType() ||
652 mlir::cast<cir::IntType>(cgf.convertType(type)).getWidth() ==
653 mlir::cast<cir::IntType>(cgf.convertType(promotedType)).getWidth()) &&
654 "The following check expects that if we do promotion to different "
655 "underlying canonical type, at least one of the types (either "
656 "base or promoted) will be signed, or the bitwidths will match.");
657 }
658
660 if (e->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
661 value = emitIncDecConsiderOverflowBehavior(e, value, kind);
662 } else {
663 cir::UnaryOpKind kind =
664 e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
665 // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
666 value = emitUnaryOp(e, kind, input, /*nsw=*/false);
667 }
668 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
669 QualType type = ptr->getPointeeType();
670 if (cgf.getContext().getAsVariableArrayType(type)) {
671 // VLA types don't have constant size.
672 cgf.cgm.errorNYI(e->getSourceRange(), "Pointer arithmetic on VLA");
673 return {};
674 } else if (type->isFunctionType()) {
675 // Arithmetic on function pointers (!) is just +-1.
676 cgf.cgm.errorNYI(e->getSourceRange(),
677 "Pointer arithmetic on function pointer");
678 return {};
679 } else {
680 // For everything else, we can just do a simple increment.
681 mlir::Location loc = cgf.getLoc(e->getSourceRange());
682 CIRGenBuilderTy &builder = cgf.getBuilder();
683 int amount = kind == cir::UnaryOpKind::Inc ? 1 : -1;
684 mlir::Value amt = builder.getSInt32(amount, loc);
686 value = builder.createPtrStride(loc, value, amt);
687 }
688 } else if (type->isVectorType()) {
689 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec vector");
690 return {};
691 } else if (type->isRealFloatingType()) {
693
694 if (type->isHalfType() &&
695 !cgf.getContext().getLangOpts().NativeHalfType) {
696 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec half");
697 return {};
698 }
699
700 if (mlir::isa<cir::SingleType, cir::DoubleType>(value.getType())) {
701 // Create the inc/dec operation.
702 // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
703 assert((kind == cir::UnaryOpKind::Inc ||
704 kind == cir::UnaryOpKind::Dec) && "Invalid UnaryOp kind");
705 value = emitUnaryOp(e, kind, value);
706 } else {
707 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fp type");
708 return {};
709 }
710 } else if (type->isFixedPointType()) {
711 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fixed point");
712 return {};
713 } else {
714 assert(type->castAs<ObjCObjectPointerType>());
715 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec ObjectiveC pointer");
716 return {};
717 }
718
719 CIRGenFunction::SourceLocRAIIObject sourceloc{
720 cgf, cgf.getLoc(e->getSourceRange())};
721
722 // Store the updated result through the lvalue
723 if (lv.isBitField())
724 return cgf.emitStoreThroughBitfieldLValue(RValue::get(value), lv);
725 else
726 cgf.emitStoreThroughLValue(RValue::get(value), lv);
727
728 // If this is a postinc, return the value read from memory, otherwise use
729 // the updated value.
730 return isPre ? value : input;
731 }
732
733 mlir::Value emitIncDecConsiderOverflowBehavior(const UnaryOperator *e,
734 mlir::Value inVal,
735 cir::UnaryOpKind kind) {
736 assert((kind == cir::UnaryOpKind::Inc ||
737 kind == cir::UnaryOpKind::Dec) && "Invalid UnaryOp kind");
738 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
739 case LangOptions::SOB_Defined:
740 return emitUnaryOp(e, kind, inVal, /*nsw=*/false);
741 case LangOptions::SOB_Undefined:
743 return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
744 case LangOptions::SOB_Trapping:
745 if (!e->canOverflow())
746 return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
747 cgf.cgm.errorNYI(e->getSourceRange(), "inc/def overflow SOB_Trapping");
748 return {};
749 }
750 llvm_unreachable("Unexpected signed overflow behavior kind");
751 }
752
753 mlir::Value VisitUnaryAddrOf(const UnaryOperator *e) {
754 if (llvm::isa<MemberPointerType>(e->getType()))
755 return cgf.cgm.emitMemberPointerConstant(e);
756
757 return cgf.emitLValue(e->getSubExpr()).getPointer();
758 }
759
760 mlir::Value VisitUnaryDeref(const UnaryOperator *e) {
761 if (e->getType()->isVoidType())
762 return Visit(e->getSubExpr()); // the actual value should be unused
763 return emitLoadOfLValue(e);
764 }
765
766 mlir::Value VisitUnaryPlus(const UnaryOperator *e) {
767 QualType promotionType = getPromotionType(e->getSubExpr()->getType());
768 mlir::Value result =
769 emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Plus, promotionType);
770 if (result && !promotionType.isNull())
771 return emitUnPromotedValue(result, e->getType());
772 return result;
773 }
774
775 mlir::Value VisitUnaryMinus(const UnaryOperator *e) {
776 QualType promotionType = getPromotionType(e->getSubExpr()->getType());
777 mlir::Value result =
778 emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Minus, promotionType);
779 if (result && !promotionType.isNull())
780 return emitUnPromotedValue(result, e->getType());
781 return result;
782 }
783
784 mlir::Value emitUnaryPlusOrMinus(const UnaryOperator *e,
785 cir::UnaryOpKind kind,
786 QualType promotionType) {
787 ignoreResultAssign = false;
788 mlir::Value operand;
789 if (!promotionType.isNull())
790 operand = cgf.emitPromotedScalarExpr(e->getSubExpr(), promotionType);
791 else
792 operand = Visit(e->getSubExpr());
793
794 bool nsw =
795 kind == cir::UnaryOpKind::Minus && e->getType()->isSignedIntegerType();
796
797 // NOTE: LLVM codegen will lower this directly to either an FNeg
798 // or a Sub instruction. In CIR this will be handled later in LowerToLLVM.
799 return emitUnaryOp(e, kind, operand, nsw);
800 }
801
802 mlir::Value emitUnaryOp(const UnaryOperator *e, cir::UnaryOpKind kind,
803 mlir::Value input, bool nsw = false) {
804 return cir::UnaryOp::create(builder,
805 cgf.getLoc(e->getSourceRange().getBegin()),
806 input.getType(), kind, input, nsw);
807 }
808
809 mlir::Value VisitUnaryNot(const UnaryOperator *e) {
810 ignoreResultAssign = false;
811 mlir::Value op = Visit(e->getSubExpr());
812 return emitUnaryOp(e, cir::UnaryOpKind::Not, op);
813 }
814
815 mlir::Value VisitUnaryLNot(const UnaryOperator *e);
816
817 mlir::Value VisitUnaryReal(const UnaryOperator *e);
818 mlir::Value VisitUnaryImag(const UnaryOperator *e);
819 mlir::Value VisitRealImag(const UnaryOperator *e,
820 QualType promotionType = QualType());
821
822 mlir::Value VisitUnaryExtension(const UnaryOperator *e) {
823 return Visit(e->getSubExpr());
824 }
825
826 // C++
827 mlir::Value VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e) {
828 cgf.cgm.errorNYI(e->getSourceRange(),
829 "ScalarExprEmitter: materialize temporary");
830 return {};
831 }
832 mlir::Value VisitSourceLocExpr(SourceLocExpr *e) {
833 ASTContext &ctx = cgf.getContext();
834 APValue evaluated =
835 e->EvaluateInContext(ctx, cgf.curSourceLocExprScope.getDefaultExpr());
836 mlir::Attribute attribute = ConstantEmitter(cgf).emitAbstract(
837 e->getLocation(), evaluated, e->getType());
838 mlir::TypedAttr typedAttr = mlir::cast<mlir::TypedAttr>(attribute);
839 return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()),
840 typedAttr);
841 }
842 mlir::Value VisitCXXDefaultArgExpr(CXXDefaultArgExpr *dae) {
843 CIRGenFunction::CXXDefaultArgExprScope scope(cgf, dae);
844 return Visit(dae->getExpr());
845 }
846 mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
847 CIRGenFunction::CXXDefaultInitExprScope scope(cgf, die);
848 return Visit(die->getExpr());
849 }
850
851 mlir::Value VisitCXXThisExpr(CXXThisExpr *te) { return cgf.loadCXXThis(); }
852
853 mlir::Value VisitExprWithCleanups(ExprWithCleanups *e);
854 mlir::Value VisitCXXNewExpr(const CXXNewExpr *e) {
855 return cgf.emitCXXNewExpr(e);
856 }
857 mlir::Value VisitCXXDeleteExpr(const CXXDeleteExpr *e) {
858 cgf.emitCXXDeleteExpr(e);
859 return {};
860 }
861 mlir::Value VisitTypeTraitExpr(const TypeTraitExpr *e) {
862 mlir::Location loc = cgf.getLoc(e->getExprLoc());
863 if (e->isStoredAsBoolean())
864 return builder.getBool(e->getBoolValue(), loc);
865 cgf.cgm.errorNYI(e->getSourceRange(),
866 "ScalarExprEmitter: TypeTraitExpr stored as int");
867 return {};
868 }
869 mlir::Value
870 VisitConceptSpecializationExpr(const ConceptSpecializationExpr *e) {
871 return builder.getBool(e->isSatisfied(), cgf.getLoc(e->getExprLoc()));
872 }
873 mlir::Value VisitRequiresExpr(const RequiresExpr *e) {
874 return builder.getBool(e->isSatisfied(), cgf.getLoc(e->getExprLoc()));
875 }
876 mlir::Value VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *e) {
877 mlir::Type type = cgf.convertType(e->getType());
878 mlir::Location loc = cgf.getLoc(e->getExprLoc());
879 return builder.getConstInt(loc, type, e->getValue());
880 }
881 mlir::Value VisitExpressionTraitExpr(const ExpressionTraitExpr *e) {
882 return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
883 }
884 mlir::Value VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *e) {
885 cgf.cgm.errorNYI(e->getSourceRange(),
886 "ScalarExprEmitter: cxx pseudo destructor");
887 return {};
888 }
889 mlir::Value VisitCXXThrowExpr(const CXXThrowExpr *e) {
890 cgf.emitCXXThrowExpr(e);
891 return {};
892 }
893
894 mlir::Value VisitCXXNoexceptExpr(CXXNoexceptExpr *e) {
895 return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
896 }
897
898 /// Emit a conversion from the specified type to the specified destination
899 /// type, both of which are CIR scalar types.
900 /// TODO: do we need ScalarConversionOpts here? Should be done in another
901 /// pass.
902 mlir::Value
903 emitScalarConversion(mlir::Value src, QualType srcType, QualType dstType,
904 SourceLocation loc,
905 ScalarConversionOpts opts = ScalarConversionOpts()) {
906 // All conversions involving fixed point types should be handled by the
907 // emitFixedPoint family functions. This is done to prevent bloating up
908 // this function more, and although fixed point numbers are represented by
909 // integers, we do not want to follow any logic that assumes they should be
910 // treated as integers.
911 // TODO(leonardchan): When necessary, add another if statement checking for
912 // conversions to fixed point types from other types.
914 if (srcType->isFixedPointType() || dstType->isFixedPointType()) {
915 cgf.getCIRGenModule().errorNYI(loc, "fixed point conversions");
916 return {};
917 }
918
919 srcType = srcType.getCanonicalType();
920 dstType = dstType.getCanonicalType();
921 if (srcType == dstType) {
922 if (opts.emitImplicitIntegerSignChangeChecks)
923 cgf.getCIRGenModule().errorNYI(loc,
924 "implicit integer sign change checks");
925 return src;
926 }
927
928 if (dstType->isVoidType())
929 return {};
930
931 mlir::Type mlirSrcType = src.getType();
932
933 // Handle conversions to bool first; they are special: comparisons
934 // against zero.
935 if (dstType->isBooleanType())
936 return emitConversionToBool(src, srcType, cgf.getLoc(loc));
937
938 mlir::Type mlirDstType = cgf.convertType(dstType);
939
940 if (srcType->isHalfType() &&
941 !cgf.getContext().getLangOpts().NativeHalfType) {
942 // Cast to FP using the intrinsic if the half type itself isn't supported.
943 if (mlir::isa<cir::FPTypeInterface>(mlirDstType)) {
944 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
945 cgf.getCIRGenModule().errorNYI(loc,
946 "cast via llvm.convert.from.fp16");
947 } else {
948 // Cast to other types through float, using either the intrinsic or
949 // FPExt, depending on whether the half type itself is supported (as
950 // opposed to operations on half, available with NativeHalfType).
951 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
952 cgf.getCIRGenModule().errorNYI(loc,
953 "cast via llvm.convert.from.fp16");
954 // FIXME(cir): For now lets pretend we shouldn't use the conversion
955 // intrinsics and insert a cast here unconditionally.
956 src = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, src,
957 cgf.floatTy);
958 srcType = cgf.getContext().FloatTy;
959 mlirSrcType = cgf.floatTy;
960 }
961 }
962
963 // TODO(cir): LLVM codegen ignore conversions like int -> uint,
964 // is there anything to be done for CIR here?
965 if (mlirSrcType == mlirDstType) {
966 if (opts.emitImplicitIntegerSignChangeChecks)
967 cgf.getCIRGenModule().errorNYI(loc,
968 "implicit integer sign change checks");
969 return src;
970 }
971
972 // Handle pointer conversions next: pointers can only be converted to/from
973 // other pointers and integers. Check for pointer types in terms of LLVM, as
974 // some native types (like Obj-C id) may map to a pointer type.
975 if (auto dstPT = dyn_cast<cir::PointerType>(mlirDstType)) {
976 cgf.getCIRGenModule().errorNYI(loc, "pointer casts");
977 return builder.getNullPtr(dstPT, src.getLoc());
978 }
979
980 if (isa<cir::PointerType>(mlirSrcType)) {
981 // Must be a ptr-to-int cast.
982 assert(isa<cir::IntType>(mlirDstType) && "not ptr->int?");
983 return builder.createPtrToInt(src, mlirDstType);
984 }
985
986 // A scalar can be splatted to an extended vector of the same element type
987 if (dstType->isExtVectorType() && !srcType->isVectorType()) {
988 // Sema should add casts to make sure that the source expression's type
989 // is the same as the vector's element type (sans qualifiers)
990 assert(dstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
991 srcType.getTypePtr() &&
992 "Splatted expr doesn't match with vector element type?");
993
994 cgf.getCIRGenModule().errorNYI(loc, "vector splatting");
995 return {};
996 }
997
998 if (srcType->isMatrixType() && dstType->isMatrixType()) {
999 cgf.getCIRGenModule().errorNYI(loc,
1000 "matrix type to matrix type conversion");
1001 return {};
1002 }
1003 assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
1004 "Internal error: conversion between matrix type and scalar type");
1005
1006 // Finally, we have the arithmetic types or vectors of arithmetic types.
1007 mlir::Value res = nullptr;
1008 mlir::Type resTy = mlirDstType;
1009
1010 res = emitScalarCast(src, srcType, dstType, mlirSrcType, mlirDstType, opts);
1011
1012 if (mlirDstType != resTy) {
1013 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1014 cgf.getCIRGenModule().errorNYI(loc, "cast via llvm.convert.to.fp16");
1015 }
1016 // FIXME(cir): For now we never use FP16 conversion intrinsics even if
1017 // required by the target. Change that once this is implemented
1018 res = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, res,
1019 resTy);
1020 }
1021
1022 if (opts.emitImplicitIntegerTruncationChecks)
1023 cgf.getCIRGenModule().errorNYI(loc, "implicit integer truncation checks");
1024
1025 if (opts.emitImplicitIntegerSignChangeChecks)
1026 cgf.getCIRGenModule().errorNYI(loc,
1027 "implicit integer sign change checks");
1028
1029 return res;
1030 }
1031
1032 BinOpInfo emitBinOps(const BinaryOperator *e,
1033 QualType promotionType = QualType()) {
1034 ignoreResultAssign = false;
1035 BinOpInfo result;
1036 result.lhs = cgf.emitPromotedScalarExpr(e->getLHS(), promotionType);
1037 result.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionType);
1038 if (!promotionType.isNull())
1039 result.fullType = promotionType;
1040 else
1041 result.fullType = e->getType();
1042 result.compType = result.fullType;
1043 if (const auto *vecType = dyn_cast_or_null<VectorType>(result.fullType)) {
1044 result.compType = vecType->getElementType();
1045 }
1046 result.opcode = e->getOpcode();
1047 result.loc = e->getSourceRange();
1048 // TODO(cir): Result.FPFeatures
1050 result.e = e;
1051 return result;
1052 }
1053
1054 mlir::Value emitMul(const BinOpInfo &ops);
1055 mlir::Value emitDiv(const BinOpInfo &ops);
1056 mlir::Value emitRem(const BinOpInfo &ops);
1057 mlir::Value emitAdd(const BinOpInfo &ops);
1058 mlir::Value emitSub(const BinOpInfo &ops);
1059 mlir::Value emitShl(const BinOpInfo &ops);
1060 mlir::Value emitShr(const BinOpInfo &ops);
1061 mlir::Value emitAnd(const BinOpInfo &ops);
1062 mlir::Value emitXor(const BinOpInfo &ops);
1063 mlir::Value emitOr(const BinOpInfo &ops);
1064
1065 LValue emitCompoundAssignLValue(
1066 const CompoundAssignOperator *e,
1067 mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &),
1068 mlir::Value &result);
1069 mlir::Value
1070 emitCompoundAssign(const CompoundAssignOperator *e,
1071 mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &));
1072
1073 // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM
1074 // codegen.
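  // Returns the wider type to use for excess-precision arithmetic (e.g. float
  // when half-precision operands use excess precision), or a null QualType
  // when no promotion is required.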
1075 QualType getPromotionType(QualType ty) {
1076 const clang::ASTContext &ctx = cgf.getContext();
1077 if (auto *complexTy = ty->getAs<ComplexType>()) {
1078 QualType elementTy = complexTy->getElementType();
1079 if (elementTy.UseExcessPrecision(ctx))
1080 return ctx.getComplexType(ctx.FloatTy);
1081 }
1082
1083 if (ty.UseExcessPrecision(cgf.getContext())) {
1084 if (auto *vt = ty->getAs<VectorType>()) {
1085 unsigned numElements = vt->getNumElements();
1086 return ctx.getVectorType(ctx.FloatTy, numElements, vt->getVectorKind());
1087 }
1088 return cgf.getContext().FloatTy;
1089 }
1090
1091 return QualType();
1092 }
1093
1094// Binary operators and binary compound assignment operators.
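// For each operator OP, HANDLEBINOP(OP) defines both VisitBin##OP (e.g.
// VisitBinMul) and VisitBin##OP##Assign (e.g. VisitBinMulAssign), routing the
// arithmetic through the corresponding emit##OP helper.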
1095#define HANDLEBINOP(OP) \
1096 mlir::Value VisitBin##OP(const BinaryOperator *e) { \
1097 QualType promotionTy = getPromotionType(e->getType()); \
1098 auto result = emit##OP(emitBinOps(e, promotionTy)); \
1099 if (result && !promotionTy.isNull()) \
1100 result = emitUnPromotedValue(result, e->getType()); \
1101 return result; \
1102 } \
1103 mlir::Value VisitBin##OP##Assign(const CompoundAssignOperator *e) { \
1104 return emitCompoundAssign(e, &ScalarExprEmitter::emit##OP); \
1105 }
1106
1107 HANDLEBINOP(Mul)
1108 HANDLEBINOP(Div)
1109 HANDLEBINOP(Rem)
1110 HANDLEBINOP(Add)
1111 HANDLEBINOP(Sub)
1112 HANDLEBINOP(Shl)
1113 HANDLEBINOP(Shr)
1114 HANDLEBINOP(And)
1115 HANDLEBINOP(Xor)
1116 HANDLEBINOP(Or)
1117#undef HANDLEBINOP
1118
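  // Comparisons are emitted as cir.cmp (or cir.vec.cmp for vector operands)
  // producing !cir.bool, then converted to the expression's result type.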
1119 mlir::Value emitCmp(const BinaryOperator *e) {
1120 ignoreResultAssign = false;
1121 const mlir::Location loc = cgf.getLoc(e->getExprLoc());
1122 mlir::Value result;
1123 QualType lhsTy = e->getLHS()->getType();
1124 QualType rhsTy = e->getRHS()->getType();
1125
1126 auto clangCmpToCIRCmp =
1127 [](clang::BinaryOperatorKind clangCmp) -> cir::CmpOpKind {
1128 switch (clangCmp) {
1129 case BO_LT:
1130 return cir::CmpOpKind::lt;
1131 case BO_GT:
1132 return cir::CmpOpKind::gt;
1133 case BO_LE:
1134 return cir::CmpOpKind::le;
1135 case BO_GE:
1136 return cir::CmpOpKind::ge;
1137 case BO_EQ:
1138 return cir::CmpOpKind::eq;
1139 case BO_NE:
1140 return cir::CmpOpKind::ne;
1141 default:
1142 llvm_unreachable("unsupported comparison kind for cir.cmp");
1143 }
1144 };
1145
1146 cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
1147 if (lhsTy->getAs<MemberPointerType>()) {
1149 assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
1150 mlir::Value lhs = cgf.emitScalarExpr(e->getLHS());
1151 mlir::Value rhs = cgf.emitScalarExpr(e->getRHS());
1152 result = builder.createCompare(loc, kind, lhs, rhs);
1153 } else if (!lhsTy->isAnyComplexType() && !rhsTy->isAnyComplexType()) {
1154 BinOpInfo boInfo = emitBinOps(e);
1155 mlir::Value lhs = boInfo.lhs;
1156 mlir::Value rhs = boInfo.rhs;
1157
1158 if (lhsTy->isVectorType()) {
1159 if (!e->getType()->isVectorType()) {
1160 // If AltiVec, the comparison results in a numeric type, so we use
1161 // intrinsics comparing vectors and giving 0 or 1 as a result
1162 cgf.cgm.errorNYI(loc, "AltiVec comparison");
1163 } else {
1164 // Other kinds of vectors. Element-wise comparison returning
1165 // a vector.
1166 result = cir::VecCmpOp::create(builder, cgf.getLoc(boInfo.loc),
1167 cgf.convertType(boInfo.fullType), kind,
1168 boInfo.lhs, boInfo.rhs);
1169 }
1170 } else if (boInfo.isFixedPointOp()) {
1172 cgf.cgm.errorNYI(loc, "fixed point comparisons");
1173 result = builder.getBool(false, loc);
1174 } else {
1175 // integers and pointers
1176 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers &&
1177 mlir::isa<cir::PointerType>(lhs.getType()) &&
1178 mlir::isa<cir::PointerType>(rhs.getType())) {
1179 cgf.cgm.errorNYI(loc, "strict vtable pointer comparisons");
1180 }
1181
1182 cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
1183 result = builder.createCompare(loc, kind, lhs, rhs);
1184 }
1185 } else {
1186 // Complex Comparison: can only be an equality comparison.
1187 assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
1188
1189 BinOpInfo boInfo = emitBinOps(e);
1190 result = cir::CmpOp::create(builder, loc, kind, boInfo.lhs, boInfo.rhs);
1191 }
1192
1193 return emitScalarConversion(result, cgf.getContext().BoolTy, e->getType(),
1194 e->getExprLoc());
1195 }
1196
1197// Comparisons.
1198#define VISITCOMP(CODE) \
1199 mlir::Value VisitBin##CODE(const BinaryOperator *E) { return emitCmp(E); }
1200 VISITCOMP(LT)
1201 VISITCOMP(GT)
1202 VISITCOMP(LE)
1203 VISITCOMP(GE)
1204 VISITCOMP(EQ)
1205 VISITCOMP(NE)
1206#undef VISITCOMP
1207
1208 mlir::Value VisitBinAssign(const BinaryOperator *e) {
1209 const bool ignore = std::exchange(ignoreResultAssign, false);
1210
1211 mlir::Value rhs;
1212 LValue lhs;
1213
1214 switch (e->getLHS()->getType().getObjCLifetime()) {
1215 case Qualifiers::OCL_Strong:
1216 case Qualifiers::OCL_Autoreleasing:
1217 case Qualifiers::OCL_ExplicitNone:
1218 case Qualifiers::OCL_Weak:
1220 break;
1221 case Qualifiers::OCL_None:
1222 // __block variables need to have the rhs evaluated first, plus this
1223 // should improve codegen just a little.
1224 rhs = Visit(e->getRHS());
1226 // TODO(cir): This needs to be emitCheckedLValue() once we support
1227 // sanitizers
1228 lhs = cgf.emitLValue(e->getLHS());
1229
1230 // Store the value into the LHS. Bit-fields are handled specially because
1231 // the result is altered by the store, i.e., [C99 6.5.16p1]
1232 // 'An assignment expression has the value of the left operand after the
1233 // assignment...'.
1234 if (lhs.isBitField()) {
1235 rhs = cgf.emitStoreThroughBitfieldLValue(RValue::get(rhs), lhs);
1236 } else {
1237 cgf.emitNullabilityCheck(lhs, rhs, e->getExprLoc());
1238 CIRGenFunction::SourceLocRAIIObject sourceloc{
1239 cgf, cgf.getLoc(e->getSourceRange())};
1240 cgf.emitStoreThroughLValue(RValue::get(rhs), lhs);
1241 }
1242 }
1243
1244 // If the result is clearly ignored, return now.
1245 if (ignore)
1246 return nullptr;
1247
1248 // The result of an assignment in C is the assigned r-value.
1249 if (!cgf.getLangOpts().CPlusPlus)
1250 return rhs;
1251
1252 // If the lvalue is non-volatile, return the computed value of the
1253 // assignment.
1254 if (!lhs.isVolatile())
1255 return rhs;
1256
1257 // Otherwise, reload the value.
1258 return emitLoadOfLValue(lhs, e->getExprLoc());
1259 }
1260
1261 mlir::Value VisitBinComma(const BinaryOperator *e) {
1262 cgf.emitIgnoredExpr(e->getLHS());
1263 // NOTE: We don't need to EnsureInsertPoint() like LLVM codegen.
1264 return Visit(e->getRHS());
1265 }
1266
1267 mlir::Value VisitBinLAnd(const clang::BinaryOperator *e) {
1268 if (e->getType()->isVectorType()) {
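      // Vector && is element-wise and does not short-circuit: compare each
      // operand against a zero vector, AND the boolean results, and cast the
      // result back to the vector type.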
1269 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1270 auto vecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
1271 mlir::Value zeroValue = builder.getNullValue(vecTy.getElementType(), loc);
1272 SmallVector<mlir::Value, 16> elements(vecTy.getSize(), zeroValue);
1273 auto zeroVec = cir::VecCreateOp::create(builder, loc, vecTy, elements);
1274
1275 mlir::Value lhs = Visit(e->getLHS());
1276 mlir::Value rhs = Visit(e->getRHS());
1277
1278 auto cmpOpKind = cir::CmpOpKind::ne;
1279 lhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, lhs, zeroVec);
1280 rhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, rhs, zeroVec);
1281 mlir::Value vecAnd = builder.createAnd(loc, lhs, rhs);
1282 return builder.createIntCast(vecAnd, vecTy);
1283 }
1284
1286 mlir::Type resTy = cgf.convertType(e->getType());
1287 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1288
1289 CIRGenFunction::ConditionalEvaluation eval(cgf);
1290
1291 mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
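    // Model short-circuit evaluation with cir.ternary: the RHS is only
    // evaluated in the true branch; the false branch yields a constant false.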
1292 auto resOp = cir::TernaryOp::create(
1293 builder, loc, lhsCondV, /*trueBuilder=*/
1294 [&](mlir::OpBuilder &b, mlir::Location loc) {
1295 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1296 b.getInsertionBlock()};
1297 cgf.curLexScope->setAsTernary();
1298 mlir::Value res = cgf.evaluateExprAsBool(e->getRHS());
1299 lexScope.forceCleanup();
1300 cir::YieldOp::create(b, loc, res);
1301 },
1302 /*falseBuilder*/
1303 [&](mlir::OpBuilder &b, mlir::Location loc) {
1304 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1305 b.getInsertionBlock()};
1306 cgf.curLexScope->setAsTernary();
1307 auto res = cir::ConstantOp::create(b, loc, builder.getFalseAttr());
1308 cir::YieldOp::create(b, loc, res.getRes());
1309 });
1310 return maybePromoteBoolResult(resOp.getResult(), resTy);
1311 }
1312
1313 mlir::Value VisitBinLOr(const clang::BinaryOperator *e) {
1314 if (e->getType()->isVectorType()) {
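      // As with vector &&, vector || is element-wise and non-short-circuiting,
      // using an OR of the element-wise comparisons against zero.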
1315 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1316 auto vecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
1317 mlir::Value zeroValue = builder.getNullValue(vecTy.getElementType(), loc);
1318 SmallVector<mlir::Value, 16> elements(vecTy.getSize(), zeroValue);
1319 auto zeroVec = cir::VecCreateOp::create(builder, loc, vecTy, elements);
1320
1321 mlir::Value lhs = Visit(e->getLHS());
1322 mlir::Value rhs = Visit(e->getRHS());
1323
1324 auto cmpOpKind = cir::CmpOpKind::ne;
1325 lhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, lhs, zeroVec);
1326 rhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, rhs, zeroVec);
1327 mlir::Value vecOr = builder.createOr(loc, lhs, rhs);
1328 return builder.createIntCast(vecOr, vecTy);
1329 }
1330
1332 mlir::Type resTy = cgf.convertType(e->getType());
1333 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1334
1335 CIRGenFunction::ConditionalEvaluation eval(cgf);
1336
1337 mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
1338 auto resOp = cir::TernaryOp::create(
1339 builder, loc, lhsCondV, /*trueBuilder=*/
1340 [&](mlir::OpBuilder &b, mlir::Location loc) {
1341 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1342 b.getInsertionBlock()};
1343 cgf.curLexScope->setAsTernary();
1344 auto res = cir::ConstantOp::create(b, loc, builder.getTrueAttr());
1345 cir::YieldOp::create(b, loc, res.getRes());
1346 },
1347 /*falseBuilder*/
1348 [&](mlir::OpBuilder &b, mlir::Location loc) {
1349 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1350 b.getInsertionBlock()};
1351 cgf.curLexScope->setAsTernary();
1352 mlir::Value res = cgf.evaluateExprAsBool(e->getRHS());
1353 lexScope.forceCleanup();
1354 cir::YieldOp::create(b, loc, res);
1355 });
1356
1357 return maybePromoteBoolResult(resOp.getResult(), resTy);
1358 }
1359
1360 mlir::Value VisitBinPtrMemD(const BinaryOperator *e) {
1361 return emitLoadOfLValue(e);
1362 }
1363
1364 mlir::Value VisitBinPtrMemI(const BinaryOperator *e) {
1365 return emitLoadOfLValue(e);
1366 }
1367
1368 // Other Operators.
1369 mlir::Value VisitBlockExpr(const BlockExpr *e) {
1370 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: block");
1371 return {};
1372 }
1373
1374 mlir::Value VisitChooseExpr(ChooseExpr *e) {
1375 return Visit(e->getChosenSubExpr());
1376 }
1377
1378 mlir::Value VisitObjCStringLiteral(const ObjCStringLiteral *e) {
1379 cgf.cgm.errorNYI(e->getSourceRange(),
1380 "ScalarExprEmitter: objc string literal");
1381 return {};
1382 }
1383 mlir::Value VisitObjCBoxedExpr(ObjCBoxedExpr *e) {
1384 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc boxed");
1385 return {};
1386 }
1387 mlir::Value VisitObjCArrayLiteral(ObjCArrayLiteral *e) {
1388 cgf.cgm.errorNYI(e->getSourceRange(),
1389 "ScalarExprEmitter: objc array literal");
1390 return {};
1391 }
1392 mlir::Value VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *e) {
1393 cgf.cgm.errorNYI(e->getSourceRange(),
1394 "ScalarExprEmitter: objc dictionary literal");
1395 return {};
1396 }
1397
1398 mlir::Value VisitAsTypeExpr(AsTypeExpr *e) {
1399 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: as type");
1400 return {};
1401 }
1402
1403 mlir::Value VisitAtomicExpr(AtomicExpr *e) {
1404 return cgf.emitAtomicExpr(e).getValue();
1405 }
1406};
1407
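// Emits a compound assignment (e.g. "a += b"): the RHS is emitted first, the
// LHS is loaded and converted to the computation type, the operation is
// applied, and the result is converted back and stored through the LHS lvalue,
// which is returned. The stored scalar is passed back through `result`.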
1408LValue ScalarExprEmitter::emitCompoundAssignLValue(
1409 const CompoundAssignOperator *e,
1410 mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &),
1411 mlir::Value &result) {
1412 if (e->getComputationResultType()->isAnyComplexType())
1413 return cgf.emitScalarCompoundAssignWithComplex(e, result);
1414
1415 QualType lhsTy = e->getLHS()->getType();
1416 BinOpInfo opInfo;
1417
1418 // Emit the RHS first. __block variables need to have the rhs evaluated
1419 // first, plus this should improve codegen a little.
1420
1421 QualType promotionTypeCR = getPromotionType(e->getComputationResultType());
1422 if (promotionTypeCR.isNull())
1423 promotionTypeCR = e->getComputationResultType();
1424
1425 QualType promotionTypeLHS = getPromotionType(e->getComputationLHSType());
1426 QualType promotionTypeRHS = getPromotionType(e->getRHS()->getType());
1427
1428 if (!promotionTypeRHS.isNull())
1429 opInfo.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS);
1430 else
1431 opInfo.rhs = Visit(e->getRHS());
1432
1433 opInfo.fullType = promotionTypeCR;
1434 opInfo.compType = opInfo.fullType;
1435 if (const auto *vecType = dyn_cast_or_null<VectorType>(opInfo.fullType))
1436 opInfo.compType = vecType->getElementType();
1437 opInfo.opcode = e->getOpcode();
1438 opInfo.fpfeatures = e->getFPFeaturesInEffect(cgf.getLangOpts());
1439 opInfo.e = e;
1440 opInfo.loc = e->getSourceRange();
1441
1442 // Load/convert the LHS
1443 LValue lhsLV = cgf.emitLValue(e->getLHS());
1444
1445 if (lhsTy->getAs<AtomicType>()) {
1446 cgf.cgm.errorNYI(result.getLoc(), "atomic lvalue assign");
1447 return LValue();
1448 }
1449
1450 opInfo.lhs = emitLoadOfLValue(lhsLV, e->getExprLoc());
1451
1452 CIRGenFunction::SourceLocRAIIObject sourceloc{
1453 cgf, cgf.getLoc(e->getSourceRange())};
1454 SourceLocation loc = e->getExprLoc();
1455 if (!promotionTypeLHS.isNull())
1456 opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy, promotionTypeLHS, loc);
1457 else
1458 opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy,
1459 e->getComputationLHSType(), loc);
1460
1461 // Expand the binary operator.
1462 result = (this->*func)(opInfo);
1463
1464 // Convert the result back to the LHS type,
1465 // potentially with Implicit Conversion sanitizer check.
1466 result = emitScalarConversion(result, promotionTypeCR, lhsTy, loc,
1467 ScalarConversionOpts(cgf.sanOpts));
1468
1469 // Store the result value into the LHS lvalue. Bit-fields are handled
1470 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
1471 // 'An assignment expression has the value of the left operand after the
1472 // assignment...'.
1473 if (lhsLV.isBitField())
1474 cgf.emitStoreThroughBitfieldLValue(RValue::get(result), lhsLV);
1475 else
1476 cgf.emitStoreThroughLValue(RValue::get(result), lhsLV);
1477
1478 if (cgf.getLangOpts().OpenMP)
1479 cgf.cgm.errorNYI(e->getSourceRange(), "openmp");
1480
1481 return lhsLV;
1482}
1483
1484mlir::Value ScalarExprEmitter::emitComplexToScalarConversion(mlir::Location loc,
1485 mlir::Value value,
1486 CastKind kind,
1487 QualType destTy) {
1488 cir::CastKind castOpKind;
1489 switch (kind) {
1490 case CK_FloatingComplexToReal:
1491 castOpKind = cir::CastKind::float_complex_to_real;
1492 break;
1493 case CK_IntegralComplexToReal:
1494 castOpKind = cir::CastKind::int_complex_to_real;
1495 break;
1496 case CK_FloatingComplexToBoolean:
1497 castOpKind = cir::CastKind::float_complex_to_bool;
1498 break;
1499 case CK_IntegralComplexToBoolean:
1500 castOpKind = cir::CastKind::int_complex_to_bool;
1501 break;
1502 default:
1503 llvm_unreachable("invalid complex-to-scalar cast kind");
1504 }
1505
1506 return builder.createCast(loc, castOpKind, value, cgf.convertType(destTy));
1507}
1508
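// Emits `e` in the promoted (excess-precision) type when `promotionType` is
// non-null, recursing into binary and unary operators so the whole subtree is
// evaluated at the promoted width.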
1509mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e,
1510 QualType promotionType) {
1511 e = e->IgnoreParens();
1512 if (const auto *bo = dyn_cast<BinaryOperator>(e)) {
1513 switch (bo->getOpcode()) {
1514#define HANDLE_BINOP(OP) \
1515 case BO_##OP: \
1516 return emit##OP(emitBinOps(bo, promotionType));
1517 HANDLE_BINOP(Add)
1518 HANDLE_BINOP(Sub)
1519 HANDLE_BINOP(Mul)
1520 HANDLE_BINOP(Div)
1521#undef HANDLE_BINOP
1522 default:
1523 break;
1524 }
1525 } else if (const auto *uo = dyn_cast<UnaryOperator>(e)) {
1526 switch (uo->getOpcode()) {
1527 case UO_Imag:
1528 case UO_Real:
1529 return VisitRealImag(uo, promotionType);
1530 case UO_Minus:
1531 return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Minus, promotionType);
1532 case UO_Plus:
1533 return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Plus, promotionType);
1534 default:
1535 break;
1536 }
1537 }
1538 mlir::Value result = Visit(const_cast<Expr *>(e));
1539 if (result) {
1540 if (!promotionType.isNull())
1541 return emitPromotedValue(result, promotionType);
1542 return emitUnPromotedValue(result, e->getType());
1543 }
1544 return result;
1545}
1546
1547mlir::Value ScalarExprEmitter::emitCompoundAssign(
1548 const CompoundAssignOperator *e,
1549 mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &)) {
1550
1551 bool ignore = std::exchange(ignoreResultAssign, false);
1552 mlir::Value rhs;
1553 LValue lhs = emitCompoundAssignLValue(e, func, rhs);
1554
1555 // If the result is clearly ignored, return now.
1556 if (ignore)
1557 return {};
1558
1559 // The result of an assignment in C is the assigned r-value.
1560 if (!cgf.getLangOpts().CPlusPlus)
1561 return rhs;
1562
1563 // If the lvalue is non-volatile, return the computed value of the assignment.
1564 if (!lhs.isVolatile())
1565 return rhs;
1566
1567 // Otherwise, reload the value.
1568 return emitLoadOfLValue(lhs, e->getExprLoc());
1569}
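// For example (illustrative, hypothetical user code): given
//
//   short s;
//   int i;
//   s += i;
//
// the lvalue for 's' is loaded, converted to the computation type 'int',
// added to the RHS, converted back to 'short' through emitScalarConversion,
// and stored through the original lvalue by the helpers above.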
1570
1571mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *e) {
1572 mlir::Location scopeLoc = cgf.getLoc(e->getSourceRange());
1573 mlir::OpBuilder &builder = cgf.builder;
1574
1575 auto scope = cir::ScopeOp::create(
1576 builder, scopeLoc,
1577 /*scopeBuilder=*/
1578 [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) {
1579 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1580 builder.getInsertionBlock()};
1581 mlir::Value scopeYieldVal = Visit(e->getSubExpr());
1582 if (scopeYieldVal) {
1583 // Defend against dominance problems caused by jumps out of expression
1584 // evaluation through the shared cleanup block.
1585 lexScope.forceCleanup();
1586 cir::YieldOp::create(builder, loc, scopeYieldVal);
1587 yieldTy = scopeYieldVal.getType();
1588 }
1589 });
1590
1591 return scope.getNumResults() > 0 ? scope->getResult(0) : nullptr;
1592}
1593
1594} // namespace
1595
1596LValue
1597CIRGenFunction::emitCompoundAssignmentLValue(const CompoundAssignOperator *e) {
1598 ScalarExprEmitter emitter(*this, builder);
1599 mlir::Value result;
1600 switch (e->getOpcode()) {
1601#define COMPOUND_OP(Op) \
1602 case BO_##Op##Assign: \
1603 return emitter.emitCompoundAssignLValue(e, &ScalarExprEmitter::emit##Op, \
1604 result)
1605 COMPOUND_OP(Mul);
1606 COMPOUND_OP(Div);
1607 COMPOUND_OP(Rem);
1608 COMPOUND_OP(Add);
1609 COMPOUND_OP(Sub);
1610 COMPOUND_OP(Shl);
1611 COMPOUND_OP(Shr);
1612 COMPOUND_OP(And);
1613 COMPOUND_OP(Xor);
1614 COMPOUND_OP(Or);
1615#undef COMPOUND_OP
1616
1617 case BO_PtrMemD:
1618 case BO_PtrMemI:
1619 case BO_Mul:
1620 case BO_Div:
1621 case BO_Rem:
1622 case BO_Add:
1623 case BO_Sub:
1624 case BO_Shl:
1625 case BO_Shr:
1626 case BO_LT:
1627 case BO_GT:
1628 case BO_LE:
1629 case BO_GE:
1630 case BO_EQ:
1631 case BO_NE:
1632 case BO_Cmp:
1633 case BO_And:
1634 case BO_Xor:
1635 case BO_Or:
1636 case BO_LAnd:
1637 case BO_LOr:
1638 case BO_Assign:
1639 case BO_Comma:
1640 llvm_unreachable("Not valid compound assignment operators");
1641 }
1642 llvm_unreachable("Unhandled compound assignment operator");
1643}
1644
1645/// Emit the computation of the specified expression of scalar type.
1646mlir::Value CIRGenFunction::emitScalarExpr(const Expr *e,
1647 bool ignoreResultAssign) {
1648 assert(e && hasScalarEvaluationKind(e->getType()) &&
1649 "Invalid scalar expression to emit");
1650
1651 return ScalarExprEmitter(*this, builder, ignoreResultAssign)
1652 .Visit(const_cast<Expr *>(e));
1653}
1654
1655mlir::Value CIRGenFunction::emitPromotedScalarExpr(const Expr *e,
1656 QualType promotionType) {
1657 if (!promotionType.isNull())
1658 return ScalarExprEmitter(*this, builder).emitPromoted(e, promotionType);
1659 return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
1660}
1661
1662[[maybe_unused]] static bool mustVisitNullValue(const Expr *e) {
1663 // If a null pointer expression's type is the C++0x nullptr_t and
1664 // the expression is not a simple literal, it must be evaluated
1665 // for its potential side effects.
1666 if (isa<CXXNullPtrLiteralExpr>(e))
1667 return false;
1668 return e->getType()->isNullPtrType();
1669}
1670
1671/// If \p e is a widened promoted integer, get its base (unpromoted) type.
1672static std::optional<QualType>
1673getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e) {
1674 const Expr *base = e->IgnoreImpCasts();
1675 if (e == base)
1676 return std::nullopt;
1677
1678 QualType baseTy = base->getType();
1679 if (!astContext.isPromotableIntegerType(baseTy) ||
1680 astContext.getTypeSize(baseTy) >= astContext.getTypeSize(e->getType()))
1681 return std::nullopt;
1682
1683 return baseTy;
1684}
1685
1686/// Check if \p e is a widened promoted integer.
1687[[maybe_unused]] static bool isWidenedIntegerOp(const ASTContext &astContext,
1688 const Expr *e) {
1689 return getUnwidenedIntegerType(astContext, e).has_value();
1690}
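// For example (illustrative, hypothetical user code): in
//
//   short a, b;
//   int r = a + b;
//
// both operands carry implicit promotions to 'int'; getUnwidenedIntegerType
// strips the implicit cast and reports the unpromoted 'short' type, whereas
// for an operand that is already an 'int' it returns std::nullopt.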
1691
1692/// Check if we can skip the overflow check for \p Op.
1693[[maybe_unused]] static bool canElideOverflowCheck(const ASTContext &astContext,
1694 const BinOpInfo &op) {
1695 assert((isa<UnaryOperator>(op.e) || isa<BinaryOperator>(op.e)) &&
1696 "Expected a unary or binary operator");
1697
1698 // If the binop has constant inputs and we can prove there is no overflow,
1699 // we can elide the overflow check.
1700 if (!op.mayHaveIntegerOverflow())
1701 return true;
1702
1703 // If a unary op has a widened operand, the op cannot overflow.
1704 if (const auto *uo = dyn_cast<UnaryOperator>(op.e))
1705 return !uo->canOverflow();
1706
1707 // We usually don't need overflow checks for binops with widened operands.
1708 // Multiplication with promoted unsigned operands is a special case.
1709 const auto *bo = cast<BinaryOperator>(op.e);
1710 std::optional<QualType> optionalLHSTy =
1711 getUnwidenedIntegerType(astContext, bo->getLHS());
1712 if (!optionalLHSTy)
1713 return false;
1714
1715 std::optional<QualType> optionalRHSTy =
1716 getUnwidenedIntegerType(astContext, bo->getRHS());
1717 if (!optionalRHSTy)
1718 return false;
1719
1720 QualType lhsTy = *optionalLHSTy;
1721 QualType rhsTy = *optionalRHSTy;
1722
1723 // This is the simple case: binops without unsigned multiplication, and with
1724 // widened operands. No overflow check is needed here.
1725 if ((op.opcode != BO_Mul && op.opcode != BO_MulAssign) ||
1726 !lhsTy->isUnsignedIntegerType() || !rhsTy->isUnsignedIntegerType())
1727 return true;
1728
1729 // For unsigned multiplication the overflow check can be elided if either one
1730 // of the unpromoted types are less than half the size of the promoted type.
1731 unsigned promotedSize = astContext.getTypeSize(op.e->getType());
1732 return (2 * astContext.getTypeSize(lhsTy)) < promotedSize ||
1733 (2 * astContext.getTypeSize(rhsTy)) < promotedSize;
1734}
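// For example (illustrative, assuming 16-bit 'short' and 32-bit 'int'):
// 'short * short' is promoted to 'int' and cannot overflow it, so the check
// is elided; 'unsigned short * unsigned short' can exceed INT_MAX
// (65535 * 65535 > 2^31 - 1), so the 2 * 16 < 32 test fails and the check is
// kept; 'unsigned char * unsigned char' satisfies 2 * 8 < 32 and is elided.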
1735
1736/// Emit pointer + index arithmetic.
1737static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf,
1738 const BinOpInfo &op,
1739 bool isSubtraction) {
1740 // Must have binary (not unary) expr here. Unary pointer
1741 // increment/decrement doesn't use this path.
1742 const BinaryOperator *expr = cast<BinaryOperator>(op.e);
1743
1744 mlir::Value pointer = op.lhs;
1745 Expr *pointerOperand = expr->getLHS();
1746 mlir::Value index = op.rhs;
1747 Expr *indexOperand = expr->getRHS();
1748
1749 // In the case of subtraction, the FE has ensured that the LHS is always the
1750 // pointer. However, addition can have the pointer on either side. We will
1751 // always have a pointer operand and an integer operand, so if the LHS wasn't
1752 // a pointer, we need to swap our values.
1753 if (!isSubtraction && !mlir::isa<cir::PointerType>(pointer.getType())) {
1754 std::swap(pointer, index);
1755 std::swap(pointerOperand, indexOperand);
1756 }
1757 assert(mlir::isa<cir::PointerType>(pointer.getType()) &&
1758 "Need a pointer operand");
1759 assert(mlir::isa<cir::IntType>(index.getType()) && "Need an integer operand");
1760
1761 // Some versions of glibc and gcc use idioms (particularly in their malloc
1762 // routines) that add a pointer-sized integer (known to be a pointer value)
1763 // to a null pointer in order to cast the value back to an integer or as
1764 // part of a pointer alignment algorithm. This is undefined behavior, but
1765 // we'd like to be able to compile programs that use it.
1766 //
1767 // Normally, we'd generate a GEP with a null-pointer base here in response
1768 // to that code, but it's also UB to dereference a pointer created that
1769 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
1770 // generate a direct cast of the integer value to a pointer.
1771 //
1772 // The idiom (p = nullptr + N) is not met if any of the following are true:
1773 //
1774 // The operation is subtraction.
1775 // The index is not pointer-sized.
1776 // The pointer type is not byte-sized.
1777 //
1778 if (BinaryOperator::isNullPointerArithmeticExtension(
1779 cgf.getContext(), op.opcode, expr->getLHS(), expr->getRHS()))
1780 return cgf.getBuilder().createIntToPtr(index, pointer.getType());
1781
1782 // Unlike LLVM codegen, ABI bits for index sizes are handled during
1783 // LLVM lowering.
1784
1785 // If this is subtraction, negate the index.
1786 if (isSubtraction)
1787 index = cgf.getBuilder().createNeg(index);
1788
1790
1791 const PointerType *pointerType =
1792 pointerOperand->getType()->getAs<PointerType>();
1793 if (!pointerType) {
1794 cgf.cgm.errorNYI("Objective-C:pointer arithmetic with non-pointer type");
1795 return nullptr;
1796 }
1797
1798 QualType elementType = pointerType->getPointeeType();
1799 if (cgf.getContext().getAsVariableArrayType(elementType)) {
1800 cgf.cgm.errorNYI("variable array type");
1801 return nullptr;
1802 }
1803
1804 if (elementType->isVoidType() || elementType->isFunctionType()) {
1805 cgf.cgm.errorNYI("void* or function pointer arithmetic");
1806 return nullptr;
1807 }
1808
1810 return cir::PtrStrideOp::create(cgf.getBuilder(),
1811 cgf.getLoc(op.e->getExprLoc()),
1812 pointer.getType(), pointer, index);
1813}
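// For example (illustrative, hypothetical user code): with 'int *p' and
// 'int i', 'p + i' becomes a PtrStrideOp over 'p' with index 'i', and for
// 'p - i' the index is negated first. The glibc-style idiom '(char *)0 + n'
// matches isNullPointerArithmeticExtension and is emitted as a plain
// int-to-pointer cast instead, as described in the comment above.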
1814
1815mlir::Value ScalarExprEmitter::emitMul(const BinOpInfo &ops) {
1816 const mlir::Location loc = cgf.getLoc(ops.loc);
1817 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1818 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1819 case LangOptions::SOB_Defined:
1820 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1821 return builder.createMul(loc, ops.lhs, ops.rhs);
1822 [[fallthrough]];
1823 case LangOptions::SOB_Undefined:
1824 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1825 return builder.createNSWMul(loc, ops.lhs, ops.rhs);
1826 [[fallthrough]];
1827 case LangOptions::SOB_Trapping:
1828 if (canElideOverflowCheck(cgf.getContext(), ops))
1829 return builder.createNSWMul(loc, ops.lhs, ops.rhs);
1830 cgf.cgm.errorNYI("sanitizers");
1831 }
1832 }
1833 if (ops.fullType->isConstantMatrixType()) {
1835 cgf.cgm.errorNYI("matrix types");
1836 return nullptr;
1837 }
1838 if (ops.compType->isUnsignedIntegerType() &&
1839 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1840 !canElideOverflowCheck(cgf.getContext(), ops))
1841 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1842
1843 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1845 return builder.createFMul(loc, ops.lhs, ops.rhs);
1846 }
1847
1848 if (ops.isFixedPointOp()) {
1850 cgf.cgm.errorNYI("fixed point");
1851 return nullptr;
1852 }
1853
1854 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1855 cgf.convertType(ops.fullType), cir::BinOpKind::Mul,
1856 ops.lhs, ops.rhs);
1857}
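// Illustrative mapping of the signed-overflow switch above, assuming the
// usual driver flags: '-fwrapv' (SOB_Defined) emits a plain wrapping multiply,
// the default mode (SOB_Undefined) uses the no-signed-wrap variant, and
// '-ftrapv' (SOB_Trapping) keeps an overflow check unless
// canElideOverflowCheck proves it unnecessary; enabling the signed-integer
// overflow sanitizer falls through to the checking path in the first two modes.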
1858mlir::Value ScalarExprEmitter::emitDiv(const BinOpInfo &ops) {
1859 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1860 cgf.convertType(ops.fullType), cir::BinOpKind::Div,
1861 ops.lhs, ops.rhs);
1862}
1863mlir::Value ScalarExprEmitter::emitRem(const BinOpInfo &ops) {
1864 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1865 cgf.convertType(ops.fullType), cir::BinOpKind::Rem,
1866 ops.lhs, ops.rhs);
1867}
1868
1869mlir::Value ScalarExprEmitter::emitAdd(const BinOpInfo &ops) {
1870 if (mlir::isa<cir::PointerType>(ops.lhs.getType()) ||
1871 mlir::isa<cir::PointerType>(ops.rhs.getType()))
1872 return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/false);
1873
1874 const mlir::Location loc = cgf.getLoc(ops.loc);
1875 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1876 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1877 case LangOptions::SOB_Defined:
1878 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1879 return builder.createAdd(loc, ops.lhs, ops.rhs);
1880 [[fallthrough]];
1881 case LangOptions::SOB_Undefined:
1882 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1883 return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
1884 [[fallthrough]];
1885 case LangOptions::SOB_Trapping:
1886 if (canElideOverflowCheck(cgf.getContext(), ops))
1887 return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
1888 cgf.cgm.errorNYI("sanitizers");
1889 }
1890 }
1891 if (ops.fullType->isConstantMatrixType()) {
1893 cgf.cgm.errorNYI("matrix types");
1894 return nullptr;
1895 }
1896
1897 if (ops.compType->isUnsignedIntegerType() &&
1898 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1899 !canElideOverflowCheck(cgf.getContext(), ops))
1900 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1901
1902 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1904 return builder.createFAdd(loc, ops.lhs, ops.rhs);
1905 }
1906
1907 if (ops.isFixedPointOp()) {
1909 cgf.cgm.errorNYI("fixed point");
1910 return {};
1911 }
1912
1913 return cir::BinOp::create(builder, loc, cgf.convertType(ops.fullType),
1914 cir::BinOpKind::Add, ops.lhs, ops.rhs);
1915}
1916
1917mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &ops) {
1918 const mlir::Location loc = cgf.getLoc(ops.loc);
1919 // The LHS is always a pointer if either side is.
1920 if (!mlir::isa<cir::PointerType>(ops.lhs.getType())) {
1921 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1922 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1923 case LangOptions::SOB_Defined: {
1924 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1925 return builder.createSub(loc, ops.lhs, ops.rhs);
1926 [[fallthrough]];
1927 }
1928 case LangOptions::SOB_Undefined:
1929 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1930 return builder.createNSWSub(loc, ops.lhs, ops.rhs);
1931 [[fallthrough]];
1932 case LangOptions::SOB_Trapping:
1933 if (canElideOverflowCheck(cgf.getContext(), ops))
1934 return builder.createNSWSub(loc, ops.lhs, ops.rhs);
1935 cgf.cgm.errorNYI("sanitizers");
1936 }
1937 }
1938
1939 if (ops.fullType->isConstantMatrixType()) {
1941 cgf.cgm.errorNYI("matrix types");
1942 return nullptr;
1943 }
1944
1945 if (ops.compType->isUnsignedIntegerType() &&
1946 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1947 !canElideOverflowCheck(cgf.getContext(), ops))
1948 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1949
1950 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1952 return builder.createFSub(loc, ops.lhs, ops.rhs);
1953 }
1954
1955 if (ops.isFixedPointOp()) {
1957 cgf.cgm.errorNYI("fixed point");
1958 return {};
1959 }
1960
1961 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1962 cgf.convertType(ops.fullType),
1963 cir::BinOpKind::Sub, ops.lhs, ops.rhs);
1964 }
1965
1966 // If the RHS is not a pointer, then we have normal pointer
1967 // arithmetic.
1968 if (!mlir::isa<cir::PointerType>(ops.rhs.getType()))
1969 return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/true);
1970
1971 // Otherwise, this is a pointer subtraction
1972
1973 // Do the raw subtraction part.
1974 //
1975 // TODO(cir): note for LLVM lowering; when expanding this into
1976 // LLVM we will need to handle VLAs, division by element size, etc.
1977 //
1978 // See more in `EmitSub` in CGExprScalar.cpp.
1980 return cir::PtrDiffOp::create(builder, cgf.getLoc(ops.loc), cgf.ptrDiffTy,
1981 ops.lhs, ops.rhs);
1982}
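// For example (illustrative, hypothetical user code): with 'int *p, *q',
// 'p - q' takes the pointer-subtraction path and produces a PtrDiffOp with
// the function's ptrdiff type; dividing by 'sizeof(int)' is deferred to LLVM
// lowering, per the TODO above.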
1983
1984mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &ops) {
1985 // TODO: This misses out on the sanitizer check below.
1986 if (ops.isFixedPointOp()) {
1988 cgf.cgm.errorNYI("fixed point");
1989 return {};
1990 }
1991
1992 // CIR accepts shift between different types, meaning nothing special
1993 // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type:
1994 // promote or truncate the RHS to the same size as the LHS.
1995
1996 bool sanitizeSignedBase = cgf.sanOpts.has(SanitizerKind::ShiftBase) &&
1997 ops.compType->hasSignedIntegerRepresentation() &&
1998 !cgf.getLangOpts().isSignedOverflowDefined() &&
1999 !cgf.getLangOpts().CPlusPlus20;
2000 bool sanitizeUnsignedBase =
2001 cgf.sanOpts.has(SanitizerKind::UnsignedShiftBase) &&
2002 ops.compType->hasUnsignedIntegerRepresentation();
2003 bool sanitizeBase = sanitizeSignedBase || sanitizeUnsignedBase;
2004 bool sanitizeExponent = cgf.sanOpts.has(SanitizerKind::ShiftExponent);
2005
2006 // OpenCL 6.3j: shift values are effectively % word size of LHS.
2007 if (cgf.getLangOpts().OpenCL)
2008 cgf.cgm.errorNYI("opencl");
2009 else if ((sanitizeBase || sanitizeExponent) &&
2010 mlir::isa<cir::IntType>(ops.lhs.getType()))
2011 cgf.cgm.errorNYI("sanitizers");
2012
2013 return builder.createShiftLeft(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
2014}
2015
2016mlir::Value ScalarExprEmitter::emitShr(const BinOpInfo &ops) {
2017 // TODO: This misses out on the sanitizer check below.
2018 if (ops.isFixedPointOp()) {
2020 cgf.cgm.errorNYI("fixed point");
2021 return {};
2022 }
2023
2024 // CIR accepts shift between different types, meaning nothing special
2025 // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type:
2026 // promote or truncate the RHS to the same size as the LHS.
2027
2028 // OpenCL 6.3j: shift values are effectively % word size of LHS.
2029 if (cgf.getLangOpts().OpenCL)
2030 cgf.cgm.errorNYI("opencl");
2031 else if (cgf.sanOpts.has(SanitizerKind::ShiftExponent) &&
2032 mlir::isa<cir::IntType>(ops.lhs.getType()))
2033 cgf.cgm.errorNYI("sanitizers");
2034
2035 // Note that we don't need to distinguish unsigned treatment at this
2036 // point since it will be handled later by LLVM lowering.
2037 return builder.createShiftRight(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
2038}
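// For example (illustrative, hypothetical user code): in
//
//   long x;
//   int n;
//   long y = x >> n;
//
// the operands keep their different widths in CIR; making the RHS width match
// the LHS, as LLVM requires, is left to LLVM lowering per the comments above.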
2039
2040mlir::Value ScalarExprEmitter::emitAnd(const BinOpInfo &ops) {
2041 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
2042 cgf.convertType(ops.fullType), cir::BinOpKind::And,
2043 ops.lhs, ops.rhs);
2044}
2045mlir::Value ScalarExprEmitter::emitXor(const BinOpInfo &ops) {
2046 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
2047 cgf.convertType(ops.fullType), cir::BinOpKind::Xor,
2048 ops.lhs, ops.rhs);
2049}
2050mlir::Value ScalarExprEmitter::emitOr(const BinOpInfo &ops) {
2051 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
2052 cgf.convertType(ops.fullType), cir::BinOpKind::Or,
2053 ops.lhs, ops.rhs);
2054}
2055
2056// Emit code for an explicit or implicit cast. Implicit
2057// casts have to handle a broader range of conversions than explicit
2058// casts, as they handle things like function to ptr-to-function decay
2059// etc.
2060mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
2061 Expr *subExpr = ce->getSubExpr();
2062 QualType destTy = ce->getType();
2063 CastKind kind = ce->getCastKind();
2064
2065 // These cases are generally not written to ignore the result of evaluating
2066 // their sub-expressions, so we clear this now.
2067 ignoreResultAssign = false;
2068
2069 switch (kind) {
2070 case clang::CK_Dependent:
2071 llvm_unreachable("dependent cast kind in CIR gen!");
2072 case clang::CK_BuiltinFnToFnPtr:
2073 llvm_unreachable("builtin functions are handled elsewhere");
2074
2075 case CK_CPointerToObjCPointerCast:
2076 case CK_BlockPointerToObjCPointerCast:
2077 case CK_AnyPointerToBlockPointerCast:
2078 case CK_BitCast: {
2079 mlir::Value src = Visit(const_cast<Expr *>(subExpr));
2080 mlir::Type dstTy = cgf.convertType(destTy);
2081
2083
2084 if (cgf.sanOpts.has(SanitizerKind::CFIUnrelatedCast))
2085 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2086 "sanitizer support");
2087
2088 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
2089 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2090 "strict vtable pointers");
2091
2092 // Update heapallocsite metadata when there is an explicit pointer cast.
2094
2095 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2096 // same element type, use the llvm.vector.insert intrinsic to perform the
2097 // bitcast.
2099
2100 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2101 // same element type, use the llvm.vector.extract intrinsic to perform the
2102 // bitcast.
2104
2105 // Perform VLAT <-> VLST bitcast through memory.
2106 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
2107 // require the element types of the vectors to be the same, we
2108 // need to keep this around for bitcasts between VLAT <-> VLST where
2109 // the element types of the vectors are not the same, until we figure
2110 // out a better way of doing these casts.
2112
2113 return cgf.getBuilder().createBitcast(cgf.getLoc(subExpr->getSourceRange()),
2114 src, dstTy);
2115 }
2116 case CK_AddressSpaceConversion: {
2117 Expr::EvalResult result;
2118 if (subExpr->EvaluateAsRValue(result, cgf.getContext()) &&
2119 result.Val.isNullPointer()) {
2120 // If e has side effects, it is emitted even if its final result is a
2121 // null pointer. In that case, a DCE pass should be able to
2122 // eliminate the useless instructions emitted while translating e.
2123 if (result.HasSideEffects)
2124 Visit(subExpr);
2125 return cgf.cgm.emitNullConstant(destTy,
2126 cgf.getLoc(subExpr->getExprLoc()));
2127 }
2128
2129 clang::QualType srcTy = subExpr->IgnoreImpCasts()->getType();
2130 if (srcTy->isPointerType() || srcTy->isReferenceType())
2131 srcTy = srcTy->getPointeeType();
2132
2133 clang::LangAS srcLangAS = srcTy.getAddressSpace();
2134 cir::TargetAddressSpaceAttr subExprAS;
2135 if (clang::isTargetAddressSpace(srcLangAS))
2136 subExprAS = cir::toCIRTargetAddressSpace(cgf.getMLIRContext(), srcLangAS);
2137 else
2138 cgf.cgm.errorNYI(subExpr->getSourceRange(),
2139 "non-target address space conversion");
2140 // Since target may map different address spaces in AST to the same address
2141 // space, an address space conversion may end up as a bitcast.
2142 return cgf.cgm.getTargetCIRGenInfo().performAddrSpaceCast(
2143 cgf, Visit(subExpr), subExprAS, convertType(destTy));
2144 }
2145
2146 case CK_AtomicToNonAtomic: {
2147 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2148 "CastExpr: ", ce->getCastKindName());
2149 mlir::Location loc = cgf.getLoc(subExpr->getSourceRange());
2150 return cgf.createDummyValue(loc, destTy);
2151 }
2152 case CK_NonAtomicToAtomic:
2153 case CK_UserDefinedConversion:
2154 return Visit(const_cast<Expr *>(subExpr));
2155 case CK_NoOp: {
2156 auto v = Visit(const_cast<Expr *>(subExpr));
2157 if (v) {
2158 // CK_NoOp can model a pointer qualification conversion, which can remove
2159 // an array bound and change the IR type.
2160 // FIXME: Once pointee types are removed from IR, remove this.
2161 mlir::Type t = cgf.convertType(destTy);
2162 if (t != v.getType())
2163 cgf.getCIRGenModule().errorNYI("pointer qualification conversion");
2164 }
2165 return v;
2166 }
2167 case CK_IntegralToPointer: {
2168 mlir::Type destCIRTy = cgf.convertType(destTy);
2169 mlir::Value src = Visit(const_cast<Expr *>(subExpr));
2170
2171 // Properly resize by casting to an int of the same size as the pointer.
2172 // Clang's IntegralToPointer includes 'bool' as the source, but in CIR
2173 // 'bool' is not an integral type. So check the source type to get the
2174 // correct CIR conversion.
2175 mlir::Type middleTy = cgf.cgm.getDataLayout().getIntPtrType(destCIRTy);
2176 mlir::Value middleVal = builder.createCast(
2177 subExpr->getType()->isBooleanType() ? cir::CastKind::bool_to_int
2178 : cir::CastKind::integral,
2179 src, middleTy);
2180
2181 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers) {
2182 cgf.cgm.errorNYI(subExpr->getSourceRange(),
2183 "IntegralToPointer: strict vtable pointers");
2184 return {};
2185 }
2186
2187 return builder.createIntToPtr(middleVal, destCIRTy);
2188 }
2189
2190 case CK_BaseToDerived: {
2191 const CXXRecordDecl *derivedClassDecl = destTy->getPointeeCXXRecordDecl();
2192 assert(derivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2193 Address base = cgf.emitPointerWithAlignment(subExpr);
2194 Address derived = cgf.getAddressOfDerivedClass(
2195 cgf.getLoc(ce->getSourceRange()), base, derivedClassDecl, ce->path(),
2196 cgf.shouldNullCheckClassCastValue(ce));
2197
2198 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2199 // performed and the object is not of the derived type.
2201
2202 return cgf.getAsNaturalPointerTo(derived, ce->getType()->getPointeeType());
2203 }
2204 case CK_UncheckedDerivedToBase:
2205 case CK_DerivedToBase: {
2206 // The EmitPointerWithAlignment path does this fine; just discard
2207 // the alignment.
2208 return cgf.getAsNaturalPointerTo(cgf.emitPointerWithAlignment(ce),
2209 ce->getType()->getPointeeType());
2210 }
2211 case CK_Dynamic: {
2212 Address v = cgf.emitPointerWithAlignment(subExpr);
2213 const auto *dce = cast<CXXDynamicCastExpr>(ce);
2214 return cgf.emitDynamicCast(v, dce);
2215 }
2216 case CK_ArrayToPointerDecay:
2217 return cgf.emitArrayToPointerDecay(subExpr).getPointer();
2218
2219 case CK_NullToPointer: {
2220 if (mustVisitNullValue(subExpr))
2221 cgf.emitIgnoredExpr(subExpr);
2222
2223 // Note that DestTy is used as the MLIR type instead of a custom
2224 // nullptr type.
2225 mlir::Type ty = cgf.convertType(destTy);
2226 return builder.getNullPtr(ty, cgf.getLoc(subExpr->getExprLoc()));
2227 }
2228
2229 case CK_NullToMemberPointer: {
2230 if (mustVisitNullValue(subExpr))
2231 cgf.emitIgnoredExpr(subExpr);
2232
2234
2235 const MemberPointerType *mpt = ce->getType()->getAs<MemberPointerType>();
2236 if (mpt->isMemberFunctionPointerType()) {
2237 cgf.cgm.errorNYI(subExpr->getSourceRange(),
2238 "CK_NullToMemberPointer: member function pointer");
2239 return {};
2240 }
2241
2242 auto ty = mlir::cast<cir::DataMemberType>(cgf.convertType(destTy));
2243 return builder.getNullDataMemberPtr(ty, cgf.getLoc(subExpr->getExprLoc()));
2244 }
2245
2246 case CK_LValueToRValue:
2247 assert(cgf.getContext().hasSameUnqualifiedType(subExpr->getType(), destTy));
2248 assert(subExpr->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2249 return Visit(const_cast<Expr *>(subExpr));
2250
2251 case CK_IntegralCast: {
2252 ScalarConversionOpts opts;
2253 if (auto *ice = dyn_cast<ImplicitCastExpr>(ce)) {
2254 if (!ice->isPartOfExplicitCast())
2255 opts = ScalarConversionOpts(cgf.sanOpts);
2256 }
2257 return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
2258 ce->getExprLoc(), opts);
2259 }
2260
2261 case CK_FloatingComplexToReal:
2262 case CK_IntegralComplexToReal:
2263 case CK_FloatingComplexToBoolean:
2264 case CK_IntegralComplexToBoolean: {
2265 mlir::Value value = cgf.emitComplexExpr(subExpr);
2266 return emitComplexToScalarConversion(cgf.getLoc(ce->getExprLoc()), value,
2267 kind, destTy);
2268 }
2269
2270 case CK_FloatingRealToComplex:
2271 case CK_FloatingComplexCast:
2272 case CK_IntegralRealToComplex:
2273 case CK_IntegralComplexCast:
2274 case CK_IntegralComplexToFloatingComplex:
2275 case CK_FloatingComplexToIntegralComplex:
2276 llvm_unreachable("scalar cast to non-scalar value");
2277
2278 case CK_PointerToIntegral: {
2279 assert(!destTy->isBooleanType() && "bool should use PointerToBool");
2280 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
2281 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2282 "strict vtable pointers");
2283 return builder.createPtrToInt(Visit(subExpr), cgf.convertType(destTy));
2284 }
2285 case CK_ToVoid:
2286 cgf.emitIgnoredExpr(subExpr);
2287 return {};
2288
2289 case CK_IntegralToFloating:
2290 case CK_FloatingToIntegral:
2291 case CK_FloatingCast:
2292 case CK_FixedPointToFloating:
2293 case CK_FloatingToFixedPoint: {
2294 if (kind == CK_FixedPointToFloating || kind == CK_FloatingToFixedPoint) {
2295 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2296 "fixed point casts");
2297 return {};
2298 }
2300 return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
2301 ce->getExprLoc());
2302 }
2303
2304 case CK_IntegralToBoolean:
2305 return emitIntToBoolConversion(Visit(subExpr),
2306 cgf.getLoc(ce->getSourceRange()));
2307
2308 case CK_PointerToBoolean:
2309 return emitPointerToBoolConversion(Visit(subExpr), subExpr->getType());
2310 case CK_FloatingToBoolean:
2311 return emitFloatToBoolConversion(Visit(subExpr),
2312 cgf.getLoc(subExpr->getExprLoc()));
2313 case CK_MemberPointerToBoolean: {
2314 mlir::Value memPtr = Visit(subExpr);
2315 return builder.createCast(cgf.getLoc(ce->getSourceRange()),
2316 cir::CastKind::member_ptr_to_bool, memPtr,
2317 cgf.convertType(destTy));
2318 }
2319
2320 case CK_VectorSplat: {
2321 // Create a vector object and fill all elements with the same scalar value.
2322 assert(destTy->isVectorType() && "CK_VectorSplat to non-vector type");
2323 return cir::VecSplatOp::create(builder,
2324 cgf.getLoc(subExpr->getSourceRange()),
2325 cgf.convertType(destTy), Visit(subExpr));
2326 }
2327 case CK_FunctionToPointerDecay:
2328 return cgf.emitLValue(subExpr).getPointer();
2329
2330 default:
2331 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2332 "CastExpr: ", ce->getCastKindName());
2333 }
2334 return {};
2335}
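// For example (illustrative, using the Clang ext_vector_type extension):
//
//   typedef int int4 __attribute__((ext_vector_type(4)));
//   int4 splat(int x) { return (int4)x; }
//
// the cast is CK_VectorSplat, so the scalar 'x' is visited and broadcast to
// all four lanes with a VecSplatOp, as in the case above.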
2336
2337mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *e) {
2338 if (e->getCallReturnType(cgf.getContext())->isReferenceType())
2339 return emitLoadOfLValue(e);
2340
2341 auto v = cgf.emitCallExpr(e).getValue();
2343 return v;
2344}
2345
2346mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *e) {
2347 // TODO(cir): The classic codegen calls tryEmitAsConstant() here. Folding
2348 // constants sounds like work for MLIR optimizers, but we'll keep an assertion
2349 // for now.
2351 Expr::EvalResult result;
2352 if (e->EvaluateAsInt(result, cgf.getContext(), Expr::SE_AllowSideEffects)) {
2353 llvm::APSInt value = result.Val.getInt();
2354 cgf.emitIgnoredExpr(e->getBase());
2355 return builder.getConstInt(cgf.getLoc(e->getExprLoc()), value);
2356 }
2357 return emitLoadOfLValue(e);
2358}
2359
2360mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *e) {
2361 const unsigned numInitElements = e->getNumInits();
2362
2363 [[maybe_unused]] const bool ignore = std::exchange(ignoreResultAssign, false);
2364 assert((ignore == false ||
2365 (numInitElements == 0 && e->getType()->isVoidType())) &&
2366 "init list ignored");
2367
2368 if (e->hadArrayRangeDesignator()) {
2369 cgf.cgm.errorNYI(e->getSourceRange(), "ArrayRangeDesignator");
2370 return {};
2371 }
2372
2373 if (e->getType()->isVectorType()) {
2374 const auto vectorType =
2375 mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
2376
2377 SmallVector<mlir::Value, 16> elements;
2378 for (Expr *init : e->inits()) {
2379 elements.push_back(Visit(init));
2380 }
2381
2382 // Zero-initialize any remaining values.
2383 if (numInitElements < vectorType.getSize()) {
2384 const mlir::Value zeroValue = cgf.getBuilder().getNullValue(
2385 vectorType.getElementType(), cgf.getLoc(e->getSourceRange()));
2386 std::fill_n(std::back_inserter(elements),
2387 vectorType.getSize() - numInitElements, zeroValue);
2388 }
2389
2390 return cir::VecCreateOp::create(cgf.getBuilder(),
2391 cgf.getLoc(e->getSourceRange()), vectorType,
2392 elements);
2393 }
2394
2395 // C++11 value-initialization for the scalar.
2396 if (numInitElements == 0)
2397 return emitNullValue(e->getType(), cgf.getLoc(e->getExprLoc()));
2398
2399 return Visit(e->getInit(0));
2400}
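// For example (illustrative, using the Clang ext_vector_type extension):
//
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   float4 v = {1.0f, 2.0f};
//
// the two explicit initializers are visited and the remaining lanes are
// filled with zero constants before building the VecCreateOp, as above.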
2401
2402mlir::Value CIRGenFunction::emitScalarConversion(mlir::Value src,
2403 QualType srcTy, QualType dstTy,
2404 SourceLocation loc) {
2407 "Invalid scalar expression to emit");
2408 return ScalarExprEmitter(*this, builder)
2409 .emitScalarConversion(src, srcTy, dstTy, loc);
2410}
2411
2412mlir::Value CIRGenFunction::emitComplexToScalarConversion(mlir::Value src,
2413 QualType srcTy,
2414 QualType dstTy,
2415 SourceLocation loc) {
2416 assert(srcTy->isAnyComplexType() && hasScalarEvaluationKind(dstTy) &&
2417 "Invalid complex -> scalar conversion");
2418
2419 QualType complexElemTy = srcTy->castAs<ComplexType>()->getElementType();
2420 if (dstTy->isBooleanType()) {
2421 auto kind = complexElemTy->isFloatingType()
2422 ? cir::CastKind::float_complex_to_bool
2423 : cir::CastKind::int_complex_to_bool;
2424 return builder.createCast(getLoc(loc), kind, src, convertType(dstTy));
2425 }
2426
2427 auto kind = complexElemTy->isFloatingType()
2428 ? cir::CastKind::float_complex_to_real
2429 : cir::CastKind::int_complex_to_real;
2430 mlir::Value real =
2431 builder.createCast(getLoc(loc), kind, src, convertType(complexElemTy));
2432 return emitScalarConversion(real, complexElemTy, dstTy, loc);
2433}
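// For example (illustrative, hypothetical user code): with '_Complex double c',
// the assignment 'double d = c;' extracts the real part via the
// float_complex_to_real cast kind and then runs the ordinary scalar
// conversion, while using 'c' in a boolean context maps to
// float_complex_to_bool, as implemented above.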
2434
2435mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) {
2436 // Perform vector logical not on comparison with zero vector.
2437 if (e->getType()->isVectorType() &&
2438 e->getType()->castAs<VectorType>()->getVectorKind() ==
2439 VectorKind::Generic) {
2440 mlir::Value oper = Visit(e->getSubExpr());
2441 mlir::Location loc = cgf.getLoc(e->getExprLoc());
2442 auto operVecTy = mlir::cast<cir::VectorType>(oper.getType());
2443 auto exprVecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
2444 mlir::Value zeroVec = builder.getNullValue(operVecTy, loc);
2445 return cir::VecCmpOp::create(builder, loc, exprVecTy, cir::CmpOpKind::eq,
2446 oper, zeroVec);
2447 }
2448
2449 // Compare operand to zero.
2450 mlir::Value boolVal = cgf.evaluateExprAsBool(e->getSubExpr());
2451
2452 // Invert value.
2453 boolVal = builder.createNot(boolVal);
2454
2455 // ZExt result to the expr type.
2456 return maybePromoteBoolResult(boolVal, cgf.convertType(e->getType()));
2457}
2458
2459mlir::Value ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *e) {
2460 // Try folding the offsetof to a constant.
2461 Expr::EvalResult evalResult;
2462 if (e->EvaluateAsInt(evalResult, cgf.getContext())) {
2463 mlir::Type type = cgf.convertType(e->getType());
2464 llvm::APSInt value = evalResult.Val.getInt();
2465 return builder.getConstAPInt(cgf.getLoc(e->getExprLoc()), type, value);
2466 }
2467
2468 cgf.getCIRGenModule().errorNYI(
2469 e->getSourceRange(),
2470 "ScalarExprEmitter::VisitOffsetOfExpr Can't eval expr as int");
2471 return {};
2472}
2473
2474mlir::Value ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *e) {
2475 QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
2476 mlir::Value result = VisitRealImag(e, promotionTy);
2477 if (result && !promotionTy.isNull())
2478 result = emitUnPromotedValue(result, e->getType());
2479 return result;
2480}
2481
2482mlir::Value ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *e) {
2483 QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
2484 mlir::Value result = VisitRealImag(e, promotionTy);
2485 if (result && !promotionTy.isNull())
2486 result = emitUnPromotedValue(result, e->getType());
2487 return result;
2488}
2489
2490mlir::Value ScalarExprEmitter::VisitRealImag(const UnaryOperator *e,
2491 QualType promotionTy) {
2492 assert((e->getOpcode() == clang::UO_Real ||
2493 e->getOpcode() == clang::UO_Imag) &&
2494 "Invalid UnaryOp kind for ComplexType Real or Imag");
2495
2496 Expr *op = e->getSubExpr();
2497 mlir::Location loc = cgf.getLoc(e->getExprLoc());
2498 if (op->getType()->isAnyComplexType()) {
2499 // If it's an l-value, load through the appropriate subobject l-value.
2500 // Note that we have to ask `e` because `op` might be an l-value that
2501 // this won't work for, e.g. an Obj-C property
2502 mlir::Value complex = cgf.emitComplexExpr(op);
2503 if (e->isGLValue() && !promotionTy.isNull()) {
2504 promotionTy = promotionTy->isAnyComplexType()
2505 ? promotionTy
2506 : cgf.getContext().getComplexType(promotionTy);
2507 complex = cgf.emitPromotedValue(complex, promotionTy);
2508 }
2509
2510 return e->getOpcode() == clang::UO_Real
2511 ? builder.createComplexReal(loc, complex)
2512 : builder.createComplexImag(loc, complex);
2513 }
2514
2515 if (e->getOpcode() == UO_Real) {
2516 mlir::Value operand = promotionTy.isNull()
2517 ? Visit(op)
2518 : cgf.emitPromotedScalarExpr(op, promotionTy);
2519 return builder.createComplexReal(loc, operand);
2520 }
2521
2522 // __imag on a scalar returns zero. Emit the subexpr to ensure side
2523 // effects are evaluated, but not the actual value.
2524 mlir::Value operand;
2525 if (op->isGLValue()) {
2526 operand = cgf.emitLValue(op).getPointer();
2527 operand = cir::LoadOp::create(builder, loc, operand);
2528 } else if (!promotionTy.isNull()) {
2529 operand = cgf.emitPromotedScalarExpr(op, promotionTy);
2530 } else {
2531 operand = cgf.emitScalarExpr(op);
2532 }
2533 return builder.createComplexImag(loc, operand);
2534}
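// For example (illustrative, hypothetical user code): '__real__ z' and
// '__imag__ z' on a '_Complex float z' extract the corresponding component;
// '__imag__ d' on a plain 'double d' still evaluates 'd' but yields a zero
// imaginary part, as handled in the scalar branch above.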
2535
2536/// Return the size or alignment of the type of argument of the sizeof
2537/// expression as an integer.
2538mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2539 const UnaryExprOrTypeTraitExpr *e) {
2540 const QualType typeToSize = e->getTypeOfArgument();
2541 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
2542 if (auto kind = e->getKind();
2543 kind == UETT_SizeOf || kind == UETT_DataSizeOf || kind == UETT_CountOf) {
2544 if (const VariableArrayType *vat =
2545 cgf.getContext().getAsVariableArrayType(typeToSize)) {
2546 // For _Countof, we only want to evaluate if the extent is actually
2547 // variable as opposed to a multi-dimensional array whose extent is
2548 // constant but whose element type is variable.
2549 bool evaluateExtent = true;
2550 if (kind == UETT_CountOf && vat->getElementType()->isArrayType()) {
2551 evaluateExtent =
2552 !vat->getSizeExpr()->isIntegerConstantExpr(cgf.getContext());
2553 }
2554
2555 if (evaluateExtent) {
2556 if (e->isArgumentType()) {
2557 // sizeof(type) - make sure to emit the VLA size.
2558 cgf.emitVariablyModifiedType(typeToSize);
2559 } else {
2560 // C99 6.5.3.4p2: If the argument is an expression of type
2561 // VLA, it is evaluated.
2562 cgf.emitIgnoredExpr(e->getArgumentExpr());
2563 }
2564
2565 // For _Countof, we just want to return the size of a single dimension.
2566 if (kind == UETT_CountOf)
2567 return cgf.getVLAElements1D(vat).numElts;
2568
2569 // For sizeof and __datasizeof, we need to scale the number of elements
2570 // by the size of the array element type.
2571 CIRGenFunction::VlaSizePair vlaSize = cgf.getVLASize(vat);
2572 mlir::Value numElts = vlaSize.numElts;
2573
2574 // Scale the number of non-VLA elements by the non-VLA element size.
2575 CharUnits eltSize = cgf.getContext().getTypeSizeInChars(vlaSize.type);
2576 if (!eltSize.isOne()) {
2577 mlir::Location loc = cgf.getLoc(e->getSourceRange());
2578 mlir::Value eltSizeValue =
2579 builder.getConstAPInt(numElts.getLoc(), numElts.getType(),
2580 cgf.cgm.getSize(eltSize).getValue());
2581 return builder.createMul(loc, eltSizeValue, numElts,
2583 }
2584
2585 return numElts;
2586 }
2587 }
2588 } else if (e->getKind() == UETT_OpenMPRequiredSimdAlign) {
2590 e->getSourceRange(), "sizeof operator for OpenMpRequiredSimdAlign",
2591 e->getStmtClassName());
2592 return builder.getConstant(
2593 loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
2594 llvm::APSInt(llvm::APInt(64, 1), true)));
2595 } else if (e->getKind() == UETT_VectorElements) {
2596 auto vecTy = cast<cir::VectorType>(convertType(e->getTypeOfArgument()));
2597 if (vecTy.getIsScalable()) {
2598 cgf.getCIRGenModule().errorNYI(
2599 e->getSourceRange(),
2600 "VisitUnaryExprOrTypeTraitExpr: sizeOf scalable vector");
2601 return builder.getConstant(
2602 loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
2604 }
2605
2606 return builder.getConstant(
2607 loc, cir::IntAttr::get(cgf.cgm.uInt64Ty, vecTy.getSize()));
2608 }
2609
2610 return builder.getConstant(
2611 loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
2612 e->EvaluateKnownConstInt(cgf.getContext())));
2613}
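// For example (illustrative, hypothetical user code): in
//
//   void f(int n) {
//     int a[n];
//     unsigned long s = sizeof(a);
//   }
//
// the VLA branch above multiplies the emitted element count 'n' by
// 'sizeof(int)', whereas '_Countof(a)' would return only the element count.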
2614
2615/// Return true if the specified expression is cheap enough and side-effect-free
2616/// enough to evaluate unconditionally instead of conditionally. This is used
2617/// to convert control flow into selects in some cases.
2618/// TODO(cir): can be shared with LLVM codegen.
2619static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e,
2620 CIRGenFunction &cgf) {
2621 // Anything that is an integer or floating point constant is fine.
2622 return e->IgnoreParens()->isEvaluatable(cgf.getContext());
2623
2624 // Even non-volatile automatic variables can't be evaluated unconditionally.
2625 // Referencing a thread_local may cause non-trivial initialization work to
2626 // occur. If we're inside a lambda and one of the variables is from the scope
2627 // outside the lambda, that function may have returned already. Reading its
2628 // locals is a bad idea. Also, these reads may introduce races that didn't
2629 // exist in the source-level program.
2630}
2631
2632mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator(
2633 const AbstractConditionalOperator *e) {
2634 CIRGenBuilderTy &builder = cgf.getBuilder();
2635 mlir::Location loc = cgf.getLoc(e->getSourceRange());
2636 ignoreResultAssign = false;
2637
2638 // Bind the common expression if necessary.
2639 CIRGenFunction::OpaqueValueMapping binding(cgf, e);
2640
2641 Expr *condExpr = e->getCond();
2642 Expr *lhsExpr = e->getTrueExpr();
2643 Expr *rhsExpr = e->getFalseExpr();
2644
2645 // If the condition constant folds and can be elided, try to avoid emitting
2646 // the condition and the dead arm.
2647 bool condExprBool;
2648 if (cgf.constantFoldsToBool(condExpr, condExprBool)) {
2649 Expr *live = lhsExpr, *dead = rhsExpr;
2650 if (!condExprBool)
2651 std::swap(live, dead);
2652
2653 // If the dead side doesn't have labels we need, just emit the Live part.
2654 if (!cgf.containsLabel(dead)) {
2655 if (condExprBool)
2657 mlir::Value result = Visit(live);
2658
2659 // If the live part is a throw expression, it acts like it has a void
2660 // type, so evaluating it returns a null Value. However, a conditional
2661 // with non-void type must return a non-null Value.
2662 if (!result && !e->getType()->isVoidType()) {
2663 result = builder.getConstant(
2664 loc, cir::PoisonAttr::get(builder.getContext(),
2665 cgf.convertType(e->getType())));
2666 }
2667
2668 return result;
2669 }
2670 }
2671
2672 QualType condType = condExpr->getType();
2673
2674 // OpenCL: If the condition is a vector, we can treat this condition like
2675 // the select function.
2676 if ((cgf.getLangOpts().OpenCL && condType->isVectorType()) ||
2677 condType->isExtVectorType()) {
2679 cgf.cgm.errorNYI(e->getSourceRange(), "vector ternary op");
2680 }
2681
2682 if (condType->isVectorType() || condType->isSveVLSBuiltinType()) {
2683 if (!condType->isVectorType()) {
2685 cgf.cgm.errorNYI(loc, "TernaryOp for SVE vector");
2686 return {};
2687 }
2688
2689 mlir::Value condValue = Visit(condExpr);
2690 mlir::Value lhsValue = Visit(lhsExpr);
2691 mlir::Value rhsValue = Visit(rhsExpr);
2692 return cir::VecTernaryOp::create(builder, loc, condValue, lhsValue,
2693 rhsValue);
2694 }
2695
2696 // If this is a really simple expression (like x ? 4 : 5), emit this as a
2697 // select instead of as control flow. We can only do this if it is cheap
2698 // and safe to evaluate the LHS and RHS unconditionally.
2699 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, cgf) &&
2700 isCheapEnoughToEvaluateUnconditionally(rhsExpr, cgf)) {
2701 bool lhsIsVoid = false;
2702 mlir::Value condV = cgf.evaluateExprAsBool(condExpr);
2704
2705 mlir::Value lhs = Visit(lhsExpr);
2706 if (!lhs) {
2707 lhs = builder.getNullValue(cgf.voidTy, loc);
2708 lhsIsVoid = true;
2709 }
2710
2711 mlir::Value rhs = Visit(rhsExpr);
2712 if (lhsIsVoid) {
2713 assert(!rhs && "lhs and rhs types must match");
2714 rhs = builder.getNullValue(cgf.voidTy, loc);
2715 }
2716
2717 return builder.createSelect(loc, condV, lhs, rhs);
2718 }
2719
2720 mlir::Value condV = cgf.emitOpOnBoolExpr(loc, condExpr);
2721 CIRGenFunction::ConditionalEvaluation eval(cgf);
2722 SmallVector<mlir::OpBuilder::InsertPoint, 2> insertPoints{};
2723 mlir::Type yieldTy{};
2724
2725 auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc, Expr *expr) {
2726 CIRGenFunction::LexicalScope lexScope{cgf, loc, b.getInsertionBlock()};
2728
2730 eval.beginEvaluation();
2731 mlir::Value branch = Visit(expr);
2732 eval.endEvaluation();
2733
2734 if (branch) {
2735 yieldTy = branch.getType();
2736 cir::YieldOp::create(b, loc, branch);
2737 } else {
2738 // If LHS or RHS is a throw or void expression we need to patch
2739 // arms as to properly match yield types.
2740 insertPoints.push_back(b.saveInsertionPoint());
2741 }
2742 };
2743
2744 mlir::Value result = cir::TernaryOp::create(
2745 builder, loc, condV,
2746 /*trueBuilder=*/
2747 [&](mlir::OpBuilder &b, mlir::Location loc) {
2748 emitBranch(b, loc, lhsExpr);
2749 },
2750 /*falseBuilder=*/
2751 [&](mlir::OpBuilder &b, mlir::Location loc) {
2752 emitBranch(b, loc, rhsExpr);
2753 })
2754 .getResult();
2755
2756 if (!insertPoints.empty()) {
2757 // If both arms are void, so be it.
2758 if (!yieldTy)
2759 yieldTy = cgf.voidTy;
2760
2761 // Insert required yields.
2762 for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
2763 mlir::OpBuilder::InsertionGuard guard(builder);
2764 builder.restoreInsertionPoint(toInsert);
2765
2766 // Block does not return: build empty yield.
2767 if (mlir::isa<cir::VoidType>(yieldTy)) {
2768 cir::YieldOp::create(builder, loc);
2769 } else { // Block returns: set null yield value.
2770 mlir::Value op0 = builder.getNullValue(yieldTy, loc);
2771 cir::YieldOp::create(builder, loc, op0);
2772 }
2773 }
2774 }
2775
2776 return result;
2777}
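// For example (illustrative, hypothetical user code): 'cond ? 4 : 5' has arms
// that are cheap and side-effect free, so it folds into a single select via
// createSelect; 'cond ? f() : g()' instead builds a TernaryOp whose regions
// evaluate only the chosen arm and yield its value.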
2778
2779mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *e,
2780 LValue lv,
2781 cir::UnaryOpKind kind,
2782 bool isPre) {
2783 return ScalarExprEmitter(*this, builder)
2784 .emitScalarPrePostIncDec(e, lv, kind, isPre);
2785}
#define HANDLE_BINOP(OP)
static bool mustVisitNullValue(const Expr *e)
#define COMPOUND_OP(Op)
#define HANDLEBINOP(OP)
static bool isWidenedIntegerOp(const ASTContext &astContext, const Expr *e)
Check if e is a widened promoted integer.
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e, CIRGenFunction &cgf)
Return true if the specified expression is cheap enough and side-effect-free enough to evaluate uncon...
static bool canElideOverflowCheck(const ASTContext &astContext, const BinOpInfo &op)
Check if we can skip the overflow check for Op.
static std::optional< QualType > getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e)
If e is a widened promoted integer, get its base (unpromoted) type.
#define VISITCOMP(CODE)
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
cir::ConstantOp getBool(bool state, mlir::Location loc)
cir::ConstantOp getNullValue(mlir::Type ty, mlir::Location loc)
cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr)
mlir::Value createCast(mlir::Location loc, cir::CastKind kind, mlir::Value src, mlir::Type newTy)
mlir::Value createIntToPtr(mlir::Value src, mlir::Type newTy)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
cir::CmpOp createCompare(mlir::Location loc, cir::CmpOpKind kind, mlir::Value lhs, mlir::Value rhs)
mlir::Value createSelect(mlir::Location loc, mlir::Value condition, mlir::Value trueValue, mlir::Value falseValue)
mlir::Type getIntPtrType(mlir::Type ty) const
llvm::APInt getValue() const
APSInt & getInt()
Definition APValue.h:489
bool isNullPointer() const
Definition APValue.cpp:1019
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CanQualType FloatTy
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
CanQualType BoolTy
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getComplexType(QualType T) const
Return the uniqued reference to the type for a complex number with the specified element type.
bool isPromotableIntegerType(QualType T) const
More type predicates useful for type checking/promotion.
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4531
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4537
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4543
LabelDecl * getLabel() const
Definition Expr.h:4573
uint64_t getValue() const
Definition ExprCXX.h:3044
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4038
Expr * getLHS() const
Definition Expr.h:4088
SourceLocation getExprLoc() const
Definition Expr.h:4079
Expr * getRHS() const
Definition Expr.h:4090
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:4251
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, const Expr *LHS, const Expr *RHS)
Return true if a binary operator using the specified opcode and operands would match the 'p = (i8*)nu...
Definition Expr.cpp:2204
Opcode getOpcode() const
Definition Expr.h:4083
BinaryOperatorKind Opcode
Definition Expr.h:4043
mlir::Value getPointer() const
Definition Address.h:90
mlir::Value createNeg(mlir::Value value)
void forceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
static bool hasScalarEvaluationKind(clang::QualType type)
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
mlir::Type convertType(clang::QualType t)
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType)
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void emitVariablyModifiedType(QualType ty)
const clang::LangOptions & getLangOpts() const
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e, mlir::Value &result)
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind kind, bool isPre)
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType)
bool shouldNullCheckClassCastValue(const CastExpr *ce)
CIRGenBuilderTy & getBuilder()
mlir::MLIRContext & getMLIRContext()
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
clang::ASTContext & getContext() const
void emitNullabilityCheck(LValue lhs, mlir::Value rhs, clang::SourceLocation loc)
Given an assignment *lhs = rhs, emit a test that checks if rhs is nonnull, if 1LHS is marked _Nonnull...
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
mlir::IntegerAttr getSize(CharUnits size)
const cir::CIRDataLayout getDataLayout() const
const clang::CodeGenOptions & getCodeGenOpts() const
const TargetCIRGenInfo & getTargetCIRGenInfo()
mlir::Value emitNullConstant(QualType t, mlir::Location loc)
Return the result of value-initializing the given type, i.e.
mlir::Value getPointer() const
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
virtual mlir::Value performAddrSpaceCast(CIRGenFunction &cgf, mlir::Value v, cir::TargetAddressSpaceAttr srcAddr, mlir::Type destTy, bool isNonNull=false) const
Perform address space cast of an expression of pointer type.
bool getValue() const
Definition ExprCXX.h:740
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
bool getValue() const
Definition ExprCXX.h:4332
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition ExprCXX.h:304
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1602
CastKind getCastKind() const
Definition Expr.h:3720
llvm::iterator_range< path_iterator > path()
Path through the class hierarchy taken by casts between base and derived classes (see implementation ...
Definition Expr.h:3763
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1950
Expr * getSubExpr()
Definition Expr.h:3726
bool isOne() const
isOne - Test whether the quantity equals one.
Definition CharUnits.h:125
unsigned getValue() const
Definition Expr.h:1629
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4884
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3276
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4300
QualType getComputationLHSType() const
Definition Expr.h:4334
QualType getComputationResultType() const
Definition Expr.h:4337
SourceLocation getExprLoc() const LLVM_READONLY
bool isSatisfied() const
Whether or not the concept with the given arguments was satisfied when the expression was created.
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:4809
ChildElementIter< false > begin()
Definition Expr.h:5232
size_t getDataElementCount() const
Definition Expr.h:5148
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isGLValue() const
Definition Expr.h:287
@ SE_AllowSideEffects
Allow any unmodeled side effect.
Definition Expr.h:674
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3085
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3069
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:276
QualType getType() const
Definition Expr.h:144
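A minimal sketch of the constant-evaluation entry points listed above, assuming `e` is a const clang::Expr*, `ctx` is the clang::ASTContext, and clang/AST/Expr.h is included.
// Sketch only: constant-folding an expression to an integer.
clang::Expr::EvalResult result;
if (e->IgnoreParens()->EvaluateAsInt(result, ctx)) {
  llvm::APSInt folded = result.Val.getInt(); // the folded integer value
  (void)folded;
}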
llvm::APFloat getValue() const
Definition Expr.h:1666
const Expr * getSubExpr() const
Definition Expr.h:1062
Expr * getResultExpr()
Return the result expression of this controlling expression.
Definition Expr.h:6462
unsigned getNumInits() const
Definition Expr.h:5329
bool hadArrayRangeDesignator() const
Definition Expr.h:5483
const Expr * getInit(unsigned Init) const
Definition Expr.h:5353
ArrayRef< Expr * > inits()
Definition Expr.h:5349
bool isSignedOverflowDefined() const
Expr * getBase() const
Definition Expr.h:3441
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3559
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3654
StringRef getName() const
Get the name of the identifier for this declaration as a StringRef.
Definition Decl.h:301
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprObjC.h:218
SourceRange getSourceRange() const
Definition ExprObjC.h:1719
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprObjC.h:162
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprObjC.h:381
OffsetOfExpr - [C99 7.17] - This represents an expression of the form offsetof(record-type,...
Definition Expr.h:2527
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:1208
Expr * getSelectedExpr() const
Definition ExprCXX.h:4639
const Expr * getSubExpr() const
Definition Expr.h:2199
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3329
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8292
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8418
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns the lifetime attribute of this type.
Definition TypeBase.h:1438
QualType getCanonicalType() const
Definition TypeBase.h:8344
bool UseExcessPrecision(const ASTContext &Ctx)
Definition Type.cpp:1613
bool isCanonical() const
Definition TypeBase.h:8349
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
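A minimal sketch of dispatching on these lifetime qualifiers, assuming `destTy` is a clang::QualType and clang/AST/Type.h is included.
// Sketch only: handling the ObjC ownership qualifier of a type.
switch (destTy.getObjCLifetime()) {
case clang::Qualifiers::OCL_None:
case clang::Qualifiers::OCL_ExplicitNone:
  break; // no retain/release bookkeeping required
case clang::Qualifiers::OCL_Strong:
case clang::Qualifiers::OCL_Weak:
case clang::Qualifiers::OCL_Autoreleasing:
  break; // ownership-qualified stores need ARC-aware handling
}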
bool isSatisfied() const
Whether or not the requires clause is satisfied.
unsigned getNumSubExprs() const
getNumSubExprs - Return the size of the SubExprs array.
Definition Expr.h:4676
Expr * getExpr(unsigned Index)
getExpr - Return the Expr at the specified index.
Definition Expr.h:4682
APValue EvaluateInContext(const ASTContext &Ctx, const Expr *DefaultExpr) const
Return the result of evaluating this SourceLocExpr in the specified (and possibly null) default argum...
Definition Expr.cpp:2281
SourceLocation getLocation() const
Definition Expr.h:5061
Encodes a location in the source.
SourceLocation getBegin() const
CompoundStmt * getSubStmt()
Definition Expr.h:4612
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
const char * getStmtClassName() const
Definition Stmt.cpp:87
bool getBoolValue() const
Definition ExprCXX.h:2947
bool isStoredAsBoolean() const
Definition ExprCXX.h:2943
bool isVoidType() const
Definition TypeBase.h:8891
bool isBooleanType() const
Definition TypeBase.h:9021
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration type whose underlying ty...
Definition Type.cpp:2226
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2206
bool isConstantMatrixType() const
Definition TypeBase.h:8696
bool isPointerType() const
Definition TypeBase.h:8529
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8935
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9178
bool isReferenceType() const
Definition TypeBase.h:8553
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1910
bool isSveVLSBuiltinType() const
Determines if this is a sizeless type supported by the 'arm_sve_vector_bits' type attribute,...
Definition Type.cpp:2608
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
bool hasUnsignedIntegerRepresentation() const
Determine whether this type has an unsigned integer representation of some sort, e....
Definition Type.cpp:2292
bool isExtVectorType() const
Definition TypeBase.h:8672
bool isAnyComplexType() const
Definition TypeBase.h:8664
bool isFixedPointType() const
Return true if this is a fixed point type according to ISO/IEC JTC1 SC22 WG14 N1169.
Definition TypeBase.h:8947
bool isHalfType() const
Definition TypeBase.h:8895
bool hasSignedIntegerRepresentation() const
Determine whether this type has a signed integer representation of some sort, e.g....
Definition Type.cpp:2244
bool isMatrixType() const
Definition TypeBase.h:8692
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2801
bool isFunctionType() const
Definition TypeBase.h:8525
bool isMemberFunctionPointerType() const
Definition TypeBase.h:8614
bool isVectorType() const
Definition TypeBase.h:8668
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2321
bool isFloatingType() const
Definition Type.cpp:2305
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2254
const T * getAs() const
Member-template getAs<specific type>.
Definition TypeBase.h:9111
bool isNullPtrType() const
Definition TypeBase.h:8928
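A minimal sketch of how the Type/QualType queries above are commonly combined during scalar emission, assuming `ty` is a clang::QualType and clang/AST/Type.h is included.
// Sketch only: common type queries on a QualType `ty`.
if (ty->isPointerType()) {
  if (const auto *pt = ty->getAs<clang::PointerType>()) {
    clang::QualType pointee = pt->getPointeeType(); // the pointed-to type
    (void)pointee;
  }
} else if (ty->isAnyComplexType() || ty->isFixedPointType()) {
  // complex and fixed-point operands take separate emission paths
}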
QualType getTypeOfArgument() const
Gets the argument type, or the type of the argument expression, whichever is appropriate.
Definition Expr.h:2694
UnaryExprOrTypeTrait getKind() const
Definition Expr.h:2657
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
SourceLocation getExprLoc() const
Definition Expr.h:2368
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
static bool isIncrementOp(Opcode Op)
Definition Expr.h:2326
bool canOverflow() const
Returns true if the unary operator can cause an overflow.
Definition Expr.h:2298
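A minimal sketch, assuming `e` is a clang::Expr* and clang/AST/Expr.h is included, of checking whether an increment may need overflow handling.
// Sketch only: detecting an increment that can overflow.
if (const auto *uo = llvm::dyn_cast<clang::UnaryOperator>(e)) {
  if (clang::UnaryOperator::isIncrementOp(uo->getOpcode()) && uo->canOverflow()) {
    const clang::Expr *operand = uo->getSubExpr(); // the incremented operand
    (void)operand;
  }
}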
Represents a GCC generic vector type.
Definition TypeBase.h:4176
VectorKind getVectorKind() const
Definition TypeBase.h:4196
cir::TargetAddressSpaceAttr toCIRTargetAddressSpace(mlir::MLIRContext &context, clang::LangAS langAS)
Definition CIRTypes.cpp:932
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::ArgumentAdaptingMatcherFunc< internal::HasMatcher > has
Matches AST nodes that have child AST nodes that match the provided matcher.
const AstTypeMatcher< PointerType > pointerType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
unsigned kind
All of the diagnostics that can be emitted by the frontend.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
bool isTargetAddressSpace(LangAS AS)
LangAS
Defines the address space values used by the address space qualifier of QualType.
CastKind
CastKind - The kind of operation required for a conversion.
@ Generic
not a target-specific vector type
Definition TypeBase.h:4137
U cast(CodeGen::Address addr)
Definition Address.h:327
static bool instrumentation()
static bool dataMemberType()
static bool objCLifetime()
static bool addressSpace()
static bool fixedPointType()
static bool vecTernaryOp()
static bool cgFPOptionsRAII()
static bool fpConstraints()
static bool addHeapAllocSiteMetadata()
static bool mayHaveIntegerOverflow()
static bool tryEmitAsConstant()
static bool llvmLoweringPtrDiffConsidersPointee()
static bool scalableVectors()
static bool emitLValueAlignmentAssumption()
static bool incrementProfileCounter()
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition Expr.h:612
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174
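A minimal sketch of SanitizerSet::has, assuming clang/Basic/Sanitizers.h is included; the set built here is illustrative, not one taken from this file.
// Sketch only: checking whether a single sanitizer is enabled.
clang::SanitizerSet sanitizers;
sanitizers.set(clang::SanitizerKind::SignedIntegerOverflow, true);
if (sanitizers.has(clang::SanitizerKind::SignedIntegerOverflow)) {
  // an overflow check would be emitted here
}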