CIRGenExprScalar.cpp
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Emit Expr nodes with scalar CIR types as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
14#include "CIRGenFunction.h"
15#include "CIRGenValue.h"
16
17#include "clang/AST/Expr.h"
21
22#include "mlir/IR/Location.h"
23#include "mlir/IR/Value.h"
24
25#include <cassert>
26#include <utility>
27
28using namespace clang;
29using namespace clang::CIRGen;
30
31namespace {
32
33struct BinOpInfo {
34 mlir::Value lhs;
35 mlir::Value rhs;
36 SourceRange loc;
37 QualType fullType; // Type of operands and result
38 QualType compType; // Type used for computations. Element type
39 // for vectors, otherwise same as FullType.
40 BinaryOperator::Opcode opcode; // Opcode of BinOp to perform
41 FPOptions fpfeatures;
42 const Expr *e; // Entire expr, for diagnosing unsupported cases. May not be a binop.
43
44 /// Check if the binop computes a division or a remainder.
45 bool isDivRemOp() const {
46 return opcode == BO_Div || opcode == BO_Rem || opcode == BO_DivAssign ||
47 opcode == BO_RemAssign;
48 }
49
50 /// Check if the binop can result in integer overflow.
51 bool mayHaveIntegerOverflow() const {
52 // Without constant input, we can't rule out overflow.
53 auto lhsci = lhs.getDefiningOp<cir::ConstantOp>();
54 auto rhsci = rhs.getDefiningOp<cir::ConstantOp>();
55 if (!lhsci || !rhsci)
56 return true;
57
59 // TODO(cir): For now we just assume that we might overflow
60 return true;
61 }
62
63 /// Check if at least one operand is a fixed point type. In such cases,
64 /// this operation did not follow the usual arithmetic conversions, so the
65 /// two operands might not have the same type.
66 bool isFixedPointOp() const {
67 // We cannot simply check the result type since comparison operations
68 // return an int.
69 if (const auto *binOp = llvm::dyn_cast<BinaryOperator>(e)) {
70 QualType lhstype = binOp->getLHS()->getType();
71 QualType rhstype = binOp->getRHS()->getType();
72 return lhstype->isFixedPointType() || rhstype->isFixedPointType();
73 }
74 if (const auto *unop = llvm::dyn_cast<UnaryOperator>(e))
75 return unop->getSubExpr()->getType()->isFixedPointType();
76 return false;
77 }
78};
79
80class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
81 CIRGenFunction &cgf;
82 CIRGenBuilderTy &builder;
83 // Unlike classic codegen, we set this to false or read it with
84 // std::exchange instead of calling TestAndClearIgnoreResultAssign, to make
85 // it explicit when the value is used.
86 bool ignoreResultAssign;
87
88public:
89 ScalarExprEmitter(CIRGenFunction &cgf, CIRGenBuilderTy &builder,
90 bool ignoreResultAssign = false)
91 : cgf(cgf), builder(builder), ignoreResultAssign(ignoreResultAssign) {}
92
93 //===--------------------------------------------------------------------===//
94 // Utilities
95 //===--------------------------------------------------------------------===//
96 mlir::Type convertType(QualType ty) { return cgf.convertType(ty); }
97
98 mlir::Value emitComplexToScalarConversion(mlir::Location loc,
99 mlir::Value value, CastKind kind,
100 QualType destTy);
101
102 mlir::Value emitNullValue(QualType ty, mlir::Location loc) {
103 return cgf.cgm.emitNullConstant(ty, loc);
104 }
105
106 mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType) {
107 return builder.createFloatingCast(result, cgf.convertType(promotionType));
108 }
109
110 mlir::Value emitUnPromotedValue(mlir::Value result, QualType exprType) {
111 return builder.createFloatingCast(result, cgf.convertType(exprType));
112 }
113
114 mlir::Value emitPromoted(const Expr *e, QualType promotionType);
115
116 mlir::Value maybePromoteBoolResult(mlir::Value value,
117 mlir::Type dstTy) const {
118 if (mlir::isa<cir::IntType>(dstTy))
119 return builder.createBoolToInt(value, dstTy);
120 if (mlir::isa<cir::BoolType>(dstTy))
121 return value;
122 llvm_unreachable("Can only promote integer or boolean types");
123 }
124
125 //===--------------------------------------------------------------------===//
126 // Visitor Methods
127 //===--------------------------------------------------------------------===//
128
129 mlir::Value Visit(Expr *e) {
130 return StmtVisitor<ScalarExprEmitter, mlir::Value>::Visit(e);
131 }
132
133 mlir::Value VisitStmt(Stmt *s) {
134 llvm_unreachable("Statement passed to ScalarExprEmitter");
135 }
136
137 mlir::Value VisitExpr(Expr *e) {
138 cgf.getCIRGenModule().errorNYI(
139 e->getSourceRange(), "scalar expression kind: ", e->getStmtClassName());
140 return {};
141 }
142
143 mlir::Value VisitConstantExpr(ConstantExpr *e) {
144 // A constant expression of type 'void' generates no code and produces no
145 // value.
146 if (e->getType()->isVoidType())
147 return {};
148
149 if (mlir::Attribute result = ConstantEmitter(cgf).tryEmitConstantExpr(e)) {
150 if (e->isGLValue()) {
151 cgf.cgm.errorNYI(e->getSourceRange(),
152 "ScalarExprEmitter: constant expr GL Value");
153 return {};
154 }
155
156 return builder.getConstant(cgf.getLoc(e->getSourceRange()),
157 mlir::cast<mlir::TypedAttr>(result));
158 }
159
160 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: constant expr");
161 return {};
162 }
163
164 mlir::Value VisitPackIndexingExpr(PackIndexingExpr *e) {
165 return Visit(e->getSelectedExpr());
166 }
167
168 mlir::Value VisitParenExpr(ParenExpr *pe) { return Visit(pe->getSubExpr()); }
169
170 mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
171 return Visit(ge->getResultExpr());
172 }
173
174 /// Emits the address of the l-value, then loads and returns the result.
175 mlir::Value emitLoadOfLValue(const Expr *e) {
176 LValue lv = cgf.emitLValue(e);
177 // FIXME: add something akin to EmitLValueAlignmentAssumption(E, V);
178 return cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
179 }
180
181 mlir::Value VisitCoawaitExpr(CoawaitExpr *s) {
182 return cgf.emitCoawaitExpr(*s).getValue();
183 }
184
185 mlir::Value VisitCoyieldExpr(CoyieldExpr *e) {
186 return cgf.emitCoyieldExpr(*e).getValue();
187 }
188
189 mlir::Value VisitUnaryCoawait(const UnaryOperator *e) {
190 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: unary coawait");
191 return {};
192 }
193
194 mlir::Value emitLoadOfLValue(LValue lv, SourceLocation loc) {
195 return cgf.emitLoadOfLValue(lv, loc).getValue();
196 }
197
198 // l-values
199 mlir::Value VisitDeclRefExpr(DeclRefExpr *e) {
200 if (CIRGenFunction::ConstantEmission constant = cgf.tryEmitAsConstant(e))
201 return cgf.emitScalarConstant(constant, e);
202
203 return emitLoadOfLValue(e);
204 }
205
206 mlir::Value VisitAddrLabelExpr(const AddrLabelExpr *e) {
207 auto func = cast<cir::FuncOp>(cgf.curFn);
208 cir::BlockAddrInfoAttr blockInfoAttr = cir::BlockAddrInfoAttr::get(
209 &cgf.getMLIRContext(), func.getSymName(), e->getLabel()->getName());
210 cir::BlockAddressOp blockAddressOp = cir::BlockAddressOp::create(
211 builder, cgf.getLoc(e->getSourceRange()), cgf.convertType(e->getType()),
212 blockInfoAttr);
213 cir::LabelOp resolvedLabel = cgf.cgm.lookupBlockAddressInfo(blockInfoAttr);
214 if (!resolvedLabel) {
215 cgf.cgm.mapUnresolvedBlockAddress(blockAddressOp);
216 // Still add the op to maintain insertion order; it will be resolved in
217 // resolveBlockAddresses.
218 cgf.cgm.mapResolvedBlockAddress(blockAddressOp, nullptr);
219 } else {
220 cgf.cgm.mapResolvedBlockAddress(blockAddressOp, resolvedLabel);
221 }
222 cgf.instantiateIndirectGotoBlock();
223 return blockAddressOp;
224 }
225
226 mlir::Value VisitIntegerLiteral(const IntegerLiteral *e) {
227 mlir::Type type = cgf.convertType(e->getType());
228 return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()),
229 cir::IntAttr::get(type, e->getValue()));
230 }
231
232 mlir::Value VisitFixedPointLiteral(const FixedPointLiteral *e) {
233 cgf.cgm.errorNYI(e->getSourceRange(),
234 "ScalarExprEmitter: fixed point literal");
235 return {};
236 }
237
238 mlir::Value VisitFloatingLiteral(const FloatingLiteral *e) {
239 mlir::Type type = cgf.convertType(e->getType());
240 assert(mlir::isa<cir::FPTypeInterface>(type) &&
241 "expect floating-point type");
242 return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()),
243 cir::FPAttr::get(type, e->getValue()));
244 }
245
246 mlir::Value VisitCharacterLiteral(const CharacterLiteral *e) {
247 mlir::Type ty = cgf.convertType(e->getType());
248 auto init = cir::IntAttr::get(ty, e->getValue());
249 return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()), init);
250 }
251
252 mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *e) {
253 return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
254 }
255
256 mlir::Value VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *e) {
257 if (e->getType()->isVoidType())
258 return {};
259
260 return emitNullValue(e->getType(), cgf.getLoc(e->getSourceRange()));
261 }
262
263 mlir::Value VisitGNUNullExpr(const GNUNullExpr *e) {
264 return emitNullValue(e->getType(), cgf.getLoc(e->getSourceRange()));
265 }
266
267 mlir::Value VisitOffsetOfExpr(OffsetOfExpr *e);
268
269 mlir::Value VisitSizeOfPackExpr(SizeOfPackExpr *e) {
270 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: size of pack");
271 return {};
272 }
273 mlir::Value VisitPseudoObjectExpr(PseudoObjectExpr *e) {
274 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: pseudo object");
275 return {};
276 }
277 mlir::Value VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *e) {
278 cgf.cgm.errorNYI(e->getSourceRange(),
279 "ScalarExprEmitter: sycl unique stable name");
280 return {};
281 }
282 mlir::Value VisitEmbedExpr(EmbedExpr *e) {
283 assert(e->getDataElementCount() == 1);
284 auto it = e->begin();
285 llvm::APInt value = (*it)->getValue();
286 return builder.getConstInt(cgf.getLoc(e->getExprLoc()), value,
288 }
289 mlir::Value VisitOpaqueValueExpr(OpaqueValueExpr *e) {
290 if (e->isGLValue())
291 return emitLoadOfLValue(cgf.getOrCreateOpaqueLValueMapping(e),
292 e->getExprLoc());
293
294 // Otherwise, assume the mapping is the scalar directly.
295 return cgf.getOrCreateOpaqueRValueMapping(e).getValue();
296 }
297
298 mlir::Value VisitObjCSelectorExpr(ObjCSelectorExpr *e) {
299 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc selector");
300 return {};
301 }
302 mlir::Value VisitObjCProtocolExpr(ObjCProtocolExpr *e) {
303 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc protocol");
304 return {};
305 }
306 mlir::Value VisitObjCIVarRefExpr(ObjCIvarRefExpr *e) {
307 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc ivar ref");
308 return {};
309 }
310 mlir::Value VisitObjCMessageExpr(ObjCMessageExpr *e) {
311 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc message");
312 return {};
313 }
314 mlir::Value VisitObjCIsaExpr(ObjCIsaExpr *e) {
315 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc isa");
316 return {};
317 }
318 mlir::Value VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *e) {
319 cgf.cgm.errorNYI(e->getSourceRange(),
320 "ScalarExprEmitter: objc availability check");
321 return {};
322 }
323
324 mlir::Value VisitMatrixSubscriptExpr(MatrixSubscriptExpr *e) {
325 cgf.cgm.errorNYI(e->getSourceRange(),
326 "ScalarExprEmitter: matrix subscript");
327 return {};
328 }
329
330 mlir::Value VisitCastExpr(CastExpr *e);
331 mlir::Value VisitCallExpr(const CallExpr *e);
332
333 mlir::Value VisitStmtExpr(StmtExpr *e) {
334 CIRGenFunction::StmtExprEvaluation eval(cgf);
335 if (e->getType()->isVoidType()) {
336 (void)cgf.emitCompoundStmt(*e->getSubStmt());
337 return {};
338 }
339
340 Address retAlloca =
341 cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
342 (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca);
343
344 return cgf.emitLoadOfScalar(cgf.makeAddrLValue(retAlloca, e->getType()),
345 e->getExprLoc());
346 }
347
348 mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
349 ignoreResultAssign = false;
350
351 if (e->getBase()->getType()->isVectorType()) {
353
354 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
355 const mlir::Value vecValue = Visit(e->getBase());
356 const mlir::Value indexValue = Visit(e->getIdx());
357 return cir::VecExtractOp::create(cgf.builder, loc, vecValue, indexValue);
358 }
359 // Just load the lvalue formed by the subscript expression.
360 return emitLoadOfLValue(e);
361 }
362
363 mlir::Value VisitShuffleVectorExpr(ShuffleVectorExpr *e) {
364 if (e->getNumSubExprs() == 2) {
365 // The undocumented form of __builtin_shufflevector.
366 mlir::Value inputVec = Visit(e->getExpr(0));
367 mlir::Value indexVec = Visit(e->getExpr(1));
368 return cir::VecShuffleDynamicOp::create(
369 cgf.builder, cgf.getLoc(e->getSourceRange()), inputVec, indexVec);
370 }
371
372 mlir::Value vec1 = Visit(e->getExpr(0));
373 mlir::Value vec2 = Visit(e->getExpr(1));
374
375 // The documented form of __builtin_shufflevector, where the indices are
376 // a variable number of integer constants. The constants will be stored
377 // in an ArrayAttr.
378 SmallVector<mlir::Attribute, 8> indices;
379 for (unsigned i = 2; i < e->getNumSubExprs(); ++i) {
380 indices.push_back(
381 cir::IntAttr::get(cgf.builder.getSInt64Ty(),
382 e->getExpr(i)
383 ->EvaluateKnownConstInt(cgf.getContext())
384 .getSExtValue()));
385 }
386
387 return cir::VecShuffleOp::create(cgf.builder,
388 cgf.getLoc(e->getSourceRange()),
389 cgf.convertType(e->getType()), vec1, vec2,
390 cgf.builder.getArrayAttr(indices));
391 }
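  // Illustrative uses of the two forms handled above (hypothetical snippet,
  // for exposition only):
  //
  //   typedef int v4si __attribute__((vector_size(16)));
  //   v4si a, b, mask;
  //   __builtin_shufflevector(a, mask);          // two operands -> dynamic
  //   __builtin_shufflevector(a, b, 0, 4, 1, 5); // constant indices
  //
  // The first form becomes a VecShuffleDynamicOp; the second becomes a
  // VecShuffleOp carrying the indices in the ArrayAttr built above.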
392
393 mlir::Value VisitConvertVectorExpr(ConvertVectorExpr *e) {
394 // __builtin_convertvector is an element-wise cast, and is implemented as a
395 // regular cast. The back end handles casts of vectors correctly.
396 return emitScalarConversion(Visit(e->getSrcExpr()),
397 e->getSrcExpr()->getType(), e->getType(),
398 e->getSourceRange().getBegin());
399 }
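  // Illustrative use (hypothetical snippet, for exposition only):
  //
  //   typedef int   int4   __attribute__((ext_vector_type(4)));
  //   typedef float float4 __attribute__((ext_vector_type(4)));
  //   float4 f(int4 v) { return __builtin_convertvector(v, float4); }
  //
  // The conversion is emitted as a single element-wise int-to-float cast on
  // the vector value via emitScalarConversion.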
400
401 mlir::Value VisitExtVectorElementExpr(Expr *e) { return emitLoadOfLValue(e); }
402
403 mlir::Value VisitMemberExpr(MemberExpr *e);
404
405 mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
406 return emitLoadOfLValue(e);
407 }
408
409 mlir::Value VisitInitListExpr(InitListExpr *e);
410
411 mlir::Value VisitArrayInitIndexExpr(ArrayInitIndexExpr *e) {
412 cgf.cgm.errorNYI(e->getSourceRange(),
413 "ScalarExprEmitter: array init index");
414 return {};
415 }
416
417 mlir::Value VisitImplicitValueInitExpr(const ImplicitValueInitExpr *e) {
418 cgf.cgm.errorNYI(e->getSourceRange(),
419 "ScalarExprEmitter: implicit value init");
420 return {};
421 }
422
423 mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *e) {
424 return VisitCastExpr(e);
425 }
426
427 mlir::Value VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *e) {
428 return cgf.cgm.emitNullConstant(e->getType(),
429 cgf.getLoc(e->getSourceRange()));
430 }
431
432 /// Perform a pointer to boolean conversion.
433 mlir::Value emitPointerToBoolConversion(mlir::Value v, QualType qt) {
434 // TODO(cir): comparing the ptr to null is done when lowering CIR to LLVM.
435 // We might want to have a separate pass for these types of conversions.
436 return cgf.getBuilder().createPtrToBoolCast(v);
437 }
438
439 mlir::Value emitFloatToBoolConversion(mlir::Value src, mlir::Location loc) {
440 cir::BoolType boolTy = builder.getBoolTy();
441 return cir::CastOp::create(builder, loc, boolTy,
442 cir::CastKind::float_to_bool, src);
443 }
444
445 mlir::Value emitIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) {
446 // Because of the type rules of C, we often end up computing a
447 // logical value, then zero extending it to int, then wanting it
448 // as a logical value again.
449 // TODO: optimize this common case here or leave it for later
450 // CIR passes?
451 cir::BoolType boolTy = builder.getBoolTy();
452 return cir::CastOp::create(builder, loc, boolTy, cir::CastKind::int_to_bool,
453 srcVal);
454 }
455
456 /// Convert the specified expression value to a boolean (!cir.bool) truth
457 /// value. This is equivalent to "Val != 0".
458 mlir::Value emitConversionToBool(mlir::Value src, QualType srcType,
459 mlir::Location loc) {
460 assert(srcType.isCanonical() && "EmitScalarConversion strips typedefs");
461
462 if (srcType->isRealFloatingType())
463 return emitFloatToBoolConversion(src, loc);
464
465 if (llvm::isa<MemberPointerType>(srcType)) {
466 cgf.getCIRGenModule().errorNYI(loc, "member pointer to bool conversion");
467 return builder.getFalse(loc);
468 }
469
470 if (srcType->isIntegerType())
471 return emitIntToBoolConversion(src, loc);
472
473 assert(::mlir::isa<cir::PointerType>(src.getType()));
474 return emitPointerToBoolConversion(src, srcType);
475 }
476
477 // Emit a conversion from the specified type to the specified destination
478 // type, both of which are CIR scalar types.
479 struct ScalarConversionOpts {
480 bool treatBooleanAsSigned;
481 bool emitImplicitIntegerTruncationChecks;
482 bool emitImplicitIntegerSignChangeChecks;
483
484 ScalarConversionOpts()
485 : treatBooleanAsSigned(false),
486 emitImplicitIntegerTruncationChecks(false),
487 emitImplicitIntegerSignChangeChecks(false) {}
488
489 ScalarConversionOpts(clang::SanitizerSet sanOpts)
490 : treatBooleanAsSigned(false),
491 emitImplicitIntegerTruncationChecks(
492 sanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
493 emitImplicitIntegerSignChangeChecks(
494 sanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
495 };
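  // For example, building with -fsanitize=implicit-conversion enables both
  // check flags above; the checks themselves are still reported as NYI in
  // emitScalarConversion below.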
496
497 // Conversion from bool, integral, or floating-point to integral or
498 // floating-point. Conversions involving other types are handled elsewhere.
499 // Conversion to bool is handled elsewhere because that's a comparison against
500 // zero, not a simple cast. This handles both individual scalars and vectors.
501 mlir::Value emitScalarCast(mlir::Value src, QualType srcType,
502 QualType dstType, mlir::Type srcTy,
503 mlir::Type dstTy, ScalarConversionOpts opts) {
504 assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
505 "Internal error: matrix types not handled by this function.");
506 assert(!(mlir::isa<mlir::IntegerType>(srcTy) ||
507 mlir::isa<mlir::IntegerType>(dstTy)) &&
508 "Obsolete code. Don't use mlir::IntegerType with CIR.");
509
510 mlir::Type fullDstTy = dstTy;
511 if (mlir::isa<cir::VectorType>(srcTy) &&
512 mlir::isa<cir::VectorType>(dstTy)) {
513 // Use the element types of the vectors to figure out the CastKind.
514 srcTy = mlir::dyn_cast<cir::VectorType>(srcTy).getElementType();
515 dstTy = mlir::dyn_cast<cir::VectorType>(dstTy).getElementType();
516 }
517
518 std::optional<cir::CastKind> castKind;
519
520 if (mlir::isa<cir::BoolType>(srcTy)) {
521 if (opts.treatBooleanAsSigned)
522 cgf.getCIRGenModule().errorNYI("signed bool");
523 if (cgf.getBuilder().isInt(dstTy))
524 castKind = cir::CastKind::bool_to_int;
525 else if (mlir::isa<cir::FPTypeInterface>(dstTy))
526 castKind = cir::CastKind::bool_to_float;
527 else
528 llvm_unreachable("Internal error: Cast to unexpected type");
529 } else if (cgf.getBuilder().isInt(srcTy)) {
530 if (cgf.getBuilder().isInt(dstTy))
531 castKind = cir::CastKind::integral;
532 else if (mlir::isa<cir::FPTypeInterface>(dstTy))
533 castKind = cir::CastKind::int_to_float;
534 else
535 llvm_unreachable("Internal error: Cast to unexpected type");
536 } else if (mlir::isa<cir::FPTypeInterface>(srcTy)) {
537 if (cgf.getBuilder().isInt(dstTy)) {
538 // If we can't recognize overflow as undefined behavior, assume that
539 // overflow saturates. This protects against normal optimizations if we
540 // are compiling with non-standard FP semantics.
541 if (!cgf.cgm.getCodeGenOpts().StrictFloatCastOverflow)
542 cgf.getCIRGenModule().errorNYI("strict float cast overflow");
544 castKind = cir::CastKind::float_to_int;
545 } else if (mlir::isa<cir::FPTypeInterface>(dstTy)) {
546 // TODO: split this to createFPExt/createFPTrunc
547 return builder.createFloatingCast(src, fullDstTy);
548 } else {
549 llvm_unreachable("Internal error: Cast to unexpected type");
550 }
551 } else {
552 llvm_unreachable("Internal error: Cast from unexpected type");
553 }
554
555 assert(castKind.has_value() && "Internal error: CastKind not set.");
556 return builder.createOrFold<cir::CastOp>(src.getLoc(), fullDstTy, *castKind,
557 src);
558 }
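  // Summary of the CastKind selection above (applied to the element type for
  // vectors):
  //   bool  -> int    bool_to_int        int   -> int    integral
  //   bool  -> float  bool_to_float      int   -> float  int_to_float
  //   float -> int    float_to_int       float -> float  createFloatingCast
  // Conversions *to* bool never reach this function; they are emitted as
  // comparisons against zero in emitConversionToBool.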
559
560 mlir::Value
561 VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
562 return Visit(e->getReplacement());
563 }
564
565 mlir::Value VisitVAArgExpr(VAArgExpr *ve) {
566 QualType ty = ve->getType();
567
568 if (ty->isVariablyModifiedType()) {
569 cgf.cgm.errorNYI(ve->getSourceRange(),
570 "variably modified types in varargs");
571 }
572
573 return cgf.emitVAArg(ve);
574 }
575
576 mlir::Value VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *e) {
577 return Visit(e->getSemanticForm());
578 }
579
580 mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *e);
581 mlir::Value
582 VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);
583
584 // Unary Operators.
585 mlir::Value VisitUnaryPostDec(const UnaryOperator *e) {
586 LValue lv = cgf.emitLValue(e->getSubExpr());
587 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, false);
588 }
589 mlir::Value VisitUnaryPostInc(const UnaryOperator *e) {
590 LValue lv = cgf.emitLValue(e->getSubExpr());
591 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, false);
592 }
593 mlir::Value VisitUnaryPreDec(const UnaryOperator *e) {
594 LValue lv = cgf.emitLValue(e->getSubExpr());
595 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, true);
596 }
597 mlir::Value VisitUnaryPreInc(const UnaryOperator *e) {
598 LValue lv = cgf.emitLValue(e->getSubExpr());
599 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, true);
600 }
601 mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
602 cir::UnaryOpKind kind, bool isPre) {
603 if (cgf.getLangOpts().OpenMP)
604 cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec OpenMP");
605
606 QualType type = e->getSubExpr()->getType();
607
608 mlir::Value value;
609 mlir::Value input;
610
611 if (type->getAs<AtomicType>()) {
612 cgf.cgm.errorNYI(e->getSourceRange(), "Atomic inc/dec");
613 // TODO(cir): This is not correct, but it will produce reasonable code
614 // until atomic operations are implemented.
615 value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
616 input = value;
617 } else {
618 value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
619 input = value;
620 }
621
622 // NOTE: When possible, more frequent cases are handled first.
623
624 // Special case of integer increment that we have to check first: bool++.
625 // Due to promotion rules, we get:
626 // bool++ -> bool = bool + 1
627 // -> bool = (int)bool + 1
628 // -> bool = ((int)bool + 1 != 0)
629 // An interesting aspect of this is that increment is always true.
630 // Decrement does not have this property.
631 if (kind == cir::UnaryOpKind::Inc && type->isBooleanType()) {
632 value = builder.getTrue(cgf.getLoc(e->getExprLoc()));
633 } else if (type->isIntegerType()) {
634 QualType promotedType;
635 [[maybe_unused]] bool canPerformLossyDemotionCheck = false;
636 if (cgf.getContext().isPromotableIntegerType(type)) {
637 promotedType = cgf.getContext().getPromotedIntegerType(type);
638 assert(promotedType != type && "Shouldn't promote to the same type.");
639 canPerformLossyDemotionCheck = true;
640 canPerformLossyDemotionCheck &=
641 cgf.getContext().getCanonicalType(type) !=
642 cgf.getContext().getCanonicalType(promotedType);
643 canPerformLossyDemotionCheck &=
644 type->isIntegerType() && promotedType->isIntegerType();
645
646 // TODO(cir): Currently, we store bitwidths in CIR types only for
647 // integers. This might also be required for other types.
648
649 assert(
650 (!canPerformLossyDemotionCheck ||
651 type->isSignedIntegerOrEnumerationType() ||
652 promotedType->isSignedIntegerOrEnumerationType() ||
653 mlir::cast<cir::IntType>(cgf.convertType(type)).getWidth() ==
654 mlir::cast<cir::IntType>(cgf.convertType(promotedType)).getWidth()) &&
655 "The following check expects that if we do promotion to different "
656 "underlying canonical type, at least one of the types (either "
657 "base or promoted) will be signed, or the bitwidths will match.");
658 }
659
661 if (e->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
662 value = emitIncDecConsiderOverflowBehavior(e, value, kind);
663 } else {
664 cir::UnaryOpKind kind =
665 e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
666 // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
667 value = emitUnaryOp(e, kind, input, /*nsw=*/false);
668 }
669 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
670 QualType type = ptr->getPointeeType();
671 if (cgf.getContext().getAsVariableArrayType(type)) {
672 // VLA types don't have constant size.
673 cgf.cgm.errorNYI(e->getSourceRange(), "Pointer arithmetic on VLA");
674 return {};
675 } else if (type->isFunctionType()) {
676 // Arithmetic on function pointers (!) is just +-1.
677 cgf.cgm.errorNYI(e->getSourceRange(),
678 "Pointer arithmetic on function pointer");
679 return {};
680 } else {
681 // For everything else, we can just do a simple increment.
682 mlir::Location loc = cgf.getLoc(e->getSourceRange());
683 CIRGenBuilderTy &builder = cgf.getBuilder();
684 int amount = kind == cir::UnaryOpKind::Inc ? 1 : -1;
685 mlir::Value amt = builder.getSInt32(amount, loc);
687 value = builder.createPtrStride(loc, value, amt);
688 }
689 } else if (type->isVectorType()) {
690 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec vector");
691 return {};
692 } else if (type->isRealFloatingType()) {
694
695 if (type->isHalfType() &&
696 !cgf.getContext().getLangOpts().NativeHalfType) {
697 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec half");
698 return {};
699 }
700
701 if (mlir::isa<cir::SingleType, cir::DoubleType>(value.getType())) {
702 // Create the inc/dec operation.
703 // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
704 assert((kind == cir::UnaryOpKind::Inc ||
705 kind == cir::UnaryOpKind::Dec) && "Invalid UnaryOp kind");
706 value = emitUnaryOp(e, kind, value);
707 } else {
708 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fp type");
709 return {};
710 }
711 } else if (type->isFixedPointType()) {
712 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fixed point");
713 return {};
714 } else {
715 assert(type->castAs<ObjCObjectPointerType>());
716 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec ObjectiveC pointer");
717 return {};
718 }
719
720 CIRGenFunction::SourceLocRAIIObject sourceloc{
721 cgf, cgf.getLoc(e->getSourceRange())};
722
723 // Store the updated result through the lvalue
724 if (lv.isBitField())
725 return cgf.emitStoreThroughBitfieldLValue(RValue::get(value), lv);
726 else
727 cgf.emitStoreThroughLValue(RValue::get(value), lv);
728
729 // If this is a postinc, return the value read from memory, otherwise use
730 // the updated value.
731 return isPre ? value : input;
732 }
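  // Illustrative examples of the cases above (hypothetical snippets, for
  // exposition only):
  //   int i;  ++i;  // load, unary inc, store; yields the updated value
  //   int i;  i++;  // same, but yields the originally loaded value
  //   int *p; ++p;  // becomes a pointer stride of +1 element
  //   bool b; b++;  // always stores true, per the promotion rules above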
733
734 mlir::Value emitIncDecConsiderOverflowBehavior(const UnaryOperator *e,
735 mlir::Value inVal,
736 cir::UnaryOpKind kind) {
737 assert((kind == cir::UnaryOpKind::Inc ||
738 kind == cir::UnaryOpKind::Dec) && "Invalid UnaryOp kind");
739 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
740 case LangOptions::SOB_Defined:
741 return emitUnaryOp(e, kind, inVal, /*nsw=*/false);
742 case LangOptions::SOB_Undefined:
744 return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
745 case LangOptions::SOB_Trapping:
746 if (!e->canOverflow())
747 return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
748 cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec overflow SOB_Trapping");
749 return {};
750 }
751 llvm_unreachable("Unexpected signed overflow behavior kind");
752 }
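  // These behaviors correspond to the usual driver flags: -fwrapv selects
  // SOB_Defined (wrapping arithmetic, no nsw), the default is SOB_Undefined
  // (nsw, overflow is UB), and -ftrapv selects SOB_Trapping, which is not
  // implemented here yet.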
753
754 mlir::Value VisitUnaryAddrOf(const UnaryOperator *e) {
755 if (llvm::isa<MemberPointerType>(e->getType()))
756 return cgf.cgm.emitMemberPointerConstant(e);
757
758 return cgf.emitLValue(e->getSubExpr()).getPointer();
759 }
760
761 mlir::Value VisitUnaryDeref(const UnaryOperator *e) {
762 if (e->getType()->isVoidType())
763 return Visit(e->getSubExpr()); // the actual value should be unused
764 return emitLoadOfLValue(e);
765 }
766
767 mlir::Value VisitUnaryPlus(const UnaryOperator *e) {
768 QualType promotionType = getPromotionType(e->getSubExpr()->getType());
769 mlir::Value result =
770 emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Plus, promotionType);
771 if (result && !promotionType.isNull())
772 return emitUnPromotedValue(result, e->getType());
773 return result;
774 }
775
776 mlir::Value VisitUnaryMinus(const UnaryOperator *e) {
777 QualType promotionType = getPromotionType(e->getSubExpr()->getType());
778 mlir::Value result =
779 emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Minus, promotionType);
780 if (result && !promotionType.isNull())
781 return emitUnPromotedValue(result, e->getType());
782 return result;
783 }
784
785 mlir::Value emitUnaryPlusOrMinus(const UnaryOperator *e,
786 cir::UnaryOpKind kind,
787 QualType promotionType) {
788 ignoreResultAssign = false;
789 mlir::Value operand;
790 if (!promotionType.isNull())
791 operand = cgf.emitPromotedScalarExpr(e->getSubExpr(), promotionType);
792 else
793 operand = Visit(e->getSubExpr());
794
795 bool nsw =
796 kind == cir::UnaryOpKind::Minus && e->getType()->isSignedIntegerType();
797
798 // NOTE: LLVM codegen will lower this directly to either a FNeg
799 // or a Sub instruction. In CIR this will be handled later in LowerToLLVM.
800 return emitUnaryOp(e, kind, operand, nsw);
801 }
802
803 mlir::Value emitUnaryOp(const UnaryOperator *e, cir::UnaryOpKind kind,
804 mlir::Value input, bool nsw = false) {
805 return builder.createOrFold<cir::UnaryOp>(
806 cgf.getLoc(e->getSourceRange().getBegin()), input.getType(), kind,
807 input, nsw);
808 }
809
810 mlir::Value VisitUnaryNot(const UnaryOperator *e) {
811 ignoreResultAssign = false;
812 mlir::Value op = Visit(e->getSubExpr());
813 return emitUnaryOp(e, cir::UnaryOpKind::Not, op);
814 }
815
816 mlir::Value VisitUnaryLNot(const UnaryOperator *e);
817
818 mlir::Value VisitUnaryReal(const UnaryOperator *e);
819 mlir::Value VisitUnaryImag(const UnaryOperator *e);
820 mlir::Value VisitRealImag(const UnaryOperator *e,
821 QualType promotionType = QualType());
822
823 mlir::Value VisitUnaryExtension(const UnaryOperator *e) {
824 return Visit(e->getSubExpr());
825 }
826
827 // C++
828 mlir::Value VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e) {
829 cgf.cgm.errorNYI(e->getSourceRange(),
830 "ScalarExprEmitter: materialize temporary");
831 return {};
832 }
833 mlir::Value VisitSourceLocExpr(SourceLocExpr *e) {
834 ASTContext &ctx = cgf.getContext();
835 APValue evaluated =
836 e->EvaluateInContext(ctx, cgf.curSourceLocExprScope.getDefaultExpr());
837 mlir::Attribute attribute = ConstantEmitter(cgf).emitAbstract(
838 e->getLocation(), evaluated, e->getType());
839 mlir::TypedAttr typedAttr = mlir::cast<mlir::TypedAttr>(attribute);
840 return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()),
841 typedAttr);
842 }
843 mlir::Value VisitCXXDefaultArgExpr(CXXDefaultArgExpr *dae) {
844 CIRGenFunction::CXXDefaultArgExprScope scope(cgf, dae);
845 return Visit(dae->getExpr());
846 }
847 mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
848 CIRGenFunction::CXXDefaultInitExprScope scope(cgf, die);
849 return Visit(die->getExpr());
850 }
851
852 mlir::Value VisitCXXThisExpr(CXXThisExpr *te) { return cgf.loadCXXThis(); }
853
854 mlir::Value VisitExprWithCleanups(ExprWithCleanups *e);
855 mlir::Value VisitCXXNewExpr(const CXXNewExpr *e) {
856 return cgf.emitCXXNewExpr(e);
857 }
858 mlir::Value VisitCXXDeleteExpr(const CXXDeleteExpr *e) {
859 cgf.emitCXXDeleteExpr(e);
860 return {};
861 }
862 mlir::Value VisitTypeTraitExpr(const TypeTraitExpr *e) {
863 mlir::Location loc = cgf.getLoc(e->getExprLoc());
864 if (e->isStoredAsBoolean())
865 return builder.getBool(e->getBoolValue(), loc);
866 cgf.cgm.errorNYI(e->getSourceRange(),
867 "ScalarExprEmitter: TypeTraitExpr stored as int");
868 return {};
869 }
870 mlir::Value
871 VisitConceptSpecializationExpr(const ConceptSpecializationExpr *e) {
872 return builder.getBool(e->isSatisfied(), cgf.getLoc(e->getExprLoc()));
873 }
874 mlir::Value VisitRequiresExpr(const RequiresExpr *e) {
875 return builder.getBool(e->isSatisfied(), cgf.getLoc(e->getExprLoc()));
876 }
877 mlir::Value VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *e) {
878 mlir::Type type = cgf.convertType(e->getType());
879 mlir::Location loc = cgf.getLoc(e->getExprLoc());
880 return builder.getConstInt(loc, type, e->getValue());
881 }
882 mlir::Value VisitExpressionTraitExpr(const ExpressionTraitExpr *e) {
883 return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
884 }
885 mlir::Value VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *e) {
886 cgf.cgm.errorNYI(e->getSourceRange(),
887 "ScalarExprEmitter: cxx pseudo destructor");
888 return {};
889 }
890 mlir::Value VisitCXXThrowExpr(const CXXThrowExpr *e) {
891 cgf.emitCXXThrowExpr(e);
892 return {};
893 }
894
895 mlir::Value VisitCXXNoexceptExpr(CXXNoexceptExpr *e) {
896 return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
897 }
898
899 /// Emit a conversion from the specified type to the specified destination
900 /// type, both of which are CIR scalar types.
901 /// TODO: do we need ScalarConversionOpts here? Should be done in another
902 /// pass.
903 mlir::Value
904 emitScalarConversion(mlir::Value src, QualType srcType, QualType dstType,
905 SourceLocation loc,
906 ScalarConversionOpts opts = ScalarConversionOpts()) {
907 // All conversions involving fixed point types should be handled by the
908 // emitFixedPoint family functions. This is done to prevent bloating up
909 // this function more, and although fixed point numbers are represented by
910 // integers, we do not want to follow any logic that assumes they should be
911 // treated as integers.
912 // TODO(leonardchan): When necessary, add another if statement checking for
913 // conversions to fixed point types from other types.
915 if (srcType->isFixedPointType() || dstType->isFixedPointType()) {
916 cgf.getCIRGenModule().errorNYI(loc, "fixed point conversions");
917 return {};
918 }
919
920 srcType = srcType.getCanonicalType();
921 dstType = dstType.getCanonicalType();
922 if (srcType == dstType) {
923 if (opts.emitImplicitIntegerSignChangeChecks)
924 cgf.getCIRGenModule().errorNYI(loc,
925 "implicit integer sign change checks");
926 return src;
927 }
928
929 if (dstType->isVoidType())
930 return {};
931
932 mlir::Type mlirSrcType = src.getType();
933
934 // Handle conversions to bool first, they are special: comparisons against
935 // 0.
936 if (dstType->isBooleanType())
937 return emitConversionToBool(src, srcType, cgf.getLoc(loc));
938
939 mlir::Type mlirDstType = cgf.convertType(dstType);
940
941 if (srcType->isHalfType() &&
942 !cgf.getContext().getLangOpts().NativeHalfType) {
943 // Cast to FP using the intrinsic if the half type itself isn't supported.
944 if (mlir::isa<cir::FPTypeInterface>(mlirDstType)) {
945 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
946 cgf.getCIRGenModule().errorNYI(loc,
947 "cast via llvm.convert.from.fp16");
948 } else {
949 // Cast to other types through float, using either the intrinsic or
950 // FPExt, depending on whether the half type itself is supported (as
951 // opposed to operations on half, available with NativeHalfType).
952 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
953 cgf.getCIRGenModule().errorNYI(loc,
954 "cast via llvm.convert.from.fp16");
955 // FIXME(cir): For now lets pretend we shouldn't use the conversion
956 // intrinsics and insert a cast here unconditionally.
957 src = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, src,
958 cgf.floatTy);
959 srcType = cgf.getContext().FloatTy;
960 mlirSrcType = cgf.floatTy;
961 }
962 }
963
964 // TODO(cir): LLVM codegen ignores conversions like int -> uint,
965 // is there anything to be done for CIR here?
966 if (mlirSrcType == mlirDstType) {
967 if (opts.emitImplicitIntegerSignChangeChecks)
968 cgf.getCIRGenModule().errorNYI(loc,
969 "implicit integer sign change checks");
970 return src;
971 }
972
973 // Handle pointer conversions next: pointers can only be converted to/from
974 // other pointers and integers. Check for pointer types in terms of LLVM, as
975 // some native types (like Obj-C id) may map to a pointer type.
976 if (auto dstPT = dyn_cast<cir::PointerType>(mlirDstType)) {
977 cgf.getCIRGenModule().errorNYI(loc, "pointer casts");
978 return builder.getNullPtr(dstPT, src.getLoc());
979 }
980
981 if (isa<cir::PointerType>(mlirSrcType)) {
982 // Must be an ptr to int cast.
983 assert(isa<cir::IntType>(mlirDstType) && "not ptr->int?");
984 return builder.createPtrToInt(src, mlirDstType);
985 }
986
987 // A scalar can be splatted to an extended vector of the same element type
988 if (dstType->isExtVectorType() && !srcType->isVectorType()) {
989 // Sema should add casts to make sure that the source expression's type
990 // is the same as the vector's element type (sans qualifiers)
991 assert(dstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
992 srcType.getTypePtr() &&
993 "Splatted expr doesn't match with vector element type?");
994
995 cgf.getCIRGenModule().errorNYI(loc, "vector splatting");
996 return {};
997 }
998
999 if (srcType->isMatrixType() && dstType->isMatrixType()) {
1000 cgf.getCIRGenModule().errorNYI(loc,
1001 "matrix type to matrix type conversion");
1002 return {};
1003 }
1004 assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
1005 "Internal error: conversion between matrix type and scalar type");
1006
1007 // Finally, we have the arithmetic types or vectors of arithmetic types.
1008 mlir::Value res = nullptr;
1009 mlir::Type resTy = mlirDstType;
1010
1011 res = emitScalarCast(src, srcType, dstType, mlirSrcType, mlirDstType, opts);
1012
1013 if (mlirDstType != resTy) {
1014 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
1015 cgf.getCIRGenModule().errorNYI(loc, "cast via llvm.convert.to.fp16");
1016 }
1017 // FIXME(cir): For now we never use FP16 conversion intrinsics even if
1018 // required by the target. Change that once this is implemented
1019 res = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, res,
1020 resTy);
1021 }
1022
1023 if (opts.emitImplicitIntegerTruncationChecks)
1024 cgf.getCIRGenModule().errorNYI(loc, "implicit integer truncation checks");
1025
1026 if (opts.emitImplicitIntegerSignChangeChecks)
1027 cgf.getCIRGenModule().errorNYI(loc,
1028 "implicit integer sign change checks");
1029
1030 return res;
1031 }
1032
1033 BinOpInfo emitBinOps(const BinaryOperator *e,
1034 QualType promotionType = QualType()) {
1035 ignoreResultAssign = false;
1036 BinOpInfo result;
1037 result.lhs = cgf.emitPromotedScalarExpr(e->getLHS(), promotionType);
1038 result.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionType);
1039 if (!promotionType.isNull())
1040 result.fullType = promotionType;
1041 else
1042 result.fullType = e->getType();
1043 result.compType = result.fullType;
1044 if (const auto *vecType = dyn_cast_or_null<VectorType>(result.fullType)) {
1045 result.compType = vecType->getElementType();
1046 }
1047 result.opcode = e->getOpcode();
1048 result.loc = e->getSourceRange();
1049 // TODO(cir): Result.FPFeatures
1051 result.e = e;
1052 return result;
1053 }
1054
1055 mlir::Value emitMul(const BinOpInfo &ops);
1056 mlir::Value emitDiv(const BinOpInfo &ops);
1057 mlir::Value emitRem(const BinOpInfo &ops);
1058 mlir::Value emitAdd(const BinOpInfo &ops);
1059 mlir::Value emitSub(const BinOpInfo &ops);
1060 mlir::Value emitShl(const BinOpInfo &ops);
1061 mlir::Value emitShr(const BinOpInfo &ops);
1062 mlir::Value emitAnd(const BinOpInfo &ops);
1063 mlir::Value emitXor(const BinOpInfo &ops);
1064 mlir::Value emitOr(const BinOpInfo &ops);
1065
1066 LValue emitCompoundAssignLValue(
1067 const CompoundAssignOperator *e,
1068 mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &),
1069 mlir::Value &result);
1070 mlir::Value
1071 emitCompoundAssign(const CompoundAssignOperator *e,
1072 mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &));
1073
1074 // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM
1075 // codegen.
1076 QualType getPromotionType(QualType ty) {
1077 const clang::ASTContext &ctx = cgf.getContext();
1078 if (auto *complexTy = ty->getAs<ComplexType>()) {
1079 QualType elementTy = complexTy->getElementType();
1080 if (elementTy.UseExcessPrecision(ctx))
1081 return ctx.getComplexType(ctx.FloatTy);
1082 }
1083
1084 if (ty.UseExcessPrecision(cgf.getContext())) {
1085 if (auto *vt = ty->getAs<VectorType>()) {
1086 unsigned numElements = vt->getNumElements();
1087 return ctx.getVectorType(ctx.FloatTy, numElements, vt->getVectorKind());
1088 }
1089 return cgf.getContext().FloatTy;
1090 }
1091
1092 return QualType();
1093 }
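  // For example, when _Float16 arithmetic is evaluated with excess precision,
  // an expression such as `_Float16 a, b; a + b;` is computed in float (the
  // promotion type returned here) and truncated back to _Float16 by
  // emitUnPromotedValue in the callers below.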
1094
1095// Binary operators and binary compound assignment operators.
1096#define HANDLEBINOP(OP) \
1097 mlir::Value VisitBin##OP(const BinaryOperator *e) { \
1098 QualType promotionTy = getPromotionType(e->getType()); \
1099 auto result = emit##OP(emitBinOps(e, promotionTy)); \
1100 if (result && !promotionTy.isNull()) \
1101 result = emitUnPromotedValue(result, e->getType()); \
1102 return result; \
1103 } \
1104 mlir::Value VisitBin##OP##Assign(const CompoundAssignOperator *e) { \
1105 return emitCompoundAssign(e, &ScalarExprEmitter::emit##OP); \
1106 }
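// For instance, HANDLEBINOP(Mul) below expands to a VisitBinMul visitor that
// returns emitMul(emitBinOps(e, promotionTy)) (un-promoting the result if
// needed) and a VisitBinMulAssign visitor that forwards to
// emitCompoundAssign(e, &ScalarExprEmitter::emitMul).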
1107
1108 HANDLEBINOP(Mul)
1109 HANDLEBINOP(Div)
1110 HANDLEBINOP(Rem)
1111 HANDLEBINOP(Add)
1112 HANDLEBINOP(Sub)
1113 HANDLEBINOP(Shl)
1114 HANDLEBINOP(Shr)
1115 HANDLEBINOP(And)
1116 HANDLEBINOP(Xor)
1117 HANDLEBINOP(Or)
1118#undef HANDLEBINOP
1119
1120 mlir::Value emitCmp(const BinaryOperator *e) {
1121 ignoreResultAssign = false;
1122 const mlir::Location loc = cgf.getLoc(e->getExprLoc());
1123 mlir::Value result;
1124 QualType lhsTy = e->getLHS()->getType();
1125 QualType rhsTy = e->getRHS()->getType();
1126
1127 auto clangCmpToCIRCmp =
1128 [](clang::BinaryOperatorKind clangCmp) -> cir::CmpOpKind {
1129 switch (clangCmp) {
1130 case BO_LT:
1131 return cir::CmpOpKind::lt;
1132 case BO_GT:
1133 return cir::CmpOpKind::gt;
1134 case BO_LE:
1135 return cir::CmpOpKind::le;
1136 case BO_GE:
1137 return cir::CmpOpKind::ge;
1138 case BO_EQ:
1139 return cir::CmpOpKind::eq;
1140 case BO_NE:
1141 return cir::CmpOpKind::ne;
1142 default:
1143 llvm_unreachable("unsupported comparison kind for cir.cmp");
1144 }
1145 };
1146
1147 cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
1148 if (lhsTy->getAs<MemberPointerType>()) {
1150 assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
1151 mlir::Value lhs = cgf.emitScalarExpr(e->getLHS());
1152 mlir::Value rhs = cgf.emitScalarExpr(e->getRHS());
1153 result = builder.createCompare(loc, kind, lhs, rhs);
1154 } else if (!lhsTy->isAnyComplexType() && !rhsTy->isAnyComplexType()) {
1155 BinOpInfo boInfo = emitBinOps(e);
1156 mlir::Value lhs = boInfo.lhs;
1157 mlir::Value rhs = boInfo.rhs;
1158
1159 if (lhsTy->isVectorType()) {
1160 if (!e->getType()->isVectorType()) {
1161 // If AltiVec, the comparison results in a numeric type, so we use
1162 // intrinsics comparing vectors and giving 0 or 1 as a result
1163 cgf.cgm.errorNYI(loc, "AltiVec comparison");
1164 } else {
1165 // Other kinds of vectors. Element-wise comparison returning
1166 // a vector.
1167 result = cir::VecCmpOp::create(builder, cgf.getLoc(boInfo.loc),
1168 cgf.convertType(boInfo.fullType), kind,
1169 boInfo.lhs, boInfo.rhs);
1170 }
1171 } else if (boInfo.isFixedPointOp()) {
1173 cgf.cgm.errorNYI(loc, "fixed point comparisons");
1174 result = builder.getBool(false, loc);
1175 } else {
1176 // integers and pointers
1177 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers &&
1178 mlir::isa<cir::PointerType>(lhs.getType()) &&
1179 mlir::isa<cir::PointerType>(rhs.getType())) {
1180 cgf.cgm.errorNYI(loc, "strict vtable pointer comparisons");
1181 }
1182
1183 cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
1184 result = builder.createCompare(loc, kind, lhs, rhs);
1185 }
1186 } else {
1187 // Complex Comparison: can only be an equality comparison.
1188 assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
1189
1190 BinOpInfo boInfo = emitBinOps(e);
1191 result = cir::CmpOp::create(builder, loc, kind, boInfo.lhs, boInfo.rhs);
1192 }
1193
1194 return emitScalarConversion(result, cgf.getContext().BoolTy, e->getType(),
1195 e->getExprLoc());
1196 }
1197
1198// Comparisons.
1199#define VISITCOMP(CODE) \
1200 mlir::Value VisitBin##CODE(const BinaryOperator *E) { return emitCmp(E); }
1201 VISITCOMP(LT)
1202 VISITCOMP(GT)
1203 VISITCOMP(LE)
1204 VISITCOMP(GE)
1205 VISITCOMP(EQ)
1206 VISITCOMP(NE)
1207#undef VISITCOMP
1208
1209 mlir::Value VisitBinAssign(const BinaryOperator *e) {
1210 const bool ignore = std::exchange(ignoreResultAssign, false);
1211
1212 mlir::Value rhs;
1213 LValue lhs;
1214
1215 switch (e->getLHS()->getType().getObjCLifetime()) {
1221 break;
1223 // __block variables need to have the rhs evaluated first, plus this
1224 // should improve codegen just a little.
1225 rhs = Visit(e->getRHS());
1227 // TODO(cir): This needs to be emitCheckedLValue() once we support
1228 // sanitizers
1229 lhs = cgf.emitLValue(e->getLHS());
1230
1231 // Store the value into the LHS. Bit-fields are handled specially because
1232 // the result is altered by the store, i.e., [C99 6.5.16p1]
1233 // 'An assignment expression has the value of the left operand after the
1234 // assignment...'.
1235 if (lhs.isBitField()) {
1236 rhs = cgf.emitStoreThroughBitfieldLValue(RValue::get(rhs), lhs);
1237 } else {
1238 cgf.emitNullabilityCheck(lhs, rhs, e->getExprLoc());
1239 CIRGenFunction::SourceLocRAIIObject loc{
1240 cgf, cgf.getLoc(e->getSourceRange())};
1241 cgf.emitStoreThroughLValue(RValue::get(rhs), lhs);
1242 }
1243 }
1244
1245 // If the result is clearly ignored, return now.
1246 if (ignore)
1247 return nullptr;
1248
1249 // The result of an assignment in C is the assigned r-value.
1250 if (!cgf.getLangOpts().CPlusPlus)
1251 return rhs;
1252
1253 // If the lvalue is non-volatile, return the computed value of the
1254 // assignment.
1255 if (!lhs.isVolatile())
1256 return rhs;
1257
1258 // Otherwise, reload the value.
1259 return emitLoadOfLValue(lhs, e->getExprLoc());
1260 }
1261
1262 mlir::Value VisitBinComma(const BinaryOperator *e) {
1263 cgf.emitIgnoredExpr(e->getLHS());
1264 // NOTE: We don't need to EnsureInsertPoint() like LLVM codegen.
1265 return Visit(e->getRHS());
1266 }
1267
1268 mlir::Value VisitBinLAnd(const clang::BinaryOperator *e) {
1269 if (e->getType()->isVectorType()) {
1270 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1271 auto vecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
1272 mlir::Value zeroValue = builder.getNullValue(vecTy.getElementType(), loc);
1273 SmallVector<mlir::Value, 16> elements(vecTy.getSize(), zeroValue);
1274 auto zeroVec = cir::VecCreateOp::create(builder, loc, vecTy, elements);
1275
1276 mlir::Value lhs = Visit(e->getLHS());
1277 mlir::Value rhs = Visit(e->getRHS());
1278
1279 auto cmpOpKind = cir::CmpOpKind::ne;
1280 lhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, lhs, zeroVec);
1281 rhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, rhs, zeroVec);
1282 mlir::Value vecAnd = builder.createAnd(loc, lhs, rhs);
1283 return builder.createIntCast(vecAnd, vecTy);
1284 }
1285
1287 mlir::Type resTy = cgf.convertType(e->getType());
1288 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1289
1290 CIRGenFunction::ConditionalEvaluation eval(cgf);
1291
1292 mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
1293 auto resOp = cir::TernaryOp::create(
1294 builder, loc, lhsCondV, /*trueBuilder=*/
1295 [&](mlir::OpBuilder &b, mlir::Location loc) {
1296 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1297 b.getInsertionBlock()};
1298 cgf.curLexScope->setAsTernary();
1299 mlir::Value res = cgf.evaluateExprAsBool(e->getRHS());
1300 lexScope.forceCleanup();
1301 cir::YieldOp::create(b, loc, res);
1302 },
1303 /*falseBuilder*/
1304 [&](mlir::OpBuilder &b, mlir::Location loc) {
1305 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1306 b.getInsertionBlock()};
1307 cgf.curLexScope->setAsTernary();
1308 auto res = cir::ConstantOp::create(b, loc, builder.getFalseAttr());
1309 cir::YieldOp::create(b, loc, res.getRes());
1310 });
1311 return maybePromoteBoolResult(resOp.getResult(), resTy);
1312 }
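  // Schematically (not verbatim CIR), the scalar path above lowers `a && b`
  // to a short-circuiting ternary:
  //
  //   %lhs = <evaluate a as bool>
  //   %res = cir.ternary(%lhs,
  //            true  { %rhs = <evaluate b as bool>; cir.yield %rhs },
  //            false { cir.yield <constant false> })
  //
  // so the RHS is only evaluated when the LHS is true.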
1313
1314 mlir::Value VisitBinLOr(const clang::BinaryOperator *e) {
1315 if (e->getType()->isVectorType()) {
1316 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1317 auto vecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
1318 mlir::Value zeroValue = builder.getNullValue(vecTy.getElementType(), loc);
1319 SmallVector<mlir::Value, 16> elements(vecTy.getSize(), zeroValue);
1320 auto zeroVec = cir::VecCreateOp::create(builder, loc, vecTy, elements);
1321
1322 mlir::Value lhs = Visit(e->getLHS());
1323 mlir::Value rhs = Visit(e->getRHS());
1324
1325 auto cmpOpKind = cir::CmpOpKind::ne;
1326 lhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, lhs, zeroVec);
1327 rhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, rhs, zeroVec);
1328 mlir::Value vecOr = builder.createOr(loc, lhs, rhs);
1329 return builder.createIntCast(vecOr, vecTy);
1330 }
1331
1333 mlir::Type resTy = cgf.convertType(e->getType());
1334 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1335
1336 CIRGenFunction::ConditionalEvaluation eval(cgf);
1337
1338 mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
1339 auto resOp = cir::TernaryOp::create(
1340 builder, loc, lhsCondV, /*trueBuilder=*/
1341 [&](mlir::OpBuilder &b, mlir::Location loc) {
1342 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1343 b.getInsertionBlock()};
1344 cgf.curLexScope->setAsTernary();
1345 auto res = cir::ConstantOp::create(b, loc, builder.getTrueAttr());
1346 cir::YieldOp::create(b, loc, res.getRes());
1347 },
1348 /*falseBuilder*/
1349 [&](mlir::OpBuilder &b, mlir::Location loc) {
1350 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1351 b.getInsertionBlock()};
1352 cgf.curLexScope->setAsTernary();
1353 mlir::Value res = cgf.evaluateExprAsBool(e->getRHS());
1354 lexScope.forceCleanup();
1355 cir::YieldOp::create(b, loc, res);
1356 });
1357
1358 return maybePromoteBoolResult(resOp.getResult(), resTy);
1359 }
1360
1361 mlir::Value VisitBinPtrMemD(const BinaryOperator *e) {
1362 return emitLoadOfLValue(e);
1363 }
1364
1365 mlir::Value VisitBinPtrMemI(const BinaryOperator *e) {
1366 return emitLoadOfLValue(e);
1367 }
1368
1369 // Other Operators.
1370 mlir::Value VisitBlockExpr(const BlockExpr *e) {
1371 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: block");
1372 return {};
1373 }
1374
1375 mlir::Value VisitChooseExpr(ChooseExpr *e) {
1376 return Visit(e->getChosenSubExpr());
1377 }
1378
1379 mlir::Value VisitObjCStringLiteral(const ObjCStringLiteral *e) {
1380 cgf.cgm.errorNYI(e->getSourceRange(),
1381 "ScalarExprEmitter: objc string literal");
1382 return {};
1383 }
1384 mlir::Value VisitObjCBoxedExpr(ObjCBoxedExpr *e) {
1385 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc boxed");
1386 return {};
1387 }
1388 mlir::Value VisitObjCArrayLiteral(ObjCArrayLiteral *e) {
1389 cgf.cgm.errorNYI(e->getSourceRange(),
1390 "ScalarExprEmitter: objc array literal");
1391 return {};
1392 }
1393 mlir::Value VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *e) {
1394 cgf.cgm.errorNYI(e->getSourceRange(),
1395 "ScalarExprEmitter: objc dictionary literal");
1396 return {};
1397 }
1398
1399 mlir::Value VisitAsTypeExpr(AsTypeExpr *e) {
1400 cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: as type");
1401 return {};
1402 }
1403
1404 mlir::Value VisitAtomicExpr(AtomicExpr *e) {
1405 return cgf.emitAtomicExpr(e).getValue();
1406 }
1407};
1408
1409LValue ScalarExprEmitter::emitCompoundAssignLValue(
1410 const CompoundAssignOperator *e,
1411 mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &),
1412 mlir::Value &result) {
1413 if (e->getComputationResultType()->isAnyComplexType())
1414 return cgf.emitScalarCompoundAssignWithComplex(e, result);
1415
1416 QualType lhsTy = e->getLHS()->getType();
1417 BinOpInfo opInfo;
1418
1419 // Emit the RHS first. __block variables need to have the rhs evaluated
1420 // first, plus this should improve codegen a little.
1421
1422 QualType promotionTypeCR = getPromotionType(e->getComputationResultType());
1423 if (promotionTypeCR.isNull())
1424 promotionTypeCR = e->getComputationResultType();
1425
1426 QualType promotionTypeLHS = getPromotionType(e->getComputationLHSType());
1427 QualType promotionTypeRHS = getPromotionType(e->getRHS()->getType());
1428
1429 if (!promotionTypeRHS.isNull())
1430 opInfo.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS);
1431 else
1432 opInfo.rhs = Visit(e->getRHS());
1433
1434 opInfo.fullType = promotionTypeCR;
1435 opInfo.compType = opInfo.fullType;
1436 if (const auto *vecType = dyn_cast_or_null<VectorType>(opInfo.fullType))
1437 opInfo.compType = vecType->getElementType();
1438 opInfo.opcode = e->getOpcode();
1439 opInfo.fpfeatures = e->getFPFeaturesInEffect(cgf.getLangOpts());
1440 opInfo.e = e;
1441 opInfo.loc = e->getSourceRange();
1442
1443 // Load/convert the LHS
1444 LValue lhsLV = cgf.emitLValue(e->getLHS());
1445
1446 if (lhsTy->getAs<AtomicType>()) {
1447 cgf.cgm.errorNYI(result.getLoc(), "atomic lvalue assign");
1448 return LValue();
1449 }
1450
1451 opInfo.lhs = emitLoadOfLValue(lhsLV, e->getExprLoc());
1452
1453 CIRGenFunction::SourceLocRAIIObject sourceloc{
1454 cgf, cgf.getLoc(e->getSourceRange())};
1455 SourceLocation loc = e->getExprLoc();
1456 if (!promotionTypeLHS.isNull())
1457 opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy, promotionTypeLHS, loc);
1458 else
1459 opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy,
1460 e->getComputationLHSType(), loc);
1461
1462 // Expand the binary operator.
1463 result = (this->*func)(opInfo);
1464
1465 // Convert the result back to the LHS type,
1466 // potentially with Implicit Conversion sanitizer check.
1467 result = emitScalarConversion(result, promotionTypeCR, lhsTy, loc,
1468 ScalarConversionOpts(cgf.sanOpts));
1469
1470 // Store the result value into the LHS lvalue. Bit-fields are handled
1471 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
1472 // 'An assignment expression has the value of the left operand after the
1473 // assignment...'.
1474 if (lhsLV.isBitField())
1475 cgf.emitStoreThroughBitfieldLValue(RValue::get(result), lhsLV);
1476 else
1477 cgf.emitStoreThroughLValue(RValue::get(result), lhsLV);
1478
1479 if (cgf.getLangOpts().OpenMP)
1480 cgf.cgm.errorNYI(e->getSourceRange(), "openmp");
1481
1482 return lhsLV;
1483}
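// As an illustration (hypothetical snippet, for exposition only), for
//   short s; s += 1;
// the RHS is emitted first, the loaded `s` is converted to the computation
// type (int), emitAdd runs in int, and the result is converted back to short
// (with optional implicit-truncation checks) before being stored.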
1484
1485mlir::Value ScalarExprEmitter::emitComplexToScalarConversion(mlir::Location loc,
1486 mlir::Value value,
1487 CastKind kind,
1488 QualType destTy) {
1489 cir::CastKind castOpKind;
1490 switch (kind) {
1491 case CK_FloatingComplexToReal:
1492 castOpKind = cir::CastKind::float_complex_to_real;
1493 break;
1494 case CK_IntegralComplexToReal:
1495 castOpKind = cir::CastKind::int_complex_to_real;
1496 break;
1497 case CK_FloatingComplexToBoolean:
1498 castOpKind = cir::CastKind::float_complex_to_bool;
1499 break;
1500 case CK_IntegralComplexToBoolean:
1501 castOpKind = cir::CastKind::int_complex_to_bool;
1502 break;
1503 default:
1504 llvm_unreachable("invalid complex-to-scalar cast kind");
1505 }
1506
1507 return builder.createCast(loc, castOpKind, value, cgf.convertType(destTy));
1508}
1509
1510mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e,
1511 QualType promotionType) {
1512 e = e->IgnoreParens();
1513 if (const auto *bo = dyn_cast<BinaryOperator>(e)) {
1514 switch (bo->getOpcode()) {
1515#define HANDLE_BINOP(OP) \
1516 case BO_##OP: \
1517 return emit##OP(emitBinOps(bo, promotionType));
1518 HANDLE_BINOP(Add)
1519 HANDLE_BINOP(Sub)
1520 HANDLE_BINOP(Mul)
1521 HANDLE_BINOP(Div)
1522#undef HANDLE_BINOP
1523 default:
1524 break;
1525 }
1526 } else if (const auto *uo = dyn_cast<UnaryOperator>(e)) {
1527 switch (uo->getOpcode()) {
1528 case UO_Imag:
1529 case UO_Real:
1530 return VisitRealImag(uo, promotionType);
1531 case UO_Minus:
1532 return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Minus, promotionType);
1533 case UO_Plus:
1534 return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Plus, promotionType);
1535 default:
1536 break;
1537 }
1538 }
1539 mlir::Value result = Visit(const_cast<Expr *>(e));
1540 if (result) {
1541 if (!promotionType.isNull())
1542 return emitPromotedValue(result, promotionType);
1543 return emitUnPromotedValue(result, e->getType());
1544 }
1545 return result;
1546}
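// The promotion path above is primarily exercised by small floating-point
// types: for example, '_Float16 a, b; a + b' may be evaluated in 'float'
// (the promoted type) and narrowed back to '_Float16' afterwards.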
1547
1548mlir::Value ScalarExprEmitter::emitCompoundAssign(
1549 const CompoundAssignOperator *e,
1550 mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &)) {
1551
1552 bool ignore = std::exchange(ignoreResultAssign, false);
1553 mlir::Value rhs;
1554 LValue lhs = emitCompoundAssignLValue(e, func, rhs);
1555
1556 // If the result is clearly ignored, return now.
1557 if (ignore)
1558 return {};
1559
1560 // The result of an assignment in C is the assigned r-value.
1561 if (!cgf.getLangOpts().CPlusPlus)
1562 return rhs;
1563
1564 // If the lvalue is non-volatile, return the computed value of the assignment.
1565 if (!lhs.isVolatile())
1566 return rhs;
1567
1568 // Otherwise, reload the value.
1569 return emitLoadOfLValue(lhs, e->getExprLoc());
1570}
1571
1572mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *e) {
1573 mlir::Location scopeLoc = cgf.getLoc(e->getSourceRange());
1574 mlir::OpBuilder &builder = cgf.builder;
1575
1576 auto scope = cir::ScopeOp::create(
1577 builder, scopeLoc,
1578 /*scopeBuilder=*/
1579 [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) {
1580 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1581 builder.getInsertionBlock()};
1582 mlir::Value scopeYieldVal = Visit(e->getSubExpr());
1583 if (scopeYieldVal) {
1584 // Defend against dominance problems caused by jumps out of expression
1585 // evaluation through the shared cleanup block.
1586 lexScope.forceCleanup();
1587 cir::YieldOp::create(builder, loc, scopeYieldVal);
1588 yieldTy = scopeYieldVal.getType();
1589 }
1590 });
1591
1592 return scope.getNumResults() > 0 ? scope->getResult(0) : nullptr;
1593}
1594
1595} // namespace
1596
1597LValue
1598CIRGenFunction::emitCompoundAssignmentLValue(const CompoundAssignOperator *e) {
1599 ScalarExprEmitter emitter(*this, builder);
1600 mlir::Value result;
1601 switch (e->getOpcode()) {
1602#define COMPOUND_OP(Op) \
1603 case BO_##Op##Assign: \
1604 return emitter.emitCompoundAssignLValue(e, &ScalarExprEmitter::emit##Op, \
1605 result)
1606 COMPOUND_OP(Mul);
1607 COMPOUND_OP(Div);
1608 COMPOUND_OP(Rem);
1609 COMPOUND_OP(Add);
1610 COMPOUND_OP(Sub);
1611 COMPOUND_OP(Shl);
1612 COMPOUND_OP(Shr);
1613 COMPOUND_OP(And);
1614 COMPOUND_OP(Xor);
1615 COMPOUND_OP(Or);
1616#undef COMPOUND_OP
1617
1618 case BO_PtrMemD:
1619 case BO_PtrMemI:
1620 case BO_Mul:
1621 case BO_Div:
1622 case BO_Rem:
1623 case BO_Add:
1624 case BO_Sub:
1625 case BO_Shl:
1626 case BO_Shr:
1627 case BO_LT:
1628 case BO_GT:
1629 case BO_LE:
1630 case BO_GE:
1631 case BO_EQ:
1632 case BO_NE:
1633 case BO_Cmp:
1634 case BO_And:
1635 case BO_Xor:
1636 case BO_Or:
1637 case BO_LAnd:
1638 case BO_LOr:
1639 case BO_Assign:
1640 case BO_Comma:
1641 llvm_unreachable("Not valid compound assignment operators");
1642 }
1643 llvm_unreachable("Unhandled compound assignment operator");
1644}
1645
1646/// Emit the computation of the specified expression of scalar type.
1647mlir::Value CIRGenFunction::emitScalarExpr(const Expr *e,
1648 bool ignoreResultAssign) {
1649 assert(e && hasScalarEvaluationKind(e->getType()) &&
1650 "Invalid scalar expression to emit");
1651
1652 return ScalarExprEmitter(*this, builder, ignoreResultAssign)
1653 .Visit(const_cast<Expr *>(e));
1654}
1655
1656mlir::Value CIRGenFunction::emitPromotedScalarExpr(const Expr *e,
1657 QualType promotionType) {
1658 if (!promotionType.isNull())
1659 return ScalarExprEmitter(*this, builder).emitPromoted(e, promotionType);
1660 return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
1661}
1662
1663[[maybe_unused]] static bool mustVisitNullValue(const Expr *e) {
1664 // If a null pointer expression's type is the C++0x nullptr_t and
1665 // the expression is not a simple literal, it must be evaluated
1666 // for its potential side effects.
1668 return false;
1669 return e->getType()->isNullPtrType();
1670}
1671
1672/// If \p e is a widened promoted integer, get its base (unpromoted) type.
1673static std::optional<QualType>
1674getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e) {
1675 const Expr *base = e->IgnoreImpCasts();
1676 if (e == base)
1677 return std::nullopt;
1678
1679 QualType baseTy = base->getType();
1680 if (!astContext.isPromotableIntegerType(baseTy) ||
1681 astContext.getTypeSize(baseTy) >= astContext.getTypeSize(e->getType()))
1682 return std::nullopt;
1683
1684 return baseTy;
1685}
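// For example, with 'short a;' each operand of 'a * a' is wrapped in an
// implicit short-to-int promotion; stripping that cast here recovers the
// base type 'short', which the overflow-check elision below keys on.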
1686
1687/// Check if \p e is a widened promoted integer.
1688[[maybe_unused]] static bool isWidenedIntegerOp(const ASTContext &astContext,
1689 const Expr *e) {
1690 return getUnwidenedIntegerType(astContext, e).has_value();
1691}
1692
1693/// Check if we can skip the overflow check for \p Op.
1694[[maybe_unused]] static bool canElideOverflowCheck(const ASTContext &astContext,
1695 const BinOpInfo &op) {
1696 assert((isa<UnaryOperator>(op.e) || isa<BinaryOperator>(op.e)) &&
1697 "Expected a unary or binary operator");
1698
1699 // If the binop has constant inputs and we can prove there is no overflow,
1700 // we can elide the overflow check.
1701 if (!op.mayHaveIntegerOverflow())
1702 return true;
1703
1704 // If a unary op has a widened operand, the op cannot overflow.
1705 if (const auto *uo = dyn_cast<UnaryOperator>(op.e))
1706 return !uo->canOverflow();
1707
1708 // We usually don't need overflow checks for binops with widened operands.
1709 // Multiplication with promoted unsigned operands is a special case.
1710 const auto *bo = cast<BinaryOperator>(op.e);
1711 std::optional<QualType> optionalLHSTy =
1712 getUnwidenedIntegerType(astContext, bo->getLHS());
1713 if (!optionalLHSTy)
1714 return false;
1715
1716 std::optional<QualType> optionalRHSTy =
1717 getUnwidenedIntegerType(astContext, bo->getRHS());
1718 if (!optionalRHSTy)
1719 return false;
1720
1721 QualType lhsTy = *optionalLHSTy;
1722 QualType rhsTy = *optionalRHSTy;
1723
1724 // This is the simple case: binops without unsigned multiplication, and with
1725 // widened operands. No overflow check is needed here.
1726 if ((op.opcode != BO_Mul && op.opcode != BO_MulAssign) ||
1727 !lhsTy->isUnsignedIntegerType() || !rhsTy->isUnsignedIntegerType())
1728 return true;
1729
1730 // For unsigned multiplication the overflow check can be elided if either one
1731 // of the unpromoted types is less than half the size of the promoted type.
1732 unsigned promotedSize = astContext.getTypeSize(op.e->getType());
1733 return (2 * astContext.getTypeSize(lhsTy)) < promotedSize ||
1734 (2 * astContext.getTypeSize(rhsTy)) < promotedSize;
1735}
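// For example, assuming a 32-bit 'int': multiplying two 'unsigned char'
// values promotes both operands to 'int', so promotedSize is 32 while each
// unpromoted type is 8 bits wide; 2 * 8 < 32, so the product always fits in
// the promoted type and the overflow check can be elided.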
1736
1737/// Emit pointer + index arithmetic.
1738static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf,
1739 const BinOpInfo &op,
1740 bool isSubtraction) {
1741 // Must have binary (not unary) expr here. Unary pointer
1742 // increment/decrement doesn't use this path.
1743 const BinaryOperator *expr = cast<BinaryOperator>(op.e);
1744
1745 mlir::Value pointer = op.lhs;
1746 Expr *pointerOperand = expr->getLHS();
1747 mlir::Value index = op.rhs;
1748 Expr *indexOperand = expr->getRHS();
1749
1750 // In the case of subtraction, the FE has ensured that the LHS is always the
1751 // pointer. However, addition can have the pointer on either side. We will
1752 // always have a pointer operand and an integer operand, so if the LHS wasn't
1753 // a pointer, we need to swap our values.
1754 if (!isSubtraction && !mlir::isa<cir::PointerType>(pointer.getType())) {
1755 std::swap(pointer, index);
1756 std::swap(pointerOperand, indexOperand);
1757 }
1758 assert(mlir::isa<cir::PointerType>(pointer.getType()) &&
1759 "Need a pointer operand");
1760 assert(mlir::isa<cir::IntType>(index.getType()) && "Need an integer operand");
1761
1762 // Some versions of glibc and gcc use idioms (particularly in their malloc
1763 // routines) that add a pointer-sized integer (known to be a pointer value)
1764 // to a null pointer in order to cast the value back to an integer or as
1765 // part of a pointer alignment algorithm. This is undefined behavior, but
1766 // we'd like to be able to compile programs that use it.
1767 //
1768 // Normally, we'd generate a GEP with a null-pointer base here in response
1769 // to that code, but it's also UB to dereference a pointer created that
1770 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
1771 // generate a direct cast of the integer value to a pointer.
1772 //
1773 // The idiom (p = nullptr + N) is not met if any of the following are true:
1774 //
1775 // The operation is subtraction.
1776 // The index is not pointer-sized.
1777 // The pointer type is not byte-sized.
1778 //
1779 if (BinaryOperator::isNullPointerArithmeticExtension(
1780 cgf.getContext(), op.opcode, expr->getLHS(), expr->getRHS()))
1781 return cgf.getBuilder().createIntToPtr(index, pointer.getType());
1782
1783 // Unlike classic LLVM codegen, the ABI handling for index sizes is deferred
1784 // to LLVM lowering.
1785
1786 // If this is subtraction, negate the index.
1787 if (isSubtraction)
1788 index = cgf.getBuilder().createNeg(index);
1789
1791
1792 const PointerType *pointerType =
1793 pointerOperand->getType()->getAs<PointerType>();
1794 if (!pointerType) {
1795 cgf.cgm.errorNYI("Objective-C:pointer arithmetic with non-pointer type");
1796 return nullptr;
1797 }
1798
1799 QualType elementType = pointerType->getPointeeType();
1800 if (cgf.getContext().getAsVariableArrayType(elementType)) {
1801 cgf.cgm.errorNYI("variable array type");
1802 return nullptr;
1803 }
1804
1805 if (elementType->isVoidType() || elementType->isFunctionType()) {
1806 cgf.cgm.errorNYI("void* or function pointer arithmetic");
1807 return nullptr;
1808 }
1809
1811 return cir::PtrStrideOp::create(cgf.getBuilder(),
1812 cgf.getLoc(op.e->getExprLoc()),
1813 pointer.getType(), pointer, index);
1814}
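// For example, 'int *p; ... p + n' reaches this path and is emitted as a
// cir::PtrStrideOp of 'p' by 'n' (with 'n' negated first when the operator
// is subtraction); ABI adjustments for the index width are deferred to LLVM
// lowering, as noted above.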
1815
1816mlir::Value ScalarExprEmitter::emitMul(const BinOpInfo &ops) {
1817 const mlir::Location loc = cgf.getLoc(ops.loc);
1818 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1819 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1820 case LangOptions::SOB_Defined:
1821 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1822 return builder.createMul(loc, ops.lhs, ops.rhs);
1823 [[fallthrough]];
1824 case LangOptions::SOB_Undefined:
1825 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1826 return builder.createNSWMul(loc, ops.lhs, ops.rhs);
1827 [[fallthrough]];
1828 case LangOptions::SOB_Trapping:
1829 if (canElideOverflowCheck(cgf.getContext(), ops))
1830 return builder.createNSWMul(loc, ops.lhs, ops.rhs);
1831 cgf.cgm.errorNYI("sanitizers");
1832 }
1833 }
1834 if (ops.fullType->isConstantMatrixType()) {
1836 cgf.cgm.errorNYI("matrix types");
1837 return nullptr;
1838 }
1839 if (ops.compType->isUnsignedIntegerType() &&
1840 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1841 !canElideOverflowCheck(cgf.getContext(), ops))
1842 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1843
1844 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1846 return builder.createFMul(loc, ops.lhs, ops.rhs);
1847 }
1848
1849 if (ops.isFixedPointOp()) {
1851 cgf.cgm.errorNYI("fixed point");
1852 return nullptr;
1853 }
1854
1855 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1856 cgf.convertType(ops.fullType), cir::BinOpKind::Mul,
1857 ops.lhs, ops.rhs);
1858}
1859mlir::Value ScalarExprEmitter::emitDiv(const BinOpInfo &ops) {
1860 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1861 cgf.convertType(ops.fullType), cir::BinOpKind::Div,
1862 ops.lhs, ops.rhs);
1863}
1864mlir::Value ScalarExprEmitter::emitRem(const BinOpInfo &ops) {
1865 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1866 cgf.convertType(ops.fullType), cir::BinOpKind::Rem,
1867 ops.lhs, ops.rhs);
1868}
1869
1870mlir::Value ScalarExprEmitter::emitAdd(const BinOpInfo &ops) {
1871 if (mlir::isa<cir::PointerType>(ops.lhs.getType()) ||
1872 mlir::isa<cir::PointerType>(ops.rhs.getType()))
1873 return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/false);
1874
1875 const mlir::Location loc = cgf.getLoc(ops.loc);
1876 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1877 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1878 case LangOptions::SOB_Defined:
1879 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1880 return builder.createAdd(loc, ops.lhs, ops.rhs);
1881 [[fallthrough]];
1882 case LangOptions::SOB_Undefined:
1883 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1884 return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
1885 [[fallthrough]];
1886 case LangOptions::SOB_Trapping:
1887 if (canElideOverflowCheck(cgf.getContext(), ops))
1888 return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
1889 cgf.cgm.errorNYI("sanitizers");
1890 }
1891 }
1892 if (ops.fullType->isConstantMatrixType()) {
1894 cgf.cgm.errorNYI("matrix types");
1895 return nullptr;
1896 }
1897
1898 if (ops.compType->isUnsignedIntegerType() &&
1899 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1900 !canElideOverflowCheck(cgf.getContext(), ops))
1901 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1902
1903 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1905 return builder.createFAdd(loc, ops.lhs, ops.rhs);
1906 }
1907
1908 if (ops.isFixedPointOp()) {
1910 cgf.cgm.errorNYI("fixed point");
1911 return {};
1912 }
1913
1914 return cir::BinOp::create(builder, loc, cgf.convertType(ops.fullType),
1915 cir::BinOpKind::Add, ops.lhs, ops.rhs);
1916}
1917
1918mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &ops) {
1919 const mlir::Location loc = cgf.getLoc(ops.loc);
1920 // The LHS is always a pointer if either side is.
1921 if (!mlir::isa<cir::PointerType>(ops.lhs.getType())) {
1922 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1923 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1924 case LangOptions::SOB_Defined: {
1925 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1926 return builder.createSub(loc, ops.lhs, ops.rhs);
1927 [[fallthrough]];
1928 }
1929 case LangOptions::SOB_Undefined:
1930 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1931 return builder.createNSWSub(loc, ops.lhs, ops.rhs);
1932 [[fallthrough]];
1933 case LangOptions::SOB_Trapping:
1934 if (canElideOverflowCheck(cgf.getContext(), ops))
1935 return builder.createNSWSub(loc, ops.lhs, ops.rhs);
1936 cgf.cgm.errorNYI("sanitizers");
1937 }
1938 }
1939
1940 if (ops.fullType->isConstantMatrixType()) {
1942 cgf.cgm.errorNYI("matrix types");
1943 return nullptr;
1944 }
1945
1946 if (ops.compType->isUnsignedIntegerType() &&
1947 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1948 !canElideOverflowCheck(cgf.getContext(), ops))
1949 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1950
1951 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1953 return builder.createFSub(loc, ops.lhs, ops.rhs);
1954 }
1955
1956 if (ops.isFixedPointOp()) {
1958 cgf.cgm.errorNYI("fixed point");
1959 return {};
1960 }
1961
1962 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1963 cgf.convertType(ops.fullType),
1964 cir::BinOpKind::Sub, ops.lhs, ops.rhs);
1965 }
1966
1967 // If the RHS is not a pointer, then we have normal pointer
1968 // arithmetic.
1969 if (!mlir::isa<cir::PointerType>(ops.rhs.getType()))
1970 return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/true);
1971
1972 // Otherwise, this is a pointer subtraction
1973
1974 // Do the raw subtraction part.
1975 //
1976 // TODO(cir): note for LLVM lowering of this: when expanding into LLVM we
1977 // will need to handle VLAs, division by the element size, etc.
1978 //
1979 // See more in `EmitSub` in CGExprScalar.cpp.
1981 return cir::PtrDiffOp::create(builder, cgf.getLoc(ops.loc), cgf.ptrDiffTy,
1982 ops.lhs, ops.rhs);
1983}
1984
1985mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &ops) {
1986 // TODO: This misses out on the sanitizer check below.
1987 if (ops.isFixedPointOp()) {
1989 cgf.cgm.errorNYI("fixed point");
1990 return {};
1991 }
1992
1993 // CIR accepts shifts between different types, so nothing special needs to
1994 // be done here. OTOH, LLVM requires the LHS and RHS to be the same type:
1995 // promote or truncate the RHS to the same size as the LHS.
1996
1997 bool sanitizeSignedBase = cgf.sanOpts.has(SanitizerKind::ShiftBase) &&
1998 ops.compType->hasSignedIntegerRepresentation() &&
1999 !cgf.getLangOpts().isSignedOverflowDefined() &&
2000 !cgf.getLangOpts().CPlusPlus20;
2001 bool sanitizeUnsignedBase =
2002 cgf.sanOpts.has(SanitizerKind::UnsignedShiftBase) &&
2003 ops.compType->hasUnsignedIntegerRepresentation();
2004 bool sanitizeBase = sanitizeSignedBase || sanitizeUnsignedBase;
2005 bool sanitizeExponent = cgf.sanOpts.has(SanitizerKind::ShiftExponent);
2006
2007 // OpenCL 6.3j: shift values are effectively % word size of LHS.
2008 if (cgf.getLangOpts().OpenCL)
2009 cgf.cgm.errorNYI("opencl");
2010 else if ((sanitizeBase || sanitizeExponent) &&
2011 mlir::isa<cir::IntType>(ops.lhs.getType()))
2012 cgf.cgm.errorNYI("sanitizers");
2013
2014 return builder.createShiftLeft(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
2015}
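// For example, 'int x; x << 3LL' keeps the 64-bit RHS as-is in the shift
// created above; matching the RHS width to the LHS, as LLVM requires, only
// happens when lowering to LLVM IR.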
2016
2017mlir::Value ScalarExprEmitter::emitShr(const BinOpInfo &ops) {
2018 // TODO: This misses out on the sanitizer check below.
2019 if (ops.isFixedPointOp()) {
2021 cgf.cgm.errorNYI("fixed point");
2022 return {};
2023 }
2024
2025 // CIR accepts shifts between different types, so nothing special needs to
2026 // be done here. OTOH, LLVM requires the LHS and RHS to be the same type:
2027 // promote or truncate the RHS to the same size as the LHS.
2028
2029 // OpenCL 6.3j: shift values are effectively % word size of LHS.
2030 if (cgf.getLangOpts().OpenCL)
2031 cgf.cgm.errorNYI("opencl");
2032 else if (cgf.sanOpts.has(SanitizerKind::ShiftExponent) &&
2033 mlir::isa<cir::IntType>(ops.lhs.getType()))
2034 cgf.cgm.errorNYI("sanitizers");
2035
2036 // Note that we don't need to distinguish unsigned treatment at this
2037 // point since it will be handled later by LLVM lowering.
2038 return builder.createShiftRight(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
2039}
2040
2041mlir::Value ScalarExprEmitter::emitAnd(const BinOpInfo &ops) {
2042 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
2043 cgf.convertType(ops.fullType), cir::BinOpKind::And,
2044 ops.lhs, ops.rhs);
2045}
2046mlir::Value ScalarExprEmitter::emitXor(const BinOpInfo &ops) {
2047 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
2048 cgf.convertType(ops.fullType), cir::BinOpKind::Xor,
2049 ops.lhs, ops.rhs);
2050}
2051mlir::Value ScalarExprEmitter::emitOr(const BinOpInfo &ops) {
2052 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
2053 cgf.convertType(ops.fullType), cir::BinOpKind::Or,
2054 ops.lhs, ops.rhs);
2055}
2056
2057// Emit code for an explicit or implicit cast. Implicit
2058// casts have to handle a broader range of conversions than explicit
2059// casts, as they handle things like function to ptr-to-function decay
2060// etc.
2061mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
2062 Expr *subExpr = ce->getSubExpr();
2063 QualType destTy = ce->getType();
2064 CastKind kind = ce->getCastKind();
2065
2066 // These cases are generally not written to ignore the result of evaluating
2067 // their sub-expressions, so we clear this now.
2068 ignoreResultAssign = false;
2069
2070 switch (kind) {
2071 case clang::CK_Dependent:
2072 llvm_unreachable("dependent cast kind in CIR gen!");
2073 case clang::CK_BuiltinFnToFnPtr:
2074 llvm_unreachable("builtin functions are handled elsewhere");
2075 case CK_LValueBitCast:
2076 case CK_LValueToRValueBitCast: {
2077 LValue sourceLVal = cgf.emitLValue(subExpr);
2078 Address sourceAddr = sourceLVal.getAddress();
2079
2080 mlir::Type destElemTy = cgf.convertTypeForMem(destTy);
2081 Address destAddr = sourceAddr.withElementType(cgf.getBuilder(), destElemTy);
2082 LValue destLVal = cgf.makeAddrLValue(destAddr, destTy);
2084 return emitLoadOfLValue(destLVal, ce->getExprLoc());
2085 }
2086
2087 case CK_CPointerToObjCPointerCast:
2088 case CK_BlockPointerToObjCPointerCast:
2089 case CK_AnyPointerToBlockPointerCast:
2090 case CK_BitCast: {
2091 mlir::Value src = Visit(const_cast<Expr *>(subExpr));
2092 mlir::Type dstTy = cgf.convertType(destTy);
2093
2095
2096 if (cgf.sanOpts.has(SanitizerKind::CFIUnrelatedCast))
2097 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2098 "sanitizer support");
2099
2100 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
2101 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2102 "strict vtable pointers");
2103
2104 // Update heapallocsite metadata when there is an explicit pointer cast.
2106
2107 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2108 // same element type, use the llvm.vector.insert intrinsic to perform the
2109 // bitcast.
2111
2112 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2113 // same element type, use the llvm.vector.extract intrinsic to perform the
2114 // bitcast.
2116
2117 // Perform VLAT <-> VLST bitcast through memory.
2118 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
2119 // require the element types of the vectors to be the same, we
2120 // need to keep this around for bitcasts between VLAT <-> VLST where
2121 // the element types of the vectors are not the same, until we figure
2122 // out a better way of doing these casts.
2124
2125 return cgf.getBuilder().createBitcast(cgf.getLoc(subExpr->getSourceRange()),
2126 src, dstTy);
2127 }
2128 case CK_AddressSpaceConversion: {
2129 Expr::EvalResult result;
2130 if (subExpr->EvaluateAsRValue(result, cgf.getContext()) &&
2131 result.Val.isNullPointer()) {
2132 // If e has side effects, it is emitted even if its final result is a
2133 // null pointer. In that case, a DCE pass should be able to
2134 // eliminate the useless instructions emitted while translating e.
2135 if (result.HasSideEffects)
2136 Visit(subExpr);
2137 return cgf.cgm.emitNullConstant(destTy,
2138 cgf.getLoc(subExpr->getExprLoc()));
2139 }
2140
2141 clang::QualType srcTy = subExpr->IgnoreImpCasts()->getType();
2142 if (srcTy->isPointerType() || srcTy->isReferenceType())
2143 srcTy = srcTy->getPointeeType();
2144
2145 clang::LangAS srcLangAS = srcTy.getAddressSpace();
2146 cir::TargetAddressSpaceAttr subExprAS;
2147 if (clang::isTargetAddressSpace(srcLangAS))
2148 subExprAS = cir::toCIRTargetAddressSpace(cgf.getMLIRContext(), srcLangAS);
2149 else
2150 cgf.cgm.errorNYI(subExpr->getSourceRange(),
2151 "non-target address space conversion");
2152 // Since target may map different address spaces in AST to the same address
2153 // space, an address space conversion may end up as a bitcast.
2155 cgf, Visit(subExpr), subExprAS, convertType(destTy));
2156 }
2157
2158 case CK_AtomicToNonAtomic: {
2159 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2160 "CastExpr: ", ce->getCastKindName());
2161 mlir::Location loc = cgf.getLoc(subExpr->getSourceRange());
2162 return cgf.createDummyValue(loc, destTy);
2163 }
2164 case CK_NonAtomicToAtomic:
2165 case CK_UserDefinedConversion:
2166 return Visit(const_cast<Expr *>(subExpr));
2167 case CK_NoOp: {
2168 auto v = Visit(const_cast<Expr *>(subExpr));
2169 if (v) {
2170 // CK_NoOp can model a pointer qualification conversion, which can remove
2171 // an array bound and change the IR type.
2172 // FIXME: Once pointee types are removed from IR, remove this.
2173 mlir::Type t = cgf.convertType(destTy);
2174 if (t != v.getType())
2175 cgf.getCIRGenModule().errorNYI("pointer qualification conversion");
2176 }
2177 return v;
2178 }
2179 case CK_IntegralToPointer: {
2180 mlir::Type destCIRTy = cgf.convertType(destTy);
2181 mlir::Value src = Visit(const_cast<Expr *>(subExpr));
2182
2183 // Properly resize by casting to an int of the same size as the pointer.
2184 // Clang's IntegralToPointer includes 'bool' as the source, but in CIR
2185 // 'bool' is not an integral type. So check the source type to get the
2186 // correct CIR conversion.
2187 mlir::Type middleTy = cgf.cgm.getDataLayout().getIntPtrType(destCIRTy);
2188 mlir::Value middleVal = builder.createCast(
2189 subExpr->getType()->isBooleanType() ? cir::CastKind::bool_to_int
2190 : cir::CastKind::integral,
2191 src, middleTy);
2192
2193 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers) {
2194 cgf.cgm.errorNYI(subExpr->getSourceRange(),
2195 "IntegralToPointer: strict vtable pointers");
2196 return {};
2197 }
2198
2199 return builder.createIntToPtr(middleVal, destCIRTy);
2200 }
2201
2202 case CK_BaseToDerived: {
2203 const CXXRecordDecl *derivedClassDecl = destTy->getPointeeCXXRecordDecl();
2204 assert(derivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2205 Address base = cgf.emitPointerWithAlignment(subExpr);
2206 Address derived = cgf.getAddressOfDerivedClass(
2207 cgf.getLoc(ce->getSourceRange()), base, derivedClassDecl, ce->path(),
2208 cgf.shouldNullCheckClassCastValue(ce));
2209
2210 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2211 // performed and the object is not of the derived type.
2213
2214 return cgf.getAsNaturalPointerTo(derived, ce->getType()->getPointeeType());
2215 }
2216 case CK_UncheckedDerivedToBase:
2217 case CK_DerivedToBase: {
2218 // The EmitPointerWithAlignment path does this fine; just discard
2219 // the alignment.
2221 ce->getType()->getPointeeType());
2222 }
2223 case CK_Dynamic: {
2224 Address v = cgf.emitPointerWithAlignment(subExpr);
2225 const auto *dce = cast<CXXDynamicCastExpr>(ce);
2226 return cgf.emitDynamicCast(v, dce);
2227 }
2228 case CK_ArrayToPointerDecay:
2229 return cgf.emitArrayToPointerDecay(subExpr).getPointer();
2230
2231 case CK_NullToPointer: {
2232 if (mustVisitNullValue(subExpr))
2233 cgf.emitIgnoredExpr(subExpr);
2234
2235 // Note that DestTy is used as the MLIR type instead of a custom
2236 // nullptr type.
2237 mlir::Type ty = cgf.convertType(destTy);
2238 return builder.getNullPtr(ty, cgf.getLoc(subExpr->getExprLoc()));
2239 }
2240
2241 case CK_NullToMemberPointer: {
2242 if (mustVisitNullValue(subExpr))
2243 cgf.emitIgnoredExpr(subExpr);
2244
2246
2247 const MemberPointerType *mpt = ce->getType()->getAs<MemberPointerType>();
2248 if (mpt->isMemberFunctionPointerType()) {
2249 auto ty = mlir::cast<cir::MethodType>(cgf.convertType(destTy));
2250 return builder.getNullMethodPtr(ty, cgf.getLoc(subExpr->getExprLoc()));
2251 }
2252
2253 auto ty = mlir::cast<cir::DataMemberType>(cgf.convertType(destTy));
2254 return builder.getNullDataMemberPtr(ty, cgf.getLoc(subExpr->getExprLoc()));
2255 }
2256
2257 case CK_ReinterpretMemberPointer: {
2258 mlir::Value src = Visit(subExpr);
2259 return builder.createBitcast(cgf.getLoc(subExpr->getExprLoc()), src,
2260 cgf.convertType(destTy));
2261 }
2262 case CK_BaseToDerivedMemberPointer:
2263 case CK_DerivedToBaseMemberPointer: {
2264 mlir::Value src = Visit(subExpr);
2265
2267
2268 QualType derivedTy =
2269 kind == CK_DerivedToBaseMemberPointer ? subExpr->getType() : destTy;
2270 const auto *mpType = derivedTy->castAs<MemberPointerType>();
2271 NestedNameSpecifier qualifier = mpType->getQualifier();
2272 assert(qualifier && "member pointer without class qualifier");
2273 const Type *qualifierType = qualifier.getAsType();
2274 assert(qualifierType && "member pointer qualifier is not a type");
2275 const CXXRecordDecl *derivedClass = qualifierType->getAsCXXRecordDecl();
2276 CharUnits offset =
2277 cgf.cgm.computeNonVirtualBaseClassOffset(derivedClass, ce->path());
2278
2279 mlir::Location loc = cgf.getLoc(subExpr->getExprLoc());
2280 mlir::Type resultTy = cgf.convertType(destTy);
2281 mlir::IntegerAttr offsetAttr = builder.getIndexAttr(offset.getQuantity());
2282
2283 if (subExpr->getType()->isMemberFunctionPointerType()) {
2284 if (kind == CK_BaseToDerivedMemberPointer)
2285 return cir::DerivedMethodOp::create(builder, loc, resultTy, src,
2286 offsetAttr);
2287 return cir::BaseMethodOp::create(builder, loc, resultTy, src, offsetAttr);
2288 }
2289
2290 if (kind == CK_BaseToDerivedMemberPointer)
2291 return cir::DerivedDataMemberOp::create(builder, loc, resultTy, src,
2292 offsetAttr);
2293 return cir::BaseDataMemberOp::create(builder, loc, resultTy, src,
2294 offsetAttr);
2295 }
2296
2297 case CK_LValueToRValue:
2298 assert(cgf.getContext().hasSameUnqualifiedType(subExpr->getType(), destTy));
2299 assert(subExpr->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2300 return Visit(const_cast<Expr *>(subExpr));
2301
2302 case CK_IntegralCast: {
2303 ScalarConversionOpts opts;
2304 if (auto *ice = dyn_cast<ImplicitCastExpr>(ce)) {
2305 if (!ice->isPartOfExplicitCast())
2306 opts = ScalarConversionOpts(cgf.sanOpts);
2307 }
2308 return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
2309 ce->getExprLoc(), opts);
2310 }
2311
2312 case CK_FloatingComplexToReal:
2313 case CK_IntegralComplexToReal:
2314 case CK_FloatingComplexToBoolean:
2315 case CK_IntegralComplexToBoolean: {
2316 mlir::Value value = cgf.emitComplexExpr(subExpr);
2317 return emitComplexToScalarConversion(cgf.getLoc(ce->getExprLoc()), value,
2318 kind, destTy);
2319 }
2320
2321 case CK_FloatingRealToComplex:
2322 case CK_FloatingComplexCast:
2323 case CK_IntegralRealToComplex:
2324 case CK_IntegralComplexCast:
2325 case CK_IntegralComplexToFloatingComplex:
2326 case CK_FloatingComplexToIntegralComplex:
2327 llvm_unreachable("scalar cast to non-scalar value");
2328
2329 case CK_PointerToIntegral: {
2330 assert(!destTy->isBooleanType() && "bool should use PointerToBool");
2331 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
2332 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2333 "strict vtable pointers");
2334 return builder.createPtrToInt(Visit(subExpr), cgf.convertType(destTy));
2335 }
2336 case CK_ToVoid:
2337 cgf.emitIgnoredExpr(subExpr);
2338 return {};
2339
2340 case CK_IntegralToFloating:
2341 case CK_FloatingToIntegral:
2342 case CK_FloatingCast:
2343 case CK_FixedPointToFloating:
2344 case CK_FloatingToFixedPoint: {
2345 if (kind == CK_FixedPointToFloating || kind == CK_FloatingToFixedPoint) {
2346 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2347 "fixed point casts");
2348 return {};
2349 }
2351 return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
2352 ce->getExprLoc());
2353 }
2354
2355 case CK_IntegralToBoolean:
2356 return emitIntToBoolConversion(Visit(subExpr),
2357 cgf.getLoc(ce->getSourceRange()));
2358
2359 case CK_PointerToBoolean:
2360 return emitPointerToBoolConversion(Visit(subExpr), subExpr->getType());
2361 case CK_FloatingToBoolean:
2362 return emitFloatToBoolConversion(Visit(subExpr),
2363 cgf.getLoc(subExpr->getExprLoc()));
2364 case CK_MemberPointerToBoolean: {
2365 mlir::Value memPtr = Visit(subExpr);
2366 return builder.createCast(cgf.getLoc(ce->getSourceRange()),
2367 cir::CastKind::member_ptr_to_bool, memPtr,
2368 cgf.convertType(destTy));
2369 }
2370
2371 case CK_VectorSplat: {
2372 // Create a vector object and fill all elements with the same scalar value.
2373 assert(destTy->isVectorType() && "CK_VectorSplat to non-vector type");
2374 return cir::VecSplatOp::create(builder,
2375 cgf.getLoc(subExpr->getSourceRange()),
2376 cgf.convertType(destTy), Visit(subExpr));
2377 }
2378 case CK_FunctionToPointerDecay:
2379 return cgf.emitLValue(subExpr).getPointer();
2380
2381 default:
2382 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2383 "CastExpr: ", ce->getCastKindName());
2384 }
2385 return {};
2386}
2387
2388mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *e) {
2389 if (e->getCallReturnType(cgf.getContext())->isReferenceType())
2390 return emitLoadOfLValue(e);
2391
2392 auto v = cgf.emitCallExpr(e).getValue();
2394 return v;
2395}
2396
2397mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *e) {
2398 // TODO(cir): The classic codegen calls tryEmitAsConstant() here. Folding
2399 // constants sounds like work for MLIR optimizers, but we'll keep an assertion
2400 // for now.
2402 Expr::EvalResult result;
2403 if (e->EvaluateAsInt(result, cgf.getContext(), Expr::SE_AllowSideEffects)) {
2404 llvm::APSInt value = result.Val.getInt();
2405 cgf.emitIgnoredExpr(e->getBase());
2406 return builder.getConstInt(cgf.getLoc(e->getExprLoc()), value);
2407 }
2408 return emitLoadOfLValue(e);
2409}
2410
2411mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *e) {
2412 const unsigned numInitElements = e->getNumInits();
2413
2414 [[maybe_unused]] const bool ignore = std::exchange(ignoreResultAssign, false);
2415 assert((ignore == false ||
2416 (numInitElements == 0 && e->getType()->isVoidType())) &&
2417 "init list ignored");
2418
2419 if (e->hadArrayRangeDesignator()) {
2420 cgf.cgm.errorNYI(e->getSourceRange(), "ArrayRangeDesignator");
2421 return {};
2422 }
2423
2424 if (e->getType()->isVectorType()) {
2425 const auto vectorType =
2426 mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
2427
2428 SmallVector<mlir::Value, 16> elements;
2429 for (Expr *init : e->inits()) {
2430 elements.push_back(Visit(init));
2431 }
2432
2433 // Zero-initialize any remaining values.
2434 if (numInitElements < vectorType.getSize()) {
2435 const mlir::Value zeroValue = cgf.getBuilder().getNullValue(
2436 vectorType.getElementType(), cgf.getLoc(e->getSourceRange()));
2437 std::fill_n(std::back_inserter(elements),
2438 vectorType.getSize() - numInitElements, zeroValue);
2439 }
2440
2441 return cir::VecCreateOp::create(cgf.getBuilder(),
2442 cgf.getLoc(e->getSourceRange()), vectorType,
2443 elements);
2444 }
2445
2446 // C++11 value-initialization for the scalar.
2447 if (numInitElements == 0)
2448 return emitNullValue(e->getType(), cgf.getLoc(e->getExprLoc()));
2449
2450 return Visit(e->getInit(0));
2451}
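// For example, with 'typedef int v4 __attribute__((vector_size(16)));' the
// initializer 'v4 v = {1, 2};' visits the two provided elements and appends
// two zero constants, so the cir::VecCreateOp above always receives exactly
// four operands.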
2452
2453mlir::Value CIRGenFunction::emitScalarConversion(mlir::Value src,
2454 QualType srcTy, QualType dstTy,
2455 SourceLocation loc) {
2456 assert(CIRGenFunction::hasScalarEvaluationKind(srcTy) &&
2457 CIRGenFunction::hasScalarEvaluationKind(dstTy) &&
2458 "Invalid scalar expression to emit");
2459 return ScalarExprEmitter(*this, builder)
2460 .emitScalarConversion(src, srcTy, dstTy, loc);
2461}
2462
2463mlir::Value CIRGenFunction::emitComplexToScalarConversion(mlir::Value src,
2464 QualType srcTy,
2465 QualType dstTy,
2466 SourceLocation loc) {
2467 assert(srcTy->isAnyComplexType() && hasScalarEvaluationKind(dstTy) &&
2468 "Invalid complex -> scalar conversion");
2469
2470 QualType complexElemTy = srcTy->castAs<ComplexType>()->getElementType();
2471 if (dstTy->isBooleanType()) {
2472 auto kind = complexElemTy->isFloatingType()
2473 ? cir::CastKind::float_complex_to_bool
2474 : cir::CastKind::int_complex_to_bool;
2475 return builder.createCast(getLoc(loc), kind, src, convertType(dstTy));
2476 }
2477
2478 auto kind = complexElemTy->isFloatingType()
2479 ? cir::CastKind::float_complex_to_real
2480 : cir::CastKind::int_complex_to_real;
2481 mlir::Value real =
2482 builder.createCast(getLoc(loc), kind, src, convertType(complexElemTy));
2483 return emitScalarConversion(real, complexElemTy, dstTy, loc);
2484}
2485
2486mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) {
2487 // Perform vector logical not on comparison with zero vector.
2488 if (e->getType()->isVectorType() &&
2489 e->getType()->castAs<VectorType>()->getVectorKind() ==
2490 VectorKind::Generic) {
2491 mlir::Value oper = Visit(e->getSubExpr());
2492 mlir::Location loc = cgf.getLoc(e->getExprLoc());
2493 auto operVecTy = mlir::cast<cir::VectorType>(oper.getType());
2494 auto exprVecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
2495 mlir::Value zeroVec = builder.getNullValue(operVecTy, loc);
2496 return cir::VecCmpOp::create(builder, loc, exprVecTy, cir::CmpOpKind::eq,
2497 oper, zeroVec);
2498 }
2499
2500 // Compare operand to zero.
2501 mlir::Value boolVal = cgf.evaluateExprAsBool(e->getSubExpr());
2502
2503 // Invert value.
2504 boolVal = builder.createNot(boolVal);
2505
2506 // ZExt result to the expr type.
2507 return maybePromoteBoolResult(boolVal, cgf.convertType(e->getType()));
2508}
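// For example, '!v' on a generic vector 'v' is emitted above as a
// cir::VecCmpOp equality comparison of 'v' against the zero vector, rather
// than as a scalar boolean negation.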
2509
2510mlir::Value ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *e) {
2511 // Try folding the offsetof to a constant.
2512 Expr::EvalResult evalResult;
2513 if (e->EvaluateAsInt(evalResult, cgf.getContext())) {
2514 mlir::Type type = cgf.convertType(e->getType());
2515 llvm::APSInt value = evalResult.Val.getInt();
2516 return builder.getConstAPInt(cgf.getLoc(e->getExprLoc()), type, value);
2517 }
2518
2520 e->getSourceRange(),
2521 "ScalarExprEmitter::VisitOffsetOfExpr Can't eval expr as int");
2522 return {};
2523}
2524
2525mlir::Value ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *e) {
2526 QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
2527 mlir::Value result = VisitRealImag(e, promotionTy);
2528 if (result && !promotionTy.isNull())
2529 result = emitUnPromotedValue(result, e->getType());
2530 return result;
2531}
2532
2533mlir::Value ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *e) {
2534 QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
2535 mlir::Value result = VisitRealImag(e, promotionTy);
2536 if (result && !promotionTy.isNull())
2537 result = emitUnPromotedValue(result, e->getType());
2538 return result;
2539}
2540
2541mlir::Value ScalarExprEmitter::VisitRealImag(const UnaryOperator *e,
2542 QualType promotionTy) {
2543 assert((e->getOpcode() == clang::UO_Real ||
2544 e->getOpcode() == clang::UO_Imag) &&
2545 "Invalid UnaryOp kind for ComplexType Real or Imag");
2546
2547 Expr *op = e->getSubExpr();
2548 mlir::Location loc = cgf.getLoc(e->getExprLoc());
2549 if (op->getType()->isAnyComplexType()) {
2550 // If it's an l-value, load through the appropriate subobject l-value.
2551 // Note that we have to ask `e` because `op` might be an l-value that
2552 // this won't work for, e.g. an Obj-C property.
2553 mlir::Value complex = cgf.emitComplexExpr(op);
2554 if (e->isGLValue() && !promotionTy.isNull()) {
2555 promotionTy = promotionTy->isAnyComplexType()
2556 ? promotionTy
2557 : cgf.getContext().getComplexType(promotionTy);
2558 complex = cgf.emitPromotedValue(complex, promotionTy);
2559 }
2560
2561 return e->getOpcode() == clang::UO_Real
2562 ? builder.createComplexReal(loc, complex)
2563 : builder.createComplexImag(loc, complex);
2564 }
2565
2566 if (e->getOpcode() == UO_Real) {
2567 mlir::Value operand = promotionTy.isNull()
2568 ? Visit(op)
2569 : cgf.emitPromotedScalarExpr(op, promotionTy);
2570 return builder.createComplexReal(loc, operand);
2571 }
2572
2573 // __imag on a scalar returns zero. Emit the subexpr to ensure side
2574 // effects are evaluated, but not the actual value.
2575 mlir::Value operand;
2576 if (op->isGLValue()) {
2577 operand = cgf.emitLValue(op).getPointer();
2578 operand = cir::LoadOp::create(builder, loc, operand);
2579 } else if (!promotionTy.isNull()) {
2580 operand = cgf.emitPromotedScalarExpr(op, promotionTy);
2581 } else {
2582 operand = cgf.emitScalarExpr(op);
2583 }
2584 return builder.createComplexImag(loc, operand);
2585}
2586
2587/// Return the size or alignment of the type of argument of the sizeof
2588/// expression as an integer.
2589mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2590 const UnaryExprOrTypeTraitExpr *e) {
2591 const QualType typeToSize = e->getTypeOfArgument();
2592 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
2593 if (auto kind = e->getKind();
2594 kind == UETT_SizeOf || kind == UETT_DataSizeOf || kind == UETT_CountOf) {
2595 if (const VariableArrayType *vat =
2596 cgf.getContext().getAsVariableArrayType(typeToSize)) {
2597 // For _Countof, we only want to evaluate if the extent is actually
2598 // variable as opposed to a multi-dimensional array whose extent is
2599 // constant but whose element type is variable.
2600 bool evaluateExtent = true;
2601 if (kind == UETT_CountOf && vat->getElementType()->isArrayType()) {
2602 evaluateExtent =
2603 !vat->getSizeExpr()->isIntegerConstantExpr(cgf.getContext());
2604 }
2605
2606 if (evaluateExtent) {
2607 if (e->isArgumentType()) {
2608 // sizeof(type) - make sure to emit the VLA size.
2609 cgf.emitVariablyModifiedType(typeToSize);
2610 } else {
2611 // C99 6.5.3.4p2: If the argument is an expression of type
2612 // VLA, it is evaluated.
2613 cgf.emitIgnoredExpr(e->getArgumentExpr());
2614 }
2615
2616 // For _Countof, we just want to return the size of a single dimension.
2617 if (kind == UETT_CountOf)
2618 return cgf.getVLAElements1D(vat).numElts;
2619
2620 // For sizeof and __datasizeof, we need to scale the number of elements
2621 // by the size of the array element type.
2622 CIRGenFunction::VlaSizePair vlaSize = cgf.getVLASize(vat);
2623 mlir::Value numElts = vlaSize.numElts;
2624
2625 // Scale the number of non-VLA elements by the non-VLA element size.
2626 CharUnits eltSize = cgf.getContext().getTypeSizeInChars(vlaSize.type);
2627 if (!eltSize.isOne()) {
2628 mlir::Location loc = cgf.getLoc(e->getSourceRange());
2629 mlir::Value eltSizeValue =
2630 builder.getConstAPInt(numElts.getLoc(), numElts.getType(),
2631 cgf.cgm.getSize(eltSize).getValue());
2632 return builder.createMul(loc, eltSizeValue, numElts,
2634 }
2635
2636 return numElts;
2637 }
2638 }
2639 } else if (e->getKind() == UETT_OpenMPRequiredSimdAlign) {
2641 e->getSourceRange(), "sizeof operator for OpenMpRequiredSimdAlign",
2642 e->getStmtClassName());
2643 return builder.getConstant(
2644 loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
2645 llvm::APSInt(llvm::APInt(64, 1), true)));
2646 } else if (e->getKind() == UETT_VectorElements) {
2647 auto vecTy = cast<cir::VectorType>(convertType(e->getTypeOfArgument()));
2648 if (vecTy.getIsScalable()) {
2650 e->getSourceRange(),
2651 "VisitUnaryExprOrTypeTraitExpr: sizeOf scalable vector");
2652 return builder.getConstant(
2653 loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
2655 }
2656
2657 return builder.getConstant(
2658 loc, cir::IntAttr::get(cgf.cgm.uInt64Ty, vecTy.getSize()));
2659 }
2660
2661 return builder.getConstant(
2662 loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
2664}
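// For example, 'sizeof(float[n])' with a runtime 'n' takes the VLA branch
// above: the saved element count is multiplied by sizeof(float), while
// '_Countof(float[n])' simply returns the element count of the outermost
// dimension.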
2665
2666/// Return true if the specified expression is cheap enough and side-effect-free
2667/// enough to evaluate unconditionally instead of conditionally. This is used
2668/// to convert control flow into selects in some cases.
2669/// TODO(cir): can be shared with LLVM codegen.
2670static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e,
2671 CIRGenFunction &cgf) {
2672 // Anything that is an integer or floating point constant is fine.
2673 return e->IgnoreParens()->isEvaluatable(cgf.getContext());
2674
2675 // Even non-volatile automatic variables can't be evaluated unconditionally.
2676 // Referencing a thread_local may cause non-trivial initialization work to
2677 // occur. If we're inside a lambda and one of the variables is from the scope
2678 // outside the lambda, that function may have returned already. Reading its
2679 // locals is a bad idea. Also, these reads may introduce races that didn't
2680 // exist in the source-level program.
2681}
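// For example, 'cond ? 4 : 5' has two arms that fold to integer constants,
// so VisitAbstractConditionalOperator below can emit a single select via
// builder.createSelect instead of a cir::TernaryOp with control flow.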
2682
2683mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator(
2684 const AbstractConditionalOperator *e) {
2685 CIRGenBuilderTy &builder = cgf.getBuilder();
2686 mlir::Location loc = cgf.getLoc(e->getSourceRange());
2687 ignoreResultAssign = false;
2688
2689 // Bind the common expression if necessary.
2690 CIRGenFunction::OpaqueValueMapping binding(cgf, e);
2691
2692 Expr *condExpr = e->getCond();
2693 Expr *lhsExpr = e->getTrueExpr();
2694 Expr *rhsExpr = e->getFalseExpr();
2695
2696 // If the condition constant folds and can be elided, try to avoid emitting
2697 // the condition and the dead arm.
2698 bool condExprBool;
2699 if (cgf.constantFoldsToBool(condExpr, condExprBool)) {
2700 Expr *live = lhsExpr, *dead = rhsExpr;
2701 if (!condExprBool)
2702 std::swap(live, dead);
2703
2704 // If the dead side doesn't have labels we need, just emit the Live part.
2705 if (!cgf.containsLabel(dead)) {
2706 if (condExprBool)
2708 mlir::Value result = Visit(live);
2709
2710 // If the live part is a throw expression, it acts like it has a void
2711 // type, so evaluating it returns a null Value. However, a conditional
2712 // with non-void type must return a non-null Value.
2713 if (!result && !e->getType()->isVoidType()) {
2714 result = builder.getConstant(
2715 loc, cir::PoisonAttr::get(builder.getContext(),
2716 cgf.convertType(e->getType())));
2717 }
2718
2719 return result;
2720 }
2721 }
2722
2723 QualType condType = condExpr->getType();
2724
2725 // OpenCL: If the condition is a vector, we can treat this condition like
2726 // the select function.
2727 if ((cgf.getLangOpts().OpenCL && condType->isVectorType()) ||
2728 condType->isExtVectorType()) {
2730 cgf.cgm.errorNYI(e->getSourceRange(), "vector ternary op");
2731 }
2732
2733 if (condType->isVectorType() || condType->isSveVLSBuiltinType()) {
2734 if (!condType->isVectorType()) {
2736 cgf.cgm.errorNYI(loc, "TernaryOp for SVE vector");
2737 return {};
2738 }
2739
2740 mlir::Value condValue = Visit(condExpr);
2741 mlir::Value lhsValue = Visit(lhsExpr);
2742 mlir::Value rhsValue = Visit(rhsExpr);
2743 return cir::VecTernaryOp::create(builder, loc, condValue, lhsValue,
2744 rhsValue);
2745 }
2746
2747 // If this is a really simple expression (like x ? 4 : 5), emit this as a
2748 // select instead of as control flow. We can only do this if it is cheap
2749 // and safe to evaluate the LHS and RHS unconditionally.
2750 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, cgf) &&
2752 bool lhsIsVoid = false;
2753 mlir::Value condV = cgf.evaluateExprAsBool(condExpr);
2755
2756 mlir::Value lhs = Visit(lhsExpr);
2757 if (!lhs) {
2758 lhs = builder.getNullValue(cgf.voidTy, loc);
2759 lhsIsVoid = true;
2760 }
2761
2762 mlir::Value rhs = Visit(rhsExpr);
2763 if (lhsIsVoid) {
2764 assert(!rhs && "lhs and rhs types must match");
2765 rhs = builder.getNullValue(cgf.voidTy, loc);
2766 }
2767
2768 return builder.createSelect(loc, condV, lhs, rhs);
2769 }
2770
2771 mlir::Value condV = cgf.emitOpOnBoolExpr(loc, condExpr);
2772 CIRGenFunction::ConditionalEvaluation eval(cgf);
2773 SmallVector<mlir::OpBuilder::InsertPoint, 2> insertPoints{};
2774 mlir::Type yieldTy{};
2775
2776 auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc, Expr *expr) {
2777 CIRGenFunction::LexicalScope lexScope{cgf, loc, b.getInsertionBlock()};
2779
2781 eval.beginEvaluation();
2782 mlir::Value branch = Visit(expr);
2783 eval.endEvaluation();
2784
2785 if (branch) {
2786 yieldTy = branch.getType();
2787 cir::YieldOp::create(b, loc, branch);
2788 } else {
2789 // If the LHS or RHS is a throw or void expression, we need to patch the
2790 // arms so that their yield types match.
2791 insertPoints.push_back(b.saveInsertionPoint());
2792 }
2793 };
2794
2795 mlir::Value result = cir::TernaryOp::create(
2796 builder, loc, condV,
2797 /*trueBuilder=*/
2798 [&](mlir::OpBuilder &b, mlir::Location loc) {
2799 emitBranch(b, loc, lhsExpr);
2800 },
2801 /*falseBuilder=*/
2802 [&](mlir::OpBuilder &b, mlir::Location loc) {
2803 emitBranch(b, loc, rhsExpr);
2804 })
2805 .getResult();
2806
2807 if (!insertPoints.empty()) {
2808 // If both arms are void, so be it.
2809 if (!yieldTy)
2810 yieldTy = cgf.voidTy;
2811
2812 // Insert required yields.
2813 for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
2814 mlir::OpBuilder::InsertionGuard guard(builder);
2815 builder.restoreInsertionPoint(toInsert);
2816
2817 // Block does not return: build empty yield.
2818 if (mlir::isa<cir::VoidType>(yieldTy)) {
2819 cir::YieldOp::create(builder, loc);
2820 } else { // Block returns: set null yield value.
2821 mlir::Value op0 = builder.getNullValue(yieldTy, loc);
2822 cir::YieldOp::create(builder, loc, op0);
2823 }
2824 }
2825 }
2826
2827 return result;
2828}
2829
2830mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *e,
2831 LValue lv,
2832 cir::UnaryOpKind kind,
2833 bool isPre) {
2834 return ScalarExprEmitter(*this, builder)
2835 .emitScalarPrePostIncDec(e, lv, kind, isPre);
2836}
#define HANDLE_BINOP(OP)
static bool mustVisitNullValue(const Expr *e)
#define COMPOUND_OP(Op)
#define HANDLEBINOP(OP)
static bool isWidenedIntegerOp(const ASTContext &astContext, const Expr *e)
Check if e is a widened promoted integer.
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e, CIRGenFunction &cgf)
Return true if the specified expression is cheap enough and side-effect-free enough to evaluate uncon...
static bool canElideOverflowCheck(const ASTContext &astContext, const BinOpInfo &op)
Check if we can skip the overflow check for Op.
static std::optional< QualType > getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e)
If e is a widened promoted integer, get its base (unpromoted) type.
#define VISITCOMP(CODE)
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
cir::ConstantOp getBool(bool state, mlir::Location loc)
cir::ConstantOp getNullValue(mlir::Type ty, mlir::Location loc)
cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr)
mlir::Value createCast(mlir::Location loc, cir::CastKind kind, mlir::Value src, mlir::Type newTy)
mlir::Value createIntToPtr(mlir::Value src, mlir::Type newTy)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
cir::CmpOp createCompare(mlir::Location loc, cir::CmpOpKind kind, mlir::Value lhs, mlir::Value rhs)
mlir::Value createSelect(mlir::Location loc, mlir::Value condition, mlir::Value trueValue, mlir::Value falseValue)
mlir::Type getIntPtrType(mlir::Type ty) const
llvm::APInt getValue() const
APSInt & getInt()
Definition APValue.h:489
bool isNullPointer() const
Definition APValue.cpp:1019
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CanQualType FloatTy
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
CanQualType BoolTy
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getComplexType(QualType T) const
Return the uniqued reference to the type for a complex number with the specified element type.
bool isPromotableIntegerType(QualType T) const
More type predicates useful for type checking/promotion.
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4531
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4537
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4543
LabelDecl * getLabel() const
Definition Expr.h:4573
uint64_t getValue() const
Definition ExprCXX.h:3044
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4038
Expr * getLHS() const
Definition Expr.h:4088
SourceLocation getExprLoc() const
Definition Expr.h:4079
Expr * getRHS() const
Definition Expr.h:4090
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:4251
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, const Expr *LHS, const Expr *RHS)
Return true if a binary operator using the specified opcode and operands would match the 'p = (i8*)nu...
Definition Expr.cpp:2204
Opcode getOpcode() const
Definition Expr.h:4083
BinaryOperatorKind Opcode
Definition Expr.h:4043
mlir::Value getPointer() const
Definition Address.h:95
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
mlir::Value createNeg(mlir::Value value)
void forceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
static bool hasScalarEvaluationKind(clang::QualType type)
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
mlir::Type convertType(clang::QualType t)
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType)
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void emitVariablyModifiedType(QualType ty)
const clang::LangOptions & getLangOpts() const
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e, mlir::Value &result)
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind kind, bool isPre)
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
mlir::Type convertTypeForMem(QualType t)
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType)
bool shouldNullCheckClassCastValue(const CastExpr *ce)
CIRGenBuilderTy & getBuilder()
mlir::MLIRContext & getMLIRContext()
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
clang::ASTContext & getContext() const
void emitNullabilityCheck(LValue lhs, mlir::Value rhs, clang::SourceLocation loc)
Given an assignment *lhs = rhs, emit a test that checks if rhs is nonnull, if 1LHS is marked _Nonnull...
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
CharUnits computeNonVirtualBaseClassOffset(const CXXRecordDecl *derivedClass, llvm::iterator_range< CastExpr::path_const_iterator > path)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
mlir::IntegerAttr getSize(CharUnits size)
const cir::CIRDataLayout getDataLayout() const
const clang::CodeGenOptions & getCodeGenOpts() const
const TargetCIRGenInfo & getTargetCIRGenInfo()
mlir::Value emitNullConstant(QualType t, mlir::Location loc)
Return the result of value-initializing the given type, i.e.
mlir::Value getPointer() const
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
virtual mlir::Value performAddrSpaceCast(CIRGenFunction &cgf, mlir::Value v, cir::TargetAddressSpaceAttr srcAddr, mlir::Type destTy, bool isNonNull=false) const
Perform address space cast of an expression of pointer type.
bool getValue() const
Definition ExprCXX.h:740
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
bool getValue() const
Definition ExprCXX.h:4332
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition ExprCXX.h:304
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1602
CastKind getCastKind() const
Definition Expr.h:3720
llvm::iterator_range< path_iterator > path()
Path through the class hierarchy taken by casts between base and derived classes (see implementation ...
Definition Expr.h:3763
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1950
Expr * getSubExpr()
Definition Expr.h:3726
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
bool isOne() const
isOne - Test whether the quantity equals one.
Definition CharUnits.h:125
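A hedged sketch of how these CharUnits queries typically appear when scaling a byte offset by an element size (elemTy and byteOffset are assumed names):
  // Sketch: skip the multiply when the element size is one byte.
  clang::CharUnits eltSize = cgf.getContext().getTypeSizeInChars(elemTy);
  if (!eltSize.isOne())
    byteOffset *= eltSize.getQuantity();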
unsigned getValue() const
Definition Expr.h:1629
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4884
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3276
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4300
QualType getComputationLHSType() const
Definition Expr.h:4334
QualType getComputationResultType() const
Definition Expr.h:4337
SourceLocation getExprLoc() const LLVM_READONLY
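The computation-type accessors above drive the usual promote/operate/narrow sequence for compound assignments; a rough sketch under assumed names (e is the CompoundAssignOperator, lhsVal an already-emitted value):
  // Sketch: widen the LHS to the computation type before the operation;
  // the result is later converted back to the LHS type for the store.
  mlir::Value promotedLhs = cgf.emitScalarConversion(
      lhsVal, e->getLHS()->getType(), e->getComputationLHSType(),
      e->getExprLoc());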
bool isSatisfied() const
Whether or not the concept with the given arguments was satisfied when the expression was created.
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:4809
ChildElementIter< false > begin()
Definition Expr.h:5232
size_t getDataElementCount() const
Definition Expr.h:5148
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
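This is the standard constant-folding entry point; a minimal sketch of the common pattern (the surrounding context is assumed):
  // Sketch: try to fold the expression to an integer constant.
  Expr::EvalResult result;
  if (e->EvaluateAsInt(result, cgf.getContext())) {
    llvm::APSInt folded = result.Val.getInt();
    // ... use the folded constant, e.g. to pick a cheaper code path ...
  }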
bool isGLValue() const
Definition Expr.h:287
@ SE_AllowSideEffects
Allow any unmodeled side effect.
Definition Expr.h:674
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3085
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3069
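Both strippers are commonly chained before pattern-matching on an expression; a short sketch:
  // Sketch: look through parentheses and implicit casts before checking
  // whether the operand is really a unary operator.
  const Expr *core = e->IgnoreParens()->IgnoreImpCasts();
  if (const auto *unop = llvm::dyn_cast<UnaryOperator>(core)) {
    // inspect unop->getOpcode(), unop->getSubExpr(), ...
  }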
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:276
QualType getType() const
Definition Expr.h:144
llvm::APFloat getValue() const
Definition Expr.h:1666
const Expr * getSubExpr() const
Definition Expr.h:1062
Expr * getResultExpr()
Return the result expression of this controlling expression.
Definition Expr.h:6462
unsigned getNumInits() const
Definition Expr.h:5329
bool hadArrayRangeDesignator() const
Definition Expr.h:5483
const Expr * getInit(unsigned Init) const
Definition Expr.h:5353
ArrayRef< Expr * > inits()
Definition Expr.h:5349
bool isSignedOverflowDefined() const
Expr * getBase() const
Definition Expr.h:3441
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3559
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3654
StringRef getName() const
Get the name of the identifier for this declaration as a StringRef.
Definition Decl.h:301
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprObjC.h:218
SourceRange getSourceRange() const
Definition ExprObjC.h:1719
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprObjC.h:162
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprObjC.h:381
OffsetOfExpr - [C99 7.17] - This represents an expression of the form offsetof(record-type,...
Definition Expr.h:2527
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:1208
Expr * getSelectedExpr() const
Definition ExprCXX.h:4639
const Expr * getSubExpr() const
Definition Expr.h:2199
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3329
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8292
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8418
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns the lifetime attribute of this type.
Definition TypeBase.h:1438
QualType getCanonicalType() const
Definition TypeBase.h:8344
bool UseExcessPrecision(const ASTContext &Ctx)
Definition Type.cpp:1613
bool isCanonical() const
Definition TypeBase.h:8349
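A short hedged sketch of the QualType queries listed above, as they typically appear in an emitter (the variable qt is assumed):
  // Sketch: basic QualType checks before deciding how to emit a value.
  assert(!qt.isNull() && "expected a formed type");
  clang::QualType canon = qt.getCanonicalType();
  if (canon->isPointerType() &&
      qt.getAddressSpace() != clang::LangAS::Default) {
    // an address-space-qualified pointer may need an explicit cast
  }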
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
bool isSatisfied() const
Whether or not the requires clause is satisfied.
unsigned getNumSubExprs() const
getNumSubExprs - Return the size of the SubExprs array.
Definition Expr.h:4676
Expr * getExpr(unsigned Index)
getExpr - Return the Expr at the specified index.
Definition Expr.h:4682
APValue EvaluateInContext(const ASTContext &Ctx, const Expr *DefaultExpr) const
Return the result of evaluating this SourceLocExpr in the specified (and possibly null) default argum...
Definition Expr.cpp:2281
SourceLocation getLocation() const
Definition Expr.h:5061
Encodes a location in the source.
SourceLocation getBegin() const
CompoundStmt * getSubStmt()
Definition Expr.h:4612
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
const char * getStmtClassName() const
Definition Stmt.cpp:87
bool getBoolValue() const
Definition ExprCXX.h:2947
bool isStoredAsBoolean() const
Definition ExprCXX.h:2943
bool isVoidType() const
Definition TypeBase.h:8891
bool isBooleanType() const
Definition TypeBase.h:9021
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration type whose underlying ty...
Definition Type.cpp:2226
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2206
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantMatrixType() const
Definition TypeBase.h:8696
bool isPointerType() const
Definition TypeBase.h:8529
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8935
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9178
bool isReferenceType() const
Definition TypeBase.h:8553
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1910
bool isSveVLSBuiltinType() const
Determines if this is a sizeless type supported by the 'arm_sve_vector_bits' type attribute,...
Definition Type.cpp:2608
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
bool hasUnsignedIntegerRepresentation() const
Determine whether this type has an unsigned integer representation of some sort, e....
Definition Type.cpp:2292
bool isExtVectorType() const
Definition TypeBase.h:8672
bool isAnyComplexType() const
Definition TypeBase.h:8664
bool isFixedPointType() const
Return true if this is a fixed point type according to ISO/IEC JTC1 SC22 WG14 N1169.
Definition TypeBase.h:8947
bool isHalfType() const
Definition TypeBase.h:8895
bool hasSignedIntegerRepresentation() const
Determine whether this type has a signed integer representation of some sort, e.g....
Definition Type.cpp:2244
bool isMatrixType() const
Definition TypeBase.h:8692
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2801
bool isFunctionType() const
Definition TypeBase.h:8525
bool isMemberFunctionPointerType() const
Definition TypeBase.h:8614
bool isVectorType() const
Definition TypeBase.h:8668
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2321
bool isFloatingType() const
Definition Type.cpp:2305
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2254
const T * getAs() const
Member-template getAs<specific type>.
Definition TypeBase.h:9111
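The difference between the two member templates is worth a sketch: getAs<> answers a question, castAs<> asserts an already-known fact (variable names assumed):
  // Sketch: getAs<> returns null on mismatch; castAs<> asserts instead.
  if (const auto *ptrTy = qt->getAs<clang::PointerType>()) {
    clang::QualType pointee = ptrTy->getPointeeType();
    (void)pointee;
  }
  // Only when the kind is already established:
  // const auto *vecTy = qt->castAs<clang::VectorType>();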
bool isNullPtrType() const
Definition TypeBase.h:8928
QualType getTypeOfArgument() const
Gets the argument type, or the type of the argument expression, whichever is appropriate.
Definition Expr.h:2694
UnaryExprOrTypeTrait getKind() const
Definition Expr.h:2657
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
SourceLocation getExprLoc() const
Definition Expr.h:2368
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
static bool isIncrementOp(Opcode Op)
Definition Expr.h:2326
bool canOverflow() const
Returns true if the unary operator can cause an overflow.
Definition Expr.h:2298
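A hedged sketch of how these accessors combine when emitting an increment (uo is an assumed const UnaryOperator *, and Visit is the visitor dispatch):
  // Sketch: decide whether the increment needs overflow handling, then
  // visit the operand and pick the unary op kind accordingly.
  bool checkedInc =
      UnaryOperator::isIncrementOp(uo->getOpcode()) && uo->canOverflow();
  mlir::Value operand = Visit(uo->getSubExpr());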
Represents a GCC generic vector type.
Definition TypeBase.h:4176
VectorKind getVectorKind() const
Definition TypeBase.h:4196
cir::TargetAddressSpaceAttr toCIRTargetAddressSpace(mlir::MLIRContext &context, clang::LangAS langAS)
Definition CIRTypes.cpp:937
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::ArgumentAdaptingMatcherFunc< internal::HasMatcher > has
Matches AST nodes that have child AST nodes that match the provided matcher.
const AstTypeMatcher< PointerType > pointerType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
bool isTargetAddressSpace(LangAS AS)
@ Type
The name was classified as a type.
Definition Sema.h:564
LangAS
Defines the address space values used by the address space qualifier of QualType.
CastKind
CastKind - The kind of operation required for a conversion.
@ Generic
not a target-specific vector type
Definition TypeBase.h:4137
U cast(CodeGen::Address addr)
Definition Address.h:327
static bool instrumentation()
static bool dataMemberType()
static bool objCLifetime()
static bool addressSpace()
static bool fixedPointType()
static bool vecTernaryOp()
static bool cgFPOptionsRAII()
static bool fpConstraints()
static bool addHeapAllocSiteMetadata()
static bool mayHaveIntegerOverflow()
static bool tryEmitAsConstant()
static bool llvmLoweringPtrDiffConsidersPointee()
static bool scalableVectors()
static bool memberFuncPtrAuthInfo()
static bool emitLValueAlignmentAssumption()
static bool incrementProfileCounter()
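These static predicates look like the cir::MissingFeatures guards used throughout CIRGen; under that assumption, the usual pattern is an assert that keeps unimplemented paths loud:
  // Sketch, assuming cir::MissingFeatures: fail fast on paths that rely
  // on functionality not yet ported.
  assert(!cir::MissingFeatures::addressSpace());
  assert(!cir::MissingFeatures::cgFPOptionsRAII());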
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition Expr.h:612
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174
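A closing sketch of how the sanitizer set is typically consulted before choosing a checked code path; the sanitizer kind here is only an example:
  // Sketch: only emit the overflow-checked form when the sanitizer is on.
  if (cgf.sanOpts.has(clang::SanitizerKind::SignedIntegerOverflow)) {
    // emit a checked binary op / trap path here
  }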