clang 23.0.0git
CIRGenExprScalar.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Emit Expr nodes with scalar CIR types as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
14#include "CIRGenFunction.h"
15#include "CIRGenValue.h"
16
17#include "clang/AST/Expr.h"
21
22#include "mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.h"
23#include "mlir/IR/Location.h"
24#include "mlir/IR/Value.h"
25
26#include <cassert>
27#include <utility>
28
29using namespace clang;
30using namespace clang::CIRGen;
31
32namespace {
33
/// Bundles the operands, types, and options needed to emit one binary
/// operation. Populated by the binop emitters and consumed by the helpers
/// that pick the concrete CIR op.
struct BinOpInfo {
  mlir::Value lhs;
  mlir::Value rhs;
  SourceRange loc;
  QualType fullType;             // Type of operands and result
  QualType compType;             // Type used for computations. Element type
                                 // for vectors, otherwise same as FullType.
  BinaryOperator::Opcode opcode; // Opcode of BinOp to perform
  FPOptions fpFeatures;
  const Expr *e; // Entire expr, for error unsupported. May not be binop.

  /// Check if the binop computes a division or a remainder.
  bool isDivRemOp() const {
    return opcode == BO_Div || opcode == BO_Rem || opcode == BO_DivAssign ||
           opcode == BO_RemAssign;
  }

  /// Check if the binop can result in integer overflow.
  bool mayHaveIntegerOverflow() const {
    // Without constant input, we can't rule out overflow.
    auto lhsci = lhs.getDefiningOp<cir::ConstantOp>();
    auto rhsci = rhs.getDefiningOp<cir::ConstantOp>();
    if (!lhsci || !rhsci)
      return true;

    // TODO(cir): For now we just assume that we might overflow
    return true;
  }

  /// Check if at least one operand is a fixed point type. In such cases,
  /// this operation did not follow usual arithmetic conversion and both
  /// operands might not be of the same type.
  bool isFixedPointOp() const {
    // We cannot simply check the result type since comparison operations
    // return an int.
    if (const auto *binOp = llvm::dyn_cast<BinaryOperator>(e)) {
      QualType lhstype = binOp->getLHS()->getType();
      QualType rhstype = binOp->getRHS()->getType();
      return lhstype->isFixedPointType() || rhstype->isFixedPointType();
    }
    if (const auto *unop = llvm::dyn_cast<UnaryOperator>(e))
      return unop->getSubExpr()->getType()->isFixedPointType();
    return false;
  }
};
80
/// Walks scalar-typed Clang expressions and emits the corresponding CIR
/// values. Each Visit* method returns the emitted value (or a null value
/// for void/NYI cases).
class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
  CIRGenFunction &cgf;
  CIRGenBuilderTy &builder;
  // Unlike classic codegen we set this to false or use std::exchange to read
  // the value instead of calling TestAndClearIgnoreResultAssign to make it
  // explicit when the value is used
  bool ignoreResultAssign;

public:
  ScalarExprEmitter(CIRGenFunction &cgf, CIRGenBuilderTy &builder,
                    bool ignoreResultAssign = false)
      : cgf(cgf), builder(builder), ignoreResultAssign(ignoreResultAssign) {}

  //===--------------------------------------------------------------------===//
  // Utilities
  //===--------------------------------------------------------------------===//

  /// Convert a Clang type to its CIR counterpart via the current function.
  mlir::Type convertType(QualType ty) { return cgf.convertType(ty); }

  mlir::Value emitComplexToScalarConversion(mlir::Location loc,
                                            mlir::Value value, CastKind kind,
                                            QualType destTy);

  /// Produce the null/zero constant of \p ty.
  mlir::Value emitNullValue(QualType ty, mlir::Location loc) {
    return cgf.cgm.emitNullConstant(ty, loc);
  }

  /// Cast \p result up to the floating-point \p promotionType.
  mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType) {
    return builder.createFloatingCast(result, cgf.convertType(promotionType));
  }

  /// Cast a promoted floating-point \p result back down to \p exprType.
  mlir::Value emitUnPromotedValue(mlir::Value result, QualType exprType) {
    return builder.createFloatingCast(result, cgf.convertType(exprType));
  }

  mlir::Value emitPromoted(const Expr *e, QualType promotionType);

  /// Widen a bool \p value to \p dstTy when it is an integer type; return it
  /// unchanged when \p dstTy is already bool.
  mlir::Value maybePromoteBoolResult(mlir::Value value,
                                     mlir::Type dstTy) const {
    if (mlir::isa<cir::IntType>(dstTy))
      return builder.createBoolToInt(value, dstTy);
    if (mlir::isa<cir::BoolType>(dstTy))
      return value;
    llvm_unreachable("Can only promote integer or boolean types");
  }
125
  //===--------------------------------------------------------------------===//
  // Visitor Methods
  //===--------------------------------------------------------------------===//

  /// Dispatch on the dynamic class of \p e.
  mlir::Value Visit(Expr *e) {
    return StmtVisitor<ScalarExprEmitter, mlir::Value>::Visit(e);
  }

  /// Non-expression statements must never reach the scalar emitter.
  mlir::Value VisitStmt(Stmt *s) {
    llvm_unreachable("Statement passed to ScalarExprEmitter");
  }

  /// Fallback for expression kinds without a dedicated visitor yet.
  mlir::Value VisitExpr(Expr *e) {
    cgf.getCIRGenModule().errorNYI(
        e->getSourceRange(), "scalar expression kind: ", e->getStmtClassName());
    return {};
  }

  mlir::Value VisitConstantExpr(ConstantExpr *e) {
    // A constant expression of type 'void' generates no code and produces no
    // value.
    if (e->getType()->isVoidType())
      return {};

    // Try to fold the whole expression to a constant attribute first.
    if (mlir::Attribute result = ConstantEmitter(cgf).tryEmitConstantExpr(e)) {
      if (e->isGLValue()) {
        cgf.cgm.errorNYI(e->getSourceRange(),
                         "ScalarExprEmitter: constant expr GL Value");
        return {};
      }

      return builder.getConstant(cgf.getLoc(e->getSourceRange()),
                                 mlir::cast<mlir::TypedAttr>(result));
    }

    cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: constant expr");
    return {};
  }

  /// A pack-indexing expression evaluates to its selected pack element.
  mlir::Value VisitPackIndexingExpr(PackIndexingExpr *e) {
    return Visit(e->getSelectedExpr());
  }

  /// Parentheses are transparent for code generation.
  mlir::Value VisitParenExpr(ParenExpr *pe) { return Visit(pe->getSubExpr()); }

  /// _Generic selects exactly one association; emit only that one.
  mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
    return Visit(ge->getResultExpr());
  }

  /// Emits the address of the l-value, then loads and returns the result.
  mlir::Value emitLoadOfLValue(const Expr *e) {
    LValue lv = cgf.emitLValue(e);
    // FIXME: add some akin to EmitLValueAlignmentAssumption(E, V);
    return cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
  }

  mlir::Value VisitCoawaitExpr(CoawaitExpr *s) {
    return cgf.emitCoawaitExpr(*s).getValue();
  }

  mlir::Value VisitCoyieldExpr(CoyieldExpr *e) {
    return cgf.emitCoyieldExpr(*e).getValue();
  }

  mlir::Value VisitUnaryCoawait(const UnaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: unary coawait");
    return {};
  }

  /// Load the scalar stored at an already-formed l-value.
  mlir::Value emitLoadOfLValue(LValue lv, SourceLocation loc) {
    return cgf.emitLoadOfLValue(lv, loc).getValue();
  }
198
  // l-values
  mlir::Value VisitDeclRefExpr(DeclRefExpr *e) {
    // Constant-foldable references (e.g. enumerators) are emitted directly
    // as constants rather than as loads.
    if (CIRGenFunction::ConstantEmission constant = cgf.tryEmitAsConstant(e))
      return cgf.emitScalarConstant(constant, e);

    return emitLoadOfLValue(e);
  }

  /// GNU address-of-label (&&label): emit a cir.blockaddress and record it
  /// so the referenced cir.label can be resolved once it has been emitted.
  mlir::Value VisitAddrLabelExpr(const AddrLabelExpr *e) {
    auto func = cast<cir::FuncOp>(cgf.curFn);
    cir::BlockAddrInfoAttr blockInfoAttr = cir::BlockAddrInfoAttr::get(
        &cgf.getMLIRContext(), func.getSymName(), e->getLabel()->getName());
    cir::BlockAddressOp blockAddressOp = cir::BlockAddressOp::create(
        builder, cgf.getLoc(e->getSourceRange()), cgf.convertType(e->getType()),
        blockInfoAttr);
    cir::LabelOp resolvedLabel = cgf.cgm.lookupBlockAddressInfo(blockInfoAttr);
    if (!resolvedLabel) {
      cgf.cgm.mapUnresolvedBlockAddress(blockAddressOp);
      // Still add the op to maintain insertion order it will be resolved in
      // resolveBlockAddresses
      cgf.cgm.mapResolvedBlockAddress(blockAddressOp, nullptr);
    } else {
      cgf.cgm.mapResolvedBlockAddress(blockAddressOp, resolvedLabel);
    }
    cgf.instantiateIndirectGotoBlock();
    return blockAddressOp;
  }

  mlir::Value VisitIntegerLiteral(const IntegerLiteral *e) {
    mlir::Type type = cgf.convertType(e->getType());
    return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()),
                                   cir::IntAttr::get(type, e->getValue()));
  }

  mlir::Value VisitFixedPointLiteral(const FixedPointLiteral *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "ScalarExprEmitter: fixed point literal");
    return {};
  }

  mlir::Value VisitFloatingLiteral(const FloatingLiteral *e) {
    mlir::Type type = cgf.convertType(e->getType());
    assert(mlir::isa<cir::FPTypeInterface>(type) &&
           "expect floating-point type");
    return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()),
                                   cir::FPAttr::get(type, e->getValue()));
  }

  /// Character literals are emitted as integer constants of their type.
  mlir::Value VisitCharacterLiteral(const CharacterLiteral *e) {
    mlir::Type ty = cgf.convertType(e->getType());
    auto init = cir::IntAttr::get(ty, e->getValue());
    return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()), init);
  }

  mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *e) {
    return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
  }

  /// T() for a scalar T is value-initialization, i.e. the null value.
  mlir::Value VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *e) {
    if (e->getType()->isVoidType())
      return {};

    return emitNullValue(e->getType(), cgf.getLoc(e->getSourceRange()));
  }

  /// GNU __null is the null value of its type.
  mlir::Value VisitGNUNullExpr(const GNUNullExpr *e) {
    return emitNullValue(e->getType(), cgf.getLoc(e->getSourceRange()));
  }
267
  mlir::Value VisitOffsetOfExpr(OffsetOfExpr *e);

  /// sizeof...(pack) is a compile-time constant: the pack length.
  mlir::Value VisitSizeOfPackExpr(SizeOfPackExpr *e) {
    return builder.getConstInt(cgf.getLoc(e->getExprLoc()),
                               convertType(e->getType()), e->getPackLength());
  }
  mlir::Value VisitPseudoObjectExpr(PseudoObjectExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: pseudo object");
    return {};
  }
  mlir::Value VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "ScalarExprEmitter: sycl unique stable name");
    return {};
  }
283 mlir::Value VisitEmbedExpr(EmbedExpr *e) {
284 assert(e->getDataElementCount() == 1);
285 auto it = e->begin();
286 llvm::APInt value = (*it)->getValue();
287 return builder.getConstInt(cgf.getLoc(e->getExprLoc()), value,
289 }
  mlir::Value VisitOpaqueValueExpr(OpaqueValueExpr *e) {
    // A glvalue opaque value refers to memory; load through its mapped
    // l-value.
    if (e->isGLValue())
      return emitLoadOfLValue(cgf.getOrCreateOpaqueLValueMapping(e),
                              e->getExprLoc());

    // Otherwise, assume the mapping is the scalar directly.
    return cgf.getOrCreateOpaqueRValueMapping(e).getValue();
  }

  mlir::Value VisitObjCSelectorExpr(ObjCSelectorExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc selector");
    return {};
  }
  mlir::Value VisitObjCProtocolExpr(ObjCProtocolExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc protocol");
    return {};
  }
  mlir::Value VisitObjCIVarRefExpr(ObjCIvarRefExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc ivar ref");
    return {};
  }
  mlir::Value VisitObjCMessageExpr(ObjCMessageExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc message");
    return {};
  }
  mlir::Value VisitObjCIsaExpr(ObjCIsaExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc isa");
    return {};
  }
  mlir::Value VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "ScalarExprEmitter: objc availability check");
    return {};
  }

  mlir::Value VisitMatrixSubscriptExpr(MatrixSubscriptExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "ScalarExprEmitter: matrix subscript");
    return {};
  }

  mlir::Value VisitCastExpr(CastExpr *e);
  mlir::Value VisitCallExpr(const CallExpr *e);

  /// GNU statement expression ({ ...; last; }): emit the compound statement,
  /// materializing its trailing value into a temporary when non-void.
  mlir::Value VisitStmtExpr(StmtExpr *e) {
    CIRGenFunction::StmtExprEvaluation eval(cgf);
    if (e->getType()->isVoidType()) {
      (void)cgf.emitCompoundStmt(*e->getSubStmt());
      return {};
    }

    Address retAlloca =
        cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
    (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca);

    return cgf.emitLoadOfScalar(cgf.makeAddrLValue(retAlloca, e->getType()),
                                e->getExprLoc());
  }
348
  mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
    ignoreResultAssign = false;

    // Subscripting a vector value is an element extraction, not a memory
    // access.
    if (e->getBase()->getType()->isVectorType()) {

      const mlir::Location loc = cgf.getLoc(e->getSourceRange());
      const mlir::Value vecValue = Visit(e->getBase());
      const mlir::Value indexValue = Visit(e->getIdx());
      return cir::VecExtractOp::create(cgf.builder, loc, vecValue, indexValue);
    }
    // Just load the lvalue formed by the subscript expression.
    return emitLoadOfLValue(e);
  }
363
364 mlir::Value VisitShuffleVectorExpr(ShuffleVectorExpr *e) {
365 if (e->getNumSubExprs() == 2) {
366 // The undocumented form of __builtin_shufflevector.
367 mlir::Value inputVec = Visit(e->getExpr(0));
368 mlir::Value indexVec = Visit(e->getExpr(1));
369 return cir::VecShuffleDynamicOp::create(
370 cgf.builder, cgf.getLoc(e->getSourceRange()), inputVec, indexVec);
371 }
372
373 mlir::Value vec1 = Visit(e->getExpr(0));
374 mlir::Value vec2 = Visit(e->getExpr(1));
375
376 // The documented form of __builtin_shufflevector, where the indices are
377 // a variable number of integer constants. The constants will be stored
378 // in an ArrayAttr.
379 SmallVector<mlir::Attribute, 8> indices;
380 for (unsigned i = 2; i < e->getNumSubExprs(); ++i) {
381 indices.push_back(
382 cir::IntAttr::get(cgf.builder.getSInt64Ty(),
383 e->getExpr(i)
384 ->EvaluateKnownConstInt(cgf.getContext())
385 .getSExtValue()));
386 }
387
388 return cir::VecShuffleOp::create(cgf.builder,
389 cgf.getLoc(e->getSourceRange()),
390 cgf.convertType(e->getType()), vec1, vec2,
391 cgf.builder.getArrayAttr(indices));
392 }
393
  mlir::Value VisitConvertVectorExpr(ConvertVectorExpr *e) {
    // __builtin_convertvector is an element-wise cast, and is implemented as a
    // regular cast. The back end handles casts of vectors correctly.
    return emitScalarConversion(Visit(e->getSrcExpr()),
                                e->getSrcExpr()->getType(), e->getType(),
                                e->getSourceRange().getBegin());
  }

  mlir::Value VisitExtVectorElementExpr(Expr *e) { return emitLoadOfLValue(e); }

  mlir::Value VisitMatrixElementExpr(Expr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: matrix element");
    return {};
  }

  mlir::Value VisitMemberExpr(MemberExpr *e);

  mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
    return emitLoadOfLValue(e);
  }

  mlir::Value VisitInitListExpr(InitListExpr *e);

  mlir::Value VisitArrayInitIndexExpr(ArrayInitIndexExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "ScalarExprEmitter: array init index");
    return {};
  }

  /// Implicit value-initialization of a scalar is its null value.
  mlir::Value VisitImplicitValueInitExpr(const ImplicitValueInitExpr *e) {
    return emitNullValue(e->getType(), cgf.getLoc(e->getSourceRange()));
  }

  /// Explicit casts share the general cast path.
  mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *e) {
    return VisitCastExpr(e);
  }

  mlir::Value VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *e) {
    return cgf.cgm.emitNullConstant(e->getType(),
                                    cgf.getLoc(e->getSourceRange()));
  }
435
  /// Perform a pointer to boolean conversion.
  mlir::Value emitPointerToBoolConversion(mlir::Value v, QualType qt) {
    // TODO(cir): comparing the ptr to null is done when lowering CIR to LLVM.
    // We might want to have a separate pass for these types of conversions.
    return cgf.getBuilder().createPtrToBoolCast(v);
  }

  /// Convert a floating-point value to bool ("value != 0.0").
  mlir::Value emitFloatToBoolConversion(mlir::Value src, mlir::Location loc) {
    cir::BoolType boolTy = builder.getBoolTy();
    return cir::CastOp::create(builder, loc, boolTy,
                               cir::CastKind::float_to_bool, src);
  }

  /// Convert an integer value to bool ("value != 0").
  mlir::Value emitIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) {
    // Because of the type rules of C, we often end up computing a
    // logical value, then zero extending it to int, then wanting it
    // as a logical value again.
    // TODO: optimize this common case here or leave it for later
    // CIR passes?
    cir::BoolType boolTy = builder.getBoolTy();
    return cir::CastOp::create(builder, loc, boolTy, cir::CastKind::int_to_bool,
                               srcVal);
  }

  /// Convert the specified expression value to a boolean (!cir.bool) truth
  /// value. This is equivalent to "Val != 0".
  mlir::Value emitConversionToBool(mlir::Value src, QualType srcType,
                                   mlir::Location loc) {
    assert(srcType.isCanonical() && "EmitScalarConversion strips typedefs");

    if (srcType->isRealFloatingType())
      return emitFloatToBoolConversion(src, loc);

    if (llvm::isa<MemberPointerType>(srcType)) {
      cgf.getCIRGenModule().errorNYI(loc, "member pointer to bool conversion");
      return builder.getFalse(loc);
    }

    if (srcType->isIntegerType())
      return emitIntToBoolConversion(src, loc);

    // Anything else must be a pointer at this point.
    assert(::mlir::isa<cir::PointerType>(src.getType()));
    return emitPointerToBoolConversion(src, srcType);
  }
480
481 // Emit a conversion from the specified type to the specified destination
482 // type, both of which are CIR scalar types.
483 struct ScalarConversionOpts {
484 bool treatBooleanAsSigned;
485 bool emitImplicitIntegerTruncationChecks;
486 bool emitImplicitIntegerSignChangeChecks;
487
488 ScalarConversionOpts()
489 : treatBooleanAsSigned(false),
490 emitImplicitIntegerTruncationChecks(false),
491 emitImplicitIntegerSignChangeChecks(false) {}
492
493 ScalarConversionOpts(clang::SanitizerSet sanOpts)
494 : treatBooleanAsSigned(false),
495 emitImplicitIntegerTruncationChecks(
496 sanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
497 emitImplicitIntegerSignChangeChecks(
498 sanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
499 };
500
  // Conversion from bool, integral, or floating-point to integral or
  // floating-point. Conversions involving other types are handled elsewhere.
  // Conversion to bool is handled elsewhere because that's a comparison against
  // zero, not a simple cast. This handles both individual scalars and vectors.
  mlir::Value emitScalarCast(mlir::Value src, QualType srcType,
                             QualType dstType, mlir::Type srcTy,
                             mlir::Type dstTy, ScalarConversionOpts opts) {
    assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
           "Internal error: matrix types not handled by this function.");
    assert(!(mlir::isa<mlir::IntegerType>(srcTy) ||
             mlir::isa<mlir::IntegerType>(dstTy)) &&
           "Obsolete code. Don't use mlir::IntegerType with CIR.");

    // Remember the full (possibly vector) result type; the cast kind is
    // selected from the element types below.
    mlir::Type fullDstTy = dstTy;
    if (mlir::isa<cir::VectorType>(srcTy) &&
        mlir::isa<cir::VectorType>(dstTy)) {
      // Use the element types of the vectors to figure out the CastKind.
      srcTy = mlir::dyn_cast<cir::VectorType>(srcTy).getElementType();
      dstTy = mlir::dyn_cast<cir::VectorType>(dstTy).getElementType();
    }

    std::optional<cir::CastKind> castKind;

    if (mlir::isa<cir::BoolType>(srcTy)) {
      if (opts.treatBooleanAsSigned)
        cgf.getCIRGenModule().errorNYI("signed bool");
      if (cgf.getBuilder().isInt(dstTy))
        castKind = cir::CastKind::bool_to_int;
      else if (mlir::isa<cir::FPTypeInterface>(dstTy))
        castKind = cir::CastKind::bool_to_float;
      else
        llvm_unreachable("Internal error: Cast to unexpected type");
    } else if (cgf.getBuilder().isInt(srcTy)) {
      if (cgf.getBuilder().isInt(dstTy))
        castKind = cir::CastKind::integral;
      else if (mlir::isa<cir::FPTypeInterface>(dstTy))
        castKind = cir::CastKind::int_to_float;
      else
        llvm_unreachable("Internal error: Cast to unexpected type");
    } else if (mlir::isa<cir::FPTypeInterface>(srcTy)) {
      if (cgf.getBuilder().isInt(dstTy)) {
        // If we can't recognize overflow as undefined behavior, assume that
        // overflow saturates. This protects against normal optimizations if we
        // are compiling with non-standard FP semantics.
        if (!cgf.cgm.getCodeGenOpts().StrictFloatCastOverflow)
          cgf.getCIRGenModule().errorNYI("strict float cast overflow");
        castKind = cir::CastKind::float_to_int;
      } else if (mlir::isa<cir::FPTypeInterface>(dstTy)) {
        // TODO: split this to createFPExt/createFPTrunc
        return builder.createFloatingCast(src, fullDstTy);
      } else {
        llvm_unreachable("Internal error: Cast to unexpected type");
      }
    } else {
      llvm_unreachable("Internal error: Cast from unexpected type");
    }

    assert(castKind.has_value() && "Internal error: CastKind not set.");
    return builder.createOrFold<cir::CastOp>(src.getLoc(), fullDstTy, *castKind,
                                             src);
  }
563
  /// A substituted non-type template parameter evaluates to its replacement.
  mlir::Value
  VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
    return Visit(e->getReplacement());
  }

  mlir::Value VisitVAArgExpr(VAArgExpr *ve) {
    QualType ty = ve->getType();

    if (ty->isVariablyModifiedType()) {
      cgf.cgm.errorNYI(ve->getSourceRange(),
                       "variably modified types in varargs");
    }

    return cgf.emitVAArg(ve);
  }

  /// Rewritten operators (e.g. from operator<=>) emit their semantic form.
  mlir::Value VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *e) {
    return Visit(e->getSemanticForm());
  }

  mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *e);
  mlir::Value
  VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);

  // Unary Operators.
  // All four pre/post inc/dec visitors funnel through the shared helper.
  mlir::Value VisitUnaryPrePostIncDec(const UnaryOperator *e) {
    LValue lv = cgf.emitLValue(e->getSubExpr());
    return emitScalarPrePostIncDec(e, lv);
  }
  mlir::Value VisitUnaryPostDec(const UnaryOperator *e) {
    return VisitUnaryPrePostIncDec(e);
  }
  mlir::Value VisitUnaryPostInc(const UnaryOperator *e) {
    return VisitUnaryPrePostIncDec(e);
  }
  mlir::Value VisitUnaryPreDec(const UnaryOperator *e) {
    return VisitUnaryPrePostIncDec(e);
  }
  mlir::Value VisitUnaryPreInc(const UnaryOperator *e) {
    return VisitUnaryPrePostIncDec(e);
  }
605 mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv) {
606 if (cgf.getLangOpts().OpenMP)
607 cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec OpenMP");
608
609 QualType type = e->getSubExpr()->getType();
610
611 mlir::Value value;
612 mlir::Value input;
613
614 if (type->getAs<AtomicType>()) {
615 cgf.cgm.errorNYI(e->getSourceRange(), "Atomic inc/dec");
616 // TODO(cir): This is not correct, but it will produce reasonable code
617 // until atomic operations are implemented.
618 value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
619 input = value;
620 } else {
621 value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
622 input = value;
623 }
624
625 // NOTE: When possible, more frequent cases are handled first.
626
627 // Special case of integer increment that we have to check first: bool++.
628 // Due to promotion rules, we get:
629 // bool++ -> bool = bool + 1
630 // -> bool = (int)bool + 1
631 // -> bool = ((int)bool + 1 != 0)
632 // An interesting aspect of this is that increment is always true.
633 // Decrement does not have this property.
634 if (e->isIncrementOp() && type->isBooleanType()) {
635 value = builder.getTrue(cgf.getLoc(e->getExprLoc()));
636 } else if (type->isIntegerType()) {
637 QualType promotedType;
638 [[maybe_unused]] bool canPerformLossyDemotionCheck = false;
639 if (cgf.getContext().isPromotableIntegerType(type)) {
640 promotedType = cgf.getContext().getPromotedIntegerType(type);
641 assert(promotedType != type && "Shouldn't promote to the same type.");
642 canPerformLossyDemotionCheck = true;
643 canPerformLossyDemotionCheck &=
644 cgf.getContext().getCanonicalType(type) !=
645 cgf.getContext().getCanonicalType(promotedType);
646 canPerformLossyDemotionCheck &=
647 type->isIntegerType() && promotedType->isIntegerType();
648
649 // TODO(cir): Currently, we store bitwidths in CIR types only for
650 // integers. This might also be required for other types.
651
652 assert(
653 (!canPerformLossyDemotionCheck ||
654 type->isSignedIntegerOrEnumerationType() ||
655 promotedType->isSignedIntegerOrEnumerationType() ||
656 mlir::cast<cir::IntType>(cgf.convertType(type)).getWidth() ==
657 mlir::cast<cir::IntType>(cgf.convertType(type)).getWidth()) &&
658 "The following check expects that if we do promotion to different "
659 "underlying canonical type, at least one of the types (either "
660 "base or promoted) will be signed, or the bitwidths will match.");
661 }
662
664 if (e->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
665 value = emitIncDecConsiderOverflowBehavior(e, value);
666 } else {
667 // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
668 value = emitIncOrDec(e, input, /*nsw=*/false);
669 }
670 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
671 QualType type = ptr->getPointeeType();
672 if (cgf.getContext().getAsVariableArrayType(type)) {
673 // VLA types don't have constant size.
674 cgf.cgm.errorNYI(e->getSourceRange(), "Pointer arithmetic on VLA");
675 return {};
676 } else {
677 // For everything else, we can just do a simple increment.
678 mlir::Location loc = cgf.getLoc(e->getSourceRange());
679 CIRGenBuilderTy &builder = cgf.getBuilder();
680 int amount = e->isIncrementOp() ? 1 : -1;
681 mlir::Value amt = builder.getSInt32(amount, loc);
683 value = builder.createPtrStride(loc, value, amt);
684 }
685 } else if (type->isVectorType()) {
686 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec vector");
687 return {};
688 } else if (type->isRealFloatingType()) {
689 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(cgf, e);
690
691 if (type->isHalfType() &&
692 !cgf.getContext().getLangOpts().NativeHalfType) {
693 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec half");
694 return {};
695 }
696
697 if (mlir::isa<cir::SingleType, cir::DoubleType>(value.getType())) {
698 // Create the inc/dec operation.
699 // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
700 value = emitIncOrDec(e, value);
701 } else {
702 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fp type");
703 return {};
704 }
705 } else if (type->isFixedPointType()) {
706 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fixed point");
707 return {};
708 } else {
709 assert(type->castAs<ObjCObjectPointerType>());
710 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec ObjectiveC pointer");
711 return {};
712 }
713
714 CIRGenFunction::SourceLocRAIIObject sourceloc{
715 cgf, cgf.getLoc(e->getSourceRange())};
716
717 // Store the updated result through the lvalue
718 if (lv.isBitField())
719 return cgf.emitStoreThroughBitfieldLValue(RValue::get(value), lv);
720 else
721 cgf.emitStoreThroughLValue(RValue::get(value), lv);
722
723 // If this is a postinc, return the value read from memory, otherwise use
724 // the updated value.
725 return e->isPrefix() ? value : input;
726 }
727
  /// Emit inc/dec for a signed integer, honoring the configured signed
  /// overflow behavior (-fwrapv / default UB / -ftrapv).
  mlir::Value emitIncDecConsiderOverflowBehavior(const UnaryOperator *e,
                                                 mlir::Value inVal) {
    switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      // Overflow is defined to wrap; no nsw flag.
      return emitIncOrDec(e, inVal, /*nsw=*/false);
    case LangOptions::SOB_Undefined:
      return emitIncOrDec(e, inVal, /*nsw=*/true);
    case LangOptions::SOB_Trapping:
      if (!e->canOverflow())
        return emitIncOrDec(e, inVal, /*nsw=*/true);
      cgf.cgm.errorNYI(e->getSourceRange(), "inc/def overflow SOB_Trapping");
      return {};
    }
    llvm_unreachable("Unexpected signed overflow behavior kind");
  }
744
  mlir::Value VisitUnaryAddrOf(const UnaryOperator *e) {
    // Taking the address of a member yields a member-pointer constant.
    if (llvm::isa<MemberPointerType>(e->getType()))
      return cgf.cgm.emitMemberPointerConstant(e);

    return cgf.emitLValue(e->getSubExpr()).getPointer();
  }

  mlir::Value VisitUnaryDeref(const UnaryOperator *e) {
    if (e->getType()->isVoidType())
      return Visit(e->getSubExpr()); // the actual value should be unused
    return emitLoadOfLValue(e);
  }

  mlir::Value VisitUnaryPlus(const UnaryOperator *e) {
    // Evaluate in the (possibly promoted) type, then narrow back if needed.
    QualType promotionType = getPromotionType(e->getSubExpr()->getType());
    mlir::Value result = VisitUnaryPlus(e, promotionType);
    if (result && !promotionType.isNull())
      return emitUnPromotedValue(result, e->getType());
    return result;
  }

  mlir::Value VisitUnaryPlus(const UnaryOperator *e, QualType promotionType) {
    ignoreResultAssign = false;
    if (!promotionType.isNull())
      return cgf.emitPromotedScalarExpr(e->getSubExpr(), promotionType);
    return Visit(e->getSubExpr());
  }

  mlir::Value VisitUnaryMinus(const UnaryOperator *e) {
    // Evaluate in the (possibly promoted) type, then narrow back if needed.
    QualType promotionType = getPromotionType(e->getSubExpr()->getType());
    mlir::Value result = VisitUnaryMinus(e, promotionType);
    if (result && !promotionType.isNull())
      return emitUnPromotedValue(result, e->getType());
    return result;
  }

  mlir::Value VisitUnaryMinus(const UnaryOperator *e, QualType promotionType) {
    ignoreResultAssign = false;
    mlir::Value operand;
    if (!promotionType.isNull())
      operand = cgf.emitPromotedScalarExpr(e->getSubExpr(), promotionType);
    else
      operand = Visit(e->getSubExpr());

    // TODO(cir): We might have to change this to support overflow trapping.
    // Classic codegen routes unary minus through emitSub to ensure
    // that the overflow behavior is handled correctly.
    bool nsw = e->getType()->isSignedIntegerType() &&
               cgf.getLangOpts().getSignedOverflowBehavior() !=
                   LangOptions::SOB_Defined;

    // NOTE: LLVM codegen will lower this directly to either a FNeg
    // or a Sub instruction. In CIR this will be handled later in LowerToLLVM.
    return builder.createOrFold<cir::MinusOp>(
        cgf.getLoc(e->getSourceRange().getBegin()), operand, nsw);
  }
801
802 mlir::Value emitIncOrDec(const UnaryOperator *e, mlir::Value input,
803 bool nsw = false) {
804 mlir::Location loc = cgf.getLoc(e->getSourceRange().getBegin());
805 return e->isIncrementOp()
806 ? builder.createOrFold<cir::IncOp>(loc, input, nsw)
807 : builder.createOrFold<cir::DecOp>(loc, input, nsw);
808 }
809
  /// Bitwise complement (~).
  mlir::Value VisitUnaryNot(const UnaryOperator *e) {
    ignoreResultAssign = false;
    mlir::Value op = Visit(e->getSubExpr());
    return builder.createOrFold<cir::NotOp>(
        cgf.getLoc(e->getSourceRange().getBegin()), op);
  }

  mlir::Value VisitUnaryLNot(const UnaryOperator *e);

  mlir::Value VisitUnaryReal(const UnaryOperator *e);
  mlir::Value VisitUnaryImag(const UnaryOperator *e);
  mlir::Value VisitRealImag(const UnaryOperator *e,
                            QualType promotionType = QualType());

  /// __extension__ is transparent for code generation.
  mlir::Value VisitUnaryExtension(const UnaryOperator *e) {
    return Visit(e->getSubExpr());
  }
827
  // C++
  mlir::Value VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "ScalarExprEmitter: materialize temporary");
    return {};
  }
  /// Source-location builtins fold to a constant evaluated in context.
  mlir::Value VisitSourceLocExpr(SourceLocExpr *e) {
    ASTContext &ctx = cgf.getContext();
    APValue evaluated =
        e->EvaluateInContext(ctx, cgf.curSourceLocExprScope.getDefaultExpr());
    mlir::Attribute attribute = ConstantEmitter(cgf).emitAbstract(
        e->getLocation(), evaluated, e->getType());
    mlir::TypedAttr typedAttr = mlir::cast<mlir::TypedAttr>(attribute);
    return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()),
                                   typedAttr);
  }
  mlir::Value VisitCXXDefaultArgExpr(CXXDefaultArgExpr *dae) {
    CIRGenFunction::CXXDefaultArgExprScope scope(cgf, dae);
    return Visit(dae->getExpr());
  }
  mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
    CIRGenFunction::CXXDefaultInitExprScope scope(cgf, die);
    return Visit(die->getExpr());
  }

  mlir::Value VisitCXXThisExpr(CXXThisExpr *te) { return cgf.loadCXXThis(); }

  mlir::Value VisitExprWithCleanups(ExprWithCleanups *e);
  mlir::Value VisitCXXNewExpr(const CXXNewExpr *e) {
    return cgf.emitCXXNewExpr(e);
  }
  /// delete-expressions produce no value.
  mlir::Value VisitCXXDeleteExpr(const CXXDeleteExpr *e) {
    cgf.emitCXXDeleteExpr(e);
    return {};
  }
  mlir::Value VisitTypeTraitExpr(const TypeTraitExpr *e) {
    // We diverge slightly from classic codegen here because CIR has stricter
    // typing. In LLVM IR, constant folding covers up some potential type
    // mismatches such as bool-to-int conversions that would fail the verifier
    // in CIR. To make things work, we need to be sure we only emit a bool value
    // if the expression type is bool.
    mlir::Location loc = cgf.getLoc(e->getExprLoc());
    if (e->isStoredAsBoolean()) {
      if (e->getType()->isBooleanType())
        return builder.getBool(e->getBoolValue(), loc);
      assert(e->getType()->isIntegerType() &&
             "Expected int type for TypeTraitExpr");
      return builder.getConstInt(loc, cgf.convertType(e->getType()),
                                 (uint64_t)e->getBoolValue());
    }
    return builder.getConstInt(loc, e->getAPValue().getInt());
  }
  // Concept satisfaction and requires-expressions are resolved by Sema;
  // emit the result as a constant bool.
  mlir::Value
  VisitConceptSpecializationExpr(const ConceptSpecializationExpr *e) {
    return builder.getBool(e->isSatisfied(), cgf.getLoc(e->getExprLoc()));
  }
  mlir::Value VisitRequiresExpr(const RequiresExpr *e) {
    return builder.getBool(e->isSatisfied(), cgf.getLoc(e->getExprLoc()));
  }
  // Array type traits evaluate to an integer constant of the trait's type.
  mlir::Value VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *e) {
    mlir::Type type = cgf.convertType(e->getType());
    mlir::Location loc = cgf.getLoc(e->getExprLoc());
    return builder.getConstInt(loc, type, e->getValue());
  }
  // Expression traits evaluate to a constant bool.
  mlir::Value VisitExpressionTraitExpr(const ExpressionTraitExpr *e) {
    return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
  }
  mlir::Value VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *e) {
    // Not yet implemented.
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "ScalarExprEmitter: cxx pseudo destructor");
    return {};
  }
  mlir::Value VisitCXXThrowExpr(const CXXThrowExpr *e) {
    // throw produces no value; delegate the emission to CIRGenFunction.
    cgf.emitCXXThrowExpr(e);
    return {};
  }

  // noexcept(expr) is evaluated by Sema; emit the result as a constant bool.
  mlir::Value VisitCXXNoexceptExpr(CXXNoexceptExpr *e) {
    return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
  }
908
  /// Emit a conversion from the specified type to the specified destination
  /// type, both of which are CIR scalar types.
  /// TODO: do we need ScalarConversionOpts here? Should be done in another
  /// pass.
  mlir::Value
  emitScalarConversion(mlir::Value src, QualType srcType, QualType dstType,
                       SourceLocation loc,
                       ScalarConversionOpts opts = ScalarConversionOpts()) {
    // All conversions involving fixed point types should be handled by the
    // emitFixedPoint family functions. This is done to prevent bloating up
    // this function more, and although fixed point numbers are represented by
    // integers, we do not want to follow any logic that assumes they should be
    // treated as integers.
    // TODO(leonardchan): When necessary, add another if statement checking for
    // conversions to fixed point types from other types.
    if (srcType->isFixedPointType() || dstType->isFixedPointType()) {
      cgf.getCIRGenModule().errorNYI(loc, "fixed point conversions");
      return {};
    }

    // Compare canonical types so sugar differences don't force a cast.
    srcType = srcType.getCanonicalType();
    dstType = dstType.getCanonicalType();
    if (srcType == dstType) {
      if (opts.emitImplicitIntegerSignChangeChecks)
        cgf.getCIRGenModule().errorNYI(loc,
                                       "implicit integer sign change checks");
      return src;
    }

    if (dstType->isVoidType())
      return {};

    mlir::Type mlirSrcType = src.getType();

    // Handle conversions to bool first, they are special: comparisons against
    // 0.
    if (dstType->isBooleanType())
      return emitConversionToBool(src, srcType, cgf.getLoc(loc));

    mlir::Type mlirDstType = cgf.convertType(dstType);

    if (srcType->isHalfType() &&
        !cgf.getContext().getLangOpts().NativeHalfType) {
      // Cast to FP using the intrinsic if the half type itself isn't supported.
      if (mlir::isa<cir::FPTypeInterface>(mlirDstType)) {
        if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
          cgf.getCIRGenModule().errorNYI(loc,
                                         "cast via llvm.convert.from.fp16");
      } else {
        // Cast to other types through float, using either the intrinsic or
        // FPExt, depending on whether the half type itself is supported (as
        // opposed to operations on half, available with NativeHalfType).
        if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
          cgf.getCIRGenModule().errorNYI(loc,
                                         "cast via llvm.convert.from.fp16");
        // FIXME(cir): For now lets pretend we shouldn't use the conversion
        // intrinsics and insert a cast here unconditionally.
        src = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, src,
                                 cgf.floatTy);
        srcType = cgf.getContext().FloatTy;
        mlirSrcType = cgf.floatTy;
      }
    }

    // TODO(cir): LLVM codegen ignore conversions like int -> uint,
    // is there anything to be done for CIR here?
    if (mlirSrcType == mlirDstType) {
      if (opts.emitImplicitIntegerSignChangeChecks)
        cgf.getCIRGenModule().errorNYI(loc,
                                       "implicit integer sign change checks");
      return src;
    }

    // Handle pointer conversions next: pointers can only be converted to/from
    // other pointers and integers. Check for pointer types in terms of LLVM, as
    // some native types (like Obj-C id) may map to a pointer type.
    if (auto dstPT = dyn_cast<cir::PointerType>(mlirDstType)) {
      cgf.getCIRGenModule().errorNYI(loc, "pointer casts");
      return builder.getNullPtr(dstPT, src.getLoc());
    }

    if (isa<cir::PointerType>(mlirSrcType)) {
      // Must be an ptr to int cast.
      assert(isa<cir::IntType>(mlirDstType) && "not ptr->int?");
      return builder.createPtrToInt(src, mlirDstType);
    }

    // A scalar can be splatted to an extended vector of the same element type
    if (dstType->isExtVectorType() && !srcType->isVectorType()) {
      // Sema should add casts to make sure that the source expression's type
      // is the same as the vector's element type (sans qualifiers)
      assert(dstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
                 srcType.getTypePtr() &&
             "Splatted expr doesn't match with vector element type?");

      cgf.getCIRGenModule().errorNYI(loc, "vector splatting");
      return {};
    }

    if (srcType->isMatrixType() && dstType->isMatrixType()) {
      cgf.getCIRGenModule().errorNYI(loc,
                                     "matrix type to matrix type conversion");
      return {};
    }
    assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
           "Internal error: conversion between matrix type and scalar type");

    // Finally, we have the arithmetic types or vectors of arithmetic types.
    mlir::Value res = nullptr;
    mlir::Type resTy = mlirDstType;

    res = emitScalarCast(src, srcType, dstType, mlirSrcType, mlirDstType, opts);

    if (mlirDstType != resTy) {
      if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
        cgf.getCIRGenModule().errorNYI(loc, "cast via llvm.convert.to.fp16");
      }
      // FIXME(cir): For now we never use FP16 conversion intrinsics even if
      // required by the target. Change that once this is implemented
      res = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, res,
                               resTy);
    }

    if (opts.emitImplicitIntegerTruncationChecks)
      cgf.getCIRGenModule().errorNYI(loc, "implicit integer truncation checks");

    if (opts.emitImplicitIntegerSignChangeChecks)
      cgf.getCIRGenModule().errorNYI(loc,
                                     "implicit integer sign change checks");

    return res;
  }
1042
1043 BinOpInfo emitBinOps(const BinaryOperator *e,
1044 QualType promotionType = QualType()) {
1045 ignoreResultAssign = false;
1046 BinOpInfo result;
1047 result.lhs = cgf.emitPromotedScalarExpr(e->getLHS(), promotionType);
1048 result.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionType);
1049 if (!promotionType.isNull())
1050 result.fullType = promotionType;
1051 else
1052 result.fullType = e->getType();
1053 result.compType = result.fullType;
1054 if (const auto *vecType = dyn_cast_or_null<VectorType>(result.fullType)) {
1055 result.compType = vecType->getElementType();
1056 }
1057 result.opcode = e->getOpcode();
1058 result.loc = e->getSourceRange();
1059 // TODO(cir): Result.FPFeatures
1060 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(cgf, e);
1061 result.e = e;
1062 return result;
1063 }
1064
  // Emitters for the individual binary operations; used both by the
  // VisitBin* handlers generated by HANDLEBINOP below and by compound
  // assignments.
  mlir::Value emitMul(const BinOpInfo &ops);
  mlir::Value emitDiv(const BinOpInfo &ops);
  mlir::Value emitRem(const BinOpInfo &ops);
  mlir::Value emitAdd(const BinOpInfo &ops);
  mlir::Value emitSub(const BinOpInfo &ops);
  mlir::Value emitShl(const BinOpInfo &ops);
  mlir::Value emitShr(const BinOpInfo &ops);
  mlir::Value emitAnd(const BinOpInfo &ops);
  mlir::Value emitXor(const BinOpInfo &ops);
  mlir::Value emitOr(const BinOpInfo &ops);

  // Compound assignment: the lvalue form returns the LHS lvalue and passes
  // the scalar result back through 'result'; the value form wraps it.
  LValue emitCompoundAssignLValue(
      const CompoundAssignOperator *e,
      mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &),
      mlir::Value &result);
  mlir::Value
  emitCompoundAssign(const CompoundAssignOperator *e,
                     mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &));
1083
1084 // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM
1085 // codegen.
1086 QualType getPromotionType(QualType ty) {
1087 const clang::ASTContext &ctx = cgf.getContext();
1088 if (auto *complexTy = ty->getAs<ComplexType>()) {
1089 QualType elementTy = complexTy->getElementType();
1090 if (elementTy.UseExcessPrecision(ctx))
1091 return ctx.getComplexType(ctx.FloatTy);
1092 }
1093
1094 if (ty.UseExcessPrecision(cgf.getContext())) {
1095 if (auto *vt = ty->getAs<VectorType>()) {
1096 unsigned numElements = vt->getNumElements();
1097 return ctx.getVectorType(ctx.FloatTy, numElements, vt->getVectorKind());
1098 }
1099 return cgf.getContext().FloatTy;
1100 }
1101
1102 return QualType();
1103 }
1104
// Binary operators and binary compound assignment operators.
// Each HANDLEBINOP(OP) expands to VisitBinOP (promote operands, emit the
// operation, un-promote the result) and VisitBinOPAssign (compound
// assignment routed through emitCompoundAssign).
#define HANDLEBINOP(OP)                                                        \
  mlir::Value VisitBin##OP(const BinaryOperator *e) {                          \
    QualType promotionTy = getPromotionType(e->getType());                     \
    auto result = emit##OP(emitBinOps(e, promotionTy));                        \
    if (result && !promotionTy.isNull())                                       \
      result = emitUnPromotedValue(result, e->getType());                      \
    return result;                                                             \
  }                                                                            \
  mlir::Value VisitBin##OP##Assign(const CompoundAssignOperator *e) {          \
    return emitCompoundAssign(e, &ScalarExprEmitter::emit##OP);                \
  }

  HANDLEBINOP(Mul)
  HANDLEBINOP(Div)
  HANDLEBINOP(Rem)
  HANDLEBINOP(Add)
  HANDLEBINOP(Sub)
  HANDLEBINOP(Shl)
  HANDLEBINOP(Shr)
  HANDLEBINOP(Xor)
#undef HANDLEBINOP
1129
  /// Emit a comparison. Operands are compared with cir.cmp (cir.vec.cmp for
  /// vectors), and the bool result is converted to the expression's type.
  mlir::Value emitCmp(const BinaryOperator *e) {
    ignoreResultAssign = false;
    const mlir::Location loc = cgf.getLoc(e->getExprLoc());
    mlir::Value result;
    QualType lhsTy = e->getLHS()->getType();
    QualType rhsTy = e->getRHS()->getType();

    // Map the AST comparison opcode onto the CIR comparison kind.
    auto clangCmpToCIRCmp =
        [](clang::BinaryOperatorKind clangCmp) -> cir::CmpOpKind {
      switch (clangCmp) {
      case BO_LT:
        return cir::CmpOpKind::lt;
      case BO_GT:
        return cir::CmpOpKind::gt;
      case BO_LE:
        return cir::CmpOpKind::le;
      case BO_GE:
        return cir::CmpOpKind::ge;
      case BO_EQ:
        return cir::CmpOpKind::eq;
      case BO_NE:
        return cir::CmpOpKind::ne;
      default:
        llvm_unreachable("unsupported comparison kind for cir.cmp");
      }
    };

    cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
    if (lhsTy->getAs<MemberPointerType>()) {
      // Member pointers only support equality comparisons.
      assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
      mlir::Value lhs = cgf.emitScalarExpr(e->getLHS());
      mlir::Value rhs = cgf.emitScalarExpr(e->getRHS());
      result = builder.createCompare(loc, kind, lhs, rhs);
    } else if (!lhsTy->isAnyComplexType() && !rhsTy->isAnyComplexType()) {
      BinOpInfo boInfo = emitBinOps(e);
      mlir::Value lhs = boInfo.lhs;
      mlir::Value rhs = boInfo.rhs;

      if (lhsTy->isVectorType()) {
        if (!e->getType()->isVectorType()) {
          // If AltiVec, the comparison results in a numeric type, so we use
          // intrinsics comparing vectors and giving 0 or 1 as a result
          cgf.cgm.errorNYI(loc, "AltiVec comparison");
        } else {
          // Other kinds of vectors. Element-wise comparison returning
          // a vector.
          result = cir::VecCmpOp::create(builder, cgf.getLoc(boInfo.loc),
                                         cgf.convertType(boInfo.fullType), kind,
                                         boInfo.lhs, boInfo.rhs);
        }
      } else if (boInfo.isFixedPointOp()) {
        cgf.cgm.errorNYI(loc, "fixed point comparisons");
        result = builder.getBool(false, loc);
      } else {
        // integers and pointers
        if (cgf.cgm.getCodeGenOpts().StrictVTablePointers &&
            mlir::isa<cir::PointerType>(lhs.getType()) &&
            mlir::isa<cir::PointerType>(rhs.getType())) {
          cgf.cgm.errorNYI(loc, "strict vtable pointer comparisons");
        }
        result = builder.createCompare(loc, kind, lhs, rhs);
      }
    } else {
      // Complex comparisons: a scalar operand is first promoted to a complex
      // value with a zero imaginary part.
      assert((e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE) &&
             "Complex Comparison: can only be an equality comparison");

      mlir::Value lhs;
      if (lhsTy->isAnyComplexType()) {
        lhs = cgf.emitComplexExpr(e->getLHS());
      } else {
        mlir::Value lhsReal = Visit(e->getLHS());
        mlir::Value lhsImag = builder.getNullValue(convertType(lhsTy), loc);
        lhs = builder.createComplexCreate(loc, lhsReal, lhsImag);
      }

      mlir::Value rhs;
      if (rhsTy->isAnyComplexType()) {
        rhs = cgf.emitComplexExpr(e->getRHS());
      } else {
        mlir::Value rhsReal = Visit(e->getRHS());
        mlir::Value rhsImag = builder.getNullValue(convertType(rhsTy), loc);
        rhs = builder.createComplexCreate(loc, rhsReal, rhsImag);
      }

      result = builder.createCompare(loc, kind, lhs, rhs);
    }

    // cir.cmp produces a bool; convert it to the expression's result type
    // (e.g. 'int' in C).
    return emitScalarConversion(result, cgf.getContext().BoolTy, e->getType(),
                                e->getExprLoc());
  }
1222
// Comparisons.
// Every relational/equality operator funnels into emitCmp, which handles
// operand emission and result conversion uniformly.
#define VISITCOMP(CODE)                                                        \
  mlir::Value VisitBin##CODE(const BinaryOperator *E) { return emitCmp(E); }
  VISITCOMP(LT)
  VISITCOMP(GT)
  VISITCOMP(LE)
  VISITCOMP(GE)
  VISITCOMP(EQ)
  VISITCOMP(NE)
#undef VISITCOMP
1233
  // Scalar assignment: evaluate the RHS, store it through the LHS lvalue,
  // and produce the appropriate result value for the language mode.
  mlir::Value VisitBinAssign(const BinaryOperator *e) {
    const bool ignore = std::exchange(ignoreResultAssign, false);

    mlir::Value rhs;
    LValue lhs;

    switch (e->getLHS()->getType().getObjCLifetime()) {
      break;
      // __block variables need to have the rhs evaluated first, plus this
      // should improve codegen just a little.
      rhs = Visit(e->getRHS());
      // TODO(cir): This needs to be emitCheckedLValue() once we support
      // sanitizers
      lhs = cgf.emitLValue(e->getLHS());

      // Store the value into the LHS. Bit-fields are handled specially because
      // the result is altered by the store, i.e., [C99 6.5.16p1]
      // 'An assignment expression has the value of the left operand after the
      // assignment...'.
      if (lhs.isBitField()) {
          cgf, cgf.getLoc(e->getSourceRange())};
        rhs = cgf.emitStoreThroughBitfieldLValue(RValue::get(rhs), lhs);
      } else {
        cgf.emitNullabilityCheck(lhs, rhs, e->getExprLoc());
          cgf, cgf.getLoc(e->getSourceRange())};
        cgf.emitStoreThroughLValue(RValue::get(rhs), lhs);
      }
    }

    // If the result is clearly ignored, return now.
    if (ignore)
      return nullptr;

    // The result of an assignment in C is the assigned r-value.
    if (!cgf.getLangOpts().CPlusPlus)
      return rhs;

    // If the lvalue is non-volatile, return the computed value of the
    // assignment.
    if (!lhs.isVolatile())
      return rhs;

    // Otherwise, reload the value.
    return emitLoadOfLValue(lhs, e->getExprLoc());
  }
1288
  // Comma operator: evaluate the LHS for side effects only; the value of the
  // whole expression is the RHS.
  mlir::Value VisitBinComma(const BinaryOperator *e) {
    cgf.emitIgnoredExpr(e->getLHS());
    // NOTE: We don't need to EnsureInsertPoint() like LLVM codegen.
    return Visit(e->getRHS());
  }
1294
  mlir::Value VisitBinLAnd(const clang::BinaryOperator *e) {
    if (e->getType()->isVectorType()) {
      // Vector &&: not short-circuiting. Compare both operands against zero
      // element-wise and AND the resulting masks.
      mlir::Location loc = cgf.getLoc(e->getExprLoc());
      mlir::Type lhsTy = cgf.convertType(e->getLHS()->getType());
      mlir::Value zeroVec = builder.getNullValue(lhsTy, loc);

      mlir::Value lhs = Visit(e->getLHS());
      mlir::Value rhs = Visit(e->getRHS());

      auto cmpOpKind = cir::CmpOpKind::ne;
      mlir::Type resTy = cgf.convertType(e->getType());
      lhs = cir::VecCmpOp::create(builder, loc, resTy, cmpOpKind, lhs, zeroVec);
      rhs = cir::VecCmpOp::create(builder, loc, resTy, cmpOpKind, rhs, zeroVec);
      mlir::Value vecOr = builder.createAnd(loc, lhs, rhs);
      return builder.createIntCast(vecOr, resTy);
    }

    mlir::Type resTy = cgf.convertType(e->getType());
    mlir::Location loc = cgf.getLoc(e->getExprLoc());

    CIRGenFunction::ConditionalEvaluation eval(cgf);

    // Scalar &&: a ternary that evaluates the RHS only when the LHS is true;
    // otherwise the result is the constant false.
    mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
    auto resOp = cir::TernaryOp::create(
        builder, loc, lhsCondV, /*trueBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          CIRGenFunction::LexicalScope lexScope{cgf, loc,
                                                b.getInsertionBlock()};
          cgf.curLexScope->setAsTernary();
          mlir::Value res = cgf.evaluateExprAsBool(e->getRHS());
          lexScope.forceCleanup({&res});
          cir::YieldOp::create(b, loc, res);
        },
        /*falseBuilder*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          CIRGenFunction::LexicalScope lexScope{cgf, loc,
                                                b.getInsertionBlock()};
          cgf.curLexScope->setAsTernary();
          auto res = cir::ConstantOp::create(b, loc, builder.getFalseAttr());
          cir::YieldOp::create(b, loc, res.getRes());
        });
    return maybePromoteBoolResult(resOp.getResult(), resTy);
  }
1339
  mlir::Value VisitBinLOr(const clang::BinaryOperator *e) {
    if (e->getType()->isVectorType()) {
      // Vector ||: not short-circuiting. Compare both operands against zero
      // element-wise and OR the resulting masks.
      mlir::Location loc = cgf.getLoc(e->getExprLoc());
      mlir::Type lhsTy = cgf.convertType(e->getLHS()->getType());
      mlir::Value zeroVec = builder.getNullValue(lhsTy, loc);

      mlir::Value lhs = Visit(e->getLHS());
      mlir::Value rhs = Visit(e->getRHS());

      auto cmpOpKind = cir::CmpOpKind::ne;
      mlir::Type resTy = cgf.convertType(e->getType());
      lhs = cir::VecCmpOp::create(builder, loc, resTy, cmpOpKind, lhs, zeroVec);
      rhs = cir::VecCmpOp::create(builder, loc, resTy, cmpOpKind, rhs, zeroVec);
      mlir::Value vecOr = builder.createOr(loc, lhs, rhs);
      return builder.createIntCast(vecOr, resTy);
    }

    mlir::Type resTy = cgf.convertType(e->getType());
    mlir::Location loc = cgf.getLoc(e->getExprLoc());

    CIRGenFunction::ConditionalEvaluation eval(cgf);

    // Scalar ||: a ternary that yields constant true when the LHS is true
    // and evaluates the RHS only otherwise.
    mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
    auto resOp = cir::TernaryOp::create(
        builder, loc, lhsCondV, /*trueBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          CIRGenFunction::LexicalScope lexScope{cgf, loc,
                                                b.getInsertionBlock()};
          cgf.curLexScope->setAsTernary();
          auto res = cir::ConstantOp::create(b, loc, builder.getTrueAttr());
          cir::YieldOp::create(b, loc, res.getRes());
        },
        /*falseBuilder*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          CIRGenFunction::LexicalScope lexScope{cgf, loc,
                                                b.getInsertionBlock()};
          cgf.curLexScope->setAsTernary();
          mlir::Value res = cgf.evaluateExprAsBool(e->getRHS());
          lexScope.forceCleanup({&res});
          cir::YieldOp::create(b, loc, res);
        });

    return maybePromoteBoolResult(resOp.getResult(), resTy);
  }
1385
  // Pointer-to-member access (".*" and "->*"): emit as an lvalue load.
  mlir::Value VisitBinPtrMemD(const BinaryOperator *e) {
    return emitLoadOfLValue(e);
  }

  mlir::Value VisitBinPtrMemI(const BinaryOperator *e) {
    return emitLoadOfLValue(e);
  }
1393
  // Other Operators.
  mlir::Value VisitBlockExpr(const BlockExpr *e) {
    // Blocks are not yet supported.
    cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: block");
    return {};
  }

  // __builtin_choose_expr: the chosen arm is already resolved in the AST.
  mlir::Value VisitChooseExpr(ChooseExpr *e) {
    return Visit(e->getChosenSubExpr());
  }

  // Objective-C literal expressions are not yet supported.
  mlir::Value VisitObjCStringLiteral(const ObjCStringLiteral *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "ScalarExprEmitter: objc string literal");
    return {};
  }
  mlir::Value VisitObjCBoxedExpr(ObjCBoxedExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: objc boxed");
    return {};
  }
  mlir::Value VisitObjCArrayLiteral(ObjCArrayLiteral *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "ScalarExprEmitter: objc array literal");
    return {};
  }
  mlir::Value VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "ScalarExprEmitter: objc dictionary literal");
    return {};
  }

  mlir::Value VisitAsTypeExpr(AsTypeExpr *e) {
    // __builtin_astype is not yet supported.
    cgf.cgm.errorNYI(e->getSourceRange(), "ScalarExprEmitter: as type");
    return {};
  }

  mlir::Value VisitAtomicExpr(AtomicExpr *e) {
    // Delegate to the atomic emitter and extract the scalar result.
    return cgf.emitAtomicExpr(e).getValue();
  }
1432};
1433
// Emit a compound assignment (e.g. +=): load the LHS, convert it to the
// computation type, apply 'func', convert back, and store the result.
// Returns the LHS lvalue; the scalar result comes back through 'result'.
LValue ScalarExprEmitter::emitCompoundAssignLValue(
    const CompoundAssignOperator *e,
    mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &),
    mlir::Value &result) {
    return cgf.emitScalarCompoundAssignWithComplex(e, result);

  QualType lhsTy = e->getLHS()->getType();
  BinOpInfo opInfo;

  // Emit the RHS first. __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.

  // The computation happens in the (possibly excess-precision promoted)
  // computation result type.
  QualType promotionTypeCR = getPromotionType(e->getComputationResultType());
  if (promotionTypeCR.isNull())
    promotionTypeCR = e->getComputationResultType();

  QualType promotionTypeLHS = getPromotionType(e->getComputationLHSType());
  QualType promotionTypeRHS = getPromotionType(e->getRHS()->getType());

  if (!promotionTypeRHS.isNull())
    opInfo.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS);
  else
    opInfo.rhs = Visit(e->getRHS());

  opInfo.fullType = promotionTypeCR;
  opInfo.compType = opInfo.fullType;
  // Vectors compute on their element type.
  if (const auto *vecType = dyn_cast_or_null<VectorType>(opInfo.fullType))
    opInfo.compType = vecType->getElementType();
  opInfo.opcode = e->getOpcode();
  opInfo.fpFeatures = e->getFPFeaturesInEffect(cgf.getLangOpts());
  opInfo.e = e;
  opInfo.loc = e->getSourceRange();

  // Load/convert the LHS
  LValue lhsLV = cgf.emitLValue(e->getLHS());

  if (lhsTy->getAs<AtomicType>()) {
    cgf.cgm.errorNYI(result.getLoc(), "atomic lvalue assign");
    return LValue();
  }

  opInfo.lhs = emitLoadOfLValue(lhsLV, e->getExprLoc());

  CIRGenFunction::SourceLocRAIIObject sourceloc{
      cgf, cgf.getLoc(e->getSourceRange())};
  SourceLocation loc = e->getExprLoc();
  // Bring the loaded LHS into the type the computation is carried out in.
  if (!promotionTypeLHS.isNull())
    opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy, promotionTypeLHS, loc);
  else
    opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy,
                                      e->getComputationLHSType(), loc);

  // Expand the binary operator.
  result = (this->*func)(opInfo);

  // Convert the result back to the LHS type,
  // potentially with Implicit Conversion sanitizer check.
  result = emitScalarConversion(result, promotionTypeCR, lhsTy, loc,
                                ScalarConversionOpts(cgf.sanOpts));

  // Store the result value into the LHS lvalue. Bit-fields are handled
  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (lhsLV.isBitField())
    cgf.emitStoreThroughBitfieldLValue(RValue::get(result), lhsLV);
  else
    cgf.emitStoreThroughLValue(RValue::get(result), lhsLV);

  if (cgf.getLangOpts().OpenMP)
    cgf.cgm.errorNYI(e->getSourceRange(), "openmp");

  return lhsLV;
}
1509
1510mlir::Value ScalarExprEmitter::emitComplexToScalarConversion(mlir::Location lov,
1511 mlir::Value value,
1512 CastKind kind,
1513 QualType destTy) {
1514 cir::CastKind castOpKind;
1515 switch (kind) {
1516 case CK_FloatingComplexToReal:
1517 castOpKind = cir::CastKind::float_complex_to_real;
1518 break;
1519 case CK_IntegralComplexToReal:
1520 castOpKind = cir::CastKind::int_complex_to_real;
1521 break;
1522 case CK_FloatingComplexToBoolean:
1523 castOpKind = cir::CastKind::float_complex_to_bool;
1524 break;
1525 case CK_IntegralComplexToBoolean:
1526 castOpKind = cir::CastKind::int_complex_to_bool;
1527 break;
1528 default:
1529 llvm_unreachable("invalid complex-to-scalar cast kind");
1530 }
1531
1532 return builder.createCast(lov, castOpKind, value, cgf.convertType(destTy));
1533}
1534
1535mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e,
1536 QualType promotionType) {
1537 e = e->IgnoreParens();
1538 if (const auto *bo = dyn_cast<BinaryOperator>(e)) {
1539 switch (bo->getOpcode()) {
1540#define HANDLE_BINOP(OP) \
1541 case BO_##OP: \
1542 return emit##OP(emitBinOps(bo, promotionType));
1543 HANDLE_BINOP(Add)
1544 HANDLE_BINOP(Sub)
1545 HANDLE_BINOP(Mul)
1546 HANDLE_BINOP(Div)
1547#undef HANDLE_BINOP
1548 default:
1549 break;
1550 }
1551 } else if (const auto *uo = dyn_cast<UnaryOperator>(e)) {
1552 switch (uo->getOpcode()) {
1553 case UO_Imag:
1554 case UO_Real:
1555 return VisitRealImag(uo, promotionType);
1556 case UO_Minus:
1557 return VisitUnaryMinus(uo, promotionType);
1558 case UO_Plus:
1559 return VisitUnaryPlus(uo, promotionType);
1560 default:
1561 break;
1562 }
1563 }
1564 mlir::Value result = Visit(const_cast<Expr *>(e));
1565 if (result) {
1566 if (!promotionType.isNull())
1567 return emitPromotedValue(result, promotionType);
1568 return emitUnPromotedValue(result, e->getType());
1569 }
1570 return result;
1571}
1572
1573mlir::Value ScalarExprEmitter::emitCompoundAssign(
1574 const CompoundAssignOperator *e,
1575 mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &)) {
1576
1577 bool ignore = std::exchange(ignoreResultAssign, false);
1578 mlir::Value rhs;
1579 LValue lhs = emitCompoundAssignLValue(e, func, rhs);
1580
1581 // If the result is clearly ignored, return now.
1582 if (ignore)
1583 return {};
1584
1585 // The result of an assignment in C is the assigned r-value.
1586 if (!cgf.getLangOpts().CPlusPlus)
1587 return rhs;
1588
1589 // If the lvalue is non-volatile, return the computed value of the assignment.
1590 if (!lhs.isVolatile())
1591 return rhs;
1592
1593 // Otherwise, reload the value.
1594 return emitLoadOfLValue(lhs, e->getExprLoc());
1595}
1596
1597mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *e) {
1598 CIRGenFunction::RunCleanupsScope cleanups(cgf);
1599 mlir::Value v = Visit(e->getSubExpr());
1600 // Defend against dominance problems caused by jumps out of expression
1601 // evaluation through the shared cleanup block.
1602 cleanups.forceCleanup({&v});
1603 return v;
1604}
1605
1606} // namespace
1607
// Dispatch a compound assignment to the matching ScalarExprEmitter::emit*
// helper based on the opcode; every non-compound opcode is unreachable here.
LValue
  ScalarExprEmitter emitter(*this, builder);
  mlir::Value result;
  switch (e->getOpcode()) {
#define COMPOUND_OP(Op)                                                        \
  case BO_##Op##Assign:                                                        \
    return emitter.emitCompoundAssignLValue(e, &ScalarExprEmitter::emit##Op,   \
                                            result)
  COMPOUND_OP(Mul);
  COMPOUND_OP(Div);
  COMPOUND_OP(Rem);
  COMPOUND_OP(Add);
  COMPOUND_OP(Sub);
  COMPOUND_OP(Shl);
  COMPOUND_OP(Shr);
  COMPOUND_OP(Xor);
  COMPOUND_OP(Or);
#undef COMPOUND_OP

  case BO_PtrMemD:
  case BO_PtrMemI:
  case BO_Mul:
  case BO_Div:
  case BO_Rem:
  case BO_Add:
  case BO_Sub:
  case BO_Shl:
  case BO_Shr:
  case BO_LT:
  case BO_GT:
  case BO_LE:
  case BO_GE:
  case BO_EQ:
  case BO_NE:
  case BO_Cmp:
  case BO_And:
  case BO_Xor:
  case BO_Or:
  case BO_LAnd:
  case BO_LOr:
  case BO_Assign:
  case BO_Comma:
    llvm_unreachable("Not valid compound assignment operators");
  }
  llvm_unreachable("Unhandled compound assignment operator");
}
1656
/// Emit the computation of the specified expression of scalar type.
/// \p ignoreResultAssign is forwarded to the emitter, where assignment
/// visitors use it to skip producing a result value.
    bool ignoreResultAssign) {
  assert(e && hasScalarEvaluationKind(e->getType()) &&
         "Invalid scalar expression to emit");

  return ScalarExprEmitter(*this, builder, ignoreResultAssign)
      .Visit(const_cast<Expr *>(e));
}
1666
// When a promotion type is supplied, emit through emitPromoted; otherwise
// emit the expression normally.
    QualType promotionType) {
  if (!promotionType.isNull())
    return ScalarExprEmitter(*this, builder).emitPromoted(e, promotionType);
  return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
}
1673
/// Returns true when a null-pointer expression must still be emitted for its
/// potential side effects.
[[maybe_unused]] static bool mustVisitNullValue(const Expr *e) {
  // If a null pointer expression's type is the C++0x nullptr_t and
  // the expression is not a simple literal, it must be evaluated
  // for its potential side effects.
    return false;
  return e->getType()->isNullPtrType();
}
1682
1683/// If \p e is a widened promoted integer, get its base (unpromoted) type.
1684static std::optional<QualType>
1685getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e) {
1686 const Expr *base = e->IgnoreImpCasts();
1687 if (e == base)
1688 return std::nullopt;
1689
1690 QualType baseTy = base->getType();
1691 if (!astContext.isPromotableIntegerType(baseTy) ||
1692 astContext.getTypeSize(baseTy) >= astContext.getTypeSize(e->getType()))
1693 return std::nullopt;
1694
1695 return baseTy;
1696}
1697
1698/// Check if \p e is a widened promoted integer.
1699[[maybe_unused]] static bool isWidenedIntegerOp(const ASTContext &astContext,
1700 const Expr *e) {
1701 return getUnwidenedIntegerType(astContext, e).has_value();
1702}
1703
1704/// Check if we can skip the overflow check for \p Op.
1705[[maybe_unused]] static bool canElideOverflowCheck(const ASTContext &astContext,
1706 const BinOpInfo &op) {
1707 assert((isa<UnaryOperator>(op.e) || isa<BinaryOperator>(op.e)) &&
1708 "Expected a unary or binary operator");
1709
1710 // If the binop has constant inputs and we can prove there is no overflow,
1711 // we can elide the overflow check.
1712 if (!op.mayHaveIntegerOverflow())
1713 return true;
1714
1715 // If a unary op has a widened operand, the op cannot overflow.
1716 if (const auto *uo = dyn_cast<UnaryOperator>(op.e))
1717 return !uo->canOverflow();
1718
1719 // We usually don't need overflow checks for binops with widened operands.
1720 // Multiplication with promoted unsigned operands is a special case.
1721 const auto *bo = cast<BinaryOperator>(op.e);
1722 std::optional<QualType> optionalLHSTy =
1723 getUnwidenedIntegerType(astContext, bo->getLHS());
1724 if (!optionalLHSTy)
1725 return false;
1726
1727 std::optional<QualType> optionalRHSTy =
1728 getUnwidenedIntegerType(astContext, bo->getRHS());
1729 if (!optionalRHSTy)
1730 return false;
1731
1732 QualType lhsTy = *optionalLHSTy;
1733 QualType rhsTy = *optionalRHSTy;
1734
1735 // This is the simple case: binops without unsigned multiplication, and with
1736 // widened operands. No overflow check is needed here.
1737 if ((op.opcode != BO_Mul && op.opcode != BO_MulAssign) ||
1738 !lhsTy->isUnsignedIntegerType() || !rhsTy->isUnsignedIntegerType())
1739 return true;
1740
1741 // For unsigned multiplication the overflow check can be elided if either one
1742 // of the unpromoted types are less than half the size of the promoted type.
1743 unsigned promotedSize = astContext.getTypeSize(op.e->getType());
1744 return (2 * astContext.getTypeSize(lhsTy)) < promotedSize ||
1745 (2 * astContext.getTypeSize(rhsTy)) < promotedSize;
1746}
1747
/// Emit pointer + index arithmetic.
///
/// Handles `ptr + int`, `int + ptr`, and (when isSubtraction) `ptr - int`.
/// Pure pointer-pointer subtraction is handled separately in emitSub.
/// NOTE(review): the opening line of the signature, the cast of op.e to a
/// BinaryOperator (`expr`), the negation of `index` under isSubtraction, and
/// some missing-feature asserts appear to have been lost in extraction —
/// confirm against the upstream file.
                                     const BinOpInfo &op,
                                     bool isSubtraction) {
  // Must have binary (not unary) expr here. Unary pointer
  // increment/decrement doesn't use this path.

  // Operands as already-emitted CIR values plus their AST expressions.
  mlir::Value pointer = op.lhs;
  Expr *pointerOperand = expr->getLHS();
  mlir::Value index = op.rhs;
  Expr *indexOperand = expr->getRHS();

  // In the case of subtraction, the FE has ensured that the LHS is always the
  // pointer. However, addition can have the pointer on either side. We will
  // always have a pointer operand and an integer operand, so if the LHS wasn't
  // a pointer, we need to swap our values.
  if (!isSubtraction && !mlir::isa<cir::PointerType>(pointer.getType())) {
    std::swap(pointer, index);
    std::swap(pointerOperand, indexOperand);
  }
  assert(mlir::isa<cir::PointerType>(pointer.getType()) &&
         "Need a pointer operand");
  assert(mlir::isa<cir::IntType>(index.getType()) && "Need an integer operand");

  // Some versions of glibc and gcc use idioms (particularly in their malloc
  // routines) that add a pointer-sized integer (known to be a pointer value)
  // to a null pointer in order to cast the value back to an integer or as
  // part of a pointer alignment algorithm.  This is undefined behavior, but
  // we'd like to be able to compile programs that use it.
  //
  // Normally, we'd generate a GEP with a null-pointer base here in response
  // to that code, but it's also UB to dereference a pointer created that
  // way. Instead (as an acknowledged hack to tolerate the idiom) we will
  // generate a direct cast of the integer value to a pointer.
  //
  // The idiom (p = nullptr + N) is not met if any of the following are true:
  //
  //   The operation is subtraction.
  //   The index is not pointer-sized.
  //   The pointer type is not byte-sized.
  //
  // NOTE(review): the head of the following condition (presumably
  // BinaryOperator::isNullPointerArithmeticExtension(...)) was lost in
  // extraction — confirm upstream.
          cgf.getContext(), op.opcode, expr->getLHS(), expr->getRHS()))
    return cgf.getBuilder().createIntToPtr(index, pointer.getType());

  // Differently from LLVM codegen, ABI bits for index sizes is handled during
  // LLVM lowering.

  // If this is subtraction, negate the index.
  // NOTE(review): the negation statement itself was lost in extraction.
  if (isSubtraction)


  const PointerType *pointerType =
      pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
    // Only Objective-C object pointers can fail the cast above; not yet
    // implemented in ClangIR.
    cgf.cgm.errorNYI("Objective-C:pointer arithmetic with non-pointer type");
    return nullptr;
  }

  QualType elementType = pointerType->getPointeeType();
  if (cgf.getContext().getAsVariableArrayType(elementType)) {
    // VLA element sizes require runtime scaling; not yet implemented.
    cgf.cgm.errorNYI("variable array type");
    return nullptr;
  }

  // Stride by the pointee type; the target ABI scaling happens at lowering.
  return cir::PtrStrideOp::create(cgf.getBuilder(),
                                  cgf.getLoc(op.e->getExprLoc()),
                                  pointer.getType(), pointer, index);
}
1821
/// Emit a multiplication, choosing the CIR op flavor from the computation
/// type: signed integers honor -f[no-]wrapv via the overflow-behavior switch,
/// floating point uses cir.fmul under the expression's FP options, and the
/// generic cir.mul handles the remaining (e.g. unsigned, vector) cases.
mlir::Value ScalarExprEmitter::emitMul(const BinOpInfo &ops) {
  const mlir::Location loc = cgf.getLoc(ops.loc);
  if (ops.compType->isSignedIntegerOrEnumerationType()) {
    // Each case deliberately falls through when a sanitizer is active so the
    // trapping path is reached.
    switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      // -fwrapv: plain wrapping multiply unless the sanitizer wants a check.
      if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return builder.createMul(loc, ops.lhs, ops.rhs);
      [[fallthrough]];
    case LangOptions::SOB_Undefined:
      // Default C semantics: no-signed-wrap multiply.
      if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return builder.createNSWMul(loc, ops.lhs, ops.rhs);
      [[fallthrough]];
    case LangOptions::SOB_Trapping:
      // Even when trapping, skip the check if overflow is provably impossible.
      if (canElideOverflowCheck(cgf.getContext(), ops))
        return builder.createNSWMul(loc, ops.lhs, ops.rhs);
      cgf.cgm.errorNYI("sanitizers");
    }
  }
  if (ops.fullType->isConstantMatrixType()) {
    cgf.cgm.errorNYI("matrix types");
    return nullptr;
  }
  if (ops.compType->isUnsignedIntegerType() &&
      cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
      !canElideOverflowCheck(cgf.getContext(), ops))
    cgf.cgm.errorNYI("unsigned int overflow sanitizer");

  if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
    // Scope the expression's FP options (contraction, exceptions, ...) around
    // the emitted op.
    CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(cgf, ops.fpFeatures);
    return builder.createFMul(loc, ops.lhs, ops.rhs);
  }

  if (ops.isFixedPointOp()) {
    cgf.cgm.errorNYI("fixed point");
    return nullptr;
  }

  // Fallback: generic multiply (e.g. unsigned integers, vectors).
  return cir::MulOp::create(builder, cgf.getLoc(ops.loc),
                            cgf.convertType(ops.fullType), ops.lhs, ops.rhs);
}
1864mlir::Value ScalarExprEmitter::emitDiv(const BinOpInfo &ops) {
1865 return cir::DivOp::create(builder, cgf.getLoc(ops.loc),
1866 cgf.convertType(ops.fullType), ops.lhs, ops.rhs);
1867}
1868mlir::Value ScalarExprEmitter::emitRem(const BinOpInfo &ops) {
1869 return cir::RemOp::create(builder, cgf.getLoc(ops.loc),
1870 cgf.convertType(ops.fullType), ops.lhs, ops.rhs);
1871}
1872
/// Emit an addition. Pointer + integer forms are delegated to
/// emitPointerArithmetic; otherwise the op flavor is chosen from the
/// computation type, mirroring emitMul/emitSub.
mlir::Value ScalarExprEmitter::emitAdd(const BinOpInfo &ops) {
  // Either side may be the pointer for addition.
  if (mlir::isa<cir::PointerType>(ops.lhs.getType()) ||
      mlir::isa<cir::PointerType>(ops.rhs.getType()))
    return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/false);

  const mlir::Location loc = cgf.getLoc(ops.loc);
  if (ops.compType->isSignedIntegerOrEnumerationType()) {
    // Cases fall through when a sanitizer is active so the trapping path is
    // reached.
    switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
    case LangOptions::SOB_Defined:
      // -fwrapv: wrapping add unless a check is requested.
      if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return builder.createAdd(loc, ops.lhs, ops.rhs);
      [[fallthrough]];
    case LangOptions::SOB_Undefined:
      // Default semantics: no-signed-wrap add.
      if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
      [[fallthrough]];
    case LangOptions::SOB_Trapping:
      if (canElideOverflowCheck(cgf.getContext(), ops))
        return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
      cgf.cgm.errorNYI("sanitizers");
    }
  }
  if (ops.fullType->isConstantMatrixType()) {
    cgf.cgm.errorNYI("matrix types");
    return nullptr;
  }

  if (ops.compType->isUnsignedIntegerType() &&
      cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
      !canElideOverflowCheck(cgf.getContext(), ops))
    cgf.cgm.errorNYI("unsigned int overflow sanitizer");

  if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
    // Apply the expression's FP options around the emitted op.
    CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(cgf, ops.fpFeatures);
    return builder.createFAdd(loc, ops.lhs, ops.rhs);
  }

  if (ops.isFixedPointOp()) {
    cgf.cgm.errorNYI("fixed point");
    return {};
  }

  return builder.createAdd(loc, ops.lhs, ops.rhs);
}
1919
/// Emit a subtraction. Three shapes are handled: scalar - scalar (with the
/// same signed/FP dispatch as emitAdd), pointer - integer (pointer
/// arithmetic), and pointer - pointer (cir.ptr_diff).
mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &ops) {
  const mlir::Location loc = cgf.getLoc(ops.loc);
  // The LHS is always a pointer if either side is.
  if (!mlir::isa<cir::PointerType>(ops.lhs.getType())) {
    if (ops.compType->isSignedIntegerOrEnumerationType()) {
      // Cases fall through when a sanitizer is active so the trapping path is
      // reached.
      switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
      case LangOptions::SOB_Defined: {
        // -fwrapv: wrapping subtract unless a check is requested.
        if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return builder.createSub(loc, ops.lhs, ops.rhs);
        [[fallthrough]];
      }
      case LangOptions::SOB_Undefined:
        // Default semantics: no-signed-wrap subtract.
        if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
          return builder.createNSWSub(loc, ops.lhs, ops.rhs);
        [[fallthrough]];
      case LangOptions::SOB_Trapping:
        if (canElideOverflowCheck(cgf.getContext(), ops))
          return builder.createNSWSub(loc, ops.lhs, ops.rhs);
        cgf.cgm.errorNYI("sanitizers");
      }
    }

    if (ops.fullType->isConstantMatrixType()) {
      cgf.cgm.errorNYI("matrix types");
      return nullptr;
    }

    if (ops.compType->isUnsignedIntegerType() &&
        cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
        !canElideOverflowCheck(cgf.getContext(), ops))
      cgf.cgm.errorNYI("unsigned int overflow sanitizer");

    if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
      // Apply the expression's FP options around the emitted op.
      CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(cgf, ops.fpFeatures);
      return builder.createFSub(loc, ops.lhs, ops.rhs);
    }

    if (ops.isFixedPointOp()) {
      cgf.cgm.errorNYI("fixed point");
      return {};
    }

    return builder.createSub(loc, ops.lhs, ops.rhs);
  }

  // If the RHS is not a pointer, then we have normal pointer
  // arithmetic.
  if (!mlir::isa<cir::PointerType>(ops.rhs.getType()))
    return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/true);

  // Otherwise, this is a pointer subtraction

  // Do the raw subtraction part.
  //
  // TODO(cir): note for LLVM lowering out of this; when expanding this into
  // LLVM we shall take VLA's, division by element size, etc.
  //
  // See more in `EmitSub` in CGExprScalar.cpp.
  return cir::PtrDiffOp::create(builder, cgf.getLoc(ops.loc), cgf.ptrDiffTy,
                                ops.lhs, ops.rhs);
}
1984
/// Emit a left shift. The shift-base/exponent sanitizer predicates are
/// computed but currently only lead to errorNYI.
mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &ops) {
  // TODO: This misses out on the sanitizer check below.
  if (ops.isFixedPointOp()) {
    cgf.cgm.errorNYI("fixed point");
    return {};
  }

  // CIR accepts shift between different types, meaning nothing special
  // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type:
  // promote or truncate the RHS to the same size as the LHS.

  // NOTE(review): one conjunct of this condition (upstream it also requires
  // signed overflow not to be defined) appears to have been lost in
  // extraction — confirm against the upstream file.
  bool sanitizeSignedBase = cgf.sanOpts.has(SanitizerKind::ShiftBase) &&
                            ops.compType->hasSignedIntegerRepresentation() &&
                            !cgf.getLangOpts().CPlusPlus20;
  bool sanitizeUnsignedBase =
      cgf.sanOpts.has(SanitizerKind::UnsignedShiftBase) &&
      ops.compType->hasUnsignedIntegerRepresentation();
  bool sanitizeBase = sanitizeSignedBase || sanitizeUnsignedBase;
  bool sanitizeExponent = cgf.sanOpts.has(SanitizerKind::ShiftExponent);

  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (cgf.getLangOpts().OpenCL)
    cgf.cgm.errorNYI("opencl");
  else if ((sanitizeBase || sanitizeExponent) &&
           mlir::isa<cir::IntType>(ops.lhs.getType()))
    cgf.cgm.errorNYI("sanitizers");

  return builder.createShiftLeft(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
}
2016
/// Emit a right shift. Arithmetic vs. logical shift is decided later, during
/// LLVM lowering, from the CIR operand types.
mlir::Value ScalarExprEmitter::emitShr(const BinOpInfo &ops) {
  // TODO: This misses out on the sanitizer check below.
  if (ops.isFixedPointOp()) {
    cgf.cgm.errorNYI("fixed point");
    return {};
  }

  // CIR accepts shift between different types, meaning nothing special
  // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type:
  // promote or truncate the RHS to the same size as the LHS.

  // OpenCL 6.3j: shift values are effectively % word size of LHS.
  if (cgf.getLangOpts().OpenCL)
    cgf.cgm.errorNYI("opencl");
  else if (cgf.sanOpts.has(SanitizerKind::ShiftExponent) &&
           mlir::isa<cir::IntType>(ops.lhs.getType()))
    cgf.cgm.errorNYI("sanitizers");

  // Note that we don't need to distinguish unsigned treatment at this
  // point since it will be handled later by LLVM lowering.
  return builder.createShiftRight(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
}
2040
2041mlir::Value ScalarExprEmitter::emitAnd(const BinOpInfo &ops) {
2042 return cir::AndOp::create(builder, cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
2043}
2044mlir::Value ScalarExprEmitter::emitXor(const BinOpInfo &ops) {
2045 return cir::XorOp::create(builder, cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
2046}
2047mlir::Value ScalarExprEmitter::emitOr(const BinOpInfo &ops) {
2048 return cir::OrOp::create(builder, cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
2049}
2050
// Emit code for an explicit or implicit cast. Implicit
// casts have to handle a more broad range of conversions than explicit
// casts, as they handle things like function to ptr-to-function decay
// etc.
//
// NOTE(review): several inner lines of this function (mostly standalone
// missing-feature asserts, plus a few call-argument lines in the
// CK_BaseToDerived and CK_DerivedToBase cases) appear to have been lost in
// extraction — confirm those cases against the upstream file.
mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
  Expr *subExpr = ce->getSubExpr();
  QualType destTy = ce->getType();
  CastKind kind = ce->getCastKind();

  // These cases are generally not written to ignore the result of evaluating
  // their sub-expressions, so we clear this now.
  ignoreResultAssign = false;

  switch (kind) {
  case clang::CK_Dependent:
    llvm_unreachable("dependent cast kind in CIR gen!");
  case clang::CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");
  case CK_LValueBitCast:
  case CK_LValueToRValueBitCast: {
    // Reinterpret the storage of the source l-value as the destination type
    // and load through it.
    LValue sourceLVal = cgf.emitLValue(subExpr);
    Address sourceAddr = sourceLVal.getAddress();

    mlir::Type destElemTy = cgf.convertTypeForMem(destTy);
    Address destAddr = sourceAddr.withElementType(cgf.getBuilder(), destElemTy);
    LValue destLVal = cgf.makeAddrLValue(destAddr, destTy);
    return emitLoadOfLValue(destLVal, ce->getExprLoc());
  }

  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    mlir::Value src = Visit(const_cast<Expr *>(subExpr));
    mlir::Type dstTy = cgf.convertType(destTy);


    if (cgf.sanOpts.has(SanitizerKind::CFIUnrelatedCast))
      cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
                                     "sanitizer support");

    if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
      cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
                                     "strict vtable pointers");

    // Update heapallocsite metadata when there is an explicit pointer cast.

    // If Src is a fixed vector and Dst is a scalable vector, and both have the
    // same element type, use the llvm.vector.insert intrinsic to perform the
    // bitcast.

    // If Src is a scalable vector and Dst is a fixed vector, and both have the
    // same element type, use the llvm.vector.extract intrinsic to perform the
    // bitcast.

    // Perform VLAT <-> VLST bitcast through memory.
    // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
    //       require the element types of the vectors to be the same, we
    //       need to keep this around for bitcasts between VLAT <-> VLST where
    //       the element types of the vectors are not the same, until we figure
    //       out a better way of doing these casts.

    return cgf.getBuilder().createBitcast(cgf.getLoc(subExpr->getSourceRange()),
                                          src, dstTy);
  }
  case CK_AddressSpaceConversion: {
    Expr::EvalResult result;
    if (subExpr->EvaluateAsRValue(result, cgf.getContext()) &&
        result.Val.isNullPointer()) {
      // If e has side effect, it is emitted even if its final result is a
      // null pointer. In that case, a DCE pass should be able to
      // eliminate the useless instructions emitted during translating E.
      if (result.HasSideEffects)
        Visit(subExpr);
      return cgf.cgm.emitNullConstant(destTy,
                                      cgf.getLoc(subExpr->getExprLoc()));
    }
    return cgf.performAddrSpaceCast(Visit(subExpr), convertType(destTy));
  }

  case CK_AtomicToNonAtomic: {
    // Not implemented yet: report and produce a placeholder value so codegen
    // can continue.
    cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
                                   "CastExpr: ", ce->getCastKindName());
    mlir::Location loc = cgf.getLoc(subExpr->getSourceRange());
    return cgf.createDummyValue(loc, destTy);
  }
  case CK_NonAtomicToAtomic:
  case CK_UserDefinedConversion:
    // Value representation is unchanged; just emit the subexpression.
    return Visit(const_cast<Expr *>(subExpr));
  case CK_NoOp: {
    auto v = Visit(const_cast<Expr *>(subExpr));
    if (v) {
      // CK_NoOp can model a pointer qualification conversion, which can remove
      // an array bound and change the IR type.
      // FIXME: Once pointee types are removed from IR, remove this.
      mlir::Type t = cgf.convertType(destTy);
      if (t != v.getType())
        cgf.getCIRGenModule().errorNYI("pointer qualification conversion");
    }
    return v;
  }
  case CK_IntegralToPointer: {
    mlir::Type destCIRTy = cgf.convertType(destTy);
    mlir::Value src = Visit(const_cast<Expr *>(subExpr));

    // Properly resize by casting to an int of the same size as the pointer.
    // Clang's IntegralToPointer includes 'bool' as the source, but in CIR
    // 'bool' is not an integral type.  So check the source type to get the
    // correct CIR conversion.
    mlir::Type middleTy = cgf.cgm.getDataLayout().getIntPtrType(destCIRTy);
    mlir::Value middleVal = builder.createCast(
        subExpr->getType()->isBooleanType() ? cir::CastKind::bool_to_int
                                            : cir::CastKind::integral,
        src, middleTy);

    if (cgf.cgm.getCodeGenOpts().StrictVTablePointers) {
      cgf.cgm.errorNYI(subExpr->getSourceRange(),
                       "IntegralToPointer: strict vtable pointers");
      return {};
    }

    return builder.createIntToPtr(middleVal, destCIRTy);
  }

  case CK_BaseToDerived: {
    const CXXRecordDecl *derivedClassDecl = destTy->getPointeeCXXRecordDecl();
    assert(derivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
    Address base = cgf.emitPointerWithAlignment(subExpr);
    // NOTE(review): the trailing argument(s) of this call were lost in
    // extraction — confirm upstream.
    Address derived = cgf.getAddressOfDerivedClass(
        cgf.getLoc(ce->getSourceRange()), base, derivedClassDecl, ce->path(),

    // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.

    return cgf.getAsNaturalPointerTo(derived, ce->getType()->getPointeeType());
  }
  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    // The EmitPointerWithAlignment path does this fine; just discard
    // the alignment.
    // NOTE(review): the head of this return statement was lost in
    // extraction — confirm upstream.
                                     ce->getType()->getPointeeType());
  }
  case CK_Dynamic: {
    Address v = cgf.emitPointerWithAlignment(subExpr);
    const auto *dce = cast<CXXDynamicCastExpr>(ce);
    return cgf.emitDynamicCast(v, dce);
  }
  case CK_ArrayToPointerDecay:
    return cgf.emitArrayToPointerDecay(subExpr).getPointer();

  case CK_NullToPointer: {
    // Only emit the operand when it may have side effects.
    if (mustVisitNullValue(subExpr))
      cgf.emitIgnoredExpr(subExpr);

    // Note that DestTy is used as the MLIR type instead of a custom
    // nullptr type.
    mlir::Type ty = cgf.convertType(destTy);
    return builder.getNullPtr(ty, cgf.getLoc(subExpr->getExprLoc()));
  }

  case CK_NullToMemberPointer: {
    if (mustVisitNullValue(subExpr))
      cgf.emitIgnoredExpr(subExpr);


    const MemberPointerType *mpt = ce->getType()->getAs<MemberPointerType>();
    mlir::Location loc = cgf.getLoc(subExpr->getExprLoc());
    return cgf.getBuilder().getConstant(
        loc, cgf.cgm.emitNullMemberAttr(destTy, mpt));
  }

  case CK_ReinterpretMemberPointer: {
    mlir::Value src = Visit(subExpr);
    return builder.createBitcast(cgf.getLoc(subExpr->getExprLoc()), src,
                                 cgf.convertType(destTy));
  }
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer: {
    mlir::Value src = Visit(subExpr);


    // The class named in the member pointer's nested-name-specifier is the
    // derived class; the adjustment is its non-virtual base offset.
    QualType derivedTy =
        kind == CK_DerivedToBaseMemberPointer ? subExpr->getType() : destTy;
    const auto *mpType = derivedTy->castAs<MemberPointerType>();
    NestedNameSpecifier qualifier = mpType->getQualifier();
    assert(qualifier && "member pointer without class qualifier");
    const Type *qualifierType = qualifier.getAsType();
    assert(qualifierType && "member pointer qualifier is not a type");
    const CXXRecordDecl *derivedClass = qualifierType->getAsCXXRecordDecl();
    CharUnits offset =
        cgf.cgm.computeNonVirtualBaseClassOffset(derivedClass, ce->path());

    mlir::Location loc = cgf.getLoc(subExpr->getExprLoc());
    mlir::Type resultTy = cgf.convertType(destTy);
    mlir::IntegerAttr offsetAttr = builder.getIndexAttr(offset.getQuantity());

    // Method pointers and data-member pointers use distinct adjustment ops.
    if (subExpr->getType()->isMemberFunctionPointerType()) {
      if (kind == CK_BaseToDerivedMemberPointer)
        return cir::DerivedMethodOp::create(builder, loc, resultTy, src,
                                            offsetAttr);
      return cir::BaseMethodOp::create(builder, loc, resultTy, src, offsetAttr);
    }

    if (kind == CK_BaseToDerivedMemberPointer)
      return cir::DerivedDataMemberOp::create(builder, loc, resultTy, src,
                                              offsetAttr);
    return cir::BaseDataMemberOp::create(builder, loc, resultTy, src,
                                         offsetAttr);
  }

  case CK_LValueToRValue:
    assert(cgf.getContext().hasSameUnqualifiedType(subExpr->getType(), destTy));
    assert(subExpr->isGLValue() && "lvalue-to-rvalue applied to r-value!");
    return Visit(const_cast<Expr *>(subExpr));

  case CK_IntegralCast: {
    ScalarConversionOpts opts;
    // Sanitized truncation checks only apply to implicit conversions that are
    // not part of an explicit cast.
    if (auto *ice = dyn_cast<ImplicitCastExpr>(ce)) {
      if (!ice->isPartOfExplicitCast())
        opts = ScalarConversionOpts(cgf.sanOpts);
    }
    return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
                                ce->getExprLoc(), opts);
  }

  case CK_FloatingComplexToReal:
  case CK_IntegralComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_IntegralComplexToBoolean: {
    mlir::Value value = cgf.emitComplexExpr(subExpr);
    return emitComplexToScalarConversion(cgf.getLoc(ce->getExprLoc()), value,
                                         kind, destTy);
  }

  case CK_FloatingRealToComplex:
  case CK_FloatingComplexCast:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_FloatingComplexToIntegralComplex:
    // These produce complex results and are emitted elsewhere.
    llvm_unreachable("scalar cast to non-scalar value");

  case CK_PointerToIntegral: {
    assert(!destTy->isBooleanType() && "bool should use PointerToBool");
    if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
      cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
                                     "strict vtable pointers");
    return builder.createPtrToInt(Visit(subExpr), cgf.convertType(destTy));
  }
  case CK_ToVoid:
    // Evaluate for side effects only; no value is produced.
    cgf.emitIgnoredExpr(subExpr);
    return {};

  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingCast:
  case CK_FixedPointToFloating:
  case CK_FloatingToFixedPoint: {
    if (kind == CK_FixedPointToFloating || kind == CK_FloatingToFixedPoint) {
      cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
                                     "fixed point casts");
      return {};
    }
    // Scope the cast expression's FP options around the conversion.
    CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(cgf, ce);
    return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
                                ce->getExprLoc());
  }

  case CK_IntegralToBoolean:
    return emitIntToBoolConversion(Visit(subExpr),
                                   cgf.getLoc(ce->getSourceRange()));

  case CK_PointerToBoolean:
    return emitPointerToBoolConversion(Visit(subExpr), subExpr->getType());
  case CK_FloatingToBoolean:
    return emitFloatToBoolConversion(Visit(subExpr),
                                     cgf.getLoc(subExpr->getExprLoc()));
  case CK_MemberPointerToBoolean: {
    mlir::Value memPtr = Visit(subExpr);
    return builder.createCast(cgf.getLoc(ce->getSourceRange()),
                              cir::CastKind::member_ptr_to_bool, memPtr,
                              cgf.convertType(destTy));
  }

  case CK_VectorSplat: {
    // Create a vector object and fill all elements with the same scalar value.
    assert(destTy->isVectorType() && "CK_VectorSplat to non-vector type");
    return cir::VecSplatOp::create(builder,
                                   cgf.getLoc(subExpr->getSourceRange()),
                                   cgf.convertType(destTy), Visit(subExpr));
  }
  case CK_FunctionToPointerDecay:
    return cgf.emitLValue(subExpr).getPointer();

  default:
    cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
                                   "CastExpr: ", ce->getCastKindName());
  }
  return {};
}
2362
/// Emit a call expression as a scalar value.
/// NOTE(review): the guard condition of the early return (upstream it checks
/// whether the call returns a reference type, which is then loaded as an
/// l-value) was lost in extraction — confirm against the upstream file.
mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *e) {
    return emitLoadOfLValue(e);

  // Plain (non-reference) result: emit the call and take its scalar value.
  auto v = cgf.emitCallExpr(e).getValue();
  return v;
}
2371
/// Emit a member access as a scalar value. Constant-foldable integer members
/// (e.g. enum constants) become constants directly; the base is still
/// emitted for its side effects. Everything else loads through an l-value.
mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *e) {
  // TODO(cir): The classic codegen calls tryEmitAsConstant() here. Folding
  // constants sound like work for MLIR optimizers, but we'll keep an assertion
  // for now.
  Expr::EvalResult result;
  if (e->EvaluateAsInt(result, cgf.getContext(), Expr::SE_AllowSideEffects)) {
    llvm::APSInt value = result.Val.getInt();
    // Keep the base's side effects even though its value is unused.
    cgf.emitIgnoredExpr(e->getBase());
    return builder.getConstInt(cgf.getLoc(e->getExprLoc()), value);
  }
  return emitLoadOfLValue(e);
}
2385
2386mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *e) {
2387 const unsigned numInitElements = e->getNumInits();
2388
2389 [[maybe_unused]] const bool ignore = std::exchange(ignoreResultAssign, false);
2390 assert((ignore == false ||
2391 (numInitElements == 0 && e->getType()->isVoidType())) &&
2392 "init list ignored");
2393
2394 if (e->hadArrayRangeDesignator()) {
2395 cgf.cgm.errorNYI(e->getSourceRange(), "ArrayRangeDesignator");
2396 return {};
2397 }
2398
2399 if (e->getType()->isVectorType()) {
2400 const auto vectorType =
2401 mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
2402
2403 SmallVector<mlir::Value, 16> elements;
2404 for (Expr *init : e->inits()) {
2405 elements.push_back(Visit(init));
2406 }
2407
2408 // Zero-initialize any remaining values.
2409 if (numInitElements < vectorType.getSize()) {
2410 const mlir::Value zeroValue = cgf.getBuilder().getNullValue(
2411 vectorType.getElementType(), cgf.getLoc(e->getSourceRange()));
2412 std::fill_n(std::back_inserter(elements),
2413 vectorType.getSize() - numInitElements, zeroValue);
2414 }
2415
2416 return cir::VecCreateOp::create(cgf.getBuilder(),
2417 cgf.getLoc(e->getSourceRange()), vectorType,
2418 elements);
2419 }
2420
2421 // C++11 value-initialization for the scalar.
2422 if (numInitElements == 0)
2423 return emitNullValue(e->getType(), cgf.getLoc(e->getExprLoc()));
2424
2425 return Visit(e->getInit(0));
2426}
2427
/// Convert a scalar value of type srcTy to a scalar of dstTy, delegating to
/// ScalarExprEmitter's conversion logic.
/// NOTE(review): the assert preceding this message string (upstream it
/// checks both types have scalar evaluation kind) was lost in extraction —
/// confirm against the upstream file.
mlir::Value CIRGenFunction::emitScalarConversion(mlir::Value src,
                                                 QualType srcTy, QualType dstTy,
                                                 SourceLocation loc) {
         "Invalid scalar expression to emit");
  return ScalarExprEmitter(*this, builder)
      .emitScalarConversion(src, srcTy, dstTy, loc);
}
2437
/// Convert a complex value to a scalar: to bool via the complex-to-bool
/// casts, otherwise project the real part and convert it to dstTy.
/// NOTE(review): the first line of the signature (the function name and the
/// `mlir::Value src` parameter) was lost in extraction — confirm upstream.
                                                          QualType srcTy,
                                                          QualType dstTy,
                                                          SourceLocation loc) {
  assert(srcTy->isAnyComplexType() && hasScalarEvaluationKind(dstTy) &&
         "Invalid complex -> scalar conversion");

  QualType complexElemTy = srcTy->castAs<ComplexType>()->getElementType();
  if (dstTy->isBooleanType()) {
    // Complex-to-bool tests both components; pick the float/int flavor.
    auto kind = complexElemTy->isFloatingType()
                    ? cir::CastKind::float_complex_to_bool
                    : cir::CastKind::int_complex_to_bool;
    return builder.createCast(getLoc(loc), kind, src, convertType(dstTy));
  }

  // Otherwise discard the imaginary part and convert the real component.
  auto kind = complexElemTy->isFloatingType()
                  ? cir::CastKind::float_complex_to_real
                  : cir::CastKind::int_complex_to_real;
  mlir::Value real =
      builder.createCast(getLoc(loc), kind, src, convertType(complexElemTy));
  return emitScalarConversion(real, complexElemTy, dstTy, loc);
}
2460
/// Emit logical not. For generic vectors this is an element-wise compare
/// against zero; for everything else, convert to bool, invert, and widen
/// back to the expression type.
/// NOTE(review): the closing operand of the vector-kind comparison
/// (presumably VectorKind::Generic) and the opening brace were lost in
/// extraction — confirm against the upstream file.
mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) {
  // Perform vector logical not on comparison with zero vector.
  if (e->getType()->isVectorType() &&
      e->getType()->castAs<VectorType>()->getVectorKind() ==
    mlir::Value oper = Visit(e->getSubExpr());
    mlir::Location loc = cgf.getLoc(e->getExprLoc());
    auto operVecTy = mlir::cast<cir::VectorType>(oper.getType());
    auto exprVecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
    mlir::Value zeroVec = builder.getNullValue(operVecTy, loc);
    return cir::VecCmpOp::create(builder, loc, exprVecTy, cir::CmpOpKind::eq,
                                 oper, zeroVec);
  }

  // Compare operand to zero.
  mlir::Value boolVal = cgf.evaluateExprAsBool(e->getSubExpr());

  // Invert value.
  boolVal = builder.createNot(boolVal);

  // ZExt result to the expr type.
  return maybePromoteBoolResult(boolVal, cgf.convertType(e->getType()));
}
2484
/// Emit offsetof: fold to a constant when possible; non-constant offsetof
/// (e.g. with variable array indices) is not yet implemented.
/// NOTE(review): the head of the errorNYI call was lost in extraction —
/// confirm against the upstream file.
mlir::Value ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *e) {
  // Try folding the offsetof to a constant.
  Expr::EvalResult evalResult;
  if (e->EvaluateAsInt(evalResult, cgf.getContext())) {
    mlir::Type type = cgf.convertType(e->getType());
    llvm::APSInt value = evalResult.Val.getInt();
    return builder.getConstAPInt(cgf.getLoc(e->getExprLoc()), type, value);
  }

      e->getSourceRange(),
      "ScalarExprEmitter::VisitOffsetOfExpr Can't eval expr as int");
  return {};
}
2499
2500mlir::Value ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *e) {
2501 QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
2502 mlir::Value result = VisitRealImag(e, promotionTy);
2503 if (result && !promotionTy.isNull())
2504 result = emitUnPromotedValue(result, e->getType());
2505 return result;
2506}
2507
2508mlir::Value ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *e) {
2509 QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
2510 mlir::Value result = VisitRealImag(e, promotionTy);
2511 if (result && !promotionTy.isNull())
2512 result = emitUnPromotedValue(result, e->getType());
2513 return result;
2514}
2515
2516mlir::Value ScalarExprEmitter::VisitRealImag(const UnaryOperator *e,
2517 QualType promotionTy) {
2518 assert(
2519 (e->getOpcode() == clang::UO_Real || e->getOpcode() == clang::UO_Imag) &&
2520 "Invalid UnaryOp kind for ComplexType Real or Imag");
2521
2522 Expr *op = e->getSubExpr();
2523 mlir::Location loc = cgf.getLoc(e->getExprLoc());
2524 if (op->getType()->isAnyComplexType()) {
2525 // If it's an l-value, load through the appropriate subobject l-value.
2526 // Note that we have to ask `e` because `op` might be an l-value that
2527 // this won't work for, e.g. an Obj-C property
2528 mlir::Value complex = cgf.emitComplexExpr(op);
2529 if (e->isGLValue() && !promotionTy.isNull()) {
2530 promotionTy = promotionTy->isAnyComplexType()
2531 ? promotionTy
2532 : cgf.getContext().getComplexType(promotionTy);
2533 complex = cgf.emitPromotedValue(complex, promotionTy);
2534 }
2535
2536 return e->getOpcode() == clang::UO_Real
2537 ? builder.createComplexReal(loc, complex)
2538 : builder.createComplexImag(loc, complex);
2539 }
2540
2541 if (e->getOpcode() == UO_Real) {
2542 mlir::Value operand = promotionTy.isNull()
2543 ? Visit(op)
2544 : cgf.emitPromotedScalarExpr(op, promotionTy);
2545 return builder.createComplexReal(loc, operand);
2546 }
2547
2548 // __imag on a scalar returns zero. Emit the subexpr to ensure side
2549 // effects are evaluated, but not the actual value.
2550 mlir::Value operand;
2551 if (op->isGLValue()) {
2552 operand = cgf.emitLValue(op).getPointer();
2553 operand = cir::LoadOp::create(builder, loc, operand);
2554 } else if (!promotionTy.isNull()) {
2555 operand = cgf.emitPromotedScalarExpr(op, promotionTy);
2556 } else {
2557 operand = cgf.emitScalarExpr(op);
2558 }
2559 return builder.createComplexImag(loc, operand);
2560}
2561
/// Return the size or alignment of the type of argument of the sizeof
/// expression as an integer.
///
/// NOTE(review): several inner lines of this function (the VLA-expression
/// evaluation statement, call-argument lines of the OpenMP-simd-align and
/// scalable-vector branches, and the final constant's value argument) were
/// lost in extraction — confirm those spots against the upstream file.
mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
    const UnaryExprOrTypeTraitExpr *e) {
  const QualType typeToSize = e->getTypeOfArgument();
  const mlir::Location loc = cgf.getLoc(e->getSourceRange());
  if (auto kind = e->getKind();
      kind == UETT_SizeOf || kind == UETT_DataSizeOf || kind == UETT_CountOf) {
    if (const VariableArrayType *vat =
            cgf.getContext().getAsVariableArrayType(typeToSize)) {
      // For _Countof, we only want to evaluate if the extent is actually
      // variable as opposed to a multi-dimensional array whose extent is
      // constant but whose element type is variable.
      bool evaluateExtent = true;
      if (kind == UETT_CountOf && vat->getElementType()->isArrayType()) {
        evaluateExtent =
            !vat->getSizeExpr()->isIntegerConstantExpr(cgf.getContext());
      }

      if (evaluateExtent) {
        if (e->isArgumentType()) {
          // sizeof(type) - make sure to emit the VLA size.
          cgf.emitVariablyModifiedType(typeToSize);
        } else {
          // C99 6.5.3.4p2: If the argument is an expression of type
          // VLA, it is evaluated.
        }

        // For _Countof, we just want to return the size of a single dimension.
        if (kind == UETT_CountOf)
          return cgf.getVLAElements1D(vat).numElts;

        // For sizeof and __datasizeof, we need to scale the number of elements
        // by the size of the array element type.
        CIRGenFunction::VlaSizePair vlaSize = cgf.getVLASize(vat);
        mlir::Value numElts = vlaSize.numElts;

        // Scale the number of non-VLA elements by the non-VLA element size.
        CharUnits eltSize = cgf.getContext().getTypeSizeInChars(vlaSize.type);
        if (!eltSize.isOne()) {
          mlir::Location loc = cgf.getLoc(e->getSourceRange());
          mlir::Value eltSizeValue =
              builder.getConstAPInt(numElts.getLoc(), numElts.getType(),
                                    cgf.cgm.getSize(eltSize).getValue());
          return builder.createMul(loc, eltSizeValue, numElts,
        }

        return numElts;
      }
    }
  } else if (e->getKind() == UETT_OpenMPRequiredSimdAlign) {
      cgf.getContext()
        .getQuantity();
    return builder.getConstantInt(loc, cgf.cgm.sizeTy, alignment);
  } else if (e->getKind() == UETT_VectorElements) {
    auto vecTy = cast<cir::VectorType>(convertType(e->getTypeOfArgument()));
    if (vecTy.getIsScalable()) {
        e->getSourceRange(),
        "VisitUnaryExprOrTypeTraitExpr: sizeOf scalable vector");
      return builder.getConstant(
          loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
    }

    // Fixed-width vector: the element count is a compile-time constant.
    return builder.getConstant(
        loc, cir::IntAttr::get(cgf.cgm.uInt64Ty, vecTy.getSize()));
  }

  // Non-VLA sizeof/alignof: a pure compile-time constant.
  return builder.getConstant(
      loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
}
2640
2641 /// Return true if the specified expression is cheap enough and side-effect-free
2642 /// enough to evaluate unconditionally instead of conditionally. This is used
2643 /// to convert control flow into selects in some cases.
2644 /// TODO(cir): can be shared with LLVM codegen.
// NOTE(review): the first line of the signature (orig. 2645) is missing from
// this extraction; per the file's index it is
// `static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e,` —
// confirm against upstream.
2646 CIRGenFunction &cgf) {
2647 // Anything that is an integer or floating point constant is fine.
// Only constant-evaluatable expressions qualify; everything after the
// return is intentionally-unreachable commentary explaining why nothing
// else (even non-volatile locals) is safe to hoist.
2648 return e->IgnoreParens()->isEvaluatable(cgf.getContext());
2649
2650 // Even non-volatile automatic variables can't be evaluated unconditionally.
2651 // Referencing a thread_local may cause non-trivial initialization work to
2652 // occur. If we're inside a lambda and one of the variables is from the scope
2653 // outside the lambda, that function may have returned already. Reading its
2654 // locals is a bad idea. Also, these reads may introduce races there didn't
2655 // exist in the source-level program.
2656 }
2657
// Emit a scalar `cond ? lhs : rhs` (or GNU `?:`). Strategy, in order:
// (1) if the condition constant-folds, emit only the live arm; (2) vector
// conditions use cir.vec.ternary; (3) if both arms are trivially cheap,
// emit an unconditional select; (4) otherwise emit a cir.ternary region op
// with one builder per arm.
// NOTE(review): this Doxygen extraction is missing orig. lines 2682, 2704,
// 2710, 2726, 2729-2730 and 2753-2755 (likely assert(!cir::MissingFeatures
// ::...) / errorNYI statements) — verify against upstream before editing.
2658 mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator(
2659 const AbstractConditionalOperator *e) {
2660 CIRGenBuilderTy &builder = cgf.getBuilder();
2661 mlir::Location loc = cgf.getLoc(e->getSourceRange());
2662 ignoreResultAssign = false;
2663
2664 // Bind the common expression if necessary.
// (BinaryConditionalOperator reuses the condition as the true arm; the
// OpaqueValueMapping makes that shared value visible to both.)
2665 CIRGenFunction::OpaqueValueMapping binding(cgf, e);
2666
2667 Expr *condExpr = e->getCond();
2668 Expr *lhsExpr = e->getTrueExpr();
2669 Expr *rhsExpr = e->getFalseExpr();
2670
2671 // If the condition constant folds and can be elided, try to avoid emitting
2672 // the condition and the dead arm.
2673 bool condExprBool;
2674 if (cgf.constantFoldsToBool(condExpr, condExprBool)) {
2675 Expr *live = lhsExpr, *dead = rhsExpr;
2676 if (!condExprBool)
2677 std::swap(live, dead);
2678
2679 // If the dead side doesn't have labels we need, just emit the Live part.
2680 if (!cgf.containsLabel(dead)) {
// NOTE(review): the statement guarded by this `if` (orig. line 2682) is
// missing here — upstream it is a profile-counter placeholder.
2681 if (condExprBool)
2683 mlir::Value result = Visit(live);
2684
2685 // If the live part is a throw expression, it acts like it has a void
2686 // type, so evaluating it returns a null Value. However, a conditional
2687 // with non-void type must return a non-null Value.
2688 if (!result && !e->getType()->isVoidType()) {
2689 result = builder.getConstant(
2690 loc, cir::PoisonAttr::get(builder.getContext(),
2691 cgf.convertType(e->getType())));
2692 }
2693
2694 return result;
2695 }
2696 }
2697
2698 QualType condType = condExpr->getType();
2699
2700 // OpenCL: If the condition is a vector, we can treat this condition like
2701 // the select function.
2702 if ((cgf.getLangOpts().OpenCL && condType->isVectorType()) ||
2703 condType->isExtVectorType()) {
// Not yet implemented in CIR; falls through after reporting.
2705 cgf.cgm.errorNYI(e->getSourceRange(), "vector ternary op");
2706 }
2707
2708 if (condType->isVectorType() || condType->isSveVLSBuiltinType()) {
2709 if (!condType->isVectorType()) {
// SVE fixed-length builtin vectors are not supported yet.
2711 cgf.cgm.errorNYI(loc, "TernaryOp for SVE vector");
2712 return {};
2713 }
2714
// Vector condition: all three operands are evaluated unconditionally and
// combined element-wise.
2715 mlir::Value condValue = Visit(condExpr);
2716 mlir::Value lhsValue = Visit(lhsExpr);
2717 mlir::Value rhsValue = Visit(rhsExpr);
2718 return cir::VecTernaryOp::create(builder, loc, condValue, lhsValue,
2719 rhsValue);
2720 }
2721
2722 // If this is a really simple expression (like x ? 4 : 5), emit this as a
2723 // select instead of as control flow. We can only do this if it is cheap
2724 // and safe to evaluate the LHS and RHS unconditionally.
// NOTE(review): the second conjunct (orig. line 2726, presumably the same
// check on rhsExpr) is missing from this listing.
2725 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, cgf) &&
2727 bool lhsIsVoid = false;
2728 mlir::Value condV = cgf.evaluateExprAsBool(condExpr);
2730
// A null arm value means the arm had void type (e.g. a throw); both arms
// are patched to a void null so the select stays type-consistent.
2731 mlir::Value lhs = Visit(lhsExpr);
2732 if (!lhs) {
2733 lhs = builder.getNullValue(cgf.voidTy, loc);
2734 lhsIsVoid = true;
2735 }
2736
2737 mlir::Value rhs = Visit(rhsExpr);
2738 if (lhsIsVoid) {
2739 assert(!rhs && "lhs and rhs types must match");
2740 rhs = builder.getNullValue(cgf.voidTy, loc);
2741 }
2742
2743 return builder.createSelect(loc, condV, lhs, rhs);
2744 }
2745
// General case: real control flow via a cir.ternary region op.
2746 mlir::Value condV = cgf.emitOpOnBoolExpr(loc, condExpr);
2747 CIRGenFunction::ConditionalEvaluation eval(cgf);
// insertPoints records arms that produced no value (throw/void) so a
// matching yield can be patched in afterwards; yieldTy is the type the
// non-void arm(s) yielded.
2748 SmallVector<mlir::OpBuilder::InsertPoint, 2> insertPoints{};
2749 mlir::Type yieldTy{};
2750
2751 auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc, Expr *expr) {
2752 CIRGenFunction::LexicalScope lexScope{cgf, loc, b.getInsertionBlock()};
2754
2756 eval.beginEvaluation();
2757 mlir::Value branch = Visit(expr);
2758 eval.endEvaluation();
2759
2760 if (branch) {
2761 yieldTy = branch.getType();
2762 cir::YieldOp::create(b, loc, branch);
2763 } else {
2764 // If LHS or RHS is a throw or void expression we need to patch
2765 // arms as to properly match yield types.
2766 insertPoints.push_back(b.saveInsertionPoint());
2767 }
2768 };
2769
2770 mlir::Value result = cir::TernaryOp::create(
2771 builder, loc, condV,
2772 /*trueBuilder=*/
2773 [&](mlir::OpBuilder &b, mlir::Location loc) {
2774 emitBranch(b, loc, lhsExpr);
2775 },
2776 /*falseBuilder=*/
2777 [&](mlir::OpBuilder &b, mlir::Location loc) {
2778 emitBranch(b, loc, rhsExpr);
2779 })
2780 .getResult();
2781
2782 if (!insertPoints.empty()) {
2783 // If both arms are void, so be it.
2784 if (!yieldTy)
2785 yieldTy = cgf.voidTy;
2786
2787 // Insert required yields.
2788 for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
// InsertionGuard restores the builder's position after each patch.
2789 mlir::OpBuilder::InsertionGuard guard(builder);
2790 builder.restoreInsertionPoint(toInsert);
2791
2792 // Block does not return: build empty yield.
2793 if (mlir::isa<cir::VoidType>(yieldTy)) {
2794 cir::YieldOp::create(builder, loc);
2795 } else { // Block returns: set null yield value.
2796 mlir::Value op0 = builder.getNullValue(yieldTy, loc);
2797 cir::YieldOp::create(builder, loc, op0);
2798 }
2799 }
2800 }
2801
2802 return result;
2803}
2804
2806 LValue lv) {
2807 return ScalarExprEmitter(*this, builder).emitScalarPrePostIncDec(e, lv);
2808}
#define HANDLE_BINOP(OP)
static bool mustVisitNullValue(const Expr *e)
#define COMPOUND_OP(Op)
#define HANDLEBINOP(OP)
static bool isWidenedIntegerOp(const ASTContext &astContext, const Expr *e)
Check if e is a widened promoted integer.
static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e, CIRGenFunction &cgf)
Return true if the specified expression is cheap enough and side-effect-free enough to evaluate uncon...
static bool canElideOverflowCheck(const ASTContext &astContext, const BinOpInfo &op)
Check if we can skip the overflow check for Op.
static std::optional< QualType > getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e)
If e is a widened promoted integer, get its base (unpromoted) type.
#define VISITCOMP(CODE)
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
mlir::Value createNSWSub(mlir::Location loc, mlir::Value lhs, mlir::Value rhs)
cir::ConstantOp getBool(bool state, mlir::Location loc)
mlir::Value getConstAPInt(mlir::Location loc, mlir::Type typ, const llvm::APInt &val)
mlir::Value createSub(mlir::Location loc, mlir::Value lhs, mlir::Value rhs, OverflowBehavior ob=OverflowBehavior::None)
mlir::Value createNSWAdd(mlir::Location loc, mlir::Value lhs, mlir::Value rhs)
cir::ConstantOp getNullValue(mlir::Type ty, mlir::Location loc)
cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr)
mlir::Value createCast(mlir::Location loc, cir::CastKind kind, mlir::Value src, mlir::Type newTy)
mlir::Value createIntToPtr(mlir::Value src, mlir::Type newTy)
mlir::Value createPtrToInt(mlir::Value src, mlir::Type newTy)
mlir::Value createAdd(mlir::Location loc, mlir::Value lhs, mlir::Value rhs, OverflowBehavior ob=OverflowBehavior::None)
mlir::Value createComplexImag(mlir::Location loc, mlir::Value operand)
mlir::Value createNSWMul(mlir::Location loc, mlir::Value lhs, mlir::Value rhs)
cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc)
mlir::Value createShiftLeft(mlir::Location loc, mlir::Value lhs, unsigned bits)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
cir::CmpOp createCompare(mlir::Location loc, cir::CmpOpKind kind, mlir::Value lhs, mlir::Value rhs)
mlir::Value createNot(mlir::Location loc, mlir::Value value)
mlir::Value createSelect(mlir::Location loc, mlir::Value condition, mlir::Value trueValue, mlir::Value falseValue)
mlir::Value createMul(mlir::Location loc, mlir::Value lhs, mlir::Value rhs, OverflowBehavior ob=OverflowBehavior::None)
cir::ConstantOp getConstantInt(mlir::Location loc, mlir::Type ty, int64_t value)
mlir::Value createComplexCreate(mlir::Location loc, mlir::Value real, mlir::Value imag)
mlir::Value createShiftRight(mlir::Location loc, mlir::Value lhs, unsigned bits)
mlir::Value createComplexReal(mlir::Location loc, mlir::Value operand)
mlir::Type getIntPtrType(mlir::Type ty) const
llvm::APInt getValue() const
APSInt & getInt()
Definition APValue.h:508
bool isNullPointer() const
Definition APValue.cpp:1051
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
CanQualType FloatTy
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
CanQualType BoolTy
unsigned getOpenMPDefaultSimdAlign(QualType T) const
Get default simd alignment of the specified complete type in bits.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
QualType getComplexType(QualType T) const
Return the uniqued reference to the type for a complex number with the specified element type.
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
bool isPromotableIntegerType(QualType T) const
More type predicates useful for type checking/promotion.
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4534
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4540
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4546
LabelDecl * getLabel() const
Definition Expr.h:4576
uint64_t getValue() const
Definition ExprCXX.h:3045
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
SourceLocation getExprLoc() const
Definition Expr.h:4082
Expr * getRHS() const
Definition Expr.h:4093
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition Expr.h:4254
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, const Expr *LHS, const Expr *RHS)
Return true if a binary operator using the specified opcode and operands would match the 'p = (i8*)nu...
Definition Expr.cpp:2205
Opcode getOpcode() const
Definition Expr.h:4086
BinaryOperatorKind Opcode
Definition Expr.h:4046
mlir::Value getPointer() const
Definition Address.h:96
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
mlir::Value createFAdd(mlir::Location loc, mlir::Value lhs, mlir::Value rhs)
cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal)
mlir::Value createFMul(mlir::Location loc, mlir::Value lhs, mlir::Value rhs)
mlir::Value createFSub(mlir::Location loc, mlir::Value lhs, mlir::Value rhs)
mlir::Value createNeg(mlir::Value value)
void forceCleanup(ArrayRef< mlir::Value * > valuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
static bool hasScalarEvaluationKind(clang::QualType type)
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
mlir::Type convertType(clang::QualType t)
mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType)
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void emitVariablyModifiedType(QualType ty)
const clang::LangOptions & getLangOpts() const
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
LValue emitScalarCompoundAssignWithComplex(const CompoundAssignOperator *e, mlir::Value &result)
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
mlir::Value performAddrSpaceCast(mlir::Value v, mlir::Type destTy) const
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
mlir::Type convertTypeForMem(QualType t)
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
mlir::Value getAsNaturalPointerTo(Address addr, QualType pointeeType)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
mlir::Value emitPromotedScalarExpr(const Expr *e, QualType promotionType)
bool shouldNullCheckClassCastValue(const CastExpr *ce)
CIRGenBuilderTy & getBuilder()
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv)
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
clang::ASTContext & getContext() const
void emitNullabilityCheck(LValue lhs, mlir::Value rhs, clang::SourceLocation loc)
Given an assignment *lhs = rhs, emit a test that checks if rhs is nonnull, if LHS is marked _Nonnull...
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
CharUnits computeNonVirtualBaseClassOffset(const CXXRecordDecl *derivedClass, llvm::iterator_range< CastExpr::path_const_iterator > path)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
mlir::IntegerAttr getSize(CharUnits size)
const cir::CIRDataLayout getDataLayout() const
const clang::CodeGenOptions & getCodeGenOpts() const
mlir::TypedAttr emitNullMemberAttr(QualType t, const MemberPointerType *mpt)
Returns a null attribute to represent either a null method or null data member, depending on the type...
mlir::Value emitNullConstant(QualType t, mlir::Location loc)
Return the result of value-initializing the given type, i.e.
mlir::Value getPointer() const
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
bool getValue() const
Definition ExprCXX.h:741
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
bool getValue() const
Definition ExprCXX.h:4333
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition ExprCXX.h:305
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1603
CastKind getCastKind() const
Definition Expr.h:3723
llvm::iterator_range< path_iterator > path()
Path through the class hierarchy taken by casts between base and derived classes (see implementation ...
Definition Expr.h:3766
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1951
Expr * getSubExpr()
Definition Expr.h:3729
int64_t QuantityType
Definition CharUnits.h:40
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
bool isOne() const
isOne - Test whether the quantity equals one.
Definition CharUnits.h:125
unsigned getValue() const
Definition Expr.h:1632
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4887
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3325
CompoundAssignOperator - For compound assignments (e.g.
Definition Expr.h:4303
QualType getComputationLHSType() const
Definition Expr.h:4337
QualType getComputationResultType() const
Definition Expr.h:4340
SourceLocation getExprLoc() const LLVM_READONLY
bool isSatisfied() const
Whether or not the concept with the given arguments was satisfied when the expression was created.
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition Expr.h:4812
ChildElementIter< false > begin()
Definition Expr.h:5235
size_t getDataElementCount() const
Definition Expr.h:5151
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isGLValue() const
Definition Expr.h:287
@ SE_AllowSideEffects
Allow any unmodeled side effect.
Definition Expr.h:677
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3070
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
llvm::APFloat getValue() const
Definition Expr.h:1669
const Expr * getSubExpr() const
Definition Expr.h:1065
Expr * getResultExpr()
Return the result expression of this controlling expression.
Definition Expr.h:6467
unsigned getNumInits() const
Definition Expr.h:5332
bool hadArrayRangeDesignator() const
Definition Expr.h:5486
const Expr * getInit(unsigned Init) const
Definition Expr.h:5356
ArrayRef< Expr * > inits()
Definition Expr.h:5352
bool isSignedOverflowDefined() const
Expr * getBase() const
Definition Expr.h:3444
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3562
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3703
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprObjC.h:248
SourceRange getSourceRange() const
Definition ExprObjC.h:1753
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprObjC.h:190
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprObjC.h:415
OffsetOfExpr - [C99 7.17] - This represents an expression of the form offsetof(record-type,...
Definition Expr.h:2530
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:1211
Expr * getSelectedExpr() const
Definition ExprCXX.h:4640
const Expr * getSubExpr() const
Definition Expr.h:2202
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3378
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8431
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1444
QualType getCanonicalType() const
Definition TypeBase.h:8483
bool UseExcessPrecision(const ASTContext &Ctx)
Definition Type.cpp:1626
bool isCanonical() const
Definition TypeBase.h:8488
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition TypeBase.h:361
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition TypeBase.h:364
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition TypeBase.h:367
bool isSatisfied() const
Whether or not the requires clause is satisfied.
unsigned getNumSubExprs() const
getNumSubExprs - Return the size of the SubExprs array.
Definition Expr.h:4679
Expr * getExpr(unsigned Index)
getExpr - Return the Expr at the specified index.
Definition Expr.h:4685
unsigned getPackLength() const
Retrieve the length of the parameter pack.
Definition ExprCXX.h:4516
APValue EvaluateInContext(const ASTContext &Ctx, const Expr *DefaultExpr) const
Return the result of evaluating this SourceLocExpr in the specified (and possibly null) default argum...
Definition Expr.cpp:2282
SourceLocation getLocation() const
Definition Expr.h:5064
Encodes a location in the source.
SourceLocation getBegin() const
CompoundStmt * getSubStmt()
Definition Expr.h:4615
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
const char * getStmtClassName() const
Definition Stmt.cpp:86
bool getBoolValue() const
Definition ExprCXX.h:2948
const APValue & getAPValue() const
Definition ExprCXX.h:2953
bool isStoredAsBoolean() const
Definition ExprCXX.h:2944
bool isVoidType() const
Definition TypeBase.h:9034
bool isBooleanType() const
Definition TypeBase.h:9171
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2254
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2231
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantMatrixType() const
Definition TypeBase.h:8835
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:9078
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9328
bool isReferenceType() const
Definition TypeBase.h:8692
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1923
bool isSveVLSBuiltinType() const
Determines if this is a sizeless type supported by the 'arm_sve_vector_bits' type attribute,...
Definition Type.cpp:2652
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:754
bool hasUnsignedIntegerRepresentation() const
Determine whether this type has an unsigned integer representation of some sort, e....
Definition Type.cpp:2329
bool isExtVectorType() const
Definition TypeBase.h:8811
bool isAnyComplexType() const
Definition TypeBase.h:8803
bool isFixedPointType() const
Return true if this is a fixed point type according to ISO/IEC JTC1 SC22 WG14 N1169.
Definition TypeBase.h:9094
bool isHalfType() const
Definition TypeBase.h:9038
bool hasSignedIntegerRepresentation() const
Determine whether this type has an signed integer representation of some sort, e.g....
Definition Type.cpp:2275
bool isMatrixType() const
Definition TypeBase.h:8831
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2850
bool isMemberFunctionPointerType() const
Definition TypeBase.h:8753
bool isVectorType() const
Definition TypeBase.h:8807
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2358
bool isFloatingType() const
Definition Type.cpp:2342
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2285
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9261
bool isNullPtrType() const
Definition TypeBase.h:9071
QualType getTypeOfArgument() const
Gets the argument type, or the type of the argument expression, whichever is appropriate.
Definition Expr.h:2697
UnaryExprOrTypeTrait getKind() const
Definition Expr.h:2660
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
SourceLocation getExprLoc() const
Definition Expr.h:2371
Expr * getSubExpr() const
Definition Expr.h:2288
Opcode getOpcode() const
Definition Expr.h:2283
static bool isIncrementOp(Opcode Op)
Definition Expr.h:2329
static bool isPrefix(Opcode Op)
isPrefix - Return true if this is a prefix operation, like –x.
Definition Expr.h:2322
bool canOverflow() const
Returns true if the unary operator can cause an overflow.
Definition Expr.h:2301
Represents a GCC generic vector type.
Definition TypeBase.h:4225
VectorKind getVectorKind() const
Definition TypeBase.h:4245
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::ArgumentAdaptingMatcherFunc< internal::HasMatcher > has
Matches AST nodes that have child AST nodes that match the provided matcher.
const AstTypeMatcher< PointerType > pointerType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ Type
The name was classified as a type.
Definition Sema.h:564
CastKind
CastKind - The kind of operation required for a conversion.
@ Generic
not a target-specific vector type
Definition TypeBase.h:4186
U cast(CodeGen::Address addr)
Definition Address.h:327
#define false
Definition stdbool.h:26
static bool instrumentation()
static bool dataMemberType()
static bool objCLifetime()
static bool addressSpace()
static bool fixedPointType()
static bool vecTernaryOp()
static bool fpConstraints()
static bool addHeapAllocSiteMetadata()
static bool mayHaveIntegerOverflow()
static bool tryEmitAsConstant()
static bool llvmLoweringPtrDiffConsidersPointee()
static bool scalableVectors()
static bool memberFuncPtrAuthInfo()
static bool emitLValueAlignmentAssumption()
static bool incrementProfileCounter()
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition Expr.h:615
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition Sanitizers.h:174