CIRGenExprScalar.cpp
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Emit Expr nodes with scalar CIR types as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
14#include "CIRGenValue.h"
15
16#include "clang/AST/Expr.h"
17#include "clang/AST/StmtVisitor.h"
20
21#include "mlir/IR/Location.h"
22#include "mlir/IR/Value.h"
23
24#include <cassert>
25#include <utility>
26
27using namespace clang;
28using namespace clang::CIRGen;
29
30namespace {
31
32struct BinOpInfo {
33 mlir::Value lhs;
34 mlir::Value rhs;
35 SourceRange loc;
36 QualType fullType; // Type of operands and result
37 QualType compType; // Type used for computations. Element type
38 // for vectors, otherwise same as FullType.
39 BinaryOperator::Opcode opcode; // Opcode of BinOp to perform
40 FPOptions fpfeatures;
41 const Expr *e; // Entire expr, used for diagnostics on unsupported cases; may not be a BinaryOperator.
42
43 /// Check if the binop computes a division or a remainder.
44 bool isDivRemOp() const {
45 return opcode == BO_Div || opcode == BO_Rem || opcode == BO_DivAssign ||
46 opcode == BO_RemAssign;
47 }
48
49 /// Check if the binop can result in integer overflow.
50 bool mayHaveIntegerOverflow() const {
51 // Without constant input, we can't rule out overflow.
52 auto lhsci = lhs.getDefiningOp<cir::ConstantOp>();
53 auto rhsci = rhs.getDefiningOp<cir::ConstantOp>();
54 if (!lhsci || !rhsci)
55 return true;
56
58 // TODO(cir): For now we just assume that we might overflow
59 return true;
60 }
61
62 /// Check if at least one operand is a fixed point type. In such cases,
63 /// this operation did not follow usual arithmetic conversion and both
64 /// operands might not be of the same type.
65 bool isFixedPointOp() const {
66 // We cannot simply check the result type since comparison operations
67 // return an int.
68 if (const auto *binOp = llvm::dyn_cast<BinaryOperator>(e)) {
69 QualType lhstype = binOp->getLHS()->getType();
70 QualType rhstype = binOp->getRHS()->getType();
71 return lhstype->isFixedPointType() || rhstype->isFixedPointType();
72 }
73 if (const auto *unop = llvm::dyn_cast<UnaryOperator>(e))
74 return unop->getSubExpr()->getType()->isFixedPointType();
75 return false;
76 }
77};
78
79class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
80 CIRGenFunction &cgf;
81 CIRGenBuilderTy &builder;
82 // Unlike classic codegen, we set this to false or use std::exchange to read
83 // the value, rather than calling TestAndClearIgnoreResultAssign, to make it
84 // explicit where the value is consumed.
85 bool ignoreResultAssign;
86
87public:
88 ScalarExprEmitter(CIRGenFunction &cgf, CIRGenBuilderTy &builder,
89 bool ignoreResultAssign = false)
90 : cgf(cgf), builder(builder), ignoreResultAssign(ignoreResultAssign) {}
91
92 //===--------------------------------------------------------------------===//
93 // Utilities
94 //===--------------------------------------------------------------------===//
95 mlir::Type convertType(QualType ty) { return cgf.convertType(ty); }
96
97 mlir::Value emitComplexToScalarConversion(mlir::Location loc,
98 mlir::Value value, CastKind kind,
99 QualType destTy);
100
101 mlir::Value emitNullValue(QualType ty, mlir::Location loc) {
102 return cgf.cgm.emitNullConstant(ty, loc);
103 }
104
105 mlir::Value emitPromotedValue(mlir::Value result, QualType promotionType) {
106 return builder.createFloatingCast(result, cgf.convertType(promotionType));
107 }
108
109 mlir::Value emitUnPromotedValue(mlir::Value result, QualType exprType) {
110 return builder.createFloatingCast(result, cgf.convertType(exprType));
111 }
112
113 mlir::Value emitPromoted(const Expr *e, QualType promotionType);
114
115 mlir::Value maybePromoteBoolResult(mlir::Value value,
116 mlir::Type dstTy) const {
117 if (mlir::isa<cir::IntType>(dstTy))
118 return builder.createBoolToInt(value, dstTy);
119 if (mlir::isa<cir::BoolType>(dstTy))
120 return value;
121 llvm_unreachable("Can only promote integer or boolean types");
122 }
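  // Illustrative note (the C snippet is an example, not from this file): for
  // `int r = a && b;` the logical-operator visitors below produce a !cir.bool,
  // and this helper widens it back to the destination integer type through
  // createBoolToInt (a bool_to_int cast); a bool destination passes through
  // unchanged.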
123
124 //===--------------------------------------------------------------------===//
125 // Visitor Methods
126 //===--------------------------------------------------------------------===//
127
128 mlir::Value Visit(Expr *e) {
129 return StmtVisitor<ScalarExprEmitter, mlir::Value>::Visit(e);
130 }
131
132 mlir::Value VisitStmt(Stmt *s) {
133 llvm_unreachable("Statement passed to ScalarExprEmitter");
134 }
135
136 mlir::Value VisitExpr(Expr *e) {
137 cgf.getCIRGenModule().errorNYI(
138 e->getSourceRange(), "scalar expression kind: ", e->getStmtClassName());
139 return {};
140 }
141
142 mlir::Value VisitPackIndexingExpr(PackIndexingExpr *e) {
143 return Visit(e->getSelectedExpr());
144 }
145
146 mlir::Value VisitParenExpr(ParenExpr *pe) { return Visit(pe->getSubExpr()); }
147
148 mlir::Value VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
149 return Visit(ge->getResultExpr());
150 }
151
152 /// Emits the address of the l-value, then loads and returns the result.
153 mlir::Value emitLoadOfLValue(const Expr *e) {
154 LValue lv = cgf.emitLValue(e);
155 // FIXME: add something akin to EmitLValueAlignmentAssumption(E, V);
156 return cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
157 }
158
159 mlir::Value VisitCoawaitExpr(CoawaitExpr *s) {
160 return cgf.emitCoawaitExpr(*s).getValue();
161 }
162
163 mlir::Value emitLoadOfLValue(LValue lv, SourceLocation loc) {
164 return cgf.emitLoadOfLValue(lv, loc).getValue();
165 }
166
167 // l-values
168 mlir::Value VisitDeclRefExpr(DeclRefExpr *e) {
169 if (CIRGenFunction::ConstantEmission constant = cgf.tryEmitAsConstant(e))
170 return cgf.emitScalarConstant(constant, e);
171
172 return emitLoadOfLValue(e);
173 }
174
175 mlir::Value VisitAddrLabelExpr(const AddrLabelExpr *e) {
176 auto func = cast<cir::FuncOp>(cgf.curFn);
177 auto blockInfoAttr = cir::BlockAddrInfoAttr::get(
178 &cgf.getMLIRContext(), func.getSymName(), e->getLabel()->getName());
179 return cir::BlockAddressOp::create(builder, cgf.getLoc(e->getSourceRange()),
180 cgf.convertType(e->getType()),
181 blockInfoAttr);
182 }
183
184 mlir::Value VisitIntegerLiteral(const IntegerLiteral *e) {
185 mlir::Type type = cgf.convertType(e->getType());
186 return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()),
187 cir::IntAttr::get(type, e->getValue()));
188 }
189
190 mlir::Value VisitFloatingLiteral(const FloatingLiteral *e) {
191 mlir::Type type = cgf.convertType(e->getType());
192 assert(mlir::isa<cir::FPTypeInterface>(type) &&
193 "expect floating-point type");
194 return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()),
195 cir::FPAttr::get(type, e->getValue()));
196 }
197
198 mlir::Value VisitCharacterLiteral(const CharacterLiteral *e) {
199 mlir::Type ty = cgf.convertType(e->getType());
200 auto init = cir::IntAttr::get(ty, e->getValue());
201 return cir::ConstantOp::create(builder, cgf.getLoc(e->getExprLoc()), init);
202 }
203
204 mlir::Value VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *e) {
205 return builder.getBool(e->getValue(), cgf.getLoc(e->getExprLoc()));
206 }
207
208 mlir::Value VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *e) {
209 if (e->getType()->isVoidType())
210 return {};
211
212 return emitNullValue(e->getType(), cgf.getLoc(e->getSourceRange()));
213 }
214
215 mlir::Value VisitGNUNullExpr(const GNUNullExpr *e) {
216 return emitNullValue(e->getType(), cgf.getLoc(e->getSourceRange()));
217 }
218
219 mlir::Value VisitOffsetOfExpr(OffsetOfExpr *e);
220
221 mlir::Value VisitOpaqueValueExpr(OpaqueValueExpr *e) {
222 if (e->isGLValue())
223 return emitLoadOfLValue(cgf.getOrCreateOpaqueLValueMapping(e),
224 e->getExprLoc());
225
226 // Otherwise, assume the mapping is the scalar directly.
227 return cgf.getOrCreateOpaqueRValueMapping(e).getValue();
228 }
229
230 mlir::Value VisitCastExpr(CastExpr *e);
231 mlir::Value VisitCallExpr(const CallExpr *e);
232
233 mlir::Value VisitStmtExpr(StmtExpr *e) {
234 CIRGenFunction::StmtExprEvaluation eval(cgf);
235 if (e->getType()->isVoidType()) {
236 (void)cgf.emitCompoundStmt(*e->getSubStmt());
237 return {};
238 }
239
240 Address retAlloca =
241 cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
242 (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca);
243
244 return cgf.emitLoadOfScalar(cgf.makeAddrLValue(retAlloca, e->getType()),
245 e->getExprLoc());
246 }
247
248 mlir::Value VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
249 ignoreResultAssign = false;
250
251 if (e->getBase()->getType()->isVectorType()) {
253
254 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
255 const mlir::Value vecValue = Visit(e->getBase());
256 const mlir::Value indexValue = Visit(e->getIdx());
257 return cir::VecExtractOp::create(cgf.builder, loc, vecValue, indexValue);
258 }
259 // Just load the lvalue formed by the subscript expression.
260 return emitLoadOfLValue(e);
261 }
262
263 mlir::Value VisitShuffleVectorExpr(ShuffleVectorExpr *e) {
264 if (e->getNumSubExprs() == 2) {
265 // The undocumented form of __builtin_shufflevector.
266 mlir::Value inputVec = Visit(e->getExpr(0));
267 mlir::Value indexVec = Visit(e->getExpr(1));
268 return cir::VecShuffleDynamicOp::create(
269 cgf.builder, cgf.getLoc(e->getSourceRange()), inputVec, indexVec);
270 }
271
272 mlir::Value vec1 = Visit(e->getExpr(0));
273 mlir::Value vec2 = Visit(e->getExpr(1));
274
275 // The documented form of __builtin_shufflevector, where the indices are
276 // a variable number of integer constants. The constants will be stored
277 // in an ArrayAttr.
278 SmallVector<mlir::Attribute, 8> indices;
279 for (unsigned i = 2; i < e->getNumSubExprs(); ++i) {
280 indices.push_back(
281 cir::IntAttr::get(cgf.builder.getSInt64Ty(),
282 e->getExpr(i)
283 ->EvaluateKnownConstInt(cgf.getContext())
284 .getSExtValue()));
285 }
286
287 return cir::VecShuffleOp::create(cgf.builder,
288 cgf.getLoc(e->getSourceRange()),
289 cgf.convertType(e->getType()), vec1, vec2,
290 cgf.builder.getArrayAttr(indices));
291 }
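  // Rough sketch of the two forms handled above (example calls, exact op
  // spelling approximate): __builtin_shufflevector(v, maskvec) maps to the
  // dynamic shuffle op built from the two visited operands, while
  // __builtin_shufflevector(a, b, 0, 2, ...) folds its constant indices into
  // an ArrayAttr of 64-bit cir::IntAttr and builds a VecShuffleOp.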
292
293 mlir::Value VisitConvertVectorExpr(ConvertVectorExpr *e) {
294 // __builtin_convertvector is an element-wise cast, and is implemented as a
295 // regular cast. The back end handles casts of vectors correctly.
296 return emitScalarConversion(Visit(e->getSrcExpr()),
297 e->getSrcExpr()->getType(), e->getType(),
298 e->getSourceRange().getBegin());
299 }
300
301 mlir::Value VisitExtVectorElementExpr(Expr *e) { return emitLoadOfLValue(e); }
302
303 mlir::Value VisitMemberExpr(MemberExpr *e);
304
305 mlir::Value VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
306 return emitLoadOfLValue(e);
307 }
308
309 mlir::Value VisitInitListExpr(InitListExpr *e);
310
311 mlir::Value VisitExplicitCastExpr(ExplicitCastExpr *e) {
312 return VisitCastExpr(e);
313 }
314
315 mlir::Value VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *e) {
316 return cgf.cgm.emitNullConstant(e->getType(),
317 cgf.getLoc(e->getSourceRange()));
318 }
319
320 /// Perform a pointer to boolean conversion.
321 mlir::Value emitPointerToBoolConversion(mlir::Value v, QualType qt) {
322 // TODO(cir): comparing the ptr to null is done when lowering CIR to LLVM.
323 // We might want to have a separate pass for these types of conversions.
324 return cgf.getBuilder().createPtrToBoolCast(v);
325 }
326
327 mlir::Value emitFloatToBoolConversion(mlir::Value src, mlir::Location loc) {
328 cir::BoolType boolTy = builder.getBoolTy();
329 return cir::CastOp::create(builder, loc, boolTy,
330 cir::CastKind::float_to_bool, src);
331 }
332
333 mlir::Value emitIntToBoolConversion(mlir::Value srcVal, mlir::Location loc) {
334 // Because of the type rules of C, we often end up computing a
335 // logical value, then zero extending it to int, then wanting it
336 // as a logical value again.
337 // TODO: optimize this common case here or leave it for later
338 // CIR passes?
339 cir::BoolType boolTy = builder.getBoolTy();
340 return cir::CastOp::create(builder, loc, boolTy, cir::CastKind::int_to_bool,
341 srcVal);
342 }
343
344 /// Convert the specified expression value to a boolean (!cir.bool) truth
345 /// value. This is equivalent to "Val != 0".
346 mlir::Value emitConversionToBool(mlir::Value src, QualType srcType,
347 mlir::Location loc) {
348 assert(srcType.isCanonical() && "EmitScalarConversion strips typedefs");
349
350 if (srcType->isRealFloatingType())
351 return emitFloatToBoolConversion(src, loc);
352
353 if (llvm::isa<MemberPointerType>(srcType)) {
354 cgf.getCIRGenModule().errorNYI(loc, "member pointer to bool conversion");
355 return builder.getFalse(loc);
356 }
357
358 if (srcType->isIntegerType())
359 return emitIntToBoolConversion(src, loc);
360
361 assert(::mlir::isa<cir::PointerType>(src.getType()));
362 return emitPointerToBoolConversion(src, srcType);
363 }
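  // In effect this is the CIR spelling of "src != 0": floating-point sources
  // go through float_to_bool, integers through int_to_bool, and pointers
  // through the ptr-to-bool cast above; the actual comparison against zero is
  // materialized later when CIR is lowered.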
364
365 // Emit a conversion from the specified type to the specified destination
366 // type, both of which are CIR scalar types.
367 struct ScalarConversionOpts {
368 bool treatBooleanAsSigned;
369 bool emitImplicitIntegerTruncationChecks;
370 bool emitImplicitIntegerSignChangeChecks;
371
372 ScalarConversionOpts()
373 : treatBooleanAsSigned(false),
374 emitImplicitIntegerTruncationChecks(false),
375 emitImplicitIntegerSignChangeChecks(false) {}
376
377 ScalarConversionOpts(clang::SanitizerSet sanOpts)
378 : treatBooleanAsSigned(false),
379 emitImplicitIntegerTruncationChecks(
380 sanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
381 emitImplicitIntegerSignChangeChecks(
382 sanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
383 };
384
385 // Conversion from bool, integral, or floating-point to integral or
386 // floating-point. Conversions involving other types are handled elsewhere.
387 // Conversion to bool is handled elsewhere because that's a comparison against
388 // zero, not a simple cast. This handles both individual scalars and vectors.
389 mlir::Value emitScalarCast(mlir::Value src, QualType srcType,
390 QualType dstType, mlir::Type srcTy,
391 mlir::Type dstTy, ScalarConversionOpts opts) {
392 assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
393 "Internal error: matrix types not handled by this function.");
394 assert(!(mlir::isa<mlir::IntegerType>(srcTy) ||
395 mlir::isa<mlir::IntegerType>(dstTy)) &&
396 "Obsolete code. Don't use mlir::IntegerType with CIR.");
397
398 mlir::Type fullDstTy = dstTy;
399 if (mlir::isa<cir::VectorType>(srcTy) &&
400 mlir::isa<cir::VectorType>(dstTy)) {
401 // Use the element types of the vectors to figure out the CastKind.
402 srcTy = mlir::dyn_cast<cir::VectorType>(srcTy).getElementType();
403 dstTy = mlir::dyn_cast<cir::VectorType>(dstTy).getElementType();
404 }
405
406 std::optional<cir::CastKind> castKind;
407
408 if (mlir::isa<cir::BoolType>(srcTy)) {
409 if (opts.treatBooleanAsSigned)
410 cgf.getCIRGenModule().errorNYI("signed bool");
411 if (cgf.getBuilder().isInt(dstTy))
412 castKind = cir::CastKind::bool_to_int;
413 else if (mlir::isa<cir::FPTypeInterface>(dstTy))
414 castKind = cir::CastKind::bool_to_float;
415 else
416 llvm_unreachable("Internal error: Cast to unexpected type");
417 } else if (cgf.getBuilder().isInt(srcTy)) {
418 if (cgf.getBuilder().isInt(dstTy))
419 castKind = cir::CastKind::integral;
420 else if (mlir::isa<cir::FPTypeInterface>(dstTy))
421 castKind = cir::CastKind::int_to_float;
422 else
423 llvm_unreachable("Internal error: Cast to unexpected type");
424 } else if (mlir::isa<cir::FPTypeInterface>(srcTy)) {
425 if (cgf.getBuilder().isInt(dstTy)) {
426 // If we can't recognize overflow as undefined behavior, assume that
427 // overflow saturates. This protects against normal optimizations if we
428 // are compiling with non-standard FP semantics.
429 if (!cgf.cgm.getCodeGenOpts().StrictFloatCastOverflow)
430 cgf.getCIRGenModule().errorNYI("strict float cast overflow");
432 castKind = cir::CastKind::float_to_int;
433 } else if (mlir::isa<cir::FPTypeInterface>(dstTy)) {
434 // TODO: split this to createFPExt/createFPTrunc
435 return builder.createFloatingCast(src, fullDstTy);
436 } else {
437 llvm_unreachable("Internal error: Cast to unexpected type");
438 }
439 } else {
440 llvm_unreachable("Internal error: Cast from unexpected type");
441 }
442
443 assert(castKind.has_value() && "Internal error: CastKind not set.");
444 return cir::CastOp::create(builder, src.getLoc(), fullDstTy, *castKind,
445 src);
446 }
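  // Summary of the CastKind selection above (element types for vectors):
  //   bool -> int : bool_to_int     bool -> fp : bool_to_float
  //   int  -> int : integral        int  -> fp : int_to_float
  //   fp   -> int : float_to_int    fp   -> fp : createFloatingCast
  // Any other combination hits llvm_unreachable.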
447
448 mlir::Value
449 VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
450 return Visit(e->getReplacement());
451 }
452
453 mlir::Value VisitVAArgExpr(VAArgExpr *ve) {
454 QualType ty = ve->getType();
455
456 if (ty->isVariablyModifiedType()) {
457 cgf.cgm.errorNYI(ve->getSourceRange(),
458 "variably modified types in varargs");
459 }
460
461 return cgf.emitVAArg(ve);
462 }
463
464 mlir::Value VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *e) {
465 return Visit(e->getSemanticForm());
466 }
467
468 mlir::Value VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *e);
469 mlir::Value
470 VisitAbstractConditionalOperator(const AbstractConditionalOperator *e);
471
472 // Unary Operators.
473 mlir::Value VisitUnaryPostDec(const UnaryOperator *e) {
474 LValue lv = cgf.emitLValue(e->getSubExpr());
475 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, false);
476 }
477 mlir::Value VisitUnaryPostInc(const UnaryOperator *e) {
478 LValue lv = cgf.emitLValue(e->getSubExpr());
479 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, false);
480 }
481 mlir::Value VisitUnaryPreDec(const UnaryOperator *e) {
482 LValue lv = cgf.emitLValue(e->getSubExpr());
483 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Dec, true);
484 }
485 mlir::Value VisitUnaryPreInc(const UnaryOperator *e) {
486 LValue lv = cgf.emitLValue(e->getSubExpr());
487 return emitScalarPrePostIncDec(e, lv, cir::UnaryOpKind::Inc, true);
488 }
489 mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
490 cir::UnaryOpKind kind, bool isPre) {
491 if (cgf.getLangOpts().OpenMP)
492 cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec OpenMP");
493
494 QualType type = e->getSubExpr()->getType();
495
496 mlir::Value value;
497 mlir::Value input;
498
499 if (type->getAs<AtomicType>()) {
500 cgf.cgm.errorNYI(e->getSourceRange(), "Atomic inc/dec");
501 // TODO(cir): This is not correct, but it will produce reasonable code
502 // until atomic operations are implemented.
503 value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
504 input = value;
505 } else {
506 value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getValue();
507 input = value;
508 }
509
510 // NOTE: When possible, more frequent cases are handled first.
511
512 // Special case of integer increment that we have to check first: bool++.
513 // Due to promotion rules, we get:
514 // bool++ -> bool = bool + 1
515 // -> bool = (int)bool + 1
516 // -> bool = ((int)bool + 1 != 0)
517 // An interesting aspect of this is that increment is always true.
518 // Decrement does not have this property.
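  // Example (hypothetical source): for `bool b; b++;` this branch simply
  // stores a constant true obtained from builder.getTrue instead of emitting
  // an add, because ((int)b + 1) != 0 holds for both values of b.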
519 if (kind == cir::UnaryOpKind::Inc && type->isBooleanType()) {
520 value = builder.getTrue(cgf.getLoc(e->getExprLoc()));
521 } else if (type->isIntegerType()) {
522 QualType promotedType;
523 [[maybe_unused]] bool canPerformLossyDemotionCheck = false;
524 if (cgf.getContext().isPromotableIntegerType(type)) {
525 promotedType = cgf.getContext().getPromotedIntegerType(type);
526 assert(promotedType != type && "Shouldn't promote to the same type.");
527 canPerformLossyDemotionCheck = true;
528 canPerformLossyDemotionCheck &=
529 cgf.getContext().getCanonicalType(type) !=
530 cgf.getContext().getCanonicalType(promotedType);
531 canPerformLossyDemotionCheck &=
532 type->isIntegerType() && promotedType->isIntegerType();
533
534 // TODO(cir): Currently, we store bitwidths in CIR types only for
535 // integers. This might also be required for other types.
536
537 assert(
538 (!canPerformLossyDemotionCheck ||
539 type->isSignedIntegerOrEnumerationType() ||
540 promotedType->isSignedIntegerOrEnumerationType() ||
541 mlir::cast<cir::IntType>(cgf.convertType(type)).getWidth() ==
542 mlir::cast<cir::IntType>(cgf.convertType(promotedType)).getWidth()) &&
543 "The following check expects that if we do promotion to different "
544 "underlying canonical type, at least one of the types (either "
545 "base or promoted) will be signed, or the bitwidths will match.");
546 }
547
549 if (e->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
550 value = emitIncDecConsiderOverflowBehavior(e, value, kind);
551 } else {
552 cir::UnaryOpKind kind =
553 e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
554 // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
555 value = emitUnaryOp(e, kind, input, /*nsw=*/false);
556 }
557 } else if (const PointerType *ptr = type->getAs<PointerType>()) {
558 QualType type = ptr->getPointeeType();
559 if (cgf.getContext().getAsVariableArrayType(type)) {
560 // VLA types don't have constant size.
561 cgf.cgm.errorNYI(e->getSourceRange(), "Pointer arithmetic on VLA");
562 return {};
563 } else if (type->isFunctionType()) {
564 // Arithmetic on function pointers (!) is just +-1.
565 cgf.cgm.errorNYI(e->getSourceRange(),
566 "Pointer arithmetic on function pointer");
567 return {};
568 } else {
569 // For everything else, we can just do a simple increment.
570 mlir::Location loc = cgf.getLoc(e->getSourceRange());
571 CIRGenBuilderTy &builder = cgf.getBuilder();
572 int amount = kind == cir::UnaryOpKind::Inc ? 1 : -1;
573 mlir::Value amt = builder.getSInt32(amount, loc);
575 value = builder.createPtrStride(loc, value, amt);
576 }
577 } else if (type->isVectorType()) {
578 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec vector");
579 return {};
580 } else if (type->isRealFloatingType()) {
582
583 if (type->isHalfType() &&
584 !cgf.getContext().getLangOpts().NativeHalfType) {
585 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec half");
586 return {};
587 }
588
589 if (mlir::isa<cir::SingleType, cir::DoubleType>(value.getType())) {
590 // Create the inc/dec operation.
591 // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
592 assert((kind == cir::UnaryOpKind::Inc ||
593 kind == cir::UnaryOpKind::Dec) && "Invalid UnaryOp kind");
594 value = emitUnaryOp(e, kind, value);
595 } else {
596 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fp type");
597 return {};
598 }
599 } else if (type->isFixedPointType()) {
600 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fixed point");
601 return {};
602 } else {
603 assert(type->castAs<ObjCObjectPointerType>());
604 cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec ObjectiveC pointer");
605 return {};
606 }
607
608 CIRGenFunction::SourceLocRAIIObject sourceloc{
609 cgf, cgf.getLoc(e->getSourceRange())};
610
611 // Store the updated result through the lvalue
612 if (lv.isBitField())
613 return cgf.emitStoreThroughBitfieldLValue(RValue::get(value), lv);
614 else
615 cgf.emitStoreThroughLValue(RValue::get(value), lv);
616
617 // If this is a postinc, return the value read from memory, otherwise use
618 // the updated value.
619 return isPre ? value : input;
620 }
621
622 mlir::Value emitIncDecConsiderOverflowBehavior(const UnaryOperator *e,
623 mlir::Value inVal,
624 cir::UnaryOpKind kind) {
625 assert((kind == cir::UnaryOpKind::Inc ||
626 kind == cir::UnaryOpKind::Dec) && "Invalid UnaryOp kind");
627 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
628 case LangOptions::SOB_Defined:
629 return emitUnaryOp(e, kind, inVal, /*nsw=*/false);
630 case LangOptions::SOB_Undefined:
632 return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
633 case LangOptions::SOB_Trapping:
634 if (!e->canOverflow())
635 return emitUnaryOp(e, kind, inVal, /*nsw=*/true);
636 cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec overflow SOB_Trapping");
637 return {};
638 }
639 llvm_unreachable("Unexpected signed overflow behavior kind");
640 }
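  // Illustrative mapping of language mode to the op emitted above:
  //   -fwrapv          -> SOB_Defined   -> inc/dec without nsw
  //   default C/C++    -> SOB_Undefined -> inc/dec with nsw
  //   -ftrapv          -> SOB_Trapping  -> nsw only when !canOverflow(),
  //                                        otherwise still NYI here.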
641
642 mlir::Value VisitUnaryAddrOf(const UnaryOperator *e) {
643 if (llvm::isa<MemberPointerType>(e->getType())) {
644 cgf.cgm.errorNYI(e->getSourceRange(), "Address of member pointer");
645 return builder.getNullPtr(cgf.convertType(e->getType()),
646 cgf.getLoc(e->getExprLoc()));
647 }
648
649 return cgf.emitLValue(e->getSubExpr()).getPointer();
650 }
651
652 mlir::Value VisitUnaryDeref(const UnaryOperator *e) {
653 if (e->getType()->isVoidType())
654 return Visit(e->getSubExpr()); // the actual value should be unused
655 return emitLoadOfLValue(e);
656 }
657
658 mlir::Value VisitUnaryPlus(const UnaryOperator *e) {
659 QualType promotionType = getPromotionType(e->getSubExpr()->getType());
660 mlir::Value result =
661 emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Plus, promotionType);
662 if (result && !promotionType.isNull())
663 return emitUnPromotedValue(result, e->getType());
664 return result;
665 }
666
667 mlir::Value VisitUnaryMinus(const UnaryOperator *e) {
668 QualType promotionType = getPromotionType(e->getSubExpr()->getType());
669 mlir::Value result =
670 emitUnaryPlusOrMinus(e, cir::UnaryOpKind::Minus, promotionType);
671 if (result && !promotionType.isNull())
672 return emitUnPromotedValue(result, e->getType());
673 return result;
674 }
675
676 mlir::Value emitUnaryPlusOrMinus(const UnaryOperator *e,
677 cir::UnaryOpKind kind,
678 QualType promotionType) {
679 ignoreResultAssign = false;
680 mlir::Value operand;
681 if (!promotionType.isNull())
682 operand = cgf.emitPromotedScalarExpr(e->getSubExpr(), promotionType);
683 else
684 operand = Visit(e->getSubExpr());
685
686 bool nsw =
687 kind == cir::UnaryOpKind::Minus && e->getType()->isSignedIntegerType();
688
689 // NOTE: LLVM codegen will lower this directly to either an FNeg
690 // or a Sub instruction. In CIR this will be handled later in LowerToLLVM.
691 return emitUnaryOp(e, kind, operand, nsw);
692 }
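  // Illustrative: both `+x` and `-x` become a single cir.unary here (Plus or
  // Minus), with nsw set only for Minus on signed integers; per the NOTE
  // above, the later LowerToLLVM step is expected to turn a floating-point
  // Minus into an fneg and an integer Minus into a subtraction.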
693
694 mlir::Value emitUnaryOp(const UnaryOperator *e, cir::UnaryOpKind kind,
695 mlir::Value input, bool nsw = false) {
696 return cir::UnaryOp::create(builder,
697 cgf.getLoc(e->getSourceRange().getBegin()),
698 input.getType(), kind, input, nsw);
699 }
700
701 mlir::Value VisitUnaryNot(const UnaryOperator *e) {
702 ignoreResultAssign = false;
703 mlir::Value op = Visit(e->getSubExpr());
704 return emitUnaryOp(e, cir::UnaryOpKind::Not, op);
705 }
706
707 mlir::Value VisitUnaryLNot(const UnaryOperator *e);
708
709 mlir::Value VisitUnaryReal(const UnaryOperator *e);
710 mlir::Value VisitUnaryImag(const UnaryOperator *e);
711 mlir::Value VisitRealImag(const UnaryOperator *e,
712 QualType promotionType = QualType());
713
714 mlir::Value VisitUnaryExtension(const UnaryOperator *e) {
715 return Visit(e->getSubExpr());
716 }
717
718 mlir::Value VisitCXXDefaultArgExpr(CXXDefaultArgExpr *dae) {
719 CIRGenFunction::CXXDefaultArgExprScope scope(cgf, dae);
720 return Visit(dae->getExpr());
721 }
722 mlir::Value VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
723 CIRGenFunction::CXXDefaultInitExprScope scope(cgf, die);
724 return Visit(die->getExpr());
725 }
726
727 mlir::Value VisitCXXThisExpr(CXXThisExpr *te) { return cgf.loadCXXThis(); }
728
729 mlir::Value VisitExprWithCleanups(ExprWithCleanups *e);
730 mlir::Value VisitCXXNewExpr(const CXXNewExpr *e) {
731 return cgf.emitCXXNewExpr(e);
732 }
733 mlir::Value VisitCXXDeleteExpr(const CXXDeleteExpr *e) {
734 cgf.emitCXXDeleteExpr(e);
735 return {};
736 }
737
738 mlir::Value VisitCXXThrowExpr(const CXXThrowExpr *e) {
739 cgf.emitCXXThrowExpr(e);
740 return {};
741 }
742
743 /// Emit a conversion from the specified type to the specified destination
744 /// type, both of which are CIR scalar types.
745 /// TODO: do we need ScalarConversionOpts here? Should be done in another
746 /// pass.
747 mlir::Value
748 emitScalarConversion(mlir::Value src, QualType srcType, QualType dstType,
749 SourceLocation loc,
750 ScalarConversionOpts opts = ScalarConversionOpts()) {
751 // All conversions involving fixed point types should be handled by the
752 // emitFixedPoint family functions. This is done to prevent bloating up
753 // this function more, and although fixed point numbers are represented by
754 // integers, we do not want to follow any logic that assumes they should be
755 // treated as integers.
756 // TODO(leonardchan): When necessary, add another if statement checking for
757 // conversions to fixed point types from other types.
759 if (srcType->isFixedPointType() || dstType->isFixedPointType()) {
760 cgf.getCIRGenModule().errorNYI(loc, "fixed point conversions");
761 return {};
762 }
763
764 srcType = srcType.getCanonicalType();
765 dstType = dstType.getCanonicalType();
766 if (srcType == dstType) {
767 if (opts.emitImplicitIntegerSignChangeChecks)
768 cgf.getCIRGenModule().errorNYI(loc,
769 "implicit integer sign change checks");
770 return src;
771 }
772
773 if (dstType->isVoidType())
774 return {};
775
776 mlir::Type mlirSrcType = src.getType();
777
778 // Handle conversions to bool first, they are special: comparisons against
779 // 0.
780 if (dstType->isBooleanType())
781 return emitConversionToBool(src, srcType, cgf.getLoc(loc));
782
783 mlir::Type mlirDstType = cgf.convertType(dstType);
784
785 if (srcType->isHalfType() &&
786 !cgf.getContext().getLangOpts().NativeHalfType) {
787 // Cast to FP using the intrinsic if the half type itself isn't supported.
788 if (mlir::isa<cir::FPTypeInterface>(mlirDstType)) {
789 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
790 cgf.getCIRGenModule().errorNYI(loc,
791 "cast via llvm.convert.from.fp16");
792 } else {
793 // Cast to other types through float, using either the intrinsic or
794 // FPExt, depending on whether the half type itself is supported (as
795 // opposed to operations on half, available with NativeHalfType).
796 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics())
797 cgf.getCIRGenModule().errorNYI(loc,
798 "cast via llvm.convert.from.fp16");
799 // FIXME(cir): For now let's pretend we shouldn't use the conversion
800 // intrinsics and insert a cast here unconditionally.
801 src = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, src,
802 cgf.floatTy);
803 srcType = cgf.getContext().FloatTy;
804 mlirSrcType = cgf.floatTy;
805 }
806 }
807
808 // TODO(cir): LLVM codegen ignores conversions like int -> uint;
809 // is there anything to be done for CIR here?
810 if (mlirSrcType == mlirDstType) {
811 if (opts.emitImplicitIntegerSignChangeChecks)
812 cgf.getCIRGenModule().errorNYI(loc,
813 "implicit integer sign change checks");
814 return src;
815 }
816
817 // Handle pointer conversions next: pointers can only be converted to/from
818 // other pointers and integers. Check for pointer types in terms of LLVM, as
819 // some native types (like Obj-C id) may map to a pointer type.
820 if (auto dstPT = dyn_cast<cir::PointerType>(mlirDstType)) {
821 cgf.getCIRGenModule().errorNYI(loc, "pointer casts");
822 return builder.getNullPtr(dstPT, src.getLoc());
823 }
824
825 if (isa<cir::PointerType>(mlirSrcType)) {
826 // Must be a ptr-to-int cast.
827 assert(isa<cir::IntType>(mlirDstType) && "not ptr->int?");
828 return builder.createPtrToInt(src, mlirDstType);
829 }
830
831 // A scalar can be splatted to an extended vector of the same element type
832 if (dstType->isExtVectorType() && !srcType->isVectorType()) {
833 // Sema should add casts to make sure that the source expression's type
834 // is the same as the vector's element type (sans qualifiers)
835 assert(dstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
836 srcType.getTypePtr() &&
837 "Splatted expr doesn't match with vector element type?");
838
839 cgf.getCIRGenModule().errorNYI(loc, "vector splatting");
840 return {};
841 }
842
843 if (srcType->isMatrixType() && dstType->isMatrixType()) {
844 cgf.getCIRGenModule().errorNYI(loc,
845 "matrix type to matrix type conversion");
846 return {};
847 }
848 assert(!srcType->isMatrixType() && !dstType->isMatrixType() &&
849 "Internal error: conversion between matrix type and scalar type");
850
851 // Finally, we have the arithmetic types or vectors of arithmetic types.
852 mlir::Value res = nullptr;
853 mlir::Type resTy = mlirDstType;
854
855 res = emitScalarCast(src, srcType, dstType, mlirSrcType, mlirDstType, opts);
856
857 if (mlirDstType != resTy) {
858 if (cgf.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
859 cgf.getCIRGenModule().errorNYI(loc, "cast via llvm.convert.to.fp16");
860 }
861 // FIXME(cir): For now we never use FP16 conversion intrinsics even if
862 // required by the target. Change that once this is implemented
863 res = builder.createCast(cgf.getLoc(loc), cir::CastKind::floating, res,
864 resTy);
865 }
866
867 if (opts.emitImplicitIntegerTruncationChecks)
868 cgf.getCIRGenModule().errorNYI(loc, "implicit integer truncation checks");
869
870 if (opts.emitImplicitIntegerSignChangeChecks)
871 cgf.getCIRGenModule().errorNYI(loc,
872 "implicit integer sign change checks");
873
874 return res;
875 }
876
877 BinOpInfo emitBinOps(const BinaryOperator *e,
878 QualType promotionType = QualType()) {
879 ignoreResultAssign = false;
880 BinOpInfo result;
881 result.lhs = cgf.emitPromotedScalarExpr(e->getLHS(), promotionType);
882 result.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionType);
883 if (!promotionType.isNull())
884 result.fullType = promotionType;
885 else
886 result.fullType = e->getType();
887 result.compType = result.fullType;
888 if (const auto *vecType = dyn_cast_or_null<VectorType>(result.fullType)) {
889 result.compType = vecType->getElementType();
890 }
891 result.opcode = e->getOpcode();
892 result.loc = e->getSourceRange();
893 // TODO(cir): Result.FPFeatures
895 result.e = e;
896 return result;
897 }
898
899 mlir::Value emitMul(const BinOpInfo &ops);
900 mlir::Value emitDiv(const BinOpInfo &ops);
901 mlir::Value emitRem(const BinOpInfo &ops);
902 mlir::Value emitAdd(const BinOpInfo &ops);
903 mlir::Value emitSub(const BinOpInfo &ops);
904 mlir::Value emitShl(const BinOpInfo &ops);
905 mlir::Value emitShr(const BinOpInfo &ops);
906 mlir::Value emitAnd(const BinOpInfo &ops);
907 mlir::Value emitXor(const BinOpInfo &ops);
908 mlir::Value emitOr(const BinOpInfo &ops);
909
910 LValue emitCompoundAssignLValue(
911 const CompoundAssignOperator *e,
912 mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &),
913 mlir::Value &result);
914 mlir::Value
915 emitCompoundAssign(const CompoundAssignOperator *e,
916 mlir::Value (ScalarExprEmitter::*f)(const BinOpInfo &));
917
918 // TODO(cir): Candidate to be in a common AST helper between CIR and LLVM
919 // codegen.
920 QualType getPromotionType(QualType ty) {
921 const clang::ASTContext &ctx = cgf.getContext();
922 if (auto *complexTy = ty->getAs<ComplexType>()) {
923 QualType elementTy = complexTy->getElementType();
924 if (elementTy.UseExcessPrecision(ctx))
925 return ctx.getComplexType(ctx.FloatTy);
926 }
927
928 if (ty.UseExcessPrecision(cgf.getContext())) {
929 if (auto *vt = ty->getAs<VectorType>()) {
930 unsigned numElements = vt->getNumElements();
931 return ctx.getVectorType(ctx.FloatTy, numElements, vt->getVectorKind());
932 }
933 return cgf.getContext().FloatTy;
934 }
935
936 return QualType();
937 }
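  // Example of the excess-precision promotion computed here: when _Float16
  // arithmetic is evaluated in float (UseExcessPrecision), a _Float16 operand
  // promotes to float, a _Float16 vector to a float vector of the same size,
  // and a _Complex _Float16 to _Complex float; a null QualType means no
  // promotion is needed.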
938
939// Binary operators and binary compound assignment operators.
940#define HANDLEBINOP(OP) \
941 mlir::Value VisitBin##OP(const BinaryOperator *e) { \
942 QualType promotionTy = getPromotionType(e->getType()); \
943 auto result = emit##OP(emitBinOps(e, promotionTy)); \
944 if (result && !promotionTy.isNull()) \
945 result = emitUnPromotedValue(result, e->getType()); \
946 return result; \
947 } \
948 mlir::Value VisitBin##OP##Assign(const CompoundAssignOperator *e) { \
949 return emitCompoundAssign(e, &ScalarExprEmitter::emit##OP); \
950 }
951
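  // For instance, HANDLEBINOP(Add) expands to VisitBinAdd, which promotes the
  // operands via getPromotionType, calls emitAdd, and un-promotes the result,
  // plus VisitBinAddAssign, which routes the compound form through
  // emitCompoundAssign with emitAdd as the combiner.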
952 HANDLEBINOP(Mul)
953 HANDLEBINOP(Div)
954 HANDLEBINOP(Rem)
955 HANDLEBINOP(Add)
956 HANDLEBINOP(Sub)
957 HANDLEBINOP(Shl)
958 HANDLEBINOP(Shr)
959 HANDLEBINOP(And)
960 HANDLEBINOP(Xor)
961 HANDLEBINOP(Or)
962#undef HANDLEBINOP
963
964 mlir::Value emitCmp(const BinaryOperator *e) {
965 ignoreResultAssign = false;
966 const mlir::Location loc = cgf.getLoc(e->getExprLoc());
967 mlir::Value result;
968 QualType lhsTy = e->getLHS()->getType();
969 QualType rhsTy = e->getRHS()->getType();
970
971 auto clangCmpToCIRCmp =
972 [](clang::BinaryOperatorKind clangCmp) -> cir::CmpOpKind {
973 switch (clangCmp) {
974 case BO_LT:
975 return cir::CmpOpKind::lt;
976 case BO_GT:
977 return cir::CmpOpKind::gt;
978 case BO_LE:
979 return cir::CmpOpKind::le;
980 case BO_GE:
981 return cir::CmpOpKind::ge;
982 case BO_EQ:
983 return cir::CmpOpKind::eq;
984 case BO_NE:
985 return cir::CmpOpKind::ne;
986 default:
987 llvm_unreachable("unsupported comparison kind for cir.cmp");
988 }
989 };
990
991 cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
992 if (lhsTy->getAs<MemberPointerType>()) {
994 assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
995 mlir::Value lhs = cgf.emitScalarExpr(e->getLHS());
996 mlir::Value rhs = cgf.emitScalarExpr(e->getRHS());
997 result = builder.createCompare(loc, kind, lhs, rhs);
998 } else if (!lhsTy->isAnyComplexType() && !rhsTy->isAnyComplexType()) {
999 BinOpInfo boInfo = emitBinOps(e);
1000 mlir::Value lhs = boInfo.lhs;
1001 mlir::Value rhs = boInfo.rhs;
1002
1003 if (lhsTy->isVectorType()) {
1004 if (!e->getType()->isVectorType()) {
1005 // If AltiVec, the comparison results in a numeric type, so we use
1006 // intrinsics comparing vectors and giving 0 or 1 as a result
1007 cgf.cgm.errorNYI(loc, "AltiVec comparison");
1008 } else {
1009 // Other kinds of vectors. Element-wise comparison returning
1010 // a vector.
1011 result = cir::VecCmpOp::create(builder, cgf.getLoc(boInfo.loc),
1012 cgf.convertType(boInfo.fullType), kind,
1013 boInfo.lhs, boInfo.rhs);
1014 }
1015 } else if (boInfo.isFixedPointOp()) {
1017 cgf.cgm.errorNYI(loc, "fixed point comparisons");
1018 result = builder.getBool(false, loc);
1019 } else {
1020 // integers and pointers
1021 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers &&
1022 mlir::isa<cir::PointerType>(lhs.getType()) &&
1023 mlir::isa<cir::PointerType>(rhs.getType())) {
1024 cgf.cgm.errorNYI(loc, "strict vtable pointer comparisons");
1025 }
1026
1027 cir::CmpOpKind kind = clangCmpToCIRCmp(e->getOpcode());
1028 result = builder.createCompare(loc, kind, lhs, rhs);
1029 }
1030 } else {
1031 // Complex Comparison: can only be an equality comparison.
1032 assert(e->getOpcode() == BO_EQ || e->getOpcode() == BO_NE);
1033
1034 BinOpInfo boInfo = emitBinOps(e);
1035 result = cir::CmpOp::create(builder, loc, kind, boInfo.lhs, boInfo.rhs);
1036 }
1037
1038 return emitScalarConversion(result, cgf.getContext().BoolTy, e->getType(),
1039 e->getExprLoc());
1040 }
1041
1042// Comparisons.
1043#define VISITCOMP(CODE) \
1044 mlir::Value VisitBin##CODE(const BinaryOperator *E) { return emitCmp(E); }
1045 VISITCOMP(LT)
1046 VISITCOMP(GT)
1047 VISITCOMP(LE)
1048 VISITCOMP(GE)
1049 VISITCOMP(EQ)
1050 VISITCOMP(NE)
1051#undef VISITCOMP
1052
1053 mlir::Value VisitBinAssign(const BinaryOperator *e) {
1054 const bool ignore = std::exchange(ignoreResultAssign, false);
1055
1056 mlir::Value rhs;
1057 LValue lhs;
1058
1059 switch (e->getLHS()->getType().getObjCLifetime()) {
1060 case Qualifiers::OCL_Strong:
1061 case Qualifiers::OCL_Weak:
1062 case Qualifiers::OCL_Autoreleasing:
1063 case Qualifiers::OCL_ExplicitNone:
1065 break;
1066 case Qualifiers::OCL_None:
1067 // __block variables need to have the rhs evaluated first, plus this
1068 // should improve codegen just a little.
1069 rhs = Visit(e->getRHS());
1071 // TODO(cir): This needs to be emitCheckedLValue() once we support
1072 // sanitizers
1073 lhs = cgf.emitLValue(e->getLHS());
1074
1075 // Store the value into the LHS. Bit-fields are handled specially because
1076 // the result is altered by the store, i.e., [C99 6.5.16p1]
1077 // 'An assignment expression has the value of the left operand after the
1078 // assignment...'.
1079 if (lhs.isBitField()) {
1080 rhs = cgf.emitStoreThroughBitfieldLValue(RValue::get(rhs), lhs);
1081 } else {
1082 cgf.emitNullabilityCheck(lhs, rhs, e->getExprLoc());
1083 CIRGenFunction::SourceLocRAIIObject sourceloc{
1084 cgf, cgf.getLoc(e->getSourceRange())};
1085 cgf.emitStoreThroughLValue(RValue::get(rhs), lhs);
1086 }
1087 }
1088
1089 // If the result is clearly ignored, return now.
1090 if (ignore)
1091 return nullptr;
1092
1093 // The result of an assignment in C is the assigned r-value.
1094 if (!cgf.getLangOpts().CPlusPlus)
1095 return rhs;
1096
1097 // If the lvalue is non-volatile, return the computed value of the
1098 // assignment.
1099 if (!lhs.isVolatile())
1100 return rhs;
1101
1102 // Otherwise, reload the value.
1103 return emitLoadOfLValue(lhs, e->getExprLoc());
1104 }
1105
1106 mlir::Value VisitBinComma(const BinaryOperator *e) {
1107 cgf.emitIgnoredExpr(e->getLHS());
1108 // NOTE: We don't need to EnsureInsertPoint() like LLVM codegen.
1109 return Visit(e->getRHS());
1110 }
1111
1112 mlir::Value VisitBinLAnd(const clang::BinaryOperator *e) {
1113 if (e->getType()->isVectorType()) {
1114 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1115 auto vecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
1116 mlir::Value zeroValue = builder.getNullValue(vecTy.getElementType(), loc);
1117 SmallVector<mlir::Value, 16> elements(vecTy.getSize(), zeroValue);
1118 auto zeroVec = cir::VecCreateOp::create(builder, loc, vecTy, elements);
1119
1120 mlir::Value lhs = Visit(e->getLHS());
1121 mlir::Value rhs = Visit(e->getRHS());
1122
1123 auto cmpOpKind = cir::CmpOpKind::ne;
1124 lhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, lhs, zeroVec);
1125 rhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, rhs, zeroVec);
1126 mlir::Value vecAnd = builder.createAnd(loc, lhs, rhs);
1127 return builder.createIntCast(vecAnd, vecTy);
1128 }
1129
1131 mlir::Type resTy = cgf.convertType(e->getType());
1132 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1133
1134 CIRGenFunction::ConditionalEvaluation eval(cgf);
1135
1136 mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
1137 auto resOp = cir::TernaryOp::create(
1138 builder, loc, lhsCondV, /*trueBuilder=*/
1139 [&](mlir::OpBuilder &b, mlir::Location loc) {
1140 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1141 b.getInsertionBlock()};
1142 cgf.curLexScope->setAsTernary();
1143 mlir::Value res = cgf.evaluateExprAsBool(e->getRHS());
1144 lexScope.forceCleanup();
1145 cir::YieldOp::create(b, loc, res);
1146 },
1147 /*falseBuilder*/
1148 [&](mlir::OpBuilder &b, mlir::Location loc) {
1149 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1150 b.getInsertionBlock()};
1151 cgf.curLexScope->setAsTernary();
1152 auto res = cir::ConstantOp::create(b, loc, builder.getFalseAttr());
1153 cir::YieldOp::create(b, loc, res.getRes());
1154 });
1155 return maybePromoteBoolResult(resOp.getResult(), resTy);
1156 }
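  // Rough shape of the scalar lowering above for `a && b` (CIR syntax
  // approximate): the LHS is evaluated as a bool and fed to a cir.ternary
  // whose true region evaluates and yields the RHS as a bool and whose false
  // region yields a constant false; the resulting !cir.bool is then widened to
  // the expression's integer type by maybePromoteBoolResult.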
1157
1158 mlir::Value VisitBinLOr(const clang::BinaryOperator *e) {
1159 if (e->getType()->isVectorType()) {
1160 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1161 auto vecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
1162 mlir::Value zeroValue = builder.getNullValue(vecTy.getElementType(), loc);
1163 SmallVector<mlir::Value, 16> elements(vecTy.getSize(), zeroValue);
1164 auto zeroVec = cir::VecCreateOp::create(builder, loc, vecTy, elements);
1165
1166 mlir::Value lhs = Visit(e->getLHS());
1167 mlir::Value rhs = Visit(e->getRHS());
1168
1169 auto cmpOpKind = cir::CmpOpKind::ne;
1170 lhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, lhs, zeroVec);
1171 rhs = cir::VecCmpOp::create(builder, loc, vecTy, cmpOpKind, rhs, zeroVec);
1172 mlir::Value vecOr = builder.createOr(loc, lhs, rhs);
1173 return builder.createIntCast(vecOr, vecTy);
1174 }
1175
1177 mlir::Type resTy = cgf.convertType(e->getType());
1178 mlir::Location loc = cgf.getLoc(e->getExprLoc());
1179
1180 CIRGenFunction::ConditionalEvaluation eval(cgf);
1181
1182 mlir::Value lhsCondV = cgf.evaluateExprAsBool(e->getLHS());
1183 auto resOp = cir::TernaryOp::create(
1184 builder, loc, lhsCondV, /*trueBuilder=*/
1185 [&](mlir::OpBuilder &b, mlir::Location loc) {
1186 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1187 b.getInsertionBlock()};
1188 cgf.curLexScope->setAsTernary();
1189 auto res = cir::ConstantOp::create(b, loc, builder.getTrueAttr());
1190 cir::YieldOp::create(b, loc, res.getRes());
1191 },
1192 /*falseBuilder*/
1193 [&](mlir::OpBuilder &b, mlir::Location loc) {
1194 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1195 b.getInsertionBlock()};
1196 cgf.curLexScope->setAsTernary();
1197 mlir::Value res = cgf.evaluateExprAsBool(e->getRHS());
1198 lexScope.forceCleanup();
1199 cir::YieldOp::create(b, loc, res);
1200 });
1201
1202 return maybePromoteBoolResult(resOp.getResult(), resTy);
1203 }
1204
1205 mlir::Value VisitAtomicExpr(AtomicExpr *e) {
1206 return cgf.emitAtomicExpr(e).getValue();
1207 }
1208};
1209
1210LValue ScalarExprEmitter::emitCompoundAssignLValue(
1211 const CompoundAssignOperator *e,
1212 mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &),
1213 mlir::Value &result) {
1214 if (e->getComputationResultType()->isAnyComplexType())
1215 return cgf.emitScalarCompoundAssignWithComplex(e, result);
1216
1217 QualType lhsTy = e->getLHS()->getType();
1218 BinOpInfo opInfo;
1219
1220 // Emit the RHS first. __block variables need to have the rhs evaluated
1221 // first, plus this should improve codegen a little.
1222
1223 QualType promotionTypeCR = getPromotionType(e->getComputationResultType());
1224 if (promotionTypeCR.isNull())
1225 promotionTypeCR = e->getComputationResultType();
1226
1227 QualType promotionTypeLHS = getPromotionType(e->getComputationLHSType());
1228 QualType promotionTypeRHS = getPromotionType(e->getRHS()->getType());
1229
1230 if (!promotionTypeRHS.isNull())
1231 opInfo.rhs = cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS);
1232 else
1233 opInfo.rhs = Visit(e->getRHS());
1234
1235 opInfo.fullType = promotionTypeCR;
1236 opInfo.compType = opInfo.fullType;
1237 if (const auto *vecType = dyn_cast_or_null<VectorType>(opInfo.fullType))
1238 opInfo.compType = vecType->getElementType();
1239 opInfo.opcode = e->getOpcode();
1240 opInfo.fpfeatures = e->getFPFeaturesInEffect(cgf.getLangOpts());
1241 opInfo.e = e;
1242 opInfo.loc = e->getSourceRange();
1243
1244 // Load/convert the LHS
1245 LValue lhsLV = cgf.emitLValue(e->getLHS());
1246
1247 if (lhsTy->getAs<AtomicType>()) {
1248 cgf.cgm.errorNYI(result.getLoc(), "atomic lvalue assign");
1249 return LValue();
1250 }
1251
1252 opInfo.lhs = emitLoadOfLValue(lhsLV, e->getExprLoc());
1253
1254 CIRGenFunction::SourceLocRAIIObject sourceloc{
1255 cgf, cgf.getLoc(e->getSourceRange())};
1256 SourceLocation loc = e->getExprLoc();
1257 if (!promotionTypeLHS.isNull())
1258 opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy, promotionTypeLHS, loc);
1259 else
1260 opInfo.lhs = emitScalarConversion(opInfo.lhs, lhsTy,
1261 e->getComputationLHSType(), loc);
1262
1263 // Expand the binary operator.
1264 result = (this->*func)(opInfo);
1265
1266 // Convert the result back to the LHS type,
1267 // potentially with Implicit Conversion sanitizer check.
1268 result = emitScalarConversion(result, promotionTypeCR, lhsTy, loc,
1269 ScalarConversionOpts(cgf.sanOpts));
1270
1271 // Store the result value into the LHS lvalue. Bit-fields are handled
1272 // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
1273 // 'An assignment expression has the value of the left operand after the
1274 // assignment...'.
1275 if (lhsLV.isBitField())
1276 cgf.emitStoreThroughBitfieldLValue(RValue::get(result), lhsLV);
1277 else
1278 cgf.emitStoreThroughLValue(RValue::get(result), lhsLV);
1279
1280 if (cgf.getLangOpts().OpenMP)
1281 cgf.cgm.errorNYI(e->getSourceRange(), "openmp");
1282
1283 return lhsLV;
1284}
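// Worked example (hypothetical source): for `short s; double d; s += d;` the
// computation result type is double, so the loaded `s` is converted
// short -> double, the chosen emitter (emitAdd) combines it with the RHS, the
// result is converted back double -> short with sanitizer-aware options, and
// the narrowed value is stored through the original lvalue; the lvalue itself
// is what the caller receives.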
1285
1286mlir::Value ScalarExprEmitter::emitComplexToScalarConversion(mlir::Location loc,
1287 mlir::Value value,
1288 CastKind kind,
1289 QualType destTy) {
1290 cir::CastKind castOpKind;
1291 switch (kind) {
1292 case CK_FloatingComplexToReal:
1293 castOpKind = cir::CastKind::float_complex_to_real;
1294 break;
1295 case CK_IntegralComplexToReal:
1296 castOpKind = cir::CastKind::int_complex_to_real;
1297 break;
1298 case CK_FloatingComplexToBoolean:
1299 castOpKind = cir::CastKind::float_complex_to_bool;
1300 break;
1301 case CK_IntegralComplexToBoolean:
1302 castOpKind = cir::CastKind::int_complex_to_bool;
1303 break;
1304 default:
1305 llvm_unreachable("invalid complex-to-scalar cast kind");
1306 }
1307
1308 return builder.createCast(loc, castOpKind, value, cgf.convertType(destTy));
1309}
1310
1311mlir::Value ScalarExprEmitter::emitPromoted(const Expr *e,
1312 QualType promotionType) {
1313 e = e->IgnoreParens();
1314 if (const auto *bo = dyn_cast<BinaryOperator>(e)) {
1315 switch (bo->getOpcode()) {
1316#define HANDLE_BINOP(OP) \
1317 case BO_##OP: \
1318 return emit##OP(emitBinOps(bo, promotionType));
1319 HANDLE_BINOP(Add)
1320 HANDLE_BINOP(Sub)
1321 HANDLE_BINOP(Mul)
1322 HANDLE_BINOP(Div)
1323#undef HANDLE_BINOP
1324 default:
1325 break;
1326 }
1327 } else if (const auto *uo = dyn_cast<UnaryOperator>(e)) {
1328 switch (uo->getOpcode()) {
1329 case UO_Imag:
1330 case UO_Real:
1331 return VisitRealImag(uo, promotionType);
1332 case UO_Minus:
1333 return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Minus, promotionType);
1334 case UO_Plus:
1335 return emitUnaryPlusOrMinus(uo, cir::UnaryOpKind::Plus, promotionType);
1336 default:
1337 break;
1338 }
1339 }
1340 mlir::Value result = Visit(const_cast<Expr *>(e));
1341 if (result) {
1342 if (!promotionType.isNull())
1343 return emitPromotedValue(result, promotionType);
1344 return emitUnPromotedValue(result, e->getType());
1345 }
1346 return result;
1347}
1348
1349mlir::Value ScalarExprEmitter::emitCompoundAssign(
1350 const CompoundAssignOperator *e,
1351 mlir::Value (ScalarExprEmitter::*func)(const BinOpInfo &)) {
1352
1353 bool ignore = std::exchange(ignoreResultAssign, false);
1354 mlir::Value rhs;
1355 LValue lhs = emitCompoundAssignLValue(e, func, rhs);
1356
1357 // If the result is clearly ignored, return now.
1358 if (ignore)
1359 return {};
1360
1361 // The result of an assignment in C is the assigned r-value.
1362 if (!cgf.getLangOpts().CPlusPlus)
1363 return rhs;
1364
1365 // If the lvalue is non-volatile, return the computed value of the assignment.
1366 if (!lhs.isVolatile())
1367 return rhs;
1368
1369 // Otherwise, reload the value.
1370 return emitLoadOfLValue(lhs, e->getExprLoc());
1371}
1372
1373mlir::Value ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *e) {
1374 mlir::Location scopeLoc = cgf.getLoc(e->getSourceRange());
1375 mlir::OpBuilder &builder = cgf.builder;
1376
1377 auto scope = cir::ScopeOp::create(
1378 builder, scopeLoc,
1379 /*scopeBuilder=*/
1380 [&](mlir::OpBuilder &b, mlir::Type &yieldTy, mlir::Location loc) {
1381 CIRGenFunction::LexicalScope lexScope{cgf, loc,
1382 builder.getInsertionBlock()};
1383 mlir::Value scopeYieldVal = Visit(e->getSubExpr());
1384 if (scopeYieldVal) {
1385 // Defend against dominance problems caused by jumps out of expression
1386 // evaluation through the shared cleanup block.
1387 lexScope.forceCleanup();
1388 cir::YieldOp::create(builder, loc, scopeYieldVal);
1389 yieldTy = scopeYieldVal.getType();
1390 }
1391 });
1392
1393 return scope.getNumResults() > 0 ? scope->getResult(0) : nullptr;
1394}
1395
1396} // namespace
1397
1398LValue
1399CIRGenFunction::emitCompoundAssignmentLValue(const CompoundAssignOperator *e) {
1400 ScalarExprEmitter emitter(*this, builder);
1401 mlir::Value result;
1402 switch (e->getOpcode()) {
1403#define COMPOUND_OP(Op) \
1404 case BO_##Op##Assign: \
1405 return emitter.emitCompoundAssignLValue(e, &ScalarExprEmitter::emit##Op, \
1406 result)
1407 COMPOUND_OP(Mul);
1408 COMPOUND_OP(Div);
1409 COMPOUND_OP(Rem);
1410 COMPOUND_OP(Add);
1411 COMPOUND_OP(Sub);
1412 COMPOUND_OP(Shl);
1413 COMPOUND_OP(Shr);
1414 COMPOUND_OP(And);
1415 COMPOUND_OP(Xor);
1416 COMPOUND_OP(Or);
1417#undef COMPOUND_OP
1418
1419 case BO_PtrMemD:
1420 case BO_PtrMemI:
1421 case BO_Mul:
1422 case BO_Div:
1423 case BO_Rem:
1424 case BO_Add:
1425 case BO_Sub:
1426 case BO_Shl:
1427 case BO_Shr:
1428 case BO_LT:
1429 case BO_GT:
1430 case BO_LE:
1431 case BO_GE:
1432 case BO_EQ:
1433 case BO_NE:
1434 case BO_Cmp:
1435 case BO_And:
1436 case BO_Xor:
1437 case BO_Or:
1438 case BO_LAnd:
1439 case BO_LOr:
1440 case BO_Assign:
1441 case BO_Comma:
1442 llvm_unreachable("Not valid compound assignment operators");
1443 }
1444 llvm_unreachable("Unhandled compound assignment operator");
1445}
1446
1447/// Emit the computation of the specified expression of scalar type.
1448mlir::Value CIRGenFunction::emitScalarExpr(const Expr *e,
1449 bool ignoreResultAssign) {
1450 assert(e && hasScalarEvaluationKind(e->getType()) &&
1451 "Invalid scalar expression to emit");
1452
1453 return ScalarExprEmitter(*this, builder, ignoreResultAssign)
1454 .Visit(const_cast<Expr *>(e));
1455}
1456
1457mlir::Value CIRGenFunction::emitPromotedScalarExpr(const Expr *e,
1458 QualType promotionType) {
1459 if (!promotionType.isNull())
1460 return ScalarExprEmitter(*this, builder).emitPromoted(e, promotionType);
1461 return ScalarExprEmitter(*this, builder).Visit(const_cast<Expr *>(e));
1462}
1463
1464[[maybe_unused]] static bool mustVisitNullValue(const Expr *e) {
1465 // If a null pointer expression's type is the C++0x nullptr_t and
1466 // the expression is not a simple literal, it must be evaluated
1467 // for its potential side effects.
1468 if (isa<CXXNullPtrLiteralExpr>(e))
1469 return false;
1470 return e->getType()->isNullPtrType();
1471}
1472
1473/// If \p e is a widened promoted integer, get its base (unpromoted) type.
1474static std::optional<QualType>
1475getUnwidenedIntegerType(const ASTContext &astContext, const Expr *e) {
1476 const Expr *base = e->IgnoreImpCasts();
1477 if (e == base)
1478 return std::nullopt;
1479
1480 QualType baseTy = base->getType();
1481 if (!astContext.isPromotableIntegerType(baseTy) ||
1482 astContext.getTypeSize(baseTy) >= astContext.getTypeSize(e->getType()))
1483 return std::nullopt;
1484
1485 return baseTy;
1486}
1487
1488/// Check if \p e is a widened promoted integer.
1489[[maybe_unused]] static bool isWidenedIntegerOp(const ASTContext &astContext,
1490 const Expr *e) {
1491 return getUnwidenedIntegerType(astContext, e).has_value();
1492}
1493
1494/// Check if we can skip the overflow check for \p Op.
1495[[maybe_unused]] static bool canElideOverflowCheck(const ASTContext &astContext,
1496 const BinOpInfo &op) {
1497 assert((isa<UnaryOperator>(op.e) || isa<BinaryOperator>(op.e)) &&
1498 "Expected a unary or binary operator");
1499
1500 // If the binop has constant inputs and we can prove there is no overflow,
1501 // we can elide the overflow check.
1502 if (!op.mayHaveIntegerOverflow())
1503 return true;
1504
1505 // If a unary op has a widened operand, the op cannot overflow.
1506 if (const auto *uo = dyn_cast<UnaryOperator>(op.e))
1507 return !uo->canOverflow();
1508
1509 // We usually don't need overflow checks for binops with widened operands.
1510 // Multiplication with promoted unsigned operands is a special case.
1511 const auto *bo = cast<BinaryOperator>(op.e);
1512 std::optional<QualType> optionalLHSTy =
1513 getUnwidenedIntegerType(astContext, bo->getLHS());
1514 if (!optionalLHSTy)
1515 return false;
1516
1517 std::optional<QualType> optionalRHSTy =
1518 getUnwidenedIntegerType(astContext, bo->getRHS());
1519 if (!optionalRHSTy)
1520 return false;
1521
1522 QualType lhsTy = *optionalLHSTy;
1523 QualType rhsTy = *optionalRHSTy;
1524
1525 // This is the simple case: binops without unsigned multiplication, and with
1526 // widened operands. No overflow check is needed here.
1527 if ((op.opcode != BO_Mul && op.opcode != BO_MulAssign) ||
1528 !lhsTy->isUnsignedIntegerType() || !rhsTy->isUnsignedIntegerType())
1529 return true;
1530
1531 // For unsigned multiplication the overflow check can be elided if either one
1532 // of the unpromoted types are less than half the size of the promoted type.
1533 unsigned promotedSize = astContext.getTypeSize(op.e->getType());
1534 return (2 * astContext.getTypeSize(lhsTy)) < promotedSize ||
1535 (2 * astContext.getTypeSize(rhsTy)) < promotedSize;
1536}
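// Arithmetic behind the final rule, assuming 8-bit char, 16-bit short, and
// 32-bit int: for `unsigned char * unsigned char` promoted to int, 2 * 8 = 16
// is less than 32, so the unpromoted product always fits and the overflow
// check can be elided; for `unsigned short * unsigned short`, 2 * 16 = 32 is
// not less than 32, so the check must stay.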
1537
1538/// Emit pointer + index arithmetic.
1539static mlir::Value emitPointerArithmetic(CIRGenFunction &cgf,
1540 const BinOpInfo &op,
1541 bool isSubtraction) {
1542 // Must have binary (not unary) expr here. Unary pointer
1543 // increment/decrement doesn't use this path.
1544 const BinaryOperator *expr = cast<BinaryOperator>(op.e);
1545
1546 mlir::Value pointer = op.lhs;
1547 Expr *pointerOperand = expr->getLHS();
1548 mlir::Value index = op.rhs;
1549 Expr *indexOperand = expr->getRHS();
1550
1551 // In the case of subtraction, the FE has ensured that the LHS is always the
1552 // pointer. However, addition can have the pointer on either side. We will
1553 // always have a pointer operand and an integer operand, so if the LHS wasn't
1554 // a pointer, we need to swap our values.
1555 if (!isSubtraction && !mlir::isa<cir::PointerType>(pointer.getType())) {
1556 std::swap(pointer, index);
1557 std::swap(pointerOperand, indexOperand);
1558 }
1559 assert(mlir::isa<cir::PointerType>(pointer.getType()) &&
1560 "Need a pointer operand");
1561 assert(mlir::isa<cir::IntType>(index.getType()) && "Need an integer operand");
1562
1563 // Some versions of glibc and gcc use idioms (particularly in their malloc
1564 // routines) that add a pointer-sized integer (known to be a pointer value)
1565 // to a null pointer in order to cast the value back to an integer or as
1566 // part of a pointer alignment algorithm. This is undefined behavior, but
1567 // we'd like to be able to compile programs that use it.
1568 //
1569 // Normally, we'd generate a GEP with a null-pointer base here in response
1570 // to that code, but it's also UB to dereference a pointer created that
1571 // way. Instead (as an acknowledged hack to tolerate the idiom) we will
1572 // generate a direct cast of the integer value to a pointer.
1573 //
1574 // The idiom (p = nullptr + N) is not met if any of the following are true:
1575 //
1576 // The operation is subtraction.
1577 // The index is not pointer-sized.
1578 // The pointer type is not byte-sized.
1579 //
1580 if (BinaryOperator::isNullPointerArithmeticExtension(
1581 cgf.getContext(), op.opcode, expr->getLHS(), expr->getRHS()))
1582 return cgf.getBuilder().createIntToPtr(index, pointer.getType());
1583
1584 // Unlike classic LLVM codegen, the ABI bits for index sizes are handled
1585 // during LLVM lowering.
1586
1587 // If this is subtraction, negate the index.
1588 if (isSubtraction)
1589 index = builder.createNeg(index);
1590
1592
1593 const PointerType *pointerType =
1594 pointerOperand->getType()->getAs<PointerType>();
1595 if (!pointerType) {
1596 cgf.cgm.errorNYI("Objective-C:pointer arithmetic with non-pointer type");
1597 return nullptr;
1598 }
1599
1600 QualType elementType = pointerType->getPointeeType();
1601 if (cgf.getContext().getAsVariableArrayType(elementType)) {
1602 cgf.cgm.errorNYI("variable array type");
1603 return nullptr;
1604 }
1605
1606 if (elementType->isVoidType() || elementType->isFunctionType()) {
1607 cgf.cgm.errorNYI("void* or function pointer arithmetic");
1608 return nullptr;
1609 }
1610
1612 return cir::PtrStrideOp::create(cgf.getBuilder(),
1613 cgf.getLoc(op.e->getExprLoc()),
1614 pointer.getType(), pointer, index);
1615}
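// Editor's note (illustrative, not part of the upstream file): for
// `int *p; p + i` the pointer may appear on either side of the '+', so the
// operands are swapped if needed and a cir::PtrStrideOp of `p` by `i` is
// produced (with `i` negated first when this is `p - i`). The glibc/gcc
// idiom `(char *)nullptr + n` is instead emitted as a plain int-to-pointer
// cast, as described above; index-width (ABI) adjustments are deferred to
// LLVM lowering.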
1616
1617mlir::Value ScalarExprEmitter::emitMul(const BinOpInfo &ops) {
1618 const mlir::Location loc = cgf.getLoc(ops.loc);
1619 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1620 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1621 case LangOptions::SOB_Defined:
1622 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1623 return builder.createMul(loc, ops.lhs, ops.rhs);
1624 [[fallthrough]];
1625 case LangOptions::SOB_Undefined:
1626 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1627 return builder.createNSWMul(loc, ops.lhs, ops.rhs);
1628 [[fallthrough]];
1629 case LangOptions::SOB_Trapping:
1630 if (canElideOverflowCheck(cgf.getContext(), ops))
1631 return builder.createNSWMul(loc, ops.lhs, ops.rhs);
1632 cgf.cgm.errorNYI("sanitizers");
1633 }
1634 }
1635 if (ops.fullType->isConstantMatrixType()) {
1637 cgf.cgm.errorNYI("matrix types");
1638 return nullptr;
1639 }
1640 if (ops.compType->isUnsignedIntegerType() &&
1641 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1642 !canElideOverflowCheck(cgf.getContext(), ops))
1643 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1644
1645 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1647 return builder.createFMul(loc, ops.lhs, ops.rhs);
1648 }
1649
1650 if (ops.isFixedPointOp()) {
1652 cgf.cgm.errorNYI("fixed point");
1653 return nullptr;
1654 }
1655
1656 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1657 cgf.convertType(ops.fullType), cir::BinOpKind::Mul,
1658 ops.lhs, ops.rhs);
1659}
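// Editor's note (illustrative, not part of the upstream file): the
// signed-overflow switch above (repeated in emitAdd and emitSub) maps the
// usual driver modes onto CIR ops: -fwrapv (SOB_Defined) gives a plain mul,
// the default UB mode (SOB_Undefined) gives a no-signed-wrap mul, and
// -ftrapv (SOB_Trapping) still uses NSW when canElideOverflowCheck() proves
// the result cannot overflow; the trapping and sanitizer paths are NYI.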
1660mlir::Value ScalarExprEmitter::emitDiv(const BinOpInfo &ops) {
1661 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1662 cgf.convertType(ops.fullType), cir::BinOpKind::Div,
1663 ops.lhs, ops.rhs);
1664}
1665mlir::Value ScalarExprEmitter::emitRem(const BinOpInfo &ops) {
1666 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1667 cgf.convertType(ops.fullType), cir::BinOpKind::Rem,
1668 ops.lhs, ops.rhs);
1669}
1670
1671mlir::Value ScalarExprEmitter::emitAdd(const BinOpInfo &ops) {
1672 if (mlir::isa<cir::PointerType>(ops.lhs.getType()) ||
1673 mlir::isa<cir::PointerType>(ops.rhs.getType()))
1674 return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/false);
1675
1676 const mlir::Location loc = cgf.getLoc(ops.loc);
1677 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1678 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1679 case LangOptions::SOB_Defined:
1680 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1681 return builder.createAdd(loc, ops.lhs, ops.rhs);
1682 [[fallthrough]];
1683 case LangOptions::SOB_Undefined:
1684 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1685 return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
1686 [[fallthrough]];
1687 case LangOptions::SOB_Trapping:
1688 if (canElideOverflowCheck(cgf.getContext(), ops))
1689 return builder.createNSWAdd(loc, ops.lhs, ops.rhs);
1690 cgf.cgm.errorNYI("sanitizers");
1691 }
1692 }
1693 if (ops.fullType->isConstantMatrixType()) {
1695 cgf.cgm.errorNYI("matrix types");
1696 return nullptr;
1697 }
1698
1699 if (ops.compType->isUnsignedIntegerType() &&
1700 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1701 !canElideOverflowCheck(cgf.getContext(), ops))
1702 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1703
1704 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1706 return builder.createFAdd(loc, ops.lhs, ops.rhs);
1707 }
1708
1709 if (ops.isFixedPointOp()) {
1711 cgf.cgm.errorNYI("fixed point");
1712 return {};
1713 }
1714
1715 return cir::BinOp::create(builder, loc, cgf.convertType(ops.fullType),
1716 cir::BinOpKind::Add, ops.lhs, ops.rhs);
1717}
1718
1719mlir::Value ScalarExprEmitter::emitSub(const BinOpInfo &ops) {
1720 const mlir::Location loc = cgf.getLoc(ops.loc);
1721 // The LHS is always a pointer if either side is.
1722 if (!mlir::isa<cir::PointerType>(ops.lhs.getType())) {
1723 if (ops.compType->isSignedIntegerOrEnumerationType()) {
1724 switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
1725 case LangOptions::SOB_Defined: {
1726 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1727 return builder.createSub(loc, ops.lhs, ops.rhs);
1728 [[fallthrough]];
1729 }
1730 case LangOptions::SOB_Undefined:
1731 if (!cgf.sanOpts.has(SanitizerKind::SignedIntegerOverflow))
1732 return builder.createNSWSub(loc, ops.lhs, ops.rhs);
1733 [[fallthrough]];
1734 case LangOptions::SOB_Trapping:
1735 if (canElideOverflowCheck(cgf.getContext(), ops))
1736 return builder.createNSWSub(loc, ops.lhs, ops.rhs);
1737 cgf.cgm.errorNYI("sanitizers");
1738 }
1739 }
1740
1741 if (ops.fullType->isConstantMatrixType()) {
1743 cgf.cgm.errorNYI("matrix types");
1744 return nullptr;
1745 }
1746
1747 if (ops.compType->isUnsignedIntegerType() &&
1748 cgf.sanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
1749 !canElideOverflowCheck(cgf.getContext(), ops))
1750 cgf.cgm.errorNYI("unsigned int overflow sanitizer");
1751
1752 if (cir::isFPOrVectorOfFPType(ops.lhs.getType())) {
1754 return builder.createFSub(loc, ops.lhs, ops.rhs);
1755 }
1756
1757 if (ops.isFixedPointOp()) {
1759 cgf.cgm.errorNYI("fixed point");
1760 return {};
1761 }
1762
1763 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1764 cgf.convertType(ops.fullType),
1765 cir::BinOpKind::Sub, ops.lhs, ops.rhs);
1766 }
1767
1768 // If the RHS is not a pointer, then we have normal pointer
1769 // arithmetic.
1770 if (!mlir::isa<cir::PointerType>(ops.rhs.getType()))
1771 return emitPointerArithmetic(cgf, ops, /*isSubtraction=*/true);
1772
1773 // Otherwise, this is a pointer subtraction
1774
1775 // Do the raw subtraction part.
1776 //
1777 // TODO(cir): note for LLVM lowering out of this; when expanding this into
1778 // LLVM we shall take VLA's, division by element size, etc.
1779 //
1780 // See more in `EmitSub` in CGExprScalar.cpp.
1782 return cir::PtrDiffOp::create(builder, cgf.getLoc(ops.loc), cgf.ptrDiffTy,
1783 ops.lhs, ops.rhs);
1784}
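// Editor's note (illustrative, not part of the upstream file): for
// `int *p, *q; p - q` the code above emits a raw cir::PtrDiffOp of type
// ptrDiffTy; dividing by sizeof(int) (and any VLA handling) is deferred to
// LLVM lowering, as the TODO above notes.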
1785
1786mlir::Value ScalarExprEmitter::emitShl(const BinOpInfo &ops) {
1787 // TODO: This misses out on the sanitizer check below.
1788 if (ops.isFixedPointOp()) {
1790 cgf.cgm.errorNYI("fixed point");
1791 return {};
1792 }
1793
1794 // CIR accepts shifts between different types, meaning nothing special needs
1795 // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type;
1796 // the RHS is promoted or truncated to the same size as the LHS during lowering.
1797
1798 bool sanitizeSignedBase = cgf.sanOpts.has(SanitizerKind::ShiftBase) &&
1799 ops.compType->hasSignedIntegerRepresentation() &&
1800 !cgf.getLangOpts().isSignedOverflowDefined() &&
1801 !cgf.getLangOpts().CPlusPlus20;
1802 bool sanitizeUnsignedBase =
1803 cgf.sanOpts.has(SanitizerKind::UnsignedShiftBase) &&
1804 ops.compType->hasUnsignedIntegerRepresentation();
1805 bool sanitizeBase = sanitizeSignedBase || sanitizeUnsignedBase;
1806 bool sanitizeExponent = cgf.sanOpts.has(SanitizerKind::ShiftExponent);
1807
1808 // OpenCL 6.3j: shift values are effectively % word size of LHS.
1809 if (cgf.getLangOpts().OpenCL)
1810 cgf.cgm.errorNYI("opencl");
1811 else if ((sanitizeBase || sanitizeExponent) &&
1812 mlir::isa<cir::IntType>(ops.lhs.getType()))
1813 cgf.cgm.errorNYI("sanitizers");
1814
1815 return builder.createShiftLeft(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
1816}
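// Editor's note (illustrative, not part of the upstream file): the signed
// ShiftBase check above is limited to pre-C++20 modes because C++20 defines
// the result of a signed left shift as E1 * 2^E2 modulo 2^N, making the
// base-value check unnecessary; the ShiftExponent check still applies.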
1817
1818mlir::Value ScalarExprEmitter::emitShr(const BinOpInfo &ops) {
1819 // TODO: This misses out on the sanitizer check below.
1820 if (ops.isFixedPointOp()) {
1822 cgf.cgm.errorNYI("fixed point");
1823 return {};
1824 }
1825
1826 // CIR accepts shifts between different types, meaning nothing special needs
1827 // to be done here. OTOH, LLVM requires the LHS and RHS to be the same type;
1828 // the RHS is promoted or truncated to the same size as the LHS during lowering.
1829
1830 // OpenCL 6.3j: shift values are effectively % word size of LHS.
1831 if (cgf.getLangOpts().OpenCL)
1832 cgf.cgm.errorNYI("opencl");
1833 else if (cgf.sanOpts.has(SanitizerKind::ShiftExponent) &&
1834 mlir::isa<cir::IntType>(ops.lhs.getType()))
1835 cgf.cgm.errorNYI("sanitizers");
1836
1837 // Note that we don't need to distinguish unsigned treatment at this
1838 // point since it will be handled later by LLVM lowering.
1839 return builder.createShiftRight(cgf.getLoc(ops.loc), ops.lhs, ops.rhs);
1840}
1841
1842mlir::Value ScalarExprEmitter::emitAnd(const BinOpInfo &ops) {
1843 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1844 cgf.convertType(ops.fullType), cir::BinOpKind::And,
1845 ops.lhs, ops.rhs);
1846}
1847mlir::Value ScalarExprEmitter::emitXor(const BinOpInfo &ops) {
1848 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1849 cgf.convertType(ops.fullType), cir::BinOpKind::Xor,
1850 ops.lhs, ops.rhs);
1851}
1852mlir::Value ScalarExprEmitter::emitOr(const BinOpInfo &ops) {
1853 return cir::BinOp::create(builder, cgf.getLoc(ops.loc),
1854 cgf.convertType(ops.fullType), cir::BinOpKind::Or,
1855 ops.lhs, ops.rhs);
1856}
1857
1858// Emit code for an explicit or implicit cast. Implicit
1859// casts have to handle a broader range of conversions than explicit
1860// casts, as they handle things like function to ptr-to-function decay
1861// etc.
1862mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) {
1863 Expr *subExpr = ce->getSubExpr();
1864 QualType destTy = ce->getType();
1865 CastKind kind = ce->getCastKind();
1866
1867 // These cases are generally not written to ignore the result of evaluating
1868 // their sub-expressions, so we clear this now.
1869 ignoreResultAssign = false;
1870
1871 switch (kind) {
1872 case clang::CK_Dependent:
1873 llvm_unreachable("dependent cast kind in CIR gen!");
1874 case clang::CK_BuiltinFnToFnPtr:
1875 llvm_unreachable("builtin functions are handled elsewhere");
1876
1877 case CK_CPointerToObjCPointerCast:
1878 case CK_BlockPointerToObjCPointerCast:
1879 case CK_AnyPointerToBlockPointerCast:
1880 case CK_BitCast: {
1881 mlir::Value src = Visit(const_cast<Expr *>(subExpr));
1882 mlir::Type dstTy = cgf.convertType(destTy);
1883
1885
1886 if (cgf.sanOpts.has(SanitizerKind::CFIUnrelatedCast))
1887 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1888 "sanitizer support");
1889
1890 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
1891 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1892 "strict vtable pointers");
1893
1894 // Update heapallocsite metadata when there is an explicit pointer cast.
1896
1897 // If Src is a fixed vector and Dst is a scalable vector, and both have the
1898 // same element type, use the llvm.vector.insert intrinsic to perform the
1899 // bitcast.
1901
1902 // If Src is a scalable vector and Dst is a fixed vector, and both have the
1903 // same element type, use the llvm.vector.extract intrinsic to perform the
1904 // bitcast.
1906
1907 // Perform VLAT <-> VLST bitcast through memory.
1908 // TODO: since the llvm.experimental.vector.{insert,extract} intrinsics
1909 // require the element types of the vectors to be the same, we
1910 // need to keep this around for bitcasts between VLAT <-> VLST where
1911 // the element types of the vectors are not the same, until we figure
1912 // out a better way of doing these casts.
1914
1915 return cgf.getBuilder().createBitcast(cgf.getLoc(subExpr->getSourceRange()),
1916 src, dstTy);
1917 }
1918 case CK_AddressSpaceConversion: {
1919 Expr::EvalResult result;
1920 if (subExpr->EvaluateAsRValue(result, cgf.getContext()) &&
1921 result.Val.isNullPointer()) {
1922 // If e has side effect, it is emitted even if its final result is a
1923 // null pointer. In that case, a DCE pass should be able to
1924 // eliminate the useless instructions emitted during translating E.
1925 if (result.HasSideEffects)
1926 Visit(subExpr);
1927 return cgf.cgm.emitNullConstant(destTy,
1928 cgf.getLoc(subExpr->getExprLoc()));
1929 }
1930
1931 clang::QualType srcTy = subExpr->IgnoreImpCasts()->getType();
1932 if (srcTy->isPointerType() || srcTy->isReferenceType())
1933 srcTy = srcTy->getPointeeType();
1934
1935 clang::LangAS srcLangAS = srcTy.getAddressSpace();
1936 cir::TargetAddressSpaceAttr subExprAS;
1937 if (clang::isTargetAddressSpace(srcLangAS))
1938 subExprAS = cir::toCIRTargetAddressSpace(cgf.getMLIRContext(), srcLangAS);
1939 else
1940 cgf.cgm.errorNYI(subExpr->getSourceRange(),
1941 "non-target address space conversion");
1942 // Since the target may map different address spaces in the AST to the same
1943 // address space, an address space conversion may end up as a bitcast.
1944 return cgf.cgm.getTargetCIRGenInfo().performAddrSpaceCast(
1945 cgf, Visit(subExpr), subExprAS, convertType(destTy));
1946 }
1947
1948 case CK_AtomicToNonAtomic: {
1949 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
1950 "CastExpr: ", ce->getCastKindName());
1951 mlir::Location loc = cgf.getLoc(subExpr->getSourceRange());
1952 return cgf.createDummyValue(loc, destTy);
1953 }
1954 case CK_NonAtomicToAtomic:
1955 case CK_UserDefinedConversion:
1956 return Visit(const_cast<Expr *>(subExpr));
1957 case CK_NoOp: {
1958 auto v = Visit(const_cast<Expr *>(subExpr));
1959 if (v) {
1960 // CK_NoOp can model a pointer qualification conversion, which can remove
1961 // an array bound and change the IR type.
1962 // FIXME: Once pointee types are removed from IR, remove this.
1963 mlir::Type t = cgf.convertType(destTy);
1964 if (t != v.getType())
1965 cgf.getCIRGenModule().errorNYI("pointer qualification conversion");
1966 }
1967 return v;
1968 }
1969 case CK_IntegralToPointer: {
1970 mlir::Type destCIRTy = cgf.convertType(destTy);
1971 mlir::Value src = Visit(const_cast<Expr *>(subExpr));
1972
1973 // Properly resize by casting to an int of the same size as the pointer.
1974 // Clang's IntegralToPointer includes 'bool' as the source, but in CIR
1975 // 'bool' is not an integral type. So check the source type to get the
1976 // correct CIR conversion.
1977 mlir::Type middleTy = cgf.cgm.getDataLayout().getIntPtrType(destCIRTy);
1978 mlir::Value middleVal = builder.createCast(
1979 subExpr->getType()->isBooleanType() ? cir::CastKind::bool_to_int
1980 : cir::CastKind::integral,
1981 src, middleTy);
1982
1983 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers) {
1984 cgf.cgm.errorNYI(subExpr->getSourceRange(),
1985 "IntegralToPointer: strict vtable pointers");
1986 return {};
1987 }
1988
1989 return builder.createIntToPtr(middleVal, destCIRTy);
1990 }
1991
1992 case CK_BaseToDerived: {
1993 const CXXRecordDecl *derivedClassDecl = destTy->getPointeeCXXRecordDecl();
1994 assert(derivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
1995 Address base = cgf.emitPointerWithAlignment(subExpr);
1996 Address derived = cgf.getAddressOfDerivedClass(
1997 cgf.getLoc(ce->getSourceRange()), base, derivedClassDecl, ce->path(),
1998 cgf.shouldNullCheckClassCastValue(ce));
1999
2000 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2001 // performed and the object is not of the derived type.
2003
2004 return cgf.getAsNaturalPointerTo(derived, ce->getType()->getPointeeType());
2005 }
2006 case CK_UncheckedDerivedToBase:
2007 case CK_DerivedToBase: {
2008 // The EmitPointerWithAlignment path does this fine; just discard
2009 // the alignment.
2010 return cgf.getAsNaturalPointerTo(cgf.emitPointerWithAlignment(ce),
2011 ce->getType()->getPointeeType());
2012 }
2013 case CK_Dynamic: {
2014 Address v = cgf.emitPointerWithAlignment(subExpr);
2015 const auto *dce = cast<CXXDynamicCastExpr>(ce);
2016 return cgf.emitDynamicCast(v, dce);
2017 }
2018 case CK_ArrayToPointerDecay:
2019 return cgf.emitArrayToPointerDecay(subExpr).getPointer();
2020
2021 case CK_NullToPointer: {
2022 if (mustVisitNullValue(subExpr))
2023 cgf.emitIgnoredExpr(subExpr);
2024
2025 // Note that DestTy is used as the MLIR type instead of a custom
2026 // nullptr type.
2027 mlir::Type ty = cgf.convertType(destTy);
2028 return builder.getNullPtr(ty, cgf.getLoc(subExpr->getExprLoc()));
2029 }
2030
2031 case CK_LValueToRValue:
2032 assert(cgf.getContext().hasSameUnqualifiedType(subExpr->getType(), destTy));
2033 assert(subExpr->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2034 return Visit(const_cast<Expr *>(subExpr));
2035
2036 case CK_IntegralCast: {
2037 ScalarConversionOpts opts;
2038 if (auto *ice = dyn_cast<ImplicitCastExpr>(ce)) {
2039 if (!ice->isPartOfExplicitCast())
2040 opts = ScalarConversionOpts(cgf.sanOpts);
2041 }
2042 return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
2043 ce->getExprLoc(), opts);
2044 }
2045
2046 case CK_FloatingComplexToReal:
2047 case CK_IntegralComplexToReal:
2048 case CK_FloatingComplexToBoolean:
2049 case CK_IntegralComplexToBoolean: {
2050 mlir::Value value = cgf.emitComplexExpr(subExpr);
2051 return emitComplexToScalarConversion(cgf.getLoc(ce->getExprLoc()), value,
2052 kind, destTy);
2053 }
2054
2055 case CK_FloatingRealToComplex:
2056 case CK_FloatingComplexCast:
2057 case CK_IntegralRealToComplex:
2058 case CK_IntegralComplexCast:
2059 case CK_IntegralComplexToFloatingComplex:
2060 case CK_FloatingComplexToIntegralComplex:
2061 llvm_unreachable("scalar cast to non-scalar value");
2062
2063 case CK_PointerToIntegral: {
2064 assert(!destTy->isBooleanType() && "bool should use PointerToBool");
2065 if (cgf.cgm.getCodeGenOpts().StrictVTablePointers)
2066 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2067 "strict vtable pointers");
2068 return builder.createPtrToInt(Visit(subExpr), cgf.convertType(destTy));
2069 }
2070 case CK_ToVoid:
2071 cgf.emitIgnoredExpr(subExpr);
2072 return {};
2073
2074 case CK_IntegralToFloating:
2075 case CK_FloatingToIntegral:
2076 case CK_FloatingCast:
2077 case CK_FixedPointToFloating:
2078 case CK_FloatingToFixedPoint: {
2079 if (kind == CK_FixedPointToFloating || kind == CK_FloatingToFixedPoint) {
2080 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2081 "fixed point casts");
2082 return {};
2083 }
2085 return emitScalarConversion(Visit(subExpr), subExpr->getType(), destTy,
2086 ce->getExprLoc());
2087 }
2088
2089 case CK_IntegralToBoolean:
2090 return emitIntToBoolConversion(Visit(subExpr),
2091 cgf.getLoc(ce->getSourceRange()));
2092
2093 case CK_PointerToBoolean:
2094 return emitPointerToBoolConversion(Visit(subExpr), subExpr->getType());
2095 case CK_FloatingToBoolean:
2096 return emitFloatToBoolConversion(Visit(subExpr),
2097 cgf.getLoc(subExpr->getExprLoc()));
2098 case CK_MemberPointerToBoolean: {
2099 mlir::Value memPtr = Visit(subExpr);
2100 return builder.createCast(cgf.getLoc(ce->getSourceRange()),
2101 cir::CastKind::member_ptr_to_bool, memPtr,
2102 cgf.convertType(destTy));
2103 }
2104
2105 case CK_VectorSplat: {
2106 // Create a vector object and fill all elements with the same scalar value.
2107 assert(destTy->isVectorType() && "CK_VectorSplat to non-vector type");
2108 return cir::VecSplatOp::create(builder,
2109 cgf.getLoc(subExpr->getSourceRange()),
2110 cgf.convertType(destTy), Visit(subExpr));
2111 }
2112 case CK_FunctionToPointerDecay:
2113 return cgf.emitLValue(subExpr).getPointer();
2114
2115 default:
2116 cgf.getCIRGenModule().errorNYI(subExpr->getSourceRange(),
2117 "CastExpr: ", ce->getCastKindName());
2118 }
2119 return {};
2120}
2121
2122mlir::Value ScalarExprEmitter::VisitCallExpr(const CallExpr *e) {
2123 if (e->getCallReturnType(cgf.getContext())->isReferenceType())
2124 return emitLoadOfLValue(e);
2125
2126 auto v = cgf.emitCallExpr(e).getValue();
2128 return v;
2129}
2130
2131mlir::Value ScalarExprEmitter::VisitMemberExpr(MemberExpr *e) {
2132 // TODO(cir): The classic codegen calls tryEmitAsConstant() here. Folding
2133 // constants sounds like work for MLIR optimizers, but we'll keep an assertion
2134 // for now.
2135 assert(!cir::MissingFeatures::tryEmitAsConstant());
2136 Expr::EvalResult result;
2137 if (e->EvaluateAsInt(result, cgf.getContext(), Expr::SE_AllowSideEffects)) {
2138 llvm::APSInt value = result.Val.getInt();
2139 cgf.emitIgnoredExpr(e->getBase());
2140 return builder.getConstInt(cgf.getLoc(e->getExprLoc()), value);
2141 }
2142 return emitLoadOfLValue(e);
2143}
2144
2145mlir::Value ScalarExprEmitter::VisitInitListExpr(InitListExpr *e) {
2146 const unsigned numInitElements = e->getNumInits();
2147
2148 [[maybe_unused]] const bool ignore = std::exchange(ignoreResultAssign, false);
2149 assert((ignore == false ||
2150 (numInitElements == 0 && e->getType()->isVoidType())) &&
2151 "init list ignored");
2152
2153 if (e->hadArrayRangeDesignator()) {
2154 cgf.cgm.errorNYI(e->getSourceRange(), "ArrayRangeDesignator");
2155 return {};
2156 }
2157
2158 if (e->getType()->isVectorType()) {
2159 const auto vectorType =
2160 mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
2161
2162 SmallVector<mlir::Value, 16> elements;
2163 for (Expr *init : e->inits()) {
2164 elements.push_back(Visit(init));
2165 }
2166
2167 // Zero-initialize any remaining values.
2168 if (numInitElements < vectorType.getSize()) {
2169 const mlir::Value zeroValue = cgf.getBuilder().getNullValue(
2170 vectorType.getElementType(), cgf.getLoc(e->getSourceRange()));
2171 std::fill_n(std::back_inserter(elements),
2172 vectorType.getSize() - numInitElements, zeroValue);
2173 }
2174
2175 return cir::VecCreateOp::create(cgf.getBuilder(),
2176 cgf.getLoc(e->getSourceRange()), vectorType,
2177 elements);
2178 }
2179
2180 // C++11 value-initialization for the scalar.
2181 if (numInitElements == 0)
2182 return emitNullValue(e->getType(), cgf.getLoc(e->getExprLoc()));
2183
2184 return Visit(e->getInit(0));
2185}
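// Editor's note (illustrative, not part of the upstream file): for a GCC
// vector initializer such as
//   typedef int v4i __attribute__((vector_size(16)));
//   v4i v = {1, 2};
// the two explicit initializers become the leading elements of the
// cir::VecCreateOp and the remaining lanes are filled with zero by the
// std::fill_n above.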
2186
2187mlir::Value CIRGenFunction::emitScalarConversion(mlir::Value src,
2188 QualType srcTy, QualType dstTy,
2189 SourceLocation loc) {
2190 assert(CIRGenFunction::hasScalarEvaluationKind(srcTy) &&
2191 CIRGenFunction::hasScalarEvaluationKind(dstTy) &&
2192 "Invalid scalar expression to emit");
2193 return ScalarExprEmitter(*this, builder)
2194 .emitScalarConversion(src, srcTy, dstTy, loc);
2195}
2196
2197mlir::Value CIRGenFunction::emitComplexToScalarConversion(mlir::Value src,
2198 QualType srcTy,
2199 QualType dstTy,
2200 SourceLocation loc) {
2201 assert(srcTy->isAnyComplexType() && hasScalarEvaluationKind(dstTy) &&
2202 "Invalid complex -> scalar conversion");
2203
2204 QualType complexElemTy = srcTy->castAs<ComplexType>()->getElementType();
2205 if (dstTy->isBooleanType()) {
2206 auto kind = complexElemTy->isFloatingType()
2207 ? cir::CastKind::float_complex_to_bool
2208 : cir::CastKind::int_complex_to_bool;
2209 return builder.createCast(getLoc(loc), kind, src, convertType(dstTy));
2210 }
2211
2212 auto kind = complexElemTy->isFloatingType()
2213 ? cir::CastKind::float_complex_to_real
2214 : cir::CastKind::int_complex_to_real;
2215 mlir::Value real =
2216 builder.createCast(getLoc(loc), kind, src, convertType(complexElemTy));
2217 return emitScalarConversion(real, complexElemTy, dstTy, loc);
2218}
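// Editor's note (illustrative, not part of the upstream file): given
// `_Complex float z;`, a `(bool)z` conversion is emitted directly as a
// float_complex_to_bool cast, while `(int)z` first extracts the real part
// with float_complex_to_real and then performs the ordinary float -> int
// scalar conversion.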
2219
2220mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) {
2221 // Perform vector logical not on comparison with zero vector.
2222 if (e->getType()->isVectorType() &&
2223 e->getType()->castAs<VectorType>()->getVectorKind() ==
2224 VectorKind::Generic) {
2225 mlir::Value oper = Visit(e->getSubExpr());
2226 mlir::Location loc = cgf.getLoc(e->getExprLoc());
2227 auto operVecTy = mlir::cast<cir::VectorType>(oper.getType());
2228 auto exprVecTy = mlir::cast<cir::VectorType>(cgf.convertType(e->getType()));
2229 mlir::Value zeroVec = builder.getNullValue(operVecTy, loc);
2230 return cir::VecCmpOp::create(builder, loc, exprVecTy, cir::CmpOpKind::eq,
2231 oper, zeroVec);
2232 }
2233
2234 // Compare operand to zero.
2235 mlir::Value boolVal = cgf.evaluateExprAsBool(e->getSubExpr());
2236
2237 // Invert value.
2238 boolVal = builder.createNot(boolVal);
2239
2240 // ZExt result to the expr type.
2241 return maybePromoteBoolResult(boolVal, cgf.convertType(e->getType()));
2242}
2243
2244mlir::Value ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *e) {
2245 // Try folding the offsetof to a constant.
2246 Expr::EvalResult evalResult;
2247 if (e->EvaluateAsInt(evalResult, cgf.getContext())) {
2248 mlir::Type type = cgf.convertType(e->getType());
2249 llvm::APSInt value = evalResult.Val.getInt();
2250 return builder.getConstAPInt(cgf.getLoc(e->getExprLoc()), type, value);
2251 }
2252
2254 e->getSourceRange(),
2255 "ScalarExprEmitter::VisitOffsetOfExpr Can't eval expr as int");
2256 return {};
2257}
2258
2259mlir::Value ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *e) {
2260 QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
2261 mlir::Value result = VisitRealImag(e, promotionTy);
2262 if (result && !promotionTy.isNull())
2263 result = emitUnPromotedValue(result, e->getType());
2264 return result;
2265}
2266
2267mlir::Value ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *e) {
2268 QualType promotionTy = getPromotionType(e->getSubExpr()->getType());
2269 mlir::Value result = VisitRealImag(e, promotionTy);
2270 if (result && !promotionTy.isNull())
2271 result = emitUnPromotedValue(result, e->getType());
2272 return result;
2273}
2274
2275mlir::Value ScalarExprEmitter::VisitRealImag(const UnaryOperator *e,
2276 QualType promotionTy) {
2277 assert((e->getOpcode() == clang::UO_Real ||
2278 e->getOpcode() == clang::UO_Imag) &&
2279 "Invalid UnaryOp kind for ComplexType Real or Imag");
2280
2281 Expr *op = e->getSubExpr();
2282 mlir::Location loc = cgf.getLoc(e->getExprLoc());
2283 if (op->getType()->isAnyComplexType()) {
2284 // If it's an l-value, load through the appropriate subobject l-value.
2285 // Note that we have to ask `e` because `op` might be an l-value that
2286 // this won't work for, e.g. an Obj-C property
2287 mlir::Value complex = cgf.emitComplexExpr(op);
2288 if (e->isGLValue() && !promotionTy.isNull()) {
2289 promotionTy = promotionTy->isAnyComplexType()
2290 ? promotionTy
2291 : cgf.getContext().getComplexType(promotionTy);
2292 complex = cgf.emitPromotedValue(complex, promotionTy);
2293 }
2294
2295 return e->getOpcode() == clang::UO_Real
2296 ? builder.createComplexReal(loc, complex)
2297 : builder.createComplexImag(loc, complex);
2298 }
2299
2300 if (e->getOpcode() == UO_Real) {
2301 mlir::Value operand = promotionTy.isNull()
2302 ? Visit(op)
2303 : cgf.emitPromotedScalarExpr(op, promotionTy);
2304 return builder.createComplexReal(loc, operand);
2305 }
2306
2307 // __imag on a scalar returns zero. Emit the subexpr to ensure side
2308 // effects are evaluated, but not the actual value.
2309 mlir::Value operand;
2310 if (op->isGLValue()) {
2311 operand = cgf.emitLValue(op).getPointer();
2312 operand = cir::LoadOp::create(builder, loc, operand);
2313 } else if (!promotionTy.isNull()) {
2314 operand = cgf.emitPromotedScalarExpr(op, promotionTy);
2315 } else {
2316 operand = cgf.emitScalarExpr(op);
2317 }
2318 return builder.createComplexImag(loc, operand);
2319}
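// Editor's note (illustrative, not part of the upstream file): on a
// non-complex operand, `__real__ x` simply forwards the (possibly promoted)
// scalar through createComplexReal, while `__imag__ x` evaluates `x` only
// for its side effects and yields zero, per the comment above.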
2320
2321/// Return the size or alignment of the type of argument of the sizeof
2322/// expression as an integer.
2323mlir::Value ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
2324 const UnaryExprOrTypeTraitExpr *e) {
2325 const QualType typeToSize = e->getTypeOfArgument();
2326 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
2327 if (auto kind = e->getKind();
2328 kind == UETT_SizeOf || kind == UETT_DataSizeOf) {
2329 if (cgf.getContext().getAsVariableArrayType(typeToSize)) {
2331 "sizeof operator for VariableArrayType",
2332 e->getStmtClassName());
2333 return builder.getConstant(
2334 loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
2335 llvm::APSInt(llvm::APInt(64, 1), true)));
2336 }
2337 } else if (e->getKind() == UETT_OpenMPRequiredSimdAlign) {
2339 e->getSourceRange(), "sizeof operator for OpenMpRequiredSimdAlign",
2340 e->getStmtClassName());
2341 return builder.getConstant(
2342 loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
2343 llvm::APSInt(llvm::APInt(64, 1), true)));
2344 }
2345
2346 return builder.getConstant(
2347 loc, cir::IntAttr::get(cgf.cgm.uInt64Ty,
2349}
2350
2351/// Return true if the specified expression is cheap enough and side-effect-free
2352/// enough to evaluate unconditionally instead of conditionally. This is used
2353/// to convert control flow into selects in some cases.
2354/// TODO(cir): can be shared with LLVM codegen.
2355static bool isCheapEnoughToEvaluateUnconditionally(const Expr *e,
2356 CIRGenFunction &cgf) {
2357 // Anything that is an integer or floating point constant is fine.
2358 return e->IgnoreParens()->isEvaluatable(cgf.getContext());
2359
2360 // Even non-volatile automatic variables can't be evaluated unconditionally.
2361 // Referencing a thread_local may cause non-trivial initialization work to
2362 // occur. If we're inside a lambda and one of the variables is from the scope
2363 // outside the lambda, that function may have returned already. Reading its
2364 // locals is a bad idea. Also, these reads may introduce races that didn't
2365 // exist in the source-level program.
2366}
2367
2368mlir::Value ScalarExprEmitter::VisitAbstractConditionalOperator(
2369 const AbstractConditionalOperator *e) {
2370 CIRGenBuilderTy &builder = cgf.getBuilder();
2371 mlir::Location loc = cgf.getLoc(e->getSourceRange());
2372 ignoreResultAssign = false;
2373
2374 // Bind the common expression if necessary.
2375 CIRGenFunction::OpaqueValueMapping binding(cgf, e);
2376
2377 Expr *condExpr = e->getCond();
2378 Expr *lhsExpr = e->getTrueExpr();
2379 Expr *rhsExpr = e->getFalseExpr();
2380
2381 // If the condition constant folds and can be elided, try to avoid emitting
2382 // the condition and the dead arm.
2383 bool condExprBool;
2384 if (cgf.constantFoldsToBool(condExpr, condExprBool)) {
2385 Expr *live = lhsExpr, *dead = rhsExpr;
2386 if (!condExprBool)
2387 std::swap(live, dead);
2388
2389 // If the dead side doesn't have labels we need, just emit the Live part.
2390 if (!cgf.containsLabel(dead)) {
2391 if (condExprBool)
2393 mlir::Value result = Visit(live);
2394
2395 // If the live part is a throw expression, it acts like it has a void
2396 // type, so evaluating it returns a null Value. However, a conditional
2397 // with non-void type must return a non-null Value.
2398 if (!result && !e->getType()->isVoidType()) {
2399 result = builder.getConstant(
2400 loc, cir::PoisonAttr::get(builder.getContext(),
2401 cgf.convertType(e->getType())));
2402 }
2403
2404 return result;
2405 }
2406 }
2407
2408 QualType condType = condExpr->getType();
2409
2410 // OpenCL: If the condition is a vector, we can treat this condition like
2411 // the select function.
2412 if ((cgf.getLangOpts().OpenCL && condType->isVectorType()) ||
2413 condType->isExtVectorType()) {
2415 cgf.cgm.errorNYI(e->getSourceRange(), "vector ternary op");
2416 }
2417
2418 if (condType->isVectorType() || condType->isSveVLSBuiltinType()) {
2419 if (!condType->isVectorType()) {
2421 cgf.cgm.errorNYI(loc, "TernaryOp for SVE vector");
2422 return {};
2423 }
2424
2425 mlir::Value condValue = Visit(condExpr);
2426 mlir::Value lhsValue = Visit(lhsExpr);
2427 mlir::Value rhsValue = Visit(rhsExpr);
2428 return cir::VecTernaryOp::create(builder, loc, condValue, lhsValue,
2429 rhsValue);
2430 }
2431
2432 // If this is a really simple expression (like x ? 4 : 5), emit this as a
2433 // select instead of as control flow. We can only do this if it is cheap
2434 // and safe to evaluate the LHS and RHS unconditionally.
2435 if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, cgf) &&
2436 isCheapEnoughToEvaluateUnconditionally(rhsExpr, cgf)) {
2437 bool lhsIsVoid = false;
2438 mlir::Value condV = cgf.evaluateExprAsBool(condExpr);
2440
2441 mlir::Value lhs = Visit(lhsExpr);
2442 if (!lhs) {
2443 lhs = builder.getNullValue(cgf.voidTy, loc);
2444 lhsIsVoid = true;
2445 }
2446
2447 mlir::Value rhs = Visit(rhsExpr);
2448 if (lhsIsVoid) {
2449 assert(!rhs && "lhs and rhs types must match");
2450 rhs = builder.getNullValue(cgf.voidTy, loc);
2451 }
2452
2453 return builder.createSelect(loc, condV, lhs, rhs);
2454 }
2455
2456 mlir::Value condV = cgf.emitOpOnBoolExpr(loc, condExpr);
2457 CIRGenFunction::ConditionalEvaluation eval(cgf);
2458 SmallVector<mlir::OpBuilder::InsertPoint, 2> insertPoints{};
2459 mlir::Type yieldTy{};
2460
2461 auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc, Expr *expr) {
2462 CIRGenFunction::LexicalScope lexScope{cgf, loc, b.getInsertionBlock()};
2464
2466 eval.beginEvaluation();
2467 mlir::Value branch = Visit(expr);
2468 eval.endEvaluation();
2469
2470 if (branch) {
2471 yieldTy = branch.getType();
2472 cir::YieldOp::create(b, loc, branch);
2473 } else {
2474 // If the LHS or RHS is a throw or void expression, we need to patch
2475 // the arms so that their yield types match.
2476 insertPoints.push_back(b.saveInsertionPoint());
2477 }
2478 };
2479
2480 mlir::Value result = cir::TernaryOp::create(
2481 builder, loc, condV,
2482 /*trueBuilder=*/
2483 [&](mlir::OpBuilder &b, mlir::Location loc) {
2484 emitBranch(b, loc, lhsExpr);
2485 },
2486 /*falseBuilder=*/
2487 [&](mlir::OpBuilder &b, mlir::Location loc) {
2488 emitBranch(b, loc, rhsExpr);
2489 })
2490 .getResult();
2491
2492 if (!insertPoints.empty()) {
2493 // If both arms are void, so be it.
2494 if (!yieldTy)
2495 yieldTy = cgf.voidTy;
2496
2497 // Insert required yields.
2498 for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
2499 mlir::OpBuilder::InsertionGuard guard(builder);
2500 builder.restoreInsertionPoint(toInsert);
2501
2502 // Block does not return: build empty yield.
2503 if (mlir::isa<cir::VoidType>(yieldTy)) {
2504 cir::YieldOp::create(builder, loc);
2505 } else { // Block returns: set null yield value.
2506 mlir::Value op0 = builder.getNullValue(yieldTy, loc);
2507 cir::YieldOp::create(builder, loc, op0);
2508 }
2509 }
2510 }
2511
2512 return result;
2513}
2514
2515mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *e,
2516 LValue lv,
2517 cir::UnaryOpKind kind,
2518 bool isPre) {
2519 return ScalarExprEmitter(*this, builder)
2520 .emitScalarPrePostIncDec(e, lv, kind, isPre);
2521}