clang 22.0.0git
CIRGenExprAggregate.cpp
Go to the documentation of this file.
1//===- CIRGenExprAggregrate.cpp - Emit CIR Code from Aggregate Expressions ===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Aggregate Expr nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
#include "CIRGenBuilder.h"
#include "CIRGenFunction.h"
#include "CIRGenValue.h"

#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtVisitor.h"

#include <cstdint>
22
23using namespace clang;
24using namespace clang::CIRGen;
25
26namespace {
27class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
28
29 CIRGenFunction &cgf;
30 AggValueSlot dest;
31
32 // Calls `fn` with a valid return value slot, potentially creating a temporary
33 // to do so. If a temporary is created, an appropriate copy into `Dest` will
34 // be emitted, as will lifetime markers.
35 //
36 // The given function should take a ReturnValueSlot, and return an RValue that
37 // points to said slot.
38 void withReturnValueSlot(const Expr *e,
39 llvm::function_ref<RValue(ReturnValueSlot)> fn);
40
41 AggValueSlot ensureSlot(mlir::Location loc, QualType t) {
42 if (!dest.isIgnored())
43 return dest;
44
45 cgf.cgm.errorNYI(loc, "Slot for ignored address");
46 return dest;
47 }
48
49 void ensureDest(mlir::Location loc, QualType ty) {
50 if (!dest.isIgnored())
51 return;
52 dest = cgf.createAggTemp(ty, loc, "agg.tmp.ensured");
53 }
54
55public:
56 AggExprEmitter(CIRGenFunction &cgf, AggValueSlot dest)
57 : cgf(cgf), dest(dest) {}
58
59 /// Given an expression with aggregate type that represents a value lvalue,
60 /// this method emits the address of the lvalue, then loads the result into
61 /// DestPtr.
62 void emitAggLoadOfLValue(const Expr *e);
63
64 void emitArrayInit(Address destPtr, cir::ArrayType arrayTy, QualType arrayQTy,
65 Expr *exprToVisit, ArrayRef<Expr *> args,
66 Expr *arrayFiller);
67
68 /// Perform the final copy to DestPtr, if desired.
69 void emitFinalDestCopy(QualType type, const LValue &src);
70
71 void emitCopy(QualType type, const AggValueSlot &dest,
72 const AggValueSlot &src);
73
74 void emitInitializationToLValue(Expr *e, LValue lv);
75
76 void emitNullInitializationToLValue(mlir::Location loc, LValue lv);
77
78 void Visit(Expr *e) { StmtVisitor<AggExprEmitter>::Visit(e); }
79
80 void VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
81 emitAggLoadOfLValue(e);
82 }
83
84 void VisitCallExpr(const CallExpr *e);
85 void VisitStmtExpr(const StmtExpr *e) {
86 CIRGenFunction::StmtExprEvaluation eval(cgf);
87 Address retAlloca =
88 cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
89 (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca, dest);
90 }
91
92 void VisitDeclRefExpr(DeclRefExpr *e) { emitAggLoadOfLValue(e); }
93
94 void VisitInitListExpr(InitListExpr *e);
95 void VisitCXXConstructExpr(const CXXConstructExpr *e);
96
97 void visitCXXParenListOrInitListExpr(Expr *e, ArrayRef<Expr *> args,
98 FieldDecl *initializedFieldInUnion,
99 Expr *arrayFiller);
100 void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
101 CIRGenFunction::CXXDefaultInitExprScope Scope(cgf, die);
102 Visit(die->getExpr());
103 }
104 void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *e) {
105 // Ensure that we have a slot, but if we already do, remember
106 // whether it was externally destructed.
107 bool wasExternallyDestructed = dest.isExternallyDestructed();
108 ensureDest(cgf.getLoc(e->getSourceRange()), e->getType());
109
110 // We're going to push a destructor if there isn't already one.
111 dest.setExternallyDestructed();
112
113 Visit(e->getSubExpr());
114
115 // Push that destructor we promised.
116 if (!wasExternallyDestructed)
117 cgf.emitCXXTemporary(e->getTemporary(), e->getType(), dest.getAddress());
118 }
119 void VisitLambdaExpr(LambdaExpr *e);
120 void VisitExprWithCleanups(ExprWithCleanups *e);
121
122 // Stubs -- These should be moved up when they are implemented.
123 void VisitCastExpr(CastExpr *e) {
124 switch (e->getCastKind()) {
125 case CK_LValueToRValue:
126 // If we're loading from a volatile type, force the destination
127 // into existence.
129 cgf.cgm.errorNYI(e->getSourceRange(),
130 "AggExprEmitter: volatile lvalue-to-rvalue cast");
131 [[fallthrough]];
132 case CK_NoOp:
133 case CK_UserDefinedConversion:
134 case CK_ConstructorConversion:
135 assert(cgf.getContext().hasSameUnqualifiedType(e->getSubExpr()->getType(),
136 e->getType()) &&
137 "Implicit cast types must be compatible");
138 Visit(e->getSubExpr());
139 break;
140 default:
141 cgf.cgm.errorNYI(e->getSourceRange(),
142 std::string("AggExprEmitter: VisitCastExpr: ") +
143 e->getCastKindName());
144 break;
145 }
146 }
147 void VisitStmt(Stmt *s) {
148 cgf.cgm.errorNYI(s->getSourceRange(),
149 std::string("AggExprEmitter::VisitStmt: ") +
150 s->getStmtClassName());
151 }
152 void VisitParenExpr(ParenExpr *pe) { Visit(pe->getSubExpr()); }
153 void VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
154 Visit(ge->getResultExpr());
155 }
156 void VisitCoawaitExpr(CoawaitExpr *e) {
157 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoawaitExpr");
158 }
159 void VisitCoyieldExpr(CoyieldExpr *e) {
160 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoyieldExpr");
161 }
162 void VisitUnaryCoawait(UnaryOperator *e) {
163 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitUnaryCoawait");
164 }
165 void VisitUnaryExtension(UnaryOperator *e) { Visit(e->getSubExpr()); }
166 void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
167 cgf.cgm.errorNYI(e->getSourceRange(),
168 "AggExprEmitter: VisitSubstNonTypeTemplateParmExpr");
169 }
170 void VisitConstantExpr(ConstantExpr *e) {
171 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitConstantExpr");
172 }
173 void VisitMemberExpr(MemberExpr *e) {
174 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitMemberExpr");
175 }
176 void VisitUnaryDeref(UnaryOperator *e) {
177 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitUnaryDeref");
178 }
179 void VisitStringLiteral(StringLiteral *e) {
180 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitStringLiteral");
181 }
182 void VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
183 cgf.cgm.errorNYI(e->getSourceRange(),
184 "AggExprEmitter: VisitCompoundLiteralExpr");
185 }
186 void VisitPredefinedExpr(const PredefinedExpr *e) {
187 cgf.cgm.errorNYI(e->getSourceRange(),
188 "AggExprEmitter: VisitPredefinedExpr");
189 }
190 void VisitBinaryOperator(const BinaryOperator *e) {
191 cgf.cgm.errorNYI(e->getSourceRange(),
192 "AggExprEmitter: VisitBinaryOperator");
193 }
194 void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *e) {
195 cgf.cgm.errorNYI(e->getSourceRange(),
196 "AggExprEmitter: VisitPointerToDataMemberBinaryOperator");
197 }
198 void VisitBinAssign(const BinaryOperator *e) {
199 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinAssign");
200 }
201 void VisitBinComma(const BinaryOperator *e) {
202 cgf.emitIgnoredExpr(e->getLHS());
203 Visit(e->getRHS());
204 }
205 void VisitBinCmp(const BinaryOperator *e) {
206 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinCmp");
207 }
208 void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *e) {
209 cgf.cgm.errorNYI(e->getSourceRange(),
210 "AggExprEmitter: VisitCXXRewrittenBinaryOperator");
211 }
212 void VisitObjCMessageExpr(ObjCMessageExpr *e) {
213 cgf.cgm.errorNYI(e->getSourceRange(),
214 "AggExprEmitter: VisitObjCMessageExpr");
215 }
216 void VisitObjCIVarRefExpr(ObjCIvarRefExpr *e) {
217 cgf.cgm.errorNYI(e->getSourceRange(),
218 "AggExprEmitter: VisitObjCIVarRefExpr");
219 }
220
221 void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *e) {
222 AggValueSlot dest = ensureSlot(cgf.getLoc(e->getExprLoc()), e->getType());
223 LValue destLV = cgf.makeAddrLValue(dest.getAddress(), e->getType());
224 emitInitializationToLValue(e->getBase(), destLV);
225 VisitInitListExpr(e->getUpdater());
226 }
227 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *e) {
228 cgf.cgm.errorNYI(e->getSourceRange(),
229 "AggExprEmitter: VisitAbstractConditionalOperator");
230 }
231 void VisitChooseExpr(const ChooseExpr *e) { Visit(e->getChosenSubExpr()); }
232 void VisitCXXParenListInitExpr(CXXParenListInitExpr *e) {
233 visitCXXParenListOrInitListExpr(e, e->getInitExprs(),
235 e->getArrayFiller());
236 }
237
238 void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *e,
239 llvm::Value *outerBegin = nullptr) {
240 cgf.cgm.errorNYI(e->getSourceRange(),
241 "AggExprEmitter: VisitArrayInitLoopExpr");
242 }
243 void VisitImplicitValueInitExpr(ImplicitValueInitExpr *e) {
244 cgf.cgm.errorNYI(e->getSourceRange(),
245 "AggExprEmitter: VisitImplicitValueInitExpr");
246 }
247 void VisitNoInitExpr(NoInitExpr *e) {
248 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitNoInitExpr");
249 }
250 void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *dae) {
251 cgf.cgm.errorNYI(dae->getSourceRange(),
252 "AggExprEmitter: VisitCXXDefaultArgExpr");
253 }
254 void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *e) {
255 cgf.cgm.errorNYI(e->getSourceRange(),
256 "AggExprEmitter: VisitCXXInheritedCtorInitExpr");
257 }
258 void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *e) {
259 cgf.cgm.errorNYI(e->getSourceRange(),
260 "AggExprEmitter: VisitCXXStdInitializerListExpr");
261 }
262 void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *e) {
263 cgf.cgm.errorNYI(e->getSourceRange(),
264 "AggExprEmitter: VisitCXXScalarValueInitExpr");
265 }
266 void VisitCXXTypeidExpr(CXXTypeidExpr *e) {
267 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXTypeidExpr");
268 }
269 void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *e) {
270 cgf.cgm.errorNYI(e->getSourceRange(),
271 "AggExprEmitter: VisitMaterializeTemporaryExpr");
272 }
273 void VisitOpaqueValueExpr(OpaqueValueExpr *e) {
274 cgf.cgm.errorNYI(e->getSourceRange(),
275 "AggExprEmitter: VisitOpaqueValueExpr");
276 }
277
278 void VisitPseudoObjectExpr(PseudoObjectExpr *e) {
279 cgf.cgm.errorNYI(e->getSourceRange(),
280 "AggExprEmitter: VisitPseudoObjectExpr");
281 }
282
283 void VisitVAArgExpr(VAArgExpr *e) {
284 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitVAArgExpr");
285 }
286
287 void VisitCXXThrowExpr(const CXXThrowExpr *e) {
288 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXThrowExpr");
289 }
290 void VisitAtomicExpr(AtomicExpr *e) {
291 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitAtomicExpr");
292 }
293};
294
295} // namespace
296
297static bool isTrivialFiller(Expr *e) {
298 if (!e)
299 return true;
300
302 return true;
303
304 if (auto *ile = dyn_cast<InitListExpr>(e)) {
305 if (ile->getNumInits())
306 return false;
307 return isTrivialFiller(ile->getArrayFiller());
308 }
309
310 if (const auto *cons = dyn_cast_or_null<CXXConstructExpr>(e))
311 return cons->getConstructor()->isDefaultConstructor() &&
312 cons->getConstructor()->isTrivial();
313
314 return false;
315}
316
317/// Given an expression with aggregate type that represents a value lvalue, this
318/// method emits the address of the lvalue, then loads the result into DestPtr.
319void AggExprEmitter::emitAggLoadOfLValue(const Expr *e) {
320 LValue lv = cgf.emitLValue(e);
321
322 // If the type of the l-value is atomic, then do an atomic load.
324
325 emitFinalDestCopy(e->getType(), lv);
326}
327
328void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
329 QualType arrayQTy, Expr *e,
330 ArrayRef<Expr *> args, Expr *arrayFiller) {
331 CIRGenBuilderTy &builder = cgf.getBuilder();
332 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
333
334 const uint64_t numInitElements = args.size();
335
336 const QualType elementType =
337 cgf.getContext().getAsArrayType(arrayQTy)->getElementType();
338
339 if (elementType.isDestructedType() && cgf.cgm.getLangOpts().Exceptions) {
340 cgf.cgm.errorNYI(loc, "initialized array requires destruction");
341 return;
342 }
343
344 const QualType elementPtrType = cgf.getContext().getPointerType(elementType);
345
346 const mlir::Type cirElementType = cgf.convertType(elementType);
347 const cir::PointerType cirElementPtrType =
348 builder.getPointerTo(cirElementType);
349
350 auto begin = cir::CastOp::create(builder, loc, cirElementPtrType,
351 cir::CastKind::array_to_ptrdecay,
352 destPtr.getPointer());
353
354 const CharUnits elementSize =
355 cgf.getContext().getTypeSizeInChars(elementType);
356 const CharUnits elementAlign =
357 destPtr.getAlignment().alignmentOfArrayElement(elementSize);
358
359 // The 'current element to initialize'. The invariants on this
360 // variable are complicated. Essentially, after each iteration of
361 // the loop, it points to the last initialized element, except
362 // that it points to the beginning of the array before any
363 // elements have been initialized.
364 mlir::Value element = begin;
365
366 // Don't build the 'one' before the cycle to avoid
367 // emmiting the redundant `cir.const 1` instrs.
368 mlir::Value one;
369
370 // Emit the explicit initializers.
371 for (uint64_t i = 0; i != numInitElements; ++i) {
372 // Advance to the next element.
373 if (i > 0) {
374 one = builder.getConstantInt(loc, cgf.PtrDiffTy, i);
375 element = builder.createPtrStride(loc, begin, one);
376 }
377
378 const Address address = Address(element, cirElementType, elementAlign);
379 const LValue elementLV = cgf.makeAddrLValue(address, elementType);
380 emitInitializationToLValue(args[i], elementLV);
381 }
382
383 const uint64_t numArrayElements = arrayTy.getSize();
384
385 // Check whether there's a non-trivial array-fill expression.
386 const bool hasTrivialFiller = isTrivialFiller(arrayFiller);
387
388 // Any remaining elements need to be zero-initialized, possibly
389 // using the filler expression. We can skip this if the we're
390 // emitting to zeroed memory.
391 if (numInitElements != numArrayElements &&
392 !(dest.isZeroed() && hasTrivialFiller &&
393 cgf.getTypes().isZeroInitializable(elementType))) {
394 // Advance to the start of the rest of the array.
395 if (numInitElements) {
396 one = builder.getConstantInt(loc, cgf.PtrDiffTy, 1);
397 element = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
398 element, one);
399 }
400
401 // Allocate the temporary variable
402 // to store the pointer to first unitialized element
403 const Address tmpAddr = cgf.createTempAlloca(
404 cirElementPtrType, cgf.getPointerAlign(), loc, "arrayinit.temp");
405 LValue tmpLV = cgf.makeAddrLValue(tmpAddr, elementPtrType);
406 cgf.emitStoreThroughLValue(RValue::get(element), tmpLV);
407
408 // Compute the end of array
409 cir::ConstantOp numArrayElementsConst = builder.getConstInt(
410 loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), numArrayElements);
411 mlir::Value end = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
412 begin, numArrayElementsConst);
413
414 builder.createDoWhile(
415 loc,
416 /*condBuilder=*/
417 [&](mlir::OpBuilder &b, mlir::Location loc) {
418 cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);
419 mlir::Type boolTy = cgf.convertType(cgf.getContext().BoolTy);
420 cir::CmpOp cmp = cir::CmpOp::create(
421 builder, loc, boolTy, cir::CmpOpKind::ne, currentElement, end);
422 builder.createCondition(cmp);
423 },
424 /*bodyBuilder=*/
425 [&](mlir::OpBuilder &b, mlir::Location loc) {
426 cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);
427
429
430 // Emit the actual filler expression.
431 LValue elementLV = cgf.makeAddrLValue(
432 Address(currentElement, cirElementType, elementAlign),
433 elementType);
434 if (arrayFiller)
435 emitInitializationToLValue(arrayFiller, elementLV);
436 else
437 emitNullInitializationToLValue(loc, elementLV);
438
439 // Tell the EH cleanup that we finished with the last element.
440 if (cgf.cgm.getLangOpts().Exceptions) {
441 cgf.cgm.errorNYI(loc, "update destructed array element for EH");
442 return;
443 }
444
445 // Advance pointer and store them to temporary variable
446 cir::ConstantOp one = builder.getConstInt(
447 loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), 1);
448 auto nextElement = cir::PtrStrideOp::create(
449 builder, loc, cirElementPtrType, currentElement, one);
450 cgf.emitStoreThroughLValue(RValue::get(nextElement), tmpLV);
451
452 builder.createYield(loc);
453 });
454 }
455}
456
457/// Perform the final copy to destPtr, if desired.
458void AggExprEmitter::emitFinalDestCopy(QualType type, const LValue &src) {
459 // If dest is ignored, then we're evaluating an aggregate expression
460 // in a context that doesn't care about the result. Note that loads
461 // from volatile l-values force the existence of a non-ignored
462 // destination.
463 if (dest.isIgnored())
464 return;
465
469
470 AggValueSlot srcAgg = AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
473 emitCopy(type, dest, srcAgg);
474}
475
476/// Perform a copy from the source into the destination.
477///
478/// \param type - the type of the aggregate being copied; qualifiers are
479/// ignored
480void AggExprEmitter::emitCopy(QualType type, const AggValueSlot &dest,
481 const AggValueSlot &src) {
483
484 // If the result of the assignment is used, copy the LHS there also.
485 // It's volatile if either side is. Use the minimum alignment of
486 // the two sides.
487 LValue destLV = cgf.makeAddrLValue(dest.getAddress(), type);
488 LValue srcLV = cgf.makeAddrLValue(src.getAddress(), type);
490 cgf.emitAggregateCopy(destLV, srcLV, type, dest.mayOverlap());
491}
492
493void AggExprEmitter::emitInitializationToLValue(Expr *e, LValue lv) {
494 const QualType type = lv.getType();
495
497 const mlir::Location loc = e->getSourceRange().isValid()
498 ? cgf.getLoc(e->getSourceRange())
499 : *cgf.currSrcLoc;
500 return emitNullInitializationToLValue(loc, lv);
501 }
502
503 if (isa<NoInitExpr>(e))
504 return;
505
506 if (type->isReferenceType()) {
507 RValue rv = cgf.emitReferenceBindingToExpr(e);
508 return cgf.emitStoreThroughLValue(rv, lv);
509 }
510
511 switch (cgf.getEvaluationKind(type)) {
512 case cir::TEK_Complex:
513 cgf.emitComplexExprIntoLValue(e, lv, /*isInit*/ true);
514 break;
519 dest.isZeroed()));
520
521 return;
522 case cir::TEK_Scalar:
523 if (lv.isSimple())
524 cgf.emitScalarInit(e, cgf.getLoc(e->getSourceRange()), lv);
525 else
527 return;
528 }
529}
530
531void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *e) {
532 AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
533 cgf.emitCXXConstructExpr(e, slot);
534}
535
536void AggExprEmitter::emitNullInitializationToLValue(mlir::Location loc,
537 LValue lv) {
538 const QualType type = lv.getType();
539
540 // If the destination slot is already zeroed out before the aggregate is
541 // copied into it, we don't have to emit any zeros here.
542 if (dest.isZeroed() && cgf.getTypes().isZeroInitializable(type))
543 return;
544
545 if (cgf.hasScalarEvaluationKind(type)) {
546 // For non-aggregates, we can store the appropriate null constant.
547 mlir::Value null = cgf.cgm.emitNullConstant(type, loc);
548 if (lv.isSimple()) {
549 cgf.emitStoreOfScalar(null, lv, /* isInitialization */ true);
550 return;
551 }
552
553 cgf.cgm.errorNYI("emitStoreThroughBitfieldLValue");
554 return;
555 }
556
557 // There's a potential optimization opportunity in combining
558 // memsets; that would be easy for arrays, but relatively
559 // difficult for structures with the current code.
560 cgf.emitNullInitialization(loc, lv.getAddress(), lv.getType());
561}
562
563void AggExprEmitter::VisitLambdaExpr(LambdaExpr *e) {
564 CIRGenFunction::SourceLocRAIIObject loc{cgf, cgf.getLoc(e->getSourceRange())};
565 AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
566 [[maybe_unused]] LValue slotLV =
567 cgf.makeAddrLValue(slot.getAddress(), e->getType());
568
569 // We'll need to enter cleanup scopes in case any of the element
570 // initializers throws an exception or contains branch out of the expressions.
572
573 for (auto [curField, capture, captureInit] : llvm::zip(
574 e->getLambdaClass()->fields(), e->captures(), e->capture_inits())) {
575 // Pick a name for the field.
576 llvm::StringRef fieldName = curField->getName();
577 if (capture.capturesVariable()) {
578 assert(!curField->isBitField() && "lambdas don't have bitfield members!");
579 ValueDecl *v = capture.getCapturedVar();
580 fieldName = v->getName();
581 cgf.cgm.lambdaFieldToName[curField] = fieldName;
582 } else if (capture.capturesThis()) {
583 cgf.cgm.lambdaFieldToName[curField] = "this";
584 } else {
585 cgf.cgm.errorNYI(e->getSourceRange(), "Unhandled capture kind");
586 cgf.cgm.lambdaFieldToName[curField] = "unhandled-capture-kind";
587 }
588
589 // Emit initialization
590 LValue lv =
591 cgf.emitLValueForFieldInitialization(slotLV, curField, fieldName);
592 if (curField->hasCapturedVLAType())
593 cgf.cgm.errorNYI(e->getSourceRange(), "lambda captured VLA type");
594
595 emitInitializationToLValue(captureInit, lv);
596
597 // Push a destructor if necessary.
598 if ([[maybe_unused]] QualType::DestructionKind DtorKind =
599 curField->getType().isDestructedType())
600 cgf.cgm.errorNYI(e->getSourceRange(), "lambda with destructed field");
601 }
602}
603
604void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *e) {
605 CIRGenFunction::RunCleanupsScope cleanups(cgf);
606 Visit(e->getSubExpr());
607}
608
609void AggExprEmitter::VisitCallExpr(const CallExpr *e) {
611 cgf.cgm.errorNYI(e->getSourceRange(), "reference return type");
612 return;
613 }
614
615 withReturnValueSlot(
616 e, [&](ReturnValueSlot slot) { return cgf.emitCallExpr(e, slot); });
617}
618
619void AggExprEmitter::withReturnValueSlot(
620 const Expr *e, llvm::function_ref<RValue(ReturnValueSlot)> fn) {
621 QualType retTy = e->getType();
622
624 bool requiresDestruction =
626 if (requiresDestruction)
627 cgf.cgm.errorNYI(
628 e->getSourceRange(),
629 "withReturnValueSlot: return value requiring destruction is NYI");
630
631 // If it makes no observable difference, save a memcpy + temporary.
632 //
633 // We need to always provide our own temporary if destruction is required.
634 // Otherwise, fn will emit its own, notice that it's "unused", and end its
635 // lifetime before we have the chance to emit a proper destructor call.
638
639 Address retAddr = dest.getAddress();
641
644 fn(ReturnValueSlot(retAddr));
645}
646
647void AggExprEmitter::VisitInitListExpr(InitListExpr *e) {
649 llvm_unreachable("GNU array range designator extension");
650
651 if (e->isTransparent())
652 return Visit(e->getInit(0));
653
654 visitCXXParenListOrInitListExpr(
656}
657
658void AggExprEmitter::visitCXXParenListOrInitListExpr(
659 Expr *e, ArrayRef<Expr *> args, FieldDecl *initializedFieldInUnion,
660 Expr *arrayFiller) {
661
662 const AggValueSlot dest =
663 ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
664
665 if (e->getType()->isConstantArrayType()) {
666 cir::ArrayType arrayTy =
668 emitArrayInit(dest.getAddress(), arrayTy, e->getType(), e, args,
669 arrayFiller);
670 return;
671 } else if (e->getType()->isVariableArrayType()) {
672 cgf.cgm.errorNYI(e->getSourceRange(),
673 "visitCXXParenListOrInitListExpr variable array type");
674 return;
675 }
676
677 if (e->getType()->isArrayType()) {
678 cgf.cgm.errorNYI(e->getSourceRange(),
679 "visitCXXParenListOrInitListExpr array type");
680 return;
681 }
682
683 assert(e->getType()->isRecordType() && "Only support structs/unions here!");
684
685 // Do struct initialization; this code just sets each individual member
686 // to the approprate value. This makes bitfield support automatic;
687 // the disadvantage is that the generated code is more difficult for
688 // the optimizer, especially with bitfields.
689 unsigned numInitElements = args.size();
690 auto *record = e->getType()->castAsRecordDecl();
691
692 // We'll need to enter cleanup scopes in case any of the element
693 // initializers throws an exception.
695
696 unsigned curInitIndex = 0;
697
698 // Emit initialization of base classes.
699 if (auto *cxxrd = dyn_cast<CXXRecordDecl>(record)) {
700 assert(numInitElements >= cxxrd->getNumBases() &&
701 "missing initializer for base class");
702 if (cxxrd->getNumBases() > 0) {
703 cgf.cgm.errorNYI(e->getSourceRange(),
704 "visitCXXParenListOrInitListExpr base class init");
705 return;
706 }
707 }
708
709 LValue destLV = cgf.makeAddrLValue(dest.getAddress(), e->getType());
710
711 if (record->isUnion()) {
712 cgf.cgm.errorNYI(e->getSourceRange(),
713 "visitCXXParenListOrInitListExpr union type");
714 return;
715 }
716
717 // Here we iterate over the fields; this makes it simpler to both
718 // default-initialize fields and skip over unnamed fields.
719 for (const FieldDecl *field : record->fields()) {
720 // We're done once we hit the flexible array member.
721 if (field->getType()->isIncompleteArrayType())
722 break;
723
724 // Always skip anonymous bitfields.
725 if (field->isUnnamedBitField())
726 continue;
727
728 // We're done if we reach the end of the explicit initializers, we
729 // have a zeroed object, and the rest of the fields are
730 // zero-initializable.
731 if (curInitIndex == numInitElements && dest.isZeroed() &&
733 break;
734 LValue lv =
735 cgf.emitLValueForFieldInitialization(destLV, field, field->getName());
736 // We never generate write-barriers for initialized fields.
738
739 if (curInitIndex < numInitElements) {
740 // Store the initializer into the field.
741 CIRGenFunction::SourceLocRAIIObject loc{
742 cgf, cgf.getLoc(record->getSourceRange())};
743 emitInitializationToLValue(args[curInitIndex++], lv);
744 } else {
745 // We're out of initializers; default-initialize to null
746 emitNullInitializationToLValue(cgf.getLoc(e->getSourceRange()), lv);
747 }
748
749 // Push a destructor if necessary.
750 // FIXME: if we have an array of structures, all explicitly
751 // initialized, we can end up pushing a linear number of cleanups.
752 if (field->getType().isDestructedType()) {
753 cgf.cgm.errorNYI(e->getSourceRange(),
754 "visitCXXParenListOrInitListExpr destructor");
755 return;
756 }
757
758 // From classic codegen, maybe not useful for CIR:
759 // If the GEP didn't get used because of a dead zero init or something
760 // else, clean it up for -O0 builds and general tidiness.
761 }
762}
763
764// TODO(cir): This could be shared with classic codegen.
766 const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual) {
767 // If the most-derived object is a field declared with [[no_unique_address]],
768 // the tail padding of any virtual base could be reused for other subobjects
769 // of that field's class.
770 if (isVirtual)
772
773 // If the base class is laid out entirely within the nvsize of the derived
774 // class, its tail padding cannot yet be initialized, so we can issue
775 // stores at the full width of the base class.
776 const ASTRecordLayout &layout = getContext().getASTRecordLayout(rd);
777 if (layout.getBaseClassOffset(baseRD) +
778 getContext().getASTRecordLayout(baseRD).getSize() <=
779 layout.getNonVirtualSize())
781
782 // The tail padding may contain values we need to preserve.
784}
785
787 AggExprEmitter(*this, slot).Visit(const_cast<Expr *>(e));
788}
789
791 AggValueSlot::Overlap_t mayOverlap) {
792 // TODO(cir): this function needs improvements, commented code for now since
793 // this will be touched again soon.
794 assert(!ty->isAnyComplexType() && "Unexpected copy of complex");
795
796 Address destPtr = dest.getAddress();
797 Address srcPtr = src.getAddress();
798
799 if (getLangOpts().CPlusPlus) {
800 if (auto *record = ty->getAsCXXRecordDecl()) {
801 assert((record->hasTrivialCopyConstructor() ||
802 record->hasTrivialCopyAssignment() ||
803 record->hasTrivialMoveConstructor() ||
804 record->hasTrivialMoveAssignment() ||
805 record->hasAttr<TrivialABIAttr>() || record->isUnion()) &&
806 "Trying to aggregate-copy a type without a trivial copy/move "
807 "constructor or assignment operator");
808 // Ignore empty classes in C++.
809 if (record->isEmpty())
810 return;
811 }
812 }
813
815
816 // Aggregate assignment turns into llvm.memcpy. This is almost valid per
817 // C99 6.5.16.1p3, which states "If the value being stored in an object is
818 // read from another object that overlaps in anyway the storage of the first
819 // object, then the overlap shall be exact and the two objects shall have
820 // qualified or unqualified versions of a compatible type."
821 //
822 // memcpy is not defined if the source and destination pointers are exactly
823 // equal, but other compilers do this optimization, and almost every memcpy
824 // implementation handles this case safely. If there is a libc that does not
825 // safely handle this, we can add a target hook.
826
827 // Get data size info for this aggregate. Don't copy the tail padding if this
828 // might be a potentially-overlapping subobject, since the tail padding might
829 // be occupied by a different object. Otherwise, copying it is fine.
830 TypeInfoChars typeInfo;
831 if (mayOverlap)
832 typeInfo = getContext().getTypeInfoDataSizeInChars(ty);
833 else
834 typeInfo = getContext().getTypeInfoInChars(ty);
835
837
838 // NOTE(cir): original codegen would normally convert destPtr and srcPtr to
839 // i8* since memcpy operates on bytes. We don't need that in CIR because
840 // cir.copy will operate on any CIR pointer that points to a sized type.
841
842 // Don't do any of the memmove_collectable tests if GC isn't set.
843 if (cgm.getLangOpts().getGC() != LangOptions::NonGC)
844 cgm.errorNYI("emitAggregateCopy: GC");
845
846 [[maybe_unused]] cir::CopyOp copyOp =
847 builder.createCopy(destPtr.getPointer(), srcPtr.getPointer());
848
850}
851
852// TODO(cir): This could be shared with classic codegen.
855 if (!fd->hasAttr<NoUniqueAddressAttr>() || !fd->getType()->isRecordType())
857
858 // If the field lies entirely within the enclosing class's nvsize, its tail
859 // padding cannot overlap any already-initialized object. (The only subobjects
860 // with greater addresses that might already be initialized are vbases.)
861 const RecordDecl *classRD = fd->getParent();
862 const ASTRecordLayout &layout = getContext().getASTRecordLayout(classRD);
863 if (layout.getFieldOffset(fd->getFieldIndex()) +
864 getContext().getTypeSize(fd->getType()) <=
865 (uint64_t)getContext().toBits(layout.getNonVirtualSize()))
867
868 // The tail padding may contain values we need to preserve.
870}
871
static bool isTrivialFiller(Expr *e)
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
cir::ConditionOp createCondition(mlir::Value condition)
Create a loop condition.
cir::PtrStrideOp createPtrStride(mlir::Location loc, mlir::Value base, mlir::Value stride)
cir::PointerType getPointerTo(mlir::Type ty)
cir::DoWhileOp createDoWhile(mlir::Location loc, llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> condBuilder, llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> bodyBuilder)
Create a do-while operation.
cir::ConstantOp getConstantInt(mlir::Location loc, mlir::Type ty, int64_t value)
cir::YieldOp createYield(mlir::Location loc, mlir::ValueRange value={})
Create a yield operation.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType BoolTy
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object...
QualType getElementType() const
Definition TypeBase.h:3734
Expr * getLHS() const
Definition Expr.h:4022
Expr * getRHS() const
Definition Expr.h:4024
mlir::Value getPointer() const
Definition Address.h:82
mlir::Type getElementType() const
Definition Address.h:109
clang::CharUnits getAlignment() const
Definition Address.h:117
An aggregate value slot.
IsZeroed_t isZeroed() const
Overlap_t mayOverlap() const
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal)
cir::LoadOp createLoad(mlir::Location loc, Address addr, bool isVolatile=false)
static bool hasScalarEvaluationKind(clang::QualType type)
mlir::Type convertType(clang::QualType t)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
CIRGenTypes & getTypes() const
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, bool isInit=false, bool isNontemporal=false)
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *fd)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
LValue emitAggExprToLValue(const Expr *e)
static bool hasAggregateEvaluationKind(clang::QualType type)
void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit=false)
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, except that if the Field is a reference, this will return the address of the...
mlir::Value emitScalarExpr(const clang::Expr *e)
Emit the computation of the specified expression of scalar type.
CIRGenBuilderTy & getBuilder()
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual)
Determine whether a base class initialization may overlap some other object.
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
clang::ASTContext & getContext() const
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy, AggValueSlot::Overlap_t mayOverlap)
Emit an aggregate copy.
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment and cast it to the defa...
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
const clang::LangOptions & getLangOpts() const
mlir::Value emitNullConstant(QualType t, mlir::Location loc)
Return the result of value-initializing the given type, i.e.
llvm::DenseMap< const clang::FieldDecl *, llvm::StringRef > lambdaFieldToName
Keep a map between lambda fields and names, this needs to be per module since lambdas might get gener...
bool isZeroInitializable(clang::QualType ty)
Return whether a type can be zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
Address getAddress() const
static RValue get(mlir::Value v)
Definition CIRGenValue.h:82
CXXTemporary * getTemporary()
Definition ExprCXX.h:1512
const Expr * getSubExpr() const
Definition ExprCXX.h:1516
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
MutableArrayRef< Expr * > getInitExprs()
Definition ExprCXX.h:5183
FieldDecl * getInitializedFieldInUnion()
Definition ExprCXX.h:5221
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprCXX.h:353
SourceRange getSourceRange() const LLVM_READONLY
Retrieve the source range of the expression.
Definition ExprCXX.h:828
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprCXX.h:902
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1599
CastKind getCastKind() const
Definition Expr.h:3654
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1946
Expr * getSubExpr()
Definition Expr.h:3660
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4818
bool hasAttr() const
Definition DeclBase.h:577
InitListExpr * getUpdater() const
Definition Expr.h:5870
This represents one expression.
Definition Expr.h:112
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3160
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
const Expr * getSubExpr() const
Definition Expr.h:1062
Expr * getResultExpr()
Return the result expression of this controlling expression.
Definition Expr.h:6396
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition Expr.cpp:2457
FieldDecl * getInitializedFieldInUnion()
If this initializes a union, specifies which field in the union to initialize.
Definition Expr.h:5359
bool hadArrayRangeDesignator() const
Definition Expr.h:5417
Expr * getArrayFiller()
If this initializer list initializes an array with more elements than there are initializers in the l...
Definition Expr.h:5335
const Expr * getInit(unsigned Init) const
Definition Expr.h:5287
ArrayRef< Expr * > inits()
Definition Expr.h:5283
llvm::iterator_range< capture_init_iterator > capture_inits()
Retrieve the initialization expressions for this lambda's captures.
Definition ExprCXX.h:2085
capture_range captures() const
Retrieve this lambda's captures.
Definition ExprCXX.cpp:1371
CXXRecordDecl * getLambdaClass() const
Retrieve the class that corresponds to the lambda.
Definition ExprCXX.cpp:1400
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
const Expr * getSubExpr() const
Definition Expr.h:2199
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8374
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1545
Represents a struct/union/class.
Definition Decl.h:4312
field_range fields() const
Definition Decl.h:4515
CompoundStmt * getSubStmt()
Definition Expr.h:4546
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:334
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantArrayType() const
Definition TypeBase.h:8630
bool isArrayType() const
Definition TypeBase.h:8626
bool isReferenceType() const
Definition TypeBase.h:8551
bool isVariableArrayType() const
Definition TypeBase.h:8638
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8662
bool isRecordType() const
Definition TypeBase.h:8654
Expr * getSubExpr() const
Definition Expr.h:2285
QualType getType() const
Definition Decl.h:723
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
static bool emitLifetimeMarkers()
static bool aggValueSlotDestructedFlag()
static bool aggValueSlotGC()
static bool aggValueSlotAlias()
static bool opLoadStoreAtomic()
static bool aggEmitFinalDestCopyRValue()
static bool aggValueSlotVolatile()
static bool opScopeCleanupRegion()
static bool cudaSupport()
static bool requiresCleanups()
clang::CharUnits getPointerAlign() const