clang 22.0.0git
CIRGenExprAggregate.cpp
Go to the documentation of this file.
1//===- CIRGenExprAggregate.cpp - Emit CIR Code from Aggregate Expressions ===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Aggregate Expr nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenBuilder.h"
14#include "CIRGenFunction.h"
15#include "CIRGenValue.h"
17
18#include "clang/AST/Expr.h"
21#include <cstdint>
22
23using namespace clang;
24using namespace clang::CIRGen;
25
26namespace {
// Visitor that walks an expression of aggregate (struct/array) type and
// emits CIR for it into the destination slot `dest`. Many of the Visit*
// overloads below are still unimplemented stubs that report errorNYI.
// NOTE(review): this extraction is missing a few original lines (e.g. 99
// and 110 in the embedded numbering) — verify against the upstream file.
27class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
28
29 CIRGenFunction &cgf;
30 AggValueSlot dest;
31
32 // Calls `fn` with a valid return value slot, potentially creating a temporary
33 // to do so. If a temporary is created, an appropriate copy into `Dest` will
34 // be emitted, as will lifetime markers.
35 //
36 // The given function should take a ReturnValueSlot, and return an RValue that
37 // points to said slot.
38 void withReturnValueSlot(const Expr *e,
39 llvm::function_ref<RValue(ReturnValueSlot)> fn);
40
// Returns `dest` when it is usable; materializing a fresh slot for an
// ignored destination is not implemented yet (errorNYI).
41 AggValueSlot ensureSlot(mlir::Location loc, QualType t) {
42 if (!dest.isIgnored())
43 return dest;
44
45 cgf.cgm.errorNYI(loc, "Slot for ignored address");
46 return dest;
47 }
48
49public:
50 AggExprEmitter(CIRGenFunction &cgf, AggValueSlot dest)
51 : cgf(cgf), dest(dest) {}
52
53 /// Given an expression with aggregate type that represents a value lvalue,
54 /// this method emits the address of the lvalue, then loads the result into
55 /// DestPtr.
56 void emitAggLoadOfLValue(const Expr *e);
57
58 void emitArrayInit(Address destPtr, cir::ArrayType arrayTy, QualType arrayQTy,
59 Expr *exprToVisit, ArrayRef<Expr *> args,
60 Expr *arrayFiller);
61
62 /// Perform the final copy to DestPtr, if desired.
63 void emitFinalDestCopy(QualType type, const LValue &src);
64
65 void emitCopy(QualType type, const AggValueSlot &dest,
66 const AggValueSlot &src);
67
68 void emitInitializationToLValue(Expr *e, LValue lv);
69
70 void emitNullInitializationToLValue(mlir::Location loc, LValue lv);
71
72 void Visit(Expr *e) { StmtVisitor<AggExprEmitter>::Visit(e); }
73
74 void VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
75 emitAggLoadOfLValue(e);
76 }
77
78 void VisitCallExpr(const CallExpr *e);
// Evaluate a GNU statement-expression into a fresh memory temporary.
79 void VisitStmtExpr(const StmtExpr *e) {
80 CIRGenFunction::StmtExprEvaluation eval(cgf);
81 Address retAlloca =
82 cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
83 (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca, dest);
84 }
85
86 void VisitDeclRefExpr(DeclRefExpr *e) { emitAggLoadOfLValue(e); }
87
88 void VisitInitListExpr(InitListExpr *e);
89 void VisitCXXConstructExpr(const CXXConstructExpr *e);
90
91 void visitCXXParenListOrInitListExpr(Expr *e, ArrayRef<Expr *> args,
92 FieldDecl *initializedFieldInUnion,
93 Expr *arrayFiller);
94 void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
95 CIRGenFunction::CXXDefaultInitExprScope Scope(cgf, die);
96 Visit(die->getExpr());
97 }
// NOTE(review): original line 99 is missing from this listing.
98 void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *e) {
100 Visit(e->getSubExpr());
101 }
102 void VisitLambdaExpr(LambdaExpr *e);
103
104 // Stubs -- These should be moved up when they are implemented.
// NOTE(review): original line 110 (presumably the volatile-load check
// guarding the errorNYI below) is missing from this listing.
105 void VisitCastExpr(CastExpr *e) {
106 switch (e->getCastKind()) {
107 case CK_LValueToRValue:
108 // If we're loading from a volatile type, force the destination
109 // into existence.
111 cgf.cgm.errorNYI(e->getSourceRange(),
112 "AggExprEmitter: volatile lvalue-to-rvalue cast");
113 [[fallthrough]];
114 case CK_NoOp:
115 case CK_UserDefinedConversion:
116 case CK_ConstructorConversion:
117 assert(cgf.getContext().hasSameUnqualifiedType(e->getSubExpr()->getType(),
118 e->getType()) &&
119 "Implicit cast types must be compatible");
120 Visit(e->getSubExpr());
121 break;
122 default:
123 cgf.cgm.errorNYI(e->getSourceRange(),
124 std::string("AggExprEmitter: VisitCastExpr: ") +
125 e->getCastKindName());
126 break;
127 }
128 }
129 void VisitStmt(Stmt *s) {
130 cgf.cgm.errorNYI(s->getSourceRange(),
131 std::string("AggExprEmitter::VisitStmt: ") +
132 s->getStmtClassName());
133 }
134 void VisitParenExpr(ParenExpr *pe) {
135 cgf.cgm.errorNYI(pe->getSourceRange(), "AggExprEmitter: VisitParenExpr");
136 }
137 void VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
138 cgf.cgm.errorNYI(ge->getSourceRange(),
139 "AggExprEmitter: VisitGenericSelectionExpr");
140 }
141 void VisitCoawaitExpr(CoawaitExpr *e) {
142 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoawaitExpr");
143 }
144 void VisitCoyieldExpr(CoyieldExpr *e) {
145 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoyieldExpr");
146 }
147 void VisitUnaryCoawait(UnaryOperator *e) {
148 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitUnaryCoawait");
149 }
150 void VisitUnaryExtension(UnaryOperator *e) {
151 cgf.cgm.errorNYI(e->getSourceRange(),
152 "AggExprEmitter: VisitUnaryExtension");
153 }
154 void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
155 cgf.cgm.errorNYI(e->getSourceRange(),
156 "AggExprEmitter: VisitSubstNonTypeTemplateParmExpr");
157 }
158 void VisitConstantExpr(ConstantExpr *e) {
159 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitConstantExpr");
160 }
161 void VisitMemberExpr(MemberExpr *e) {
162 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitMemberExpr");
163 }
164 void VisitUnaryDeref(UnaryOperator *e) {
165 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitUnaryDeref");
166 }
167 void VisitStringLiteral(StringLiteral *e) {
168 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitStringLiteral");
169 }
170 void VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
171 cgf.cgm.errorNYI(e->getSourceRange(),
172 "AggExprEmitter: VisitCompoundLiteralExpr");
173 }
174 void VisitPredefinedExpr(const PredefinedExpr *e) {
175 cgf.cgm.errorNYI(e->getSourceRange(),
176 "AggExprEmitter: VisitPredefinedExpr");
177 }
178 void VisitBinaryOperator(const BinaryOperator *e) {
179 cgf.cgm.errorNYI(e->getSourceRange(),
180 "AggExprEmitter: VisitBinaryOperator");
181 }
182 void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *e) {
183 cgf.cgm.errorNYI(e->getSourceRange(),
184 "AggExprEmitter: VisitPointerToDataMemberBinaryOperator");
185 }
186 void VisitBinAssign(const BinaryOperator *e) {
187 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinAssign");
188 }
189 void VisitBinComma(const BinaryOperator *e) {
190 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinComma");
191 }
192 void VisitBinCmp(const BinaryOperator *e) {
193 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinCmp");
194 }
195 void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *e) {
196 cgf.cgm.errorNYI(e->getSourceRange(),
197 "AggExprEmitter: VisitCXXRewrittenBinaryOperator");
198 }
199 void VisitObjCMessageExpr(ObjCMessageExpr *e) {
200 cgf.cgm.errorNYI(e->getSourceRange(),
201 "AggExprEmitter: VisitObjCMessageExpr");
202 }
203 void VisitObjCIVarRefExpr(ObjCIvarRefExpr *e) {
204 cgf.cgm.errorNYI(e->getSourceRange(),
205 "AggExprEmitter: VisitObjCIVarRefExpr");
206 }
207
208 void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *e) {
209 cgf.cgm.errorNYI(e->getSourceRange(),
210 "AggExprEmitter: VisitDesignatedInitUpdateExpr");
211 }
212 void VisitAbstractConditionalOperator(const AbstractConditionalOperator *e) {
213 cgf.cgm.errorNYI(e->getSourceRange(),
214 "AggExprEmitter: VisitAbstractConditionalOperator");
215 }
216 void VisitChooseExpr(const ChooseExpr *e) {
217 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitChooseExpr");
218 }
219 void VisitCXXParenListInitExpr(CXXParenListInitExpr *e) {
220 cgf.cgm.errorNYI(e->getSourceRange(),
221 "AggExprEmitter: VisitCXXParenListInitExpr");
222 }
223 void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *e,
224 llvm::Value *outerBegin = nullptr) {
225 cgf.cgm.errorNYI(e->getSourceRange(),
226 "AggExprEmitter: VisitArrayInitLoopExpr");
227 }
228 void VisitImplicitValueInitExpr(ImplicitValueInitExpr *e) {
229 cgf.cgm.errorNYI(e->getSourceRange(),
230 "AggExprEmitter: VisitImplicitValueInitExpr");
231 }
232 void VisitNoInitExpr(NoInitExpr *e) {
233 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitNoInitExpr");
234 }
235 void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *dae) {
236 cgf.cgm.errorNYI(dae->getSourceRange(),
237 "AggExprEmitter: VisitCXXDefaultArgExpr");
238 }
239 void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *e) {
240 cgf.cgm.errorNYI(e->getSourceRange(),
241 "AggExprEmitter: VisitCXXInheritedCtorInitExpr");
242 }
243 void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *e) {
244 cgf.cgm.errorNYI(e->getSourceRange(),
245 "AggExprEmitter: VisitCXXStdInitializerListExpr");
246 }
247
248 void VisitExprWithCleanups(ExprWithCleanups *e) {
249 cgf.cgm.errorNYI(e->getSourceRange(),
250 "AggExprEmitter: VisitExprWithCleanups");
251 }
252 void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *e) {
253 cgf.cgm.errorNYI(e->getSourceRange(),
254 "AggExprEmitter: VisitCXXScalarValueInitExpr");
255 }
256 void VisitCXXTypeidExpr(CXXTypeidExpr *e) {
257 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXTypeidExpr");
258 }
259 void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *e) {
260 cgf.cgm.errorNYI(e->getSourceRange(),
261 "AggExprEmitter: VisitMaterializeTemporaryExpr");
262 }
263 void VisitOpaqueValueExpr(OpaqueValueExpr *e) {
264 cgf.cgm.errorNYI(e->getSourceRange(),
265 "AggExprEmitter: VisitOpaqueValueExpr");
266 }
267
268 void VisitPseudoObjectExpr(PseudoObjectExpr *e) {
269 cgf.cgm.errorNYI(e->getSourceRange(),
270 "AggExprEmitter: VisitPseudoObjectExpr");
271 }
272
273 void VisitVAArgExpr(VAArgExpr *e) {
274 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitVAArgExpr");
275 }
276
277 void VisitCXXThrowExpr(const CXXThrowExpr *e) {
278 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXThrowExpr");
279 }
280 void VisitAtomicExpr(AtomicExpr *e) {
281 cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitAtomicExpr");
282 }
283};
284
285} // namespace
286
// Returns true when `e` is a filler whose effect is equivalent to
// zero-initialization (null filler, empty init list with trivial sub-filler,
// or a trivial default constructor), so emission into already-zeroed memory
// can be skipped.
287static bool isTrivialFiller(Expr *e) {
288 if (!e)
289 return true;
290
// NOTE(review): original line 291 (the condition guarding the `return true`
// below — presumably an ImplicitValueInitExpr check) is missing from this
// extraction; verify against the upstream file.
292 return true;
293
294 if (auto *ile = dyn_cast<InitListExpr>(e)) {
295 if (ile->getNumInits())
296 return false;
297 return isTrivialFiller(ile->getArrayFiller());
298 }
299
300 if (const auto *cons = dyn_cast_or_null<CXXConstructExpr>(e))
301 return cons->getConstructor()->isDefaultConstructor() &&
302 cons->getConstructor()->isTrivial();
303
304 return false;
305}
306
307/// Given an expression with aggregate type that represents a value lvalue, this
308/// method emits the address of the lvalue, then loads the result into DestPtr.
309void AggExprEmitter::emitAggLoadOfLValue(const Expr *e) {
310 LValue lv = cgf.emitLValue(e);
311
312 // If the type of the l-value is atomic, then do an atomic load.
// NOTE(review): original line 313 (presumably the atomic-type handling or a
// missing-feature assert) is absent from this extraction.
314
315 emitFinalDestCopy(e->getType(), lv);
316}
317
/// Emit the initialization of a constant-size array into `destPtr`:
/// explicit initializers first, then (when elements remain) a do-while loop
/// that applies the array filler — or null-initialization — to the rest.
318void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
319 QualType arrayQTy, Expr *e,
320 ArrayRef<Expr *> args, Expr *arrayFiller) {
321 CIRGenBuilderTy &builder = cgf.getBuilder();
322 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
323
324 const uint64_t numInitElements = args.size();
325
326 const QualType elementType =
327 cgf.getContext().getAsArrayType(arrayQTy)->getElementType();
328
// Destructor cleanups for partially-constructed arrays under exceptions are
// not implemented yet.
329 if (elementType.isDestructedType() && cgf.cgm.getLangOpts().Exceptions) {
330 cgf.cgm.errorNYI(loc, "initialized array requires destruction");
331 return;
332 }
333
334 const QualType elementPtrType = cgf.getContext().getPointerType(elementType);
335
336 const mlir::Type cirElementType = cgf.convertType(elementType);
337 const cir::PointerType cirElementPtrType =
338 builder.getPointerTo(cirElementType);
339
// Decay the array address to a pointer to its first element.
340 auto begin = cir::CastOp::create(builder, loc, cirElementPtrType,
341 cir::CastKind::array_to_ptrdecay,
342 destPtr.getPointer());
343
344 const CharUnits elementSize =
345 cgf.getContext().getTypeSizeInChars(elementType);
346 const CharUnits elementAlign =
347 destPtr.getAlignment().alignmentOfArrayElement(elementSize);
348
349 // The 'current element to initialize'. The invariants on this
350 // variable are complicated. Essentially, after each iteration of
351 // the loop, it points to the last initialized element, except
352 // that it points to the beginning of the array before any
353 // elements have been initialized.
354 mlir::Value element = begin;
355
356 // Don't build the 'one' before the cycle to avoid
357 // emitting the redundant `cir.const 1` instrs.
358 mlir::Value one;
359
360 // Emit the explicit initializers.
361 for (uint64_t i = 0; i != numInitElements; ++i) {
362 // Advance to the next element.
363 if (i > 0) {
364 one = builder.getConstantInt(loc, cgf.PtrDiffTy, i);
365 element = builder.createPtrStride(loc, begin, one);
366 }
367
368 const Address address = Address(element, cirElementType, elementAlign);
369 const LValue elementLV = cgf.makeAddrLValue(address, elementType);
370 emitInitializationToLValue(args[i], elementLV);
371 }
372
373 const uint64_t numArrayElements = arrayTy.getSize();
374
375 // Check whether there's a non-trivial array-fill expression.
376 const bool hasTrivialFiller = isTrivialFiller(arrayFiller);
377
378 // Any remaining elements need to be zero-initialized, possibly
379 // using the filler expression. We can skip this if we're
380 // emitting to zeroed memory.
381 if (numInitElements != numArrayElements &&
382 !(dest.isZeroed() && hasTrivialFiller &&
383 cgf.getTypes().isZeroInitializable(elementType))) {
384 // Advance to the start of the rest of the array.
385 if (numInitElements) {
386 one = builder.getConstantInt(loc, cgf.PtrDiffTy, 1);
387 element = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
388 element, one);
389 }
390
391 // Allocate the temporary variable
392 // to store the pointer to first uninitialized element
393 const Address tmpAddr = cgf.createTempAlloca(
394 cirElementPtrType, cgf.getPointerAlign(), loc, "arrayinit.temp");
395 LValue tmpLV = cgf.makeAddrLValue(tmpAddr, elementPtrType);
396 cgf.emitStoreThroughLValue(RValue::get(element), tmpLV);
397
398 // Compute the end of array
399 cir::ConstantOp numArrayElementsConst = builder.getConstInt(
400 loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), numArrayElements);
401 mlir::Value end = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
402 begin, numArrayElementsConst);
403
// Loop: while the cursor stored in tmpAddr has not reached `end`, fill the
// element it points to, then advance it by one.
404 builder.createDoWhile(
405 loc,
406 /*condBuilder=*/
407 [&](mlir::OpBuilder &b, mlir::Location loc) {
408 cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);
409 mlir::Type boolTy = cgf.convertType(cgf.getContext().BoolTy);
410 cir::CmpOp cmp = cir::CmpOp::create(
411 builder, loc, boolTy, cir::CmpOpKind::ne, currentElement, end);
412 builder.createCondition(cmp);
413 },
414 /*bodyBuilder=*/
415 [&](mlir::OpBuilder &b, mlir::Location loc) {
416 cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);
417
// NOTE(review): original line 418 is missing from this extraction.
419
420 // Emit the actual filler expression.
421 LValue elementLV = cgf.makeAddrLValue(
422 Address(currentElement, cirElementType, elementAlign),
423 elementType);
424 if (arrayFiller)
425 emitInitializationToLValue(arrayFiller, elementLV);
426 else
427 emitNullInitializationToLValue(loc, elementLV);
428
429 // Tell the EH cleanup that we finished with the last element.
430 if (cgf.cgm.getLangOpts().Exceptions) {
431 cgf.cgm.errorNYI(loc, "update destructed array element for EH");
432 return;
433 }
434
435 // Advance pointer and store them to temporary variable
436 cir::ConstantOp one = builder.getConstInt(
437 loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), 1);
438 auto nextElement = cir::PtrStrideOp::create(
439 builder, loc, cirElementPtrType, currentElement, one);
440 cgf.emitStoreThroughLValue(RValue::get(nextElement), tmpLV);
441
442 builder.createYield(loc);
443 });
444 }
445}
446
447/// Perform the final copy to destPtr, if desired.
448void AggExprEmitter::emitFinalDestCopy(QualType type, const LValue &src) {
449 // If dest is ignored, then we're evaluating an aggregate expression
450 // in a context that doesn't care about the result. Note that loads
451 // from volatile l-values force the existence of a non-ignored
452 // destination.
453 if (dest.isIgnored())
454 return;
455
// NOTE(review): original lines 456-458 and 461-462 (the remaining
// AggValueSlot::forLValue arguments and any guards) are missing from this
// extraction; verify against the upstream file.
459
460 AggValueSlot srcAgg = AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
463 emitCopy(type, dest, srcAgg);
464}
465
466/// Perform a copy from the source into the destination.
467///
468/// \param type - the type of the aggregate being copied; qualifiers are
469/// ignored
470void AggExprEmitter::emitCopy(QualType type, const AggValueSlot &dest,
471 const AggValueSlot &src) {
// NOTE(review): original lines 472 and 479 are missing from this extraction.
473
474 // If the result of the assignment is used, copy the LHS there also.
475 // It's volatile if either side is. Use the minimum alignment of
476 // the two sides.
477 LValue destLV = cgf.makeAddrLValue(dest.getAddress(), type);
478 LValue srcLV = cgf.makeAddrLValue(src.getAddress(), type);
480 cgf.emitAggregateCopy(destLV, srcLV, type, dest.mayOverlap());
481}
482
/// Emit the initializer `e` into the lvalue `lv`, dispatching on the
/// evaluation kind (scalar / complex / aggregate) of the lvalue's type.
483void AggExprEmitter::emitInitializationToLValue(Expr *e, LValue lv) {
484 const QualType type = lv.getType();
485
// NOTE(review): original line 486 (the condition selecting
// null-initialization, presumably an ImplicitValueInitExpr / empty-init
// check) is missing from this extraction.
487 const mlir::Location loc = e->getSourceRange().isValid()
488 ? cgf.getLoc(e->getSourceRange())
489 : *cgf.currSrcLoc;
490 return emitNullInitializationToLValue(loc, lv);
491 }
492
// NoInitExpr means the field is intentionally left uninitialized.
493 if (isa<NoInitExpr>(e))
494 return;
495
496 if (type->isReferenceType()) {
497 RValue rv = cgf.emitReferenceBindingToExpr(e);
498 return cgf.emitStoreThroughLValue(rv, lv);
499 }
500
501 switch (cgf.getEvaluationKind(type)) {
502 case cir::TEK_Complex:
503 cgf.emitComplexExprIntoLValue(e, lv, /*isInit*/ true);
504 break;
// NOTE(review): original lines 505-508 (the TEK_Aggregate case and the
// start of its emitAggExpr call) are missing from this extraction.
509 dest.isZeroed()));
510
511 return;
512 case cir::TEK_Scalar:
513 if (lv.isSimple())
514 cgf.emitScalarInit(e, cgf.getLoc(e->getSourceRange()), lv);
515 else
// NOTE(review): original line 516 (the non-simple-lvalue store) is missing.
517 return;
518 }
519}
520
521void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *e) {
522 AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
523 cgf.emitCXXConstructExpr(e, slot);
524}
525
526void AggExprEmitter::emitNullInitializationToLValue(mlir::Location loc,
527 LValue lv) {
528 const QualType type = lv.getType();
529
530 // If the destination slot is already zeroed out before the aggregate is
531 // copied into it, we don't have to emit any zeros here.
532 if (dest.isZeroed() && cgf.getTypes().isZeroInitializable(type))
533 return;
534
535 if (cgf.hasScalarEvaluationKind(type)) {
536 // For non-aggregates, we can store the appropriate null constant.
537 mlir::Value null = cgf.cgm.emitNullConstant(type, loc);
538 if (lv.isSimple()) {
539 cgf.emitStoreOfScalar(null, lv, /* isInitialization */ true);
540 return;
541 }
542
543 cgf.cgm.errorNYI("emitStoreThroughBitfieldLValue");
544 return;
545 }
546
547 // There's a potential optimization opportunity in combining
548 // memsets; that would be easy for arrays, but relatively
549 // difficult for structures with the current code.
550 cgf.emitNullInitialization(loc, lv.getAddress(), lv.getType());
551}
552
/// Emit a lambda-expression object by initializing each field of the
/// lambda's closure class from the corresponding capture, recording a
/// human-readable name for every capture field along the way.
553void AggExprEmitter::VisitLambdaExpr(LambdaExpr *e) {
554 CIRGenFunction::SourceLocRAIIObject loc{cgf, cgf.getLoc(e->getSourceRange())};
555 AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
556 [[maybe_unused]] LValue slotLV =
557 cgf.makeAddrLValue(slot.getAddress(), e->getType());
558
559 // We'll need to enter cleanup scopes in case any of the element
560 // initializers throws an exception or contains branch out of the expressions.
// NOTE(review): original line 561 is missing from this extraction.
562
563 for (auto [curField, capture, captureInit] : llvm::zip(
564 e->getLambdaClass()->fields(), e->captures(), e->capture_inits())) {
565 // Pick a name for the field.
566 llvm::StringRef fieldName = curField->getName();
567 if (capture.capturesVariable()) {
568 assert(!curField->isBitField() && "lambdas don't have bitfield members!");
569 ValueDecl *v = capture.getCapturedVar();
570 fieldName = v->getName();
571 cgf.cgm.lambdaFieldToName[curField] = fieldName;
572 } else if (capture.capturesThis()) {
573 cgf.cgm.lambdaFieldToName[curField] = "this";
574 } else {
575 cgf.cgm.errorNYI(e->getSourceRange(), "Unhandled capture kind");
576 cgf.cgm.lambdaFieldToName[curField] = "unhandled-capture-kind";
577 }
578
579 // Emit initialization
580 LValue lv =
581 cgf.emitLValueForFieldInitialization(slotLV, curField, fieldName);
582 if (curField->hasCapturedVLAType())
583 cgf.cgm.errorNYI(e->getSourceRange(), "lambda captured VLA type");
584
585 emitInitializationToLValue(captureInit, lv);
586
587 // Push a destructor if necessary.
588 if ([[maybe_unused]] QualType::DestructionKind DtorKind =
589 curField->getType().isDestructedType())
590 cgf.cgm.errorNYI(e->getSourceRange(), "lambda with destructed field");
591 }
592}
593
/// Emit an aggregate-returning call by routing the result through a return
/// value slot (see withReturnValueSlot).
594void AggExprEmitter::VisitCallExpr(const CallExpr *e) {
// NOTE(review): original line 595 (the condition — presumably a
// reference-return-type check — guarding the errorNYI below) is missing
// from this extraction; verify against the upstream file.
596 cgf.cgm.errorNYI(e->getSourceRange(), "reference return type");
597 return;
598 }
599
600 withReturnValueSlot(
601 e, [&](ReturnValueSlot slot) { return cgf.emitCallExpr(e, slot); });
602}
603
/// Invoke `fn` with a usable return value slot for `e`, currently always the
/// destination's own address; the temporary-creation path and destruction
/// handling are not implemented yet.
604void AggExprEmitter::withReturnValueSlot(
605 const Expr *e, llvm::function_ref<RValue(ReturnValueSlot)> fn) {
606 QualType retTy = e->getType();
607
// NOTE(review): original lines 608 and 610 (the initializer of
// `requiresDestruction` and an adjacent line) are missing from this
// extraction; verify against the upstream file.
609 bool requiresDestruction =
611 if (requiresDestruction)
612 cgf.cgm.errorNYI(
613 e->getSourceRange(),
614 "withReturnValueSlot: return value requiring destruction is NYI");
615
616 // If it makes no observable difference, save a memcpy + temporary.
617 //
618 // We need to always provide our own temporary if destruction is required.
619 // Otherwise, fn will emit its own, notice that it's "unused", and end its
620 // lifetime before we have the chance to emit a proper destructor call.
// NOTE(review): original lines 621-622, 625 and 627-628 are missing.
623
624 Address retAddr = dest.getAddress();
626
629 fn(ReturnValueSlot(retAddr));
630}
631
/// Emit an init-list expression; transparent lists forward to their single
/// initializer, everything else goes through visitCXXParenListOrInitListExpr.
632void AggExprEmitter::VisitInitListExpr(InitListExpr *e) {
// NOTE(review): original line 633 (the condition — presumably an
// array-range-designator check — guarding the llvm_unreachable below) is
// missing from this extraction.
634 llvm_unreachable("GNU array range designator extension");
635
636 if (e->isTransparent())
637 return Visit(e->getInit(0));
638
639 visitCXXParenListOrInitListExpr(
// NOTE(review): original line 640 (the call's argument list) is missing.
641}
642
/// Shared emission path for init-lists and C++ paren-list initializers:
/// arrays are delegated to emitArrayInit; records are initialized field by
/// field (bases, unions, VLAs and destructor cleanups are still NYI).
/// NOTE(review): original lines 652, 679, 717 and 722 are missing from this
/// extraction; verify against the upstream file.
643void AggExprEmitter::visitCXXParenListOrInitListExpr(
644 Expr *e, ArrayRef<Expr *> args, FieldDecl *initializedFieldInUnion,
645 Expr *arrayFiller) {
646
647 const AggValueSlot dest =
648 ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
649
650 if (e->getType()->isConstantArrayType()) {
651 cir::ArrayType arrayTy =
653 emitArrayInit(dest.getAddress(), arrayTy, e->getType(), e, args,
654 arrayFiller);
655 return;
656 } else if (e->getType()->isVariableArrayType()) {
657 cgf.cgm.errorNYI(e->getSourceRange(),
658 "visitCXXParenListOrInitListExpr variable array type");
659 return;
660 }
661
662 if (e->getType()->isArrayType()) {
663 cgf.cgm.errorNYI(e->getSourceRange(),
664 "visitCXXParenListOrInitListExpr array type");
665 return;
666 }
667
668 assert(e->getType()->isRecordType() && "Only support structs/unions here!");
669
670 // Do struct initialization; this code just sets each individual member
671 // to the appropriate value. This makes bitfield support automatic;
672 // the disadvantage is that the generated code is more difficult for
673 // the optimizer, especially with bitfields.
674 unsigned numInitElements = args.size();
675 auto *record = e->getType()->castAsRecordDecl();
676
677 // We'll need to enter cleanup scopes in case any of the element
678 // initializers throws an exception.
680
681 unsigned curInitIndex = 0;
682
683 // Emit initialization of base classes.
684 if (auto *cxxrd = dyn_cast<CXXRecordDecl>(record)) {
685 assert(numInitElements >= cxxrd->getNumBases() &&
686 "missing initializer for base class");
687 if (cxxrd->getNumBases() > 0) {
688 cgf.cgm.errorNYI(e->getSourceRange(),
689 "visitCXXParenListOrInitListExpr base class init");
690 return;
691 }
692 }
693
694 LValue destLV = cgf.makeAddrLValue(dest.getAddress(), e->getType());
695
696 if (record->isUnion()) {
697 cgf.cgm.errorNYI(e->getSourceRange(),
698 "visitCXXParenListOrInitListExpr union type");
699 return;
700 }
701
702 // Here we iterate over the fields; this makes it simpler to both
703 // default-initialize fields and skip over unnamed fields.
704 for (const FieldDecl *field : record->fields()) {
705 // We're done once we hit the flexible array member.
706 if (field->getType()->isIncompleteArrayType())
707 break;
708
709 // Always skip anonymous bitfields.
710 if (field->isUnnamedBitField())
711 continue;
712
713 // We're done if we reach the end of the explicit initializers, we
714 // have a zeroed object, and the rest of the fields are
715 // zero-initializable.
716 if (curInitIndex == numInitElements && dest.isZeroed() &&
718 break;
719 LValue lv =
720 cgf.emitLValueForFieldInitialization(destLV, field, field->getName());
721 // We never generate write-barriers for initialized fields.
723
724 if (curInitIndex < numInitElements) {
725 // Store the initializer into the field.
726 CIRGenFunction::SourceLocRAIIObject loc{
727 cgf, cgf.getLoc(record->getSourceRange())};
728 emitInitializationToLValue(args[curInitIndex++], lv);
729 } else {
730 // We're out of initializers; default-initialize to null
731 emitNullInitializationToLValue(cgf.getLoc(e->getSourceRange()), lv);
732 }
733
734 // Push a destructor if necessary.
735 // FIXME: if we have an array of structures, all explicitly
736 // initialized, we can end up pushing a linear number of cleanups.
737 if (field->getType().isDestructedType()) {
738 cgf.cgm.errorNYI(e->getSourceRange(),
739 "visitCXXParenListOrInitListExpr destructor");
740 return;
741 }
742
743 // From classic codegen, maybe not useful for CIR:
744 // If the GEP didn't get used because of a dead zero init or something
745 // else, clean it up for -O0 builds and general tidiness.
746 }
747}
748
749// TODO(cir): This could be shared with classic codegen.
// Decide whether initializing base `baseRD` of `rd` may overlap other
// subobjects (virtual bases may; a base laid out entirely within the
// derived class's nvsize cannot).
// NOTE(review): original lines 750 (function signature start), 756, 765 and
// 768 (the return statements) are missing from this extraction; verify
// against the upstream file.
751 const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual) {
752 // If the most-derived object is a field declared with [[no_unique_address]],
753 // the tail padding of any virtual base could be reused for other subobjects
754 // of that field's class.
755 if (isVirtual)
757
758 // If the base class is laid out entirely within the nvsize of the derived
759 // class, its tail padding cannot yet be initialized, so we can issue
760 // stores at the full width of the base class.
761 const ASTRecordLayout &layout = getContext().getASTRecordLayout(rd);
762 if (layout.getBaseClassOffset(baseRD) +
763 getContext().getASTRecordLayout(baseRD).getSize() <=
764 layout.getNonVirtualSize())
766
767 // The tail padding may contain values we need to preserve.
769}
770
// Entry point: emit an aggregate expression into `slot` by running the
// AggExprEmitter visitor over it.
// NOTE(review): original line 771 (the function signature, presumably
// CIRGenFunction::emitAggExpr) is missing from this extraction.
772 AggExprEmitter(*this, slot).Visit(const_cast<Expr *>(e));
773}
774
// Copy one aggregate lvalue to another via cir.copy; asserts the type is
// trivially copyable, skips empty C++ classes, and excludes tail padding
// when the destination may overlap another subobject.
// NOTE(review): original lines 775 (signature start), 799, 821 and 834 are
// missing from this extraction; verify against the upstream file.
776 AggValueSlot::Overlap_t mayOverlap) {
777 // TODO(cir): this function needs improvements, commented code for now since
778 // this will be touched again soon.
779 assert(!ty->isAnyComplexType() && "Unexpected copy of complex");
780
781 Address destPtr = dest.getAddress();
782 Address srcPtr = src.getAddress();
783
784 if (getLangOpts().CPlusPlus) {
785 if (auto *record = ty->getAsCXXRecordDecl()) {
786 assert((record->hasTrivialCopyConstructor() ||
787 record->hasTrivialCopyAssignment() ||
788 record->hasTrivialMoveConstructor() ||
789 record->hasTrivialMoveAssignment() ||
790 record->hasAttr<TrivialABIAttr>() || record->isUnion()) &&
791 "Trying to aggregate-copy a type without a trivial copy/move "
792 "constructor or assignment operator");
793 // Ignore empty classes in C++.
794 if (record->isEmpty())
795 return;
796 }
797 }
798
800
801 // Aggregate assignment turns into llvm.memcpy. This is almost valid per
802 // C99 6.5.16.1p3, which states "If the value being stored in an object is
803 // read from another object that overlaps in anyway the storage of the first
804 // object, then the overlap shall be exact and the two objects shall have
805 // qualified or unqualified versions of a compatible type."
806 //
807 // memcpy is not defined if the source and destination pointers are exactly
808 // equal, but other compilers do this optimization, and almost every memcpy
809 // implementation handles this case safely. If there is a libc that does not
810 // safely handle this, we can add a target hook.
811
812 // Get data size info for this aggregate. Don't copy the tail padding if this
813 // might be a potentially-overlapping subobject, since the tail padding might
814 // be occupied by a different object. Otherwise, copying it is fine.
815 TypeInfoChars typeInfo;
816 if (mayOverlap)
817 typeInfo = getContext().getTypeInfoDataSizeInChars(ty);
818 else
819 typeInfo = getContext().getTypeInfoInChars(ty);
820
822
823 // NOTE(cir): original codegen would normally convert destPtr and srcPtr to
824 // i8* since memcpy operates on bytes. We don't need that in CIR because
825 // cir.copy will operate on any CIR pointer that points to a sized type.
826
827 // Don't do any of the memmove_collectable tests if GC isn't set.
828 if (cgm.getLangOpts().getGC() != LangOptions::NonGC)
829 cgm.errorNYI("emitAggregateCopy: GC");
830
831 [[maybe_unused]] cir::CopyOp copyOp =
832 builder.createCopy(destPtr.getPointer(), srcPtr.getPointer());
833
835}
836
837// TODO(cir): This could be shared with classic codegen.
// Decide whether initializing field `fd` may overlap other subobjects:
// only [[no_unique_address]] record-typed fields that extend past the
// enclosing class's nvsize can overlap.
// NOTE(review): original lines 838-839 (function signature), 841, 851 and
// 854 (the return statements) are missing from this extraction; verify
// against the upstream file.
840 if (!fd->hasAttr<NoUniqueAddressAttr>() || !fd->getType()->isRecordType())
842
843 // If the field lies entirely within the enclosing class's nvsize, its tail
844 // padding cannot overlap any already-initialized object. (The only subobjects
845 // with greater addresses that might already be initialized are vbases.)
846 const RecordDecl *classRD = fd->getParent();
847 const ASTRecordLayout &layout = getContext().getASTRecordLayout(classRD);
848 if (layout.getFieldOffset(fd->getFieldIndex()) +
849 getContext().getTypeSize(fd->getType()) <=
850 (uint64_t)getContext().toBits(layout.getNonVirtualSize()))
852
853 // The tail padding may contain values we need to preserve.
855}
856
static bool isTrivialFiller(Expr *e)
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
cir::ConditionOp createCondition(mlir::Value condition)
Create a loop condition.
cir::PtrStrideOp createPtrStride(mlir::Location loc, mlir::Value base, mlir::Value stride)
cir::PointerType getPointerTo(mlir::Type ty)
cir::DoWhileOp createDoWhile(mlir::Location loc, llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> condBuilder, llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> bodyBuilder)
Create a do-while operation.
cir::ConstantOp getConstantInt(mlir::Location loc, mlir::Type ty, int64_t value)
cir::YieldOp createYield(mlir::Location loc, mlir::ValueRange value={})
Create a yield operation.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType BoolTy
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object...
QualType getElementType() const
Definition TypeBase.h:3732
mlir::Value getPointer() const
Definition Address.h:81
mlir::Type getElementType() const
Definition Address.h:101
clang::CharUnits getAlignment() const
Definition Address.h:109
An aggregate value slot.
IsZeroed_t isZeroed() const
Overlap_t mayOverlap() const
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal)
cir::LoadOp createLoad(mlir::Location loc, Address addr, bool isVolatile=false)
static bool hasScalarEvaluationKind(clang::QualType type)
mlir::Type convertType(clang::QualType t)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
CIRGenTypes & getTypes() const
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, bool isInit=false, bool isNontemporal=false)
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *fd)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
LValue emitAggExprToLValue(const Expr *e)
static bool hasAggregateEvaluationKind(clang::QualType type)
void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit=false)
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, excpet that if the Field is a reference, this will return the address of the...
mlir::Value emitScalarExpr(const clang::Expr *e)
Emit the computation of the specified expression of scalar type.
CIRGenBuilderTy & getBuilder()
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual)
Determine whether a base class initialization may overlap some other object.
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
clang::ASTContext & getContext() const
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy, AggValueSlot::Overlap_t mayOverlap)
Emit an aggregate copy.
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignmen and cast it to the defa...
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
const clang::LangOptions & getLangOpts() const
mlir::Value emitNullConstant(QualType t, mlir::Location loc)
Return the result of value-initializing the given type, i.e.
llvm::DenseMap< const clang::FieldDecl *, llvm::StringRef > lambdaFieldToName
Keep a map between lambda fields and names, this needs to be per module since lambdas might get gener...
bool isZeroInitializable(clang::QualType ty)
Return whether a type can be zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
Address getAddress() const
static RValue get(mlir::Value v)
Definition CIRGenValue.h:82
const Expr * getSubExpr() const
Definition ExprCXX.h:1516
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprCXX.h:5195
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprCXX.h:353
SourceRange getSourceRange() const LLVM_READONLY
Retrieve the source range of the expression.
Definition ExprCXX.h:828
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprCXX.h:902
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1599
CastKind getCastKind() const
Definition Expr.h:3654
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1946
Expr * getSubExpr()
Definition Expr.h:3660
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
bool hasAttr() const
Definition DeclBase.h:577
This represents one expression.
Definition Expr.h:112
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3157
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3242
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3393
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition Expr.cpp:2457
FieldDecl * getInitializedFieldInUnion()
If this initializes a union, specifies which field in the union to initialize.
Definition Expr.h:5359
bool hadArrayRangeDesignator() const
Definition Expr.h:5417
Expr * getArrayFiller()
If this initializer list initializes an array with more elements than there are initializers in the l...
Definition Expr.h:5335
const Expr * getInit(unsigned Init) const
Definition Expr.h:5287
ArrayRef< Expr * > inits()
Definition Expr.h:5283
llvm::iterator_range< capture_init_iterator > capture_inits()
Retrieve the initialization expressions for this lambda's captures.
Definition ExprCXX.h:2085
capture_range captures() const
Retrieve this lambda's captures.
Definition ExprCXX.cpp:1371
CXXRecordDecl * getLambdaClass() const
Retrieve the class that corresponds to the lambda.
Definition ExprCXX.cpp:1400
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:300
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8369
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1545
Represents a struct/union/class.
Definition Decl.h:4309
field_range fields() const
Definition Decl.h:4512
CompoundStmt * getSubStmt()
Definition Expr.h:4546
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:334
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantArrayType() const
Definition TypeBase.h:8625
bool isArrayType() const
Definition TypeBase.h:8621
bool isReferenceType() const
Definition TypeBase.h:8546
bool isVariableArrayType() const
Definition TypeBase.h:8633
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8657
bool isRecordType() const
Definition TypeBase.h:8649
QualType getType() const
Definition Decl.h:722
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
static bool emitLifetimeMarkers()
static bool aggValueSlotDestructedFlag()
static bool aggValueSlotGC()
static bool aggValueSlotAlias()
static bool opLoadStoreAtomic()
static bool aggEmitFinalDestCopyRValue()
static bool aggValueSlotVolatile()
static bool opScopeCleanupRegion()
static bool cudaSupport()
static bool requiresCleanups()
clang::CharUnits getPointerAlign() const