clang 23.0.0git
CIRGenExprAggregate.cpp
Go to the documentation of this file.
//===- CIRGenExprAggregate.cpp - Emit CIR Code from Aggregate Expressions ===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Aggregate Expr nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenBuilder.h"
14#include "CIRGenFunction.h"
15#include "CIRGenValue.h"
16#include "mlir/IR/Builders.h"
18
19#include "clang/AST/Expr.h"
22#include "llvm/IR/Value.h"
23#include <cstdint>
24
25using namespace clang;
26using namespace clang::CIRGen;
27
28namespace {
29// FIXME(cir): This should be a common helper between CIRGen
30// and traditional CodeGen
31/// Is the value of the given expression possibly a reference to or
32/// into a __block variable?
33static bool isBlockVarRef(const Expr *e) {
34 // Make sure we look through parens.
35 e = e->IgnoreParens();
36
37 // Check for a direct reference to a __block variable.
38 if (const DeclRefExpr *dre = dyn_cast<DeclRefExpr>(e)) {
39 const VarDecl *var = dyn_cast<VarDecl>(dre->getDecl());
40 return (var && var->hasAttr<BlocksAttr>());
41 }
42
43 // More complicated stuff.
44
45 // Binary operators.
46 if (const BinaryOperator *op = dyn_cast<BinaryOperator>(e)) {
47 // For an assignment or pointer-to-member operation, just care
48 // about the LHS.
49 if (op->isAssignmentOp() || op->isPtrMemOp())
50 return isBlockVarRef(op->getLHS());
51
52 // For a comma, just care about the RHS.
53 if (op->getOpcode() == BO_Comma)
54 return isBlockVarRef(op->getRHS());
55
56 // FIXME: pointer arithmetic?
57 return false;
58
59 // Check both sides of a conditional operator.
60 } else if (const AbstractConditionalOperator *op =
61 dyn_cast<AbstractConditionalOperator>(e)) {
62 return isBlockVarRef(op->getTrueExpr()) ||
63 isBlockVarRef(op->getFalseExpr());
64
65 // OVEs are required to support BinaryConditionalOperators.
66 } else if (const OpaqueValueExpr *op = dyn_cast<OpaqueValueExpr>(e)) {
67 if (const Expr *src = op->getSourceExpr())
68 return isBlockVarRef(src);
69
70 // Casts are necessary to get things like (*(int*)&var) = foo().
71 // We don't really care about the kind of cast here, except
72 // we don't want to look through l2r casts, because it's okay
73 // to get the *value* in a __block variable.
74 } else if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
75 if (cast->getCastKind() == CK_LValueToRValue)
76 return false;
77 return isBlockVarRef(cast->getSubExpr());
78
79 // Handle unary operators. Again, just aggressively look through
80 // it, ignoring the operation.
81 } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(e)) {
82 return isBlockVarRef(uop->getSubExpr());
83
84 // Look into the base of a field access.
85 } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(e)) {
86 return isBlockVarRef(mem->getBase());
87
88 // Look into the base of a subscript.
89 } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(e)) {
90 return isBlockVarRef(sub->getBase());
91 }
92
93 return false;
94}
95
/// Visitor that emits expressions of aggregate type into a destination
/// slot (`dest`). When the destination is ignored but storage is still
/// required, a temporary slot is materialized via ensureSlot/ensureDest.
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {

  CIRGenFunction &cgf;
  // The slot the visited expression's value is emitted into.
  AggValueSlot dest;

  // Calls `fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *e,
                           llvm::function_ref<RValue(ReturnValueSlot)> fn);

  // Return `dest` if usable, otherwise create a temporary slot of type `t`.
  AggValueSlot ensureSlot(mlir::Location loc, QualType t) {
    if (!dest.isIgnored())
      return dest;
    return cgf.createAggTemp(t, loc, "agg.tmp.ensured");
  }

  // Make `dest` itself refer to real storage, creating a temporary if the
  // caller ignored the result.
  void ensureDest(mlir::Location loc, QualType ty) {
    if (!dest.isIgnored())
      return;
    dest = cgf.createAggTemp(ty, loc, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CIRGenFunction &cgf, AggValueSlot dest)
      : cgf(cgf), dest(dest) {}

  /// Given an expression with aggregate type that represents a value lvalue,
  /// this method emits the address of the lvalue, then loads the result into
  /// DestPtr.
  void emitAggLoadOfLValue(const Expr *e);

  void emitArrayInit(Address destPtr, cir::ArrayType arrayTy, QualType arrayQTy,
                     Expr *exprToVisit, ArrayRef<Expr *> args,
                     Expr *arrayFiller);

  void emitFinalDestCopy(QualType type, RValue src);

  /// Perform the final copy to DestPtr, if desired.
  void emitFinalDestCopy(QualType type, const LValue &src,
                         CIRGenFunction::ExprValueKind srcValueKind =
  // NOTE(review): the default-argument value was lost in extraction here
  // (presumably CIRGenFunction::EVK_NonRValue) — verify against upstream.

  void emitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void emitInitializationToLValue(Expr *e, LValue lv);

  void emitNullInitializationToLValue(mlir::Location loc, LValue lv);

  void Visit(Expr *e) { StmtVisitor<AggExprEmitter>::Visit(e); }

  void VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
    emitAggLoadOfLValue(e);
  }

  void VisitCallExpr(const CallExpr *e);
  void VisitStmtExpr(const StmtExpr *e) {
    CIRGenFunction::StmtExprEvaluation eval(cgf);
    Address retAlloca =
        cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
    (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca, dest);
  }

  void VisitBinAssign(const BinaryOperator *e) {
    // For an assignment to work, the value on the right has
    // to be compatible with the value on the left.
    assert(cgf.getContext().hasSameUnqualifiedType(e->getLHS()->getType(),
                                                   e->getRHS()->getType()) &&
           "Invalid assignment");

    if (isBlockVarRef(e->getLHS()) &&
        e->getRHS()->HasSideEffects(cgf.getContext())) {
      cgf.cgm.errorNYI(e->getSourceRange(),
                       "block var reference with side effects");
      return;
    }

    LValue lhs = cgf.emitLValue(e->getLHS());

    // If we have an atomic type, evaluate into the destination and then
    // do an atomic copy.
    // NOTE(review): a line was lost in extraction here (likely a
    // MissingFeatures assert for atomic types) — verify against upstream.

    // Codegen the RHS so that it stores directly into the LHS.
    AggValueSlot lhsSlot = AggValueSlot::forLValue(
    // NOTE(review): the forLValue argument list was lost in extraction —
    // verify against upstream.

    // A non-volatile aggregate destination might have volatile member.
    if (!lhsSlot.isVolatile() && cgf.hasVolatileMember(e->getLHS()->getType()))
      lhsSlot.setVolatile(true);

    cgf.emitAggExpr(e->getRHS(), lhsSlot);

    // Copy into the destination if the assignment isn't ignored.
    emitFinalDestCopy(e->getType(), lhs);

    if (!dest.isIgnored() && !dest.isExternallyDestructed() &&
    // NOTE(review): the condition continuation was lost in extraction
    // (presumably a DK_nontrivial_c_struct destructed-type check) — verify.
      cgf.pushDestroy(QualType::DK_nontrivial_c_struct, dest.getAddress(),
                      e->getType());
  }

  void VisitDeclRefExpr(DeclRefExpr *e) { emitAggLoadOfLValue(e); }

  void VisitInitListExpr(InitListExpr *e);
  void VisitCXXConstructExpr(const CXXConstructExpr *e);

  void visitCXXParenListOrInitListExpr(Expr *e, ArrayRef<Expr *> args,
                                       FieldDecl *initializedFieldInUnion,
                                       Expr *arrayFiller);
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
    CIRGenFunction::CXXDefaultInitExprScope Scope(cgf, die);
    Visit(die->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *e) {
    // Ensure that we have a slot, but if we already do, remember
    // whether it was externally destructed.
    bool wasExternallyDestructed = dest.isExternallyDestructed();
    ensureDest(cgf.getLoc(e->getSourceRange()), e->getType());

    // We're going to push a destructor if there isn't already one.
    dest.setExternallyDestructed();

    Visit(e->getSubExpr());

    // Push that destructor we promised.
    if (!wasExternallyDestructed)
      cgf.emitCXXTemporary(e->getTemporary(), e->getType(), dest.getAddress());
  }
  void VisitLambdaExpr(LambdaExpr *e);
  void VisitExprWithCleanups(ExprWithCleanups *e);

  // Stubs -- These should be moved up when they are implemented.
  void VisitCastExpr(CastExpr *e) {
    switch (e->getCastKind()) {
    case CK_LValueToRValueBitCast: {
      if (dest.isIgnored()) {
        cgf.emitAnyExpr(e->getSubExpr(), AggValueSlot::ignored(),
                        /*ignoreResult=*/true);
        break;
      }

      LValue sourceLV = cgf.emitLValue(e->getSubExpr());
      Address sourceAddress =
          sourceLV.getAddress().withElementType(cgf.getBuilder(), cgf.voidTy);
      Address destAddress =
          dest.getAddress().withElementType(cgf.getBuilder(), cgf.voidTy);

      mlir::Location loc = cgf.getLoc(e->getExprLoc());

      // A bit-cast of an aggregate is a raw byte copy of the object
      // representation.
      mlir::Value sizeVal = cgf.getBuilder().getConstInt(
          loc, cgf.sizeTy,
          cgf.getContext().getTypeSizeInChars(e->getType()).getQuantity());
      cgf.getBuilder().createMemCpy(loc, destAddress.getPointer(),
                                    sourceAddress.getPointer(), sizeVal);

      break;
    }
    case CK_LValueToRValue:
      // If we're loading from a volatile type, force the destination
      // into existence.
      // NOTE(review): the guarding condition was lost in extraction
      // (presumably a volatile-qualified-type check) — verify upstream.
      cgf.cgm.errorNYI(e->getSourceRange(),
                       "AggExprEmitter: volatile lvalue-to-rvalue cast");
      [[fallthrough]];
    case CK_NoOp:
    case CK_UserDefinedConversion:
    case CK_ConstructorConversion:
      assert(cgf.getContext().hasSameUnqualifiedType(e->getSubExpr()->getType(),
                                                     e->getType()) &&
             "Implicit cast types must be compatible");
      Visit(e->getSubExpr());
      break;
    default:
      cgf.cgm.errorNYI(e->getSourceRange(),
                       std::string("AggExprEmitter: VisitCastExpr: ") +
                           e->getCastKindName());
      break;
    }
  }
  void VisitStmt(Stmt *s) {
    cgf.cgm.errorNYI(s->getSourceRange(),
                     std::string("AggExprEmitter::VisitStmt: ") +
                         s->getStmtClassName());
  }
  void VisitParenExpr(ParenExpr *pe) { Visit(pe->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
    Visit(ge->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoawaitExpr");
  }
  void VisitCoyieldExpr(CoyieldExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoyieldExpr");
  }
  void VisitUnaryCoawait(UnaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitUnaryCoawait");
  }
  void VisitUnaryExtension(UnaryOperator *e) { Visit(e->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitSubstNonTypeTemplateParmExpr");
  }
  void VisitConstantExpr(ConstantExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitConstantExpr");
  }
  void VisitMemberExpr(MemberExpr *e) { emitAggLoadOfLValue(e); }
  void VisitUnaryDeref(UnaryOperator *e) { emitAggLoadOfLValue(e); }
  void VisitStringLiteral(StringLiteral *e) { emitAggLoadOfLValue(e); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *e);

  void VisitPredefinedExpr(const PredefinedExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitPredefinedExpr");
  }
  void VisitBinaryOperator(const BinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitBinaryOperator");
  }
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitPointerToDataMemberBinaryOperator");
  }
  void VisitBinComma(const BinaryOperator *e) {
    // Comma: evaluate the LHS for side effects only, then emit the RHS.
    cgf.emitIgnoredExpr(e->getLHS());
    Visit(e->getRHS());
  }
  void VisitBinCmp(const BinaryOperator *e) {
    assert(cgf.getContext().hasSameType(e->getLHS()->getType(),
                                        e->getRHS()->getType()));
    const ComparisonCategoryInfo &cmpInfo =
        cgf.getContext().CompCategories.getInfoForType(e->getType());
    assert(cmpInfo.Record->isTriviallyCopyable() &&
           "cannot copy non-trivially copyable aggregate");

    QualType argTy = e->getLHS()->getType();

    if (!argTy->isIntegralOrEnumerationType() && !argTy->isRealFloatingType() &&
        !argTy->isNullPtrType() && !argTy->isPointerType() &&
        !argTy->isMemberPointerType() && !argTy->isAnyComplexType())
      cgf.cgm.errorNYI(e->getBeginLoc(), "aggregate three-way comparison");

    mlir::Location loc = cgf.getLoc(e->getSourceRange());
    CIRGenBuilderTy builder = cgf.getBuilder();

    if (e->getType()->isAnyComplexType())
      cgf.cgm.errorNYI(e->getBeginLoc(), "VisitBinCmp: complex type");

    if (e->getType()->isAggregateType())
      cgf.cgm.errorNYI(e->getBeginLoc(), "VisitBinCmp: aggregate type");

    mlir::Value lhs = cgf.emitAnyExpr(e->getLHS()).getValue();
    mlir::Value rhs = cgf.emitAnyExpr(e->getRHS()).getValue();

    mlir::Value resultScalar;
    if (argTy->isNullPtrType()) {
      // nullptr_t always compares equal; emit the constant result directly.
      resultScalar =
          builder.getConstInt(loc, cmpInfo.getEqualOrEquiv()->getIntValue());
    } else {
      llvm::APSInt ltRes = cmpInfo.getLess()->getIntValue();
      llvm::APSInt eqRes = cmpInfo.getEqualOrEquiv()->getIntValue();
      llvm::APSInt gtRes = cmpInfo.getGreater()->getIntValue();
      if (!cmpInfo.isPartial()) {
        cir::CmpOrdering ordering = cmpInfo.isStrong()
                                        ? cir::CmpOrdering::Strong
                                        : cir::CmpOrdering::Weak;
        resultScalar = builder.createThreeWayCmpTotalOrdering(
            loc, lhs, rhs, ltRes, eqRes, gtRes, ordering);
      } else {
        // Partial ordering.
        llvm::APSInt unorderedRes = cmpInfo.getUnordered()->getIntValue();
        resultScalar = builder.createThreeWayCmpPartialOrdering(
            loc, lhs, rhs, ltRes, eqRes, gtRes, unorderedRes);
      }
    }

    // Create the return value in the destination slot.
    ensureDest(loc, e->getType());
    LValue destLVal = cgf.makeAddrLValue(dest.getAddress(), e->getType());

    // Emit the address of the first (and only) field in the comparison category
    // type, and initialize it from the constant integer value produced above.
    const FieldDecl *resultField = *cmpInfo.Record->field_begin();
    LValue fieldLVal = cgf.emitLValueForFieldInitialization(
        destLVal, resultField, resultField->getName());
    cgf.emitStoreThroughLValue(RValue::get(resultScalar), fieldLVal);

    // All done! The result is in the dest slot.
  }

  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitCXXRewrittenBinaryOperator");
  }
  void VisitObjCMessageExpr(ObjCMessageExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitObjCMessageExpr");
  }
  void VisitObjCIVarRefExpr(ObjCIvarRefExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitObjCIVarRefExpr");
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *e) {
    // Emit the base value first, then apply the updater on top of it.
    AggValueSlot dest = ensureSlot(cgf.getLoc(e->getExprLoc()), e->getType());
    LValue destLV = cgf.makeAddrLValue(dest.getAddress(), e->getType());
    emitInitializationToLValue(e->getBase(), destLV);
    VisitInitListExpr(e->getUpdater());
  }
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *e) {
    mlir::Location loc = cgf.getLoc(e->getSourceRange());

    CIRGenFunction::OpaqueValueMapping binding(cgf, e);
    CIRGenFunction::ConditionalEvaluation eval(cgf);

    // Save whether the destination's lifetime is externally managed.
    bool isExternallyDestructed = dest.isExternallyDestructed();
    bool destructNonTrivialCStruct =
        !isExternallyDestructed &&
    // NOTE(review): the condition continuation was lost in extraction
    // (presumably a DK_nontrivial_c_struct destructed-type check) — verify.
    isExternallyDestructed |= destructNonTrivialCStruct;

    cgf.emitIfOnBoolExpr(
        e->getCond(),
        /*thenBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          eval.beginEvaluation();
          {
            CIRGenFunction::LexicalScope lexScope{cgf, loc,
                                                  b.getInsertionBlock()};
            cgf.curLexScope->setAsTernary();
            dest.setExternallyDestructed(isExternallyDestructed);
            assert(!cir::MissingFeatures::incrementProfileCounter());
            Visit(e->getTrueExpr());
            cir::YieldOp::create(b, loc);
          }
          eval.endEvaluation();
        },
        loc,
        /*elseBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          eval.beginEvaluation();
          {
            CIRGenFunction::LexicalScope lexScope{cgf, loc,
                                                  b.getInsertionBlock()};
            cgf.curLexScope->setAsTernary();

            // If the result of an agg expression is unused, then the emission
            // of the LHS might need to create a destination slot. That's fine
            // with us, and we can safely emit the RHS into the same slot, but
            // we shouldn't claim that it's already being destructed.
            dest.setExternallyDestructed(isExternallyDestructed);
            // NOTE(review): a line was lost in extraction here (likely a
            // MissingFeatures assert mirroring the then-branch) — verify.
            Visit(e->getFalseExpr());
            cir::YieldOp::create(b, loc);
          }
          eval.endEvaluation();
        },
        loc);

    if (destructNonTrivialCStruct)
      cgf.cgm.errorNYI(
          e->getSourceRange(),
          "Abstract conditional aggregate: destructNonTrivialCStruct");
  }
  void VisitChooseExpr(const ChooseExpr *e) { Visit(e->getChosenSubExpr()); }
  void VisitCXXParenListInitExpr(CXXParenListInitExpr *e) {
    visitCXXParenListOrInitListExpr(e, e->getInitExprs(),
    // NOTE(review): an argument line was lost in extraction here (presumably
    // the initialized-field-in-union argument) — verify against upstream.
                                    e->getArrayFiller());
  }

  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *e,
                              llvm::Value *outerBegin = nullptr) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitArrayInitLoopExpr");
  }
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitImplicitValueInitExpr");
  }
  void VisitNoInitExpr(NoInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitNoInitExpr");
  }
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *dae) {
    // NOTE(review): a line was lost in extraction here (presumably a
    // CXXDefaultArgExprScope RAII object) — verify against upstream.
    Visit(dae->getExpr());
  }
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitCXXInheritedCtorInitExpr");
  }

  /// Emit the initializer for a std::initializer_list initialized with a
  /// real initializer list.
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *e) {
    ASTContext &ctx = cgf.getContext();
    CIRGenBuilderTy builder = cgf.getBuilder();
    mlir::Location loc = cgf.getLoc(e->getExprLoc());

    LValue array = cgf.emitLValue(e->getSubExpr());
    assert(array.isSimple() && "initializer_list array not a simple lvalue");
    Address arrayPtr = array.getAddress();

    // NOTE(review): lines were lost in extraction here (presumably the
    // declaration of `arrayType` from the sub-expression's type) — verify.
    assert(arrayType && "std::initializer_list constructed from non-array");

    auto *record = e->getType()->castAsRecordDecl();
    assert(record->getNumFields() == 2 &&
           "Expected std::initializer_list to only have two fields");

    RecordDecl::field_iterator field = record->field_begin();
    assert(field != record->field_end() &&
           ctx.hasSameType(field->getType()->getPointeeType(),
                           arrayType->getElementType()) &&
           "Expected std::initializer_list first field to be const E *");

    // Start pointer.
    AggValueSlot dest = ensureSlot(loc, e->getType());
    LValue destLV = cgf.makeAddrLValue(dest.getAddress(), e->getType());
    LValue start =
        cgf.emitLValueForFieldInitialization(destLV, *field, field->getName());

    mlir::Value arrayStart = arrayPtr.emitRawPointer();
    cgf.emitStoreThroughLValue(RValue::get(arrayStart), start);
    ++field;
    assert(field != record->field_end() &&
           "Expected std::initializer_list to have two fields");

    cir::ConstantOp size = builder.getConstInt(loc, arrayType->getSize());
    LValue endOrLength =
        cgf.emitLValueForFieldInitialization(destLV, *field, field->getName());
    if (ctx.hasSameType(field->getType(), ctx.getSizeType())) {
      // Length.
      cgf.emitStoreThroughLValue(RValue::get(size), endOrLength);
    } else {
      // End pointer.
      assert(field->getType()->isPointerType() &&
             ctx.hasSameType(field->getType()->getPointeeType(),
                             arrayType->getElementType()) &&
             "Expected std::initializer_list second field to be const E *");
      mlir::Value arrayEnd = builder.createPtrStride(loc, arrayStart, size);
      cgf.emitStoreThroughLValue(RValue::get(arrayEnd), endOrLength);
    }
  }

  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitCXXScalarValueInitExpr");
  }
  void VisitCXXTypeidExpr(CXXTypeidExpr *e) { emitAggLoadOfLValue(e); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *e) {
    Visit(e->getSubExpr());
  }
  void VisitOpaqueValueExpr(OpaqueValueExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitOpaqueValueExpr");
  }

  void VisitPseudoObjectExpr(PseudoObjectExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitPseudoObjectExpr");
  }

  void VisitVAArgExpr(VAArgExpr *e) {
    // emitVAArg returns an aggregate value (not a pointer) at the CIR level.
    // ABI-specific pointer handling will be done later in LoweringPrepare.
    mlir::Value vaArgValue = cgf.emitVAArg(e);

    // Create a temporary alloca to hold the aggregate value.
    mlir::Location loc = cgf.getLoc(e->getSourceRange());
    Address tmpAddr = cgf.createMemTemp(e->getType(), loc, "vaarg.tmp");

    // Store the va_arg result into the temporary.
    cgf.emitAggregateStore(vaArgValue, tmpAddr);

    // Create an LValue from the temporary address.
    LValue tmpLValue = cgf.makeAddrLValue(tmpAddr, e->getType());

    // Copy the aggregate value from temporary to destination.
    emitFinalDestCopy(e->getType(), tmpLValue);
  }

  void VisitCXXThrowExpr(const CXXThrowExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXThrowExpr");
  }
  void VisitAtomicExpr(AtomicExpr *e) {
    // Evaluate the atomic expression as an rvalue and copy it to `dest`.
    RValue result = cgf.emitAtomicExpr(e);
    emitFinalDestCopy(e->getType(), result);
  }
};
594
595} // namespace
596
597static bool isTrivialFiller(Expr *e) {
598 if (!e)
599 return true;
600
602 return true;
603
604 if (auto *ile = dyn_cast<InitListExpr>(e)) {
605 if (ile->getNumInits())
606 return false;
607 return isTrivialFiller(ile->getArrayFiller());
608 }
609
610 if (const auto *cons = dyn_cast_or_null<CXXConstructExpr>(e))
611 return cons->getConstructor()->isDefaultConstructor() &&
612 cons->getConstructor()->isTrivial();
613
614 return false;
615}
616
617/// Given an expression with aggregate type that represents a value lvalue, this
618/// method emits the address of the lvalue, then loads the result into DestPtr.
void AggExprEmitter::emitAggLoadOfLValue(const Expr *e) {
  // Emit the address of the lvalue, then copy its value into `dest`.
  LValue lv = cgf.emitLValue(e);

  // If the type of the l-value is atomic, then do an atomic load.
  // NOTE(review): a line was lost in extraction here (likely a
  // MissingFeatures assert for atomic types) — verify against upstream.

  emitFinalDestCopy(e->getType(), lv);
}
627
void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
  if (dest.isPotentiallyAliased() && e->getType().isPODType(cgf.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    emitAggLoadOfLValue(e);
    return;
  }

  AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  bool destruct =
      !cgf.getLangOpts().CPlusPlus && !slot.isExternallyDestructed();
  if (destruct)
  // NOTE(review): the statement guarded by this `if` was lost in extraction
  // (presumably slot.setExternallyDestructed()) — verify against upstream.

  cgf.emitAggExpr(e->getInitializer(), slot);

  if (destruct)
    if ([[maybe_unused]] QualType::DestructionKind dtorKind =
    // NOTE(review): the initializer of `dtorKind` was lost in extraction
    // (presumably e->getType().isDestructedType()) — verify against upstream.
      cgf.cgm.errorNYI(e->getSourceRange(), "compound literal with destructor");
}
652
/// Emit the initialization of an array: the explicit initializers in `args`
/// first, then (unless the memory is already suitably zeroed) a do-while loop
/// that applies `arrayFiller` — or zero-initialization — to the remaining
/// elements.
void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
                                   QualType arrayQTy, Expr *e,
                                   ArrayRef<Expr *> args, Expr *arrayFiller) {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  const mlir::Location loc = cgf.getLoc(e->getSourceRange());

  const uint64_t numInitElements = args.size();

  const QualType elementType =
      cgf.getContext().getAsArrayType(arrayQTy)->getElementType();

  // Elements needing destruction would require EH cleanups; not implemented.
  if (elementType.isDestructedType() && cgf.cgm.getLangOpts().Exceptions) {
    cgf.cgm.errorNYI(loc, "initialized array requires destruction");
    return;
  }

  const QualType elementPtrType = cgf.getContext().getPointerType(elementType);

  const mlir::Type cirElementType = cgf.convertType(elementType);
  const cir::PointerType cirElementPtrType =
      builder.getPointerTo(cirElementType);

  // Decay the array address to a pointer to its first element.
  auto begin = cir::CastOp::create(builder, loc, cirElementPtrType,
                                   cir::CastKind::array_to_ptrdecay,
                                   destPtr.getPointer());

  const CharUnits elementSize =
      cgf.getContext().getTypeSizeInChars(elementType);
  const CharUnits elementAlign =
      destPtr.getAlignment().alignmentOfArrayElement(elementSize);

  // The 'current element to initialize'. The invariants on this
  // variable are complicated. Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  mlir::Value element = begin;

  // Don't build the 'one' before the cycle to avoid
  // emitting the redundant `cir.const 1` instrs.
  mlir::Value one;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != numInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      // Note: strides from `begin` by `i`, not from `element` by 1.
      one = builder.getConstantInt(loc, cgf.ptrDiffTy, i);
      element = builder.createPtrStride(loc, begin, one);
    }

    const Address address = Address(element, cirElementType, elementAlign);
    const LValue elementLV = cgf.makeAddrLValue(address, elementType);
    emitInitializationToLValue(args[i], elementLV);
  }

  const uint64_t numArrayElements = arrayTy.getSize();

  // Check whether there's a non-trivial array-fill expression.
  const bool hasTrivialFiller = isTrivialFiller(arrayFiller);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression. We can skip this if the we're
  // emitting to zeroed memory.
  if (numInitElements != numArrayElements &&
      !(dest.isZeroed() && hasTrivialFiller &&
        cgf.getTypes().isZeroInitializable(elementType))) {
    // Advance to the start of the rest of the array.
    if (numInitElements) {
      one = builder.getConstantInt(loc, cgf.ptrDiffTy, 1);
      element = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
                                         element, one);
    }

    // Allocate the temporary variable
    // to store the pointer to first uninitialized element
    const Address tmpAddr = cgf.createTempAlloca(
        cirElementPtrType, cgf.getPointerAlign(), loc, "arrayinit.temp");
    LValue tmpLV = cgf.makeAddrLValue(tmpAddr, elementPtrType);
    cgf.emitStoreThroughLValue(RValue::get(element), tmpLV);

    // Compute the end of array
    cir::ConstantOp numArrayElementsConst = builder.getConstInt(
        loc, mlir::cast<cir::IntType>(cgf.ptrDiffTy), numArrayElements);
    mlir::Value end = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
                                               begin, numArrayElementsConst);

    builder.createDoWhile(
        loc,
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // Loop while the cursor has not reached the one-past-the-end pointer.
          cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);
          cir::CmpOp cmp = cir::CmpOp::create(builder, loc, cir::CmpOpKind::ne,
                                              currentElement, end);
          builder.createCondition(cmp);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);

          // NOTE(review): a line was lost in extraction here (likely a
          // MissingFeatures assert) — verify against upstream.

          // Emit the actual filler expression.
          LValue elementLV = cgf.makeAddrLValue(
              Address(currentElement, cirElementType, elementAlign),
              elementType);
          if (arrayFiller)
            emitInitializationToLValue(arrayFiller, elementLV);
          else
            emitNullInitializationToLValue(loc, elementLV);

          // Tell the EH cleanup that we finished with the last element.
          if (cgf.cgm.getLangOpts().Exceptions) {
            cgf.cgm.errorNYI(loc, "update destructed array element for EH");
            return;
          }

          // Advance pointer and store them to temporary variable
          cir::ConstantOp one = builder.getConstInt(
              loc, mlir::cast<cir::IntType>(cgf.ptrDiffTy), 1);
          auto nextElement = cir::PtrStrideOp::create(
              builder, loc, cirElementPtrType, currentElement, one);
          cgf.emitStoreThroughLValue(RValue::get(nextElement), tmpLV);

          builder.createYield(loc);
        });
  }
}
780
781/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
782void AggExprEmitter::emitFinalDestCopy(QualType type, RValue src) {
783 assert(src.isAggregate() && "value must be aggregate value!");
784 LValue srcLV = cgf.makeAddrLValue(src.getAggregateAddress(), type);
785 emitFinalDestCopy(type, srcLV, CIRGenFunction::EVK_RValue);
786}
787
/// Perform the final copy to destPtr, if desired.
void AggExprEmitter::emitFinalDestCopy(
    QualType type, const LValue &src,
    CIRGenFunction::ExprValueKind srcValueKind) {
  // If dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result. Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (dest.isIgnored())
    return;

  // Non-trivial C-struct moves/copies are not implemented yet.
  if (srcValueKind == CIRGenFunction::EVK_RValue) {
    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
      cgf.cgm.errorNYI("emitFinalDestCopy: EVK_RValue & PCK_Struct");
    }
  } else {
    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
      cgf.cgm.errorNYI("emitFinalDestCopy: !EVK_RValue & PCK_Struct");
    }
  }

  // NOTE(review): lines were lost in extraction here (likely MissingFeatures
  // asserts) — verify against upstream.

  AggValueSlot srcAgg = AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
  // NOTE(review): the remaining forLValue arguments were lost in extraction —
  // verify against upstream.
  emitCopy(type, dest, srcAgg);
}
818
/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
/// ignored
void AggExprEmitter::emitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  // NOTE(review): a line was lost in extraction here (likely a
  // MissingFeatures assert) — verify against upstream.

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is. Use the minimum alignment of
  // the two sides.
  LValue destLV = cgf.makeAddrLValue(dest.getAddress(), type);
  LValue srcLV = cgf.makeAddrLValue(src.getAddress(), type);
  // NOTE(review): a line was lost in extraction between these statements
  // (likely a MissingFeatures assert) — verify against upstream.
  cgf.emitAggregateCopy(destLV, srcLV, type, dest.mayOverlap(),
                        dest.isVolatile() || src.isVolatile());
}
836
/// Emit the initialization of the lvalue `lv` from the expression `e`,
/// dispatching on the evaluation kind (scalar/complex/aggregate) of the
/// lvalue's type.
void AggExprEmitter::emitInitializationToLValue(Expr *e, LValue lv) {
  const QualType type = lv.getType();

  // NOTE(review): the `if (...) {` opening this block was lost in extraction
  // (presumably the check for expressions that reduce to null/zero
  // initialization) — verify against upstream.
    const mlir::Location loc = e->getSourceRange().isValid()
                                   ? cgf.getLoc(e->getSourceRange())
                                   : *cgf.currSrcLoc;
    return emitNullInitializationToLValue(loc, lv);
  }

  // An explicit "no initialization" marker: leave the storage untouched.
  if (isa<NoInitExpr>(e))
    return;

  if (type->isReferenceType()) {
    // References are initialized by binding, then storing the bound address.
    RValue rv = cgf.emitReferenceBindingToExpr(e);
    return cgf.emitStoreThroughLValue(rv, lv);
  }

  switch (cgf.getEvaluationKind(type)) {
  case cir::TEK_Complex:
    cgf.emitComplexExprIntoLValue(e, lv, /*isInit*/ true);
    break;
  // NOTE(review): the TEK_Aggregate case was lost in extraction here
  // (presumably emitting via cgf.emitAggExpr into a slot built from `lv`,
  // propagating dest.isZeroed()) — verify against upstream.
      dest.isZeroed()));

    return;
  case cir::TEK_Scalar:
    if (lv.isSimple())
      cgf.emitScalarInit(e, cgf.getLoc(e->getSourceRange()), lv);
    else
    // NOTE(review): the else-branch statement was lost in extraction
    // (presumably a store through a non-simple lvalue) — verify upstream.
    return;
  }
}
874
875void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *e) {
876 AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
877 cgf.emitCXXConstructExpr(e, slot);
878}
879
/// Store a null/zero value of the lvalue's type into `lv`, skipping the work
/// entirely when the destination is already known to be zeroed.
void AggExprEmitter::emitNullInitializationToLValue(mlir::Location loc,
                                                    LValue lv) {
  const QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (dest.isZeroed() && cgf.getTypes().isZeroInitializable(type))
    return;

  if (cgf.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    mlir::Value null = cgf.cgm.emitNullConstant(type, loc);
    if (lv.isSimple()) {
      cgf.emitStoreOfScalar(null, lv, /* isInitialization */ true);
      return;
    }

    // NOTE(review): the statement handling the non-simple lvalue case was
    // lost in extraction — verify against upstream.
    return;
  }

  // There's a potential optimization opportunity in combining
  // memsets; that would be easy for arrays, but relatively
  // difficult for structures with the current code.
  cgf.emitNullInitialization(loc, lv.getAddress(), lv.getType());
}
906
/// Emit a lambda expression by initializing each field of the lambda's
/// closure class from the corresponding capture initializer.
void AggExprEmitter::VisitLambdaExpr(LambdaExpr *e) {
  CIRGenFunction::SourceLocRAIIObject loc{cgf, cgf.getLoc(e->getSourceRange())};
  AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
  [[maybe_unused]] LValue slotLV =
      cgf.makeAddrLValue(slot.getAddress(), e->getType());

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception or contains a branch out of the
  // expressions.
  // NOTE(review): a line appears to be missing from this listing here
  // (likely a missing-feature assertion) -- verify against upstream.

  // Fields, captures, and capture initializers are parallel sequences in the
  // closure class; walk them together.
  for (auto [curField, capture, captureInit] : llvm::zip(
           e->getLambdaClass()->fields(), e->captures(), e->capture_inits())) {
    // Pick a name for the field.
    llvm::StringRef fieldName = curField->getName();
    if (capture.capturesVariable()) {
      assert(!curField->isBitField() && "lambdas don't have bitfield members!");
      // Name the field after the captured variable for readable CIR output.
      ValueDecl *v = capture.getCapturedVar();
      fieldName = v->getName();
      cgf.cgm.lambdaFieldToName[curField] = fieldName;
    } else if (capture.capturesThis()) {
      cgf.cgm.lambdaFieldToName[curField] = "this";
    } else {
      cgf.cgm.errorNYI(e->getSourceRange(), "Unhandled capture kind");
      cgf.cgm.lambdaFieldToName[curField] = "unhandled-capture-kind";
    }

    // Emit initialization
    LValue lv =
        cgf.emitLValueForFieldInitialization(slotLV, curField, fieldName);
    if (curField->hasCapturedVLAType())
      cgf.cgm.errorNYI(e->getSourceRange(), "lambda captured VLA type");

    emitInitializationToLValue(captureInit, lv);

    // Push a destructor if necessary.
    if ([[maybe_unused]] QualType::DestructionKind DtorKind =
            curField->getType().isDestructedType())
      cgf.cgm.errorNYI(e->getSourceRange(), "lambda with destructed field");
  }
}
947
948void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *e) {
949 CIRGenFunction::RunCleanupsScope cleanups(cgf);
950 CIRGenBuilderTy &builder = cgf.getBuilder();
951 mlir::Location scopeLoc = cgf.getLoc(e->getSourceRange());
952 mlir::OpBuilder::InsertPoint scopeBegin;
953
954 // Explicitly introduce a scope for cleanup expressions, even though this
955 // overlaps with the RunCleanupsScope above.
956 //
957 // CIR does not yet model cleanup scopes explicitly, so a lexical scope is
958 // used as a temporary approximation. This is expected to be revisited once
959 // cleanup handling is redesigned.
960 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
961 [&](mlir::OpBuilder &b, mlir::Location loc) {
962 scopeBegin = b.saveInsertionPoint();
963 });
964
965 {
966 mlir::OpBuilder::InsertionGuard guard(builder);
967 builder.restoreInsertionPoint(scopeBegin);
968 CIRGenFunction::LexicalScope lexScope{cgf, scopeLoc,
969 builder.getInsertionBlock()};
970 Visit(e->getSubExpr());
971 }
972}
973
/// Emit a call whose result is an aggregate, directing the return value into
/// the current destination slot.
void AggExprEmitter::VisitCallExpr(const CallExpr *e) {
  // NOTE(review): the `if` condition guarding this NYI branch is missing from
  // this listing (dropped line); by the diagnostic text it checks for a
  // reference return type -- verify against upstream.
    cgf.cgm.errorNYI(e->getSourceRange(), "reference return type");
    return;
  }

  withReturnValueSlot(
      e, [&](ReturnValueSlot slot) { return cgf.emitCallExpr(e, slot); });
}
983
/// Invoke `fn` with a ReturnValueSlot that aliases the current destination
/// slot, so the callee writes its aggregate result directly in place.
void AggExprEmitter::withReturnValueSlot(
    const Expr *e, llvm::function_ref<RValue(ReturnValueSlot)> fn) {
  QualType retTy = e->getType();

  // NOTE(review): the right-hand side of this initializer is missing from
  // this listing (dropped line); presumably it tests whether `retTy` requires
  // non-trivial destruction -- verify against upstream.
  bool requiresDestruction =
  if (requiresDestruction)
    cgf.cgm.errorNYI(
        e->getSourceRange(),
        "withReturnValueSlot: return value requiring destruction is NYI");

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, fn will emit its own, notice that it's "unused", and end its
  // lifetime before we have the chance to emit a proper destructor call.
  // NOTE(review): several lines are missing from this listing between the
  // comment above and the statements below -- verify against upstream.

  Address retAddr = dest.getAddress();

  fn(ReturnValueSlot(retAddr));
}
1011
1012void AggExprEmitter::VisitInitListExpr(InitListExpr *e) {
1013 if (e->hadArrayRangeDesignator())
1014 llvm_unreachable("GNU array range designator extension");
1015
1016 if (e->isTransparent())
1017 return Visit(e->getInit(0));
1018
1019 visitCXXParenListOrInitListExpr(
1020 e, e->inits(), e->getInitializedFieldInUnion(), e->getArrayFiller());
1021}
1022
/// Shared emission path for InitListExpr and CXXParenListInitExpr: initialize
/// the destination aggregate element by element (arrays) or member by member
/// (records), default-initializing any trailing members.
void AggExprEmitter::visitCXXParenListOrInitListExpr(
    Expr *e, ArrayRef<Expr *> args, FieldDecl *initializedFieldInUnion,
    Expr *arrayFiller) {

  const mlir::Location loc = cgf.getLoc(e->getSourceRange());
  const AggValueSlot dest = ensureSlot(loc, e->getType());

  // Constant-size arrays are handled by the dedicated array-init path.
  if (e->getType()->isConstantArrayType()) {
    // NOTE(review): the initializer of `arrayTy` is missing from this listing
    // (dropped line) -- verify against upstream.
    cir::ArrayType arrayTy =
    emitArrayInit(dest.getAddress(), arrayTy, e->getType(), e, args,
                  arrayFiller);
    return;
  } else if (e->getType()->isVariableArrayType()) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "visitCXXParenListOrInitListExpr variable array type");
    return;
  }

  if (e->getType()->isArrayType()) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "visitCXXParenListOrInitListExpr array type");
    return;
  }

  assert(e->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value. This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned numInitElements = args.size();
  auto *record = e->getType()->castAsRecordDecl();

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.
  // NOTE(review): a line appears to be missing from this listing here
  // (likely a missing-feature assertion) -- verify against upstream.

  unsigned curInitIndex = 0;

  // Emit initialization of base classes.
  if (auto *cxxrd = dyn_cast<CXXRecordDecl>(record)) {
    assert(numInitElements >= cxxrd->getNumBases() &&
           "missing initializer for base class");
    for (auto &base : cxxrd->bases()) {
      assert(!base.isVirtual() && "should not see vbases here");
      CXXRecordDecl *baseRD = base.getType()->getAsCXXRecordDecl();
      Address address = cgf.getAddressOfDirectBaseInCompleteClass(
          loc, dest.getAddress(), cxxrd, baseRD,
          /*baseIsVirtual=*/false);
      // NOTE(review): one argument of this forAddr call is missing from this
      // listing (dropped line between IsDestructed and the overlap argument)
      // -- verify against upstream.
      AggValueSlot aggSlot = AggValueSlot::forAddr(
          address, Qualifiers(), AggValueSlot::IsDestructed,
          cgf.getOverlapForBaseInit(cxxrd, baseRD, false));
      cgf.emitAggExpr(args[curInitIndex++], aggSlot);
      if (base.getType().isDestructedType()) {
        cgf.cgm.errorNYI(e->getSourceRange(),
                         "push deferred deactivation cleanup");
        return;
      }
    }
  }

  // Prepare a 'this' for CXXDefaultInitExprs.
  CIRGenFunction::FieldConstructionScope fcScope(cgf, dest.getAddress());

  LValue destLV = cgf.makeAddrLValue(dest.getAddress(), e->getType());

  if (record->isUnion()) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "visitCXXParenListOrInitListExpr union type");
    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  for (const FieldDecl *field : record->fields()) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitField())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    // NOTE(review): the continuation of this condition is missing from this
    // listing (dropped line) -- verify against upstream.
    if (curInitIndex == numInitElements && dest.isZeroed() &&
      break;
    LValue lv =
        cgf.emitLValueForFieldInitialization(destLV, field, field->getName());
    // We never generate write-barriers for initialized fields.

    if (curInitIndex < numInitElements) {
      // Store the initializer into the field.
      CIRGenFunction::SourceLocRAIIObject loc{
          cgf, cgf.getLoc(record->getSourceRange())};
      emitInitializationToLValue(args[curInitIndex++], lv);
    } else {
      // We're out of initializers; default-initialize to null
      emitNullInitializationToLValue(cgf.getLoc(e->getSourceRange()), lv);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    if (field->getType().isDestructedType()) {
      cgf.cgm.errorNYI(e->getSourceRange(),
                       "visitCXXParenListOrInitListExpr destructor");
      return;
    }

    // From classic codegen, maybe not useful for CIR:
    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
  }
}
1144
// TODO(cir): This could be shared with classic codegen.
// Determine whether initializing base `baseRD` of `rd` may overlap some other
// object (which would forbid writing the base's tail padding).
// NOTE(review): the function signature line is missing from this listing
// (dropped line); by the parameter list it is
// CIRGenFunction::getOverlapForBaseInit -- verify against upstream.
    const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual) {
  // If the most-derived object is a field declared with [[no_unique_address]],
  // the tail padding of any virtual base could be reused for other subobjects
  // of that field's class.
  // NOTE(review): the return statement for this branch is missing from this
  // listing -- verify against upstream.
  if (isVirtual)

  // If the base class is laid out entirely within the nvsize of the derived
  // class, its tail padding cannot yet be initialized, so we can issue
  // stores at the full width of the base class.
  const ASTRecordLayout &layout = getContext().getASTRecordLayout(rd);
  // NOTE(review): the return statement for this branch is also missing from
  // this listing -- verify against upstream.
  if (layout.getBaseClassOffset(baseRD) +
          getContext().getASTRecordLayout(baseRD).getSize() <=
      layout.getNonVirtualSize())

  // The tail padding may contain values we need to preserve.
  // NOTE(review): the final return statement is missing from this listing --
  // verify against upstream.
}
1166
  // NOTE(review): the signature line for CIRGenFunction::emitAggExpr is
  // missing from this listing (dropped line) -- verify against upstream.
  // Emit the aggregate expression `e` into `slot` via AggExprEmitter.
  AggExprEmitter(*this, slot).Visit(const_cast<Expr *>(e));
}
1170
// NOTE(review): the opening lines of this function's signature are missing
// from this listing (dropped lines); by the parameter list and body it is
// CIRGenFunction::emitAggregateCopy(LValue dest, LValue src, QualType ty, ...)
// -- verify against upstream.
                                     AggValueSlot::Overlap_t mayOverlap,
                                     bool isVolatile) {
  // TODO(cir): this function needs improvements, commented code for now since
  // this will be touched again soon.
  assert(!ty->isAnyComplexType() && "Unexpected copy of complex");

  Address destPtr = dest.getAddress();
  Address srcPtr = src.getAddress();

  if (getLangOpts().CPlusPlus) {
    if (auto *record = ty->getAsCXXRecordDecl()) {
      // Only trivially copyable/movable (or TrivialABI / union) types may be
      // copied with a raw memory copy.
      assert((record->hasTrivialCopyConstructor() ||
              record->hasTrivialCopyAssignment() ||
              record->hasTrivialMoveConstructor() ||
              record->hasTrivialMoveAssignment() ||
              record->hasAttr<TrivialABIAttr>() || record->isUnion()) &&
             "Trying to aggregate-copy a type without a trivial copy/move "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (record->isEmpty())
        return;
    }
  }

  // NOTE(review): a line is missing from this listing here (likely a
  // missing-feature assertion) -- verify against upstream.

  // Aggregate assignment turns into llvm.memcpy. This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in anyway the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely. If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get data size info for this aggregate. Don't copy the tail padding if this
  // might be a potentially-overlapping subobject, since the tail padding might
  // be occupied by a different object. Otherwise, copying it is fine.
  TypeInfoChars typeInfo;
  if (mayOverlap)
    typeInfo = getContext().getTypeInfoDataSizeInChars(ty);
  else
    typeInfo = getContext().getTypeInfoInChars(ty);

  // NOTE(review): a line is missing from this listing here -- verify against
  // upstream.

  // NOTE(cir): original codegen would normally convert destPtr and srcPtr to
  // i8* since memcpy operates on bytes. We don't need that in CIR because
  // cir.copy will operate on any CIR pointer that points to a sized type.

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (cgm.getLangOpts().getGC() != LangOptions::NonGC)
    cgm.errorNYI("emitAggregateCopy: GC");

  [[maybe_unused]] cir::CopyOp copyOp =
      builder.createCopy(destPtr.getPointer(), srcPtr.getPointer(), isVolatile);

  // NOTE(review): a trailing line is missing from this listing before the
  // closing brace -- verify against upstream.
}
1233
// TODO(cir): This could be shared with classic codegen.
// Determine whether initializing field `fd` may overlap some other object
// (which would forbid writing the field's tail padding).
// NOTE(review): the function signature lines are missing from this listing
// (dropped lines); by the body it is CIRGenFunction::getOverlapForFieldInit
// -- verify against upstream. The return statement of this first branch is
// also missing.
  if (!fd->hasAttr<NoUniqueAddressAttr>() || !fd->getType()->isRecordType())

  // If the field lies entirely within the enclosing class's nvsize, its tail
  // padding cannot overlap any already-initialized object. (The only subobjects
  // with greater addresses that might already be initialized are vbases.)
  const RecordDecl *classRD = fd->getParent();
  const ASTRecordLayout &layout = getContext().getASTRecordLayout(classRD);
  // NOTE(review): the return statement for this branch is missing from this
  // listing -- verify against upstream.
  if (layout.getFieldOffset(fd->getFieldIndex()) +
          getContext().getTypeSize(fd->getType()) <=
      (uint64_t)getContext().toBits(layout.getNonVirtualSize()))

  // The tail padding may contain values we need to preserve.
  // NOTE(review): the final return statement is missing from this listing --
  // verify against upstream.
}
1253
static bool isBlockVarRef(const Expr *E)
Is the value of the given expression possibly a reference to or into a __block variable?
static bool isTrivialFiller(Expr *e)
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
cir::ConditionOp createCondition(mlir::Value condition)
Create a loop condition.
cir::PtrStrideOp createPtrStride(mlir::Location loc, mlir::Value base, mlir::Value stride)
cir::PointerType getPointerTo(mlir::Type ty)
cir::DoWhileOp createDoWhile(mlir::Location loc, llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> condBuilder, llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> bodyBuilder)
Create a do-while operation.
cir::ConstantOp getConstantInt(mlir::Location loc, mlir::Type ty, int64_t value)
cir::YieldOp createYield(mlir::Location loc, mlir::ValueRange value={})
Create a yield operation.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
const ConstantArrayType * getAsConstantArrayType(QualType T) const
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
static bool hasSameType(QualType T1, QualType T2)
Determine whether the given types T1 and T2 are equivalent.
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object...
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4356
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4534
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4546
Represents a loop initializing the elements of an array.
Definition Expr.h:5971
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2724
QualType getElementType() const
Definition TypeBase.h:3784
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition Expr.h:6927
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Expr.h:4096
Expr * getRHS() const
Definition Expr.h:4093
mlir::Value getPointer() const
Definition Address.h:96
mlir::Type getElementType() const
Definition Address.h:123
clang::CharUnits getAlignment() const
Definition Address.h:136
mlir::Value emitRawPointer() const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:110
An aggregate value slot.
IsZeroed_t isZeroed() const
Overlap_t mayOverlap() const
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
IsDestructed_t isExternallyDestructed() const
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
void setExternallyDestructed(bool destructed=true)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
IsAliased_t isPotentiallyAliased() const
void setVolatile(bool flag)
cir::CmpThreeWayOp createThreeWayCmpTotalOrdering(mlir::Location loc, mlir::Value lhs, mlir::Value rhs, const llvm::APSInt &ltRes, const llvm::APSInt &eqRes, const llvm::APSInt &gtRes, cir::CmpOrdering ordering)
cir::CmpThreeWayOp createThreeWayCmpPartialOrdering(mlir::Location loc, mlir::Value lhs, mlir::Value rhs, const llvm::APSInt &ltRes, const llvm::APSInt &eqRes, const llvm::APSInt &gtRes, const llvm::APSInt &unorderedRes)
cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal)
cir::LoadOp createLoad(mlir::Location loc, Address addr, bool isVolatile=false)
static bool hasScalarEvaluationKind(clang::QualType type)
mlir::Type convertType(clang::QualType t)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
CIRGenTypes & getTypes() const
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy, AggValueSlot::Overlap_t mayOverlap, bool isVolatile=false)
Emit an aggregate copy.
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
void emitAggregateStore(mlir::Value value, Address dest)
RValue emitAtomicExpr(AtomicExpr *e)
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *fd)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
LValue emitAggExprToLValue(const Expr *e)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
static bool hasAggregateEvaluationKind(clang::QualType type)
void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit=false)
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, except that if the Field is a reference, this will return the address of the...
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc, Address value, const CXXRecordDecl *derived, const CXXRecordDecl *base, bool baseIsVirtual)
Convert the given pointer to a complete class to the given direct base.
CIRGenBuilderTy & getBuilder()
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual)
Determine whether a base class initialization may overlap some other object.
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
clang::ASTContext & getContext() const
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment and cast it to the defa...
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
mlir::Value emitVAArg(VAArgExpr *ve)
Generate code to get an argument from the passed in pointer and update it accordingly.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
const clang::LangOptions & getLangOpts() const
mlir::Value emitNullConstant(QualType t, mlir::Location loc)
Return the result of value-initializing the given type, i.e.
llvm::DenseMap< const clang::FieldDecl *, llvm::StringRef > lambdaFieldToName
Keep a map between lambda fields and names, this needs to be per module since lambdas might get gener...
bool isZeroInitializable(clang::QualType ty)
Return whether a type can be zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
Address getAddress() const
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
Address getAggregateAddress() const
Return the value of the address of the aggregate.
Definition CIRGenValue.h:69
bool isAggregate() const
Definition CIRGenValue.h:51
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
CXXTemporary * getTemporary()
Definition ExprCXX.h:1512
const Expr * getSubExpr() const
Definition ExprCXX.h:1516
A default argument (C++ [dcl.fct.default]).
Definition ExprCXX.h:1271
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
Represents a call to an inherited base class constructor from an inheriting constructor.
Definition ExprCXX.h:1752
Represents a list-initialization with parenthesis.
Definition ExprCXX.h:5142
MutableArrayRef< Expr * > getInitExprs()
Definition ExprCXX.h:5182
FieldDecl * getInitializedFieldInUnion()
Definition ExprCXX.h:5220
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool isTriviallyCopyable() const
Determine whether this class is considered trivially copyable per (C++11 [class]p6).
Definition DeclCXX.cpp:610
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprCXX.h:354
An expression "T()" which creates an rvalue of a non-class type T.
Definition ExprCXX.h:2197
Implicit construction of a std::initializer_list<T> object from an array temporary within list-initia...
Definition ExprCXX.h:801
A C++ throw-expression (C++ [except.throw]).
Definition ExprCXX.h:1209
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition ExprCXX.h:849
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1603
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
CastKind getCastKind() const
Definition Expr.h:3723
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1951
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
ChooseExpr - GNU builtin-in function __builtin_choose_expr.
Definition Expr.h:4851
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4887
bool isPartial() const
True iff the comparison is not totally ordered.
const ValueInfo * getLess() const
const ValueInfo * getUnordered() const
const CXXRecordDecl * Record
The declaration for the comparison category type from the standard library.
bool isStrong() const
True iff the comparison is "strong".
const ValueInfo * getGreater() const
const ValueInfo * getEqualOrEquiv() const
const Expr * getInitializer() const
Definition Expr.h:3636
Represents the canonical version of C arrays with a specified constant size.
Definition TypeBase.h:3810
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
bool hasAttr() const
Definition DeclBase.h:577
InitListExpr * getUpdater() const
Definition Expr.h:5939
This represents one expression.
Definition Expr.h:112
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3688
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3175
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3260
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3411
const Expr * getSubExpr() const
Definition Expr.h:1065
Expr * getResultExpr()
Return the result expression of this controlling expression.
Definition Expr.h:6467
Represents an implicitly-generated value initialization of an object of a given type.
Definition Expr.h:6060
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition Expr.cpp:2462
FieldDecl * getInitializedFieldInUnion()
If this initializes a union, specifies which field in the union to initialize.
Definition Expr.h:5428
bool hadArrayRangeDesignator() const
Definition Expr.h:5486
Expr * getArrayFiller()
If this initializer list initializes an array with more elements than there are initializers in the l...
Definition Expr.h:5404
const Expr * getInit(unsigned Init) const
Definition Expr.h:5356
ArrayRef< Expr * > inits()
Definition Expr.h:5352
llvm::iterator_range< capture_init_iterator > capture_inits()
Retrieve the initialization expressions for this lambda's captures.
Definition ExprCXX.h:2084
capture_range captures() const
Retrieve this lambda's captures.
Definition ExprCXX.cpp:1371
CXXRecordDecl * getLambdaClass() const
Retrieve the class that corresponds to the lambda.
Definition ExprCXX.cpp:1400
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition ExprCXX.h:4921
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4938
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
Represents a place-holder for an object not to be initialized by anything.
Definition Expr.h:5880
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1181
const Expr * getSubExpr() const
Definition Expr.h:2202
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition Expr.h:6803
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8515
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1551
bool isPODType(const ASTContext &Context) const
Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
Definition Type.cpp:2739
@ PCK_Struct
The type is a struct containing a field whose type is neither PCK_Trivial nor PCK_VolatileTrivial.
Definition TypeBase.h:1523
Represents a struct/union/class.
Definition Decl.h:4342
field_range fields() const
Definition Decl.h:4545
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4542
field_iterator field_begin() const
Definition Decl.cpp:5276
CompoundStmt * getSubStmt()
Definition Expr.h:4615
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantArrayType() const
Definition TypeBase.h:8771
bool isArrayType() const
Definition TypeBase.h:8767
bool isPointerType() const
Definition TypeBase.h:8668
bool isReferenceType() const
Definition TypeBase.h:8692
bool isVariableArrayType() const
Definition TypeBase.h:8779
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
Definition TypeBase.h:9156
bool isAggregateType() const
Determines whether the type is a C++ aggregate type or C aggregate or union type.
Definition Type.cpp:2456
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8803
bool isMemberPointerType() const
Definition TypeBase.h:8749
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2358
bool isNullPtrType() const
Definition TypeBase.h:9071
bool isRecordType() const
Definition TypeBase.h:8795
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
Expr * getSubExpr() const
Definition Expr.h:2288
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4960
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
constexpr Variable var(Literal L)
Returns the variable of L.
Definition CNFFormula.h:64
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
static bool emitLifetimeMarkers()
static bool aggValueSlotDestructedFlag()
static bool aggValueSlotGC()
static bool aggValueSlotAlias()
static bool opLoadStoreAtomic()
static bool aggEmitFinalDestCopyRValue()
static bool aggValueSlotVolatile()
static bool opScopeCleanupRegion()
static bool atomicTypes()
static bool cudaSupport()
static bool requiresCleanups()
static bool incrementProfileCounter()
clang::CharUnits getPointerAlign() const
llvm::APSInt getIntValue() const
Get the constant integer value used by this variable to represent the comparison category result type...