clang 23.0.0git
CIRGenExprAggregate.cpp
Go to the documentation of this file.
//===- CIRGenExprAggregate.cpp - Emit CIR Code from Aggregate Expressions ===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Aggregate Expr nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenBuilder.h"
15#include "CIRGenFunction.h"
16#include "CIRGenValue.h"
17#include "mlir/IR/Builders.h"
19
20#include "clang/AST/Expr.h"
23#include "llvm/IR/Value.h"
24#include <cstdint>
25
26using namespace clang;
27using namespace clang::CIRGen;
28
29namespace {
30// FIXME(cir): This should be a common helper between CIRGen
31// and traditional CodeGen
32/// Is the value of the given expression possibly a reference to or
33/// into a __block variable?
34static bool isBlockVarRef(const Expr *e) {
35 // Make sure we look through parens.
36 e = e->IgnoreParens();
37
38 // Check for a direct reference to a __block variable.
39 if (const DeclRefExpr *dre = dyn_cast<DeclRefExpr>(e)) {
40 const VarDecl *var = dyn_cast<VarDecl>(dre->getDecl());
41 return (var && var->hasAttr<BlocksAttr>());
42 }
43
44 // More complicated stuff.
45
46 // Binary operators.
47 if (const BinaryOperator *op = dyn_cast<BinaryOperator>(e)) {
48 // For an assignment or pointer-to-member operation, just care
49 // about the LHS.
50 if (op->isAssignmentOp() || op->isPtrMemOp())
51 return isBlockVarRef(op->getLHS());
52
53 // For a comma, just care about the RHS.
54 if (op->getOpcode() == BO_Comma)
55 return isBlockVarRef(op->getRHS());
56
57 // FIXME: pointer arithmetic?
58 return false;
59
60 // Check both sides of a conditional operator.
61 } else if (const AbstractConditionalOperator *op =
62 dyn_cast<AbstractConditionalOperator>(e)) {
63 return isBlockVarRef(op->getTrueExpr()) ||
64 isBlockVarRef(op->getFalseExpr());
65
66 // OVEs are required to support BinaryConditionalOperators.
67 } else if (const OpaqueValueExpr *op = dyn_cast<OpaqueValueExpr>(e)) {
68 if (const Expr *src = op->getSourceExpr())
69 return isBlockVarRef(src);
70
71 // Casts are necessary to get things like (*(int*)&var) = foo().
72 // We don't really care about the kind of cast here, except
73 // we don't want to look through l2r casts, because it's okay
74 // to get the *value* in a __block variable.
75 } else if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
76 if (cast->getCastKind() == CK_LValueToRValue)
77 return false;
78 return isBlockVarRef(cast->getSubExpr());
79
80 // Handle unary operators. Again, just aggressively look through
81 // it, ignoring the operation.
82 } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(e)) {
83 return isBlockVarRef(uop->getSubExpr());
84
85 // Look into the base of a field access.
86 } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(e)) {
87 return isBlockVarRef(mem->getBase());
88
89 // Look into the base of a subscript.
90 } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(e)) {
91 return isBlockVarRef(sub->getBase());
92 }
93
94 return false;
95}
96
/// Visitor that emits the value of an aggregate-typed expression into the
/// destination slot `dest`, creating a temporary slot on demand when the
/// destination is ignored.
///
/// NOTE(review): several statements in this class were elided in the source
/// view being annotated; gaps below correspond to those elisions.
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {

  CIRGenFunction &cgf;
  // The slot the visited expression's value is emitted into.
  AggValueSlot dest;

  // Calls `fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *e,
                           llvm::function_ref<RValue(ReturnValueSlot)> fn);

  // Returns `dest` when it is usable; otherwise materializes a temporary slot.
  AggValueSlot ensureSlot(mlir::Location loc, QualType t) {
    if (!dest.isIgnored())
      return dest;
    return cgf.createAggTemp(t, loc, "agg.tmp.ensured");
  }

  // Replaces an ignored `dest` with a freshly created temporary slot.
  void ensureDest(mlir::Location loc, QualType ty) {
    if (!dest.isIgnored())
      return;
    dest = cgf.createAggTemp(ty, loc, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CIRGenFunction &cgf, AggValueSlot dest)
      : cgf(cgf), dest(dest) {}

  /// Given an expression with aggregate type that represents a value lvalue,
  /// this method emits the address of the lvalue, then loads the result into
  /// DestPtr.
  void emitAggLoadOfLValue(const Expr *e);

  void emitArrayInit(Address destPtr, cir::ArrayType arrayTy, QualType arrayQTy,
                     Expr *exprToVisit, ArrayRef<Expr *> args,
                     Expr *arrayFiller);

  void emitFinalDestCopy(QualType type, RValue src);

  /// Perform the final copy to DestPtr, if desired.
  // NOTE(review): the default value of `srcValueKind` is elided in this view.
  void emitFinalDestCopy(QualType type, const LValue &src,
                         CIRGenFunction::ExprValueKind srcValueKind =

  void emitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void emitInitializationToLValue(Expr *e, LValue lv);

  void emitNullInitializationToLValue(mlir::Location loc, LValue lv);

  void Visit(Expr *e) { StmtVisitor<AggExprEmitter>::Visit(e); }

  void VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
    emitAggLoadOfLValue(e);
  }

  void VisitCallExpr(const CallExpr *e);
  // GNU statement expression: give the compound statement a temporary to
  // deliver its aggregate result through.
  void VisitStmtExpr(const StmtExpr *e) {
    CIRGenFunction::StmtExprEvaluation eval(cgf);
    Address retAlloca =
        cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
    (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca, dest);
  }

  void VisitBinAssign(const BinaryOperator *e) {
    // For an assignment to work, the value on the right has
    // to be compatible with the value on the left.
    assert(cgf.getContext().hasSameUnqualifiedType(e->getLHS()->getType(),
                                                   e->getRHS()->getType()) &&
           "Invalid assignment");

    if (isBlockVarRef(e->getLHS()) &&
        e->getRHS()->HasSideEffects(cgf.getContext())) {
      cgf.cgm.errorNYI(e->getSourceRange(),
                       "block var reference with side effects");
      return;
    }

    LValue lhs = cgf.emitLValue(e->getLHS());

    // If we have an atomic type, evaluate into the destination and then
    // do an atomic copy.

    // Codegen the RHS so that it stores directly into the LHS.
    // NOTE(review): the arguments to forLValue are elided in this view.
    AggValueSlot lhsSlot = AggValueSlot::forLValue(

    // A non-volatile aggregate destination might have volatile member.
    if (!lhsSlot.isVolatile() && cgf.hasVolatileMember(e->getLHS()->getType()))
      lhsSlot.setVolatile(true);

    cgf.emitAggExpr(e->getRHS(), lhsSlot);

    // Copy into the destination if the assignment isn't ignored.
    emitFinalDestCopy(e->getType(), lhs);

    // NOTE(review): the tail of this condition is elided in this view.
    if (!dest.isIgnored() && !dest.isExternallyDestructed() &&
      cgf.pushDestroy(QualType::DK_nontrivial_c_struct, dest.getAddress(),
                      e->getType());
  }

  void VisitDeclRefExpr(DeclRefExpr *e) { emitAggLoadOfLValue(e); }

  void VisitInitListExpr(InitListExpr *e);
  void VisitCXXConstructExpr(const CXXConstructExpr *e);

  void visitCXXParenListOrInitListExpr(Expr *e, ArrayRef<Expr *> args,
                                       FieldDecl *initializedFieldInUnion,
                                       Expr *arrayFiller);
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
    CIRGenFunction::CXXDefaultInitExprScope Scope(cgf, die);
    Visit(die->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *e) {
    // Ensure that we have a slot, but if we already do, remember
    // whether it was externally destructed.
    bool wasExternallyDestructed = dest.isExternallyDestructed();
    ensureDest(cgf.getLoc(e->getSourceRange()), e->getType());

    // We're going to push a destructor if there isn't already one.
    dest.setExternallyDestructed();

    Visit(e->getSubExpr());

    // Push that destructor we promised.
    if (!wasExternallyDestructed)
      cgf.emitCXXTemporary(e->getTemporary(), e->getType(), dest.getAddress());
  }
  void VisitLambdaExpr(LambdaExpr *e);
  void VisitExprWithCleanups(ExprWithCleanups *e);

  // Stubs -- These should be moved up when they are implemented.
  void VisitCastExpr(CastExpr *e) {
    switch (e->getCastKind()) {
    case CK_LValueToRValueBitCast: {
      // If the result is unused, evaluate the operand for side effects only.
      if (dest.isIgnored()) {
        cgf.emitAnyExpr(e->getSubExpr(), AggValueSlot::ignored(),
                        /*ignoreResult=*/true);
        break;
      }

      // Bit-cast: copy the object representation byte-for-byte via memcpy
      // through void-typed addresses.
      LValue sourceLV = cgf.emitLValue(e->getSubExpr());
      Address sourceAddress =
          sourceLV.getAddress().withElementType(cgf.getBuilder(), cgf.voidTy);
      Address destAddress =
          dest.getAddress().withElementType(cgf.getBuilder(), cgf.voidTy);

      mlir::Location loc = cgf.getLoc(e->getExprLoc());

      mlir::Value sizeVal = cgf.getBuilder().getConstInt(
          loc, cgf.sizeTy,
          cgf.getContext().getTypeSizeInChars(e->getType()).getQuantity());
      cgf.getBuilder().createMemCpy(loc, destAddress.getPointer(),
                                    sourceAddress.getPointer(), sizeVal);

      break;
    }
    case CK_LValueToRValue:
      // If we're loading from a volatile type, force the destination
      // into existence.
      // NOTE(review): the condition guarding this errorNYI is elided in
      // this view.
      cgf.cgm.errorNYI(e->getSourceRange(),
                       "AggExprEmitter: volatile lvalue-to-rvalue cast");
      [[fallthrough]];
    case CK_NoOp:
    case CK_UserDefinedConversion:
    case CK_ConstructorConversion:
      assert(cgf.getContext().hasSameUnqualifiedType(e->getSubExpr()->getType(),
                                                     e->getType()) &&
             "Implicit cast types must be compatible");
      Visit(e->getSubExpr());
      break;
    case CK_ToUnion: {
      // Evaluate even if the destination is ignored.
      if (dest.isIgnored()) {
        cgf.emitAnyExpr(e->getSubExpr(), AggValueSlot::ignored(),
                        /*ignoreResult=*/true);
        break;
      }
      // Initialize the union through an address re-typed to the active
      // member's type.
      QualType ty = e->getSubExpr()->getType();
      Address castPtr = dest.getAddress().withElementType(cgf.getBuilder(),
                                                          cgf.convertType(ty));
      emitInitializationToLValue(e->getSubExpr(),
                                 cgf.makeAddrLValue(castPtr, ty));
      break;
    }
    default:
      cgf.cgm.errorNYI(e->getSourceRange(),
                       std::string("AggExprEmitter: VisitCastExpr: ") +
                           e->getCastKindName());
      break;
    }
  }
  void VisitStmt(Stmt *s) {
    cgf.cgm.errorNYI(s->getSourceRange(),
                     std::string("AggExprEmitter::VisitStmt: ") +
                         s->getStmtClassName());
  }
  void VisitParenExpr(ParenExpr *pe) { Visit(pe->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
    Visit(ge->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoawaitExpr");
  }
  void VisitCoyieldExpr(CoyieldExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoyieldExpr");
  }
  void VisitUnaryCoawait(UnaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitUnaryCoawait");
  }
  void VisitUnaryExtension(UnaryOperator *e) { Visit(e->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitSubstNonTypeTemplateParmExpr");
  }
  void VisitConstantExpr(ConstantExpr *e) {
    ensureDest(cgf.getLoc(e->getSourceRange()), e->getType());

    // Try to fold the expression to a constant and store it directly.
    if (mlir::Attribute result = ConstantEmitter(cgf).tryEmitConstantExpr(e)) {
      mlir::Value resultVal = cgf.getBuilder().getConstant(
          cgf.getLoc(e->getSourceRange()), mlir::cast<mlir::TypedAttr>(result));
      LValue destLVal = cgf.makeAddrLValue(dest.getAddress(), e->getType());
      cgf.emitStoreThroughLValue(RValue::get(resultVal), destLVal);
      return;
    }

    // It isn't clear that it is possible to get to here, but this branch is
    // present in classic codegen, so we leave it here too.
    return Visit(e->getSubExpr());
  }
  void VisitMemberExpr(MemberExpr *e) { emitAggLoadOfLValue(e); }
  void VisitUnaryDeref(UnaryOperator *e) { emitAggLoadOfLValue(e); }
  void VisitStringLiteral(StringLiteral *e) { emitAggLoadOfLValue(e); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *e);

  void VisitPredefinedExpr(const PredefinedExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitPredefinedExpr");
  }
  void VisitBinaryOperator(const BinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitBinaryOperator");
  }
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitPointerToDataMemberBinaryOperator");
  }
  // Comma: evaluate the LHS for side effects only, then emit the RHS.
  void VisitBinComma(const BinaryOperator *e) {
    cgf.emitIgnoredExpr(e->getLHS());
    Visit(e->getRHS());
  }
  // Three-way comparison (<=>): compute the scalar ordering value and store
  // it into the single field of the comparison-category result object.
  void VisitBinCmp(const BinaryOperator *e) {
    assert(cgf.getContext().hasSameType(e->getLHS()->getType(),
                                        e->getRHS()->getType()));
    const ComparisonCategoryInfo &cmpInfo =
        cgf.getContext().CompCategories.getInfoForType(e->getType());
    assert(cmpInfo.Record->isTriviallyCopyable() &&
           "cannot copy non-trivially copyable aggregate");

    QualType argTy = e->getLHS()->getType();

    if (!argTy->isIntegralOrEnumerationType() && !argTy->isRealFloatingType() &&
        !argTy->isNullPtrType() && !argTy->isPointerType() &&
        !argTy->isMemberPointerType() && !argTy->isAnyComplexType())
      cgf.cgm.errorNYI(e->getBeginLoc(), "aggregate three-way comparison");

    mlir::Location loc = cgf.getLoc(e->getSourceRange());
    // NOTE(review): this copies the builder by value; emitArrayInit below
    // takes `CIRGenBuilderTy &` — confirm whether a reference was intended.
    CIRGenBuilderTy builder = cgf.getBuilder();

    if (e->getType()->isAnyComplexType())
      cgf.cgm.errorNYI(e->getBeginLoc(), "VisitBinCmp: complex type");

    if (e->getType()->isAggregateType())
      cgf.cgm.errorNYI(e->getBeginLoc(), "VisitBinCmp: aggregate type");

    mlir::Value lhs = cgf.emitAnyExpr(e->getLHS()).getValue();
    mlir::Value rhs = cgf.emitAnyExpr(e->getRHS()).getValue();

    mlir::Value resultScalar;
    if (argTy->isNullPtrType()) {
      // nullptr_t always compares equal/equivalent.
      resultScalar =
          builder.getConstInt(loc, cmpInfo.getEqualOrEquiv()->getIntValue());
    } else {
      llvm::APSInt ltRes = cmpInfo.getLess()->getIntValue();
      llvm::APSInt eqRes = cmpInfo.getEqualOrEquiv()->getIntValue();
      llvm::APSInt gtRes = cmpInfo.getGreater()->getIntValue();
      if (!cmpInfo.isPartial()) {
        cir::CmpOrdering ordering = cmpInfo.isStrong()
                                        ? cir::CmpOrdering::Strong
                                        : cir::CmpOrdering::Weak;
        resultScalar = builder.createThreeWayCmpTotalOrdering(
            loc, lhs, rhs, ltRes, eqRes, gtRes, ordering);
      } else {
        // Partial ordering.
        llvm::APSInt unorderedRes = cmpInfo.getUnordered()->getIntValue();
        resultScalar = builder.createThreeWayCmpPartialOrdering(
            loc, lhs, rhs, ltRes, eqRes, gtRes, unorderedRes);
      }
    }

    // Create the return value in the destination slot.
    ensureDest(loc, e->getType());
    LValue destLVal = cgf.makeAddrLValue(dest.getAddress(), e->getType());

    // Emit the address of the first (and only) field in the comparison category
    // type, and initialize it from the constant integer value produced above.
    const FieldDecl *resultField = *cmpInfo.Record->field_begin();
    LValue fieldLVal = cgf.emitLValueForFieldInitialization(
        destLVal, resultField, resultField->getName());
    cgf.emitStoreThroughLValue(RValue::get(resultScalar), fieldLVal);

    // All done! The result is in the dest slot.
  }

  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitCXXRewrittenBinaryOperator");
  }
  void VisitObjCMessageExpr(ObjCMessageExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitObjCMessageExpr");
  }
  void VisitObjCIVarRefExpr(ObjCIvarRefExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitObjCIVarRefExpr");
  }

  // Designated update: first emit the base object, then apply the updater
  // init list on top of it.
  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *e) {
    AggValueSlot dest = ensureSlot(cgf.getLoc(e->getExprLoc()), e->getType());
    LValue destLV = cgf.makeAddrLValue(dest.getAddress(), e->getType());
    emitInitializationToLValue(e->getBase(), destLV);
    VisitInitListExpr(e->getUpdater());
  }
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *e) {
    mlir::Location loc = cgf.getLoc(e->getSourceRange());

    CIRGenFunction::OpaqueValueMapping binding(cgf, e);
    CIRGenFunction::ConditionalEvaluation eval(cgf);

    // Save whether the destination's lifetime is externally managed.
    bool isExternallyDestructed = dest.isExternallyDestructed();
    // NOTE(review): the tail of this initializer is elided in this view.
    bool destructNonTrivialCStruct =
        !isExternallyDestructed &&
    isExternallyDestructed |= destructNonTrivialCStruct;

    cgf.emitIfOnBoolExpr(
        e->getCond(),
        /*thenBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          eval.beginEvaluation();
          {
            CIRGenFunction::LexicalScope lexScope{cgf, loc,
                                                  b.getInsertionBlock()};
            cgf.curLexScope->setAsTernary();
            dest.setExternallyDestructed(isExternallyDestructed);
            assert(!cir::MissingFeatures::incrementProfileCounter());
            Visit(e->getTrueExpr());
            cir::YieldOp::create(b, loc);
          }
          eval.endEvaluation();
        },
        loc,
        /*elseBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          eval.beginEvaluation();
          {
            CIRGenFunction::LexicalScope lexScope{cgf, loc,
                                                  b.getInsertionBlock()};
            cgf.curLexScope->setAsTernary();

            // If the result of an agg expression is unused, then the emission
            // of the LHS might need to create a destination slot. That's fine
            // with us, and we can safely emit the RHS into the same slot, but
            // we shouldn't claim that it's already being destructed.
            dest.setExternallyDestructed(isExternallyDestructed);
            Visit(e->getFalseExpr());
            cir::YieldOp::create(b, loc);
          }
          eval.endEvaluation();
        },
        loc);

    if (destructNonTrivialCStruct)
      cgf.cgm.errorNYI(
          e->getSourceRange(),
          "Abstract conditional aggregate: destructNonTrivialCStruct");
  }
  void VisitChooseExpr(const ChooseExpr *e) { Visit(e->getChosenSubExpr()); }
  void VisitCXXParenListInitExpr(CXXParenListInitExpr *e) {
    // NOTE(review): the middle argument of this call is elided in this view.
    visitCXXParenListOrInitListExpr(e, e->getInitExprs(),
                                    e->getArrayFiller());
  }

  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *e) {
    uint64_t numElements = e->getArraySize().getZExtValue();

    // Nothing to initialize for a zero-length array.
    if (!numElements)
      return;

    const mlir::Location loc = cgf.getLoc(e->getSourceRange());

    if (!e->getType()->isConstantArrayType())
      cgf.cgm.errorNYI(e->getSourceRange(),
                       "VisitArrayInitLoopExpr: Non-constant array");

    Address dest = ensureSlot(loc, e->getType()).getAddress();
    cir::ArrayType arrayTy = cast<cir::ArrayType>(dest.getElementType());

    // No explicit initializers; the sub-expression acts as the filler for
    // every element.
    emitArrayInit(dest, arrayTy, e->getType(),
                  const_cast<ArrayInitLoopExpr *>(e), {}, e->getSubExpr());
  }

  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitImplicitValueInitExpr");
  }
  void VisitNoInitExpr(NoInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitNoInitExpr");
  }
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *dae) {
    Visit(dae->getExpr());
  }
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *e) {
    AggValueSlot slot =
        ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
    // NOTE(review): the head of the call these arguments belong to is elided
    // in this view.
                                        e->constructsVBase(), slot.getAddress(),
                                        e->inheritedFromVBase(), e);
  }

  /// Emit the initializer for a std::initializer_list initialized with a
  /// real initializer list.
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *e) {
    ASTContext &ctx = cgf.getContext();
    CIRGenBuilderTy builder = cgf.getBuilder();
    mlir::Location loc = cgf.getLoc(e->getExprLoc());

    // Emit the backing array the initializer_list will point into.
    LValue array = cgf.emitLValue(e->getSubExpr());
    assert(array.isSimple() && "initializer_list array not a simple lvalue");
    Address arrayPtr = array.getAddress();

    // NOTE(review): the declaration of `arrayType` is elided in this view.
    assert(arrayType && "std::initializer_list constructed from non-array");

    auto *record = e->getType()->castAsRecordDecl();
    assert(record->getNumFields() == 2 &&
           "Expected std::initializer_list to only have two fields");

    RecordDecl::field_iterator field = record->field_begin();
    assert(field != record->field_end() &&
           ctx.hasSameType(field->getType()->getPointeeType(),
                           arrayType->getElementType()) &&
           "Expected std::initializer_list first field to be const E *");

    // Start pointer.
    AggValueSlot dest = ensureSlot(loc, e->getType());
    LValue destLV = cgf.makeAddrLValue(dest.getAddress(), e->getType());
    LValue start =
        cgf.emitLValueForFieldInitialization(destLV, *field, field->getName());

    mlir::Value arrayStart = arrayPtr.emitRawPointer();
    cgf.emitStoreThroughLValue(RValue::get(arrayStart), start);
    ++field;
    assert(field != record->field_end() &&
           "Expected std::initializer_list to have two fields");

    // Second field is either a size_t length or an end pointer, depending on
    // the library implementation.
    cir::ConstantOp size = builder.getConstInt(loc, arrayType->getSize());
    LValue endOrLength =
        cgf.emitLValueForFieldInitialization(destLV, *field, field->getName());
    if (ctx.hasSameType(field->getType(), ctx.getSizeType())) {
      // Length.
      cgf.emitStoreThroughLValue(RValue::get(size), endOrLength);
    } else {
      // End pointer.
      assert(field->getType()->isPointerType() &&
             ctx.hasSameType(field->getType()->getPointeeType(),
                             arrayType->getElementType()) &&
             "Expected std::initializer_list second field to be const E *");
      mlir::Value arrayEnd = builder.createPtrStride(loc, arrayStart, size);
      cgf.emitStoreThroughLValue(RValue::get(arrayEnd), endOrLength);
    }
  }

  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitCXXScalarValueInitExpr");
  }
  void VisitCXXTypeidExpr(CXXTypeidExpr *e) { emitAggLoadOfLValue(e); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *e) {
    Visit(e->getSubExpr());
  }
  void VisitOpaqueValueExpr(OpaqueValueExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitOpaqueValueExpr");
  }

  void VisitPseudoObjectExpr(PseudoObjectExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitPseudoObjectExpr");
  }

  void VisitVAArgExpr(VAArgExpr *e) {
    // emitVAArg returns an aggregate value (not a pointer) at the CIR level.
    // ABI-specific pointer handling will be done later in LoweringPrepare.
    mlir::Value vaArgValue = cgf.emitVAArg(e);

    // Create a temporary alloca to hold the aggregate value.
    mlir::Location loc = cgf.getLoc(e->getSourceRange());
    Address tmpAddr = cgf.createMemTemp(e->getType(), loc, "vaarg.tmp");

    // Store the va_arg result into the temporary.
    cgf.emitAggregateStore(vaArgValue, tmpAddr);

    // Create an LValue from the temporary address.
    LValue tmpLValue = cgf.makeAddrLValue(tmpAddr, e->getType());

    // Copy the aggregate value from temporary to destination.
    emitFinalDestCopy(e->getType(), tmpLValue);
  }

  void VisitCXXThrowExpr(const CXXThrowExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXThrowExpr");
  }
  void VisitAtomicExpr(AtomicExpr *e) {
    RValue result = cgf.emitAtomicExpr(e);
    emitFinalDestCopy(e->getType(), result);
  }
};
638
639} // namespace
640
/// Returns true when initializing the remaining array elements with the given
/// filler expression requires no actual work (so the fill loop may be skipped
/// when the destination is already zeroed).
static bool isTrivialFiller(Expr *e) {
  // No filler at all: nothing to emit.
  if (!e)
    return true;

  // NOTE(review): the condition guarding this `return true` is elided in this
  // view (presumably an ImplicitValueInitExpr check, as in classic CodeGen —
  // confirm upstream).
    return true;

  // An init list filler is trivial only when it contributes no explicit
  // initializers and its own array filler is recursively trivial.
  if (auto *ile = dyn_cast<InitListExpr>(e)) {
    if (ile->getNumInits())
      return false;
    return isTrivialFiller(ile->getArrayFiller());
  }

  // A constructor call is trivial when it is the trivial default constructor.
  if (const auto *cons = dyn_cast_or_null<CXXConstructExpr>(e))
    return cons->getConstructor()->isDefaultConstructor() &&
           cons->getConstructor()->isTrivial();

  return false;
}
660
/// Given an expression with aggregate type that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result into DestPtr.
void AggExprEmitter::emitAggLoadOfLValue(const Expr *e) {
  LValue lv = cgf.emitLValue(e);

  // If the type of the l-value is atomic, then do an atomic load.
  // NOTE(review): the atomic-type handling statement is elided in this view.

  emitFinalDestCopy(e->getType(), lv);
}
671
/// Emit a compound literal into the destination slot (or a temporary), taking
/// care when the destination might alias the literal itself.
void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
  if (dest.isPotentiallyAliased() && e->getType().isPODType(cgf.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    emitAggLoadOfLValue(e);
    return;
  }

  AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  bool destruct =
      !cgf.getLangOpts().CPlusPlus && !slot.isExternallyDestructed();
  // NOTE(review): the statement guarded by this `if` is elided in this view.
  if (destruct)

  cgf.emitAggExpr(e->getInitializer(), slot);

  // NOTE(review): the initializer of `dtorKind` is truncated in this view.
  if (destruct)
    if ([[maybe_unused]] QualType::DestructionKind dtorKind =
      cgf.cgm.errorNYI(e->getSourceRange(), "compound literal with destructor");
}
696
/// Emit initialization of the array at `destPtr` from the explicit initializer
/// expressions in `args`, filling any remaining elements from `arrayFiller`
/// (or zero-initializing them when no filler is given).
void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
                                   QualType arrayQTy, Expr *e,
                                   ArrayRef<Expr *> args, Expr *arrayFiller) {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  const mlir::Location loc = cgf.getLoc(e->getSourceRange());

  const uint64_t numInitElements = args.size();

  // ArrayInitLoopExpr needs the per-element index exposed through an
  // ArrayInitLoopExprScope in the fill loop below.
  bool setArrayInitLoopExprScope = isa<ArrayInitLoopExpr>(e);

  const QualType elementType =
      cgf.getContext().getAsArrayType(arrayQTy)->getElementType();

  const QualType elementPtrType = cgf.getContext().getPointerType(elementType);

  const mlir::Type cirElementType = cgf.convertType(elementType);
  const cir::PointerType cirElementPtrType =
      builder.getPointerTo(cirElementType);

  // Decay the array to a pointer to its first element.
  auto begin = cir::CastOp::create(builder, loc, cirElementPtrType,
                                   cir::CastKind::array_to_ptrdecay,
                                   destPtr.getPointer());

  const CharUnits elementSize =
      cgf.getContext().getTypeSizeInChars(elementType);
  const CharUnits elementAlign =
      destPtr.getAlignment().alignmentOfArrayElement(elementSize);

  // Exception safety requires us to destroy all the already-constructed
  // members if an initializer throws. For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  // When valid, holds the address of a slot tracking the last element
  // constructed so far, for the partial-array EH cleanup.
  Address endOfInit = Address::invalid();

  if (dtorKind && cgf.getLangOpts().Exceptions) {
    endOfInit = cgf.createTempAlloca(cirElementPtrType, cgf.getPointerAlign(),
                                     loc, "arrayinit.endOfInit");
    builder.createStore(loc, begin, endOfInit);

    cgf.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         elementAlign,
                                         cgf.getDestroyer(dtorKind));
  }

  // The 'current element to initialize'. The invariants on this
  // variable are complicated. Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  mlir::Value element = begin;

  // Don't build the 'one' before the cycle to avoid
  // emitting the redundant `cir.const 1` instrs.
  mlir::Value one;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != numInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      // Note: despite its name, `one` holds the element index `i` here; the
      // stride is always taken from `begin`, not from the previous element.
      one = builder.getConstantInt(loc, cgf.ptrDiffTy, i);
      element = builder.createPtrStride(loc, begin, one);

      // Tell the cleanup that it needs to destroy up to this element.
      if (endOfInit.isValid())
        builder.createStore(loc, element, endOfInit);
    }

    const Address address = Address(element, cirElementType, elementAlign);
    const LValue elementLV = cgf.makeAddrLValue(address, elementType);
    emitInitializationToLValue(args[i], elementLV);
  }

  const uint64_t numArrayElements = arrayTy.getSize();

  // Check whether there's a non-trivial array-fill expression.
  const bool hasTrivialFiller = isTrivialFiller(arrayFiller);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression. We can skip this if the we're
  // emitting to zeroed memory.
  if (numInitElements != numArrayElements &&
      !(dest.isZeroed() && hasTrivialFiller &&
        cgf.getTypes().isZeroInitializable(elementType))) {
    // Advance to the start of the rest of the array.
    if (numInitElements) {
      one = builder.getConstantInt(loc, cgf.ptrDiffTy, 1);
      element = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
                                         element, one);

      if (endOfInit.isValid())
        builder.createStore(loc, element, endOfInit);
    }

    // Allocate the temporary variable
    // to store the pointer to first uninitialized element
    const Address tmpAddr = cgf.createTempAlloca(
        cirElementPtrType, cgf.getPointerAlign(), loc, "arrayinit.temp");
    LValue tmpLV = cgf.makeAddrLValue(tmpAddr, elementPtrType);
    cgf.emitStoreThroughLValue(RValue::get(element), tmpLV);

    // Compute the end of array
    cir::ConstantOp numArrayElementsConst = builder.getConstInt(
        loc, mlir::cast<cir::IntType>(cgf.ptrDiffTy), numArrayElements);
    mlir::Value end = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
                                               begin, numArrayElementsConst);

    // Fill loop: do { *cur = filler; ++cur; } while (cur != end);
    builder.createDoWhile(
        loc,
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);
          cir::CmpOp cmp = cir::CmpOp::create(builder, loc, cir::CmpOpKind::ne,
                                              currentElement, end);
          builder.createCondition(cmp);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);

          // Emit the actual filler expression.
          LValue elementLV = cgf.makeAddrLValue(
              Address(currentElement, cirElementType, elementAlign),
              elementType);

          // For ArrayInitLoopExpr, expose the current element index to the
          // visited initializer.
          mlir::Value idx;
          if (setArrayInitLoopExprScope)
            idx = cir::PtrDiffOp::create(b, loc, cgf.ptrDiffTy, currentElement,
                                         begin);

          CIRGenFunction::ArrayInitLoopExprScope loopExprScope(
              cgf, setArrayInitLoopExprScope, idx);

          if (arrayFiller)
            emitInitializationToLValue(arrayFiller, elementLV);
          else
            emitNullInitializationToLValue(loc, elementLV);

          // Advance pointer and store them to temporary variable
          cir::ConstantOp one = builder.getConstInt(
              loc, mlir::cast<cir::IntType>(cgf.ptrDiffTy), 1);
          auto nextElement = cir::PtrStrideOp::create(
              builder, loc, cirElementPtrType, currentElement, one);

          // Tell the EH cleanup that we finished with the last element.
          if (endOfInit.isValid())
            builder.createStore(loc, nextElement, endOfInit);

          cgf.emitStoreThroughLValue(RValue::get(nextElement), tmpLV);

          builder.createYield(loc);
        });
  }
}
850
851/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
852void AggExprEmitter::emitFinalDestCopy(QualType type, RValue src) {
853 assert(src.isAggregate() && "value must be aggregate value!");
854 LValue srcLV = cgf.makeAddrLValue(src.getAggregateAddress(), type);
855 emitFinalDestCopy(type, srcLV, CIRGenFunction::EVK_RValue);
856}
857
/// Perform the final copy to destPtr, if desired.
void AggExprEmitter::emitFinalDestCopy(
    QualType type, const LValue &src,
    CIRGenFunction::ExprValueKind srcValueKind) {
  // If dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result. Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (dest.isIgnored())
    return;

  // Copying a non-trivial C struct is not implemented yet.
  if (srcValueKind == CIRGenFunction::EVK_RValue) {
    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
      cgf.cgm.errorNYI("emitFinalDestCopy: EVK_RValue & PCK_Struct");
    }
  } else {
    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
      cgf.cgm.errorNYI("emitFinalDestCopy: !EVK_RValue & PCK_Struct");
    }
  }


  // NOTE(review): the trailing arguments to forLValue are elided in this view.
  AggValueSlot srcAgg = AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
  emitCopy(type, dest, srcAgg);
}
888
// NOTE(review): this excerpt is missing original source lines 895 and 902,
// so the code below is incomplete as scraped.
889/// Perform a copy from the source into the destination.
890///
891/// \param type - the type of the aggregate being copied; qualifiers are
892/// ignored
893void AggExprEmitter::emitCopy(QualType type, const AggValueSlot &dest,
894 const AggValueSlot &src) {
896
897 // If the result of the assignment is used, copy the LHS there also.
898 // It's volatile if either side is. Use the minimum alignment of
899 // the two sides.
// Both slots are re-wrapped as l-values so emitAggregateCopy can consume them.
900 LValue destLV = cgf.makeAddrLValue(dest.getAddress(), type);
901 LValue srcLV = cgf.makeAddrLValue(src.getAddress(), type);
903 cgf.emitAggregateCopy(destLV, srcLV, type, dest.mayOverlap(),
904 dest.isVolatile() || src.isVolatile());
905}
906
// Emit the initializer expression `e` into the l-value `lv`, dispatching on
// the evaluation kind of the destination type.
// NOTE(review): this excerpt is missing original source lines 910 (the guard
// condition before the null-init branch), 929-932 (the TEK_Aggregate case
// header and call), and 940 (the non-simple scalar l-value path), so the code
// below is incomplete as scraped.
907void AggExprEmitter::emitInitializationToLValue(Expr *e, LValue lv) {
908 const QualType type = lv.getType();
909
// Prefer the expression's own location; fall back to the current source
// location when the expression has no valid range.
911 const mlir::Location loc = e->getSourceRange().isValid()
912 ? cgf.getLoc(e->getSourceRange())
913 : *cgf.currSrcLoc;
914 return emitNullInitializationToLValue(loc, lv);
915 }
916
// NoInitExpr means "leave this subobject uninitialized on purpose".
917 if (isa<NoInitExpr>(e))
918 return;
919
// Reference members bind to the initializer rather than copying it.
920 if (type->isReferenceType()) {
921 RValue rv = cgf.emitReferenceBindingToExpr(e);
922 return cgf.emitStoreThroughLValue(rv, lv);
923 }
924
925 switch (cgf.getEvaluationKind(type)) {
926 case cir::TEK_Complex:
927 cgf.emitComplexExprIntoLValue(e, lv, /*isInit*/ true);
928 break;
933 dest.isZeroed()));
934
935 return;
936 case cir::TEK_Scalar:
937 if (lv.isSimple())
938 cgf.emitScalarInit(e, cgf.getLoc(e->getSourceRange()), lv);
939 else
941 return;
942 }
943}
944
945void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *e) {
946 AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
947 cgf.emitCXXConstructExpr(e, slot);
948}
949
// Zero/null-initialize the storage referred to by `lv`.
// NOTE(review): this excerpt is missing original source line 967 (the
// non-simple scalar l-value store), so the code below is incomplete as
// scraped.
950void AggExprEmitter::emitNullInitializationToLValue(mlir::Location loc,
951 LValue lv) {
952 const QualType type = lv.getType();
953
954 // If the destination slot is already zeroed out before the aggregate is
955 // copied into it, we don't have to emit any zeros here.
956 if (dest.isZeroed() && cgf.getTypes().isZeroInitializable(type))
957 return;
958
959 if (cgf.hasScalarEvaluationKind(type)) {
960 // For non-aggregates, we can store the appropriate null constant.
961 mlir::Value null = cgf.cgm.emitNullConstant(type, loc);
962 if (lv.isSimple()) {
963 cgf.emitStoreOfScalar(null, lv, /* isInitialization */ true);
964 return;
965 }
966
968 return;
969 }
970
971 // There's a potential optimization opportunity in combining
972 // memsets; that would be easy for arrays, but relatively
973 // difficult for structures with the current code.
974 cgf.emitNullInitialization(loc, lv.getAddress(), lv.getType());
975}
976
// Emit a lambda expression by initializing each capture field of the closure
// object, walking the lambda class fields, the captures, and the capture
// initializers in lockstep.
// NOTE(review): this excerpt is missing original source line 1014 (the
// destructor-push call inside the final `if`), so the code below is
// incomplete as scraped.
977void AggExprEmitter::VisitLambdaExpr(LambdaExpr *e) {
978 CIRGenFunction::SourceLocRAIIObject loc{cgf, cgf.getLoc(e->getSourceRange())};
979 AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
980 LValue slotLV = cgf.makeAddrLValue(slot.getAddress(), e->getType());
981
982 // We'll need to enter cleanup scopes in case any of the element
983 // initializers throws an exception or contains branch out of the expressions.
984 CIRGenFunction::CleanupDeactivationScope deactivationScope(cgf);
985
986 for (auto [curField, capture, captureInit] : llvm::zip(
987 e->getLambdaClass()->fields(), e->captures(), e->capture_inits())) {
988 // Pick a name for the field.
989 llvm::StringRef fieldName = curField->getName();
990 if (capture.capturesVariable()) {
991 assert(!curField->isBitField() && "lambdas don't have bitfield members!");
// Use the captured variable's own name for readability in emitted CIR.
992 ValueDecl *v = capture.getCapturedVar();
993 fieldName = v->getName();
994 cgf.cgm.lambdaFieldToName[curField] = fieldName;
995 } else if (capture.capturesThis()) {
996 cgf.cgm.lambdaFieldToName[curField] = "this";
997 } else {
998 cgf.cgm.errorNYI(e->getSourceRange(), "Unhandled capture kind");
999 cgf.cgm.lambdaFieldToName[curField] = "unhandled-capture-kind";
1000 }
1001
1002 // Emit initialization
1003 LValue lv =
1004 cgf.emitLValueForFieldInitialization(slotLV, curField, fieldName);
1005 if (curField->hasCapturedVLAType())
1006 cgf.cgm.errorNYI(e->getSourceRange(), "lambda captured VLA type");
1007
1008 emitInitializationToLValue(captureInit, lv);
1009
1010 // Push a destructor if necessary.
1011 if (QualType::DestructionKind dtorKind =
1012 curField->getType().isDestructedType()) {
1013 assert(lv.isSimple());
1015 curField->getType(),
1016 cgf.getDestroyer(dtorKind), false);
1017 }
1018 }
1019}
1020
1021void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *e) {
1022 CIRGenFunction::FullExprCleanupScope fullExprScope(cgf, e->getSubExpr());
1023 Visit(e->getSubExpr());
1024}
1025
1026void AggExprEmitter::VisitCallExpr(const CallExpr *e) {
1027 if (e->getCallReturnType(cgf.getContext())->isReferenceType()) {
1028 cgf.cgm.errorNYI(e->getSourceRange(), "reference return type");
1029 return;
1030 }
1031
1032 withReturnValueSlot(
1033 e, [&](ReturnValueSlot slot) { return cgf.emitCallExpr(e, slot); });
1034}
1035
// Invoke `fn` with a ReturnValueSlot addressed at the destination, so an
// aggregate-returning call writes directly into the destination slot.
// NOTE(review): this excerpt is missing original source lines 1040, 1042,
// 1053-1054, 1057, and 1059-1060 (parts of the destruction check and the
// slot/temporary setup), so the code below is incomplete as scraped.
1036void AggExprEmitter::withReturnValueSlot(
1037 const Expr *e, llvm::function_ref<RValue(ReturnValueSlot)> fn) {
1038 QualType retTy = e->getType();
1039
1041 bool requiresDestruction =
1043 if (requiresDestruction)
1044 cgf.cgm.errorNYI(
1045 e->getSourceRange(),
1046 "withReturnValueSlot: return value requiring destruction is NYI");
1047
1048 // If it makes no observable difference, save a memcpy + temporary.
1049 //
1050 // We need to always provide our own temporary if destruction is required.
1051 // Otherwise, fn will emit its own, notice that it's "unused", and end its
1052 // lifetime before we have the chance to emit a proper destructor call.
1055
1056 Address retAddr = dest.getAddress();
1058
1061 fn(ReturnValueSlot(retAddr));
1062}
1063
1064void AggExprEmitter::VisitInitListExpr(InitListExpr *e) {
1065 if (e->hadArrayRangeDesignator())
1066 llvm_unreachable("GNU array range designator extension");
1067
1068 if (e->isTransparent())
1069 return Visit(e->getInit(0));
1070
1071 visitCXXParenListOrInitListExpr(
1072 e, e->inits(), e->getInitializedFieldInUnion(), e->getArrayFiller());
1073}
1074
// Shared lowering for InitListExpr and CXXParenListInitExpr: dispatch on the
// initialized type (constant array, record) and emit each element/field
// initializer in turn.
// NOTE(review): this excerpt is missing original source lines 1084, 1122,
// 1125, 1128, 1192, 1197, and 1215 (parts of the array-type fetch, the base
// address computation, the forAddr arguments, the zero-init guard, and the
// destructor push), so the code below is incomplete as scraped.
1075void AggExprEmitter::visitCXXParenListOrInitListExpr(
1076 Expr *e, ArrayRef<Expr *> args, FieldDecl *initializedFieldInUnion,
1077 Expr *arrayFiller) {
1078
1079 const mlir::Location loc = cgf.getLoc(e->getSourceRange());
1080 const AggValueSlot dest = ensureSlot(loc, e->getType());
1081
1082 if (e->getType()->isConstantArrayType()) {
1083 cir::ArrayType arrayTy =
1085 emitArrayInit(dest.getAddress(), arrayTy, e->getType(), e, args,
1086 arrayFiller);
1087 return;
1088 } else if (e->getType()->isVariableArrayType()) {
1089 cgf.cgm.errorNYI(e->getSourceRange(),
1090 "visitCXXParenListOrInitListExpr variable array type");
1091 return;
1092 }
1093
1094 if (e->getType()->isArrayType()) {
1095 cgf.cgm.errorNYI(e->getSourceRange(),
1096 "visitCXXParenListOrInitListExpr array type");
1097 return;
1098 }
1099
1100 assert(e->getType()->isRecordType() && "Only support structs/unions here!");
1101
1102 // Do struct initialization; this code just sets each individual member
1103 // to the appropriate value. This makes bitfield support automatic;
1104 // the disadvantage is that the generated code is more difficult for
1105 // the optimizer, especially with bitfields.
1106 unsigned numInitElements = args.size();
1107 auto *record = e->getType()->castAsRecordDecl();
1108
1109 // We'll need to enter cleanup scopes in case any of the element
1110 // initializers throws an exception.
1111 CIRGenFunction::CleanupDeactivationScope deactivateCleanups(cgf);
1112
1113 unsigned curInitIndex = 0;
1114
1115 // Emit initialization of base classes.
1116 if (auto *cxxrd = dyn_cast<CXXRecordDecl>(record)) {
1117 assert(numInitElements >= cxxrd->getNumBases() &&
1118 "missing initializer for base class");
1119 for (auto &base : cxxrd->bases()) {
1120 assert(!base.isVirtual() && "should not see vbases here");
1121 CXXRecordDecl *baseRD = base.getType()->getAsCXXRecordDecl();
1123 loc, dest.getAddress(), cxxrd, baseRD,
1124 /*baseIsVirtual=*/false);
1126 AggValueSlot aggSlot = AggValueSlot::forAddr(
1127 address, Qualifiers(), AggValueSlot::IsDestructed,
1129 cgf.getOverlapForBaseInit(cxxrd, baseRD, false));
1130 cgf.emitAggExpr(args[curInitIndex++], aggSlot);
1131 if (base.getType().isDestructedType()) {
1132 cgf.cgm.errorNYI(e->getSourceRange(),
1133 "push deferred deactivation cleanup");
1134 return;
1135 }
1136 }
1137 }
1138
1139 // Prepare a 'this' for CXXDefaultInitExprs.
1140 CIRGenFunction::FieldConstructionScope fcScope(cgf, dest.getAddress());
1141
1142 LValue destLV = cgf.makeAddrLValue(dest.getAddress(), e->getType());
1143
1144 if (record->isUnion()) {
1145 // Only initialize one field of a union. The field itself is
1146 // specified by the initializer list.
1147 if (!initializedFieldInUnion) {
1148 // Empty union; we have nothing to do.
1149
1150 // Make sure that it's really an empty and not a failure of
1151 // semantic analysis.
1152 assert(llvm::all_of(record->fields(),
1153 [](const FieldDecl *f) {
1154 return f->isUnnamedBitField() ||
1155 f->isAnonymousStructOrUnion();
1156 }) &&
1157 "Only unnamed bitfields or anonymous class allowed");
1158 return;
1159 }
1160
1161 // FIXME: volatility
1162 FieldDecl *initedField = initializedFieldInUnion;
1163
1164 LValue fieldLV = cgf.emitLValueForFieldInitialization(
1165 destLV, initedField, initedField->getName());
1166
1167 if (numInitElements) {
1168 // Store the initializer into the field
1169 emitInitializationToLValue(args[0], fieldLV);
1170 } else {
1171 // Default-initialize to null.
1172 emitNullInitializationToLValue(loc, fieldLV);
1173 }
1174 return;
1175 }
1176
1177 // Here we iterate over the fields; this makes it simpler to both
1178 // default-initialize fields and skip over unnamed fields.
1179 for (const FieldDecl *field : record->fields()) {
1180 // We're done once we hit the flexible array member.
1181 if (field->getType()->isIncompleteArrayType())
1182 break;
1183
1184 // Always skip anonymous bitfields.
1185 if (field->isUnnamedBitField())
1186 continue;
1187
1188 // We're done if we reach the end of the explicit initializers, we
1189 // have a zeroed object, and the rest of the fields are
1190 // zero-initializable.
1191 if (curInitIndex == numInitElements && dest.isZeroed() &&
1193 break;
1194 LValue lv =
1195 cgf.emitLValueForFieldInitialization(destLV, field, field->getName());
1196 // We never generate write-barriers for initialized fields.
1198
1199 if (curInitIndex < numInitElements) {
1200 // Store the initializer into the field.
1201 CIRGenFunction::SourceLocRAIIObject loc{
1202 cgf, cgf.getLoc(record->getSourceRange())};
1203 emitInitializationToLValue(args[curInitIndex++], lv);
1204 } else {
1205 // We're out of initializers; default-initialize to null
1206 emitNullInitializationToLValue(cgf.getLoc(e->getSourceRange()), lv);
1207 }
1208
1209 // Push a destructor if necessary.
1210 // FIXME: if we have an array of structures, all explicitly
1211 // initialized, we can end up pushing a linear number of cleanups.
1212 if (QualType::DestructionKind dtorKind =
1213 field->getType().isDestructedType()) {
1214 assert(lv.isSimple());
1216 field->getType(),
1217 cgf.getDestroyer(dtorKind), false);
1218 }
1219
1220 // From classic codegen, maybe not useful for CIR:
1221 // If the GEP didn't get used because of a dead zero init or something
1222 // else, clean it up for -O0 builds and general tidiness.
1223 }
1224}
1225
// Decide whether initializing a (non-virtual) base subobject may overlap
// other storage, by checking whether the base fits within the derived
// class's non-virtual size.
// NOTE(review): this excerpt is missing original source lines 1227 (the
// function signature start), 1233, 1242, and 1245 (the return statements),
// so the code below is incomplete as scraped.
1226// TODO(cir): This could be shared with classic codegen.
1228 const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual) {
1229 // If the most-derived object is a field declared with [[no_unique_address]],
1230 // the tail padding of any virtual base could be reused for other subobjects
1231 // of that field's class.
1232 if (isVirtual)
1234
1235 // If the base class is laid out entirely within the nvsize of the derived
1236 // class, its tail padding cannot yet be initialized, so we can issue
1237 // stores at the full width of the base class.
1238 const ASTRecordLayout &layout = getContext().getASTRecordLayout(rd);
1239 if (layout.getBaseClassOffset(baseRD) +
1240 getContext().getASTRecordLayout(baseRD).getSize() <=
1241 layout.getNonVirtualSize())
1243
1244 // The tail padding may contain values we need to preserve.
1246}
1247
// Entry point: emit an aggregate expression into the given slot by running
// the AggExprEmitter visitor over it.
// NOTE(review): this excerpt is missing original source line 1248 (the
// function signature), so the code below is incomplete as scraped.
1249 AggExprEmitter(*this, slot).Visit(const_cast<Expr *>(e));
1250}
1251
// Copy one aggregate l-value into another via cir.copy, skipping tail
// padding when the destination may overlap another object.
// NOTE(review): this excerpt is missing original source lines 1252 (the
// function signature start), 1277, 1299, and 1317 (likely MissingFeatures
// asserts), so the code below is incomplete as scraped.
1253 AggValueSlot::Overlap_t mayOverlap,
1254 bool isVolatile) {
1255 // TODO(cir): this function needs improvements, commented code for now since
1256 // this will be touched again soon.
1257 assert(!ty->isAnyComplexType() && "Unexpected copy of complex");
1258
1259 Address destPtr = dest.getAddress();
1260 Address srcPtr = src.getAddress();
1261
1262 if (getLangOpts().CPlusPlus) {
1263 if (auto *record = ty->getAsCXXRecordDecl()) {
// Only trivially copyable/movable (or TrivialABI / union) records may be
// copied with a raw memory copy.
1264 assert((record->hasTrivialCopyConstructor() ||
1265 record->hasTrivialCopyAssignment() ||
1266 record->hasTrivialMoveConstructor() ||
1267 record->hasTrivialMoveAssignment() ||
1268 record->hasAttr<TrivialABIAttr>() || record->isUnion()) &&
1269 "Trying to aggregate-copy a type without a trivial copy/move "
1270 "constructor or assignment operator");
1271 // Ignore empty classes in C++.
1272 if (record->isEmpty())
1273 return;
1274 }
1275 }
1276
1278
1279 // Aggregate assignment turns into llvm.memcpy. This is almost valid per
1280 // C99 6.5.16.1p3, which states "If the value being stored in an object is
1281 // read from another object that overlaps in any way the storage of the first
1282 // object, then the overlap shall be exact and the two objects shall have
1283 // qualified or unqualified versions of a compatible type."
1284 //
1285 // memcpy is not defined if the source and destination pointers are exactly
1286 // equal, but other compilers do this optimization, and almost every memcpy
1287 // implementation handles this case safely. If there is a libc that does not
1288 // safely handle this, we can add a target hook.
1289
1290 // Get data size info for this aggregate. Don't copy the tail padding if this
1291 // might be a potentially-overlapping subobject, since the tail padding might
1292 // be occupied by a different object. Otherwise, copying it is fine.
1293 TypeInfoChars typeInfo;
1294 if (mayOverlap)
1295 typeInfo = getContext().getTypeInfoDataSizeInChars(ty);
1296 else
1297 typeInfo = getContext().getTypeInfoInChars(ty);
1298
1300
1301 // Don't do any of the memmove_collectable tests if GC isn't set.
1302 if (cgm.getLangOpts().getGC() != LangOptions::NonGC)
1303 cgm.errorNYI("emitAggregateCopy: GC");
1304
1305 // If the data size (excluding tail padding) differs from the full type size,
1306 // use skip_tail_padding to avoid clobbering tail padding that may be occupied
1307 // by other objects (e.g. fields marked with [[no_unique_address]]).
1308 CharUnits dataSize = typeInfo.Width;
1309 bool skipTailPadding =
1310 mayOverlap && dataSize != getContext().getTypeSizeInChars(ty);
1311 // NOTE(cir): original codegen would normally convert destPtr and srcPtr to
1312 // i8* since memcpy operates on bytes. We don't need that in CIR because
1313 // cir.copy will operate on any CIR pointer that points to a sized type.
1314 builder.createCopy(destPtr.getPointer(), srcPtr.getPointer(), isVolatile,
1315 skipTailPadding);
1316
1318}
1319
// Decide whether initializing a [[no_unique_address]] record field may
// overlap other storage, by checking whether the field fits within the
// enclosing class's non-virtual size.
// NOTE(review): this excerpt is missing original source lines 1321-1322 (the
// function signature), 1324, 1334, and 1337 (the return statements), so the
// code below is incomplete as scraped.
1320// TODO(cir): This could be shared with classic codegen.
1323 if (!fd->hasAttr<NoUniqueAddressAttr>() || !fd->getType()->isRecordType())
1325
1326 // If the field lies entirely within the enclosing class's nvsize, its tail
1327 // padding cannot overlap any already-initialized object. (The only subobjects
1328 // with greater addresses that might already be initialized are vbases.)
1329 const RecordDecl *classRD = fd->getParent();
1330 const ASTRecordLayout &layout = getContext().getASTRecordLayout(classRD);
1331 if (layout.getFieldOffset(fd->getFieldIndex()) +
1332 getContext().getTypeSize(fd->getType()) <=
1333 (uint64_t)getContext().toBits(layout.getNonVirtualSize()))
1335
1336 // The tail padding may contain values we need to preserve.
1338}
1339
static bool isBlockVarRef(const Expr *E)
Is the value of the given expression possibly a reference to or into a __block variable?
static bool isTrivialFiller(Expr *e)
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
cir::ConditionOp createCondition(mlir::Value condition)
Create a loop condition.
cir::PtrStrideOp createPtrStride(mlir::Location loc, mlir::Value base, mlir::Value stride)
cir::PointerType getPointerTo(mlir::Type ty)
cir::DoWhileOp createDoWhile(mlir::Location loc, llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> condBuilder, llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> bodyBuilder)
Create a do-while operation.
cir::ConstantOp getConstantInt(mlir::Location loc, mlir::Type ty, int64_t value)
cir::YieldOp createYield(mlir::Location loc, mlir::ValueRange value={})
Create a yield operation.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
const ConstantArrayType * getAsConstantArrayType(QualType T) const
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
static bool hasSameType(QualType T1, QualType T2)
Determine whether the given types T1 and T2 are equivalent.
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object...
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4356
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4534
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4546
Represents a loop initializing the elements of an array.
Definition Expr.h:5971
llvm::APInt getArraySize() const
Definition Expr.h:5993
OpaqueValueExpr * getCommonExpr() const
Get the common subexpression shared by all initializations (the source array).
Definition Expr.h:5986
Expr * getSubExpr() const
Get the initializer to use for each array element.
Definition Expr.h:5991
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2724
QualType getElementType() const
Definition TypeBase.h:3784
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition Expr.h:6927
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Expr.h:4096
Expr * getRHS() const
Definition Expr.h:4093
mlir::Value getPointer() const
Definition Address.h:96
mlir::Type getElementType() const
Definition Address.h:123
static Address invalid()
Definition Address.h:74
clang::CharUnits getAlignment() const
Definition Address.h:136
bool isValid() const
Definition Address.h:75
mlir::Value emitRawPointer() const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:110
An aggregate value slot.
IsZeroed_t isZeroed() const
Overlap_t mayOverlap() const
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
IsDestructed_t isExternallyDestructed() const
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
void setExternallyDestructed(bool destructed=true)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
IsAliased_t isPotentiallyAliased() const
void setVolatile(bool flag)
cir::CmpThreeWayOp createThreeWayCmpTotalOrdering(mlir::Location loc, mlir::Value lhs, mlir::Value rhs, const llvm::APSInt &ltRes, const llvm::APSInt &eqRes, const llvm::APSInt &gtRes, cir::CmpOrdering ordering)
cir::CmpThreeWayOp createThreeWayCmpPartialOrdering(mlir::Location loc, mlir::Value lhs, mlir::Value rhs, const llvm::APSInt &ltRes, const llvm::APSInt &eqRes, const llvm::APSInt &gtRes, const llvm::APSInt &unorderedRes)
cir::StoreOp createStore(mlir::Location loc, mlir::Value val, Address dst, bool isVolatile=false, mlir::IntegerAttr align={}, cir::SyncScopeKindAttr scope={}, cir::MemOrderAttr order={})
cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal)
cir::LoadOp createLoad(mlir::Location loc, Address addr, bool isVolatile=false)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
static bool hasScalarEvaluationKind(clang::QualType type)
mlir::Type convertType(clang::QualType t)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
CIRGenTypes & getTypes() const
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy, AggValueSlot::Overlap_t mayOverlap, bool isVolatile=false)
Emit an aggregate copy.
void pushIrregularPartialArrayCleanup(mlir::Value arrayBegin, Address arrayEndPointer, QualType elementType, CharUnits elementAlign, Destroyer *destroyer)
Push an EH cleanup to destroy already-constructed elements of the given array.
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
void emitAggregateStore(mlir::Value value, Address dest)
RValue emitAtomicExpr(AtomicExpr *e)
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
void pushDestroyAndDeferDeactivation(QualType::DestructionKind dtorKind, Address addr, QualType type)
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *fd)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
LValue emitAggExprToLValue(const Expr *e)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
static bool hasAggregateEvaluationKind(clang::QualType type)
void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit=false)
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, except that if the Field is a reference, this will return the address of the...
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc, Address value, const CXXRecordDecl *derived, const CXXRecordDecl *base, bool baseIsVirtual)
Convert the given pointer to a complete class to the given direct base.
CIRGenBuilderTy & getBuilder()
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual)
Determine whether a base class initialization may overlap some other object.
Destroyer * getDestroyer(clang::QualType::DestructionKind kind)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
clang::ASTContext & getContext() const
void emitInheritedCXXConstructorCall(const CXXConstructorDecl *d, bool forVirtualBase, Address thisAddr, bool inheritedFromVBase, const CXXInheritedCtorInitExpr *e)
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment, and cast it to the defa...
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
mlir::Value emitVAArg(VAArgExpr *ve)
Generate code to get an argument from the passed in pointer and update it accordingly.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
mlir::Value emitNullConstant(QualType t, mlir::Location loc)
Return the result of value-initializing the given type, i.e.
llvm::DenseMap< const clang::FieldDecl *, llvm::StringRef > lambdaFieldToName
Keep a map between lambda fields and names, this needs to be per module since lambdas might get gener...
bool isZeroInitializable(clang::QualType ty)
Return whether a type can be zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
Address getAddress() const
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
Address getAggregateAddress() const
Return the value of the address of the aggregate.
Definition CIRGenValue.h:69
bool isAggregate() const
Definition CIRGenValue.h:51
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
CXXTemporary * getTemporary()
Definition ExprCXX.h:1515
const Expr * getSubExpr() const
Definition ExprCXX.h:1519
A default argument (C++ [dcl.fct.default]).
Definition ExprCXX.h:1274
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1107
Represents a call to an inherited base class constructor from an inheriting constructor.
Definition ExprCXX.h:1755
bool constructsVBase() const
Determine whether this constructor is actually constructing a base class (rather than a complete obje...
Definition ExprCXX.h:1796
CXXConstructorDecl * getConstructor() const
Get the constructor that this expression will call.
Definition ExprCXX.h:1792
bool inheritedFromVBase() const
Determine whether the inherited constructor is inherited from a virtual base of the object we constru...
Definition ExprCXX.h:1806
Represents a list-initialization with parenthesis.
Definition ExprCXX.h:5141
MutableArrayRef< Expr * > getInitExprs()
Definition ExprCXX.h:5181
FieldDecl * getInitializedFieldInUnion()
Definition ExprCXX.h:5219
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool isTriviallyCopyable() const
Determine whether this class is considered trivially copyable per (C++11 [class]p6).
Definition DeclCXX.cpp:610
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprCXX.h:357
An expression "T()" which creates an rvalue of a non-class type T.
Definition ExprCXX.h:2200
Implicit construction of a std::initializer_list<T> object from an array temporary within list-initia...
Definition ExprCXX.h:804
A C++ throw-expression (C++ [except.throw]).
Definition ExprCXX.h:1212
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition ExprCXX.h:852
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1603
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
CastKind getCastKind() const
Definition Expr.h:3723
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1951
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
ChooseExpr - GNU builtin-in function __builtin_choose_expr.
Definition Expr.h:4851
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4887
bool isPartial() const
True iff the comparison is not totally ordered.
const ValueInfo * getLess() const
const ValueInfo * getUnordered() const
const CXXRecordDecl * Record
The declaration for the comparison category type from the standard library.
bool isStrong() const
True iff the comparison is "strong".
const ValueInfo * getGreater() const
const ValueInfo * getEqualOrEquiv() const
const Expr * getInitializer() const
Definition Expr.h:3636
Represents the canonical version of C arrays with a specified constant size.
Definition TypeBase.h:3810
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
bool hasAttr() const
Definition DeclBase.h:585
InitListExpr * getUpdater() const
Definition Expr.h:5939
This represents one expression.
Definition Expr.h:112
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3688
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3175
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3260
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3411
const Expr * getSubExpr() const
Definition Expr.h:1065
Expr * getResultExpr()
Return the result expression of this controlling expression.
Definition Expr.h:6467
Represents an implicitly-generated value initialization of an object of a given type.
Definition Expr.h:6060
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition Expr.cpp:2462
FieldDecl * getInitializedFieldInUnion()
If this initializes a union, specifies which field in the union to initialize.
Definition Expr.h:5428
bool hadArrayRangeDesignator() const
Definition Expr.h:5486
Expr * getArrayFiller()
If this initializer list initializes an array with more elements than there are initializers in the l...
Definition Expr.h:5404
const Expr * getInit(unsigned Init) const
Definition Expr.h:5356
ArrayRef< Expr * > inits()
Definition Expr.h:5352
llvm::iterator_range< capture_init_iterator > capture_inits()
Retrieve the initialization expressions for this lambda's captures.
Definition ExprCXX.h:2087
capture_range captures() const
Retrieve this lambda's captures.
Definition ExprCXX.cpp:1373
CXXRecordDecl * getLambdaClass() const
Retrieve the class that corresponds to the lambda.
Definition ExprCXX.cpp:1402
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition ExprCXX.h:4920
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4937
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
Represents a place-holder for an object not to be initialized by anything.
Definition Expr.h:5880
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1181
const Expr * getSubExpr() const
Definition Expr.h:2202
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition Expr.h:6803
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8515
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1551
bool isPODType(const ASTContext &Context) const
Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
Definition Type.cpp:2741
@ PCK_Struct
The type is a struct containing a field whose type is neither PCK_Trivial nor PCK_VolatileTrivial.
Definition TypeBase.h:1523
Represents a struct/union/class.
Definition Decl.h:4342
field_range fields() const
Definition Decl.h:4545
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4542
field_iterator field_begin() const
Definition Decl.cpp:5277
CompoundStmt * getSubStmt()
Definition Expr.h:4615
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantArrayType() const
Definition TypeBase.h:8771
bool isArrayType() const
Definition TypeBase.h:8767
bool isPointerType() const
Definition TypeBase.h:8668
bool isReferenceType() const
Definition TypeBase.h:8692
bool isVariableArrayType() const
Definition TypeBase.h:8779
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
Definition TypeBase.h:9156
bool isAggregateType() const
Determines whether the type is a C++ aggregate type or C aggregate or union type.
Definition Type.cpp:2456
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8803
bool isMemberPointerType() const
Definition TypeBase.h:8749
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2358
bool isNullPtrType() const
Definition TypeBase.h:9071
bool isRecordType() const
Definition TypeBase.h:8795
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
Expr * getSubExpr() const
Definition Expr.h:2288
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4960
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
constexpr Variable var(Literal L)
Returns the variable of L.
Definition CNFFormula.h:64
@ Address
A pointer to a ValueDecl.
Definition Primitives.h:28
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
static bool emitLifetimeMarkers()
static bool aggValueSlotDestructedFlag()
static bool aggValueSlotGC()
static bool aggValueSlotAlias()
static bool opLoadStoreAtomic()
static bool aggEmitFinalDestCopyRValue()
static bool cleanupDeactivationScope()
static bool aggValueSlotVolatile()
static bool atomicTypes()
static bool cudaSupport()
static bool incrementProfileCounter()
clang::CharUnits getPointerAlign() const
llvm::APSInt getIntValue() const
Get the constant integer value used by this variable to represent the comparison category result type...