clang 23.0.0git
CIRGenExprAggregate.cpp
Go to the documentation of this file.
//===- CIRGenExprAggregate.cpp - Emit CIR Code from Aggregate Expressions -===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Aggregate Expr nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenBuilder.h"
14#include "CIRGenFunction.h"
15#include "CIRGenValue.h"
16#include "mlir/IR/Builders.h"
18
19#include "clang/AST/Expr.h"
22#include <cstdint>
23
24using namespace clang;
25using namespace clang::CIRGen;
26
27namespace {
28// FIXME(cir): This should be a common helper between CIRGen
29// and traditional CodeGen
30/// Is the value of the given expression possibly a reference to or
31/// into a __block variable?
32static bool isBlockVarRef(const Expr *e) {
33 // Make sure we look through parens.
34 e = e->IgnoreParens();
35
36 // Check for a direct reference to a __block variable.
37 if (const DeclRefExpr *dre = dyn_cast<DeclRefExpr>(e)) {
38 const VarDecl *var = dyn_cast<VarDecl>(dre->getDecl());
39 return (var && var->hasAttr<BlocksAttr>());
40 }
41
42 // More complicated stuff.
43
44 // Binary operators.
45 if (const BinaryOperator *op = dyn_cast<BinaryOperator>(e)) {
46 // For an assignment or pointer-to-member operation, just care
47 // about the LHS.
48 if (op->isAssignmentOp() || op->isPtrMemOp())
49 return isBlockVarRef(op->getLHS());
50
51 // For a comma, just care about the RHS.
52 if (op->getOpcode() == BO_Comma)
53 return isBlockVarRef(op->getRHS());
54
55 // FIXME: pointer arithmetic?
56 return false;
57
58 // Check both sides of a conditional operator.
59 } else if (const AbstractConditionalOperator *op =
60 dyn_cast<AbstractConditionalOperator>(e)) {
61 return isBlockVarRef(op->getTrueExpr()) ||
62 isBlockVarRef(op->getFalseExpr());
63
64 // OVEs are required to support BinaryConditionalOperators.
65 } else if (const OpaqueValueExpr *op = dyn_cast<OpaqueValueExpr>(e)) {
66 if (const Expr *src = op->getSourceExpr())
67 return isBlockVarRef(src);
68
69 // Casts are necessary to get things like (*(int*)&var) = foo().
70 // We don't really care about the kind of cast here, except
71 // we don't want to look through l2r casts, because it's okay
72 // to get the *value* in a __block variable.
73 } else if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
74 if (cast->getCastKind() == CK_LValueToRValue)
75 return false;
76 return isBlockVarRef(cast->getSubExpr());
77
78 // Handle unary operators. Again, just aggressively look through
79 // it, ignoring the operation.
80 } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(e)) {
81 return isBlockVarRef(uop->getSubExpr());
82
83 // Look into the base of a field access.
84 } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(e)) {
85 return isBlockVarRef(mem->getBase());
86
87 // Look into the base of a subscript.
88 } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(e)) {
89 return isBlockVarRef(sub->getBase());
90 }
91
92 return false;
93}
94
/// Visitor that emits expressions of aggregate type, writing the result into
/// the AggValueSlot `dest` (which may be ignored, in which case a temporary
/// slot is materialized on demand by ensureSlot/ensureDest).
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {

  CIRGenFunction &cgf;
  AggValueSlot dest;

  // Calls `fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *e,
                           llvm::function_ref<RValue(ReturnValueSlot)> fn);

  // Return `dest` if it is usable, otherwise create a fresh aggregate
  // temporary of type `t` to emit into (without modifying `dest`).
  AggValueSlot ensureSlot(mlir::Location loc, QualType t) {
    if (!dest.isIgnored())
      return dest;
    return cgf.createAggTemp(t, loc, "agg.tmp.ensured");
  }

  // Like ensureSlot, but replaces the ignored `dest` member in place so
  // subsequent emission targets the new temporary.
  void ensureDest(mlir::Location loc, QualType ty) {
    if (!dest.isIgnored())
      return;
    dest = cgf.createAggTemp(ty, loc, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CIRGenFunction &cgf, AggValueSlot dest)
      : cgf(cgf), dest(dest) {}

  /// Given an expression with aggregate type that represents a value lvalue,
  /// this method emits the address of the lvalue, then loads the result into
  /// DestPtr.
  void emitAggLoadOfLValue(const Expr *e);

  void emitArrayInit(Address destPtr, cir::ArrayType arrayTy, QualType arrayQTy,
                     Expr *exprToVisit, ArrayRef<Expr *> args,
                     Expr *arrayFiller);

  void emitFinalDestCopy(QualType type, RValue src);

  /// Perform the final copy to DestPtr, if desired.
  void emitFinalDestCopy(QualType type, const LValue &src,
                         CIRGenFunction::ExprValueKind srcValueKind =
  // NOTE(review): the default argument value (and the end of this declaration)
  // was dropped by the extraction — upstream defaults to
  // CIRGenFunction::EVK_NonRValue; restore before compiling.

  void emitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void emitInitializationToLValue(Expr *e, LValue lv);

  void emitNullInitializationToLValue(mlir::Location loc, LValue lv);

  void Visit(Expr *e) { StmtVisitor<AggExprEmitter>::Visit(e); }

  void VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
    emitAggLoadOfLValue(e);
  }

  void VisitCallExpr(const CallExpr *e);
  void VisitStmtExpr(const StmtExpr *e) {
    CIRGenFunction::StmtExprEvaluation eval(cgf);
    Address retAlloca =
        cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
    (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca, dest);
  }

  void VisitBinAssign(const BinaryOperator *e) {
    // For an assignment to work, the value on the right has
    // to be compatible with the value on the left.
    assert(cgf.getContext().hasSameUnqualifiedType(e->getLHS()->getType(),
                                                   e->getRHS()->getType()) &&
           "Invalid assignment");

    if (isBlockVarRef(e->getLHS()) &&
        e->getRHS()->HasSideEffects(cgf.getContext())) {
      cgf.cgm.errorNYI(e->getSourceRange(),
                       "block var reference with side effects");
      return;
    }

    LValue lhs = cgf.emitLValue(e->getLHS());

    // If we have an atomic type, evaluate into the destination and then
    // do an atomic copy.
    // NOTE(review): a line was dropped by the extraction here.

    // Codegen the RHS so that it stores directly into the LHS.
    // NOTE(review): dropped lines here — the forLValue call below is missing
    // its remaining arguments in this extraction; restore from upstream
    // before compiling.
    AggValueSlot lhsSlot = AggValueSlot::forLValue(

    // A non-volatile aggregate destination might have volatile member.
    if (!lhsSlot.isVolatile() && cgf.hasVolatileMember(e->getLHS()->getType()))
      lhsSlot.setVolatile(true);

    cgf.emitAggExpr(e->getRHS(), lhsSlot);

    // Copy into the destination if the assignment isn't ignored.
    emitFinalDestCopy(e->getType(), lhs);

    // NOTE(review): the tail of this condition was dropped by the extraction
    // (upstream also tests the destruction kind of e->getType()).
    if (!dest.isIgnored() && !dest.isExternallyDestructed() &&
      cgf.pushDestroy(QualType::DK_nontrivial_c_struct, dest.getAddress(),
                      e->getType());
  }

  void VisitDeclRefExpr(DeclRefExpr *e) { emitAggLoadOfLValue(e); }

  void VisitInitListExpr(InitListExpr *e);
  void VisitCXXConstructExpr(const CXXConstructExpr *e);

  void visitCXXParenListOrInitListExpr(Expr *e, ArrayRef<Expr *> args,
                                       FieldDecl *initializedFieldInUnion,
                                       Expr *arrayFiller);
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
    CIRGenFunction::CXXDefaultInitExprScope Scope(cgf, die);
    Visit(die->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *e) {
    // Ensure that we have a slot, but if we already do, remember
    // whether it was externally destructed.
    bool wasExternallyDestructed = dest.isExternallyDestructed();
    ensureDest(cgf.getLoc(e->getSourceRange()), e->getType());

    // We're going to push a destructor if there isn't already one.
    dest.setExternallyDestructed();

    Visit(e->getSubExpr());

    // Push that destructor we promised.
    if (!wasExternallyDestructed)
      cgf.emitCXXTemporary(e->getTemporary(), e->getType(), dest.getAddress());
  }
  void VisitLambdaExpr(LambdaExpr *e);
  void VisitExprWithCleanups(ExprWithCleanups *e);

  // Stubs -- These should be moved up when they are implemented.
  void VisitCastExpr(CastExpr *e) {
    switch (e->getCastKind()) {
    case CK_LValueToRValueBitCast: {
      if (dest.isIgnored()) {
        cgf.emitAnyExpr(e->getSubExpr(), AggValueSlot::ignored(),
                        /*ignoreResult=*/true);
        break;
      }

      LValue sourceLV = cgf.emitLValue(e->getSubExpr());
      Address sourceAddress =
          sourceLV.getAddress().withElementType(cgf.getBuilder(), cgf.voidTy);
      Address destAddress =
          dest.getAddress().withElementType(cgf.getBuilder(), cgf.voidTy);

      mlir::Location loc = cgf.getLoc(e->getExprLoc());

      // Bit-cast between aggregates: copy the raw bytes of the source
      // object into the destination.
      mlir::Value sizeVal = cgf.getBuilder().getConstInt(
          loc, cgf.sizeTy,
          cgf.getContext().getTypeSizeInChars(e->getType()).getQuantity());
      cgf.getBuilder().createMemCpy(loc, destAddress.getPointer(),
                                    sourceAddress.getPointer(), sizeVal);

      break;
    }
    case CK_LValueToRValue:
      // If we're loading from a volatile type, force the destination
      // into existence.
      // NOTE(review): the guard for this diagnostic was dropped by the
      // extraction — upstream only reports NYI for volatile-qualified
      // sources; restore before compiling.
      cgf.cgm.errorNYI(e->getSourceRange(),
                       "AggExprEmitter: volatile lvalue-to-rvalue cast");
      [[fallthrough]];
    case CK_NoOp:
    case CK_UserDefinedConversion:
    case CK_ConstructorConversion:
      assert(cgf.getContext().hasSameUnqualifiedType(e->getSubExpr()->getType(),
                                                     e->getType()) &&
             "Implicit cast types must be compatible");
      Visit(e->getSubExpr());
      break;
    default:
      cgf.cgm.errorNYI(e->getSourceRange(),
                       std::string("AggExprEmitter: VisitCastExpr: ") +
                           e->getCastKindName());
      break;
    }
  }
  void VisitStmt(Stmt *s) {
    cgf.cgm.errorNYI(s->getSourceRange(),
                     std::string("AggExprEmitter::VisitStmt: ") +
                         s->getStmtClassName());
  }
  void VisitParenExpr(ParenExpr *pe) { Visit(pe->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
    Visit(ge->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoawaitExpr");
  }
  void VisitCoyieldExpr(CoyieldExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoyieldExpr");
  }
  void VisitUnaryCoawait(UnaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitUnaryCoawait");
  }
  void VisitUnaryExtension(UnaryOperator *e) { Visit(e->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitSubstNonTypeTemplateParmExpr");
  }
  void VisitConstantExpr(ConstantExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitConstantExpr");
  }
  void VisitMemberExpr(MemberExpr *e) { emitAggLoadOfLValue(e); }
  void VisitUnaryDeref(UnaryOperator *e) { emitAggLoadOfLValue(e); }
  void VisitStringLiteral(StringLiteral *e) { emitAggLoadOfLValue(e); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *e);

  void VisitPredefinedExpr(const PredefinedExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitPredefinedExpr");
  }
  void VisitBinaryOperator(const BinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitBinaryOperator");
  }
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitPointerToDataMemberBinaryOperator");
  }
  void VisitBinComma(const BinaryOperator *e) {
    // Comma: evaluate the LHS purely for side effects, then emit the RHS
    // into the destination slot.
    cgf.emitIgnoredExpr(e->getLHS());
    Visit(e->getRHS());
  }
  void VisitBinCmp(const BinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinCmp");
  }
  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitCXXRewrittenBinaryOperator");
  }
  void VisitObjCMessageExpr(ObjCMessageExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitObjCMessageExpr");
  }
  void VisitObjCIVarRefExpr(ObjCIvarRefExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitObjCIVarRefExpr");
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *e) {
    // Emit the base initializer first, then apply the updates on top of it.
    AggValueSlot dest = ensureSlot(cgf.getLoc(e->getExprLoc()), e->getType());
    LValue destLV = cgf.makeAddrLValue(dest.getAddress(), e->getType());
    emitInitializationToLValue(e->getBase(), destLV);
    VisitInitListExpr(e->getUpdater());
  }
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitAbstractConditionalOperator");
  }
  void VisitChooseExpr(const ChooseExpr *e) { Visit(e->getChosenSubExpr()); }
  void VisitCXXParenListInitExpr(CXXParenListInitExpr *e) {
    // NOTE(review): an argument was dropped by the extraction here (upstream
    // also passes the initialized field in union); restore before compiling.
    visitCXXParenListOrInitListExpr(e, e->getInitExprs(),
                                    e->getArrayFiller());
  }

  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *e,
                              llvm::Value *outerBegin = nullptr) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitArrayInitLoopExpr");
  }
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitImplicitValueInitExpr");
  }
  void VisitNoInitExpr(NoInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitNoInitExpr");
  }
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *dae) {
    CIRGenFunction::CXXDefaultArgExprScope scope(cgf, dae);
    Visit(dae->getExpr());
  }
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitCXXInheritedCtorInitExpr");
  }

  /// Emit the initializer for a std::initializer_list initialized with a
  /// real initializer list.
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *e) {
    ASTContext &ctx = cgf.getContext();
    CIRGenBuilderTy builder = cgf.getBuilder();
    mlir::Location loc = cgf.getLoc(e->getExprLoc());

    LValue array = cgf.emitLValue(e->getSubExpr());
    assert(array.isSimple() && "initializer_list array not a simple lvalue");
    Address arrayPtr = array.getAddress();

    // NOTE(review): the initializer of arrayType was dropped by the
    // extraction (presumably ctx.getAsConstantArrayType of the sub-expr's
    // type); restore before compiling.
    const ConstantArrayType *arrayType =
    assert(arrayType && "std::initializer_list constructed from non-array");

    auto *record = e->getType()->castAsRecordDecl();
    RecordDecl::field_iterator field = record->field_begin();
    assert(field != record->field_end() &&
           ctx.hasSameType(field->getType()->getPointeeType(),
                           arrayType->getElementType()) &&
           "Expected std::initializer_list first field to be const E *");

    // Start pointer.
    AggValueSlot dest = ensureSlot(loc, e->getType());
    LValue destLV = cgf.makeAddrLValue(dest.getAddress(), e->getType());
    LValue start =
        cgf.emitLValueForFieldInitialization(destLV, *field, field->getName());

    mlir::Value arrayStart = arrayPtr.emitRawPointer();
    cgf.emitStoreThroughLValue(RValue::get(arrayStart), start);
    ++field;
    assert(field != record->field_end() &&
           "Expected std::initializer_list to have two fields");

    // Second field is either an end pointer or a length, depending on the
    // library implementation; only the length form is handled so far.
    cir::ConstantOp size = builder.getConstInt(loc, arrayType->getSize());
    LValue endOrLength =
        cgf.emitLValueForFieldInitialization(destLV, *field, field->getName());
    if (ctx.hasSameType(field->getType(), ctx.getSizeType())) {
      // Length.
      cgf.emitStoreThroughLValue(RValue::get(size), endOrLength);
    } else {
      cgf.cgm.errorNYI(
          "Aggregate VisitCXXStdInitializerListExpr: field type != sizeTy");
      return;
    }

    assert(++field == record->field_end() &&
           "Expected std::initializer_list to only have two fields");
  }

  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitCXXScalarValueInitExpr");
  }
  void VisitCXXTypeidExpr(CXXTypeidExpr *e) { emitAggLoadOfLValue(e); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *e) {
    Visit(e->getSubExpr());
  }
  void VisitOpaqueValueExpr(OpaqueValueExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitOpaqueValueExpr");
  }

  void VisitPseudoObjectExpr(PseudoObjectExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitPseudoObjectExpr");
  }

  void VisitVAArgExpr(VAArgExpr *e) {
    // emitVAArg returns an aggregate value (not a pointer) at the CIR level.
    // ABI-specific pointer handling will be done later in LoweringPrepare.
    mlir::Value vaArgValue = cgf.emitVAArg(e);

    // Create a temporary alloca to hold the aggregate value.
    mlir::Location loc = cgf.getLoc(e->getSourceRange());
    Address tmpAddr = cgf.createMemTemp(e->getType(), loc, "vaarg.tmp");

    // Store the va_arg result into the temporary.
    cgf.emitAggregateStore(vaArgValue, tmpAddr);

    // Create an LValue from the temporary address.
    LValue tmpLValue = cgf.makeAddrLValue(tmpAddr, e->getType());

    // Copy the aggregate value from temporary to destination.
    emitFinalDestCopy(e->getType(), tmpLValue);
  }

  void VisitCXXThrowExpr(const CXXThrowExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXThrowExpr");
  }
  void VisitAtomicExpr(AtomicExpr *e) {
    RValue result = cgf.emitAtomicExpr(e);
    emitFinalDestCopy(e->getType(), result);
  }
};
477
478} // namespace
479
480static bool isTrivialFiller(Expr *e) {
481 if (!e)
482 return true;
483
485 return true;
486
487 if (auto *ile = dyn_cast<InitListExpr>(e)) {
488 if (ile->getNumInits())
489 return false;
490 return isTrivialFiller(ile->getArrayFiller());
491 }
492
493 if (const auto *cons = dyn_cast_or_null<CXXConstructExpr>(e))
494 return cons->getConstructor()->isDefaultConstructor() &&
495 cons->getConstructor()->isTrivial();
496
497 return false;
498}
499
/// Given an expression with aggregate type that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result into DestPtr.
void AggExprEmitter::emitAggLoadOfLValue(const Expr *e) {
  LValue lv = cgf.emitLValue(e);

  // If the type of the l-value is atomic, then do an atomic load.
  // NOTE(review): the atomic-load handling this comment refers to is missing
  // from this extraction (a line was dropped here) — confirm against the
  // upstream file.

  emitFinalDestCopy(e->getType(), lv);
}
510
/// Emit a compound literal as an aggregate, either by loading/copying it (POD
/// with a potentially-aliasing destination) or by emitting its initializer
/// directly into the destination slot.
void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
  if (dest.isPotentiallyAliased() && e->getType().isPODType(cgf.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    emitAggLoadOfLValue(e);
    return;
  }

  AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  bool destruct =
      !cgf.getLangOpts().CPlusPlus && !slot.isExternallyDestructed();
  // NOTE(review): the statement guarded by this `if` was lost in the
  // extraction (presumably marking the slot externally destructed). As
  // written, the `if` now accidentally guards the emitAggExpr call below —
  // restore the dropped line from upstream before compiling.
  if (destruct)

  cgf.emitAggExpr(e->getInitializer(), slot);

  if (destruct)
    if ([[maybe_unused]] QualType::DestructionKind dtorKind =
    // NOTE(review): the initializer expression for dtorKind was dropped by
    // the extraction (presumably e->getType().isDestructedType()) — TODO
    // confirm against upstream.
      cgf.cgm.errorNYI(e->getSourceRange(), "compound literal with destructor");
}
535
/// Emit initialization of the array at `destPtr` of CIR type `arrayTy` /
/// Clang type `arrayQTy`: the explicit initializers in `args` are emitted
/// element by element, and any remaining elements are filled from
/// `arrayFiller` (or zero-initialized) via an emitted do-while loop.
void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
                                   QualType arrayQTy, Expr *e,
                                   ArrayRef<Expr *> args, Expr *arrayFiller) {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  const mlir::Location loc = cgf.getLoc(e->getSourceRange());

  const uint64_t numInitElements = args.size();

  const QualType elementType =
      cgf.getContext().getAsArrayType(arrayQTy)->getElementType();

  // Destruction of partially-constructed elements on exception unwind is not
  // implemented yet.
  if (elementType.isDestructedType() && cgf.cgm.getLangOpts().Exceptions) {
    cgf.cgm.errorNYI(loc, "initialized array requires destruction");
    return;
  }

  const QualType elementPtrType = cgf.getContext().getPointerType(elementType);

  const mlir::Type cirElementType = cgf.convertType(elementType);
  const cir::PointerType cirElementPtrType =
      builder.getPointerTo(cirElementType);

  // Decay the array address to a pointer to its first element.
  auto begin = cir::CastOp::create(builder, loc, cirElementPtrType,
                                   cir::CastKind::array_to_ptrdecay,
                                   destPtr.getPointer());

  const CharUnits elementSize =
      cgf.getContext().getTypeSizeInChars(elementType);
  const CharUnits elementAlign =
      destPtr.getAlignment().alignmentOfArrayElement(elementSize);

  // The 'current element to initialize'. The invariants on this
  // variable are complicated. Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  mlir::Value element = begin;

  // Don't build the 'one' before the cycle to avoid
  // emmiting the redundant `cir.const 1` instrs.
  mlir::Value one;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != numInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      one = builder.getConstantInt(loc, cgf.ptrDiffTy, i);
      element = builder.createPtrStride(loc, begin, one);
    }

    const Address address = Address(element, cirElementType, elementAlign);
    const LValue elementLV = cgf.makeAddrLValue(address, elementType);
    emitInitializationToLValue(args[i], elementLV);
  }

  const uint64_t numArrayElements = arrayTy.getSize();

  // Check whether there's a non-trivial array-fill expression.
  const bool hasTrivialFiller = isTrivialFiller(arrayFiller);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression. We can skip this if the we're
  // emitting to zeroed memory.
  if (numInitElements != numArrayElements &&
      !(dest.isZeroed() && hasTrivialFiller &&
        cgf.getTypes().isZeroInitializable(elementType))) {
    // Advance to the start of the rest of the array.
    if (numInitElements) {
      one = builder.getConstantInt(loc, cgf.ptrDiffTy, 1);
      element = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
                                         element, one);
    }

    // Allocate the temporary variable
    // to store the pointer to first unitialized element
    const Address tmpAddr = cgf.createTempAlloca(
        cirElementPtrType, cgf.getPointerAlign(), loc, "arrayinit.temp");
    LValue tmpLV = cgf.makeAddrLValue(tmpAddr, elementPtrType);
    cgf.emitStoreThroughLValue(RValue::get(element), tmpLV);

    // Compute the end of array
    cir::ConstantOp numArrayElementsConst = builder.getConstInt(
        loc, mlir::cast<cir::IntType>(cgf.ptrDiffTy), numArrayElements);
    mlir::Value end = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
                                               begin, numArrayElementsConst);

    // Loop: while the current element pointer != end, fill one element and
    // advance the pointer stored in the temporary.
    builder.createDoWhile(
        loc,
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);
          cir::CmpOp cmp = cir::CmpOp::create(builder, loc, cir::CmpOpKind::ne,
                                              currentElement, end);
          builder.createCondition(cmp);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);

          // NOTE(review): a line was dropped by the extraction here —
          // confirm against the upstream file.

          // Emit the actual filler expression.
          LValue elementLV = cgf.makeAddrLValue(
              Address(currentElement, cirElementType, elementAlign),
              elementType);
          if (arrayFiller)
            emitInitializationToLValue(arrayFiller, elementLV);
          else
            emitNullInitializationToLValue(loc, elementLV);

          // Tell the EH cleanup that we finished with the last element.
          if (cgf.cgm.getLangOpts().Exceptions) {
            cgf.cgm.errorNYI(loc, "update destructed array element for EH");
            return;
          }

          // Advance pointer and store them to temporary variable
          cir::ConstantOp one = builder.getConstInt(
              loc, mlir::cast<cir::IntType>(cgf.ptrDiffTy), 1);
          auto nextElement = cir::PtrStrideOp::create(
              builder, loc, cirElementPtrType, currentElement, one);
          cgf.emitStoreThroughLValue(RValue::get(nextElement), tmpLV);

          builder.createYield(loc);
        });
  }
}
663
664/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
665void AggExprEmitter::emitFinalDestCopy(QualType type, RValue src) {
666 assert(src.isAggregate() && "value must be aggregate value!");
667 LValue srcLV = cgf.makeAddrLValue(src.getAggregateAddress(), type);
668 emitFinalDestCopy(type, srcLV, CIRGenFunction::EVK_RValue);
669}
670
/// Perform the final copy to destPtr, if desired.
void AggExprEmitter::emitFinalDestCopy(
    QualType type, const LValue &src,
    CIRGenFunction::ExprValueKind srcValueKind) {
  // If dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result. Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (dest.isIgnored())
    return;

  // Non-trivial C struct semantics (destructive move / non-trivial copy) are
  // not implemented yet; diagnose and fall through to a plain copy.
  if (srcValueKind == CIRGenFunction::EVK_RValue) {
    if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) {
      cgf.cgm.errorNYI("emitFinalDestCopy: EVK_RValue & PCK_Struct");
    }
  } else {
    if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) {
      cgf.cgm.errorNYI("emitFinalDestCopy: !EVK_RValue & PCK_Struct");
    }
  }

  // NOTE(review): lines were dropped by the extraction here, and the
  // forLValue call below is missing its remaining arguments — restore from
  // upstream before compiling.
  AggValueSlot srcAgg = AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
  emitCopy(type, dest, srcAgg);
}
701
/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::emitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  // NOTE(review): a line was dropped by the extraction here — confirm
  // against the upstream file.

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is. Use the minimum alignment of
  // the two sides.
  LValue destLV = cgf.makeAddrLValue(dest.getAddress(), type);
  LValue srcLV = cgf.makeAddrLValue(src.getAddress(), type);
  // NOTE(review): another line was dropped by the extraction here — confirm
  // against the upstream file.
  cgf.emitAggregateCopy(destLV, srcLV, type, dest.mayOverlap(),
                        dest.isVolatile() || src.isVolatile());
}
719
/// Emit the initializer expression `e` into the lvalue `lv`, dispatching on
/// the evaluation kind of the lvalue's type (complex / aggregate / scalar).
void AggExprEmitter::emitInitializationToLValue(Expr *e, LValue lv) {
  const QualType type = lv.getType();

  // NOTE(review): the `if` that opens this branch was dropped by the
  // extraction (upstream checks for value-initializing expressions before
  // falling back to null-initialization) — restore before compiling.
  const mlir::Location loc = e->getSourceRange().isValid()
                                 ? cgf.getLoc(e->getSourceRange())
                                 : *cgf.currSrcLoc;
    return emitNullInitializationToLValue(loc, lv);
  }

  // A NoInitExpr means "leave this memory alone".
  if (isa<NoInitExpr>(e))
    return;

  if (type->isReferenceType()) {
    RValue rv = cgf.emitReferenceBindingToExpr(e);
    return cgf.emitStoreThroughLValue(rv, lv);
  }

  switch (cgf.getEvaluationKind(type)) {
  case cir::TEK_Complex:
    cgf.emitComplexExprIntoLValue(e, lv, /*isInit*/ true);
    break;
  // NOTE(review): the TEK_Aggregate case label and the head of its
  // emitAggExpr call were dropped by the extraction; only the tail of the
  // call survives below — restore from upstream before compiling.
                                   dest.isZeroed()));

    return;
  case cir::TEK_Scalar:
    if (lv.isSimple())
      cgf.emitScalarInit(e, cgf.getLoc(e->getSourceRange()), lv);
    else
    // NOTE(review): the non-simple scalar store was dropped by the
    // extraction — restore before compiling.
    return;
  }
}
757
758void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *e) {
759 AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
760 cgf.emitCXXConstructExpr(e, slot);
761}
762
/// Store a null/zero value of the lvalue's type into `lv`, skipping the work
/// entirely when the destination slot is already known to be zeroed.
void AggExprEmitter::emitNullInitializationToLValue(mlir::Location loc,
                                                    LValue lv) {
  const QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (dest.isZeroed() && cgf.getTypes().isZeroInitializable(type))
    return;

  if (cgf.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    mlir::Value null = cgf.cgm.emitNullConstant(type, loc);
    if (lv.isSimple()) {
      cgf.emitStoreOfScalar(null, lv, /* isInitialization */ true);
      return;
    }

    // NOTE(review): a line was dropped by the extraction here — upstream
    // handles the non-simple (e.g. bitfield) store; confirm before relying
    // on this path.
    return;
  }

  // There's a potential optimization opportunity in combining
  // memsets; that would be easy for arrays, but relatively
  // difficult for structures with the current code.
  cgf.emitNullInitialization(loc, lv.getAddress(), lv.getType());
}
789
/// Emit a lambda expression by initializing each field of the lambda's
/// closure class from the corresponding capture's initializer expression.
void AggExprEmitter::VisitLambdaExpr(LambdaExpr *e) {
  CIRGenFunction::SourceLocRAIIObject loc{cgf, cgf.getLoc(e->getSourceRange())};
  AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
  [[maybe_unused]] LValue slotLV =
      cgf.makeAddrLValue(slot.getAddress(), e->getType());

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception or contains branch out of the expressions.
  // NOTE(review): a line was dropped by the extraction here — confirm
  // against the upstream file.

  // Walk the closure fields, captures, and capture initializers in lockstep.
  for (auto [curField, capture, captureInit] : llvm::zip(
           e->getLambdaClass()->fields(), e->captures(), e->capture_inits())) {
    // Pick a name for the field.
    llvm::StringRef fieldName = curField->getName();
    if (capture.capturesVariable()) {
      assert(!curField->isBitField() && "lambdas don't have bitfield members!");
      ValueDecl *v = capture.getCapturedVar();
      fieldName = v->getName();
      cgf.cgm.lambdaFieldToName[curField] = fieldName;
    } else if (capture.capturesThis()) {
      cgf.cgm.lambdaFieldToName[curField] = "this";
    } else {
      cgf.cgm.errorNYI(e->getSourceRange(), "Unhandled capture kind");
      cgf.cgm.lambdaFieldToName[curField] = "unhandled-capture-kind";
    }

    // Emit initialization
    LValue lv =
        cgf.emitLValueForFieldInitialization(slotLV, curField, fieldName);
    if (curField->hasCapturedVLAType())
      cgf.cgm.errorNYI(e->getSourceRange(), "lambda captured VLA type");

    emitInitializationToLValue(captureInit, lv);

    // Push a destructor if necessary.
    if ([[maybe_unused]] QualType::DestructionKind DtorKind =
            curField->getType().isDestructedType())
      cgf.cgm.errorNYI(e->getSourceRange(), "lambda with destructed field");
  }
}
830
831void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *e) {
832 CIRGenFunction::RunCleanupsScope cleanups(cgf);
833 CIRGenBuilderTy &builder = cgf.getBuilder();
834 mlir::Location scopeLoc = cgf.getLoc(e->getSourceRange());
835 mlir::OpBuilder::InsertPoint scopeBegin;
836
837 // Explicitly introduce a scope for cleanup expressions, even though this
838 // overlaps with the RunCleanupsScope above.
839 //
840 // CIR does not yet model cleanup scopes explicitly, so a lexical scope is
841 // used as a temporary approximation. This is expected to be revisited once
842 // cleanup handling is redesigned.
843 cir::ScopeOp::create(builder, scopeLoc, /*scopeBuilder=*/
844 [&](mlir::OpBuilder &b, mlir::Location loc) {
845 scopeBegin = b.saveInsertionPoint();
846 });
847
848 {
849 mlir::OpBuilder::InsertionGuard guard(builder);
850 builder.restoreInsertionPoint(scopeBegin);
851 CIRGenFunction::LexicalScope lexScope{cgf, scopeLoc,
852 builder.getInsertionBlock()};
853 Visit(e->getSubExpr());
854 }
855}
856
/// Emit a call expression with aggregate result type into the destination
/// slot via withReturnValueSlot.
void AggExprEmitter::VisitCallExpr(const CallExpr *e) {
  // NOTE(review): the `if` condition guarding this NYI diagnostic was
  // dropped by the extraction; upstream checks for a reference return type —
  // restore before compiling.
    cgf.cgm.errorNYI(e->getSourceRange(), "reference return type");
    return;
  }

  withReturnValueSlot(
      e, [&](ReturnValueSlot slot) { return cgf.emitCallExpr(e, slot); });
}
866
/// Invoke `fn` with a return-value slot for `e`, currently always reusing the
/// destination slot's address; destruction-requiring return values are NYI.
void AggExprEmitter::withReturnValueSlot(
    const Expr *e, llvm::function_ref<RValue(ReturnValueSlot)> fn) {
  QualType retTy = e->getType();

  // NOTE(review): a line was dropped by the extraction here, and the
  // initializer of requiresDestruction below was also lost (upstream derives
  // it from the destination slot and retTy.isDestructedType()) — restore
  // before compiling.
  bool requiresDestruction =
  if (requiresDestruction)
    cgf.cgm.errorNYI(
        e->getSourceRange(),
        "withReturnValueSlot: return value requiring destruction is NYI");

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, fn will emit its own, notice that it's "unused", and end its
  // lifetime before we have the chance to emit a proper destructor call.

  Address retAddr = dest.getAddress();
  // NOTE(review): additional lines were dropped by the extraction here —
  // confirm against the upstream file.

  fn(ReturnValueSlot(retAddr));
}
894
/// Emit an init-list expression: transparent lists forward to their single
/// initializer; everything else goes through the shared paren/init-list path.
void AggExprEmitter::VisitInitListExpr(InitListExpr *e) {
  // NOTE(review): the guard for this unreachable was dropped by the
  // extraction; upstream checks e->hadArrayRangeDesignator() — restore
  // before compiling.
    llvm_unreachable("GNU array range designator extension");

  if (e->isTransparent())
    return Visit(e->getInit(0));

  visitCXXParenListOrInitListExpr(
  // NOTE(review): the argument list of this call was dropped by the
  // extraction — restore from upstream before compiling.
}
905
void AggExprEmitter::visitCXXParenListOrInitListExpr(
    Expr *e, ArrayRef<Expr *> args, FieldDecl *initializedFieldInUnion,
    Expr *arrayFiller) {

  // Emit 'e' (an init list or C++ paren-list initialization) into the current
  // destination slot. 'args' holds the explicit initializers,
  // 'initializedFieldInUnion' names the union member being initialized (if
  // any), and 'arrayFiller' covers array elements beyond the explicit list.
  // NOTE(review): several single lines of this definition were dropped by the
  // extraction of this listing; each elision is flagged inline below.
  const mlir::Location loc = cgf.getLoc(e->getSourceRange());
  const AggValueSlot dest = ensureSlot(loc, e->getType());

  if (e->getType()->isConstantArrayType()) {
    // NOTE(review): the initializer of 'arrayTy' (presumably the converted
    // CIR type of e->getType(), cast to cir::ArrayType) was elided here.
    cir::ArrayType arrayTy =
    emitArrayInit(dest.getAddress(), arrayTy, e->getType(), e, args,
                  arrayFiller);
    return;
  } else if (e->getType()->isVariableArrayType()) {
    // VLA initialization is not implemented in CIR yet.
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "visitCXXParenListOrInitListExpr variable array type");
    return;
  }

  if (e->getType()->isArrayType()) {
    // Any remaining array flavor is also still unimplemented.
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "visitCXXParenListOrInitListExpr array type");
    return;
  }

  assert(e->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value. This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned numInitElements = args.size();
  auto *record = e->getType()->castAsRecordDecl();

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.

  unsigned curInitIndex = 0;

  // Emit initialization of base classes.
  if (auto *cxxrd = dyn_cast<CXXRecordDecl>(record)) {
    assert(numInitElements >= cxxrd->getNumBases() &&
           "missing initializer for base class");
    for (auto &base : cxxrd->bases()) {
      assert(!base.isVirtual() && "should not see vbases here");
      CXXRecordDecl *baseRD = base.getType()->getAsCXXRecordDecl();
      Address address = cgf.getAddressOfDirectBaseInCompleteClass(
          loc, dest.getAddress(), cxxrd, baseRD,
          /*baseIsVirtual=*/false);
      // NOTE(review): one argument line of this forAddr call (between
      // IsDestructed and the overlap flag) was elided by the extraction.
      AggValueSlot aggSlot = AggValueSlot::forAddr(
          address, Qualifiers(), AggValueSlot::IsDestructed,
          cgf.getOverlapForBaseInit(cxxrd, baseRD, false));
      cgf.emitAggExpr(args[curInitIndex++], aggSlot);
      if (base.getType().isDestructedType()) {
        cgf.cgm.errorNYI(e->getSourceRange(),
                         "push deferred deactivation cleanup");
        return;
      }
    }
  }

  // Prepare a 'this' for CXXDefaultInitExprs.
  CIRGenFunction::FieldConstructionScope fcScope(cgf, dest.getAddress());

  LValue destLV = cgf.makeAddrLValue(dest.getAddress(), e->getType());

  if (record->isUnion()) {
    // Union initialization (which would use initializedFieldInUnion) is NYI.
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "visitCXXParenListOrInitListExpr union type");
    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  for (const FieldDecl *field : record->fields()) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitField())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    // NOTE(review): the final conjunct of this condition (presumably a
    // zero-initializability check of the field type) was elided here.
    if (curInitIndex == numInitElements && dest.isZeroed() &&
      break;
    LValue lv =
        cgf.emitLValueForFieldInitialization(destLV, field, field->getName());
    // We never generate write-barriers for initialized fields.

    if (curInitIndex < numInitElements) {
      // Store the initializer into the field.
      CIRGenFunction::SourceLocRAIIObject loc{
          cgf, cgf.getLoc(record->getSourceRange())};
      emitInitializationToLValue(args[curInitIndex++], lv);
    } else {
      // We're out of initializers; default-initialize to null
      emitNullInitializationToLValue(cgf.getLoc(e->getSourceRange()), lv);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    if (field->getType().isDestructedType()) {
      cgf.cgm.errorNYI(e->getSourceRange(),
                       "visitCXXParenListOrInitListExpr destructor");
      return;
    }

    // From classic codegen, maybe not useful for CIR:
    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
  }
}
1027
// TODO(cir): This could be shared with classic codegen.
// Determine whether initializing base 'baseRD' of 'rd' may overlap some other
// object, which decides how wide the base-subobject stores may be.
// NOTE(review): the opening signature line and the 'return' statements of
// this definition were dropped by the extraction of this listing; both 'if'
// bodies and the final return are therefore missing below.
    const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual) {
  // If the most-derived object is a field declared with [[no_unique_address]],
  // the tail padding of any virtual base could be reused for other subobjects
  // of that field's class.
  if (isVirtual)

  // If the base class is laid out entirely within the nvsize of the derived
  // class, its tail padding cannot yet be initialized, so we can issue
  // stores at the full width of the base class.
  const ASTRecordLayout &layout = getContext().getASTRecordLayout(rd);
  if (layout.getBaseClassOffset(baseRD) +
          getContext().getASTRecordLayout(baseRD).getSize() <=
      layout.getNonVirtualSize())

  // The tail padding may contain values we need to preserve.
}
1049
  // NOTE(review): the enclosing signature line (per the index:
  // emitAggExpr(const clang::Expr *e, AggValueSlot slot)) was dropped by the
  // extraction of this listing; only the body remains visible here.
  // Delegate to the aggregate expression emitter, targeting 'slot'.
  AggExprEmitter(*this, slot).Visit(const_cast<Expr *>(e));
}
1053
                                       AggValueSlot::Overlap_t mayOverlap,
                                       bool isVolatile) {
  // Emit an aggregate copy from 'src' into 'dest' of type 'ty', lowered to a
  // cir.copy operation.
  // NOTE(review): the opening signature line(s) and a few assertion lines of
  // this definition were dropped by the extraction of this listing.
  // TODO(cir): this function needs improvements, commented code for now since
  // this will be touched again soon.
  assert(!ty->isAnyComplexType() && "Unexpected copy of complex");

  Address destPtr = dest.getAddress();
  Address srcPtr = src.getAddress();

  if (getLangOpts().CPlusPlus) {
    if (auto *record = ty->getAsCXXRecordDecl()) {
      // Only types with some trivial copy/move operation (or trivial_abi /
      // unions) may be copied bytewise; anything else needs a ctor call.
      assert((record->hasTrivialCopyConstructor() ||
              record->hasTrivialCopyAssignment() ||
              record->hasTrivialMoveConstructor() ||
              record->hasTrivialMoveAssignment() ||
              record->hasAttr<TrivialABIAttr>() || record->isUnion()) &&
             "Trying to aggregate-copy a type without a trivial copy/move "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy. This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in anyway the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely. If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get data size info for this aggregate. Don't copy the tail padding if this
  // might be a potentially-overlapping subobject, since the tail padding might
  // be occupied by a different object. Otherwise, copying it is fine.
  TypeInfoChars typeInfo;
  if (mayOverlap)
    typeInfo = getContext().getTypeInfoDataSizeInChars(ty);
  else
    typeInfo = getContext().getTypeInfoInChars(ty);
  // NOTE(review): 'typeInfo' is not consumed in the visible code; the line
  // that used it appears to have been elided by the extraction.

  // NOTE(cir): original codegen would normally convert destPtr and srcPtr to
  // i8* since memcpy operates on bytes. We don't need that in CIR because
  // cir.copy will operate on any CIR pointer that points to a sized type.

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (cgm.getLangOpts().getGC() != LangOptions::NonGC)
    cgm.errorNYI("emitAggregateCopy: GC");

  [[maybe_unused]] cir::CopyOp copyOp =
      builder.createCopy(destPtr.getPointer(), srcPtr.getPointer(), isVolatile);
}
1116
// TODO(cir): This could be shared with classic codegen.
// Determine whether initializing field 'fd' may overlap some other object,
// which constrains whether the tail padding of the field's type may be
// written during its initialization.
// NOTE(review): the opening signature line(s) and the 'return' statements of
// this definition were dropped by the extraction of this listing; both 'if'
// bodies and the final return are therefore missing below.
  if (!fd->hasAttr<NoUniqueAddressAttr>() || !fd->getType()->isRecordType())

  // If the field lies entirely within the enclosing class's nvsize, its tail
  // padding cannot overlap any already-initialized object. (The only subobjects
  // with greater addresses that might already be initialized are vbases.)
  const RecordDecl *classRD = fd->getParent();
  const ASTRecordLayout &layout = getContext().getASTRecordLayout(classRD);
  if (layout.getFieldOffset(fd->getFieldIndex()) +
          getContext().getTypeSize(fd->getType()) <=
      (uint64_t)getContext().toBits(layout.getNonVirtualSize()))

  // The tail padding may contain values we need to preserve.
}
1136
static bool isBlockVarRef(const Expr *E)
Is the value of the given expression possibly a reference to or into a __block variable?
static bool isTrivialFiller(Expr *e)
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
cir::ConditionOp createCondition(mlir::Value condition)
Create a loop condition.
cir::PtrStrideOp createPtrStride(mlir::Location loc, mlir::Value base, mlir::Value stride)
cir::PointerType getPointerTo(mlir::Type ty)
cir::DoWhileOp createDoWhile(mlir::Location loc, llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> condBuilder, llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> bodyBuilder)
Create a do-while operation.
cir::ConstantOp getConstantInt(mlir::Location loc, mlir::Type ty, int64_t value)
cir::YieldOp createYield(mlir::Location loc, mlir::ValueRange value={})
Create a yield operation.
const ConstantArrayType * getAsConstantArrayType(QualType T) const
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
static bool hasSameType(QualType T1, QualType T2)
Determine whether the given types T1 and T2 are equivalent.
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object...
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4356
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2724
QualType getElementType() const
Definition TypeBase.h:3742
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
Expr * getRHS() const
Definition Expr.h:4093
mlir::Value getPointer() const
Definition Address.h:96
mlir::Type getElementType() const
Definition Address.h:123
clang::CharUnits getAlignment() const
Definition Address.h:136
mlir::Value emitRawPointer() const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:110
An aggregate value slot.
IsZeroed_t isZeroed() const
Overlap_t mayOverlap() const
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
IsDestructed_t isExternallyDestructed() const
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
void setExternallyDestructed(bool destructed=true)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
IsAliased_t isPotentiallyAliased() const
void setVolatile(bool flag)
cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal)
cir::LoadOp createLoad(mlir::Location loc, Address addr, bool isVolatile=false)
static bool hasScalarEvaluationKind(clang::QualType type)
mlir::Type convertType(clang::QualType t)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
CIRGenTypes & getTypes() const
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy, AggValueSlot::Overlap_t mayOverlap, bool isVolatile=false)
Emit an aggregate copy.
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *fd)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
LValue emitAggExprToLValue(const Expr *e)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
static bool hasAggregateEvaluationKind(clang::QualType type)
void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit=false)
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, except that if the Field is a reference, this will return the address of the...
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc, Address value, const CXXRecordDecl *derived, const CXXRecordDecl *base, bool baseIsVirtual)
Convert the given pointer to a complete class to the given direct base.
CIRGenBuilderTy & getBuilder()
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual)
Determine whether a base class initialization may overlap some other object.
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
clang::ASTContext & getContext() const
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment and cast it to the defa...
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
const clang::LangOptions & getLangOpts() const
mlir::Value emitNullConstant(QualType t, mlir::Location loc)
Return the result of value-initializing the given type, i.e.
llvm::DenseMap< const clang::FieldDecl *, llvm::StringRef > lambdaFieldToName
Keep a map between lambda fields and names, this needs to be per module since lambdas might get gener...
bool isZeroInitializable(clang::QualType ty)
Return whether a type can be zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
Address getAddress() const
Address getAggregateAddress() const
Return the value of the address of the aggregate.
Definition CIRGenValue.h:69
bool isAggregate() const
Definition CIRGenValue.h:51
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
CXXTemporary * getTemporary()
Definition ExprCXX.h:1512
const Expr * getSubExpr() const
Definition ExprCXX.h:1516
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
MutableArrayRef< Expr * > getInitExprs()
Definition ExprCXX.h:5182
FieldDecl * getInitializedFieldInUnion()
Definition ExprCXX.h:5220
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprCXX.h:354
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1603
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
CastKind getCastKind() const
Definition Expr.h:3723
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1951
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4887
const Expr * getInitializer() const
Definition Expr.h:3636
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1273
bool hasAttr() const
Definition DeclBase.h:577
InitListExpr * getUpdater() const
Definition Expr.h:5939
This represents one expression.
Definition Expr.h:112
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3670
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3160
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
const Expr * getSubExpr() const
Definition Expr.h:1065
Expr * getResultExpr()
Return the result expression of this controlling expression.
Definition Expr.h:6467
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition Expr.cpp:2462
FieldDecl * getInitializedFieldInUnion()
If this initializes a union, specifies which field in the union to initialize.
Definition Expr.h:5428
bool hadArrayRangeDesignator() const
Definition Expr.h:5486
Expr * getArrayFiller()
If this initializer list initializes an array with more elements than there are initializers in the l...
Definition Expr.h:5404
const Expr * getInit(unsigned Init) const
Definition Expr.h:5356
ArrayRef< Expr * > inits()
Definition Expr.h:5352
llvm::iterator_range< capture_init_iterator > capture_inits()
Retrieve the initialization expressions for this lambda's captures.
Definition ExprCXX.h:2084
capture_range captures() const
Retrieve this lambda's captures.
Definition ExprCXX.cpp:1371
CXXRecordDecl * getLambdaClass() const
Retrieve the class that corresponds to the lambda.
Definition ExprCXX.cpp:1400
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4938
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1181
const Expr * getSubExpr() const
Definition Expr.h:2202
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8472
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1551
bool isPODType(const ASTContext &Context) const
Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
Definition Type.cpp:2738
@ PCK_Struct
The type is a struct containing a field whose type is neither PCK_Trivial nor PCK_VolatileTrivial.
Definition TypeBase.h:1523
Represents a struct/union/class.
Definition Decl.h:4327
field_range fields() const
Definition Decl.h:4530
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4527
field_iterator field_begin() const
Definition Decl.cpp:5276
CompoundStmt * getSubStmt()
Definition Expr.h:4615
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantArrayType() const
Definition TypeBase.h:8728
bool isArrayType() const
Definition TypeBase.h:8724
bool isReferenceType() const
Definition TypeBase.h:8649
bool isVariableArrayType() const
Definition TypeBase.h:8736
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8760
bool isRecordType() const
Definition TypeBase.h:8752
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2247
Expr * getSubExpr() const
Definition Expr.h:2288
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
constexpr Variable var(Literal L)
Returns the variable of L.
Definition CNFFormula.h:64
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
static bool emitLifetimeMarkers()
static bool aggValueSlotDestructedFlag()
static bool aggValueSlotGC()
static bool aggValueSlotAlias()
static bool opLoadStoreAtomic()
static bool aggEmitFinalDestCopyRValue()
static bool aggValueSlotVolatile()
static bool opScopeCleanupRegion()
static bool atomicTypes()
static bool cudaSupport()
static bool requiresCleanups()
clang::CharUnits getPointerAlign() const