clang 22.0.0git
CIRGenExprAggregate.cpp
Go to the documentation of this file.
//===- CIRGenExprAggregate.cpp - Emit CIR Code from Aggregate Expressions ===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Aggregate Expr nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenBuilder.h"
14#include "CIRGenFunction.h"
15#include "CIRGenValue.h"
17
18#include "clang/AST/Expr.h"
21#include <cstdint>
22
23using namespace clang;
24using namespace clang::CIRGen;
25
26namespace {
27// FIXME(cir): This should be a common helper between CIRGen
28// and traditional CodeGen
29/// Is the value of the given expression possibly a reference to or
30/// into a __block variable?
31static bool isBlockVarRef(const Expr *e) {
32 // Make sure we look through parens.
33 e = e->IgnoreParens();
34
35 // Check for a direct reference to a __block variable.
36 if (const DeclRefExpr *dre = dyn_cast<DeclRefExpr>(e)) {
37 const VarDecl *var = dyn_cast<VarDecl>(dre->getDecl());
38 return (var && var->hasAttr<BlocksAttr>());
39 }
40
41 // More complicated stuff.
42
43 // Binary operators.
44 if (const BinaryOperator *op = dyn_cast<BinaryOperator>(e)) {
45 // For an assignment or pointer-to-member operation, just care
46 // about the LHS.
47 if (op->isAssignmentOp() || op->isPtrMemOp())
48 return isBlockVarRef(op->getLHS());
49
50 // For a comma, just care about the RHS.
51 if (op->getOpcode() == BO_Comma)
52 return isBlockVarRef(op->getRHS());
53
54 // FIXME: pointer arithmetic?
55 return false;
56
57 // Check both sides of a conditional operator.
58 } else if (const AbstractConditionalOperator *op =
59 dyn_cast<AbstractConditionalOperator>(e)) {
60 return isBlockVarRef(op->getTrueExpr()) ||
61 isBlockVarRef(op->getFalseExpr());
62
63 // OVEs are required to support BinaryConditionalOperators.
64 } else if (const OpaqueValueExpr *op = dyn_cast<OpaqueValueExpr>(e)) {
65 if (const Expr *src = op->getSourceExpr())
66 return isBlockVarRef(src);
67
68 // Casts are necessary to get things like (*(int*)&var) = foo().
69 // We don't really care about the kind of cast here, except
70 // we don't want to look through l2r casts, because it's okay
71 // to get the *value* in a __block variable.
72 } else if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
73 if (cast->getCastKind() == CK_LValueToRValue)
74 return false;
75 return isBlockVarRef(cast->getSubExpr());
76
77 // Handle unary operators. Again, just aggressively look through
78 // it, ignoring the operation.
79 } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(e)) {
80 return isBlockVarRef(uop->getSubExpr());
81
82 // Look into the base of a field access.
83 } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(e)) {
84 return isBlockVarRef(mem->getBase());
85
86 // Look into the base of a subscript.
87 } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(e)) {
88 return isBlockVarRef(sub->getBase());
89 }
90
91 return false;
92}
93
/// Visitor that emits the value of an aggregate-typed expression into the
/// destination slot `dest`. Constructs not yet implemented in CIRGen are
/// reported through cgm.errorNYI rather than silently miscompiled.
///
/// NOTE(review): this text was recovered from a rendered (doxygen) copy of
/// the file; a few physical source lines were lost in extraction. Each gap
/// is marked with a NOTE(review) comment below — restore those lines from
/// upstream before building.
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {

  // The function whose body we are emitting CIR for.
  CIRGenFunction &cgf;
  // The slot the visited expression's value is emitted into. May be
  // "ignored" (no destination); ensureSlot/ensureDest materialize a
  // temporary in that case.
  AggValueSlot dest;

  // Calls `fn` with a valid return value slot, potentially creating a temporary
  // to do so. If a temporary is created, an appropriate copy into `Dest` will
  // be emitted, as will lifetime markers.
  //
  // The given function should take a ReturnValueSlot, and return an RValue that
  // points to said slot.
  void withReturnValueSlot(const Expr *e,
                           llvm::function_ref<RValue(ReturnValueSlot)> fn);

  // Return `dest` if it is usable, otherwise a fresh temporary slot
  // (without updating `dest` itself).
  AggValueSlot ensureSlot(mlir::Location loc, QualType t) {
    if (!dest.isIgnored())
      return dest;
    return cgf.createAggTemp(t, loc, "agg.tmp.ensured");
  }

  // Like ensureSlot, but replaces `dest` in place when it is ignored.
  void ensureDest(mlir::Location loc, QualType ty) {
    if (!dest.isIgnored())
      return;
    dest = cgf.createAggTemp(ty, loc, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CIRGenFunction &cgf, AggValueSlot dest)
      : cgf(cgf), dest(dest) {}

  /// Given an expression with aggregate type that represents a value lvalue,
  /// this method emits the address of the lvalue, then loads the result into
  /// DestPtr.
  void emitAggLoadOfLValue(const Expr *e);

  void emitArrayInit(Address destPtr, cir::ArrayType arrayTy, QualType arrayQTy,
                     Expr *exprToVisit, ArrayRef<Expr *> args,
                     Expr *arrayFiller);

  /// Perform the final copy to DestPtr, if desired.
  void emitFinalDestCopy(QualType type, const LValue &src);

  void emitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void emitInitializationToLValue(Expr *e, LValue lv);

  void emitNullInitializationToLValue(mlir::Location loc, LValue lv);

  void Visit(Expr *e) { StmtVisitor<AggExprEmitter>::Visit(e); }

  void VisitArraySubscriptExpr(ArraySubscriptExpr *e) {
    emitAggLoadOfLValue(e);
  }

  void VisitCallExpr(const CallExpr *e);
  void VisitStmtExpr(const StmtExpr *e) {
    CIRGenFunction::StmtExprEvaluation eval(cgf);
    // The statement expression's value is produced by its compound
    // statement; the returned RValue is intentionally discarded since it
    // was emitted directly into `dest`.
    Address retAlloca =
        cgf.createMemTemp(e->getType(), cgf.getLoc(e->getSourceRange()));
    (void)cgf.emitCompoundStmt(*e->getSubStmt(), &retAlloca, dest);
  }

  void VisitBinAssign(const BinaryOperator *e) {
    // For an assignment to work, the value on the right has
    // to be compatible with the value on the left.
    assert(cgf.getContext().hasSameUnqualifiedType(e->getLHS()->getType(),
                                                   e->getRHS()->getType()) &&
           "Invalid assignment");

    if (isBlockVarRef(e->getLHS()) &&
        e->getRHS()->HasSideEffects(cgf.getContext())) {
      cgf.cgm.errorNYI(e->getSourceRange(),
                       "block var reference with side effects");
      return;
    }

    LValue lhs = cgf.emitLValue(e->getLHS());

    // If we have an atomic type, evaluate into the destination and then
    // do an atomic copy.
    // NOTE(review): one source line was lost in extraction here (the
    // atomic-type handling) — restore from upstream.

    // Codegen the RHS so that it stores directly into the LHS.
    // NOTE(review): one source line was lost in extraction here.
    AggValueSlot lhsSlot = AggValueSlot::forLValue(
        // NOTE(review): the argument lines of this forLValue call were lost
        // in extraction — restore from upstream.

    // A non-volatile aggregate destination might have volatile member.
    if (!lhsSlot.isVolatile() && cgf.hasVolatileMember(e->getLHS()->getType()))
      lhsSlot.setVolatile(true);

    cgf.emitAggExpr(e->getRHS(), lhsSlot);

    // Copy into the destination if the assignment isn't ignored.
    emitFinalDestCopy(e->getType(), lhs);

    if (!dest.isIgnored() && !dest.isExternallyDestructed() &&
        // NOTE(review): the final operand of this condition was lost in
        // extraction — restore from upstream.
      cgf.pushDestroy(QualType::DK_nontrivial_c_struct, dest.getAddress(),
                      e->getType());
  }

  void VisitDeclRefExpr(DeclRefExpr *e) { emitAggLoadOfLValue(e); }

  void VisitInitListExpr(InitListExpr *e);
  void VisitCXXConstructExpr(const CXXConstructExpr *e);

  void visitCXXParenListOrInitListExpr(Expr *e, ArrayRef<Expr *> args,
                                       FieldDecl *initializedFieldInUnion,
                                       Expr *arrayFiller);
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *die) {
    CIRGenFunction::CXXDefaultInitExprScope Scope(cgf, die);
    Visit(die->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *e) {
    // Ensure that we have a slot, but if we already do, remember
    // whether it was externally destructed.
    bool wasExternallyDestructed = dest.isExternallyDestructed();
    ensureDest(cgf.getLoc(e->getSourceRange()), e->getType());

    // We're going to push a destructor if there isn't already one.
    dest.setExternallyDestructed();

    Visit(e->getSubExpr());

    // Push that destructor we promised.
    if (!wasExternallyDestructed)
      cgf.emitCXXTemporary(e->getTemporary(), e->getType(), dest.getAddress());
  }
  void VisitLambdaExpr(LambdaExpr *e);
  void VisitExprWithCleanups(ExprWithCleanups *e);

  // Stubs -- These should be moved up when they are implemented.
  void VisitCastExpr(CastExpr *e) {
    switch (e->getCastKind()) {
    case CK_LValueToRValue:
      // If we're loading from a volatile type, force the destination
      // into existence.
      // NOTE(review): the condition line guarding this NYI (presumably a
      // volatile-qualification check) was lost in extraction — restore
      // from upstream.
      cgf.cgm.errorNYI(e->getSourceRange(),
                       "AggExprEmitter: volatile lvalue-to-rvalue cast");
      [[fallthrough]];
    case CK_NoOp:
    case CK_UserDefinedConversion:
    case CK_ConstructorConversion:
      assert(cgf.getContext().hasSameUnqualifiedType(e->getSubExpr()->getType(),
                                                     e->getType()) &&
             "Implicit cast types must be compatible");
      Visit(e->getSubExpr());
      break;
    default:
      cgf.cgm.errorNYI(e->getSourceRange(),
                       std::string("AggExprEmitter: VisitCastExpr: ") +
                           e->getCastKindName());
      break;
    }
  }
  void VisitStmt(Stmt *s) {
    cgf.cgm.errorNYI(s->getSourceRange(),
                     std::string("AggExprEmitter::VisitStmt: ") +
                         s->getStmtClassName());
  }
  void VisitParenExpr(ParenExpr *pe) { Visit(pe->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *ge) {
    Visit(ge->getResultExpr());
  }
  void VisitCoawaitExpr(CoawaitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoawaitExpr");
  }
  void VisitCoyieldExpr(CoyieldExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCoyieldExpr");
  }
  void VisitUnaryCoawait(UnaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitUnaryCoawait");
  }
  void VisitUnaryExtension(UnaryOperator *e) { Visit(e->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitSubstNonTypeTemplateParmExpr");
  }
  void VisitConstantExpr(ConstantExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitConstantExpr");
  }
  void VisitMemberExpr(MemberExpr *e) { emitAggLoadOfLValue(e); }
  void VisitUnaryDeref(UnaryOperator *e) { emitAggLoadOfLValue(e); }
  void VisitStringLiteral(StringLiteral *e) { emitAggLoadOfLValue(e); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *e);

  void VisitPredefinedExpr(const PredefinedExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitPredefinedExpr");
  }
  void VisitBinaryOperator(const BinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitBinaryOperator");
  }
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitPointerToDataMemberBinaryOperator");
  }
  void VisitBinComma(const BinaryOperator *e) {
    // The LHS is evaluated only for its side effects.
    cgf.emitIgnoredExpr(e->getLHS());
    Visit(e->getRHS());
  }
  void VisitBinCmp(const BinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitBinCmp");
  }
  void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitCXXRewrittenBinaryOperator");
  }
  void VisitObjCMessageExpr(ObjCMessageExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitObjCMessageExpr");
  }
  void VisitObjCIVarRefExpr(ObjCIvarRefExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitObjCIVarRefExpr");
  }

  void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *e) {
    // Emit the base first, then apply the updater's designated inits on top.
    AggValueSlot dest = ensureSlot(cgf.getLoc(e->getExprLoc()), e->getType());
    LValue destLV = cgf.makeAddrLValue(dest.getAddress(), e->getType());
    emitInitializationToLValue(e->getBase(), destLV);
    VisitInitListExpr(e->getUpdater());
  }
  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitAbstractConditionalOperator");
  }
  void VisitChooseExpr(const ChooseExpr *e) { Visit(e->getChosenSubExpr()); }
  void VisitCXXParenListInitExpr(CXXParenListInitExpr *e) {
    visitCXXParenListOrInitListExpr(e, e->getInitExprs(),
                                    // NOTE(review): one argument line was
                                    // lost in extraction here — restore
                                    // from upstream.
                                    e->getArrayFiller());
  }

  void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *e,
                              llvm::Value *outerBegin = nullptr) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitArrayInitLoopExpr");
  }
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitImplicitValueInitExpr");
  }
  void VisitNoInitExpr(NoInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitNoInitExpr");
  }
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *dae) {
    CIRGenFunction::CXXDefaultArgExprScope scope(cgf, dae);
    Visit(dae->getExpr());
  }
  void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitCXXInheritedCtorInitExpr");
  }
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitCXXStdInitializerListExpr");
  }
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitCXXScalarValueInitExpr");
  }
  void VisitCXXTypeidExpr(CXXTypeidExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXTypeidExpr");
  }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *e) {
    Visit(e->getSubExpr());
  }
  void VisitOpaqueValueExpr(OpaqueValueExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitOpaqueValueExpr");
  }

  void VisitPseudoObjectExpr(PseudoObjectExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "AggExprEmitter: VisitPseudoObjectExpr");
  }

  void VisitVAArgExpr(VAArgExpr *e) {
    // emitVAArg returns an aggregate value (not a pointer) at the CIR level.
    // ABI-specific pointer handling will be done later in LoweringPrepare.
    mlir::Value vaArgValue = cgf.emitVAArg(e);

    // Create a temporary alloca to hold the aggregate value.
    mlir::Location loc = cgf.getLoc(e->getSourceRange());
    Address tmpAddr = cgf.createMemTemp(e->getType(), loc, "vaarg.tmp");

    // Store the va_arg result into the temporary.
    cgf.emitAggregateStore(vaArgValue, tmpAddr);

    // Create an LValue from the temporary address.
    LValue tmpLValue = cgf.makeAddrLValue(tmpAddr, e->getType());

    // Copy the aggregate value from temporary to destination.
    emitFinalDestCopy(e->getType(), tmpLValue);
  }

  void VisitCXXThrowExpr(const CXXThrowExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitCXXThrowExpr");
  }
  void VisitAtomicExpr(AtomicExpr *e) {
    cgf.cgm.errorNYI(e->getSourceRange(), "AggExprEmitter: VisitAtomicExpr");
  }
};
403
404} // namespace
405
/// Return true if the array filler expression is "trivial", i.e. can be
/// covered by zero-initialization: a null filler, an empty init list whose
/// own filler is trivial, or a trivial default constructor call.
static bool isTrivialFiller(Expr *e) {
  if (!e)
    return true;

  // NOTE(review): the condition line guarding this `return true` was lost
  // in extraction — restore from upstream.
    return true;

  if (auto *ile = dyn_cast<InitListExpr>(e)) {
    if (ile->getNumInits())
      return false;
    // An empty list is trivial only if its own filler is.
    return isTrivialFiller(ile->getArrayFiller());
  }

  if (const auto *cons = dyn_cast_or_null<CXXConstructExpr>(e))
    return cons->getConstructor()->isDefaultConstructor() &&
           cons->getConstructor()->isTrivial();

  return false;
}
425
/// Given an expression with aggregate type that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result into DestPtr.
void AggExprEmitter::emitAggLoadOfLValue(const Expr *e) {
  LValue lv = cgf.emitLValue(e);

  // If the type of the l-value is atomic, then do an atomic load.
  // NOTE(review): one source line was lost in extraction here — restore
  // from upstream.

  emitFinalDestCopy(e->getType(), lv);
}
436
/// Emit a compound literal into the destination slot, taking care not to
/// clobber a destination that the literal itself might alias.
void AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *e) {
  if (dest.isPotentiallyAliased() && e->getType().isPODType(cgf.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    emitAggLoadOfLValue(e);
    return;
  }

  AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());

  // Block-scope compound literals are destroyed at the end of the enclosing
  // scope in C.
  bool destruct =
      !cgf.getLangOpts().CPlusPlus && !slot.isExternallyDestructed();
  if (destruct)
    // NOTE(review): the statement executed on this path was lost in
    // extraction — restore from upstream.

  cgf.emitAggExpr(e->getInitializer(), slot);

  if (destruct)
    if ([[maybe_unused]] QualType::DestructionKind dtorKind =
    // NOTE(review): the remainder of this initializer-condition was lost in
    // extraction — restore from upstream.
      cgf.cgm.errorNYI(e->getSourceRange(), "compound literal with destructor");
}
461
/// Emit initialization of a constant-size array: first the explicit
/// initializers in `args`, then the remaining elements via `arrayFiller`
/// (or zero-init) using a do-while loop over a pointer cursor.
void AggExprEmitter::emitArrayInit(Address destPtr, cir::ArrayType arrayTy,
                                   QualType arrayQTy, Expr *e,
                                   ArrayRef<Expr *> args, Expr *arrayFiller) {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  const mlir::Location loc = cgf.getLoc(e->getSourceRange());

  const uint64_t numInitElements = args.size();

  const QualType elementType =
      cgf.getContext().getAsArrayType(arrayQTy)->getElementType();

  // EH cleanups for partially-constructed arrays are not implemented yet.
  if (elementType.isDestructedType() && cgf.cgm.getLangOpts().Exceptions) {
    cgf.cgm.errorNYI(loc, "initialized array requires destruction");
    return;
  }

  const QualType elementPtrType = cgf.getContext().getPointerType(elementType);

  const mlir::Type cirElementType = cgf.convertType(elementType);
  const cir::PointerType cirElementPtrType =
      builder.getPointerTo(cirElementType);

  // Decay the array to a pointer to its first element.
  auto begin = cir::CastOp::create(builder, loc, cirElementPtrType,
                                   cir::CastKind::array_to_ptrdecay,
                                   destPtr.getPointer());

  const CharUnits elementSize =
      cgf.getContext().getTypeSizeInChars(elementType);
  const CharUnits elementAlign =
      destPtr.getAlignment().alignmentOfArrayElement(elementSize);

  // The 'current element to initialize'. The invariants on this
  // variable are complicated. Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  mlir::Value element = begin;

  // Don't build the 'one' before the cycle to avoid
  // emmiting the redundant `cir.const 1` instrs.
  mlir::Value one;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != numInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      // NOTE: despite its name, `one` here holds the element index `i`;
      // each element address is computed as a stride from `begin`.
      one = builder.getConstantInt(loc, cgf.ptrDiffTy, i);
      element = builder.createPtrStride(loc, begin, one);
    }

    const Address address = Address(element, cirElementType, elementAlign);
    const LValue elementLV = cgf.makeAddrLValue(address, elementType);
    emitInitializationToLValue(args[i], elementLV);
  }

  const uint64_t numArrayElements = arrayTy.getSize();

  // Check whether there's a non-trivial array-fill expression.
  const bool hasTrivialFiller = isTrivialFiller(arrayFiller);

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression. We can skip this if the we're
  // emitting to zeroed memory.
  if (numInitElements != numArrayElements &&
      !(dest.isZeroed() && hasTrivialFiller &&
        cgf.getTypes().isZeroInitializable(elementType))) {
    // Advance to the start of the rest of the array.
    if (numInitElements) {
      one = builder.getConstantInt(loc, cgf.ptrDiffTy, 1);
      element = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
                                         element, one);
    }

    // Allocate the temporary variable
    // to store the pointer to first unitialized element
    const Address tmpAddr = cgf.createTempAlloca(
        cirElementPtrType, cgf.getPointerAlign(), loc, "arrayinit.temp");
    LValue tmpLV = cgf.makeAddrLValue(tmpAddr, elementPtrType);
    cgf.emitStoreThroughLValue(RValue::get(element), tmpLV);

    // Compute the end of array
    cir::ConstantOp numArrayElementsConst = builder.getConstInt(
        loc, mlir::cast<cir::IntType>(cgf.ptrDiffTy), numArrayElements);
    mlir::Value end = cir::PtrStrideOp::create(builder, loc, cirElementPtrType,
                                               begin, numArrayElementsConst);

    builder.createDoWhile(
        loc,
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // Loop while the cursor has not reached one-past-the-end.
          cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);
          cir::CmpOp cmp = cir::CmpOp::create(builder, loc, cir::CmpOpKind::ne,
                                              currentElement, end);
          builder.createCondition(cmp);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          cir::LoadOp currentElement = builder.createLoad(loc, tmpAddr);

          // NOTE(review): one source line was lost in extraction here —
          // restore from upstream.

          // Emit the actual filler expression.
          LValue elementLV = cgf.makeAddrLValue(
              Address(currentElement, cirElementType, elementAlign),
              elementType);
          if (arrayFiller)
            emitInitializationToLValue(arrayFiller, elementLV);
          else
            emitNullInitializationToLValue(loc, elementLV);

          // Tell the EH cleanup that we finished with the last element.
          if (cgf.cgm.getLangOpts().Exceptions) {
            cgf.cgm.errorNYI(loc, "update destructed array element for EH");
            return;
          }

          // Advance pointer and store them to temporary variable
          cir::ConstantOp one = builder.getConstInt(
              loc, mlir::cast<cir::IntType>(cgf.ptrDiffTy), 1);
          auto nextElement = cir::PtrStrideOp::create(
              builder, loc, cirElementPtrType, currentElement, one);
          cgf.emitStoreThroughLValue(RValue::get(nextElement), tmpLV);

          builder.createYield(loc);
        });
  }
}
589
/// Perform the final copy to destPtr, if desired.
void AggExprEmitter::emitFinalDestCopy(QualType type, const LValue &src) {
  // If dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result. Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (dest.isIgnored())
    return;

  // NOTE(review): several source lines were lost in extraction here —
  // restore from upstream.

  AggValueSlot srcAgg = AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
  // NOTE(review): the remaining forLValue arguments were lost in extraction
  // — restore from upstream.
  emitCopy(type, dest, srcAgg);
}
608
/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::emitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  // NOTE(review): one source line was lost in extraction here — restore
  // from upstream.

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is. Use the minimum alignment of
  // the two sides.
  LValue destLV = cgf.makeAddrLValue(dest.getAddress(), type);
  LValue srcLV = cgf.makeAddrLValue(src.getAddress(), type);
  // NOTE(review): one source line was lost in extraction here — restore
  // from upstream.
  cgf.emitAggregateCopy(destLV, srcLV, type, dest.mayOverlap(),
                        dest.isVolatile() || src.isVolatile());
}
626
/// Emit the initializer expression `e` into the lvalue `lv`, dispatching on
/// the evaluation kind of the lvalue's type (complex, aggregate, scalar).
void AggExprEmitter::emitInitializationToLValue(Expr *e, LValue lv) {
  const QualType type = lv.getType();

  // NOTE(review): the `if (...)` header of this null-initialization path was
  // lost in extraction — restore from upstream.
    // Fall back to the current source location when the expression has no
    // valid range of its own.
    const mlir::Location loc = e->getSourceRange().isValid()
                                   ? cgf.getLoc(e->getSourceRange())
                                   : *cgf.currSrcLoc;
    return emitNullInitializationToLValue(loc, lv);
  }

  // NoInitExpr means "leave uninitialized" — nothing to emit.
  if (isa<NoInitExpr>(e))
    return;

  if (type->isReferenceType()) {
    RValue rv = cgf.emitReferenceBindingToExpr(e);
    return cgf.emitStoreThroughLValue(rv, lv);
  }

  switch (cgf.getEvaluationKind(type)) {
  case cir::TEK_Complex:
    cgf.emitComplexExprIntoLValue(e, lv, /*isInit*/ true);
    break;
  // NOTE(review): the TEK_Aggregate case (several lines, ending in the call
  // tail below) was lost in extraction — restore from upstream.
                                 dest.isZeroed()));

    return;
  case cir::TEK_Scalar:
    if (lv.isSimple())
      cgf.emitScalarInit(e, cgf.getLoc(e->getSourceRange()), lv);
    else
    // NOTE(review): the non-simple-lvalue store statement was lost in
    // extraction — restore from upstream.
    return;
  }
}
664
665void AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *e) {
666 AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
667 cgf.emitCXXConstructExpr(e, slot);
668}
669
/// Store a null/zero value of the lvalue's type into `lv`, skipping the work
/// when the destination slot is already known to be zeroed.
void AggExprEmitter::emitNullInitializationToLValue(mlir::Location loc,
                                                    LValue lv) {
  const QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (dest.isZeroed() && cgf.getTypes().isZeroInitializable(type))
    return;

  if (cgf.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    mlir::Value null = cgf.cgm.emitNullConstant(type, loc);
    if (lv.isSimple()) {
      cgf.emitStoreOfScalar(null, lv, /* isInitialization */ true);
      return;
    }

    // NOTE(review): the non-simple-lvalue handling was lost in extraction —
    // restore from upstream.
    return;
  }

  // There's a potential optimization opportunity in combining
  // memsets; that would be easy for arrays, but relatively
  // difficult for structures with the current code.
  cgf.emitNullInitialization(loc, lv.getAddress(), lv.getType());
}
696
/// Emit a lambda expression by initializing each capture field of the
/// lambda's closure class in the destination slot, recording a readable name
/// for each capture field in cgm.lambdaFieldToName.
void AggExprEmitter::VisitLambdaExpr(LambdaExpr *e) {
  CIRGenFunction::SourceLocRAIIObject loc{cgf, cgf.getLoc(e->getSourceRange())};
  AggValueSlot slot = ensureSlot(cgf.getLoc(e->getSourceRange()), e->getType());
  [[maybe_unused]] LValue slotLV =
      cgf.makeAddrLValue(slot.getAddress(), e->getType());

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception or contains branch out of the expressions.
  // NOTE(review): one source line was lost in extraction here — restore
  // from upstream.

  // Walk the closure fields, their captures, and capture initializers in
  // lockstep; they are guaranteed to correspond one-to-one.
  for (auto [curField, capture, captureInit] : llvm::zip(
           e->getLambdaClass()->fields(), e->captures(), e->capture_inits())) {
    // Pick a name for the field.
    llvm::StringRef fieldName = curField->getName();
    if (capture.capturesVariable()) {
      assert(!curField->isBitField() && "lambdas don't have bitfield members!");
      ValueDecl *v = capture.getCapturedVar();
      fieldName = v->getName();
      cgf.cgm.lambdaFieldToName[curField] = fieldName;
    } else if (capture.capturesThis()) {
      cgf.cgm.lambdaFieldToName[curField] = "this";
    } else {
      cgf.cgm.errorNYI(e->getSourceRange(), "Unhandled capture kind");
      cgf.cgm.lambdaFieldToName[curField] = "unhandled-capture-kind";
    }

    // Emit initialization
    LValue lv =
        cgf.emitLValueForFieldInitialization(slotLV, curField, fieldName);
    if (curField->hasCapturedVLAType())
      cgf.cgm.errorNYI(e->getSourceRange(), "lambda captured VLA type");

    emitInitializationToLValue(captureInit, lv);

    // Push a destructor if necessary.
    if ([[maybe_unused]] QualType::DestructionKind DtorKind =
            curField->getType().isDestructedType())
      cgf.cgm.errorNYI(e->getSourceRange(), "lambda with destructed field");
  }
}
737
738void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *e) {
739 CIRGenFunction::RunCleanupsScope cleanups(cgf);
740 Visit(e->getSubExpr());
741}
742
/// Emit a call whose result is an aggregate, routing the return value into
/// the destination slot via withReturnValueSlot.
void AggExprEmitter::VisitCallExpr(const CallExpr *e) {
  // NOTE(review): the condition line guarding this NYI (presumably a
  // reference-return-type check) was lost in extraction — restore from
  // upstream.
    cgf.cgm.errorNYI(e->getSourceRange(), "reference return type");
    return;
  }

  withReturnValueSlot(
      e, [&](ReturnValueSlot slot) { return cgf.emitCallExpr(e, slot); });
}
752
/// Invoke `fn` with a return value slot backed by the current destination.
/// NOTE(review): this definition lost several source lines in extraction
/// (the requiresDestruction initializer and the temporary-slot logic) —
/// restore from upstream before relying on it.
void AggExprEmitter::withReturnValueSlot(
    const Expr *e, llvm::function_ref<RValue(ReturnValueSlot)> fn) {
  QualType retTy = e->getType();

  // NOTE(review): one source line was lost in extraction here.
  bool requiresDestruction =
  // NOTE(review): the initializer of requiresDestruction was lost in
  // extraction.
  if (requiresDestruction)
    cgf.cgm.errorNYI(
        e->getSourceRange(),
        "withReturnValueSlot: return value requiring destruction is NYI");

  // If it makes no observable difference, save a memcpy + temporary.
  //
  // We need to always provide our own temporary if destruction is required.
  // Otherwise, fn will emit its own, notice that it's "unused", and end its
  // lifetime before we have the chance to emit a proper destructor call.
  // NOTE(review): source lines were lost in extraction here.

  Address retAddr = dest.getAddress();
  // NOTE(review): one source line was lost in extraction here.

  // NOTE(review): source lines were lost in extraction here.
  fn(ReturnValueSlot(retAddr));
}
780
/// Emit an initializer list, forwarding transparent lists to their single
/// initializer and delegating the rest to visitCXXParenListOrInitListExpr.
void AggExprEmitter::VisitInitListExpr(InitListExpr *e) {
  // NOTE(review): the condition line guarding this unreachable was lost in
  // extraction — restore from upstream.
    llvm_unreachable("GNU array range designator extension");

  // A transparent list forwards directly to its single initializer.
  if (e->isTransparent())
    return Visit(e->getInit(0));

  visitCXXParenListOrInitListExpr(
  // NOTE(review): the argument lines of this call were lost in extraction —
  // restore from upstream.
}
791
/// Shared emission path for InitListExpr and CXXParenListInitExpr: handles
/// constant arrays via emitArrayInit, then record types by initializing base
/// classes and fields in order, defaulting trailing fields to null.
void AggExprEmitter::visitCXXParenListOrInitListExpr(
    Expr *e, ArrayRef<Expr *> args, FieldDecl *initializedFieldInUnion,
    Expr *arrayFiller) {

  const mlir::Location loc = cgf.getLoc(e->getSourceRange());
  const AggValueSlot dest = ensureSlot(loc, e->getType());

  if (e->getType()->isConstantArrayType()) {
    cir::ArrayType arrayTy =
    // NOTE(review): the initializer line was lost in extraction — restore
    // from upstream.
    emitArrayInit(dest.getAddress(), arrayTy, e->getType(), e, args,
                  arrayFiller);
    return;
  } else if (e->getType()->isVariableArrayType()) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "visitCXXParenListOrInitListExpr variable array type");
    return;
  }

  if (e->getType()->isArrayType()) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "visitCXXParenListOrInitListExpr array type");
    return;
  }

  assert(e->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the approprate value. This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned numInitElements = args.size();
  auto *record = e->getType()->castAsRecordDecl();

  // We'll need to enter cleanup scopes in case any of the element
  // initializers throws an exception.
  // NOTE(review): one source line was lost in extraction here.

  unsigned curInitIndex = 0;

  // Emit initialization of base classes.
  if (auto *cxxrd = dyn_cast<CXXRecordDecl>(record)) {
    assert(numInitElements >= cxxrd->getNumBases() &&
           "missing initializer for base class");
    for (auto &base : cxxrd->bases()) {
      assert(!base.isVirtual() && "should not see vbases here");
      CXXRecordDecl *baseRD = base.getType()->getAsCXXRecordDecl();
      Address address = cgf.getAddressOfDirectBaseInCompleteClass(
          loc, dest.getAddress(), cxxrd, baseRD,
          /*baseIsVirtual=*/false);
      // NOTE(review): one source line was lost in extraction here.
      AggValueSlot aggSlot = AggValueSlot::forAddr(
          address, Qualifiers(), AggValueSlot::IsDestructed,
          // NOTE(review): one argument line was lost in extraction here.
          cgf.getOverlapForBaseInit(cxxrd, baseRD, false));
      cgf.emitAggExpr(args[curInitIndex++], aggSlot);
      if (base.getType().isDestructedType()) {
        cgf.cgm.errorNYI(e->getSourceRange(),
                         "push deferred deactivation cleanup");
        return;
      }
    }
  }

  // Prepare a 'this' for CXXDefaultInitExprs.
  CIRGenFunction::FieldConstructionScope fcScope(cgf, dest.getAddress());

  LValue destLV = cgf.makeAddrLValue(dest.getAddress(), e->getType());

  if (record->isUnion()) {
    cgf.cgm.errorNYI(e->getSourceRange(),
                     "visitCXXParenListOrInitListExpr union type");
    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  for (const FieldDecl *field : record->fields()) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitField())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == numInitElements && dest.isZeroed() &&
        // NOTE(review): the final operand of this condition was lost in
        // extraction — restore from upstream.
      break;
    LValue lv =
        cgf.emitLValueForFieldInitialization(destLV, field, field->getName());
    // We never generate write-barriers for initialized fields.
    // NOTE(review): one source line was lost in extraction here.

    if (curInitIndex < numInitElements) {
      // Store the initializer into the field.
      CIRGenFunction::SourceLocRAIIObject loc{
          cgf, cgf.getLoc(record->getSourceRange())};
      emitInitializationToLValue(args[curInitIndex++], lv);
    } else {
      // We're out of initializers; default-initialize to null
      emitNullInitializationToLValue(cgf.getLoc(e->getSourceRange()), lv);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    if (field->getType().isDestructedType()) {
      cgf.cgm.errorNYI(e->getSourceRange(),
                       "visitCXXParenListOrInitListExpr destructor");
      return;
    }

    // From classic codegen, maybe not useful for CIR:
    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
  }
}
913
914// TODO(cir): This could be shared with classic codegen.
916 const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual) {
917 // If the most-derived object is a field declared with [[no_unique_address]],
918 // the tail padding of any virtual base could be reused for other subobjects
919 // of that field's class.
920 if (isVirtual)
922
923 // If the base class is laid out entirely within the nvsize of the derived
924 // class, its tail padding cannot yet be initialized, so we can issue
925 // stores at the full width of the base class.
926 const ASTRecordLayout &layout = getContext().getASTRecordLayout(rd);
927 if (layout.getBaseClassOffset(baseRD) +
928 getContext().getASTRecordLayout(baseRD).getSize() <=
929 layout.getNonVirtualSize())
931
932 // The tail padding may contain values we need to preserve.
934}
935
937 AggExprEmitter(*this, slot).Visit(const_cast<Expr *>(e));
938}
939
941 AggValueSlot::Overlap_t mayOverlap,
942 bool isVolatile) {
943 // TODO(cir): this function needs improvements, commented code for now since
944 // this will be touched again soon.
945 assert(!ty->isAnyComplexType() && "Unexpected copy of complex");
946
947 Address destPtr = dest.getAddress();
948 Address srcPtr = src.getAddress();
949
950 if (getLangOpts().CPlusPlus) {
951 if (auto *record = ty->getAsCXXRecordDecl()) {
952 assert((record->hasTrivialCopyConstructor() ||
953 record->hasTrivialCopyAssignment() ||
954 record->hasTrivialMoveConstructor() ||
955 record->hasTrivialMoveAssignment() ||
956 record->hasAttr<TrivialABIAttr>() || record->isUnion()) &&
957 "Trying to aggregate-copy a type without a trivial copy/move "
958 "constructor or assignment operator");
959 // Ignore empty classes in C++.
960 if (record->isEmpty())
961 return;
962 }
963 }
964
966
967 // Aggregate assignment turns into llvm.memcpy. This is almost valid per
968 // C99 6.5.16.1p3, which states "If the value being stored in an object is
969 // read from another object that overlaps in anyway the storage of the first
970 // object, then the overlap shall be exact and the two objects shall have
971 // qualified or unqualified versions of a compatible type."
972 //
973 // memcpy is not defined if the source and destination pointers are exactly
974 // equal, but other compilers do this optimization, and almost every memcpy
975 // implementation handles this case safely. If there is a libc that does not
976 // safely handle this, we can add a target hook.
977
978 // Get data size info for this aggregate. Don't copy the tail padding if this
979 // might be a potentially-overlapping subobject, since the tail padding might
980 // be occupied by a different object. Otherwise, copying it is fine.
981 TypeInfoChars typeInfo;
982 if (mayOverlap)
983 typeInfo = getContext().getTypeInfoDataSizeInChars(ty);
984 else
985 typeInfo = getContext().getTypeInfoInChars(ty);
986
988
989 // NOTE(cir): original codegen would normally convert destPtr and srcPtr to
990 // i8* since memcpy operates on bytes. We don't need that in CIR because
991 // cir.copy will operate on any CIR pointer that points to a sized type.
992
993 // Don't do any of the memmove_collectable tests if GC isn't set.
994 if (cgm.getLangOpts().getGC() != LangOptions::NonGC)
995 cgm.errorNYI("emitAggregateCopy: GC");
996
997 [[maybe_unused]] cir::CopyOp copyOp =
998 builder.createCopy(destPtr.getPointer(), srcPtr.getPointer(), isVolatile);
999
1001}
1002
1003// TODO(cir): This could be shared with classic codegen.
1006 if (!fd->hasAttr<NoUniqueAddressAttr>() || !fd->getType()->isRecordType())
1008
1009 // If the field lies entirely within the enclosing class's nvsize, its tail
1010 // padding cannot overlap any already-initialized object. (The only subobjects
1011 // with greater addresses that might already be initialized are vbases.)
1012 const RecordDecl *classRD = fd->getParent();
1013 const ASTRecordLayout &layout = getContext().getASTRecordLayout(classRD);
1014 if (layout.getFieldOffset(fd->getFieldIndex()) +
1015 getContext().getTypeSize(fd->getType()) <=
1016 (uint64_t)getContext().toBits(layout.getNonVirtualSize()))
1018
1019 // The tail padding may contain values we need to preserve.
1021}
1022
static bool isBlockVarRef(const Expr *E)
Is the value of the given expression possibly a reference to or into a __block variable?
static bool isTrivialFiller(Expr *e)
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
cir::ConditionOp createCondition(mlir::Value condition)
Create a loop condition.
cir::PtrStrideOp createPtrStride(mlir::Location loc, mlir::Value base, mlir::Value stride)
cir::PointerType getPointerTo(mlir::Type ty)
cir::DoWhileOp createDoWhile(mlir::Location loc, llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> condBuilder, llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> bodyBuilder)
Create a do-while operation.
cir::ConstantOp getConstantInt(mlir::Location loc, mlir::Type ty, int64_t value)
cir::YieldOp createYield(mlir::Location loc, mlir::ValueRange value={})
Create a yield operation.
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object...
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4353
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2721
QualType getElementType() const
Definition TypeBase.h:3735
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4038
Expr * getLHS() const
Definition Expr.h:4088
Expr * getRHS() const
Definition Expr.h:4090
mlir::Value getPointer() const
Definition Address.h:90
mlir::Type getElementType() const
Definition Address.h:117
clang::CharUnits getAlignment() const
Definition Address.h:130
An aggregate value slot.
IsZeroed_t isZeroed() const
Overlap_t mayOverlap() const
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
IsDestructed_t isExternallyDestructed() const
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
void setExternallyDestructed(bool destructed=true)
IsAliased_t isPotentiallyAliased() const
void setVolatile(bool flag)
cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal)
cir::LoadOp createLoad(mlir::Location loc, Address addr, bool isVolatile=false)
static bool hasScalarEvaluationKind(clang::QualType type)
mlir::Type convertType(clang::QualType t)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
CIRGenTypes & getTypes() const
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
void emitAggregateCopy(LValue dest, LValue src, QualType eltTy, AggValueSlot::Overlap_t mayOverlap, bool isVolatile=false)
Emit an aggregate copy.
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
AggValueSlot::Overlap_t getOverlapForFieldInit(const FieldDecl *fd)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
LValue emitAggExprToLValue(const Expr *e)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
static bool hasAggregateEvaluationKind(clang::QualType type)
void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit=false)
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, except that if the Field is a reference, this will return the address of the...
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
Address getAddressOfDirectBaseInCompleteClass(mlir::Location loc, Address value, const CXXRecordDecl *derived, const CXXRecordDecl *base, bool baseIsVirtual)
Convert the given pointer to a complete class to the given direct base.
CIRGenBuilderTy & getBuilder()
AggValueSlot::Overlap_t getOverlapForBaseInit(const CXXRecordDecl *rd, const CXXRecordDecl *baseRD, bool isVirtual)
Determine whether a base class initialization may overlap some other object.
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
clang::ASTContext & getContext() const
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignment and cast it to the defa...
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
const clang::LangOptions & getLangOpts() const
mlir::Value emitNullConstant(QualType t, mlir::Location loc)
Return the result of value-initializing the given type, i.e.
llvm::DenseMap< const clang::FieldDecl *, llvm::StringRef > lambdaFieldToName
Keep a map between lambda fields and names, this needs to be per module since lambdas might get gener...
bool isZeroInitializable(clang::QualType ty)
Return whether a type can be zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
Address getAddress() const
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
CXXTemporary * getTemporary()
Definition ExprCXX.h:1511
const Expr * getSubExpr() const
Definition ExprCXX.h:1515
Expr * getExpr()
Get the initialization expression that will be used.
Definition ExprCXX.cpp:1105
MutableArrayRef< Expr * > getInitExprs()
Definition ExprCXX.h:5181
FieldDecl * getInitializedFieldInUnion()
Definition ExprCXX.h:5219
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprCXX.h:353
SourceRange getSourceRange() const LLVM_READONLY
Retrieve the source range of the expression.
Definition ExprCXX.h:828
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprCXX.h:902
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1602
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3676
CastKind getCastKind() const
Definition Expr.h:3720
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1950
Expr * getSubExpr()
Definition Expr.h:3726
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition Expr.h:4884
const Expr * getInitializer() const
Definition Expr.h:3633
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1270
bool hasAttr() const
Definition DeclBase.h:577
InitListExpr * getUpdater() const
Definition Expr.h:5936
This represents one expression.
Definition Expr.h:112
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3085
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3669
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:276
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3160
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
const Expr * getSubExpr() const
Definition Expr.h:1062
Expr * getResultExpr()
Return the result expression of this controlling expression.
Definition Expr.h:6462
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition Expr.cpp:2461
FieldDecl * getInitializedFieldInUnion()
If this initializes a union, specifies which field in the union to initialize.
Definition Expr.h:5425
bool hadArrayRangeDesignator() const
Definition Expr.h:5483
Expr * getArrayFiller()
If this initializer list initializes an array with more elements than there are initializers in the l...
Definition Expr.h:5401
const Expr * getInit(unsigned Init) const
Definition Expr.h:5353
ArrayRef< Expr * > inits()
Definition Expr.h:5349
llvm::iterator_range< capture_init_iterator > capture_inits()
Retrieve the initialization expressions for this lambda's captures.
Definition ExprCXX.h:2083
capture_range captures() const
Retrieve this lambda's captures.
Definition ExprCXX.cpp:1371
CXXRecordDecl * getLambdaClass() const
Retrieve the class that corresponds to the lambda.
Definition ExprCXX.cpp:1400
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4937
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3364
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1178
const Expr * getSubExpr() const
Definition Expr.h:2199
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8376
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1545
bool isPODType(const ASTContext &Context) const
Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
Definition Type.cpp:2695
Represents a struct/union/class.
Definition Decl.h:4324
field_range fields() const
Definition Decl.h:4527
CompoundStmt * getSubStmt()
Definition Expr.h:4612
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantArrayType() const
Definition TypeBase.h:8632
bool isArrayType() const
Definition TypeBase.h:8628
bool isReferenceType() const
Definition TypeBase.h:8553
bool isVariableArrayType() const
Definition TypeBase.h:8640
RecordDecl * castAsRecordDecl() const
Definition Type.h:48
bool isAnyComplexType() const
Definition TypeBase.h:8664
bool isRecordType() const
Definition TypeBase.h:8656
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
Expr * getSubExpr() const
Definition Expr.h:2285
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
constexpr Variable var(Literal L)
Returns the variable of L.
Definition CNFFormula.h:64
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
U cast(CodeGen::Address addr)
Definition Address.h:327
unsigned long uint64_t
static bool emitLifetimeMarkers()
static bool aggValueSlotDestructedFlag()
static bool aggValueSlotGC()
static bool aggValueSlotAlias()
static bool opLoadStoreAtomic()
static bool aggEmitFinalDestCopyRValue()
static bool aggValueSlotVolatile()
static bool opScopeCleanupRegion()
static bool atomicTypes()
static bool cudaSupport()
static bool requiresCleanups()
clang::CharUnits getPointerAlign() const