clang 22.0.0git
CIRGenFunction.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Internal per-function state used for AST-to-ClangIR code gen
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
14
15#include "CIRGenCXXABI.h"
16#include "CIRGenCall.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/Location.h"
19#include "clang/AST/ExprCXX.h"
22
23#include <cassert>
24
25namespace clang::CIRGen {
26
28 bool suppressNewContext)
29 : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
30 ehStack.setCGF(this);
31}
32
34
35// This is copied from clang/lib/CodeGen/CodeGenFunction.cpp
37 type = type.getCanonicalType();
38 while (true) {
39 switch (type->getTypeClass()) {
40#define TYPE(name, parent)
41#define ABSTRACT_TYPE(name, parent)
42#define NON_CANONICAL_TYPE(name, parent) case Type::name:
43#define DEPENDENT_TYPE(name, parent) case Type::name:
44#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
45#include "clang/AST/TypeNodes.inc"
46 llvm_unreachable("non-canonical or dependent type in IR-generation");
47
48 case Type::Auto:
49 case Type::DeducedTemplateSpecialization:
50 llvm_unreachable("undeduced type in IR-generation");
51
52 // Various scalar types.
53 case Type::Builtin:
54 case Type::Pointer:
55 case Type::BlockPointer:
56 case Type::LValueReference:
57 case Type::RValueReference:
58 case Type::MemberPointer:
59 case Type::Vector:
60 case Type::ExtVector:
61 case Type::ConstantMatrix:
62 case Type::FunctionProto:
63 case Type::FunctionNoProto:
64 case Type::Enum:
65 case Type::ObjCObjectPointer:
66 case Type::Pipe:
67 case Type::BitInt:
68 case Type::HLSLAttributedResource:
69 case Type::HLSLInlineSpirv:
70 return cir::TEK_Scalar;
71
72 // Complexes.
73 case Type::Complex:
74 return cir::TEK_Complex;
75
76 // Arrays, records, and Objective-C objects.
77 case Type::ConstantArray:
78 case Type::IncompleteArray:
79 case Type::VariableArray:
80 case Type::Record:
81 case Type::ObjCObject:
82 case Type::ObjCInterface:
83 case Type::ArrayParameter:
84 return cir::TEK_Aggregate;
85
86 // We operate on atomic values according to their underlying type.
87 case Type::Atomic:
88 type = cast<AtomicType>(type)->getValueType();
89 continue;
90 }
91 llvm_unreachable("unknown type kind!");
92 }
93}
94
96 return cgm.getTypes().convertTypeForMem(t);
97}
98
100 return cgm.getTypes().convertType(t);
101}
102
104 // Some AST nodes might contain invalid source locations (e.g.
105 // CXXDefaultArgExpr), workaround that to still get something out.
106 if (srcLoc.isValid()) {
108 PresumedLoc pLoc = sm.getPresumedLoc(srcLoc);
109 StringRef filename = pLoc.getFilename();
110 return mlir::FileLineColLoc::get(builder.getStringAttr(filename),
111 pLoc.getLine(), pLoc.getColumn());
112 }
113 // Do our best...
114 assert(currSrcLoc && "expected to inherit some source location");
115 return *currSrcLoc;
116}
117
118mlir::Location CIRGenFunction::getLoc(SourceRange srcLoc) {
119 // Some AST nodes might contain invalid source locations (e.g.
120 // CXXDefaultArgExpr), workaround that to still get something out.
121 if (srcLoc.isValid()) {
122 mlir::Location beg = getLoc(srcLoc.getBegin());
123 mlir::Location end = getLoc(srcLoc.getEnd());
124 SmallVector<mlir::Location, 2> locs = {beg, end};
125 mlir::Attribute metadata;
126 return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
127 }
128 if (currSrcLoc) {
129 return *currSrcLoc;
130 }
131 // We're brave, but time to give up.
132 return builder.getUnknownLoc();
133}
134
135mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) {
136 SmallVector<mlir::Location, 2> locs = {lhs, rhs};
137 mlir::Attribute metadata;
138 return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
139}
140
141bool CIRGenFunction::containsLabel(const Stmt *s, bool ignoreCaseStmts) {
142 // Null statement, not a label!
143 if (!s)
144 return false;
145
146 // If this is a label, we have to emit the code, consider something like:
147 // if (0) { ... foo: bar(); } goto foo;
148 //
149 // TODO: If anyone cared, we could track __label__'s, since we know that you
150 // can't jump to one from outside their declared region.
151 if (isa<LabelStmt>(s))
152 return true;
153
154 // If this is a case/default statement, and we haven't seen a switch, we
155 // have to emit the code.
156 if (isa<SwitchCase>(s) && !ignoreCaseStmts)
157 return true;
158
159 // If this is a switch statement, we want to ignore case statements when we
160 // recursively process the sub-statements of the switch. If we haven't
161 // encountered a switch statement, we treat case statements like labels, but
162 // if we are processing a switch statement, case statements are expected.
163 if (isa<SwitchStmt>(s))
164 ignoreCaseStmts = true;
165
166 // Scan subexpressions for verboten labels.
167 return std::any_of(s->child_begin(), s->child_end(),
168 [=](const Stmt *subStmt) {
169 return containsLabel(subStmt, ignoreCaseStmts);
170 });
171}
172
173/// If the specified expression does not fold to a constant, or if it does but
174/// contains a label, return false. If it constant folds return true and set
175/// the boolean result in Result.
176bool CIRGenFunction::constantFoldsToBool(const Expr *cond, bool &resultBool,
177 bool allowLabels) {
178 llvm::APSInt resultInt;
179 if (!constantFoldsToSimpleInteger(cond, resultInt, allowLabels))
180 return false;
181
182 resultBool = resultInt.getBoolValue();
183 return true;
184}
185
186/// If the specified expression does not fold to a constant, or if it does
187/// fold but contains a label, return false. If it constant folds, return
188/// true and set the folded value.
190 llvm::APSInt &resultInt,
191 bool allowLabels) {
192 // FIXME: Rename and handle conversion of other evaluatable things
193 // to bool.
194 Expr::EvalResult result;
195 if (!cond->EvaluateAsInt(result, getContext()))
196 return false; // Not foldable, not integer or not fully evaluatable.
197
198 llvm::APSInt intValue = result.Val.getInt();
199 if (!allowLabels && containsLabel(cond))
200 return false; // Contains a label.
201
202 resultInt = intValue;
203 return true;
204}
205
206void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
207 CharUnits alignment) {
208 if (!type->isVoidType()) {
209 mlir::Value addr = emitAlloca("__retval", convertType(type), loc, alignment,
210 /*insertIntoFnEntryBlock=*/false);
211 fnRetAlloca = addr;
212 returnValue = Address(addr, alignment);
213 }
214}
215
216void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
217 mlir::Location loc, CharUnits alignment,
218 bool isParam) {
219 assert(isa<NamedDecl>(var) && "Needs a named decl");
220 assert(!symbolTable.count(var) && "not supposed to be available just yet");
221
222 auto allocaOp = addrVal.getDefiningOp<cir::AllocaOp>();
223 assert(allocaOp && "expected cir::AllocaOp");
224
225 if (isParam)
226 allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
227 if (ty->isReferenceType() || ty.isConstQualified())
228 allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext()));
229
230 symbolTable.insert(var, allocaOp);
231}
232
// Tears down a lexical scope: runs pending cleanups, wires up the cleanup
// block, threads/erases the return block, and terminates the scope with a
// cir.yield (nested scope) or return path (function scope, depth == 0).
// NOTE(review): this listing came through an extraction that fused original
// line numbers into the text and dropped lines where the numbering below has
// gaps (e.g. 239 -> 241, likely the function signature and an assert);
// restore from upstream before building.
234 CIRGenBuilderTy &builder = cgf.builder;
235 LexicalScope *localScope = cgf.curLexScope;
236
237 auto applyCleanup = [&]() {
238 if (performCleanup) {
239 // ApplyDebugLocation
241 forceCleanup();
242 }
243 };
244
245 if (returnBlock != nullptr) {
246 // Write out the return block, which loads the value from `__retval` and
247 // issues the `cir.return`.
248 mlir::OpBuilder::InsertionGuard guard(builder);
249 builder.setInsertionPointToEnd(returnBlock);
250 (void)emitReturn(*returnLoc);
251 }
252
253 auto insertCleanupAndLeave = [&](mlir::Block *insPt) {
254 mlir::OpBuilder::InsertionGuard guard(builder);
255 builder.setInsertionPointToEnd(insPt);
256
257 // If we still don't have a cleanup block, it means that `applyCleanup`
258 // below might be able to get us one.
259 mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
260
261 // Leverage and defers to RunCleanupsScope's dtor and scope handling.
262 applyCleanup();
263
264 // If we now have one after `applyCleanup`, hook it up properly.
265 if (!cleanupBlock && localScope->getCleanupBlock(builder)) {
266 cleanupBlock = localScope->getCleanupBlock(builder);
267 builder.create<cir::BrOp>(insPt->back().getLoc(), cleanupBlock);
268 if (!cleanupBlock->mightHaveTerminator()) {
269 mlir::OpBuilder::InsertionGuard guard(builder);
270 builder.setInsertionPointToEnd(cleanupBlock);
271 builder.create<cir::YieldOp>(localScope->endLoc);
272 }
273 }
274
275 if (localScope->depth == 0) {
276 // Reached the end of the function.
277 if (returnBlock != nullptr) {
278 if (returnBlock->getUses().empty()) {
279 returnBlock->erase();
280 } else {
281 // Thread return block via cleanup block.
282 if (cleanupBlock) {
283 for (mlir::BlockOperand &blockUse : returnBlock->getUses()) {
284 cir::BrOp brOp = mlir::cast<cir::BrOp>(blockUse.getOwner());
285 brOp.setSuccessor(cleanupBlock);
286 }
287 }
288
289 builder.create<cir::BrOp>(*returnLoc, returnBlock);
290 return;
291 }
292 }
293 emitImplicitReturn();
294 return;
295 }
296
297 // End of any local scope != function
298 // Ternary ops have to deal with matching arms for yielding types
299 // and do return a value, it must do its own cir.yield insertion.
300 if (!localScope->isTernary() && !insPt->mightHaveTerminator()) {
301 !retVal ? builder.create<cir::YieldOp>(localScope->endLoc)
302 : builder.create<cir::YieldOp>(localScope->endLoc, retVal);
303 }
304 };
305
306 // If a cleanup block has been created at some point, branch to it
307 // and set the insertion point to continue at the cleanup block.
308 // Terminators are then inserted either in the cleanup block or
309 // inline in this current block.
310 mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
311 if (cleanupBlock)
312 insertCleanupAndLeave(cleanupBlock);
313
314 // Now deal with any pending block wrap up like implicit end of
315 // scope.
316
317 mlir::Block *curBlock = builder.getBlock();
318 if (isGlobalInit() && !curBlock)
319 return;
320 if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
321 return;
322
323 // Get rid of any empty block at the end of the scope.
324 bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
325 if (!entryBlock && curBlock->empty()) {
326 curBlock->erase();
327 if (returnBlock != nullptr && returnBlock->getUses().empty())
328 returnBlock->erase();
329 return;
330 }
331
332 // If there's a cleanup block, branch to it, nothing else to do.
333 if (cleanupBlock) {
334 builder.create<cir::BrOp>(curBlock->back().getLoc(), cleanupBlock);
335 return;
336 }
337
338 // No pre-existent cleanup block, emit cleanup code and yield/return.
339 insertCleanupAndLeave(curBlock);
340}
341
// Emits the terminating cir.return for this scope: for non-void functions it
// first loads the value out of the `__retval` slot (cgf.fnRetAlloca).
// NOTE(review): the numbering gap 345 -> 347 below shows one dropped source
// line (presumably a coroutine-related MissingFeatures assert) — restore from
// upstream before building.
342cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
343 CIRGenBuilderTy &builder = cgf.getBuilder();
344
345 // If we are on a coroutine, add the coro_end builtin call.
347
348 auto fn = dyn_cast<cir::FuncOp>(cgf.curFn);
349 assert(fn && "emitReturn from non-function");
350 if (!fn.getFunctionType().hasVoidReturn()) {
351 // Load the value from `__retval` and return it via the `cir.return` op.
352 auto value = builder.create<cir::LoadOp>(
353 loc, fn.getFunctionType().getReturnType(), *cgf.fnRetAlloca);
354 return builder.create<cir::ReturnOp>(loc,
355 llvm::ArrayRef(value.getResult()));
356 }
357 return builder.create<cir::ReturnOp>(loc);
358}
359
360// This is copied from CodeGenModule::MayDropFunctionReturn. This is a
361// candidate for sharing between CIRGen and CodeGen.
362static bool mayDropFunctionReturn(const ASTContext &astContext,
363 QualType returnType) {
364 // We can't just discard the return value for a record type with a complex
365 // destructor or a non-trivially copyable type.
366 if (const auto *classDecl = returnType->getAsCXXRecordDecl())
367 return classDecl->hasTrivialDestructor();
368 return returnType.isTriviallyCopyableType(astContext);
369}
370
// Emits the terminator for a function whose body falls off the end without an
// explicit return: trap/unreachable when that is UB in C++, otherwise an
// implicit return through emitReturn.
// NOTE(review): the numbering gap 389 -> 391 shows one dropped source line
// inside the shouldEmitUnreachable branch — restore from upstream.
371void CIRGenFunction::LexicalScope::emitImplicitReturn() {
372 CIRGenBuilderTy &builder = cgf.getBuilder();
373 LexicalScope *localScope = cgf.curLexScope;
374
375 const auto *fd = cast<clang::FunctionDecl>(cgf.curGD.getDecl());
376
377 // In C++, flowing off the end of a non-void function is always undefined
378 // behavior. In C, flowing off the end of a non-void function is undefined
379 // behavior only if the non-existent return value is used by the caller.
380 // That influences whether the terminating op is trap, unreachable, or
381 // return.
382 if (cgf.getLangOpts().CPlusPlus && !fd->hasImplicitReturnZero() &&
383 !cgf.sawAsmBlock && !fd->getReturnType()->isVoidType() &&
384 builder.getInsertionBlock()) {
385 bool shouldEmitUnreachable =
386 cgf.cgm.getCodeGenOpts().StrictReturn ||
387 !mayDropFunctionReturn(fd->getASTContext(), fd->getReturnType());
388
389 if (shouldEmitUnreachable) {
391 if (cgf.cgm.getCodeGenOpts().OptimizationLevel == 0)
392 builder.create<cir::TrapOp>(localScope->endLoc)
393 else
394 builder.create<cir::UnreachableOp>(localScope->endLoc);
395 builder.clearInsertionPoint();
396 return;
397 }
398 }
399
400 (void)emitReturn(localScope->endLoc);
401}
402
// Sets up per-function state before the body is emitted: records curFn and
// the declaration, allocates/declares parameter storage in the entry block,
// creates the `__retval` slot for non-void returns, and runs the C++
// instance-method/lambda prologue.
// NOTE(review): several source lines were dropped by the extraction wherever
// the embedded numbering below has gaps (including the first signature line,
// parts of the lambda capture handling and trailing asserts) — restore from
// upstream before building.
404 cir::FuncOp fn, cir::FuncType funcType,
406 SourceLocation startLoc) {
407 assert(!curFn &&
408 "CIRGenFunction can only be used for one function at a time");
409
410 curFn = fn;
411
412 const Decl *d = gd.getDecl();
413 curCodeDecl = d;
414 const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
416
417 prologueCleanupDepth = ehStack.stable_begin();
418
419 mlir::Block *entryBB = &fn.getBlocks().front();
420 builder.setInsertionPointToStart(entryBB);
421
422 // TODO(cir): this should live in `emitFunctionProlog
423 // Declare all the function arguments in the symbol table.
424 for (const auto nameValue : llvm::zip(args, entryBB->getArguments())) {
425 const VarDecl *paramVar = std::get<0>(nameValue);
426 mlir::Value paramVal = std::get<1>(nameValue);
427 CharUnits alignment = getContext().getDeclAlign(paramVar);
428 mlir::Location paramLoc = getLoc(paramVar->getSourceRange());
429 paramVal.setLoc(paramLoc);
430
431 mlir::Value addrVal =
432 emitAlloca(cast<NamedDecl>(paramVar)->getName(),
433 convertType(paramVar->getType()), paramLoc, alignment,
434 /*insertIntoFnEntryBlock=*/true);
435
436 declare(addrVal, paramVar, paramVar->getType(), paramLoc, alignment,
437 /*isParam=*/true);
438
439 setAddrOfLocalVar(paramVar, Address(addrVal, alignment));
440
441 bool isPromoted = isa<ParmVarDecl>(paramVar) &&
442 cast<ParmVarDecl>(paramVar)->isKNRPromoted();
444 if (isPromoted)
445 cgm.errorNYI(fd->getSourceRange(), "Function argument demotion");
446
447 // Location of the store to the param storage tracked as beginning of
448 // the function body.
449 mlir::Location fnBodyBegin = getLoc(fd->getBody()->getBeginLoc());
450 builder.CIRBaseBuilderTy::createStore(fnBodyBegin, paramVal, addrVal);
451 }
452 assert(builder.getInsertionBlock() && "Should be valid");
453
454 // When the current function is not void, create an address to store the
455 // result value.
456 if (!returnType->isVoidType())
457 emitAndUpdateRetAlloca(returnType, getLoc(fd->getBody()->getEndLoc()),
458 getContext().getTypeAlignInChars(returnType));
459
460 if (isa_and_nonnull<CXXMethodDecl>(d) &&
461 cast<CXXMethodDecl>(d)->isInstance()) {
462 cgm.getCXXABI().emitInstanceFunctionProlog(loc, *this);
463
464 const auto *md = cast<CXXMethodDecl>(d);
465 if (md->getParent()->isLambda() && md->getOverloadedOperator() == OO_Call) {
466 // We're in a lambda.
467 auto fn = dyn_cast<cir::FuncOp>(curFn);
468 assert(fn && "lambda in non-function region");
469 fn.setLambda(true);
470
471 // Figure out the captures.
472 md->getParent()->getCaptureFields(lambdaCaptureFields,
475 // If the lambda captures the object referred to by '*this' - either by
476 // value or by reference, make sure CXXThisValue points to the correct
477 // object.
478
479 // Get the lvalue for the field (which is a copy of the enclosing object
480 // or contains the address of the enclosing object).
481 LValue thisFieldLValue =
483 if (!lambdaThisCaptureField->getType()->isPointerType()) {
484 // If the enclosing object was captured by value, just use its
485 // address. Sign this pointer.
486 cxxThisValue = thisFieldLValue.getPointer();
487 } else {
488 // Load the lvalue pointed to by the field, since '*this' was captured
489 // by reference.
491 emitLoadOfLValue(thisFieldLValue, SourceLocation()).getValue();
492 }
493 }
494 for (auto *fd : md->getParent()->fields()) {
495 if (fd->hasCapturedVLAType())
496 cgm.errorNYI(loc, "lambda captured VLA type");
497 }
498 } else {
499 // Not in a lambda; just use 'this' from the method.
500 // FIXME: Should we generate a new load for each use of 'this'? The fast
501 // register allocator would be happier...
503 }
504
507 }
508}
509
// Wraps up emission of the current function by popping any cleanups pushed
// since startFunction (compared against prologueCleanupDepth).
// NOTE(review): the signature line and the body of the `hasCleanups` branch
// were dropped by the extraction (numbering gaps 517 -> 519 -> 521) — restore
// from upstream before building.
511 // Pop any cleanups that might have been associated with the
512 // parameters. Do this in whatever block we're currently in; it's
513 // important to do this before we enter the return block or return
514 // edges will be *really* confused.
515 // TODO(cir): Use prologueCleanupDepth here.
516 bool hasCleanups = ehStack.stable_begin() != prologueCleanupDepth;
517 if (hasCleanups) {
519 // FIXME(cir): should we clearInsertionPoint? breaks many testcases
521 }
522}
523
// Emits the statements of a function body; compound statements skip the extra
// scope since the function-level scope already exists.
// NOTE(review): one line after the scope comment was dropped by the
// extraction (numbering gap 526 -> 528, presumably the variable-scope RAII
// object) — restore from upstream before building.
524mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
525 // We start with function level scope for variables.
527
528 if (const CompoundStmt *block = dyn_cast<CompoundStmt>(body))
529 return emitCompoundStmtWithoutScope(*block);
530
531 return emitStmt(body, /*useCurrentScope=*/true);
532}
533
534static void eraseEmptyAndUnusedBlocks(cir::FuncOp func) {
535 // Remove any leftover blocks that are unreachable and empty, since they do
536 // not represent unreachable code useful for warnings nor anything deemed
537 // useful in general.
538 SmallVector<mlir::Block *> blocksToDelete;
539 for (mlir::Block &block : func.getBlocks()) {
540 if (block.empty() && block.getUses().empty())
541 blocksToDelete.push_back(&block);
542 }
543 for (mlir::Block *block : blocksToDelete)
544 block->erase();
545}
546
// Drives emission of one function definition: builds the entry block and
// argument list, opens the function-level LexicalScope, dispatches on the
// kind of declaration (dtor/ctor/CUDA kernel/lambda invoker/defaulted
// assignment/plain body), then verifies and finishes the function.
// NOTE(review): the extraction dropped lines wherever the embedded numbering
// below has gaps (e.g. the ctor and special-member branches are missing their
// emit calls) — restore from upstream before building.
547cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
548 cir::FuncType funcType) {
549 const auto funcDecl = cast<FunctionDecl>(gd.getDecl());
550 curGD = gd;
551
552 SourceLocation loc = funcDecl->getLocation();
553 Stmt *body = funcDecl->getBody();
554 SourceRange bodyRange =
555 body ? body->getSourceRange() : funcDecl->getLocation();
556
557 SourceLocRAIIObject fnLoc{*this, loc.isValid() ? getLoc(loc)
558 : builder.getUnknownLoc()};
559
560 auto validMLIRLoc = [&](clang::SourceLocation clangLoc) {
561 return clangLoc.isValid() ? getLoc(clangLoc) : builder.getUnknownLoc();
562 };
563 const mlir::Location fusedLoc = mlir::FusedLoc::get(
565 {validMLIRLoc(bodyRange.getBegin()), validMLIRLoc(bodyRange.getEnd())});
566 mlir::Block *entryBB = fn.addEntryBlock();
567
568 FunctionArgList args;
569 QualType retTy = buildFunctionArgList(gd, args);
570
571 // Create a scope in the symbol table to hold variable declarations.
573 {
574 LexicalScope lexScope(*this, fusedLoc, entryBB);
575
576 startFunction(gd, retTy, fn, funcType, args, loc, bodyRange.getBegin());
577
578 if (isa<CXXDestructorDecl>(funcDecl)) {
579 emitDestructorBody(args);
580 } else if (isa<CXXConstructorDecl>(funcDecl)) {
582 } else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice &&
583 funcDecl->hasAttr<CUDAGlobalAttr>()) {
584 getCIRGenModule().errorNYI(bodyRange, "CUDA kernel");
585 } else if (isa<CXXMethodDecl>(funcDecl) &&
586 cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker()) {
587 // The lambda static invoker function is special, because it forwards or
588 // clones the body of the function call operator (but is actually
589 // static).
591 } else if (funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) &&
592 (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() ||
593 cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator())) {
594 // Implicit copy-assignment gets the same special treatment as implicit
595 // copy-constructors.
597 } else if (body) {
598 if (mlir::failed(emitFunctionBody(body))) {
599 return nullptr;
600 }
601 } else {
602 // Anything without a body should have been handled above.
603 llvm_unreachable("no definition for normal function");
604 }
605
606 if (mlir::failed(fn.verifyBody()))
607 return nullptr;
608
609 finishFunction(bodyRange.getEnd());
610 }
611
613 return fn;
614}
615
// Emits the body of the current constructor: delegates complete -> base when
// the ABI has ctor variants, otherwise emits the prologue (base/member
// initializers) followed by the body statements.
// NOTE(review): the signature line and a couple of interior lines were
// dropped by the extraction (numbering gaps below) — restore from upstream.
618 const auto *ctor = cast<CXXConstructorDecl>(curGD.getDecl());
619 CXXCtorType ctorType = curGD.getCtorType();
620
621 assert((cgm.getTarget().getCXXABI().hasConstructorVariants() ||
622 ctorType == Ctor_Complete) &&
623 "can only generate complete ctor for this ABI");
624
625 if (ctorType == Ctor_Complete && isConstructorDelegationValid(ctor) &&
626 cgm.getTarget().getCXXABI().hasConstructorVariants()) {
627 emitDelegateCXXConstructorCall(ctor, Ctor_Base, args, ctor->getEndLoc());
628 return;
629 }
630
631 const FunctionDecl *definition = nullptr;
632 Stmt *body = ctor->getBody(definition);
633 assert(definition == ctor && "emitting wrong constructor body");
634
635 if (isa_and_nonnull<CXXTryStmt>(body)) {
636 cgm.errorNYI(ctor->getSourceRange(), "emitConstructorBody: try body");
637 return;
638 }
639
642
643 // TODO: in restricted cases, we can emit the vbase initializers of a
644 // complete ctor and then delegate to the base ctor.
645
646 // Emit the constructor prologue, i.e. the base and member initializers.
647 emitCtorPrologue(ctor, ctorType, args);
648
649 // TODO(cir): propagate this result via mlir::logical result. Just unreachable
650 // now just to have it handled.
651 if (mlir::failed(emitStmt(body, true))) {
652 cgm.errorNYI(ctor->getSourceRange(),
653 "emitConstructorBody: emit body statement failed.");
654 return;
655 }
656}
657
658/// Emits the body of the current destructor.
// Dispatches on the destructor variant: deleting delegates to complete,
// complete delegates to base (unless a function-try-block forces inline
// emission), base emits the body and then runs the epilogue cleanups.
// NOTE(review): the signature line and several interior lines were dropped by
// the extraction (numbering gaps below, e.g. around RunCleanupsScope and the
// cleanup-scope asserts) — restore from upstream before building.
660 const CXXDestructorDecl *dtor = cast<CXXDestructorDecl>(curGD.getDecl());
661 CXXDtorType dtorType = curGD.getDtorType();
662
663 // For an abstract class, non-base destructors are never used (and can't
664 // be emitted in general, because vbase dtors may not have been validated
665 // by Sema), but the Itanium ABI doesn't make them optional and Clang may
666 // in fact emit references to them from other compilations, so emit them
667 // as functions containing a trap instruction.
668 if (dtorType != Dtor_Base && dtor->getParent()->isAbstract()) {
669 cgm.errorNYI(dtor->getSourceRange(), "abstract base class destructors");
670 return;
671 }
672
673 Stmt *body = dtor->getBody();
675
676 // The call to operator delete in a deleting destructor happens
677 // outside of the function-try-block, which means it's always
678 // possible to delegate the destructor body to the complete
679 // destructor. Do so.
680 if (dtorType == Dtor_Deleting) {
681 RunCleanupsScope dtorEpilogue(*this);
683 if (haveInsertPoint()) {
685 emitCXXDestructorCall(dtor, Dtor_Complete, /*forVirtualBase=*/false,
686 /*delegating=*/false, loadCXXThisAddress(), thisTy);
687 }
688 return;
689 }
690
691 // If the body is a function-try-block, enter the try before
692 // anything else.
693 const bool isTryBody = isa_and_nonnull<CXXTryStmt>(body);
694 if (isTryBody)
695 cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
696
698
699 // Enter the epilogue cleanups.
700 RunCleanupsScope dtorEpilogue(*this);
701
702 // If this is the complete variant, just invoke the base variant;
703 // the epilogue will destruct the virtual bases. But we can't do
704 // this optimization if the body is a function-try-block, because
705 // we'd introduce *two* handler blocks. In the Microsoft ABI, we
706 // always delegate because we might not have a definition in this TU.
707 switch (dtorType) {
708 case Dtor_Unified:
709 llvm_unreachable("not expecting a unified dtor");
710 case Dtor_Comdat:
711 llvm_unreachable("not expecting a COMDAT");
712 case Dtor_Deleting:
713 llvm_unreachable("already handled deleting case");
714
715 case Dtor_Complete:
716 assert((body || getTarget().getCXXABI().isMicrosoft()) &&
717 "can't emit a dtor without a body for non-Microsoft ABIs");
718
719 // Enter the cleanup scopes for virtual bases.
721
722 if (!isTryBody) {
724 emitCXXDestructorCall(dtor, Dtor_Base, /*forVirtualBase=*/false,
725 /*delegating=*/false, loadCXXThisAddress(), thisTy);
726 break;
727 }
728
729 // Fallthrough: act like we're in the base variant.
730 [[fallthrough]];
731
732 case Dtor_Base:
733 assert(body);
734
735 // Enter the cleanup scopes for fields and non-virtual bases.
737
739
740 if (isTryBody) {
741 cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
742 } else if (body) {
743 (void)emitStmt(body, /*useCurrentScope=*/true);
744 } else {
745 assert(dtor->isImplicit() && "bodyless dtor not implicit");
746 // nothing to do besides what's in the epilogue
747 }
748 // -fapple-kext must inline any call to this dtor into
749 // the caller's body.
751
752 break;
753 }
754
755 // Jump out through the epilogue cleanups.
756 dtorEpilogue.forceCleanup();
757
758 // Exit the try if applicable.
759 if (isTryBody)
760 cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
761}
762
763/// Given a value of type T* that may not be to a complete object, construct
764/// an l-value with the natural pointee alignment of T.
// NOTE(review): the signature line and one interior line were dropped by the
// extraction (numbering gaps 764 -> 766 and 769 -> 771) — restore from
// upstream before building.
766 QualType ty) {
767 // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps
768 // assert on the result type first.
769 LValueBaseInfo baseInfo;
771 CharUnits align = cgm.getNaturalTypeAlignment(ty, &baseInfo);
772 return makeAddrLValue(Address(val, align), ty, baseInfo);
773}
774
// Builds an LValue for `val` at the natural alignment of `ty`, attaching the
// memory type computed by convertTypeForMem.
// NOTE(review): the signature line and one interior line were dropped by the
// extraction (numbering gaps before 776 and 779 -> 781) — restore from
// upstream before building.
776 QualType ty) {
777 LValueBaseInfo baseInfo;
778 CharUnits alignment = cgm.getNaturalTypeAlignment(ty, &baseInfo);
779 Address addr(val, convertTypeForMem(ty), alignment);
781 return makeAddrLValue(addr, ty, baseInfo);
782}
783
785 FunctionArgList &args) {
786 const auto *fd = cast<FunctionDecl>(gd.getDecl());
787 QualType retTy = fd->getReturnType();
788
789 const auto *md = dyn_cast<CXXMethodDecl>(fd);
790 if (md && md->isInstance()) {
791 if (cgm.getCXXABI().hasThisReturn(gd))
792 cgm.errorNYI(fd->getSourceRange(), "this return");
793 else if (cgm.getCXXABI().hasMostDerivedReturn(gd))
794 cgm.errorNYI(fd->getSourceRange(), "most derived return");
795 cgm.getCXXABI().buildThisParam(*this, args);
796 }
797
798 if (const auto *cd = dyn_cast<CXXConstructorDecl>(fd))
799 if (cd->getInheritedConstructor())
800 cgm.errorNYI(fd->getSourceRange(),
801 "buildFunctionArgList: inherited constructor");
802
803 for (auto *param : fd->parameters())
804 args.push_back(param);
805
806 if (md && (isa<CXXConstructorDecl>(md) || isa<CXXDestructorDecl>(md)))
807 cgm.getCXXABI().addImplicitStructorParams(*this, retTy, args);
808
809 return retTy;
810}
811
812/// Emit code to compute a designator that specifies the location
813/// of the expression.
814/// FIXME: document this function better.
// Dispatches on the statement class of `e` to the specific emitXXXLValue
// helper; unhandled classes report NYI and return a null LValue.
// NOTE(review): the extraction dropped most of the per-case `return emit...`
// lines (every numbering gap below is a missing line) — restore from upstream
// before building; only the cases with visible returns are complete here.
816 // FIXME: ApplyDebugLocation DL(*this, e);
817 switch (e->getStmtClass()) {
818 default:
820 std::string("l-value not implemented for '") +
821 e->getStmtClassName() + "'");
822 return LValue();
823 case Expr::ArraySubscriptExprClass:
825 case Expr::UnaryOperatorClass:
827 case Expr::StringLiteralClass:
829 case Expr::MemberExprClass:
831 case Expr::CompoundLiteralExprClass:
833 case Expr::PredefinedExprClass:
835 case Expr::BinaryOperatorClass:
837 case Expr::CompoundAssignOperatorClass: {
838 QualType ty = e->getType();
839 if (ty->getAs<AtomicType>()) {
840 cgm.errorNYI(e->getSourceRange(),
841 "CompoundAssignOperator with AtomicType");
842 return LValue();
843 }
844 if (!ty->isAnyComplexType())
846
848 }
849 case Expr::CallExprClass:
850 case Expr::CXXMemberCallExprClass:
851 case Expr::CXXOperatorCallExprClass:
852 case Expr::UserDefinedLiteralClass:
854 case Expr::ParenExprClass:
855 return emitLValue(cast<ParenExpr>(e)->getSubExpr());
856 case Expr::GenericSelectionExprClass:
857 return emitLValue(cast<GenericSelectionExpr>(e)->getResultExpr());
858 case Expr::DeclRefExprClass:
860 case Expr::CStyleCastExprClass:
861 case Expr::CXXStaticCastExprClass:
862 case Expr::CXXDynamicCastExprClass:
863 case Expr::ImplicitCastExprClass:
865 case Expr::MaterializeTemporaryExprClass:
867 case Expr::ChooseExprClass:
868 return emitLValue(cast<ChooseExpr>(e)->getChosenSubExpr());
869 }
870}
871
872static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt) {
873 SmallString<256> buffer;
874 llvm::raw_svector_ostream out(buffer);
875 out << name << cnt;
876 return std::string(out.str());
877}
878
882
886
// Zero-initializes the object at `destPtr`: skips empty C++ classes and
// zero-size types, then stores a #cir.zero value of the converted type
// (instead of LLVM codegen's memset).
// NOTE(review): one line was dropped by the extraction (numbering gap
// 889 -> 891, presumably the C++ language-mode guard around the empty-class
// check) — restore from upstream before building.
887void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address destPtr,
888 QualType ty) {
889 // Ignore empty classes in C++.
891 if (const auto *rd = ty->getAsCXXRecordDecl(); rd && rd->isEmpty())
892 return;
893
894 // Cast the dest ptr to the appropriate i8 pointer type.
895 if (builder.isInt8Ty(destPtr.getElementType())) {
896 cgm.errorNYI(loc, "Cast the dest ptr to the appropriate i8 pointer type");
897 }
898
899 // Get size and alignment info for this aggregate.
900 const CharUnits size = getContext().getTypeSizeInChars(ty);
901 if (size.isZero()) {
902 // But note that getTypeInfo returns 0 for a VLA.
903 if (isa<VariableArrayType>(getContext().getAsArrayType(ty))) {
904 cgm.errorNYI(loc,
905 "emitNullInitialization for zero size VariableArrayType");
906 } else {
907 return;
908 }
909 }
910
911 // If the type contains a pointer to data member we can't memset it to zero.
912 // Instead, create a null constant and copy it to the destination.
913 // TODO: there are other patterns besides zero that we can usefully memset,
914 // like -1, which happens to be the pattern used by member-pointers.
915 if (!cgm.getTypes().isZeroInitializable(ty)) {
916 cgm.errorNYI(loc, "type is not zero initializable");
917 }
918
919 // In LLVM Codegen: otherwise, just memset the whole thing to zero using
920 // Builder.CreateMemSet. In CIR just emit a store of #cir.zero to the
921 // respective address.
922 // Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
923 const mlir::Value zeroValue = builder.getNullValue(convertType(ty), loc);
924 builder.createStore(loc, zeroValue, destPtr);
925}
926
927// TODO(cir): should be shared with LLVM codegen.
929 const Expr *e = ce->getSubExpr();
930
931 if (ce->getCastKind() == CK_UncheckedDerivedToBase)
932 return false;
933
934 if (isa<CXXThisExpr>(e->IgnoreParens())) {
935 // We always assume that 'this' is never null.
936 return false;
937 }
938
939 if (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(ce)) {
940 // And that glvalue casts are never null.
941 if (ice->isGLValue())
942 return false;
943 }
944
945 return true;
946}
947
948/// Computes the length of an array in elements, as well as the base
949/// element type and a properly-typed first element pointer.
// NOTE(review): the extraction dropped the second signature line and a few
// interior lines (numbering gaps 950 -> 952, 956 -> 959, 968 -> 970) — the
// VLA guard and parts of the loop are incomplete here; restore from upstream
// before building.
950mlir::Value
952 QualType &baseType, Address &addr) {
953 const clang::ArrayType *arrayType = origArrayType;
954
955 // If it's a VLA, we have to load the stored size. Note that
956 // this is the size of the VLA in bytes, not its size in elements.
959 cgm.errorNYI(*currSrcLoc, "VLAs");
960 return builder.getConstInt(*currSrcLoc, SizeTy, 0);
961 }
962
963 uint64_t countFromCLAs = 1;
964 QualType eltType;
965
966 auto cirArrayType = mlir::dyn_cast<cir::ArrayType>(addr.getElementType());
967
968 while (cirArrayType) {
970 countFromCLAs *= cirArrayType.getSize();
971 eltType = arrayType->getElementType();
972
973 cirArrayType =
974 mlir::dyn_cast<cir::ArrayType>(cirArrayType.getElementType());
975
976 arrayType = getContext().getAsArrayType(arrayType->getElementType());
977 assert((!cirArrayType || arrayType) &&
978 "CIR and Clang types are out-of-sync");
979 }
980
981 if (arrayType) {
982 // From this point onwards, the Clang array type has been emitted
983 // as some other type (probably a packed struct). Compute the array
984 // size, and just emit the 'begin' expression as a bitcast.
985 cgm.errorNYI(*currSrcLoc, "length for non-array underlying types");
986 }
987
988 baseType = eltType;
989 return builder.getConstInt(*currSrcLoc, SizeTy, countFromCLAs);
990}
991
993 mlir::Value ptrValue, QualType ty, SourceLocation loc,
994 SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue) {
996 return cir::AssumeAlignedOp::create(builder, getLoc(assumptionLoc), ptrValue,
997 alignment, offsetValue);
998}
999
1001 mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc,
1002 int64_t alignment, mlir::Value offsetValue) {
1003 QualType ty = expr->getType();
1004 SourceLocation loc = expr->getExprLoc();
1005 return emitAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment,
1006 offsetValue);
1007}
1008
1009// TODO(cir): Most of this function can be shared between CIRGen
1010// and traditional LLVM codegen
1012 assert(type->isVariablyModifiedType() &&
1013 "Must pass variably modified type to EmitVLASizes!");
1014
1015 // We're going to walk down into the type and look for VLA
1016 // expressions.
1017 do {
1018 assert(type->isVariablyModifiedType());
1019
1020 const Type *ty = type.getTypePtr();
1021 switch (ty->getTypeClass()) {
1022 case Type::CountAttributed:
1023 case Type::PackIndexing:
1024 case Type::ArrayParameter:
1025 case Type::HLSLAttributedResource:
1026 case Type::HLSLInlineSpirv:
1027 case Type::PredefinedSugar:
1028 cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
1029 break;
1030
1031#define TYPE(Class, Base)
1032#define ABSTRACT_TYPE(Class, Base)
1033#define NON_CANONICAL_TYPE(Class, Base)
1034#define DEPENDENT_TYPE(Class, Base) case Type::Class:
1035#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
1036#include "clang/AST/TypeNodes.inc"
1037 llvm_unreachable(
1038 "dependent type must be resolved before the CIR codegen");
1039
1040 // These types are never variably-modified.
1041 case Type::Builtin:
1042 case Type::Complex:
1043 case Type::Vector:
1044 case Type::ExtVector:
1045 case Type::ConstantMatrix:
1046 case Type::Record:
1047 case Type::Enum:
1048 case Type::Using:
1049 case Type::TemplateSpecialization:
1050 case Type::ObjCTypeParam:
1051 case Type::ObjCObject:
1052 case Type::ObjCInterface:
1053 case Type::ObjCObjectPointer:
1054 case Type::BitInt:
1055 llvm_unreachable("type class is never variably-modified!");
1056
1057 case Type::Adjusted:
1058 type = cast<clang::AdjustedType>(ty)->getAdjustedType();
1059 break;
1060
1061 case Type::Decayed:
1062 type = cast<clang::DecayedType>(ty)->getPointeeType();
1063 break;
1064
1065 case Type::Pointer:
1066 type = cast<clang::PointerType>(ty)->getPointeeType();
1067 break;
1068
1069 case Type::BlockPointer:
1070 type = cast<clang::BlockPointerType>(ty)->getPointeeType();
1071 break;
1072
1073 case Type::LValueReference:
1074 case Type::RValueReference:
1075 type = cast<clang::ReferenceType>(ty)->getPointeeType();
1076 break;
1077
1078 case Type::MemberPointer:
1079 type = cast<clang::MemberPointerType>(ty)->getPointeeType();
1080 break;
1081
1082 case Type::ConstantArray:
1083 case Type::IncompleteArray:
1084 // Losing element qualification here is fine.
1085 type = cast<clang::ArrayType>(ty)->getElementType();
1086 break;
1087
1088 case Type::VariableArray: {
1089 cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType VLA");
1090 break;
1091 }
1092
1093 case Type::FunctionProto:
1094 case Type::FunctionNoProto:
1095 type = cast<clang::FunctionType>(ty)->getReturnType();
1096 break;
1097
1098 case Type::Paren:
1099 case Type::TypeOf:
1100 case Type::UnaryTransform:
1101 case Type::Attributed:
1102 case Type::BTFTagAttributed:
1103 case Type::SubstTemplateTypeParm:
1104 case Type::MacroQualified:
1105 // Keep walking after single level desugaring.
1106 type = type.getSingleStepDesugaredType(getContext());
1107 break;
1108
1109 case Type::Typedef:
1110 case Type::Decltype:
1111 case Type::Auto:
1112 case Type::DeducedTemplateSpecialization:
1113 // Stop walking: nothing to do.
1114 return;
1115
1116 case Type::TypeOfExpr:
1117 // Stop walking: emit typeof expression.
1118 emitIgnoredExpr(cast<clang::TypeOfExprType>(ty)->getUnderlyingExpr());
1119 return;
1120
1121 case Type::Atomic:
1122 type = cast<clang::AtomicType>(ty)->getValueType();
1123 break;
1124
1125 case Type::Pipe:
1126 type = cast<clang::PipeType>(ty)->getElementType();
1127 break;
1128 }
1129 } while (type->isVariablyModifiedType());
1130}
1131
1133 if (getContext().getBuiltinVaListType()->isArrayType())
1134 return emitPointerWithAlignment(e);
1135 return emitLValue(e).getAddress();
1136}
1137
1138} // namespace clang::CIRGen
Defines the clang::Expr interface and subclasses for C++ expressions.
__device__ __2f16 float __ockl_bool s
APSInt & getInt()
Definition APValue.h:489
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
SourceManager & getSourceManager()
Definition ASTContext.h:833
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3722
mlir::Type getElementType() const
Definition Address.h:109
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
void forceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
static bool isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor)
Checks whether the given constructor is a valid subject for the complete-to-base constructor delegati...
mlir::Type convertType(clang::QualType t)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
EHScopeStack::stable_iterator prologueCleanupDepth
The cleanup depth enclosing all the cleanups associated with the parameters.
cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn, cir::FuncType funcType)
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural...
LValue emitMemberExpr(const MemberExpr *e)
const TargetInfo & getTarget() const
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
const clang::Decl * curFuncDecl
LValue emitLValueForLambdaField(const FieldDecl *field)
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e)
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, const FunctionArgList &args, clang::SourceLocation loc)
mlir::Value emitArrayLength(const clang::ArrayType *arrayType, QualType &baseType, Address &addr)
Computes the length of an array in elements, as well as the base element type and a properly-typed fi...
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
mlir::Operation * curFn
The current function or global initializer that is generated code for.
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void enterDtorCleanups(const CXXDestructorDecl *dtor, CXXDtorType type)
Enter the cleanups necessary to complete the given phase of destruction for a destructor.
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
void emitImplicitAssignmentOperatorBody(FunctionArgList &args)
mlir::Type convertTypeForMem(QualType t)
clang::QualType buildFunctionArgList(clang::GlobalDecl gd, FunctionArgList &args)
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, FunctionArgList &args)
This routine generates necessary code to initialize base classes and non-static data members belongin...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
Address returnValue
The temporary alloca to hold the return value.
void finishFunction(SourceLocation endLoc)
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
clang::FieldDecl * lambdaThisCaptureField
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
void emitConstructorBody(FunctionArgList &args)
LValue emitCallExprLValue(const clang::CallExpr *e)
bool haveInsertPoint() const
True if an insertion point is defined.
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
bool shouldNullCheckClassCastValue(const CastExpr *ce)
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
void startFunction(clang::GlobalDecl gd, clang::QualType returnType, cir::FuncOp fn, cir::FuncType funcType, FunctionArgList args, clang::SourceLocation loc, clang::SourceLocation startLoc)
Emit code for the start of a function.
unsigned counterRefTmp
Hold counters for incrementally naming temporaries.
mlir::MLIRContext & getMLIRContext()
void emitDestructorBody(FunctionArgList &args)
Emits the body of the current destructor.
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
LValue emitPredefinedLValue(const PredefinedExpr *e)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
void emitLambdaStaticInvokeBody(const CXXMethodDecl *md)
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
clang::ASTContext & getContext() const
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
Set the address of a local variable.
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth)
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
Type for representing both the decl and type of parameters to a function.
Definition CIRGenCall.h:191
Address getAddress() const
mlir::Value getPointer() const
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:56
Represents a C++ destructor within a class.
Definition DeclCXX.h:2869
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition DeclCXX.h:2255
QualType getFunctionObjectParameterType() const
Definition DeclCXX.h:2279
bool isAbstract() const
Determine whether this class has a pure virtual function.
Definition DeclCXX.h:1221
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition DeclCXX.h:1186
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3610
CastKind getCastKind() const
Definition Expr.h:3654
Expr * getSubExpr()
Definition Expr.h:3660
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1720
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition DeclBase.h:593
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks,...
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3081
QualType getType() const
Definition Expr.h:144
Represents a function declaration or definition.
Definition Decl.h:2000
Stmt * getBody(const FunctionDecl *&Definition) const
Retrieve the body (definition) of the function.
Definition Decl.cpp:3271
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4537
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition Expr.h:3787
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
unsigned getLine() const
Return the presumed line number of this location.
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2867
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
This class handles loading and caching of source files into memory.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location of a SourceLocation specifies.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition Stmt.h:85
StmtClass getStmtClass() const
Definition Stmt.h:1472
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:334
const char * getStmtClassName() const
Definition Stmt.cpp:87
bool isVoidType() const
Definition TypeBase.h:8883
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isAnyComplexType() const
Definition TypeBase.h:8662
TypeClass getTypeClass() const
Definition TypeBase.h:2385
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9103
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:2190
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt)
static bool mayDropFunctionReturn(const ASTContext &astContext, QualType returnType)
static void eraseEmptyAndUnusedBlocks(cir::FuncOp func)
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
@ Ctor_Base
Base object ctor.
Definition ABI.h:26
@ Ctor_Complete
Complete object ctor.
Definition ABI.h:25
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
CXXDtorType
C++ destructor types.
Definition ABI.h:34
@ Dtor_Comdat
The COMDAT used for dtors.
Definition ABI.h:38
@ Dtor_Unified
GCC-style unified dtor.
Definition ABI.h:39
@ Dtor_Base
Base object dtor.
Definition ABI.h:37
@ Dtor_Complete
Complete object dtor.
Definition ABI.h:36
@ Dtor_Deleting
Deleting dtor.
Definition ABI.h:35
U cast(CodeGen::Address addr)
Definition Address.h:327
static bool vtableInitialization()
static bool constructABIArgDirectExtend()
static bool coroEndBuiltinCall()
static bool runCleanupsScope()
static bool emitTypeCheck()
static bool generateDebugInfo()
static bool incrementProfileCounter()
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
mlir::Block * getCleanupBlock(mlir::OpBuilder &builder)
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647