//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Internal per-function state used for AST-to-ClangIR code gen
//
//===----------------------------------------------------------------------===//

#include "CIRGenFunction.h"

#include "CIRGenCXXABI.h"
#include "CIRGenCall.h"
#include "CIRGenValue.h"
#include "mlir/IR/Location.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/CIR/MissingFeatures.h"

#include <cassert>

namespace clang::CIRGen {

CIRGenFunction::CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
                               bool suppressNewContext)
    : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
  ehStack.setCGF(this);
}

CIRGenFunction::~CIRGenFunction() {}

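// Illustrative examples (added commentary, not from the original source):
// getEvaluationKind classifies how a value of a given type is represented
// during codegen:
//   int, float, int *       -> cir::TEK_Scalar
//   _Complex double         -> cir::TEK_Complex
//   struct S { int a, b; }  -> cir::TEK_Aggregate
//   _Atomic(int)            -> classified by its underlying type (TEK_Scalar)
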
// This is copied from clang/lib/CodeGen/CodeGenFunction.cpp
cir::TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::BitInt:
    case Type::HLSLAttributedResource:
    case Type::HLSLInlineSpirv:
      return cir::TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return cir::TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ArrayParameter:
      return cir::TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

mlir::Type CIRGenFunction::convertTypeForMem(QualType t) {
  return cgm.getTypes().convertTypeForMem(t);
}

mlir::Type CIRGenFunction::convertType(QualType t) {
  return cgm.getTypes().convertType(t);
}

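// Illustrative example (added commentary, not from the original source): a
// valid clang SourceLocation pointing at "foo.c", line 3, column 5 maps to
// the MLIR location `loc("foo.c":3:5)` (an mlir::FileLineColLoc); invalid
// locations fall back to the enclosing construct's location via currSrcLoc.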
mlir::Location CIRGenFunction::getLoc(SourceLocation srcLoc) {
  // Some AST nodes might contain invalid source locations (e.g.
  // CXXDefaultArgExpr); work around that to still get something out.
  if (srcLoc.isValid()) {
    const SourceManager &sm = getContext().getSourceManager();
    PresumedLoc pLoc = sm.getPresumedLoc(srcLoc);
    StringRef filename = pLoc.getFilename();
    return mlir::FileLineColLoc::get(builder.getStringAttr(filename),
                                     pLoc.getLine(), pLoc.getColumn());
  }
  // Do our best...
  assert(currSrcLoc && "expected to inherit some source location");
  return *currSrcLoc;
}

mlir::Location CIRGenFunction::getLoc(SourceRange srcLoc) {
  // Some AST nodes might contain invalid source locations (e.g.
  // CXXDefaultArgExpr); work around that to still get something out.
  if (srcLoc.isValid()) {
    mlir::Location beg = getLoc(srcLoc.getBegin());
    mlir::Location end = getLoc(srcLoc.getEnd());
    SmallVector<mlir::Location, 2> locs = {beg, end};
    mlir::Attribute metadata;
    return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
  }
  if (currSrcLoc) {
    return *currSrcLoc;
  }
  // We're brave, but time to give up.
  return builder.getUnknownLoc();
}

mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) {
  SmallVector<mlir::Location, 2> locs = {lhs, rhs};
  mlir::Attribute metadata;
  return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
}

bool CIRGenFunction::containsLabel(const Stmt *s, bool ignoreCaseStmts) {
  // Null statement, not a label!
  if (!s)
    return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) { ... foo: bar(); } goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(s))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we
  // have to emit the code.
  if (isa<SwitchCase>(s) && !ignoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore case statements when we
  // recursively process the sub-statements of the switch. If we haven't
  // encountered a switch statement, we treat case statements like labels, but
  // if we are processing a switch statement, case statements are expected.
  if (isa<SwitchStmt>(s))
    ignoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  return std::any_of(s->child_begin(), s->child_end(),
                     [=](const Stmt *subStmt) {
                       return containsLabel(subStmt, ignoreCaseStmts);
                     });
}

/// If the specified expression does not fold to a constant, or if it does but
/// contains a label, return false. If it constant folds, return true and set
/// the boolean result in resultBool.
bool CIRGenFunction::constantFoldsToBool(const Expr *cond, bool &resultBool,
                                         bool allowLabels) {
  llvm::APSInt resultInt;
  if (!constantFoldsToSimpleInteger(cond, resultInt, allowLabels))
    return false;

  resultBool = resultInt.getBoolValue();
  return true;
}

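// Illustrative example (added commentary, not from the original source): for
// code like
//   if (sizeof(int) == 4) { ... }
// the condition constant-folds; constantFoldsToBool returns true and sets
// resultBool to true, letting codegen drop the dead branch entirely.
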
/// If the specified expression does not fold to a constant, or if it does
/// fold but contains a label, return false. If it constant folds, return
/// true and set the folded value in resultInt.
bool CIRGenFunction::constantFoldsToSimpleInteger(const Expr *cond,
                                                  llvm::APSInt &resultInt,
                                                  bool allowLabels) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult result;
  if (!cond->EvaluateAsInt(result, getContext()))
    return false; // Not foldable, not integer or not fully evaluatable.

  llvm::APSInt intValue = result.Val.getInt();
  if (!allowLabels && containsLabel(cond))
    return false; // Contains a label.

  resultInt = intValue;
  return true;
}

void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
                                            CharUnits alignment) {
  if (!type->isVoidType()) {
    mlir::Value addr = emitAlloca("__retval", convertType(type), loc,
                                  alignment,
                                  /*insertIntoFnEntryBlock=*/false);
    fnRetAlloca = addr;
    returnValue = Address(addr, alignment);
  }
}

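// Illustrative sketch (added commentary, not from the original source) of the
// effect of declare() on the alloca for a `const int x` parameter; the emitted
// op looks roughly like
//   %x.addr = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init, const]
// i.e. the `init` flag is set for parameters and `const` for const-qualified
// or reference-typed declarations.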
void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
                             mlir::Location loc, CharUnits alignment,
                             bool isParam) {
  assert(isa<NamedDecl>(var) && "Needs a named decl");
  assert(!symbolTable.count(var) && "not supposed to be available just yet");

  auto allocaOp = addrVal.getDefiningOp<cir::AllocaOp>();
  assert(allocaOp && "expected cir::AllocaOp");

  if (isParam)
    allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
  if (ty->isReferenceType() || ty.isConstQualified())
    allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext()));

  symbolTable.insert(var, allocaOp);
}

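// Note (added commentary, not from the original source): LexicalScope::cleanup
// runs when a scope is popped. It emits returns for any pending return blocks,
// forces RunCleanupsScope cleanups, threads control flow through the cleanup
// block if one exists, and finally terminates the block with cir.yield (for
// nested scopes) or an implicit return (at function depth 0).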
void CIRGenFunction::LexicalScope::cleanup() {
  CIRGenBuilderTy &builder = cgf.builder;
  LexicalScope *localScope = cgf.curLexScope;

  auto applyCleanup = [&]() {
    if (performCleanup) {
      // ApplyDebugLocation
      assert(!cir::MissingFeatures::generateDebugInfo());
      forceCleanup();
    }
  };

  // Cleanups are done right before codegen resumes a scope. This is where
  // objects are destroyed. Process all return blocks.
  // TODO(cir): Handle returning from a switch statement through a cleanup
  // block. We can't simply jump to the cleanup block, because the cleanup block
  // is not part of the case region. Either reemit all cleanups in the return
  // block or wait for MLIR structured control flow to support early exits.
  llvm::SmallVector<mlir::Block *> retBlocks;
  for (mlir::Block *retBlock : localScope->getRetBlocks()) {
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.setInsertionPointToEnd(retBlock);
    retBlocks.push_back(retBlock);
    mlir::Location retLoc = localScope->getRetLoc(retBlock);
    emitReturn(retLoc);
  }

  auto insertCleanupAndLeave = [&](mlir::Block *insPt) {
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.setInsertionPointToEnd(insPt);

    // If we still don't have a cleanup block, it means that `applyCleanup`
    // below might be able to get us one.
    mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);

    // Leverage and defer to RunCleanupsScope's dtor and scope handling.
    applyCleanup();

    // If we now have one after `applyCleanup`, hook it up properly.
    if (!cleanupBlock && localScope->getCleanupBlock(builder)) {
      cleanupBlock = localScope->getCleanupBlock(builder);
      cir::BrOp::create(builder, insPt->back().getLoc(), cleanupBlock);
      if (!cleanupBlock->mightHaveTerminator()) {
        mlir::OpBuilder::InsertionGuard guard(builder);
        builder.setInsertionPointToEnd(cleanupBlock);
        cir::YieldOp::create(builder, localScope->endLoc);
      }
    }

    if (localScope->depth == 0) {
      // Reached the end of the function.
      // Special handling only for the single return block case.
      if (localScope->getRetBlocks().size() == 1) {
        mlir::Block *retBlock = localScope->getRetBlocks()[0];
        mlir::Location retLoc = localScope->getRetLoc(retBlock);
        if (retBlock->getUses().empty()) {
          retBlock->erase();
        } else {
          // Thread the return block through the cleanup block.
          if (cleanupBlock) {
            for (mlir::BlockOperand &blockUse : retBlock->getUses()) {
              cir::BrOp brOp = mlir::cast<cir::BrOp>(blockUse.getOwner());
              brOp.setSuccessor(cleanupBlock);
            }
          }

          cir::BrOp::create(builder, retLoc, retBlock);
          return;
        }
      }
      emitImplicitReturn();
      return;
    }

    // End of any local scope != function.
    // Ternary ops have to deal with matching arms for yielding types and do
    // return a value: in that case the ternary does its own cir.yield
    // insertion.
    if (!localScope->isTernary() && !insPt->mightHaveTerminator()) {
      !retVal ? cir::YieldOp::create(builder, localScope->endLoc)
              : cir::YieldOp::create(builder, localScope->endLoc, retVal);
    }
  };

  // If a cleanup block has been created at some point, branch to it
  // and set the insertion point to continue at the cleanup block.
  // Terminators are then inserted either in the cleanup block or
  // inline in this current block.
  mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
  if (cleanupBlock)
    insertCleanupAndLeave(cleanupBlock);

  // Now deal with any pending block wrap up like implicit end of
  // scope.

  mlir::Block *curBlock = builder.getBlock();
  if (isGlobalInit() && !curBlock)
    return;
  if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
    return;

  // Get rid of any empty block at the end of the scope.
  bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
  if (!entryBlock && curBlock->empty()) {
    curBlock->erase();
    for (mlir::Block *retBlock : retBlocks) {
      if (retBlock->getUses().empty())
        retBlock->erase();
    }
    return;
  }

  // If there's a cleanup block, branch to it, nothing else to do.
  if (cleanupBlock) {
    cir::BrOp::create(builder, curBlock->back().getLoc(), cleanupBlock);
    return;
  }

  // No pre-existing cleanup block, emit cleanup code and yield/return.
  insertCleanupAndLeave(curBlock);
}

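// Illustrative example (added commentary, not from the original source): for a
// function returning `int`, emitReturn produces roughly
//   %0 = cir.load %__retval : !cir.ptr<!s32i>, !s32i
//   cir.return %0 : !s32i
// while a void function gets a bare `cir.return`.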
cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
  CIRGenBuilderTy &builder = cgf.getBuilder();

  // If we are on a coroutine, add the coro_end builtin call.
  assert(!cir::MissingFeatures::coroEndBuiltinCall());

  auto fn = dyn_cast<cir::FuncOp>(cgf.curFn);
  assert(fn && "emitReturn from non-function");
  if (!fn.getFunctionType().hasVoidReturn()) {
    // Load the value from `__retval` and return it via the `cir.return` op.
    auto value = cir::LoadOp::create(
        builder, loc, fn.getFunctionType().getReturnType(), *cgf.fnRetAlloca);
    return cir::ReturnOp::create(builder, loc,
                                 llvm::ArrayRef(value.getResult()));
  }
  return cir::ReturnOp::create(builder, loc);
}

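// Illustrative example (added commentary, not from the original source): the
// return value of
//   std::string f();
// cannot be dropped (std::string has a non-trivial destructor), while that of
//   int g();
// can, since int is trivially copyable.
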
// This is copied from CodeGenModule::MayDropFunctionReturn. This is a
// candidate for sharing between CIRGen and CodeGen.
static bool mayDropFunctionReturn(const ASTContext &astContext,
                                  QualType returnType) {
  // We can't just discard the return value for a record type with a complex
  // destructor or a non-trivially copyable type.
  if (const auto *classDecl = returnType->getAsCXXRecordDecl())
    return classDecl->hasTrivialDestructor();
  return returnType.isTriviallyCopyableType(astContext);
}

void CIRGenFunction::LexicalScope::emitImplicitReturn() {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  LexicalScope *localScope = cgf.curLexScope;

  const auto *fd = cast<clang::FunctionDecl>(cgf.curGD.getDecl());

  // In C++, flowing off the end of a non-void function is always undefined
  // behavior. In C, flowing off the end of a non-void function is undefined
  // behavior only if the non-existent return value is used by the caller.
  // That influences whether the terminating op is trap, unreachable, or
  // return.
  if (cgf.getLangOpts().CPlusPlus && !fd->hasImplicitReturnZero() &&
      !cgf.sawAsmBlock && !fd->getReturnType()->isVoidType() &&
      builder.getInsertionBlock()) {
    bool shouldEmitUnreachable =
        cgf.cgm.getCodeGenOpts().StrictReturn ||
        !mayDropFunctionReturn(fd->getASTContext(), fd->getReturnType());

    if (shouldEmitUnreachable) {
      if (cgf.cgm.getCodeGenOpts().OptimizationLevel == 0)
        cir::TrapOp::create(builder, localScope->endLoc);
      else
        cir::UnreachableOp::create(builder, localScope->endLoc);
      builder.clearInsertionPoint();
      return;
    }
  }

  (void)emitReturn(localScope->endLoc);
}

cir::TryOp CIRGenFunction::LexicalScope::getClosestTryParent() {
  LexicalScope *scope = this;
  while (scope) {
    if (scope->isTry())
      return scope->getTry();
    scope = scope->parentScope;
  }
  return nullptr;
}

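// Note (added commentary, not from the original source): startFunction sets up
// the per-function state before the body is emitted. For a function like
//   int add(int a, int b);
// it allocates stack slots for `a` and `b`, stores the incoming block
// arguments into them, and creates the `__retval` slot that emitReturn later
// loads from.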
void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
                                   cir::FuncOp fn, cir::FuncType funcType,
                                   FunctionArgList args, SourceLocation loc,
                                   SourceLocation startLoc) {
  assert(!curFn &&
         "CIRGenFunction can only be used for one function at a time");

  curFn = fn;

  const Decl *d = gd.getDecl();

  didCallStackSave = false;
  curCodeDecl = d;
  const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
  curFuncDecl = d ? d->getNonClosureContext() : nullptr;

  prologueCleanupDepth = ehStack.stable_begin();

  mlir::Block *entryBB = &fn.getBlocks().front();
  builder.setInsertionPointToStart(entryBB);

  // TODO(cir): this should live in `emitFunctionProlog`.
  // Declare all the function arguments in the symbol table.
  for (const auto nameValue : llvm::zip(args, entryBB->getArguments())) {
    const VarDecl *paramVar = std::get<0>(nameValue);
    mlir::Value paramVal = std::get<1>(nameValue);
    CharUnits alignment = getContext().getDeclAlign(paramVar);
    mlir::Location paramLoc = getLoc(paramVar->getSourceRange());
    paramVal.setLoc(paramLoc);

    mlir::Value addrVal =
        emitAlloca(cast<NamedDecl>(paramVar)->getName(),
                   convertType(paramVar->getType()), paramLoc, alignment,
                   /*insertIntoFnEntryBlock=*/true);

    declare(addrVal, paramVar, paramVar->getType(), paramLoc, alignment,
            /*isParam=*/true);

    setAddrOfLocalVar(paramVar, Address(addrVal, alignment));

    bool isPromoted = isa<ParmVarDecl>(paramVar) &&
                      cast<ParmVarDecl>(paramVar)->isKNRPromoted();
    assert(!cir::MissingFeatures::constructABIArgDirectExtend());
    if (isPromoted)
      cgm.errorNYI(fd->getSourceRange(), "Function argument demotion");

    // Location of the store to the param storage tracked as beginning of
    // the function body.
    mlir::Location fnBodyBegin = getLoc(fd->getBody()->getBeginLoc());
    builder.CIRBaseBuilderTy::createStore(fnBodyBegin, paramVal, addrVal);
  }
  assert(builder.getInsertionBlock() && "Should be valid");

  // When the current function is not void, create an address to store the
  // result value.
  if (!returnType->isVoidType())
    emitAndUpdateRetAlloca(returnType, getLoc(fd->getBody()->getEndLoc()),
                           getContext().getTypeAlignInChars(returnType));

  if (isa_and_nonnull<CXXMethodDecl>(d) &&
      cast<CXXMethodDecl>(d)->isInstance()) {
    cgm.getCXXABI().emitInstanceFunctionProlog(loc, *this);

    const auto *md = cast<CXXMethodDecl>(d);
    if (md->getParent()->isLambda() && md->getOverloadedOperator() == OO_Call) {
      // We're in a lambda.
      auto fn = dyn_cast<cir::FuncOp>(curFn);
      assert(fn && "lambda in non-function region");
      fn.setLambda(true);

      // Figure out the captures.
      md->getParent()->getCaptureFields(lambdaCaptureFields,
                                        lambdaThisCaptureField);
      if (lambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this' - either by
        // value or by reference, make sure CXXThisValue points to the correct
        // object.

        // Get the lvalue for the field (which is a copy of the enclosing
        // object or contains the address of the enclosing object).
        LValue thisFieldLValue =
            emitLValueForLambdaField(lambdaThisCaptureField);
        if (!lambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its
          // address. Sign this pointer.
          cxxThisValue = thisFieldLValue.getPointer();
        } else {
          // Load the lvalue pointed to by the field, since '*this' was
          // captured by reference.
          cxxThisValue =
              emitLoadOfLValue(thisFieldLValue, SourceLocation()).getValue();
        }
      }
      for (auto *fd : md->getParent()->fields()) {
        if (fd->hasCapturedVLAType())
          cgm.errorNYI(loc, "lambda captured VLA type");
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The fast
      // register allocator would be happier...
      cxxThisValue = cxxabiThisValue;
    }
  }
}

void CIRGenFunction::finishFunction(SourceLocation endLoc) {
  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  // TODO(cir): Use prologueCleanupDepth here.
  bool hasCleanups = ehStack.stable_begin() != prologueCleanupDepth;
  if (hasCleanups) {
    assert(!cir::MissingFeatures::generateDebugInfo());
    // FIXME(cir): should we clearInsertionPoint? breaks many testcases
    popCleanupBlocks(prologueCleanupDepth);
  }
}

mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
  // We start with function level scope for variables.
  SymTableScopeTy varScope(symbolTable);

  if (const CompoundStmt *block = dyn_cast<CompoundStmt>(body))
    return emitCompoundStmtWithoutScope(*block);

  return emitStmt(body, /*useCurrentScope=*/true);
}

static void eraseEmptyAndUnusedBlocks(cir::FuncOp func) {
  // Remove any leftover blocks that are unreachable and empty, since they do
  // not represent unreachable code useful for warnings nor anything deemed
  // useful in general.
  SmallVector<mlir::Block *> blocksToDelete;
  for (mlir::Block &block : func.getBlocks()) {
    if (block.empty() && block.getUses().empty())
      blocksToDelete.push_back(&block);
  }
  for (mlir::Block *block : blocksToDelete)
    block->erase();
}

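// Illustrative example (added commentary, not from the original source; the
// attribute spelling is an assumption): with GNU inline builtin handling, a
// definition like
//   extern inline __attribute__((gnu_inline, always_inline)) void *
//   memcpy(void *d, const void *s, unsigned long n) { ... }
// gets its body emitted under the mangled name "memcpy.inline", while the
// plain "memcpy" symbol remains an external declaration (see the
// isInlineBuiltinDeclaration handling in generateCode below).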
cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
                                         cir::FuncType funcType) {
  const auto *funcDecl = cast<FunctionDecl>(gd.getDecl());
  curGD = gd;

  if (funcDecl->isInlineBuiltinDeclaration()) {
    // When generating code for a builtin with an inline declaration, use a
    // mangled name to hold the actual body, while keeping an external
    // declaration in case the function pointer is referenced somewhere.
    std::string fdInlineName = (cgm.getMangledName(funcDecl) + ".inline").str();
    cir::FuncOp clone =
        mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
    if (!clone) {
      mlir::OpBuilder::InsertionGuard guard(builder);
      builder.setInsertionPoint(fn);
      clone = cir::FuncOp::create(builder, fn.getLoc(), fdInlineName,
                                  fn.getFunctionType());
      clone.setLinkage(cir::GlobalLinkageKind::InternalLinkage);
      clone.setSymVisibility("private");
      clone.setInlineKind(cir::InlineKind::AlwaysInline);
    }
    fn.setLinkage(cir::GlobalLinkageKind::ExternalLinkage);
    fn.setSymVisibility("private");
    fn = clone;
  } else {
    // Detect the unusual situation where an inline version is shadowed by a
    // non-inline version. In that case we should pick the external one
    // everywhere. That's GCC behavior too.
    for (const FunctionDecl *pd = funcDecl->getPreviousDecl(); pd;
         pd = pd->getPreviousDecl()) {
      if (LLVM_UNLIKELY(pd->isInlineBuiltinDeclaration())) {
        std::string inlineName = funcDecl->getName().str() + ".inline";
        if (auto inlineFn = mlir::cast_or_null<cir::FuncOp>(
                cgm.getGlobalValue(inlineName))) {
          // Replace all uses of the .inline function with the regular
          // function.
          // FIXME: This performs a linear walk over the module. Introduce some
          // caching here.
          if (inlineFn
                  .replaceAllSymbolUses(fn.getSymNameAttr(), cgm.getModule())
                  .failed())
            llvm_unreachable("Failed to replace inline builtin symbol uses");
          inlineFn.erase();
        }
        break;
      }
    }
  }

  SourceLocation loc = funcDecl->getLocation();
  Stmt *body = funcDecl->getBody();
  SourceRange bodyRange =
      body ? body->getSourceRange() : funcDecl->getLocation();

  SourceLocRAIIObject fnLoc{*this, loc.isValid() ? getLoc(loc)
                                                 : builder.getUnknownLoc()};

  auto validMLIRLoc = [&](clang::SourceLocation clangLoc) {
    return clangLoc.isValid() ? getLoc(clangLoc) : builder.getUnknownLoc();
  };
  const mlir::Location fusedLoc = mlir::FusedLoc::get(
      &getMLIRContext(),
      {validMLIRLoc(bodyRange.getBegin()), validMLIRLoc(bodyRange.getEnd())});
  mlir::Block *entryBB = fn.addEntryBlock();

  FunctionArgList args;
  QualType retTy = buildFunctionArgList(gd, args);

  // Create a scope in the symbol table to hold variable declarations.
  SymTableScopeTy varScope(symbolTable);
  {
    LexicalScope lexScope(*this, fusedLoc, entryBB);

    // Emit the standard function prologue.
    startFunction(gd, retTy, fn, funcType, args, loc, bodyRange.getBegin());

    // Save parameters for coroutine function.
    if (body && isa_and_nonnull<CoroutineBodyStmt>(body))
      llvm::append_range(fnArgs, funcDecl->parameters());

    if (isa<CXXDestructorDecl>(funcDecl)) {
      emitDestructorBody(args);
    } else if (isa<CXXConstructorDecl>(funcDecl)) {
      emitConstructorBody(args);
    } else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice &&
               funcDecl->hasAttr<CUDAGlobalAttr>()) {
      getCIRGenModule().errorNYI(bodyRange, "CUDA kernel");
    } else if (isa<CXXMethodDecl>(funcDecl) &&
               cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker()) {
      // The lambda static invoker function is special, because it forwards or
      // clones the body of the function call operator (but is actually
      // static).
      emitLambdaStaticInvokeBody(cast<CXXMethodDecl>(funcDecl));
    } else if (funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) &&
               (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() ||
                cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator())) {
      // Implicit copy-assignment gets the same special treatment as implicit
      // copy-constructors.
      emitImplicitAssignmentOperatorBody(args);
    } else if (body) {
      // Emit standard function body.
      if (mlir::failed(emitFunctionBody(body))) {
        return nullptr;
      }
    } else {
      // Anything without a body should have been handled above.
      llvm_unreachable("no definition for normal function");
    }

    if (mlir::failed(fn.verifyBody()))
      return nullptr;

    finishFunction(bodyRange.getEnd());
  }

  eraseEmptyAndUnusedBlocks(fn);
  return fn;
}

void CIRGenFunction::emitConstructorBody(FunctionArgList &args) {
  const auto *ctor = cast<CXXConstructorDecl>(curGD.getDecl());
  CXXCtorType ctorType = curGD.getCtorType();

  assert((cgm.getTarget().getCXXABI().hasConstructorVariants() ||
          ctorType == Ctor_Complete) &&
         "can only generate complete ctor for this ABI");

  cgm.setCXXSpecialMemberAttr(cast<cir::FuncOp>(curFn), ctor);

  if (ctorType == Ctor_Complete && isConstructorDelegationValid(ctor) &&
      cgm.getTarget().getCXXABI().hasConstructorVariants()) {
    emitDelegateCXXConstructorCall(ctor, Ctor_Base, args, ctor->getEndLoc());
    return;
  }

  const FunctionDecl *definition = nullptr;
  Stmt *body = ctor->getBody(definition);
  assert(definition == ctor && "emitting wrong constructor body");

  if (isa_and_nonnull<CXXTryStmt>(body)) {
    cgm.errorNYI(ctor->getSourceRange(), "emitConstructorBody: try body");
    return;
  }

  assert(!cir::MissingFeatures::incrementProfileCounter());
  assert(!cir::MissingFeatures::runCleanupsScope());

  // TODO: in restricted cases, we can emit the vbase initializers of a
  // complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member initializers.
  emitCtorPrologue(ctor, ctorType, args);

  // TODO(cir): propagate this result via mlir::LogicalResult. For now, just
  // report an error so the failure is handled somehow.
  if (mlir::failed(emitStmt(body, true))) {
    cgm.errorNYI(ctor->getSourceRange(),
                 "emitConstructorBody: emit body statement failed.");
    return;
  }
}

/// Emits the body of the current destructor.
void CIRGenFunction::emitDestructorBody(FunctionArgList &args) {
  const CXXDestructorDecl *dtor = cast<CXXDestructorDecl>(curGD.getDecl());
  CXXDtorType dtorType = curGD.getDtorType();

  cgm.setCXXSpecialMemberAttr(cast<cir::FuncOp>(curFn), dtor);

  // For an abstract class, non-base destructors are never used (and can't
  // be emitted in general, because vbase dtors may not have been validated
  // by Sema), but the Itanium ABI doesn't make them optional and Clang may
  // in fact emit references to them from other compilations, so emit them
  // as functions containing a trap instruction.
  if (dtorType != Dtor_Base && dtor->getParent()->isAbstract()) {
    cgm.errorNYI(dtor->getSourceRange(), "abstract base class destructors");
    return;
  }

  Stmt *body = dtor->getBody();

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor. Do so.
  if (dtorType == Dtor_Deleting) {
    RunCleanupsScope dtorEpilogue(*this);
    enterDtorCleanups(dtor, Dtor_Deleting);
    if (haveInsertPoint()) {
      QualType thisTy = dtor->getFunctionObjectParameterType();
      emitCXXDestructorCall(dtor, Dtor_Complete, /*forVirtualBase=*/false,
                            /*delegating=*/false, loadCXXThisAddress(), thisTy);
    }
    return;
  }

  // If the body is a function-try-block, enter the try before
  // anything else.
  const bool isTryBody = isa_and_nonnull<CXXTryStmt>(body);
  if (isTryBody)
    cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");

  // Enter the epilogue cleanups.
  RunCleanupsScope dtorEpilogue(*this);

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases. But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks. In the Microsoft ABI, we
  // always delegate because we might not have a definition in this TU.
  switch (dtorType) {
  case Dtor_Unified:
    llvm_unreachable("not expecting a unified dtor");
  case Dtor_Comdat:
    llvm_unreachable("not expecting a COMDAT");
  case Dtor_Deleting:
    llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    assert((body || getTarget().getCXXABI().isMicrosoft()) &&
           "can't emit a dtor without a body for non-Microsoft ABIs");

    // Enter the cleanup scopes for virtual bases.
    enterDtorCleanups(dtor, Dtor_Complete);

    if (!isTryBody) {
      QualType thisTy = dtor->getFunctionObjectParameterType();
      emitCXXDestructorCall(dtor, Dtor_Base, /*forVirtualBase=*/false,
                            /*delegating=*/false, loadCXXThisAddress(), thisTy);
      break;
    }

    // Fallthrough: act like we're in the base variant.
    [[fallthrough]];

  case Dtor_Base:
    assert(body);

    // Enter the cleanup scopes for fields and non-virtual bases.
    enterDtorCleanups(dtor, Dtor_Base);

    if (isTryBody) {
      cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
    } else if (body) {
      (void)emitStmt(body, /*useCurrentScope=*/true);
    } else {
      assert(dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.

    break;
  }

  // Jump out through the epilogue cleanups.
  dtorEpilogue.forceCleanup();

  // Exit the try if applicable.
  if (isTryBody)
    cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
}

/// Given a value of type T* that may not be to a complete object, construct
/// an l-value with the natural pointee alignment of T.
LValue CIRGenFunction::makeNaturalAlignPointeeAddrLValue(mlir::Value val,
                                                         QualType ty) {
  // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps
  // assert on the result type first.
  LValueBaseInfo baseInfo;
  CharUnits align = cgm.getNaturalTypeAlignment(ty, &baseInfo);
  return makeAddrLValue(Address(val, align), ty, baseInfo);
}

LValue CIRGenFunction::makeNaturalAlignAddrLValue(mlir::Value val,
                                                  QualType ty) {
  LValueBaseInfo baseInfo;
  CharUnits alignment = cgm.getNaturalTypeAlignment(ty, &baseInfo);
  Address addr(val, convertTypeForMem(ty), alignment);
  return makeAddrLValue(addr, ty, baseInfo);
}

QualType CIRGenFunction::buildFunctionArgList(GlobalDecl gd,
                                              FunctionArgList &args) {
  const auto *fd = cast<FunctionDecl>(gd.getDecl());
  QualType retTy = fd->getReturnType();

  const auto *md = dyn_cast<CXXMethodDecl>(fd);
  if (md && md->isInstance()) {
    if (cgm.getCXXABI().hasThisReturn(gd))
      cgm.errorNYI(fd->getSourceRange(), "this return");
    else if (cgm.getCXXABI().hasMostDerivedReturn(gd))
      cgm.errorNYI(fd->getSourceRange(), "most derived return");
    cgm.getCXXABI().buildThisParam(*this, args);
  }

  if (const auto *cd = dyn_cast<CXXConstructorDecl>(fd))
    if (cd->getInheritedConstructor())
      cgm.errorNYI(fd->getSourceRange(),
                   "buildFunctionArgList: inherited constructor");

  for (auto *param : fd->parameters())
    args.push_back(param);

  if (md && (isa<CXXConstructorDecl>(md) || isa<CXXDestructorDecl>(md)))
    cgm.getCXXABI().addImplicitStructorParams(*this, retTy, args);

  return retTy;
}

/// Emit code to compute a designator that specifies the location
/// of the expression.
/// FIXME: document this function better.
LValue CIRGenFunction::emitLValue(const Expr *e) {
  // FIXME: ApplyDebugLocation DL(*this, e);
  switch (e->getStmtClass()) {
  default:
    cgm.errorNYI(e->getSourceRange(),
                 std::string("l-value not implemented for '") +
                     e->getStmtClassName() + "'");
    return LValue();
  case Expr::ConditionalOperatorClass:
    return emitConditionalOperatorLValue(cast<ConditionalOperator>(e));
  case Expr::BinaryConditionalOperatorClass:
    return emitConditionalOperatorLValue(cast<BinaryConditionalOperator>(e));
  case Expr::ArraySubscriptExprClass:
    return emitArraySubscriptExpr(cast<ArraySubscriptExpr>(e));
  case Expr::ExtVectorElementExprClass:
    return emitExtVectorElementExpr(cast<ExtVectorElementExpr>(e));
  case Expr::UnaryOperatorClass:
    return emitUnaryOpLValue(cast<UnaryOperator>(e));
  case Expr::StringLiteralClass:
    return emitStringLiteralLValue(cast<StringLiteral>(e));
  case Expr::MemberExprClass:
    return emitMemberExpr(cast<MemberExpr>(e));
  case Expr::CompoundLiteralExprClass:
    return emitCompoundLiteralLValue(cast<CompoundLiteralExpr>(e));
  case Expr::PredefinedExprClass:
    return emitPredefinedLValue(cast<PredefinedExpr>(e));
  case Expr::BinaryOperatorClass:
    return emitBinaryOperatorLValue(cast<BinaryOperator>(e));
  case Expr::CompoundAssignOperatorClass: {
    QualType ty = e->getType();
    if (ty->getAs<AtomicType>()) {
      cgm.errorNYI(e->getSourceRange(),
                   "CompoundAssignOperator with AtomicType");
      return LValue();
    }
    if (!ty->isAnyComplexType())
      return emitCompoundAssignmentLValue(cast<CompoundAssignOperator>(e));

    return emitComplexCompoundAssignmentLValue(
        cast<CompoundAssignOperator>(e));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return emitCallExprLValue(cast<CallExpr>(e));
  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(e);
    RunCleanupsScope scope(*this);
    LValue lv = emitLValue(cleanups->getSubExpr());
    assert(!cir::MissingFeatures::cleanupWithPreservedValues());
    return lv;
  }
  case Expr::CXXDefaultArgExprClass: {
    auto *dae = cast<CXXDefaultArgExpr>(e);
    CXXDefaultArgExprScope scope(*this, dae);
    return emitLValue(dae->getExpr());
  }
  case Expr::ParenExprClass:
    return emitLValue(cast<ParenExpr>(e)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return emitLValue(cast<GenericSelectionExpr>(e)->getResultExpr());
  case Expr::DeclRefExprClass:
    return emitDeclRefLValue(cast<DeclRefExpr>(e));
  case Expr::CStyleCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::ImplicitCastExprClass:
    return emitCastLValue(cast<CastExpr>(e));
  case Expr::MaterializeTemporaryExprClass:
    return emitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(e));
  case Expr::OpaqueValueExprClass:
    return emitOpaqueValueLValue(cast<OpaqueValueExpr>(e));
  case Expr::ChooseExprClass:
    return emitLValue(cast<ChooseExpr>(e)->getChosenSubExpr());
  }
}

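// Illustrative example (added commentary, not from the original source):
//   getVersionedTmpName("agg.tmp", 0) == "agg.tmp0"
//   getVersionedTmpName("agg.tmp", 1) == "agg.tmp1"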
static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt) {
  SmallString<256> buffer;
  llvm::raw_svector_ostream out(buffer);
  out << name << cnt;
  return std::string(out.str());
}

std::string CIRGenFunction::getCounterRefTmpAsString() {
  return getVersionedTmpName("ref.tmp", counterRefTmp++);
}

std::string CIRGenFunction::getCounterAggTmpAsString() {
  return getVersionedTmpName("agg.tmp", counterAggTmp++);
}

void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address destPtr,
                                            QualType ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus)
    if (const auto *rd = ty->getAsCXXRecordDecl(); rd && rd->isEmpty())
      return;

  // Cast the dest ptr to the appropriate i8 pointer type.
  if (builder.isInt8Ty(destPtr.getElementType())) {
    cgm.errorNYI(loc, "Cast the dest ptr to the appropriate i8 pointer type");
  }

  // Get size and alignment info for this aggregate.
  const CharUnits size = getContext().getTypeSizeInChars(ty);
  if (size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (isa<VariableArrayType>(getContext().getAsArrayType(ty))) {
      cgm.errorNYI(loc,
                   "emitNullInitialization for zero size VariableArrayType");
    } else {
      return;
    }
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!cgm.getTypes().isZeroInitializable(ty)) {
    cgm.errorNYI(loc, "type is not zero initializable");
  }

  // In LLVM Codegen: otherwise, just memset the whole thing to zero using
  // Builder.CreateMemSet. In CIR just emit a store of #cir.zero to the
  // respective address.
  // Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
  const mlir::Value zeroValue = builder.getNullValue(convertType(ty), loc);
  builder.createStore(loc, zeroValue, destPtr);
}

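// Illustrative example (added commentary, not from the original source): for
//   Derived *d = static_cast<Derived *>(basePtr);
// a null check is generally required (returns true), but casting `this`, a
// glvalue cast, or an unchecked derived-to-base conversion never needs one.
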
// TODO(cir): should be shared with LLVM codegen.
bool CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *ce) {
  const Expr *e = ce->getSubExpr();

  if (ce->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(e->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(ce)) {
    // And that glvalue casts are never null.
    if (ice->isGLValue())
      return false;
  }

  return true;
}

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
mlir::Value
CIRGenFunction::emitArrayLength(const clang::ArrayType *origArrayType,
                                QualType &baseType, Address &addr) {
  const clang::ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size. Note that
  // this is the size of the VLA in bytes, not its size in elements.
  if (isa<VariableArrayType>(arrayType)) {
    cgm.errorNYI(*currSrcLoc, "VLAs");
    return builder.getConstInt(*currSrcLoc, sizeTy, 0);
  }

  uint64_t countFromCLAs = 1;
  QualType eltType;

  auto cirArrayType = mlir::dyn_cast<cir::ArrayType>(addr.getElementType());

  while (cirArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    countFromCLAs *= cirArrayType.getSize();
    eltType = arrayType->getElementType();

    cirArrayType =
        mlir::dyn_cast<cir::ArrayType>(cirArrayType.getElementType());

    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!cirArrayType || arrayType) &&
           "CIR and Clang types are out-of-sync");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    cgm.errorNYI(*currSrcLoc, "length for non-array underlying types");
  }

  baseType = eltType;
  return builder.getConstInt(*currSrcLoc, sizeTy, countFromCLAs);
}

mlir::Value CIRGenFunction::emitAlignmentAssumption(
    mlir::Value ptrValue, QualType ty, SourceLocation loc,
    SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue) {
  return cir::AssumeAlignedOp::create(builder, getLoc(assumptionLoc), ptrValue,
                                      alignment, offsetValue);
}

mlir::Value CIRGenFunction::emitAlignmentAssumption(
    mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc,
    int64_t alignment, mlir::Value offsetValue) {
  QualType ty = expr->getType();
  SourceLocation loc = expr->getExprLoc();
  return emitAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment,
                                 offsetValue);
}

CIRGenFunction::VlaSizePair CIRGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla =
      cgm.getASTContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

CIRGenFunction::VlaSizePair
CIRGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  mlir::Value numElements;

  QualType elementType;
  do {
    elementType = type->getElementType();
    mlir::Value vlaSize = vlaSizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize.getType() == sizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.
      numElements =
          builder.createMul(numElements.getLoc(), numElements, vlaSize,
                            cir::OverflowBehavior::NoUnsignedWrap);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  assert(numElements && "Undefined elements number");
  return {numElements, elementType};
}

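// Illustrative example (added commentary, not from the original source): for
//   void f(int n) { int a[n][2 * n]; }
// emitVariablyModifiedType (below) evaluates `n` and `2 * n` once and caches
// the results in vlaSizeMap; getVLASize (above) then multiplies the cached
// per-dimension counts, marking the multiply as non-wrapping since a wrapping
// bound would be undefined behavior.
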
// TODO(cir): Most of this function can be shared between CIRGen
// and traditional LLVM codegen
void CIRGenFunction::emitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {
    case Type::CountAttributed:
    case Type::PackIndexing:
    case Type::ArrayParameter:
    case Type::HLSLAttributedResource:
    case Type::HLSLInlineSpirv:
    case Type::PredefinedSugar:
      cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
      break;

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable(
          "dependent type must be resolved before the CIR codegen");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::Record:
    case Type::Enum:
    case Type::Using:
    case Type::TemplateSpecialization:
    case Type::ObjCTypeParam:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
    case Type::BitInt:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<clang::AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<clang::DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<clang::PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<clang::BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<clang::ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<clang::MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<clang::ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const auto *vat = cast<clang::VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *sizeExpr = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        mlir::Value &entry = vlaSizeMap[sizeExpr];
        if (!entry) {
          mlir::Value size = emitScalarExpr(sizeExpr);

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          // FIXME: What about when size's type is larger than size_t?
          entry = builder.createIntCast(size, sizeTy);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<clang::FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::BTFTagAttributed:
    case Type::SubstTemplateTypeParm:
    case Type::MacroQualified:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      emitIgnoredExpr(cast<clang::TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<clang::AtomicType>(ty)->getValueType();
      break;

    case Type::Pipe:
      type = cast<clang::PipeType>(ty)->getElementType();
      break;
    }
  } while (type->isVariablyModifiedType());
}

Address CIRGenFunction::emitVAListRef(const Expr *e) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return emitPointerWithAlignment(e);
  return emitLValue(e).getAddress();
}

} // namespace clang::CIRGen