//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Internal per-function state used for AST-to-ClangIR code gen
//
//===----------------------------------------------------------------------===//

#include "CIRGenFunction.h"

#include "CIRGenCXXABI.h"
#include "CIRGenCall.h"
#include "CIRGenValue.h"
#include "mlir/IR/Location.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/CIR/MissingFeatures.h"

#include <cassert>

namespace clang::CIRGen {

CIRGenFunction::CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
                               bool suppressNewContext)
    : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
  ehStack.setCGF(this);
}

CIRGenFunction::~CIRGenFunction() {}

// This is copied from clang/lib/CodeGen/CodeGenFunction.cpp
cir::TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::BitInt:
    case Type::HLSLAttributedResource:
    case Type::HLSLInlineSpirv:
      return cir::TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return cir::TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ArrayParameter:
      return cir::TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}
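
// For example, given the classification above: 'int' and 'float *' are
// TEK_Scalar, '_Complex double' is TEK_Complex, arrays and records are
// TEK_Aggregate, and '_Atomic(int)' unwraps to its value type and is
// therefore TEK_Scalar as well.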

mlir::Type CIRGenFunction::convertTypeForMem(QualType t) {
  return cgm.getTypes().convertTypeForMem(t);
}

mlir::Type CIRGenFunction::convertType(QualType t) {
  return cgm.getTypes().convertType(t);
}

mlir::Location CIRGenFunction::getLoc(SourceLocation srcLoc) {
  // Some AST nodes might contain invalid source locations (e.g.
  // CXXDefaultArgExpr); work around that to still get something out.
  if (srcLoc.isValid()) {
    const SourceManager &sm = getContext().getSourceManager();
    PresumedLoc pLoc = sm.getPresumedLoc(srcLoc);
    StringRef filename = pLoc.getFilename();
    return mlir::FileLineColLoc::get(builder.getStringAttr(filename),
                                     pLoc.getLine(), pLoc.getColumn());
  }
  // Do our best...
  assert(currSrcLoc && "expected to inherit some source location");
  return *currSrcLoc;
}

mlir::Location CIRGenFunction::getLoc(SourceRange srcLoc) {
  // Some AST nodes might contain invalid source locations (e.g.
  // CXXDefaultArgExpr); work around that to still get something out.
  if (srcLoc.isValid()) {
    mlir::Location beg = getLoc(srcLoc.getBegin());
    mlir::Location end = getLoc(srcLoc.getEnd());
    SmallVector<mlir::Location, 2> locs = {beg, end};
    mlir::Attribute metadata;
    return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
  }
  if (currSrcLoc) {
    return *currSrcLoc;
  }
  // We're brave, but time to give up.
  return builder.getUnknownLoc();
}

mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) {
  SmallVector<mlir::Location, 2> locs = {lhs, rhs};
  mlir::Attribute metadata;
  return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
}
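
// When printed, a fused location looks roughly like (illustrative):
//   loc(fused["example.c":3:10, "example.c":3:14])
// so both ends of the original source range survive into the CIR output.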

bool CIRGenFunction::containsLabel(const Stmt *s, bool ignoreCaseStmts) {
  // Null statement, not a label!
  if (!s)
    return false;

  // If this is a label, we have to emit the code; consider something like:
  // if (0) { ... foo: bar(); } goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(s))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we
  // have to emit the code.
  if (isa<SwitchCase>(s) && !ignoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore case statements when we
  // recursively process the sub-statements of the switch. If we haven't
  // encountered a switch statement, we treat case statements like labels, but
  // if we are processing a switch statement, case statements are expected.
  if (isa<SwitchStmt>(s))
    ignoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  return std::any_of(s->child_begin(), s->child_end(),
                     [=](const Stmt *subStmt) {
                       return containsLabel(subStmt, ignoreCaseStmts);
                     });
}

/// If the specified expression does not fold to a constant, or if it does but
/// contains a label, return false. If it constant folds, return true and set
/// the boolean result in resultBool.
bool CIRGenFunction::constantFoldsToBool(const Expr *cond, bool &resultBool,
                                         bool allowLabels) {
  llvm::APSInt resultInt;
  if (!constantFoldsToSimpleInteger(cond, resultInt, allowLabels))
    return false;

  resultBool = resultInt.getBoolValue();
  return true;
}

/// If the specified expression does not fold to a constant, or if it does
/// fold but contains a label, return false. If it constant folds, return
/// true and set the folded value.
bool CIRGenFunction::constantFoldsToSimpleInteger(const Expr *cond,
                                                  llvm::APSInt &resultInt,
                                                  bool allowLabels) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult result;
  if (!cond->EvaluateAsInt(result, getContext()))
    return false; // Not foldable, not integer or not fully evaluatable.

  llvm::APSInt intValue = result.Val.getInt();
  if (!allowLabels && containsLabel(cond))
    return false; // Contains a label.

  resultInt = intValue;
  return true;
}
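
// For example, the condition in 'if (sizeof(int) == 4) { ... }' folds to a
// constant, so (as long as the dead arm contains no labels) CIRGen can avoid
// emitting the untaken branch altogether.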

void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
                                            CharUnits alignment) {
  if (!type->isVoidType()) {
    mlir::Value addr = emitAlloca("__retval", convertType(type), loc, alignment,
                                  /*insertIntoFnEntryBlock=*/false);
    fnRetAlloca = addr;
    returnValue = Address(addr, alignment);
  }
}

void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
                             mlir::Location loc, CharUnits alignment,
                             bool isParam) {
  assert(isa<NamedDecl>(var) && "Needs a named decl");
  assert(!symbolTable.count(var) && "not supposed to be available just yet");

  auto allocaOp = addrVal.getDefiningOp<cir::AllocaOp>();
  assert(allocaOp && "expected cir::AllocaOp");

  if (isParam)
    allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
  if (ty->isReferenceType() || ty.isConstQualified())
    allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext()));

  symbolTable.insert(var, allocaOp);
}
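
// Illustrative CIR for a declared 'int' parameter (syntax approximate):
//   %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init] {alignment = 4 : i64}
// The "init" attribute comes from isParam above; const-qualified and
// reference declarations additionally get the "const" attribute.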

void CIRGenFunction::LexicalScope::cleanup() {
  CIRGenBuilderTy &builder = cgf.builder;
  LexicalScope *localScope = cgf.curLexScope;

  auto applyCleanup = [&]() {
    if (performCleanup) {
      // ApplyDebugLocation
      assert(!cir::MissingFeatures::generateDebugInfo());
      forceCleanup();
    }
  };

  // Cleanups are done right before codegen resumes a scope. This is where
  // objects are destroyed. Process all return blocks.
  // TODO(cir): Handle returning from a switch statement through a cleanup
  // block. We can't simply jump to the cleanup block, because the cleanup block
  // is not part of the case region. Either reemit all cleanups in the return
  // block or wait for MLIR structured control flow to support early exits.
  llvm::SmallVector<mlir::Block *> retBlocks;
  for (mlir::Block *retBlock : localScope->getRetBlocks()) {
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.setInsertionPointToEnd(retBlock);
    retBlocks.push_back(retBlock);
    mlir::Location retLoc = localScope->getRetLoc(retBlock);
    emitReturn(retLoc);
  }

  auto insertCleanupAndLeave = [&](mlir::Block *insPt) {
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.setInsertionPointToEnd(insPt);

    // If we still don't have a cleanup block, it means that `applyCleanup`
    // below might be able to get us one.
    mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);

    // Leverage and defer to RunCleanupsScope's dtor and scope handling.
    applyCleanup();

    // If we now have one after `applyCleanup`, hook it up properly.
    if (!cleanupBlock && localScope->getCleanupBlock(builder)) {
      cleanupBlock = localScope->getCleanupBlock(builder);
      cir::BrOp::create(builder, insPt->back().getLoc(), cleanupBlock);
      if (!cleanupBlock->mightHaveTerminator()) {
        mlir::OpBuilder::InsertionGuard guard(builder);
        builder.setInsertionPointToEnd(cleanupBlock);
        cir::YieldOp::create(builder, localScope->endLoc);
      }
    }

    if (localScope->depth == 0) {
      // Reached the end of the function.
      // Special handling only for the single return block case.
      if (localScope->getRetBlocks().size() == 1) {
        mlir::Block *retBlock = localScope->getRetBlocks()[0];
        mlir::Location retLoc = localScope->getRetLoc(retBlock);
        if (retBlock->getUses().empty()) {
          retBlock->erase();
        } else {
          // Thread the return block via the cleanup block.
          if (cleanupBlock) {
            for (mlir::BlockOperand &blockUse : retBlock->getUses()) {
              cir::BrOp brOp = mlir::cast<cir::BrOp>(blockUse.getOwner());
              brOp.setSuccessor(cleanupBlock);
            }
          }

          cir::BrOp::create(builder, retLoc, retBlock);
          return;
        }
      }
      emitImplicitReturn();
      return;
    }

    // End of any local scope != function.
    // Ternary ops have to deal with matching arms for yielding types and do
    // return a value; they must do their own cir.yield insertion.
    if (!localScope->isTernary() && !insPt->mightHaveTerminator()) {
      !retVal ? cir::YieldOp::create(builder, localScope->endLoc)
              : cir::YieldOp::create(builder, localScope->endLoc, retVal);
    }
  };

  // If a cleanup block has been created at some point, branch to it
  // and set the insertion point to continue at the cleanup block.
  // Terminators are then inserted either in the cleanup block or
  // inline in this current block.
  mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
  if (cleanupBlock)
    insertCleanupAndLeave(cleanupBlock);

  // Now deal with any pending block wrap up like implicit end of
  // scope.

  mlir::Block *curBlock = builder.getBlock();
  if (isGlobalInit() && !curBlock)
    return;
  if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
    return;

  // Get rid of any empty block at the end of the scope.
  bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
  if (!entryBlock && curBlock->empty()) {
    curBlock->erase();
    for (mlir::Block *retBlock : retBlocks) {
      if (retBlock->getUses().empty())
        retBlock->erase();
    }
    return;
  }

  // If there's a cleanup block, branch to it, nothing else to do.
  if (cleanupBlock) {
    cir::BrOp::create(builder, curBlock->back().getLoc(), cleanupBlock);
    return;
  }

  // No pre-existent cleanup block, emit cleanup code and yield/return.
  insertCleanupAndLeave(curBlock);
}

cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
  CIRGenBuilderTy &builder = cgf.getBuilder();

  // If we are on a coroutine, add the coro_end builtin call.
  assert(!cir::MissingFeatures::coroEndBuiltinCall());

  auto fn = dyn_cast<cir::FuncOp>(cgf.curFn);
  assert(fn && "emitReturn from non-function");
  if (!fn.getFunctionType().hasVoidReturn()) {
    // Load the value from `__retval` and return it via the `cir.return` op.
    auto value = cir::LoadOp::create(
        builder, loc, fn.getFunctionType().getReturnType(), *cgf.fnRetAlloca);
    return cir::ReturnOp::create(builder, loc,
                                 llvm::ArrayRef(value.getResult()));
  }
  return cir::ReturnOp::create(builder, loc);
}
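
// For a function returning 'int', the emitted tail is roughly (illustrative):
//   %v = cir.load %retval_addr : !cir.ptr<!s32i>, !s32i
//   cir.return %v : !s32i
// where %retval_addr is the "__retval" alloca created in
// emitAndUpdateRetAlloca above.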

// This is copied from CodeGenModule::MayDropFunctionReturn. This is a
// candidate for sharing between CIRGen and CodeGen.
static bool mayDropFunctionReturn(const ASTContext &astContext,
                                  QualType returnType) {
  // We can't just discard the return value for a record type with a complex
  // destructor or a non-trivially copyable type.
  if (const auto *classDecl = returnType->getAsCXXRecordDecl())
    return classDecl->hasTrivialDestructor();
  return returnType.isTriviallyCopyableType(astContext);
}
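
// For example, a return value of class type with a non-trivial destructor
// (say, std::string) cannot be dropped, while 'int' or a POD struct can.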

void CIRGenFunction::LexicalScope::emitImplicitReturn() {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  LexicalScope *localScope = cgf.curLexScope;

  const auto *fd = cast<clang::FunctionDecl>(cgf.curGD.getDecl());

  // In C++, flowing off the end of a non-void function is always undefined
  // behavior. In C, flowing off the end of a non-void function is undefined
  // behavior only if the non-existent return value is used by the caller.
  // That influences whether the terminating op is trap, unreachable, or
  // return.
  if (cgf.getLangOpts().CPlusPlus && !fd->hasImplicitReturnZero() &&
      !cgf.sawAsmBlock && !fd->getReturnType()->isVoidType() &&
      builder.getInsertionBlock()) {
    bool shouldEmitUnreachable =
        cgf.cgm.getCodeGenOpts().StrictReturn ||
        !mayDropFunctionReturn(fd->getASTContext(), fd->getReturnType());

    if (shouldEmitUnreachable) {
      if (cgf.cgm.getCodeGenOpts().OptimizationLevel == 0)
        cir::TrapOp::create(builder, localScope->endLoc);
      else
        cir::UnreachableOp::create(builder, localScope->endLoc);
      builder.clearInsertionPoint();
      return;
    }
  }

  (void)emitReturn(localScope->endLoc);
}
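
// For example (C++): 'int f() { }' ends in cir.trap at -O0 and in
// cir.unreachable at higher optimization levels; the same function in C
// simply gets an implicit return, since the missing value is only UB if the
// caller uses it.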

void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
                                   cir::FuncOp fn, cir::FuncType funcType,
                                   FunctionArgList args, SourceLocation loc,
                                   SourceLocation startLoc) {
  assert(!curFn &&
         "CIRGenFunction can only be used for one function at a time");

  curFn = fn;

  const Decl *d = gd.getDecl();

  didCallStackSave = false;
  curCodeDecl = d;
  const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
  curFuncDecl = d->getNonClosureContext();

  prologueCleanupDepth = ehStack.stable_begin();

  mlir::Block *entryBB = &fn.getBlocks().front();
  builder.setInsertionPointToStart(entryBB);

  // TODO(cir): this should live in `emitFunctionProlog`.
  // Declare all the function arguments in the symbol table.
  for (const auto nameValue : llvm::zip(args, entryBB->getArguments())) {
    const VarDecl *paramVar = std::get<0>(nameValue);
    mlir::Value paramVal = std::get<1>(nameValue);
    CharUnits alignment = getContext().getDeclAlign(paramVar);
    mlir::Location paramLoc = getLoc(paramVar->getSourceRange());
    paramVal.setLoc(paramLoc);

    mlir::Value addrVal =
        emitAlloca(cast<NamedDecl>(paramVar)->getName(),
                   convertType(paramVar->getType()), paramLoc, alignment,
                   /*insertIntoFnEntryBlock=*/true);

    declare(addrVal, paramVar, paramVar->getType(), paramLoc, alignment,
            /*isParam=*/true);

    setAddrOfLocalVar(paramVar, Address(addrVal, alignment));

    bool isPromoted = isa<ParmVarDecl>(paramVar) &&
                      cast<ParmVarDecl>(paramVar)->isKNRPromoted();
    assert(!cir::MissingFeatures::constructABIArgDirectExtend());
    if (isPromoted)
      cgm.errorNYI(fd->getSourceRange(), "Function argument demotion");

    // Location of the store to the param storage tracked as beginning of
    // the function body.
    mlir::Location fnBodyBegin = getLoc(fd->getBody()->getBeginLoc());
    builder.CIRBaseBuilderTy::createStore(fnBodyBegin, paramVal, addrVal);
  }
  assert(builder.getInsertionBlock() && "Should be valid");

  // When the current function is not void, create an address to store the
  // result value.
  if (!returnType->isVoidType())
    emitAndUpdateRetAlloca(returnType, getLoc(fd->getBody()->getEndLoc()),
                           getContext().getTypeAlignInChars(returnType));

  if (isa_and_nonnull<CXXMethodDecl>(d) &&
      cast<CXXMethodDecl>(d)->isInstance()) {
    cgm.getCXXABI().emitInstanceFunctionProlog(loc, *this);

    const auto *md = cast<CXXMethodDecl>(d);
    if (md->getParent()->isLambda() && md->getOverloadedOperator() == OO_Call) {
      // We're in a lambda.
      auto fn = dyn_cast<cir::FuncOp>(curFn);
      assert(fn && "lambda in non-function region");
      fn.setLambda(true);

      // Figure out the captures.
      md->getParent()->getCaptureFields(lambdaCaptureFields,
                                        lambdaThisCaptureField);
      if (lambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this' - either by
        // value or by reference, make sure CXXThisValue points to the correct
        // object.

        // Get the lvalue for the field (which is a copy of the enclosing object
        // or contains the address of the enclosing object).
        LValue thisFieldLValue =
            emitLValueForLambdaField(lambdaThisCaptureField);
        if (!lambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its
          // address. Sign this pointer.
          cxxThisValue = thisFieldLValue.getPointer();
        } else {
          // Load the lvalue pointed to by the field, since '*this' was captured
          // by reference.
          cxxThisValue =
              emitLoadOfLValue(thisFieldLValue, SourceLocation()).getValue();
        }
      }
      for (auto *fd : md->getParent()->fields()) {
        if (fd->hasCapturedVLAType())
          cgm.errorNYI(loc, "lambda captured VLA type");
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The fast
      // register allocator would be happier...
      cxxThisValue = cxxabiThisValue;
    }

    assert(!cir::MissingFeatures::emitTypeCheck());
  }
}

void CIRGenFunction::finishFunction(SourceLocation endLoc) {
  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  // TODO(cir): Use prologueCleanupDepth here.
  bool hasCleanups = ehStack.stable_begin() != prologueCleanupDepth;
  if (hasCleanups) {
    assert(!cir::MissingFeatures::generateDebugInfo());
    // FIXME(cir): should we clearInsertionPoint? breaks many testcases
    popCleanupBlocks(prologueCleanupDepth);
  }
}

mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
  // We start with function level scope for variables.
  SymTableScopeTy varScope(symbolTable);

  if (const CompoundStmt *block = dyn_cast<CompoundStmt>(body))
    return emitCompoundStmtWithoutScope(*block);

  return emitStmt(body, /*useCurrentScope=*/true);
}

static void eraseEmptyAndUnusedBlocks(cir::FuncOp func) {
  // Remove any leftover blocks that are unreachable and empty, since they do
  // not represent unreachable code useful for warnings or anything else
  // deemed useful in general.
  SmallVector<mlir::Block *> blocksToDelete;
  for (mlir::Block &block : func.getBlocks()) {
    if (block.empty() && block.getUses().empty())
      blocksToDelete.push_back(&block);
  }
  for (mlir::Block *block : blocksToDelete)
    block->erase();
}

cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
                                         cir::FuncType funcType) {
  const auto funcDecl = cast<FunctionDecl>(gd.getDecl());
  curGD = gd;

  if (funcDecl->isInlineBuiltinDeclaration()) {
    // When generating code for a builtin with an inline declaration, use a
    // mangled name to hold the actual body, while keeping an external
    // declaration in case the function pointer is referenced somewhere.
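    // For example (illustrative): an always_inline definition of 'memcpy'
    // has its body emitted under 'memcpy.inline', while plain 'memcpy' stays
    // an external declaration for address-taken uses.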
    std::string fdInlineName = (cgm.getMangledName(funcDecl) + ".inline").str();
    cir::FuncOp clone =
        mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
    if (!clone) {
      mlir::OpBuilder::InsertionGuard guard(builder);
      builder.setInsertionPoint(fn);
      clone = cir::FuncOp::create(builder, fn.getLoc(), fdInlineName,
                                  fn.getFunctionType());
      clone.setLinkage(cir::GlobalLinkageKind::InternalLinkage);
      clone.setSymVisibility("private");
      clone.setInlineKind(cir::InlineKind::AlwaysInline);
    }
    fn.setLinkage(cir::GlobalLinkageKind::ExternalLinkage);
    fn.setSymVisibility("private");
    fn = clone;
  } else {
    // Detect the unusual situation where an inline version is shadowed by a
    // non-inline version. In that case we should pick the external one
    // everywhere. That's GCC behavior too.
    for (const FunctionDecl *pd = funcDecl->getPreviousDecl(); pd;
         pd = pd->getPreviousDecl()) {
      if (LLVM_UNLIKELY(pd->isInlineBuiltinDeclaration())) {
        std::string inlineName = funcDecl->getName().str() + ".inline";
        if (auto inlineFn = mlir::cast_or_null<cir::FuncOp>(
                cgm.getGlobalValue(inlineName))) {
          // Replace all uses of the .inline function with the regular function
          // FIXME: This performs a linear walk over the module. Introduce some
          // caching here.
          if (inlineFn
                  .replaceAllSymbolUses(fn.getSymNameAttr(), cgm.getModule())
                  .failed())
            llvm_unreachable("Failed to replace inline builtin symbol uses");
          inlineFn.erase();
        }
        break;
      }
    }
  }

  SourceLocation loc = funcDecl->getLocation();
  Stmt *body = funcDecl->getBody();
  SourceRange bodyRange =
      body ? body->getSourceRange() : funcDecl->getLocation();

  SourceLocRAIIObject fnLoc{*this, loc.isValid() ? getLoc(loc)
                                                 : builder.getUnknownLoc()};

  auto validMLIRLoc = [&](clang::SourceLocation clangLoc) {
    return clangLoc.isValid() ? getLoc(clangLoc) : builder.getUnknownLoc();
  };
  const mlir::Location fusedLoc = mlir::FusedLoc::get(
      &getMLIRContext(),
      {validMLIRLoc(bodyRange.getBegin()), validMLIRLoc(bodyRange.getEnd())});
  mlir::Block *entryBB = fn.addEntryBlock();

  FunctionArgList args;
  QualType retTy = buildFunctionArgList(gd, args);

  // Create a scope in the symbol table to hold variable declarations.
  SymTableScopeTy varScope(symbolTable);
  {
    LexicalScope lexScope(*this, fusedLoc, entryBB);

    startFunction(gd, retTy, fn, funcType, args, loc, bodyRange.getBegin());

    if (isa<CXXDestructorDecl>(funcDecl)) {
      emitDestructorBody(args);
    } else if (isa<CXXConstructorDecl>(funcDecl)) {
      emitConstructorBody(args);
    } else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice &&
               funcDecl->hasAttr<CUDAGlobalAttr>()) {
      getCIRGenModule().errorNYI(bodyRange, "CUDA kernel");
    } else if (isa<CXXMethodDecl>(funcDecl) &&
               cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker()) {
      // The lambda static invoker function is special, because it forwards or
      // clones the body of the function call operator (but is actually
      // static).
      emitLambdaStaticInvokeBody(cast<CXXMethodDecl>(funcDecl));
    } else if (funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) &&
               (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() ||
                cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator())) {
      // Implicit copy-assignment gets the same special treatment as implicit
      // copy-constructors.
      emitImplicitAssignmentOperatorBody(args);
    } else if (body) {
      if (mlir::failed(emitFunctionBody(body))) {
        return nullptr;
      }
    } else {
      // Anything without a body should have been handled above.
      llvm_unreachable("no definition for normal function");
    }

    if (mlir::failed(fn.verifyBody()))
      return nullptr;

    finishFunction(bodyRange.getEnd());
  }

  eraseEmptyAndUnusedBlocks(fn);
  return fn;
}

void CIRGenFunction::emitConstructorBody(FunctionArgList &args) {
  const auto *ctor = cast<CXXConstructorDecl>(curGD.getDecl());
  CXXCtorType ctorType = curGD.getCtorType();

  assert((cgm.getTarget().getCXXABI().hasConstructorVariants() ||
          ctorType == Ctor_Complete) &&
         "can only generate complete ctor for this ABI");

  if (ctorType == Ctor_Complete && isConstructorDelegationValid(ctor) &&
      cgm.getTarget().getCXXABI().hasConstructorVariants()) {
    emitDelegateCXXConstructorCall(ctor, Ctor_Base, args, ctor->getEndLoc());
    return;
  }

  const FunctionDecl *definition = nullptr;
  Stmt *body = ctor->getBody(definition);
  assert(definition == ctor && "emitting wrong constructor body");

  if (isa_and_nonnull<CXXTryStmt>(body)) {
    cgm.errorNYI(ctor->getSourceRange(), "emitConstructorBody: try body");
    return;
  }

  assert(!cir::MissingFeatures::incrementProfileCounter());
  assert(!cir::MissingFeatures::runCleanupsScope());

  // TODO: in restricted cases, we can emit the vbase initializers of a
  // complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member initializers.
  emitCtorPrologue(ctor, ctorType, args);

  // TODO(cir): propagate this result via mlir::LogicalResult. For now, just
  // report an error so the failure is handled.
  if (mlir::failed(emitStmt(body, true))) {
    cgm.errorNYI(ctor->getSourceRange(),
                 "emitConstructorBody: emit body statement failed.");
    return;
  }
}

/// Emits the body of the current destructor.
void CIRGenFunction::emitDestructorBody(FunctionArgList &args) {
  const CXXDestructorDecl *dtor = cast<CXXDestructorDecl>(curGD.getDecl());
  CXXDtorType dtorType = curGD.getDtorType();

  // For an abstract class, non-base destructors are never used (and can't
  // be emitted in general, because vbase dtors may not have been validated
  // by Sema), but the Itanium ABI doesn't make them optional and Clang may
  // in fact emit references to them from other compilations, so emit them
  // as functions containing a trap instruction.
  if (dtorType != Dtor_Base && dtor->getParent()->isAbstract()) {
    cgm.errorNYI(dtor->getSourceRange(), "abstract base class destructors");
    return;
  }

  Stmt *body = dtor->getBody();
  assert(!cir::MissingFeatures::incrementProfileCounter());

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor. Do so.
  if (dtorType == Dtor_Deleting) {
    RunCleanupsScope dtorEpilogue(*this);
    enterDtorCleanups(dtor, Dtor_Deleting);
    if (haveInsertPoint()) {
      QualType thisTy = dtor->getFunctionObjectParameterType();
      emitCXXDestructorCall(dtor, Dtor_Complete, /*forVirtualBase=*/false,
                            /*delegating=*/false, loadCXXThisAddress(), thisTy);
    }
    return;
  }

  // If the body is a function-try-block, enter the try before
  // anything else.
  const bool isTryBody = isa_and_nonnull<CXXTryStmt>(body);
  if (isTryBody)
    cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");

  // Enter the epilogue cleanups.
  RunCleanupsScope dtorEpilogue(*this);

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases. But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks. In the Microsoft ABI, we
  // always delegate because we might not have a definition in this TU.
  switch (dtorType) {
  case Dtor_Unified:
    llvm_unreachable("not expecting a unified dtor");
  case Dtor_Comdat:
    llvm_unreachable("not expecting a COMDAT");
  case Dtor_Deleting:
    llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    assert((body || getTarget().getCXXABI().isMicrosoft()) &&
           "can't emit a dtor without a body for non-Microsoft ABIs");

    // Enter the cleanup scopes for virtual bases.
    enterDtorCleanups(dtor, Dtor_Complete);

    if (!isTryBody) {
      QualType thisTy = dtor->getFunctionObjectParameterType();
      emitCXXDestructorCall(dtor, Dtor_Base, /*forVirtualBase=*/false,
                            /*delegating=*/false, loadCXXThisAddress(), thisTy);
      break;
    }

    // Fallthrough: act like we're in the base variant.
    [[fallthrough]];

  case Dtor_Base:
    assert(body);

    // Enter the cleanup scopes for fields and non-virtual bases.
    enterDtorCleanups(dtor, Dtor_Base);

    if (isTryBody) {
      cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
    } else if (body) {
      (void)emitStmt(body, /*useCurrentScope=*/true);
    } else {
      assert(dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.

    break;
  }

  // Jump out through the epilogue cleanups.
  dtorEpilogue.forceCleanup();

  // Exit the try if applicable.
  if (isTryBody)
    cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
}

/// Given a value of type T* that may not be to a complete object, construct
/// an l-value with the natural pointee alignment of T.
LValue CIRGenFunction::makeNaturalAlignPointeeAddrLValue(mlir::Value val,
                                                         QualType ty) {
  // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps
  // assert on the result type first.
  LValueBaseInfo baseInfo;
  CharUnits align = cgm.getNaturalTypeAlignment(ty, &baseInfo);
  return makeAddrLValue(Address(val, align), ty, baseInfo);
}

LValue CIRGenFunction::makeNaturalAlignAddrLValue(mlir::Value val,
                                                  QualType ty) {
  LValueBaseInfo baseInfo;
  CharUnits alignment = cgm.getNaturalTypeAlignment(ty, &baseInfo);
  Address addr(val, convertTypeForMem(ty), alignment);
  return makeAddrLValue(addr, ty, baseInfo);
}

clang::QualType CIRGenFunction::buildFunctionArgList(clang::GlobalDecl gd,
                                                     FunctionArgList &args) {
  const auto *fd = cast<FunctionDecl>(gd.getDecl());
  QualType retTy = fd->getReturnType();

  const auto *md = dyn_cast<CXXMethodDecl>(fd);
  if (md && md->isInstance()) {
    if (cgm.getCXXABI().hasThisReturn(gd))
      cgm.errorNYI(fd->getSourceRange(), "this return");
    else if (cgm.getCXXABI().hasMostDerivedReturn(gd))
      cgm.errorNYI(fd->getSourceRange(), "most derived return");
    cgm.getCXXABI().buildThisParam(*this, args);
  }

  if (const auto *cd = dyn_cast<CXXConstructorDecl>(fd))
    if (cd->getInheritedConstructor())
      cgm.errorNYI(fd->getSourceRange(),
                   "buildFunctionArgList: inherited constructor");

  for (auto *param : fd->parameters())
    args.push_back(param);

  if (md && (isa<CXXConstructorDecl>(md) || isa<CXXDestructorDecl>(md)))
    cgm.getCXXABI().addImplicitStructorParams(*this, retTy, args);

  return retTy;
}

/// Emit code to compute a designator that specifies the location
/// of the expression.
/// FIXME: document this function better.
LValue CIRGenFunction::emitLValue(const Expr *e) {
  // FIXME: ApplyDebugLocation DL(*this, e);
  switch (e->getStmtClass()) {
  default:
    cgm.errorNYI(e->getSourceRange(),
                 std::string("l-value not implemented for '") +
                     e->getStmtClassName() + "'");
    return LValue();
  case Expr::ConditionalOperatorClass:
    return emitConditionalOperatorLValue(cast<ConditionalOperator>(e));
  case Expr::BinaryConditionalOperatorClass:
    return emitConditionalOperatorLValue(cast<BinaryConditionalOperator>(e));
  case Expr::ArraySubscriptExprClass:
    return emitArraySubscriptExpr(cast<ArraySubscriptExpr>(e));
  case Expr::UnaryOperatorClass:
    return emitUnaryOpLValue(cast<UnaryOperator>(e));
  case Expr::StringLiteralClass:
    return emitStringLiteralLValue(cast<StringLiteral>(e));
  case Expr::MemberExprClass:
    return emitMemberExpr(cast<MemberExpr>(e));
  case Expr::CompoundLiteralExprClass:
    return emitCompoundLiteralLValue(cast<CompoundLiteralExpr>(e));
  case Expr::PredefinedExprClass:
    return emitPredefinedLValue(cast<PredefinedExpr>(e));
  case Expr::BinaryOperatorClass:
    return emitBinaryOperatorLValue(cast<BinaryOperator>(e));
  case Expr::CompoundAssignOperatorClass: {
    QualType ty = e->getType();
    if (ty->getAs<AtomicType>()) {
      cgm.errorNYI(e->getSourceRange(),
                   "CompoundAssignOperator with AtomicType");
      return LValue();
    }
    if (!ty->isAnyComplexType())
      return emitCompoundAssignmentLValue(cast<CompoundAssignOperator>(e));

    return emitComplexCompoundAssignmentLValue(
        cast<CompoundAssignOperator>(e));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return emitCallExprLValue(cast<CallExpr>(e));
  case Expr::ParenExprClass:
    return emitLValue(cast<ParenExpr>(e)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return emitLValue(cast<GenericSelectionExpr>(e)->getResultExpr());
  case Expr::DeclRefExprClass:
    return emitDeclRefLValue(cast<DeclRefExpr>(e));
  case Expr::CStyleCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::ImplicitCastExprClass:
    return emitCastLValue(cast<CastExpr>(e));
  case Expr::MaterializeTemporaryExprClass:
    return emitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(e));
  case Expr::OpaqueValueExprClass:
    return emitOpaqueValueLValue(cast<OpaqueValueExpr>(e));
  case Expr::ChooseExprClass:
    return emitLValue(cast<ChooseExpr>(e)->getChosenSubExpr());
  }
}

static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt) {
  SmallString<256> buffer;
  llvm::raw_svector_ostream out(buffer);
  out << name << cnt;
  return std::string(out.str());
}
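
// e.g. getVersionedTmpName("ref.tmp", 2) yields "ref.tmp2".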

std::string CIRGenFunction::getCounterRefTmpAsString() {
  return getVersionedTmpName("ref.tmp", counterRefTmp++);
}

std::string CIRGenFunction::getCounterAggTmpAsString() {
  return getVersionedTmpName("agg.tmp", counterAggTmp++);
}

void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address destPtr,
                                            QualType ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus)
    if (const auto *rd = ty->getAsCXXRecordDecl(); rd && rd->isEmpty())
      return;

  // Cast the dest ptr to the appropriate i8 pointer type.
  if (builder.isInt8Ty(destPtr.getElementType())) {
    cgm.errorNYI(loc, "Cast the dest ptr to the appropriate i8 pointer type");
  }

  // Get size and alignment info for this aggregate.
  const CharUnits size = getContext().getTypeSizeInChars(ty);
  if (size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (isa<VariableArrayType>(getContext().getAsArrayType(ty))) {
      cgm.errorNYI(loc,
                   "emitNullInitialization for zero size VariableArrayType");
    } else {
      return;
    }
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!cgm.getTypes().isZeroInitializable(ty)) {
    cgm.errorNYI(loc, "type is not zero initializable");
  }

  // In LLVM Codegen: otherwise, just memset the whole thing to zero using
  // Builder.CreateMemSet. In CIR just emit a store of #cir.zero to the
  // respective address.
  // Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
  const mlir::Value zeroValue = builder.getNullValue(convertType(ty), loc);
  builder.createStore(loc, zeroValue, destPtr);
}
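
// Illustrative result for 'struct S s = {};' (CIR syntax approximate):
//   %zero = cir.const #cir.zero : !rec_S
//   cir.store %zero, %s_addr : !rec_S, !cir.ptr<!rec_S>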

// TODO(cir): should be shared with LLVM codegen.
bool CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *ce) {
  const Expr *e = ce->getSubExpr();

  if (ce->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(e->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(ce)) {
    // And that glvalue casts are never null.
    if (ice->isGLValue())
      return false;
  }

  return true;
}
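
// For example, a base-class conversion of 'this' or of a glvalue needs no
// null check, while the same conversion of an arbitrary pointer value does,
// since that pointer may be null.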

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
mlir::Value
CIRGenFunction::emitArrayLength(const clang::ArrayType *origArrayType,
                                QualType &baseType, Address &addr) {
  const clang::ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size. Note that
  // this is the size of the VLA in bytes, not its size in elements.
  if (isa<VariableArrayType>(arrayType)) {
    cgm.errorNYI(*currSrcLoc, "VLAs");
    return builder.getConstInt(*currSrcLoc, sizeTy, 0);
  }

  uint64_t countFromCLAs = 1;
  QualType eltType;

  auto cirArrayType = mlir::dyn_cast<cir::ArrayType>(addr.getElementType());

  while (cirArrayType) {
    countFromCLAs *= cirArrayType.getSize();
    eltType = arrayType->getElementType();

    cirArrayType =
        mlir::dyn_cast<cir::ArrayType>(cirArrayType.getElementType());

    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!cirArrayType || arrayType) &&
           "CIR and Clang types are out-of-sync");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    cgm.errorNYI(*currSrcLoc, "length for non-array underlying types");
  }

  baseType = eltType;
  return builder.getConstInt(*currSrcLoc, sizeTy, countFromCLAs);
}

mlir::Value CIRGenFunction::emitAlignmentAssumption(
    mlir::Value ptrValue, QualType ty, SourceLocation loc,
    SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue) {
  return cir::AssumeAlignedOp::create(builder, getLoc(assumptionLoc), ptrValue,
                                      alignment, offsetValue);
}

mlir::Value CIRGenFunction::emitAlignmentAssumption(
    mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc,
    int64_t alignment, mlir::Value offsetValue) {
  QualType ty = expr->getType();
  SourceLocation loc = expr->getExprLoc();
  return emitAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment,
                                 offsetValue);
}

CIRGenFunction::VlaSizePair CIRGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla =
      cgm.getASTContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

CIRGenFunction::VlaSizePair
CIRGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  mlir::Value numElements;

  QualType elementType;
  do {
    elementType = type->getElementType();
    mlir::Value vlaSize = vlaSizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize.getType() == sizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.

      numElements =
          builder.createMul(numElements.getLoc(), numElements, vlaSize,
                            cir::OverflowBehavior::NoUnsignedWrap);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  assert(numElements && "Undefined elements number");
  return {numElements, elementType};
}
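
// For example, for 'int a[n][m]' this produces the pair (n * m, int): the
// two VLA bounds are multiplied together (with wrapping marked as UB) and
// the base element type 'int' is returned alongside the element count.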

// TODO(cir): Most of this function can be shared between CIRGen
// and traditional LLVM codegen
void CIRGenFunction::emitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {
    case Type::CountAttributed:
    case Type::PackIndexing:
    case Type::ArrayParameter:
    case Type::HLSLAttributedResource:
    case Type::HLSLInlineSpirv:
    case Type::PredefinedSugar:
      cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
      break;

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable(
          "dependent type must be resolved before the CIR codegen");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::Record:
    case Type::Enum:
    case Type::Using:
    case Type::TemplateSpecialization:
    case Type::ObjCTypeParam:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
    case Type::BitInt:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<clang::AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<clang::DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<clang::PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<clang::BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<clang::ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<clang::MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<clang::ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<clang::VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *sizeExpr = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        mlir::Value &entry = vlaSizeMap[sizeExpr];
        if (!entry) {
          mlir::Value size = emitScalarExpr(sizeExpr);

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          // FIXME: What about when size's type is larger than size_t?
          entry = builder.createIntCast(size, sizeTy);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<clang::FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::BTFTagAttributed:
    case Type::SubstTemplateTypeParm:
    case Type::MacroQualified:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      emitIgnoredExpr(cast<clang::TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<clang::AtomicType>(ty)->getValueType();
      break;

    case Type::Pipe:
      type = cast<clang::PipeType>(ty)->getElementType();
      break;
    }
  } while (type->isVariablyModifiedType());
}
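
// For example, for 'int (*p)[n]' the walk unwraps the pointer, reaches the
// VLA 'int[n]', emits the size expression 'n' once, and caches the value in
// vlaSizeMap keyed by that expression.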

Address CIRGenFunction::emitVAListRef(const Expr *e) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return emitPointerWithAlignment(e);
  return emitLValue(e).getAddress();
}

} // namespace clang::CIRGen