CIRGenFunction.cpp
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Internal per-function state used for AST-to-ClangIR code gen
//
//===----------------------------------------------------------------------===//

#include "CIRGenFunction.h"

#include "CIRGenCXXABI.h"
#include "CIRGenCall.h"
#include "CIRGenValue.h"
#include "mlir/IR/Location.h"
#include "clang/AST/Attr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/CIR/MissingFeatures.h"

#include <cassert>

namespace clang::CIRGen {

CIRGenFunction::CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
                               bool suppressNewContext)
    : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
  ehStack.setCGF(this);
}

CIRGenFunction::~CIRGenFunction() {}

// This is copied from clang/lib/CodeGen/CodeGenFunction.cpp
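// For example: 'int' and 'int *' evaluate as TEK_Scalar, '_Complex double'
// as TEK_Complex, and 'int[4]' or 'struct S' as TEK_Aggregate; '_Atomic(T)'
// is classified by its underlying type T.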
cir::TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::BitInt:
    case Type::HLSLAttributedResource:
    case Type::HLSLInlineSpirv:
      return cir::TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return cir::TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ArrayParameter:
      return cir::TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

mlir::Type CIRGenFunction::convertTypeForMem(QualType t) {
  return cgm.getTypes().convertTypeForMem(t);
}

mlir::Type CIRGenFunction::convertType(QualType t) {
  return cgm.getTypes().convertType(t);
}

mlir::Location CIRGenFunction::getLoc(SourceLocation srcLoc) {
  // Some AST nodes might contain invalid source locations (e.g.
  // CXXDefaultArgExpr), workaround that to still get something out.
  if (srcLoc.isValid()) {
    SourceManager &sm = getContext().getSourceManager();
    PresumedLoc pLoc = sm.getPresumedLoc(srcLoc);
    StringRef filename = pLoc.getFilename();
    return mlir::FileLineColLoc::get(builder.getStringAttr(filename),
                                     pLoc.getLine(), pLoc.getColumn());
  }
  // Do our best...
  assert(currSrcLoc && "expected to inherit some source location");
  return *currSrcLoc;
}

mlir::Location CIRGenFunction::getLoc(SourceRange srcLoc) {
  // Some AST nodes might contain invalid source locations (e.g.
  // CXXDefaultArgExpr), workaround that to still get something out.
  if (srcLoc.isValid()) {
    mlir::Location beg = getLoc(srcLoc.getBegin());
    mlir::Location end = getLoc(srcLoc.getEnd());
    SmallVector<mlir::Location, 2> locs = {beg, end};
    mlir::Attribute metadata;
    return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
  }
  if (currSrcLoc) {
    return *currSrcLoc;
  }
  // We're brave, but time to give up.
  return builder.getUnknownLoc();
}

mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) {
  SmallVector<mlir::Location, 2> locs = {lhs, rhs};
  mlir::Attribute metadata;
  return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
}
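// For example, a statement spanning several lines of input.c becomes a fused
// location, roughly loc(fused["input.c":3:3, "input.c":5:9]) in MLIR's
// textual form (illustrative; the exact rendering may differ).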

bool CIRGenFunction::containsLabel(const Stmt *s, bool ignoreCaseStmts) {
  // Null statement, not a label!
  if (!s)
    return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) { ... foo: bar(); } goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(s))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we
  // have to emit the code.
  if (isa<SwitchCase>(s) && !ignoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore case statements when we
  // recursively process the sub-statements of the switch. If we haven't
  // encountered a switch statement, we treat case statements like labels, but
  // if we are processing a switch statement, case statements are expected.
  if (isa<SwitchStmt>(s))
    ignoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  return std::any_of(s->child_begin(), s->child_end(),
                     [=](const Stmt *subStmt) {
                       return containsLabel(subStmt, ignoreCaseStmts);
                     });
}

/// If the specified expression does not fold to a constant, or if it does but
/// contains a label, return false. If it constant folds, return true and set
/// the boolean result in resultBool.
bool CIRGenFunction::constantFoldsToBool(const Expr *cond, bool &resultBool,
                                         bool allowLabels) {
  llvm::APSInt resultInt;
  if (!constantFoldsToSimpleInteger(cond, resultInt, allowLabels))
    return false;

  resultBool = resultInt.getBoolValue();
  return true;
}

/// If the specified expression does not fold to a constant, or if it does
/// fold but contains a label, return false. If it constant folds, return
/// true and set the folded value.
bool CIRGenFunction::constantFoldsToSimpleInteger(const Expr *cond,
                                                  llvm::APSInt &resultInt,
                                                  bool allowLabels) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult result;
  if (!cond->EvaluateAsInt(result, getContext()))
    return false; // Not foldable, not integer or not fully evaluatable.

  llvm::APSInt intValue = result.Val.getInt();
  if (!allowLabels && containsLabel(cond))
    return false; // Contains a label.

  resultInt = intValue;
  return true;
}
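// For example, the condition in 'if (sizeof(int) == 4) { ... }' folds to a
// constant true here, letting callers emit only the taken branch (provided
// the discarded branch contains no labels).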

void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
                                            CharUnits alignment) {
  if (!type->isVoidType()) {
    mlir::Value addr = emitAlloca("__retval", convertType(type), loc, alignment,
                                  /*insertIntoFnEntryBlock=*/false);
    fnRetAlloca = addr;
    returnValue = Address(addr, alignment);
  }
}
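// For a function returning 'int', the slot prints roughly as
//   %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"] {alignment = 4 : i64}
// (illustrative textual CIR; the exact syntax may vary across versions).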

void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
                             mlir::Location loc, CharUnits alignment,
                             bool isParam) {
  assert(isa<NamedDecl>(var) && "Needs a named decl");
  assert(!symbolTable.count(var) && "not supposed to be available just yet");

  auto allocaOp = addrVal.getDefiningOp<cir::AllocaOp>();
  assert(allocaOp && "expected cir::AllocaOp");

  if (isParam)
    allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
  if (ty->isReferenceType() || ty.isConstQualified())
    allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext()));

  symbolTable.insert(var, allocaOp);
}
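// For a parameter 'const int p' the backing slot is roughly
//   %1 = cir.alloca !s32i, !cir.ptr<!s32i>, ["p", init, const]
// where 'init' marks a parameter slot and 'const' a const-qualified or
// reference variable (illustrative textual CIR).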

void CIRGenFunction::LexicalScope::cleanup() {
  CIRGenBuilderTy &builder = cgf.builder;
  LexicalScope *localScope = cgf.curLexScope;

  auto applyCleanup = [&]() {
    if (performCleanup) {
      // ApplyDebugLocation
      assert(!cir::MissingFeatures::generateDebugInfo());
      forceCleanup();
    }
  };

  // Cleanups are done right before codegen resumes a scope. This is where
  // objects are destroyed. Process all return blocks.
  // TODO(cir): Handle returning from a switch statement through a cleanup
  // block. We can't simply jump to the cleanup block, because the cleanup
  // block is not part of the case region. Either reemit all cleanups in the
  // return block or wait for MLIR structured control flow to support early
  // exits.
  llvm::SmallVector<mlir::Block *> retBlocks;
  for (mlir::Block *retBlock : localScope->getRetBlocks()) {
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.setInsertionPointToEnd(retBlock);
    retBlocks.push_back(retBlock);
    mlir::Location retLoc = localScope->getRetLoc(retBlock);
    emitReturn(retLoc);
  }

  auto insertCleanupAndLeave = [&](mlir::Block *insPt) {
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.setInsertionPointToEnd(insPt);

    // If we still don't have a cleanup block, it means that `applyCleanup`
    // below might be able to get us one.
    mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);

    // Leverage and defers to RunCleanupsScope's dtor and scope handling.
    applyCleanup();

    // If we now have one after `applyCleanup`, hook it up properly.
    if (!cleanupBlock && localScope->getCleanupBlock(builder)) {
      cleanupBlock = localScope->getCleanupBlock(builder);
      cir::BrOp::create(builder, insPt->back().getLoc(), cleanupBlock);
      if (!cleanupBlock->mightHaveTerminator()) {
        mlir::OpBuilder::InsertionGuard guard(builder);
        builder.setInsertionPointToEnd(cleanupBlock);
        cir::YieldOp::create(builder, localScope->endLoc);
      }
    }

    if (localScope->depth == 0) {
      // Reached the end of the function.
      // Special handling only for the single return block case.
      if (localScope->getRetBlocks().size() == 1) {
        mlir::Block *retBlock = localScope->getRetBlocks()[0];
        mlir::Location retLoc = localScope->getRetLoc(retBlock);
        if (retBlock->getUses().empty()) {
          retBlock->erase();
        } else {
          // Thread return block via cleanup block.
          if (cleanupBlock) {
            for (mlir::BlockOperand &blockUse : retBlock->getUses()) {
              cir::BrOp brOp = mlir::cast<cir::BrOp>(blockUse.getOwner());
              brOp.setSuccessor(cleanupBlock);
            }
          }

          cir::BrOp::create(builder, retLoc, retBlock);
          return;
        }
      }
      emitImplicitReturn();
      return;
    }

    // End of any local scope != function.
    // Ternary ops have to deal with matching arms for yielding types and
    // return a value, so they do their own cir.yield insertion.
    if (!localScope->isTernary() && !insPt->mightHaveTerminator()) {
      !retVal ? cir::YieldOp::create(builder, localScope->endLoc)
              : cir::YieldOp::create(builder, localScope->endLoc, retVal);
    }
  };

  // If a cleanup block has been created at some point, branch to it
  // and set the insertion point to continue at the cleanup block.
  // Terminators are then inserted either in the cleanup block or
  // inline in this current block.
  mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
  if (cleanupBlock)
    insertCleanupAndLeave(cleanupBlock);

  // Now deal with any pending block wrap up like implicit end of
  // scope.

  mlir::Block *curBlock = builder.getBlock();
  if (isGlobalInit() && !curBlock)
    return;
  if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
    return;

  // Get rid of any empty block at the end of the scope.
  bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
  if (!entryBlock && curBlock->empty()) {
    curBlock->erase();
    for (mlir::Block *retBlock : retBlocks) {
      if (retBlock->getUses().empty())
        retBlock->erase();
    }
    return;
  }

  // If there's a cleanup block, branch to it, nothing else to do.
  if (cleanupBlock) {
    cir::BrOp::create(builder, curBlock->back().getLoc(), cleanupBlock);
    return;
  }

  // No pre-existent cleanup block, emit cleanup code and yield/return.
  insertCleanupAndLeave(curBlock);
}

cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
  CIRGenBuilderTy &builder = cgf.getBuilder();

  // If we are on a coroutine, add the coro_end builtin call.
  assert(!cir::MissingFeatures::coroEndBuiltinCall());

  auto fn = dyn_cast<cir::FuncOp>(cgf.curFn);
  assert(fn && "emitReturn from non-function");
  if (!fn.getFunctionType().hasVoidReturn()) {
    // Load the value from `__retval` and return it via the `cir.return` op.
    auto value = cir::LoadOp::create(
        builder, loc, fn.getFunctionType().getReturnType(), *cgf.fnRetAlloca);
    return cir::ReturnOp::create(builder, loc,
                                 llvm::ArrayRef(value.getResult()));
  }
  return cir::ReturnOp::create(builder, loc);
}
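// For 'int f() { return 42; }' the return path loads the slot and returns it,
// roughly:
//   %2 = cir.load %0 : !cir.ptr<!s32i>, !s32i
//   cir.return %2 : !s32i
// (illustrative textual CIR).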

// This is copied from CodeGenModule::MayDropFunctionReturn. This is a
// candidate for sharing between CIRGen and CodeGen.
static bool mayDropFunctionReturn(const ASTContext &astContext,
                                  QualType returnType) {
  // We can't just discard the return value for a record type with a complex
  // destructor or a non-trivially copyable type.
  if (const auto *classDecl = returnType->getAsCXXRecordDecl())
    return classDecl->hasTrivialDestructor();
  return returnType.isTriviallyCopyableType(astContext);
}
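// For example, the unused result of 'std::string g();' cannot be dropped (its
// destructor must still run), while the result of 'int h();' can be discarded
// safely.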

void CIRGenFunction::LexicalScope::emitImplicitReturn() {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  LexicalScope *localScope = cgf.curLexScope;

  const auto *fd = cast<clang::FunctionDecl>(cgf.curGD.getDecl());

  // In C++, flowing off the end of a non-void function is always undefined
  // behavior. In C, flowing off the end of a non-void function is undefined
  // behavior only if the non-existent return value is used by the caller.
  // That influences whether the terminating op is trap, unreachable, or
  // return.
  if (cgf.getLangOpts().CPlusPlus && !fd->hasImplicitReturnZero() &&
      !cgf.sawAsmBlock && !fd->getReturnType()->isVoidType() &&
      builder.getInsertionBlock()) {
    bool shouldEmitUnreachable =
        cgf.cgm.getCodeGenOpts().StrictReturn ||
        !mayDropFunctionReturn(fd->getASTContext(), fd->getReturnType());

    if (shouldEmitUnreachable) {
      if (cgf.cgm.getCodeGenOpts().OptimizationLevel == 0)
        cir::TrapOp::create(builder, localScope->endLoc);
      else
        cir::UnreachableOp::create(builder, localScope->endLoc);
      builder.clearInsertionPoint();
      return;
    }
  }

  (void)emitReturn(localScope->endLoc);
}

cir::TryOp CIRGenFunction::LexicalScope::getClosestTryParent() {
  LexicalScope *scope = this;
  while (scope) {
    if (scope->isTry())
      return scope->getTry();
    scope = scope->parentScope;
  }
  return nullptr;
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static mlir::Value emitArgumentDemotion(CIRGenFunction &cgf, const VarDecl *var,
                                        mlir::Value value) {
  mlir::Type ty = cgf.convertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value.getType() == ty)
    return value;

  assert((mlir::isa<cir::IntType>(ty) || cir::isAnyFloatingPointType(ty)) &&
         "unexpected promotion type");

  if (mlir::isa<cir::IntType>(ty))
    return cgf.getBuilder().CIRBaseBuilderTy::createIntCast(value, ty);

  return cgf.getBuilder().createFloatingCast(value, ty);
}
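// Example (K&R C): given 'float f(x) float x; { ... }', the caller promotes
// the argument to double, so the incoming value is demoted back to float
// before it is stored into the parameter's alloca.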

void CIRGenFunction::emitFunctionProlog(const FunctionArgList &args,
                                        mlir::Block *entryBB,
                                        const FunctionDecl *fd,
                                        SourceLocation bodyBeginLoc) {
  // Naked functions don't have prologues.
  if (fd && fd->hasAttr<NakedAttr>()) {
    cgm.errorNYI(bodyBeginLoc, "naked function decl");
  }

  // Declare all the function arguments in the symbol table.
  for (const auto nameValue : llvm::zip(args, entryBB->getArguments())) {
    const VarDecl *paramVar = std::get<0>(nameValue);
    mlir::Value paramVal = std::get<1>(nameValue);
    CharUnits alignment = getContext().getDeclAlign(paramVar);
    mlir::Location paramLoc = getLoc(paramVar->getSourceRange());
    paramVal.setLoc(paramLoc);

    mlir::Value addrVal =
        emitAlloca(cast<NamedDecl>(paramVar)->getName(),
                   convertType(paramVar->getType()), paramLoc, alignment,
                   /*insertIntoFnEntryBlock=*/true);

    declare(addrVal, paramVar, paramVar->getType(), paramLoc, alignment,
            /*isParam=*/true);

    setAddrOfLocalVar(paramVar, Address(addrVal, alignment));

    bool isPromoted = isa<ParmVarDecl>(paramVar) &&
                      cast<ParmVarDecl>(paramVar)->isKNRPromoted();
    assert(!cir::MissingFeatures::constructABIArgDirectExtend());
    if (isPromoted)
      paramVal = emitArgumentDemotion(*this, paramVar, paramVal);

    // Location of the store to the param storage tracked as beginning of
    // the function body.
    mlir::Location fnBodyBegin = getLoc(bodyBeginLoc);
    builder.CIRBaseBuilderTy::createStore(fnBodyBegin, paramVal, addrVal);
  }
  assert(builder.getInsertionBlock() && "Should be valid");
}
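// For 'int add(int a, int b)', each parameter gets an entry-block alloca and
// a store of the corresponding block argument, roughly:
//   %2 = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
//   cir.store %arg0, %2 : !s32i, !cir.ptr<!s32i>
// (illustrative textual CIR).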

void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
                                   cir::FuncOp fn, cir::FuncType funcType,
                                   FunctionArgList args, SourceLocation loc,
                                   SourceLocation startLoc) {
  assert(!curFn &&
         "CIRGenFunction can only be used for one function at a time");

  curFn = fn;

  const Decl *d = gd.getDecl();

  didCallStackSave = false;
  curCodeDecl = d;
  const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
  curFuncDecl = d ? d->getNonClosureContext() : nullptr;

  prologueCleanupDepth = ehStack.stable_begin();

  mlir::Block *entryBB = &fn.getBlocks().front();
  builder.setInsertionPointToStart(entryBB);

  // Determine the function body begin location for the prolog.
  // If fd is null or has no body, use startLoc as fallback.
  SourceLocation bodyBeginLoc = startLoc;
  if (fd) {
    if (Stmt *body = fd->getBody())
      bodyBeginLoc = body->getBeginLoc();
    else
      bodyBeginLoc = fd->getLocation();
  }

  emitFunctionProlog(args, entryBB, fd, bodyBeginLoc);

  // When the current function is not void, create an address to store the
  // result value.
  if (!returnType->isVoidType()) {
    // Determine the function body end location.
    // If fd is null or has no body, use loc as fallback.
    SourceLocation bodyEndLoc = loc;
    if (fd) {
      if (Stmt *body = fd->getBody())
        bodyEndLoc = body->getEndLoc();
      else
        bodyEndLoc = fd->getLocation();
    }
    emitAndUpdateRetAlloca(returnType, getLoc(bodyEndLoc),
                           getContext().getTypeAlignInChars(returnType));
  }

  if (isa_and_nonnull<CXXMethodDecl>(d) &&
      cast<CXXMethodDecl>(d)->isInstance()) {
    cgm.getCXXABI().emitInstanceFunctionProlog(loc, *this);

    const auto *md = cast<CXXMethodDecl>(d);
    if (md->getParent()->isLambda() && md->getOverloadedOperator() == OO_Call) {
      // We're in a lambda.
      auto fn = dyn_cast<cir::FuncOp>(curFn);
      assert(fn && "lambda in non-function region");
      fn.setLambda(true);

      // Figure out the captures.
      md->getParent()->getCaptureFields(lambdaCaptureFields,
                                        lambdaThisCaptureField);
      if (lambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this' - either by
        // value or by reference, make sure CXXThisValue points to the correct
        // object.

        // Get the lvalue for the field (which is a copy of the enclosing object
        // or contains the address of the enclosing object).
        LValue thisFieldLValue =
            emitLValueForLambdaField(lambdaThisCaptureField);
        if (!lambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its
          // address. Sign this pointer.
          cxxThisValue = thisFieldLValue.getPointer();
        } else {
          // Load the lvalue pointed to by the field, since '*this' was captured
          // by reference.
          cxxThisValue =
              emitLoadOfLValue(thisFieldLValue, SourceLocation()).getValue();
        }
      }
      for (auto *fd : md->getParent()->fields()) {
        if (fd->hasCapturedVLAType())
          cgm.errorNYI(loc, "lambda captured VLA type");
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The fast
      // register allocator would be happier...
      cxxThisValue = cxxabiThisValue;
    }

  }
}
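// In the lambda path above, given e.g.
//   struct S { int x; auto get() { return [this] { return x; }; } };
// the call operator rebuilds cxxThisValue from the captured field so the
// implicit member access 'x' resolves against the enclosing S object.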

  for (cir::BlockAddressOp &blockAddress : cgm.unresolvedBlockAddressToLabel) {
    cir::LabelOp labelOp =
        cgm.lookupBlockAddressInfo(blockAddress.getBlockAddrInfo());
    assert(labelOp && "expected cir.labelOp to already be emitted");
    cgm.updateResolvedBlockAddress(blockAddress, labelOp);
  }
  cgm.unresolvedBlockAddressToLabel.clear();
}

    return;

  llvm::SmallVector<mlir::Block *> succesors;
  llvm::SmallVector<mlir::ValueRange> rangeOperands;
  mlir::OpBuilder::InsertionGuard guard(builder);
  builder.setInsertionPointToEnd(indirectGotoBlock);
  for (auto &[blockAdd, labelOp] : cgm.blockAddressToLabel) {
    succesors.push_back(labelOp->getBlock());
    rangeOperands.push_back(labelOp->getBlock()->getArguments());
  }
  cir::IndirectBrOp::create(builder, builder.getUnknownLoc(),
                            indirectGotoBlock->getArgument(0), false,
                            rangeOperands, succesors);
  cgm.blockAddressToLabel.clear();
}

void CIRGenFunction::finishFunction(SourceLocation endLoc) {
  // Resolve block address-to-label mappings, then emit the indirect branch
  // with the corresponding targets.

  // If a label address was taken but no indirect goto was used, we can't remove
  // the block argument here. Instead, we mark the 'indirectbr' op
  // as poison so that the cleanup can be deferred to lowering, since the
  // verifier doesn't allow the 'indirectbr' target address to be null.
  if (indirectGotoBlock && indirectGotoBlock->hasNoPredecessors()) {
    auto indrBr = cast<cir::IndirectBrOp>(indirectGotoBlock->front());
    indrBr.setPoison(true);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  // TODO(cir): Use prologueCleanupDepth here.
  bool hasCleanups = ehStack.stable_begin() != prologueCleanupDepth;
  if (hasCleanups) {
    assert(!cir::MissingFeatures::generateDebugInfo());
    // FIXME(cir): should we clearInsertionPoint? breaks many testcases
    popCleanupBlocks(prologueCleanupDepth);
  }
}

mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
  // We start with function level scope for variables.
  SymTableScopeTy varScope(symbolTable);

  if (const CompoundStmt *block = dyn_cast<CompoundStmt>(body))
    return emitCompoundStmtWithoutScope(*block);

  return emitStmt(body, /*useCurrentScope=*/true);
}

static void eraseEmptyAndUnusedBlocks(cir::FuncOp func) {
  // Remove any leftover blocks that are unreachable and empty, since they do
  // not represent unreachable code useful for warnings nor anything deemed
  // useful in general.
  SmallVector<mlir::Block *> blocksToDelete;
  for (mlir::Block &block : func.getBlocks()) {
    if (block.empty() && block.getUses().empty())
      blocksToDelete.push_back(&block);
  }
  for (mlir::Block *block : blocksToDelete)
    block->erase();
}

cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
                                         cir::FuncType funcType) {
  const auto *funcDecl = cast<FunctionDecl>(gd.getDecl());
  curGD = gd;

  if (funcDecl->isInlineBuiltinDeclaration()) {
    // When generating code for a builtin with an inline declaration, use a
    // mangled name to hold the actual body, while keeping an external
    // declaration in case the function pointer is referenced somewhere.
    std::string fdInlineName = (cgm.getMangledName(funcDecl) + ".inline").str();
    cir::FuncOp clone =
        mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
    if (!clone) {
      mlir::OpBuilder::InsertionGuard guard(builder);
      builder.setInsertionPoint(fn);
      clone = cir::FuncOp::create(builder, fn.getLoc(), fdInlineName,
                                  fn.getFunctionType());
      clone.setLinkage(cir::GlobalLinkageKind::InternalLinkage);
      clone.setSymVisibility("private");
      clone.setInlineKind(cir::InlineKind::AlwaysInline);
    }
    fn.setLinkage(cir::GlobalLinkageKind::ExternalLinkage);
    fn.setSymVisibility("private");
    fn = clone;
  } else {
    // Detect the unusual situation where an inline version is shadowed by a
    // non-inline version. In that case we should pick the external one
    // everywhere. That's GCC behavior too.
    for (const FunctionDecl *pd = funcDecl->getPreviousDecl(); pd;
         pd = pd->getPreviousDecl()) {
      if (LLVM_UNLIKELY(pd->isInlineBuiltinDeclaration())) {
        std::string inlineName = funcDecl->getName().str() + ".inline";
        if (auto inlineFn = mlir::cast_or_null<cir::FuncOp>(
                cgm.getGlobalValue(inlineName))) {
          // Replace all uses of the .inline function with the regular function.
          // FIXME: This performs a linear walk over the module. Introduce some
          // caching here.
          if (inlineFn
                  .replaceAllSymbolUses(fn.getSymNameAttr(), cgm.getModule())
                  .failed())
            llvm_unreachable("Failed to replace inline builtin symbol uses");
          inlineFn.erase();
        }
        break;
      }
    }
  }

  SourceLocation loc = funcDecl->getLocation();
  Stmt *body = funcDecl->getBody();
  SourceRange bodyRange =
      body ? body->getSourceRange() : funcDecl->getLocation();

  SourceLocRAIIObject fnLoc{*this, loc.isValid() ? getLoc(loc)
                                                 : builder.getUnknownLoc()};

  auto validMLIRLoc = [&](clang::SourceLocation clangLoc) {
    return clangLoc.isValid() ? getLoc(clangLoc) : builder.getUnknownLoc();
  };
  const mlir::Location fusedLoc = mlir::FusedLoc::get(
      &getMLIRContext(),
      {validMLIRLoc(bodyRange.getBegin()), validMLIRLoc(bodyRange.getEnd())});
  mlir::Block *entryBB = fn.addEntryBlock();

  FunctionArgList args;
  QualType retTy = buildFunctionArgList(gd, args);

  // Create a scope in the symbol table to hold variable declarations.
  SymTableScopeTy varScope(symbolTable);
  {
    LexicalScope lexScope(*this, fusedLoc, entryBB);

    // Emit the standard function prologue.
    startFunction(gd, retTy, fn, funcType, args, loc, bodyRange.getBegin());

    // Save parameters for coroutine function.
    if (body && isa_and_nonnull<CoroutineBodyStmt>(body))
      llvm::append_range(fnArgs, funcDecl->parameters());

    if (isa<CXXDestructorDecl>(funcDecl)) {
      emitDestructorBody(args);
    } else if (isa<CXXConstructorDecl>(funcDecl)) {
      emitConstructorBody(args);
    } else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice &&
               funcDecl->hasAttr<CUDAGlobalAttr>()) {
      getCIRGenModule().errorNYI(bodyRange, "CUDA kernel");
    } else if (isa<CXXMethodDecl>(funcDecl) &&
               cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker()) {
      // The lambda static invoker function is special, because it forwards or
      // clones the body of the function call operator (but is actually
      // static).
      emitLambdaStaticInvokeBody(cast<CXXMethodDecl>(funcDecl));
    } else if (funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) &&
               (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() ||
                cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator())) {
      // Implicit copy-assignment gets the same special treatment as implicit
      // copy-constructors.
      emitImplicitAssignmentOperatorBody(args);
    } else if (body) {
      // Emit standard function body.
      if (mlir::failed(emitFunctionBody(body))) {
        return nullptr;
      }
    } else {
      // Anything without a body should have been handled above.
      llvm_unreachable("no definition for normal function");
    }

    if (mlir::failed(fn.verifyBody()))
      return nullptr;

    finishFunction(bodyRange.getEnd());
  }

  eraseEmptyAndUnusedBlocks(fn);
  return fn;
}
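// In the inline-builtin path above, a declaration such as
//   extern inline __attribute__((gnu_inline, always_inline)) int abs(int a) {
//     return a < 0 ? -a : a;
//   }
// has its body emitted under the mangled name 'abs.inline', while plain 'abs'
// remains an external reference unless a non-inline definition shadows it
// (illustrative example).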

void CIRGenFunction::emitConstructorBody(FunctionArgList &args) {
  const auto *ctor = cast<CXXConstructorDecl>(curGD.getDecl());
  CXXCtorType ctorType = curGD.getCtorType();

  assert((cgm.getTarget().getCXXABI().hasConstructorVariants() ||
          ctorType == Ctor_Complete) &&
         "can only generate complete ctor for this ABI");

  cgm.setCXXSpecialMemberAttr(cast<cir::FuncOp>(curFn), ctor);

  if (ctorType == Ctor_Complete && isConstructorDelegationValid(ctor) &&
      cgm.getTarget().getCXXABI().hasConstructorVariants()) {
    emitDelegateCXXConstructorCall(ctor, Ctor_Base, args, ctor->getEndLoc());
    return;
  }

  const FunctionDecl *definition = nullptr;
  Stmt *body = ctor->getBody(definition);
  assert(definition == ctor && "emitting wrong constructor body");

  if (isa_and_nonnull<CXXTryStmt>(body)) {
    cgm.errorNYI(ctor->getSourceRange(), "emitConstructorBody: try body");
    return;
  }

  assert(!cir::MissingFeatures::incrementProfileCounter());
  assert(!cir::MissingFeatures::runCleanupsScope());

  // TODO: in restricted cases, we can emit the vbase initializers of a
  // complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member initializers.
  emitCtorPrologue(ctor, ctorType, args);

  // TODO(cir): propagate this result via mlir::LogicalResult. Just unreachable
  // now just to have it handled.
  if (mlir::failed(emitStmt(body, true))) {
    cgm.errorNYI(ctor->getSourceRange(),
                 "emitConstructorBody: emit body statement failed.");
    return;
  }
}

/// Emits the body of the current destructor.
void CIRGenFunction::emitDestructorBody(FunctionArgList &args) {
  const CXXDestructorDecl *dtor = cast<CXXDestructorDecl>(curGD.getDecl());
  CXXDtorType dtorType = curGD.getDtorType();

  cgm.setCXXSpecialMemberAttr(cast<cir::FuncOp>(curFn), dtor);

  // For an abstract class, non-base destructors are never used (and can't
  // be emitted in general, because vbase dtors may not have been validated
  // by Sema), but the Itanium ABI doesn't make them optional and Clang may
  // in fact emit references to them from other compilations, so emit them
  // as functions containing a trap instruction.
  if (dtorType != Dtor_Base && dtor->getParent()->isAbstract()) {
    cgm.errorNYI(dtor->getSourceRange(), "abstract base class destructors");
    return;
  }

  Stmt *body = dtor->getBody();

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor. Do so.
  if (dtorType == Dtor_Deleting || dtorType == Dtor_VectorDeleting) {
    if (dtorType == Dtor_VectorDeleting)
      cgm.errorNYI(dtor->getSourceRange(), "emitConditionalArrayDtorCall");
    RunCleanupsScope dtorEpilogue(*this);
    enterDtorCleanups(dtor, Dtor_Deleting);
    if (haveInsertPoint()) {
      QualType thisTy = dtor->getFunctionObjectParameterType();
      emitCXXDestructorCall(dtor, Dtor_Complete, /*forVirtualBase=*/false,
                            /*delegating=*/false, loadCXXThisAddress(), thisTy);
    }
    return;
  }

  // If the body is a function-try-block, enter the try before
  // anything else.
  const bool isTryBody = isa_and_nonnull<CXXTryStmt>(body);
  if (isTryBody)
    cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");

  // Enter the epilogue cleanups.
  RunCleanupsScope dtorEpilogue(*this);

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases. But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks. In the Microsoft ABI, we
  // always delegate because we might not have a definition in this TU.
  switch (dtorType) {
  case Dtor_Unified:
    llvm_unreachable("not expecting a unified dtor");
  case Dtor_Comdat:
    llvm_unreachable("not expecting a COMDAT");
  case Dtor_Deleting:
  case Dtor_VectorDeleting:
    llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    assert((body || getTarget().getCXXABI().isMicrosoft()) &&
           "can't emit a dtor without a body for non-Microsoft ABIs");

    // Enter the cleanup scopes for virtual bases.
    enterDtorCleanups(dtor, Dtor_Complete);

    if (!isTryBody) {
      QualType thisTy = dtor->getFunctionObjectParameterType();
      emitCXXDestructorCall(dtor, Dtor_Base, /*forVirtualBase=*/false,
                            /*delegating=*/false, loadCXXThisAddress(), thisTy);
      break;
    }

    // Fallthrough: act like we're in the base variant.
    [[fallthrough]];

  case Dtor_Base:
    assert(body);

    // Enter the cleanup scopes for fields and non-virtual bases.
    enterDtorCleanups(dtor, Dtor_Base);

    if (isTryBody) {
      cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
    } else if (body) {
      (void)emitStmt(body, /*useCurrentScope=*/true);
    } else {
      assert(dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.

    break;
  }

  // Jump out through the epilogue cleanups.
  dtorEpilogue.forceCleanup();

  // Exit the try if applicable.
  if (isTryBody)
    cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
}

/// Given a value of type T* that may not be to a complete object, construct
/// an l-value with the natural pointee alignment of T.
LValue CIRGenFunction::makeNaturalAlignPointeeAddrLValue(mlir::Value val,
                                                         QualType ty) {
  // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps
  // assert on the result type first.
  LValueBaseInfo baseInfo;
  CharUnits align = cgm.getNaturalTypeAlignment(ty, &baseInfo);
  return makeAddrLValue(Address(val, align), ty, baseInfo);
}

LValue CIRGenFunction::makeNaturalAlignAddrLValue(mlir::Value val,
                                                  QualType ty) {
  LValueBaseInfo baseInfo;
  CharUnits alignment = cgm.getNaturalTypeAlignment(ty, &baseInfo);
  Address addr(val, convertTypeForMem(ty), alignment);
  return makeAddrLValue(addr, ty, baseInfo);
}

QualType CIRGenFunction::buildFunctionArgList(GlobalDecl gd,
                                              FunctionArgList &args) {
  const auto *fd = cast<FunctionDecl>(gd.getDecl());
  QualType retTy = fd->getReturnType();

  const auto *md = dyn_cast<CXXMethodDecl>(fd);
  if (md && md->isInstance()) {
    if (cgm.getCXXABI().hasThisReturn(gd))
      cgm.errorNYI(fd->getSourceRange(), "this return");
    else if (cgm.getCXXABI().hasMostDerivedReturn(gd))
      cgm.errorNYI(fd->getSourceRange(), "most derived return");
    cgm.getCXXABI().buildThisParam(*this, args);
  }

  if (const auto *cd = dyn_cast<CXXConstructorDecl>(fd))
    if (cd->getInheritedConstructor())
      cgm.errorNYI(fd->getSourceRange(),
                   "buildFunctionArgList: inherited constructor");

  for (auto *param : fd->parameters())
    args.push_back(param);

  if (md && (isa<CXXConstructorDecl>(md) || isa<CXXDestructorDecl>(md)))
    cgm.getCXXABI().addImplicitStructorParams(*this, retTy, args);

  return retTy;
}

/// Emit code to compute a designator that specifies the location
/// of the expression.
/// FIXME: document this function better.
LValue CIRGenFunction::emitLValue(const Expr *e) {
  // FIXME: ApplyDebugLocation DL(*this, e);
  switch (e->getStmtClass()) {
  default:
    cgm.errorNYI(e->getSourceRange(),
                 std::string("l-value not implemented for '") +
                     e->getStmtClassName() + "'");
    return LValue();
  case Expr::ConditionalOperatorClass:
    return emitConditionalOperatorLValue(cast<ConditionalOperator>(e));
  case Expr::BinaryConditionalOperatorClass:
    return emitConditionalOperatorLValue(cast<BinaryConditionalOperator>(e));
  case Expr::ArraySubscriptExprClass:
    return emitArraySubscriptExpr(cast<ArraySubscriptExpr>(e));
  case Expr::ExtVectorElementExprClass:
    return emitExtVectorElementExpr(cast<ExtVectorElementExpr>(e));
  case Expr::UnaryOperatorClass:
    return emitUnaryOpLValue(cast<UnaryOperator>(e));
  case Expr::StringLiteralClass:
    return emitStringLiteralLValue(cast<StringLiteral>(e));
  case Expr::MemberExprClass:
    return emitMemberExpr(cast<MemberExpr>(e));
  case Expr::CompoundLiteralExprClass:
    return emitCompoundLiteralLValue(cast<CompoundLiteralExpr>(e));
  case Expr::PredefinedExprClass:
    return emitPredefinedLValue(cast<PredefinedExpr>(e));
  case Expr::BinaryOperatorClass:
    return emitBinaryOperatorLValue(cast<BinaryOperator>(e));
  case Expr::CompoundAssignOperatorClass: {
    QualType ty = e->getType();
    if (ty->getAs<AtomicType>()) {
      cgm.errorNYI(e->getSourceRange(),
                   "CompoundAssignOperator with AtomicType");
      return LValue();
    }
    if (!ty->isAnyComplexType())
      return emitCompoundAssignmentLValue(cast<CompoundAssignOperator>(e));

    return emitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(e));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return emitCallExprLValue(cast<CallExpr>(e));
  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(e);
    RunCleanupsScope scope(*this);
    LValue lv = emitLValue(cleanups->getSubExpr());
    assert(!cir::MissingFeatures::cleanupWithPreservedValues());
    return lv;
  }
  case Expr::CXXDefaultArgExprClass: {
    auto *dae = cast<CXXDefaultArgExpr>(e);
    CXXDefaultArgExprScope scope(*this, dae);
    return emitLValue(dae->getExpr());
  }
  case Expr::ParenExprClass:
    return emitLValue(cast<ParenExpr>(e)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return emitLValue(cast<GenericSelectionExpr>(e)->getResultExpr());
  case Expr::DeclRefExprClass:
    return emitDeclRefLValue(cast<DeclRefExpr>(e));
  case Expr::CStyleCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::ImplicitCastExprClass:
    return emitCastLValue(cast<CastExpr>(e));
  case Expr::MaterializeTemporaryExprClass:
    return emitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(e));
  case Expr::OpaqueValueExprClass:
    return emitOpaqueValueLValue(cast<OpaqueValueExpr>(e));
  case Expr::ChooseExprClass:
    return emitLValue(cast<ChooseExpr>(e)->getChosenSubExpr());
  }
}

static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt) {
  SmallString<256> buffer;
  llvm::raw_svector_ostream out(buffer);
  out << name << cnt;
  return std::string(out.str());
}

std::string CIRGenFunction::getCounterRefTmpAsString() {
  return getVersionedTmpName("ref.tmp", counterRefTmp++);
}

std::string CIRGenFunction::getCounterAggTmpAsString() {
  return getVersionedTmpName("agg.tmp", counterAggTmp++);
}

void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address destPtr,
                                            QualType ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus)
    if (const auto *rd = ty->getAsCXXRecordDecl(); rd && rd->isEmpty())
      return;

  // Cast the dest ptr to the appropriate i8 pointer type.
  if (builder.isInt8Ty(destPtr.getElementType())) {
    cgm.errorNYI(loc, "Cast the dest ptr to the appropriate i8 pointer type");
  }

  // Get size and alignment info for this aggregate.
  const CharUnits size = getContext().getTypeSizeInChars(ty);
  if (size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (isa<VariableArrayType>(getContext().getAsArrayType(ty))) {
      cgm.errorNYI(loc,
                   "emitNullInitialization for zero size VariableArrayType");
    } else {
      return;
    }
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!cgm.getTypes().isZeroInitializable(ty)) {
    cgm.errorNYI(loc, "type is not zero initializable");
  }

  // In LLVM codegen: otherwise, just memset the whole thing to zero using
  // Builder.CreateMemSet. In CIR just emit a store of #cir.zero to the
  // respective address.
  // Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
  const mlir::Value zeroValue = builder.getNullValue(convertType(ty), loc);
  builder.createStore(loc, zeroValue, destPtr);
}
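// For example, 'struct P { int x, y; }; P p{};' is null-initialized with a
// single zero-constant store rather than a memset, roughly:
//   %1 = cir.const #cir.zero : !rec_P
//   cir.store %1, %0 : !rec_P, !cir.ptr<!rec_P>
// (illustrative textual CIR; the record type spelling may differ).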

// TODO(cir): should be shared with LLVM codegen.
bool CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *ce) {
  const Expr *e = ce->getSubExpr();

  if (ce->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(e->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(ce)) {
    // And that glvalue casts are never null.
    if (ice->isGLValue())
      return false;
  }

  return true;
}
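// For example, a pointer upcast 'Base *b = d;' where 'd' may be null must
// preserve null when a base-offset adjustment is applied, so it is null
// checked; casts of 'this' and glvalue casts are assumed non-null.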

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
mlir::Value
CIRGenFunction::emitArrayLength(const clang::ArrayType *origArrayType,
                                QualType &baseType, Address &addr) {
  const clang::ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size. Note that
  // this is the size of the VLA in bytes, not its size in elements.
  if (isa<VariableArrayType>(arrayType)) {
    cgm.errorNYI(*currSrcLoc, "VLAs");
    return builder.getConstInt(*currSrcLoc, sizeTy, 0);
  }

  uint64_t countFromCLAs = 1;
  QualType eltType;

  auto cirArrayType = mlir::dyn_cast<cir::ArrayType>(addr.getElementType());

  while (cirArrayType) {
    countFromCLAs *= cirArrayType.getSize();
    eltType = arrayType->getElementType();

    cirArrayType =
        mlir::dyn_cast<cir::ArrayType>(cirArrayType.getElementType());

    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!cirArrayType || arrayType) &&
           "CIR and Clang types are out-of-sync");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    cgm.errorNYI(*currSrcLoc, "length for non-array underlying types");
  }

  baseType = eltType;
  return builder.getConstInt(*currSrcLoc, sizeTy, countFromCLAs);
}
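// For example, for 'int a[2][3]' this walks both constant dimensions and
// produces a count of 6 elements with baseType 'int'.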

  // If we already made the indirect branch for indirect goto, return its block.
  if (indirectGotoBlock)
    return;

  mlir::OpBuilder::InsertionGuard guard(builder);
  indirectGotoBlock =
      builder.createBlock(builder.getBlock()->getParent(), {}, {voidPtrTy},
                          {builder.getUnknownLoc()});
}

mlir::Value CIRGenFunction::emitAlignmentAssumption(
    mlir::Value ptrValue, QualType ty, SourceLocation loc,
    SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue) {
  assert(!cir::MissingFeatures::sanitizers());
  return cir::AssumeAlignedOp::create(builder, getLoc(assumptionLoc), ptrValue,
                                      alignment, offsetValue);
}

mlir::Value CIRGenFunction::emitAlignmentAssumption(
    mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc,
    int64_t alignment, mlir::Value offsetValue) {
  QualType ty = expr->getType();
  SourceLocation loc = expr->getExprLoc();
  return emitAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment,
                                 offsetValue);
}

CIRGenFunction::VlaSizePair CIRGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla =
      cgm.getASTContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

CIRGenFunction::VlaSizePair
CIRGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  mlir::Value numElements;

  QualType elementType;
  do {
    elementType = type->getElementType();
    mlir::Value vlaSize = vlaSizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize.getType() == sizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.
      numElements =
          builder.createMul(numElements.getLoc(), numElements, vlaSize,
                            cir::OverflowBehavior::NoUnsignedWrap);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  assert(numElements && "Undefined elements number");
  return {numElements, elementType};
}
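// For example, for 'int a[n][m]' the two recorded dimension sizes are
// multiplied, yielding numElements == n * m and elementType 'int'; the
// multiplication is marked non-wrapping since overflow would be UB.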

CIRGenFunction::VlaSizePair
CIRGenFunction::getVLAElements1D(const VariableArrayType *vla) {
  mlir::Value vlaSize = vlaSizeMap[vla->getSizeExpr()];
  assert(vlaSize && "no size for VLA!");
  assert(vlaSize.getType() == sizeTy);
  return {vlaSize, vla->getElementType()};
}

// TODO(cir): Most of this function can be shared between CIRGen
// and traditional LLVM codegen
void CIRGenFunction::emitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {
    case Type::CountAttributed:
    case Type::PackIndexing:
    case Type::ArrayParameter:
    case Type::HLSLAttributedResource:
    case Type::HLSLInlineSpirv:
    case Type::PredefinedSugar:
      cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
      break;

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable(
          "dependent type must be resolved before the CIR codegen");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::Record:
    case Type::Enum:
    case Type::Using:
    case Type::TemplateSpecialization:
    case Type::ObjCTypeParam:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
    case Type::BitInt:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<clang::AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<clang::DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<clang::PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<clang::BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<clang::ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<clang::MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<clang::ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<clang::VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *sizeExpr = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        mlir::Value &entry = vlaSizeMap[sizeExpr];
        if (!entry) {
          mlir::Value size = emitScalarExpr(sizeExpr);

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          // FIXME: What about when size's type is larger than size_t?
          entry = builder.createIntCast(size, sizeTy);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<clang::FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::BTFTagAttributed:
    case Type::SubstTemplateTypeParm:
    case Type::MacroQualified:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      emitIgnoredExpr(cast<clang::TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<clang::AtomicType>(ty)->getValueType();
      break;

    case Type::Pipe:
      type = cast<clang::PipeType>(ty)->getElementType();
      break;
    }
  } while (type->isVariablyModifiedType());
}
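// For example, for 'void f(int n) { int (*p)[n]; }' the walk descends through
// the pointer to the VLA 'int[n]', emits the size expression 'n' once, and
// caches the result in vlaSizeMap keyed by that expression.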

Address CIRGenFunction::emitVAListRef(const Expr *e) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return emitPointerWithAlignment(e);
  return emitLValue(e).getAddress();
}

} // namespace clang::CIRGen