CIRGenFunction.cpp
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Internal per-function state used for AST-to-ClangIR code gen
//
//===----------------------------------------------------------------------===//

#include "CIRGenFunction.h"

#include "CIRGenCXXABI.h"
#include "CIRGenCall.h"
#include "CIRGenValue.h"
#include "mlir/IR/Location.h"
#include "clang/AST/Attr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/CIR/MissingFeatures.h"

#include <cassert>

namespace clang::CIRGen {

CIRGenFunction::CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
                               bool suppressNewContext)
    : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
  ehStack.setCGF(this);
}

CIRGenFunction::~CIRGenFunction() {}

// This is copied from clang/lib/CodeGen/CodeGenFunction.cpp
cir::TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::BitInt:
    case Type::HLSLAttributedResource:
    case Type::HLSLInlineSpirv:
      return cir::TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return cir::TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ArrayParameter:
      return cir::TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}
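
// Illustrative note (not from the original file): the kind returned here
// selects which expression emitter a caller dispatches to. Under the usual
// C/C++ type spellings this works out to, roughly:
//
//   int, float, int*, S&   -> cir::TEK_Scalar     (scalar emitters)
//   _Complex double        -> cir::TEK_Complex    (complex emitters)
//   struct S, int[4]       -> cir::TEK_Aggregate  (aggregate emitters)
//   _Atomic(int)           -> unwrapped to int, hence cir::TEK_Scalar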

mlir::Type CIRGenFunction::convertTypeForMem(QualType t) {
  return cgm.getTypes().convertTypeForMem(t);
}

mlir::Type CIRGenFunction::convertType(QualType t) {
  return cgm.getTypes().convertType(t);
}

mlir::Location CIRGenFunction::getLoc(SourceLocation srcLoc) {
  // Some AST nodes might contain invalid source locations (e.g.
  // CXXDefaultArgExpr); work around that to still get something out.
  if (srcLoc.isValid()) {
    SourceManager &sm = getContext().getSourceManager();
    PresumedLoc pLoc = sm.getPresumedLoc(srcLoc);
    StringRef filename = pLoc.getFilename();
    return mlir::FileLineColLoc::get(builder.getStringAttr(filename),
                                     pLoc.getLine(), pLoc.getColumn());
  }
  // Do our best...
  assert(currSrcLoc && "expected to inherit some source location");
  return *currSrcLoc;
}

mlir::Location CIRGenFunction::getLoc(SourceRange srcLoc) {
  // Some AST nodes might contain invalid source locations (e.g.
  // CXXDefaultArgExpr); work around that to still get something out.
  if (srcLoc.isValid()) {
    mlir::Location beg = getLoc(srcLoc.getBegin());
    mlir::Location end = getLoc(srcLoc.getEnd());
    SmallVector<mlir::Location, 2> locs = {beg, end};
    mlir::Attribute metadata;
    return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
  }
  if (currSrcLoc) {
    return *currSrcLoc;
  }
  // We're brave, but time to give up.
  return builder.getUnknownLoc();
}

mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) {
  SmallVector<mlir::Location, 2> locs = {lhs, rhs};
  mlir::Attribute metadata;
  return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
}
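
// Illustrative sketch (assumption, not part of the original file): for a
// statement spanning columns 5 through 20 on line 3 of foo.c, the SourceRange
// overload above produces a fused location roughly printed as
//
//   loc(fused["foo.c":3:5, "foo.c":3:20])
//
// and the two-location overload fuses arbitrary begin/end pairs the same way.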

bool CIRGenFunction::containsLabel(const Stmt *s, bool ignoreCaseStmts) {
  // Null statement, not a label!
  if (!s)
    return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) { ... foo: bar(); } goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(s))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we
  // have to emit the code.
  if (isa<SwitchCase>(s) && !ignoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore case statements when we
  // recursively process the sub-statements of the switch. If we haven't
  // encountered a switch statement, we treat case statements like labels, but
  // if we are processing a switch statement, case statements are expected.
  if (isa<SwitchStmt>(s))
    ignoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  return std::any_of(s->child_begin(), s->child_end(),
                     [=](const Stmt *subStmt) {
                       return containsLabel(subStmt, ignoreCaseStmts);
                     });
}

/// If the specified expression does not fold to a constant, or if it does but
/// contains a label, return false. If it constant folds, return true and set
/// the boolean result in resultBool.
bool CIRGenFunction::constantFoldsToBool(const Expr *cond, bool &resultBool,
                                         bool allowLabels) {
  llvm::APSInt resultInt;
  if (!constantFoldsToSimpleInteger(cond, resultInt, allowLabels))
    return false;

  resultBool = resultInt.getBoolValue();
  return true;
}

/// If the specified expression does not fold to a constant, or if it does
/// fold but contains a label, return false. If it constant folds, return
/// true and set the folded value.
bool CIRGenFunction::constantFoldsToSimpleInteger(const Expr *cond,
                                                  llvm::APSInt &resultInt,
                                                  bool allowLabels) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult result;
  if (!cond->EvaluateAsInt(result, getContext()))
    return false; // Not foldable, not integer or not fully evaluatable.

  llvm::APSInt intValue = result.Val.getInt();
  if (!allowLabels && containsLabel(cond))
    return false; // Contains a label.

  resultInt = intValue;
  return true;
}
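
// Example of the folding contract (illustrative, not from the original file):
//
//   if (sizeof(int) == 4) { ... }  // folds: resultBool == true
//   if (n == 4) { ... }            // not constant: returns false
//   if (0) { lbl: ; }              // folds to false, but contains a label,
//                                  // so it is rejected unless allowLabels.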

void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
                                            CharUnits alignment) {
  if (!type->isVoidType()) {
    mlir::Value addr = emitAlloca("__retval", convertType(type), loc, alignment,
                                  /*insertIntoFnEntryBlock=*/false);
    fnRetAlloca = addr;
    returnValue = Address(addr, alignment);
  }
}

void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
                             mlir::Location loc, CharUnits alignment,
                             bool isParam) {
  assert(isa<NamedDecl>(var) && "Needs a named decl");
  assert(!symbolTable.count(var) && "not supposed to be available just yet");

  auto allocaOp = addrVal.getDefiningOp<cir::AllocaOp>();
  assert(allocaOp && "expected cir::AllocaOp");

  if (isParam)
    allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
  if (ty->isReferenceType() || ty.isConstQualified())
    allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext()));

  symbolTable.insert(var, allocaOp);
}
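
// For a parameter declared 'const int x', the alloca recorded here would look
// roughly like this in CIR (illustrative, not from the original file):
//
//   %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["x", init, const]
//
// where 'init' comes from setInitAttr (parameters) and 'const' from
// setConstantAttr (const-qualified or reference types).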

void CIRGenFunction::LexicalScope::cleanup() {
  CIRGenBuilderTy &builder = cgf.builder;
  LexicalScope *localScope = cgf.curLexScope;

  auto applyCleanup = [&]() {
    if (performCleanup) {
      // ApplyDebugLocation
      assert(!cir::MissingFeatures::generateDebugInfo());
      forceCleanup();
    }
  };

  // Cleanups are done right before codegen resumes a scope. This is where
  // objects are destroyed. Process all return blocks.
  // TODO(cir): Handle returning from a switch statement through a cleanup
  // block. We can't simply jump to the cleanup block, because the cleanup block
  // is not part of the case region. Either reemit all cleanups in the return
  // block or wait for MLIR structured control flow to support early exits.
  llvm::SmallVector<mlir::Block *> retBlocks;
  for (mlir::Block *retBlock : localScope->getRetBlocks()) {
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.setInsertionPointToEnd(retBlock);
    retBlocks.push_back(retBlock);
    mlir::Location retLoc = localScope->getRetLoc(retBlock);
    emitReturn(retLoc);
  }

  auto insertCleanupAndLeave = [&](mlir::Block *insPt) {
    mlir::OpBuilder::InsertionGuard guard(builder);
    builder.setInsertionPointToEnd(insPt);

    // If we still don't have a cleanup block, it means that `applyCleanup`
    // below might be able to get us one.
    mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);

    // Leverage and defers to RunCleanupsScope's dtor and scope handling.
    applyCleanup();

    // If we now have one after `applyCleanup`, hook it up properly.
    if (!cleanupBlock && localScope->getCleanupBlock(builder)) {
      cleanupBlock = localScope->getCleanupBlock(builder);
      cir::BrOp::create(builder, insPt->back().getLoc(), cleanupBlock);
      if (!cleanupBlock->mightHaveTerminator()) {
        mlir::OpBuilder::InsertionGuard guard(builder);
        builder.setInsertionPointToEnd(cleanupBlock);
        cir::YieldOp::create(builder, localScope->endLoc);
      }
    }

    if (localScope->depth == 0) {
      // Reached the end of the function.
      // Special handling only for single return block case
      if (localScope->getRetBlocks().size() == 1) {
        mlir::Block *retBlock = localScope->getRetBlocks()[0];
        mlir::Location retLoc = localScope->getRetLoc(retBlock);
        if (retBlock->getUses().empty()) {
          retBlock->erase();
        } else {
          // Thread return block via cleanup block.
          if (cleanupBlock) {
            for (mlir::BlockOperand &blockUse : retBlock->getUses()) {
              cir::BrOp brOp = mlir::cast<cir::BrOp>(blockUse.getOwner());
              brOp.setSuccessor(cleanupBlock);
            }
          }

          cir::BrOp::create(builder, retLoc, retBlock);
          return;
        }
      }
      emitImplicitReturn();
      return;
    }

    // End of any local scope != function
    // Ternary ops have to deal with matching arms for yielding types and do
    // return a value, so they must do their own cir.yield insertion.
    if (!localScope->isTernary() && !insPt->mightHaveTerminator()) {
      !retVal ? cir::YieldOp::create(builder, localScope->endLoc)
              : cir::YieldOp::create(builder, localScope->endLoc, retVal);
    }
  };

  // If a cleanup block has been created at some point, branch to it
  // and set the insertion point to continue at the cleanup block.
  // Terminators are then inserted either in the cleanup block or
  // inline in this current block.
  mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
  if (cleanupBlock)
    insertCleanupAndLeave(cleanupBlock);

  // Now deal with any pending block wrap up like implicit end of
  // scope.

  mlir::Block *curBlock = builder.getBlock();
  if (isGlobalInit() && !curBlock)
    return;
  if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
    return;

  // Get rid of any empty block at the end of the scope.
  bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
  if (!entryBlock && curBlock->empty()) {
    curBlock->erase();
    for (mlir::Block *retBlock : retBlocks) {
      if (retBlock->getUses().empty())
        retBlock->erase();
    }
    return;
  }

  // If there's a cleanup block, branch to it, nothing else to do.
  if (cleanupBlock) {
    cir::BrOp::create(builder, curBlock->back().getLoc(), cleanupBlock);
    return;
  }

  // No pre-existent cleanup block, emit cleanup code and yield/return.
  insertCleanupAndLeave(curBlock);
}

cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
  CIRGenBuilderTy &builder = cgf.getBuilder();

  auto fn = dyn_cast<cir::FuncOp>(cgf.curFn);
  assert(fn && "emitReturn from non-function");

  // If we are on a coroutine, add the coro_end builtin call.
  if (fn.getCoroutine())
    cgf.emitCoroEndBuiltinCall(loc,
                               builder.getNullPtr(builder.getVoidPtrTy(), loc));
  if (!fn.getFunctionType().hasVoidReturn()) {
    // Load the value from `__retval` and return it via the `cir.return` op.
    auto value = cir::LoadOp::create(
        builder, loc, fn.getFunctionType().getReturnType(), *cgf.fnRetAlloca);
    return cir::ReturnOp::create(builder, loc,
                                 llvm::ArrayRef(value.getResult()));
  }
  return cir::ReturnOp::create(builder, loc);
}
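
// Illustrative CIR for a non-void return (a sketch, not from the original
// file): the staged '__retval' slot is loaded and returned:
//
//   %1 = cir.load %0 : !cir.ptr<!s32i>, !s32i
//   cir.return %1 : !s32i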

// This is copied from CodeGenModule::MayDropFunctionReturn. This is a
// candidate for sharing between CIRGen and CodeGen.
static bool mayDropFunctionReturn(const ASTContext &astContext,
                                  QualType returnType) {
  // We can't just discard the return value for a record type with a complex
  // destructor or a non-trivially copyable type.
  if (const auto *classDecl = returnType->getAsCXXRecordDecl())
    return classDecl->hasTrivialDestructor();
  return returnType.isTriviallyCopyableType(astContext);
}
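
// Example of the distinction (illustrative, not from the original file):
//
//   struct Trivial { int x; };   // trivially copyable: return may be dropped
//   struct Guard { ~Guard(); };  // non-trivial dtor: must not be dropped
//
// A caller that ignores a returned 'Guard' still needs its destructor to run,
// so flowing off the end of such a function cannot be treated as benign.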

void CIRGenFunction::LexicalScope::emitImplicitReturn() {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  LexicalScope *localScope = cgf.curLexScope;

  const auto *fd = cast<clang::FunctionDecl>(cgf.curGD.getDecl());

  // In C++, flowing off the end of a non-void function is always undefined
  // behavior. In C, flowing off the end of a non-void function is undefined
  // behavior only if the non-existent return value is used by the caller.
  // That influences whether the terminating op is trap, unreachable, or
  // return.
  if (cgf.getLangOpts().CPlusPlus && !fd->hasImplicitReturnZero() &&
      !cgf.sawAsmBlock && !fd->getReturnType()->isVoidType() &&
      builder.getInsertionBlock()) {
    bool shouldEmitUnreachable =
        cgf.cgm.getCodeGenOpts().StrictReturn ||
        !mayDropFunctionReturn(fd->getASTContext(), fd->getReturnType());

    if (shouldEmitUnreachable) {
      if (cgf.cgm.getCodeGenOpts().OptimizationLevel == 0)
        cir::TrapOp::create(builder, localScope->endLoc);
      else
        cir::UnreachableOp::create(builder, localScope->endLoc);
      builder.clearInsertionPoint();
      return;
    }
  }

  (void)emitReturn(localScope->endLoc);
}

cir::TryOp CIRGenFunction::LexicalScope::getClosestTryParent() {
  LexicalScope *scope = this;
  while (scope) {
    if (scope->isTry())
      return scope->getTry();
    scope = scope->parentScope;
  }
  return nullptr;
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static mlir::Value emitArgumentDemotion(CIRGenFunction &cgf, const VarDecl *var,
                                        mlir::Value value) {
  mlir::Type ty = cgf.convertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value.getType() == ty)
    return value;

  assert((mlir::isa<cir::IntType>(ty) || cir::isAnyFloatingPointType(ty)) &&
         "unexpected promotion type");

  if (mlir::isa<cir::IntType>(ty))
    return cgf.getBuilder().CIRBaseBuilderTy::createIntCast(value, ty);

  return cgf.getBuilder().createFloatingCast(value, ty);
}
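
// Example (illustrative, not from the original file): in a K&R-style
// definition such as
//
//   float f(x) float x; { return x; }
//
// the caller promotes 'x' to double, so the incoming argument is demoted back
// to float here (createFloatingCast) before being stored into x's alloca.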

void CIRGenFunction::emitFunctionProlog(const FunctionArgList &args,
                                        mlir::Block *entryBB,
                                        const FunctionDecl *fd,
                                        SourceLocation bodyBeginLoc) {
  // Naked functions don't have prologues.
  if (fd && fd->hasAttr<NakedAttr>()) {
    cgm.errorNYI(bodyBeginLoc, "naked function decl");
  }

  // Declare all the function arguments in the symbol table.
  for (const auto nameValue : llvm::zip(args, entryBB->getArguments())) {
    const VarDecl *paramVar = std::get<0>(nameValue);
    mlir::Value paramVal = std::get<1>(nameValue);
    CharUnits alignment = getContext().getDeclAlign(paramVar);
    mlir::Location paramLoc = getLoc(paramVar->getSourceRange());
    paramVal.setLoc(paramLoc);

    mlir::Value addrVal =
        emitAlloca(cast<NamedDecl>(paramVar)->getName(),
                   convertType(paramVar->getType()), paramLoc, alignment,
                   /*insertIntoFnEntryBlock=*/true);

    declare(addrVal, paramVar, paramVar->getType(), paramLoc, alignment,
            /*isParam=*/true);

    setAddrOfLocalVar(paramVar, Address(addrVal, alignment));

    bool isPromoted = isa<ParmVarDecl>(paramVar) &&
                      cast<ParmVarDecl>(paramVar)->isKNRPromoted();
    assert(!cir::MissingFeatures::constructABIArgDirectExtend());
    if (isPromoted)
      paramVal = emitArgumentDemotion(*this, paramVar, paramVal);

    // Location of the store to the param storage tracked as beginning of
    // the function body.
    mlir::Location fnBodyBegin = getLoc(bodyBeginLoc);
    builder.CIRBaseBuilderTy::createStore(fnBodyBegin, paramVal, addrVal);
  }
  assert(builder.getInsertionBlock() && "Should be valid");
}
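
// Illustrative CIR for 'void f(int a)' after this prologue (a sketch, not
// from the original file): one alloca per parameter, plus a store of the
// incoming block argument at the body's begin location:
//
//   %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["a", init]
//   cir.store %arg0, %0 : !s32i, !cir.ptr<!s32i>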

void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
                                   cir::FuncOp fn, cir::FuncType funcType,
                                   FunctionArgList args, SourceLocation loc,
                                   SourceLocation startLoc) {
  assert(!curFn &&
         "CIRGenFunction can only be used for one function at a time");

  curFn = fn;

  const Decl *d = gd.getDecl();

  didCallStackSave = false;
  curCodeDecl = d;
  const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
  curFuncDecl = d ? d->getNonClosureContext() : nullptr;

  prologueCleanupDepth = ehStack.stable_begin();

  mlir::Block *entryBB = &fn.getBlocks().front();
  builder.setInsertionPointToStart(entryBB);

  // Determine the function body begin location for the prolog.
  // If fd is null or has no body, use startLoc as fallback.
  SourceLocation bodyBeginLoc = startLoc;
  if (fd) {
    if (Stmt *body = fd->getBody())
      bodyBeginLoc = body->getBeginLoc();
    else
      bodyBeginLoc = fd->getLocation();
  }

  emitFunctionProlog(args, entryBB, fd, bodyBeginLoc);

  // When the current function is not void, create an address to store the
  // result value.
  if (!returnType->isVoidType()) {
    // Determine the function body end location.
    // If fd is null or has no body, use loc as fallback.
    SourceLocation bodyEndLoc = loc;
    if (fd) {
      if (Stmt *body = fd->getBody())
        bodyEndLoc = body->getEndLoc();
      else
        bodyEndLoc = fd->getLocation();
    }
    emitAndUpdateRetAlloca(returnType, getLoc(bodyEndLoc),
                           getContext().getTypeAlignInChars(returnType));
  }

  if (isa_and_nonnull<CXXMethodDecl>(d) &&
      cast<CXXMethodDecl>(d)->isInstance()) {
    cgm.getCXXABI().emitInstanceFunctionProlog(loc, *this);

    const auto *md = cast<CXXMethodDecl>(d);
    if (md->getParent()->isLambda() && md->getOverloadedOperator() == OO_Call) {
      // We're in a lambda.
      auto fn = dyn_cast<cir::FuncOp>(curFn);
      assert(fn && "lambda in non-function region");
      fn.setLambda(true);

      // Figure out the captures.
      md->getParent()->getCaptureFields(lambdaCaptureFields,
                                        lambdaThisCaptureField);
      if (lambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this' - either by
        // value or by reference, make sure CXXThisValue points to the correct
        // object.

        // Get the lvalue for the field (which is a copy of the enclosing object
        // or contains the address of the enclosing object).
        LValue thisFieldLValue =
            emitLValueForLambdaField(lambdaThisCaptureField);
        if (!lambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its
          // address. Sign this pointer.
          cxxThisValue = thisFieldLValue.getPointer();
        } else {
          // Load the lvalue pointed to by the field, since '*this' was captured
          // by reference.
          cxxThisValue =
              emitLoadOfLValue(thisFieldLValue, SourceLocation()).getValue();
        }
      }
      for (auto *fd : md->getParent()->fields()) {
        if (fd->hasCapturedVLAType())
          cgm.errorNYI(loc, "lambda captured VLA type");
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The fast
      // register allocator would be happier...
      cxxThisValue = cxxabiThisValue;
    }

    assert(!cir::MissingFeatures::emitTypeCheck());
  }
}

  for (cir::BlockAddressOp &blockAddress : cgm.unresolvedBlockAddressToLabel) {
    cir::LabelOp labelOp =
        cgm.lookupBlockAddressInfo(blockAddress.getBlockAddrInfo());
    assert(labelOp && "expected cir.labelOp to already be emitted");
    cgm.updateResolvedBlockAddress(blockAddress, labelOp);
  }
  cgm.unresolvedBlockAddressToLabel.clear();
}

  if (cgm.blockAddressToLabel.empty())
    return;
  llvm::SmallVector<mlir::Block *> succesors;
  llvm::SmallVector<mlir::ValueRange> rangeOperands;
  mlir::OpBuilder::InsertionGuard guard(builder);
  builder.setInsertionPointToEnd(indirectGotoBlock);
  for (auto &[blockAdd, labelOp] : cgm.blockAddressToLabel) {
    succesors.push_back(labelOp->getBlock());
    rangeOperands.push_back(labelOp->getBlock()->getArguments());
  }
  cir::IndirectBrOp::create(builder, builder.getUnknownLoc(),
                            indirectGotoBlock->getArgument(0), false,
                            rangeOperands, succesors);
  cgm.blockAddressToLabel.clear();
}

void CIRGenFunction::finishFunction(SourceLocation endLoc) {
  // Resolve block address-to-label mappings, then emit the indirect branch
  // with the corresponding targets.

  // If a label address was taken but no indirect goto was used, we can't remove
  // the block argument here. Instead, we mark the 'indirectbr' op
  // as poison so that the cleanup can be deferred to lowering, since the
  // verifier doesn't allow the 'indirectbr' target address to be null.
  if (indirectGotoBlock && indirectGotoBlock->hasNoPredecessors()) {
    auto indrBr = cast<cir::IndirectBrOp>(indirectGotoBlock->front());
    indrBr.setPoison(true);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  // TODO(cir): Use prologueCleanupDepth here.
  bool hasCleanups = ehStack.stable_begin() != prologueCleanupDepth;
  if (hasCleanups) {
    assert(!cir::MissingFeatures::generateDebugInfo());
    // FIXME(cir): should we clearInsertionPoint? breaks many testcases
    popCleanupBlocks(prologueCleanupDepth);
  }
}

mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
  // We start with function level scope for variables.
  SymTableScopeTy varScope(symbolTable);

  if (const CompoundStmt *block = dyn_cast<CompoundStmt>(body))
    return emitCompoundStmtWithoutScope(*block);

  return emitStmt(body, /*useCurrentScope=*/true);
}

static void eraseEmptyAndUnusedBlocks(cir::FuncOp func) {
  // Remove any leftover blocks that are unreachable and empty, since they do
  // not represent unreachable code useful for warnings nor anything deemed
  // useful in general.
  SmallVector<mlir::Block *> blocksToDelete;
  for (mlir::Block &block : func.getBlocks()) {
    if (block.empty() && block.getUses().empty())
      blocksToDelete.push_back(&block);
  }
  for (mlir::Block *block : blocksToDelete)
    block->erase();
}

cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
                                         cir::FuncType funcType) {
  const auto *funcDecl = cast<FunctionDecl>(gd.getDecl());
  curGD = gd;

  if (funcDecl->isInlineBuiltinDeclaration()) {
    // When generating code for a builtin with an inline declaration, use a
    // mangled name to hold the actual body, while keeping an external
    // declaration in case the function pointer is referenced somewhere.
    std::string fdInlineName = (cgm.getMangledName(funcDecl) + ".inline").str();
    cir::FuncOp clone =
        mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
    if (!clone) {
      mlir::OpBuilder::InsertionGuard guard(builder);
      builder.setInsertionPoint(fn);
      clone = cir::FuncOp::create(builder, fn.getLoc(), fdInlineName,
                                  fn.getFunctionType());
      clone.setLinkage(cir::GlobalLinkageKind::InternalLinkage);
      clone.setSymVisibility("private");
      clone.setInlineKind(cir::InlineKind::AlwaysInline);
    }
    fn.setLinkage(cir::GlobalLinkageKind::ExternalLinkage);
    fn.setSymVisibility("private");
    fn = clone;
  } else {
    // Detect the unusual situation where an inline version is shadowed by a
    // non-inline version. In that case we should pick the external one
    // everywhere. That's GCC behavior too.
    for (const FunctionDecl *pd = funcDecl->getPreviousDecl(); pd;
         pd = pd->getPreviousDecl()) {
      if (LLVM_UNLIKELY(pd->isInlineBuiltinDeclaration())) {
        std::string inlineName = funcDecl->getName().str() + ".inline";
        if (auto inlineFn = mlir::cast_or_null<cir::FuncOp>(
                cgm.getGlobalValue(inlineName))) {
          // Replace all uses of the .inline function with the regular function
          // FIXME: This performs a linear walk over the module. Introduce some
          // caching here.
          if (inlineFn
                  .replaceAllSymbolUses(fn.getSymNameAttr(), cgm.getModule())
                  .failed())
            llvm_unreachable("Failed to replace inline builtin symbol uses");
          inlineFn.erase();
        }
        break;
      }
    }
  }
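
  // Example of the renaming above (illustrative, not from the original file):
  // for a gnu_inline builtin such as 'extern inline int abs(int a) {...}',
  // the body is emitted under the mangled name 'abs.inline' while 'abs'
  // itself stays an external declaration, so taking '&abs' still binds to
  // the external symbol; the shadowing loop undoes this when a non-inline
  // definition appears later in the TU.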

  SourceLocation loc = funcDecl->getLocation();
  Stmt *body = funcDecl->getBody();
  SourceRange bodyRange =
      body ? body->getSourceRange() : funcDecl->getLocation();

  SourceLocRAIIObject fnLoc{*this, loc.isValid() ? getLoc(loc)
                                                 : builder.getUnknownLoc()};

  auto validMLIRLoc = [&](clang::SourceLocation clangLoc) {
    return clangLoc.isValid() ? getLoc(clangLoc) : builder.getUnknownLoc();
  };
  const mlir::Location fusedLoc = mlir::FusedLoc::get(
      &getMLIRContext(),
      {validMLIRLoc(bodyRange.getBegin()), validMLIRLoc(bodyRange.getEnd())});
  mlir::Block *entryBB = fn.addEntryBlock();

  FunctionArgList args;
  QualType retTy = buildFunctionArgList(gd, args);

  // Create a scope in the symbol table to hold variable declarations.
  SymTableScopeTy varScope(symbolTable);
  {
    LexicalScope lexScope(*this, fusedLoc, entryBB);

    // Emit the standard function prologue.
    startFunction(gd, retTy, fn, funcType, args, loc, bodyRange.getBegin());

    // Save parameters for coroutine function.
    if (body && isa_and_nonnull<CoroutineBodyStmt>(body))
      llvm::append_range(fnArgs, funcDecl->parameters());

    if (isa<CXXDestructorDecl>(funcDecl)) {
      emitDestructorBody(args);
    } else if (isa<CXXConstructorDecl>(funcDecl)) {
      emitConstructorBody(args);
    } else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice &&
               funcDecl->hasAttr<CUDAGlobalAttr>()) {
      getCIRGenModule().errorNYI(bodyRange, "CUDA kernel");
    } else if (isa<CXXMethodDecl>(funcDecl) &&
               cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker()) {
      // The lambda static invoker function is special, because it forwards or
      // clones the body of the function call operator (but is actually
      // static).
      emitLambdaStaticInvokeBody(cast<CXXMethodDecl>(funcDecl));
    } else if (funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) &&
               (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() ||
                cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator())) {
      // Implicit copy-assignment gets the same special treatment as implicit
      // copy-constructors.
      emitImplicitAssignmentOperatorBody(args);
    } else if (body) {
      // Emit standard function body.
      if (mlir::failed(emitFunctionBody(body))) {
        return nullptr;
      }
    } else {
      // Anything without a body should have been handled above.
      llvm_unreachable("no definition for normal function");
    }

    if (mlir::failed(fn.verifyBody()))
      return nullptr;

    finishFunction(bodyRange.getEnd());
  }

  eraseEmptyAndUnusedBlocks(fn);
  return fn;
}

void CIRGenFunction::emitConstructorBody(FunctionArgList &args) {
  assert(!cir::MissingFeatures::sanitizers());
  const auto *ctor = cast<CXXConstructorDecl>(curGD.getDecl());
  CXXCtorType ctorType = curGD.getCtorType();

  assert((cgm.getTarget().getCXXABI().hasConstructorVariants() ||
          ctorType == Ctor_Complete) &&
         "can only generate complete ctor for this ABI");

  cgm.setCXXSpecialMemberAttr(cast<cir::FuncOp>(curFn), ctor);

  if (ctorType == Ctor_Complete && isConstructorDelegationValid(ctor) &&
      cgm.getTarget().getCXXABI().hasConstructorVariants()) {
    emitDelegateCXXConstructorCall(ctor, Ctor_Base, args, ctor->getEndLoc());
    return;
  }

  const FunctionDecl *definition = nullptr;
  Stmt *body = ctor->getBody(definition);
  assert(definition == ctor && "emitting wrong constructor body");

  if (isa_and_nonnull<CXXTryStmt>(body)) {
    cgm.errorNYI(ctor->getSourceRange(), "emitConstructorBody: try body");
    return;
  }

  assert(!cir::MissingFeatures::incrementProfileCounter());
  assert(!cir::MissingFeatures::runCleanupsScope());

  // TODO: in restricted cases, we can emit the vbase initializers of a
  // complete ctor and then delegate to the base ctor.

  // Emit the constructor prologue, i.e. the base and member initializers.
  emitCtorPrologue(ctor, ctorType, args);

  // TODO(cir): propagate this result via mlir::logical result. Just unreachable
  // now just to have it handled.
  if (mlir::failed(emitStmt(body, true))) {
    cgm.errorNYI(ctor->getSourceRange(),
                 "emitConstructorBody: emit body statement failed.");
    return;
  }
}

/// Emits the body of the current destructor.
void CIRGenFunction::emitDestructorBody(FunctionArgList &args) {
  const CXXDestructorDecl *dtor = cast<CXXDestructorDecl>(curGD.getDecl());
  CXXDtorType dtorType = curGD.getDtorType();

  cgm.setCXXSpecialMemberAttr(cast<cir::FuncOp>(curFn), dtor);

  // For an abstract class, non-base destructors are never used (and can't
  // be emitted in general, because vbase dtors may not have been validated
  // by Sema), but the Itanium ABI doesn't make them optional and Clang may
  // in fact emit references to them from other compilations, so emit them
  // as functions containing a trap instruction.
  if (dtorType != Dtor_Base && dtor->getParent()->isAbstract()) {
    cgm.errorNYI(dtor->getSourceRange(), "abstract base class destructors");
    return;
  }

  Stmt *body = dtor->getBody();
  assert(!cir::MissingFeatures::incrementProfileCounter());

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor. Do so.
  if (dtorType == Dtor_Deleting || dtorType == Dtor_VectorDeleting) {
    if (dtorType == Dtor_VectorDeleting)
      cgm.errorNYI(dtor->getSourceRange(), "emitConditionalArrayDtorCall");
    RunCleanupsScope dtorEpilogue(*this);
    enterDtorCleanups(dtor, Dtor_Deleting);
    if (haveInsertPoint()) {
      QualType thisTy = dtor->getFunctionObjectParameterType();
      emitCXXDestructorCall(dtor, Dtor_Complete, /*forVirtualBase=*/false,
                            /*delegating=*/false, loadCXXThisAddress(), thisTy);
    }
    return;
  }

  // If the body is a function-try-block, enter the try before
  // anything else.
  const bool isTryBody = isa_and_nonnull<CXXTryStmt>(body);
  if (isTryBody)
    cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");

  // Enter the epilogue cleanups.
  RunCleanupsScope dtorEpilogue(*this);

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases. But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks. In the Microsoft ABI, we
  // always delegate because we might not have a definition in this TU.
  switch (dtorType) {
  case Dtor_Unified:
    llvm_unreachable("not expecting a unified dtor");
  case Dtor_Comdat:
    llvm_unreachable("not expecting a COMDAT");
  case Dtor_Deleting:
  case Dtor_VectorDeleting:
    llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    assert((body || getTarget().getCXXABI().isMicrosoft()) &&
           "can't emit a dtor without a body for non-Microsoft ABIs");

    // Enter the cleanup scopes for virtual bases.
    enterDtorCleanups(dtor, Dtor_Complete);

    if (!isTryBody) {
      QualType thisTy = dtor->getFunctionObjectParameterType();
      emitCXXDestructorCall(dtor, Dtor_Base, /*forVirtualBase=*/false,
                            /*delegating=*/false, loadCXXThisAddress(), thisTy);
      break;
    }

    // Fallthrough: act like we're in the base variant.
    [[fallthrough]];

  case Dtor_Base:
    assert(body);

    // Enter the cleanup scopes for fields and non-virtual bases.
    enterDtorCleanups(dtor, Dtor_Base);

    assert(!cir::MissingFeatures::vtableInitialization());

    if (isTryBody) {
      cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
    } else if (body) {
      (void)emitStmt(body, /*useCurrentScope=*/true);
    } else {
      assert(dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.
    assert(!cir::MissingFeatures::appleKext());

    break;
  }

  // Jump out through the epilogue cleanups.
  dtorEpilogue.forceCleanup();

  // Exit the try if applicable.
  if (isTryBody)
    cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
}

/// Given a value of type T* that may not be to a complete object, construct
/// an l-value with the natural pointee alignment of T.
LValue CIRGenFunction::makeNaturalAlignPointeeAddrLValue(mlir::Value val,
                                                         QualType ty) {
  // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps
  // assert on the result type first.
  LValueBaseInfo baseInfo;
  assert(!cir::MissingFeatures::opTBAA());
  CharUnits align = cgm.getNaturalTypeAlignment(ty, &baseInfo);
  return makeAddrLValue(Address(val, align), ty, baseInfo);
}

LValue CIRGenFunction::makeNaturalAlignAddrLValue(mlir::Value val,
                                                  QualType ty) {
  LValueBaseInfo baseInfo;
  CharUnits alignment = cgm.getNaturalTypeAlignment(ty, &baseInfo);
  Address addr(val, convertTypeForMem(ty), alignment);
  assert(!cir::MissingFeatures::opTBAA());
  return makeAddrLValue(addr, ty, baseInfo);
}

QualType CIRGenFunction::buildFunctionArgList(GlobalDecl gd,
                                              FunctionArgList &args) {
  const auto *fd = cast<FunctionDecl>(gd.getDecl());
  QualType retTy = fd->getReturnType();

  const auto *md = dyn_cast<CXXMethodDecl>(fd);
  if (md && md->isInstance()) {
    if (cgm.getCXXABI().hasThisReturn(gd))
      cgm.errorNYI(fd->getSourceRange(), "this return");
    else if (cgm.getCXXABI().hasMostDerivedReturn(gd))
      cgm.errorNYI(fd->getSourceRange(), "most derived return");
    cgm.getCXXABI().buildThisParam(*this, args);
  }

  if (const auto *cd = dyn_cast<CXXConstructorDecl>(fd))
    if (cd->getInheritedConstructor())
      cgm.errorNYI(fd->getSourceRange(),
                   "buildFunctionArgList: inherited constructor");

  for (auto *param : fd->parameters())
    args.push_back(param);

  if (md && (isa<CXXConstructorDecl>(md) || isa<CXXDestructorDecl>(md)))
    cgm.getCXXABI().addImplicitStructorParams(*this, retTy, args);

  return retTy;
}

/// Emit code to compute a designator that specifies the location
/// of the expression.
/// FIXME: document this function better.
LValue CIRGenFunction::emitLValue(const Expr *e) {
  // FIXME: ApplyDebugLocation DL(*this, e);
  switch (e->getStmtClass()) {
  default:
    cgm.errorNYI(e->getSourceRange(),
                 std::string("l-value not implemented for '") +
                     e->getStmtClassName() + "'");
    return LValue();
  case Expr::ConditionalOperatorClass:
    return emitConditionalOperatorLValue(cast<ConditionalOperator>(e));
  case Expr::BinaryConditionalOperatorClass:
    return emitConditionalOperatorLValue(cast<BinaryConditionalOperator>(e));
  case Expr::ArraySubscriptExprClass:
    return emitArraySubscriptExpr(cast<ArraySubscriptExpr>(e));
  case Expr::ExtVectorElementExprClass:
    return emitExtVectorElementExpr(cast<ExtVectorElementExpr>(e));
  case Expr::UnaryOperatorClass:
    return emitUnaryOpLValue(cast<UnaryOperator>(e));
  case Expr::StringLiteralClass:
    return emitStringLiteralLValue(cast<StringLiteral>(e));
  case Expr::MemberExprClass:
    return emitMemberExpr(cast<MemberExpr>(e));
  case Expr::CompoundLiteralExprClass:
    return emitCompoundLiteralLValue(cast<CompoundLiteralExpr>(e));
  case Expr::PredefinedExprClass:
    return emitPredefinedLValue(cast<PredefinedExpr>(e));
  case Expr::BinaryOperatorClass:
    return emitBinaryOperatorLValue(cast<BinaryOperator>(e));
  case Expr::CompoundAssignOperatorClass: {
    QualType ty = e->getType();
    if (ty->getAs<AtomicType>()) {
      cgm.errorNYI(e->getSourceRange(),
                   "CompoundAssignOperator with AtomicType");
      return LValue();
    }
    if (!ty->isAnyComplexType())
      return emitCompoundAssignmentLValue(cast<CompoundAssignOperator>(e));

    return emitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(e));
  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
    return emitCallExprLValue(cast<CallExpr>(e));
  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(e);
    RunCleanupsScope scope(*this);
    LValue lv = emitLValue(cleanups->getSubExpr());
    assert(!cir::MissingFeatures::cleanupWithPreservedValues());
    return lv;
  }
  case Expr::CXXDefaultArgExprClass: {
    auto *dae = cast<CXXDefaultArgExpr>(e);
    CXXDefaultArgExprScope scope(*this, dae);
    return emitLValue(dae->getExpr());
  }
  case Expr::ParenExprClass:
    return emitLValue(cast<ParenExpr>(e)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return emitLValue(cast<GenericSelectionExpr>(e)->getResultExpr());
  case Expr::DeclRefExprClass:
    return emitDeclRefLValue(cast<DeclRefExpr>(e));
  case Expr::CStyleCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::ImplicitCastExprClass:
    return emitCastLValue(cast<CastExpr>(e));
  case Expr::MaterializeTemporaryExprClass:
    return emitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(e));
  case Expr::OpaqueValueExprClass:
    return emitOpaqueValueLValue(cast<OpaqueValueExpr>(e));
  case Expr::ChooseExprClass:
    return emitLValue(cast<ChooseExpr>(e)->getChosenSubExpr());
  }
}

static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt) {
  SmallString<256> buffer;
  llvm::raw_svector_ostream out(buffer);
  out << name << cnt;
  return std::string(out.str());
}
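
// Illustrative (not from the original file): the two helpers below yield
// "ref.tmp0", "ref.tmp1", ... and "agg.tmp0", "agg.tmp1", ... respectively,
// e.g. getVersionedTmpName("ref.tmp", 0) == "ref.tmp0".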

std::string CIRGenFunction::getCounterRefTmpAsString() {
  return getVersionedTmpName("ref.tmp", counterRefTmp++);
}

std::string CIRGenFunction::getCounterAggTmpAsString() {
  return getVersionedTmpName("agg.tmp", counterAggTmp++);
}

void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address destPtr,
                                            QualType ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus)
    if (const auto *rd = ty->getAsCXXRecordDecl(); rd && rd->isEmpty())
      return;

  // Cast the dest ptr to the appropriate i8 pointer type.
  if (builder.isInt8Ty(destPtr.getElementType())) {
    cgm.errorNYI(loc, "Cast the dest ptr to the appropriate i8 pointer type");
  }

  // Get size and alignment info for this aggregate.
  const CharUnits size = getContext().getTypeSizeInChars(ty);
  if (size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (isa<VariableArrayType>(getContext().getAsArrayType(ty))) {
      cgm.errorNYI(loc,
                   "emitNullInitialization for zero size VariableArrayType");
    } else {
      return;
    }
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!cgm.getTypes().isZeroInitializable(ty)) {
    cgm.errorNYI(loc, "type is not zero initializable");
  }

  // In LLVM Codegen: otherwise, just memset the whole thing to zero using
  // Builder.CreateMemSet. In CIR just emit a store of #cir.zero to the
  // respective address.
  // Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
  const mlir::Value zeroValue = builder.getNullValue(convertType(ty), loc);
  builder.createStore(loc, zeroValue, destPtr);
}
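
// Illustrative CIR for zero-initializing 'struct S { int a; float b; } s'
// (a sketch, not from the original file; exact type spellings may differ):
//
//   %1 = cir.const #cir.zero : !rec_S
//   cir.store %1, %0 : !rec_S, !cir.ptr<!rec_S>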

// TODO(cir): should be shared with LLVM codegen.
bool CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *ce) {
  const Expr *e = ce->getSubExpr();

  if (ce->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(e->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(ce)) {
    // And that glvalue casts are never null.
    if (ice->isGLValue())
      return false;
  }

  return true;
}
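
// Examples (illustrative, not from the original file):
//
//   static_cast<Base&>(derivedRef)  // glvalue cast: no null check
//   this->member                    // 'this' assumed non-null: no check
//   static_cast<Base*>(derivedPtr)  // pointer may be null: check required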

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
mlir::Value
CIRGenFunction::emitArrayLength(const clang::ArrayType *origArrayType,
                                QualType &baseType, Address &addr) {
  const clang::ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size. Note that
  // this is the size of the VLA in bytes, not its size in elements.
  if (isa<VariableArrayType>(arrayType)) {
    assert(!cir::MissingFeatures::vlas());
    cgm.errorNYI(*currSrcLoc, "VLAs");
    return builder.getConstInt(*currSrcLoc, sizeTy, 0);
  }

  uint64_t countFromCLAs = 1;
  QualType eltType;

  auto cirArrayType = mlir::dyn_cast<cir::ArrayType>(addr.getElementType());

  while (cirArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    countFromCLAs *= cirArrayType.getSize();
    eltType = arrayType->getElementType();

    cirArrayType =
        mlir::dyn_cast<cir::ArrayType>(cirArrayType.getElementType());

    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!cirArrayType || arrayType) &&
           "CIR and Clang types are out-of-sync");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    cgm.errorNYI(*currSrcLoc, "length for non-array underlying types");
  }

  baseType = eltType;
  return builder.getConstInt(*currSrcLoc, sizeTy, countFromCLAs);
}
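
// Example (illustrative, not from the original file): for 'int a[2][3]' the
// loop above walks both cir::ArrayType levels, so countFromCLAs = 2 * 3 = 6,
// baseType becomes 'int', and the returned value is a size-type constant 6.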

  // If we already made the indirect branch for indirect goto, return its block.
  if (indirectGotoBlock)
    return;

  mlir::OpBuilder::InsertionGuard guard(builder);
  indirectGotoBlock =
      builder.createBlock(builder.getBlock()->getParent(), {}, {voidPtrTy},
                          {builder.getUnknownLoc()});
}

mlir::Value CIRGenFunction::emitAlignmentAssumption(
    mlir::Value ptrValue, QualType ty, SourceLocation loc,
    SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue) {
  assert(!cir::MissingFeatures::sanitizers());
  return cir::AssumeAlignedOp::create(builder, getLoc(assumptionLoc), ptrValue,
                                      alignment, offsetValue);
}

mlir::Value CIRGenFunction::emitAlignmentAssumption(
    mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc,
    int64_t alignment, mlir::Value offsetValue) {
  QualType ty = expr->getType();
  SourceLocation loc = expr->getExprLoc();
  return emitAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment,
                                 offsetValue);
}
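
// Example trigger (illustrative, not from the original file):
//
//   void *p = __builtin_assume_aligned(q, 16);
//
// reaches the first overload with alignment = 16 and no offset, producing an
// AssumeAlignedOp that records the guarantee on the pointer value.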

CIRGenFunction::VlaSizePair CIRGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla =
      cgm.getASTContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

CIRGenFunction::VlaSizePair
CIRGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  mlir::Value numElements;

  QualType elementType;
  do {
    elementType = type->getElementType();
    mlir::Value vlaSize = vlaSizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize.getType() == sizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.
      numElements =
          builder.createMul(numElements.getLoc(), numElements, vlaSize,
                            cir::OverflowBehavior::nuw);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  assert(numElements && "Undefined elements number");
  return {numElements, elementType};
}

CIRGenFunction::VlaSizePair
CIRGenFunction::getVLAElements1D(const VariableArrayType *vla) {
  mlir::Value vlaSize = vlaSizeMap[vla->getSizeExpr()];
  assert(vlaSize && "no size for VLA!");
  assert(vlaSize.getType() == sizeTy);
  return {vlaSize, vla->getElementType()};
}
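
// Example (illustrative, not from the original file): for 'int a[n][m]',
// getVLASize multiplies the recorded bounds, yielding numElements = n * m
// with elementType 'int', while getVLAElements1D peels only the outermost
// dimension, yielding {n, int[m]}.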

// TODO(cir): Most of this function can be shared between CIRGen
// and traditional LLVM codegen
void CIRGenFunction::emitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {
    case Type::CountAttributed:
    case Type::PackIndexing:
    case Type::ArrayParameter:
    case Type::HLSLAttributedResource:
    case Type::HLSLInlineSpirv:
    case Type::PredefinedSugar:
      cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
      break;

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable(
          "dependent type must be resolved before the CIR codegen");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::Record:
    case Type::Enum:
    case Type::Using:
    case Type::TemplateSpecialization:
    case Type::ObjCTypeParam:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
    case Type::BitInt:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<clang::AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<clang::DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<clang::PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<clang::BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<clang::ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<clang::MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<clang::ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<clang::VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *sizeExpr = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        mlir::Value &entry = vlaSizeMap[sizeExpr];
        if (!entry) {
          mlir::Value size = emitScalarExpr(sizeExpr);
          assert(!cir::MissingFeatures::sanitizers());

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          // FIXME: What about when size's type is larger than size_t?
          entry = builder.createIntCast(size, sizeTy);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<clang::FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::BTFTagAttributed:
    case Type::SubstTemplateTypeParm:
    case Type::MacroQualified:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      emitIgnoredExpr(cast<clang::TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<clang::AtomicType>(ty)->getValueType();
      break;

    case Type::Pipe:
      type = cast<clang::PipeType>(ty)->getElementType();
      break;
    }
  } while (type->isVariablyModifiedType());
}
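
// Example walk (illustrative, not from the original file): for
//
//   void g(int n) { int m[n][2]; }
//
// the loop descends VariableArray -> ConstantArray, evaluates 'n' once,
// extends it to the size type, and caches it in vlaSizeMap keyed by the size
// expression so later queries (e.g. getVLASize) reuse the same value.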

Address CIRGenFunction::emitVAListRef(const Expr *e) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return emitPointerWithAlignment(e);
  return emitLValue(e).getAddress();
}

} // namespace clang::CIRGen