clang 22.0.0git
CIRGenFunction.cpp
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Internal per-function state used for AST-to-ClangIR code gen
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
14
15#include "CIRGenCXXABI.h"
16#include "CIRGenCall.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/Location.h"
19#include "clang/AST/ExprCXX.h"
20#include "clang/AST/GlobalDecl.h"
21#include "clang/CIR/MissingFeatures.h"
22
23#include <cassert>
24
25namespace clang::CIRGen {
26
27CIRGenFunction::CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder,
28 bool suppressNewContext)
29 : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
30 ehStack.setCGF(this);
31}
32
33CIRGenFunction::~CIRGenFunction() {}
34
35// This is copied from clang/lib/CodeGen/CodeGenFunction.cpp
36cir::TypeEvaluationKind CIRGenFunction::getEvaluationKind(QualType type) {
37 type = type.getCanonicalType();
38 while (true) {
39 switch (type->getTypeClass()) {
40#define TYPE(name, parent)
41#define ABSTRACT_TYPE(name, parent)
42#define NON_CANONICAL_TYPE(name, parent) case Type::name:
43#define DEPENDENT_TYPE(name, parent) case Type::name:
44#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
45#include "clang/AST/TypeNodes.inc"
46 llvm_unreachable("non-canonical or dependent type in IR-generation");
47
48 case Type::Auto:
49 case Type::DeducedTemplateSpecialization:
50 llvm_unreachable("undeduced type in IR-generation");
51
52 // Various scalar types.
53 case Type::Builtin:
54 case Type::Pointer:
55 case Type::BlockPointer:
56 case Type::LValueReference:
57 case Type::RValueReference:
58 case Type::MemberPointer:
59 case Type::Vector:
60 case Type::ExtVector:
61 case Type::ConstantMatrix:
62 case Type::FunctionProto:
63 case Type::FunctionNoProto:
64 case Type::Enum:
65 case Type::ObjCObjectPointer:
66 case Type::Pipe:
67 case Type::BitInt:
68 case Type::HLSLAttributedResource:
69 case Type::HLSLInlineSpirv:
70 return cir::TEK_Scalar;
71
72 // Complexes.
73 case Type::Complex:
74 return cir::TEK_Complex;
75
76 // Arrays, records, and Objective-C objects.
77 case Type::ConstantArray:
78 case Type::IncompleteArray:
79 case Type::VariableArray:
80 case Type::Record:
81 case Type::ObjCObject:
82 case Type::ObjCInterface:
83 case Type::ArrayParameter:
84 return cir::TEK_Aggregate;
85
86 // We operate on atomic values according to their underlying type.
87 case Type::Atomic:
88 type = cast<AtomicType>(type)->getValueType();
89 continue;
90 }
91 llvm_unreachable("unknown type kind!");
92 }
93}
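// Illustrative sketch (not part of the original file): callers typically
// dispatch on the evaluation kind when emitting an expression. Assuming the
// usual CIRGen emit helpers (names hypothetical where noted):
//
//   switch (CIRGenFunction::getEvaluationKind(e->getType())) {
//   case cir::TEK_Scalar:
//     (void)emitScalarExpr(e);       // scalar path
//     break;
//   case cir::TEK_Complex:
//     (void)emitComplexExpr(e);      // complex path (hypothetical name)
//     break;
//   case cir::TEK_Aggregate:
//     emitAggExpr(e, slot);          // aggregate path (hypothetical name)
//     break;
//   }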
94
95mlir::Type CIRGenFunction::convertTypeForMem(QualType t) {
96 return cgm.getTypes().convertTypeForMem(t);
97}
98
99mlir::Type CIRGenFunction::convertType(QualType t) {
100 return cgm.getTypes().convertType(t);
101}
102
103mlir::Location CIRGenFunction::getLoc(SourceLocation srcLoc) {
104 // Some AST nodes might contain invalid source locations (e.g.
105 // CXXDefaultArgExpr), work around that to still get something out.
106 if (srcLoc.isValid()) {
107 SourceManager &sm = getContext().getSourceManager();
108 PresumedLoc pLoc = sm.getPresumedLoc(srcLoc);
109 StringRef filename = pLoc.getFilename();
110 return mlir::FileLineColLoc::get(builder.getStringAttr(filename),
111 pLoc.getLine(), pLoc.getColumn());
112 }
113 // Do our best...
114 assert(currSrcLoc && "expected to inherit some source location");
115 return *currSrcLoc;
116}
117
118mlir::Location CIRGenFunction::getLoc(SourceRange srcLoc) {
119 // Some AST nodes might contain invalid source locations (e.g.
120 // CXXDefaultArgExpr), work around that to still get something out.
121 if (srcLoc.isValid()) {
122 mlir::Location beg = getLoc(srcLoc.getBegin());
123 mlir::Location end = getLoc(srcLoc.getEnd());
124 SmallVector<mlir::Location, 2> locs = {beg, end};
125 mlir::Attribute metadata;
126 return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
127 }
128 if (currSrcLoc) {
129 return *currSrcLoc;
130 }
131 // We're brave, but time to give up.
132 return builder.getUnknownLoc();
133}
134
135mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) {
136 SmallVector<mlir::Location, 2> locs = {lhs, rhs};
137 mlir::Attribute metadata;
138 return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
139}
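// Illustrative sketch (not part of the original file): the three getLoc
// overloads compose; a statement's begin/end locations are typically fused
// into a single MLIR location for the emitted op, e.g.:
//
//   mlir::Location scopeLoc = getLoc(stmt->getSourceRange());
//   // equivalent to fusing the two endpoints explicitly:
//   mlir::Location fused =
//       getLoc(getLoc(stmt->getBeginLoc()), getLoc(stmt->getEndLoc()));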
140
141bool CIRGenFunction::containsLabel(const Stmt *s, bool ignoreCaseStmts) {
142 // Null statement, not a label!
143 if (!s)
144 return false;
145
146 // If this is a label, we have to emit the code, consider something like:
147 // if (0) { ... foo: bar(); } goto foo;
148 //
149 // TODO: If anyone cared, we could track __label__'s, since we know that you
150 // can't jump to one from outside their declared region.
151 if (isa<LabelStmt>(s))
152 return true;
153
154 // If this is a case/default statement, and we haven't seen a switch, we
155 // have to emit the code.
156 if (isa<SwitchCase>(s) && !ignoreCaseStmts)
157 return true;
158
159 // If this is a switch statement, we want to ignore case statements when we
160 // recursively process the sub-statements of the switch. If we haven't
161 // encountered a switch statement, we treat case statements like labels, but
162 // if we are processing a switch statement, case statements are expected.
163 if (isa<SwitchStmt>(s))
164 ignoreCaseStmts = true;
165
166 // Scan subexpressions for verboten labels.
167 return std::any_of(s->child_begin(), s->child_end(),
168 [=](const Stmt *subStmt) {
169 return containsLabel(subStmt, ignoreCaseStmts);
170 });
171}
172
173/// If the specified expression does not fold to a constant, or if it does but
174/// contains a label, return false. If it constant folds, return true and set
175/// the boolean result in resultBool.
176bool CIRGenFunction::constantFoldsToBool(const Expr *cond, bool &resultBool,
177 bool allowLabels) {
178 llvm::APSInt resultInt;
179 if (!constantFoldsToSimpleInteger(cond, resultInt, allowLabels))
180 return false;
181
182 resultBool = resultInt.getBoolValue();
183 return true;
184}
185
186/// If the specified expression does not fold to a constant, or if it does
187/// fold but contains a label, return false. If it constant folds, return
188/// true and set the folded value.
189bool CIRGenFunction::constantFoldsToSimpleInteger(const Expr *cond,
190 llvm::APSInt &resultInt,
191 bool allowLabels) {
192 // FIXME: Rename and handle conversion of other evaluatable things
193 // to bool.
194 Expr::EvalResult result;
195 if (!cond->EvaluateAsInt(result, getContext()))
196 return false; // Not foldable, not integer or not fully evaluatable.
197
198 llvm::APSInt intValue = result.Val.getInt();
199 if (!allowLabels && containsLabel(cond))
200 return false; // Contains a label.
201
202 resultInt = intValue;
203 return true;
204}
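// Illustrative sketch (not part of the original file): a typical caller folds
// a branch condition and, when folding succeeds, emits only the live arm.
// Hedged pseudo-usage against the helpers above:
//
//   bool condConstant;
//   if (constantFoldsToBool(ifS->getCond(), condConstant)) {
//     const Stmt *live = condConstant ? ifS->getThen() : ifS->getElse();
//     const Stmt *dead = condConstant ? ifS->getElse() : ifS->getThen();
//     if (!containsLabel(dead)) {      // the skipped arm must be label-free
//       if (live)
//         return emitStmt(live, /*useCurrentScope=*/true);
//       return mlir::success();        // nothing to emit for the dead branch
//     }
//   }
//   // otherwise emit the full conditional branch.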
205
206void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
207 CharUnits alignment) {
208 if (!type->isVoidType()) {
209 fnRetAlloca = emitAlloca("__retval", convertType(type), loc, alignment,
210 /*insertIntoFnEntryBlock=*/false);
211 }
212}
213
214void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
215 mlir::Location loc, CharUnits alignment,
216 bool isParam) {
217 assert(isa<NamedDecl>(var) && "Needs a named decl");
218 assert(!symbolTable.count(var) && "not supposed to be available just yet");
219
220 auto allocaOp = addrVal.getDefiningOp<cir::AllocaOp>();
221 assert(allocaOp && "expected cir::AllocaOp");
222
223 if (isParam)
224 allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
225 if (ty->isReferenceType() || ty.isConstQualified())
226 allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext()));
227
228 symbolTable.insert(var, allocaOp);
229}
230
231void CIRGenFunction::LexicalScope::cleanup() {
232 CIRGenBuilderTy &builder = cgf.builder;
233 LexicalScope *localScope = cgf.curLexScope;
234
235 auto applyCleanup = [&]() {
236 if (performCleanup) {
237 // ApplyDebugLocation
238 assert(!cir::MissingFeatures::generateDebugInfo());
239 forceCleanup();
240 }
241 };
242
243 if (returnBlock != nullptr) {
244 // Write out the return block, which loads the value from `__retval` and
245 // issues the `cir.return`.
246 mlir::OpBuilder::InsertionGuard guard(builder);
247 builder.setInsertionPointToEnd(returnBlock);
248 (void)emitReturn(*returnLoc);
249 }
250
251 auto insertCleanupAndLeave = [&](mlir::Block *insPt) {
252 mlir::OpBuilder::InsertionGuard guard(builder);
253 builder.setInsertionPointToEnd(insPt);
254
255 // If we still don't have a cleanup block, it means that `applyCleanup`
256 // below might be able to get us one.
257 mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
258
259 // Leverage and defer to RunCleanupsScope's dtor and scope handling.
260 applyCleanup();
261
262 // If we now have one after `applyCleanup`, hook it up properly.
263 if (!cleanupBlock && localScope->getCleanupBlock(builder)) {
264 cleanupBlock = localScope->getCleanupBlock(builder);
265 builder.create<cir::BrOp>(insPt->back().getLoc(), cleanupBlock);
266 if (!cleanupBlock->mightHaveTerminator()) {
267 mlir::OpBuilder::InsertionGuard guard(builder);
268 builder.setInsertionPointToEnd(cleanupBlock);
269 builder.create<cir::YieldOp>(localScope->endLoc);
270 }
271 }
272
273 if (localScope->depth == 0) {
274 // Reached the end of the function.
275 if (returnBlock != nullptr) {
276 if (returnBlock->getUses().empty()) {
277 returnBlock->erase();
278 } else {
279 // Thread return block via cleanup block.
280 if (cleanupBlock) {
281 for (mlir::BlockOperand &blockUse : returnBlock->getUses()) {
282 cir::BrOp brOp = mlir::cast<cir::BrOp>(blockUse.getOwner());
283 brOp.setSuccessor(cleanupBlock);
284 }
285 }
286
287 builder.create<cir::BrOp>(*returnLoc, returnBlock);
288 return;
289 }
290 }
291 emitImplicitReturn();
292 return;
293 }
294
295 // End of any local scope other than the function scope.
296 // Ternary ops have to deal with matching arms for yielding types and do
297 // return a value, so they must do their own cir.yield insertion.
298 if (!localScope->isTernary() && !insPt->mightHaveTerminator()) {
299 !retVal ? builder.create<cir::YieldOp>(localScope->endLoc)
300 : builder.create<cir::YieldOp>(localScope->endLoc, retVal);
301 }
302 };
303
304 // If a cleanup block has been created at some point, branch to it
305 // and set the insertion point to continue at the cleanup block.
306 // Terminators are then inserted either in the cleanup block or
307 // inline in this current block.
308 mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
309 if (cleanupBlock)
310 insertCleanupAndLeave(cleanupBlock);
311
312 // Now deal with any pending block wrap up like implicit end of
313 // scope.
314
315 mlir::Block *curBlock = builder.getBlock();
316 if (isGlobalInit() && !curBlock)
317 return;
318 if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
319 return;
320
321 // Get rid of any empty block at the end of the scope.
322 bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
323 if (!entryBlock && curBlock->empty()) {
324 curBlock->erase();
325 if (returnBlock != nullptr && returnBlock->getUses().empty())
326 returnBlock->erase();
327 return;
328 }
329
330 // If there's a cleanup block, branch to it, nothing else to do.
331 if (cleanupBlock) {
332 builder.create<cir::BrOp>(curBlock->back().getLoc(), cleanupBlock);
333 return;
334 }
335
336 // No pre-existent cleanup block, emit cleanup code and yield/return.
337 insertCleanupAndLeave(curBlock);
338}
339
340cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
341 CIRGenBuilderTy &builder = cgf.getBuilder();
342
343 if (!cgf.curFn.getFunctionType().hasVoidReturn()) {
344 // Load the value from `__retval` and return it via the `cir.return` op.
345 auto value = builder.create<cir::LoadOp>(
346 loc, cgf.curFn.getFunctionType().getReturnType(), *cgf.fnRetAlloca);
347 return builder.create<cir::ReturnOp>(loc,
348 llvm::ArrayRef(value.getResult()));
349 }
350 return builder.create<cir::ReturnOp>(loc);
351}
352
353// This is copied from CodeGenModule::MayDropFunctionReturn. This is a
354// candidate for sharing between CIRGen and CodeGen.
355static bool mayDropFunctionReturn(const ASTContext &astContext,
356 QualType returnType) {
357 // We can't just discard the return value for a record type with a complex
358 // destructor or a non-trivially copyable type.
359 if (const auto *classDecl = returnType->getAsCXXRecordDecl())
360 return classDecl->hasTrivialDestructor();
361 return returnType.isTriviallyCopyableType(astContext);
362}
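// Illustrative note (not part of the original file), assuming ordinary C++
// types: mayDropFunctionReturn distinguishes cases such as
//
//   struct Trivial { int x; };      // trivially copyable      -> droppable
//   struct Guard { ~Guard(); };     // non-trivial destructor  -> not droppable
//
// Falling off the end of a non-void function returning Guard cannot silently
// discard the missing value, which is why emitImplicitReturn prefers a trap
// or unreachable terminator in that case.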
363
364void CIRGenFunction::LexicalScope::emitImplicitReturn() {
365 CIRGenBuilderTy &builder = cgf.getBuilder();
366 LexicalScope *localScope = cgf.curLexScope;
367
368 const auto *fd = cast<clang::FunctionDecl>(cgf.curGD.getDecl());
369
370 // In C++, flowing off the end of a non-void function is always undefined
371 // behavior. In C, flowing off the end of a non-void function is undefined
372 // behavior only if the non-existent return value is used by the caller.
373 // That influences whether the terminating op is trap, unreachable, or
374 // return.
375 if (cgf.getLangOpts().CPlusPlus && !fd->hasImplicitReturnZero() &&
376 !cgf.sawAsmBlock && !fd->getReturnType()->isVoidType() &&
377 builder.getInsertionBlock()) {
378 bool shouldEmitUnreachable =
379 cgf.cgm.getCodeGenOpts().StrictReturn ||
380 !mayDropFunctionReturn(fd->getASTContext(), fd->getReturnType());
381
382 if (shouldEmitUnreachable) {
383 assert(!cir::MissingFeatures::sanitizers());
384 if (cgf.cgm.getCodeGenOpts().OptimizationLevel == 0)
385 builder.create<cir::TrapOp>(localScope->endLoc);
386 else
387 builder.create<cir::UnreachableOp>(localScope->endLoc);
388 builder.clearInsertionPoint();
389 return;
390 }
391 }
392
393 (void)emitReturn(localScope->endLoc);
394}
395
396void CIRGenFunction::startFunction(GlobalDecl gd, QualType returnType,
397 cir::FuncOp fn, cir::FuncType funcType,
398 FunctionArgList args, SourceLocation loc,
399 SourceLocation startLoc) {
400 assert(!curFn &&
401 "CIRGenFunction can only be used for one function at a time");
402
403 curFn = fn;
404
405 const Decl *d = gd.getDecl();
406 const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
407 curFuncDecl = d->getNonClosureContext();
408
409 prologueCleanupDepth = ehStack.stable_begin();
410
411 mlir::Block *entryBB = &fn.getBlocks().front();
412 builder.setInsertionPointToStart(entryBB);
413
414 // TODO(cir): this should live in `emitFunctionProlog`.
415 // Declare all the function arguments in the symbol table.
416 for (const auto nameValue : llvm::zip(args, entryBB->getArguments())) {
417 const VarDecl *paramVar = std::get<0>(nameValue);
418 mlir::Value paramVal = std::get<1>(nameValue);
419 CharUnits alignment = getContext().getDeclAlign(paramVar);
420 mlir::Location paramLoc = getLoc(paramVar->getSourceRange());
421 paramVal.setLoc(paramLoc);
422
423 mlir::Value addrVal =
424 emitAlloca(cast<NamedDecl>(paramVar)->getName(),
425 convertType(paramVar->getType()), paramLoc, alignment,
426 /*insertIntoFnEntryBlock=*/true);
427
428 declare(addrVal, paramVar, paramVar->getType(), paramLoc, alignment,
429 /*isParam=*/true);
430
431 setAddrOfLocalVar(paramVar, Address(addrVal, alignment));
432
433 bool isPromoted = isa<ParmVarDecl>(paramVar) &&
434 cast<ParmVarDecl>(paramVar)->isKNRPromoted();
435 assert(!cir::MissingFeatures::constructABIArgDirectExtend());
436 if (isPromoted)
437 cgm.errorNYI(fd->getSourceRange(), "Function argument demotion");
438
439 // Location of the store to the param storage tracked as beginning of
440 // the function body.
441 mlir::Location fnBodyBegin = getLoc(fd->getBody()->getBeginLoc());
442 builder.CIRBaseBuilderTy::createStore(fnBodyBegin, paramVal, addrVal);
443 }
444 assert(builder.getInsertionBlock() && "Should be valid");
445
446 // When the current function is not void, create an address to store the
447 // result value.
448 if (!returnType->isVoidType())
449 emitAndUpdateRetAlloca(returnType, getLoc(fd->getBody()->getEndLoc()),
450 getContext().getTypeAlignInChars(returnType));
451
452 if (isa_and_nonnull<CXXMethodDecl>(d) &&
453 cast<CXXMethodDecl>(d)->isInstance()) {
454 cgm.getCXXABI().emitInstanceFunctionProlog(loc, *this);
455
456 const auto *md = cast<CXXMethodDecl>(d);
457 if (md->getParent()->isLambda() && md->getOverloadedOperator() == OO_Call) {
458 cgm.errorNYI(loc, "lambda call operator");
459 } else {
460 // Not in a lambda; just use 'this' from the method.
461 // FIXME: Should we generate a new load for each use of 'this'? The fast
462 // register allocator would be happier...
463 cxxThisValue = cxxabiThisValue;
464 }
465
468 }
469}
470
471void CIRGenFunction::finishFunction(SourceLocation endLoc) {
472 // Pop any cleanups that might have been associated with the
473 // parameters. Do this in whatever block we're currently in; it's
474 // important to do this before we enter the return block or return
475 // edges will be *really* confused.
476 // TODO(cir): Use prologueCleanupDepth here.
477 bool hasCleanups = ehStack.stable_begin() != prologueCleanupDepth;
478 if (hasCleanups) {
479 assert(!cir::MissingFeatures::generateDebugInfo());
480 // FIXME(cir): should we clearInsertionPoint? breaks many testcases
481 popCleanupBlocks(prologueCleanupDepth);
482 }
483}
484
485mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
486 // We start with function level scope for variables.
487 SymTableScopeTy varScope(symbolTable);
488
489 if (const CompoundStmt *block = dyn_cast<CompoundStmt>(body))
490 return emitCompoundStmtWithoutScope(*block);
491
492 return emitStmt(body, /*useCurrentScope=*/true);
493}
494
495static void eraseEmptyAndUnusedBlocks(cir::FuncOp func) {
496 // Remove any leftover blocks that are unreachable and empty, since they do
497 // not represent unreachable code useful for warnings nor anything deemed
498 // useful in general.
499 SmallVector<mlir::Block *> blocksToDelete;
500 for (mlir::Block &block : func.getBlocks()) {
501 if (block.empty() && block.getUses().empty())
502 blocksToDelete.push_back(&block);
503 }
504 for (mlir::Block *block : blocksToDelete)
505 block->erase();
506}
507
508cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
509 cir::FuncType funcType) {
510 const auto funcDecl = cast<FunctionDecl>(gd.getDecl());
511 curGD = gd;
512
513 SourceLocation loc = funcDecl->getLocation();
514 Stmt *body = funcDecl->getBody();
515 SourceRange bodyRange =
516 body ? body->getSourceRange() : funcDecl->getLocation();
517
518 SourceLocRAIIObject fnLoc{*this, loc.isValid() ? getLoc(loc)
519 : builder.getUnknownLoc()};
520
521 auto validMLIRLoc = [&](clang::SourceLocation clangLoc) {
522 return clangLoc.isValid() ? getLoc(clangLoc) : builder.getUnknownLoc();
523 };
524 const mlir::Location fusedLoc = mlir::FusedLoc::get(
525 &getMLIRContext(),
526 {validMLIRLoc(bodyRange.getBegin()), validMLIRLoc(bodyRange.getEnd())});
527 mlir::Block *entryBB = fn.addEntryBlock();
528
529 FunctionArgList args;
530 QualType retTy = buildFunctionArgList(gd, args);
531
532 // Create a scope in the symbol table to hold variable declarations.
533 SymTableScopeTy varScope(symbolTable);
534 {
535 LexicalScope lexScope(*this, fusedLoc, entryBB);
536
537 startFunction(gd, retTy, fn, funcType, args, loc, bodyRange.getBegin());
538
539 if (isa<CXXDestructorDecl>(funcDecl)) {
540 emitDestructorBody(args);
541 } else if (isa<CXXConstructorDecl>(funcDecl)) {
542 emitConstructorBody(args);
543 } else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice &&
544 funcDecl->hasAttr<CUDAGlobalAttr>()) {
545 getCIRGenModule().errorNYI(bodyRange, "CUDA kernel");
546 } else if (isa<CXXMethodDecl>(funcDecl) &&
547 cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker()) {
548 getCIRGenModule().errorNYI(bodyRange, "Lambda static invoker");
549 } else if (funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) &&
550 (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() ||
551 cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator())) {
552 // Implicit copy-assignment gets the same special treatment as implicit
553 // copy-constructors.
554 emitImplicitAssignmentOperatorBody(args);
555 } else if (body) {
556 if (mlir::failed(emitFunctionBody(body))) {
557 return nullptr;
558 }
559 } else {
560 // Anything without a body should have been handled above.
561 llvm_unreachable("no definition for normal function");
562 }
563
564 if (mlir::failed(fn.verifyBody()))
565 return nullptr;
566
567 finishFunction(bodyRange.getEnd());
568 }
569
570 eraseEmptyAndUnusedBlocks(fn);
571 return fn;
572}
573
574void CIRGenFunction::emitConstructorBody(FunctionArgList &args) {
575 assert(!cir::MissingFeatures::sanitizers());
576 const auto *ctor = cast<CXXConstructorDecl>(curGD.getDecl());
577 CXXCtorType ctorType = curGD.getCtorType();
578
579 assert((cgm.getTarget().getCXXABI().hasConstructorVariants() ||
580 ctorType == Ctor_Complete) &&
581 "can only generate complete ctor for this ABI");
582
583 if (ctorType == Ctor_Complete && isConstructorDelegationValid(ctor) &&
584 cgm.getTarget().getCXXABI().hasConstructorVariants()) {
585 emitDelegateCXXConstructorCall(ctor, Ctor_Base, args, ctor->getEndLoc());
586 return;
587 }
588
589 const FunctionDecl *definition = nullptr;
590 Stmt *body = ctor->getBody(definition);
591 assert(definition == ctor && "emitting wrong constructor body");
592
593 if (isa_and_nonnull<CXXTryStmt>(body)) {
594 cgm.errorNYI(ctor->getSourceRange(), "emitConstructorBody: try body");
595 return;
596 }
597
600
601 // TODO: in restricted cases, we can emit the vbase initializers of a
602 // complete ctor and then delegate to the base ctor.
603
604 // Emit the constructor prologue, i.e. the base and member initializers.
605 emitCtorPrologue(ctor, ctorType, args);
606
607 // TODO(cir): propagate this result via mlir::LogicalResult. For now it is
608 // just reported as NYI so that the failure is at least handled.
609 if (mlir::failed(emitStmt(body, true))) {
610 cgm.errorNYI(ctor->getSourceRange(),
611 "emitConstructorBody: emit body statement failed.");
612 return;
613 }
614}
615
616/// Emits the body of the current destructor.
617void CIRGenFunction::emitDestructorBody(FunctionArgList &args) {
618 const CXXDestructorDecl *dtor = cast<CXXDestructorDecl>(curGD.getDecl());
619 CXXDtorType dtorType = curGD.getDtorType();
620
621 // For an abstract class, non-base destructors are never used (and can't
622 // be emitted in general, because vbase dtors may not have been validated
623 // by Sema), but the Itanium ABI doesn't make them optional and Clang may
624 // in fact emit references to them from other compilations, so emit them
625 // as functions containing a trap instruction.
626 if (dtorType != Dtor_Base && dtor->getParent()->isAbstract()) {
627 cgm.errorNYI(dtor->getSourceRange(), "abstract base class destructors");
628 return;
629 }
630
631 Stmt *body = dtor->getBody();
633
634 // The call to operator delete in a deleting destructor happens
635 // outside of the function-try-block, which means it's always
636 // possible to delegate the destructor body to the complete
637 // destructor. Do so.
638 if (dtorType == Dtor_Deleting) {
639 cgm.errorNYI(dtor->getSourceRange(), "deleting destructor");
640 return;
641 }
642
643 // If the body is a function-try-block, enter the try before
644 // anything else.
645 const bool isTryBody = isa_and_nonnull<CXXTryStmt>(body);
646 if (isTryBody)
647 cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
648
651
652 // If this is the complete variant, just invoke the base variant;
653 // the epilogue will destruct the virtual bases. But we can't do
654 // this optimization if the body is a function-try-block, because
655 // we'd introduce *two* handler blocks. In the Microsoft ABI, we
656 // always delegate because we might not have a definition in this TU.
657 switch (dtorType) {
658 case Dtor_Comdat:
659 llvm_unreachable("not expecting a COMDAT");
660 case Dtor_Deleting:
661 llvm_unreachable("already handled deleting case");
662
663 case Dtor_Complete:
664 assert((body || getTarget().getCXXABI().isMicrosoft()) &&
665 "can't emit a dtor without a body for non-Microsoft ABIs");
666
668
669 if (!isTryBody) {
670 QualType thisTy = dtor->getFunctionObjectParameterType();
671 emitCXXDestructorCall(dtor, Dtor_Base, /*forVirtualBase=*/false,
672 /*delegating=*/false, loadCXXThisAddress(), thisTy);
673 break;
674 }
675
676 // Fallthrough: act like we're in the base variant.
677 [[fallthrough]];
678
679 case Dtor_Base:
680 assert(body);
681
684
685 if (isTryBody) {
686 cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
687 } else if (body) {
688 (void)emitStmt(body, /*useCurrentScope=*/true);
689 } else {
690 assert(dtor->isImplicit() && "bodyless dtor not implicit");
691 // nothing to do besides what's in the epilogue
692 }
693 // -fapple-kext must inline any call to this dtor into
694 // the caller's body.
695 assert(!cir::MissingFeatures::appleKext());
696
697 break;
698 }
699
701
702 // Exit the try if applicable.
703 if (isTryBody)
704 cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
705}
706
707/// Given a value of type T* that may not point to a complete object,
708/// construct an l-value with the natural pointee alignment of T.
709LValue CIRGenFunction::makeNaturalAlignPointeeAddrLValue(mlir::Value val,
710 QualType ty) {
711 // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps
712 // assert on the result type first.
713 LValueBaseInfo baseInfo;
715 CharUnits align = cgm.getNaturalTypeAlignment(ty, &baseInfo);
716 return makeAddrLValue(Address(val, align), ty, baseInfo);
717}
718
719LValue CIRGenFunction::makeNaturalAlignAddrLValue(mlir::Value val,
720 QualType ty) {
721 LValueBaseInfo baseInfo;
722 CharUnits alignment = cgm.getNaturalTypeAlignment(ty, &baseInfo);
723 Address addr(val, convertTypeForMem(ty), alignment);
725 return makeAddrLValue(addr, ty, baseInfo);
726}
727
728clang::QualType CIRGenFunction::buildFunctionArgList(clang::GlobalDecl gd,
729 FunctionArgList &args) {
730 const auto *fd = cast<FunctionDecl>(gd.getDecl());
731 QualType retTy = fd->getReturnType();
732
733 const auto *md = dyn_cast<CXXMethodDecl>(fd);
734 if (md && md->isInstance()) {
735 if (cgm.getCXXABI().hasThisReturn(gd))
736 cgm.errorNYI(fd->getSourceRange(), "this return");
737 else if (cgm.getCXXABI().hasMostDerivedReturn(gd))
738 cgm.errorNYI(fd->getSourceRange(), "most derived return");
739 cgm.getCXXABI().buildThisParam(*this, args);
740 }
741
742 if (const auto *cd = dyn_cast<CXXConstructorDecl>(fd))
743 if (cd->getInheritedConstructor())
744 cgm.errorNYI(fd->getSourceRange(),
745 "buildFunctionArgList: inherited constructor");
746
747 for (auto *param : fd->parameters())
748 args.push_back(param);
749
750 if (md && (isa<CXXConstructorDecl>(md) || isa<CXXDestructorDecl>(md)))
751 assert(!cir::MissingFeatures::cxxabiStructorImplicitParam());
752
753 return retTy;
754}
755
756/// Emit code to compute a designator that specifies the location
757/// of the expression.
758/// FIXME: document this function better.
759LValue CIRGenFunction::emitLValue(const Expr *e) {
760 // FIXME: ApplyDebugLocation DL(*this, e);
761 switch (e->getStmtClass()) {
762 default:
763 cgm.errorNYI(e->getSourceRange(),
764 std::string("l-value not implemented for '") +
765 e->getStmtClassName() + "'");
766 return LValue();
767 case Expr::ArraySubscriptExprClass:
768 return emitArraySubscriptExpr(cast<ArraySubscriptExpr>(e));
769 case Expr::UnaryOperatorClass:
770 return emitUnaryOpLValue(cast<UnaryOperator>(e));
771 case Expr::StringLiteralClass:
772 return emitStringLiteralLValue(cast<StringLiteral>(e));
773 case Expr::MemberExprClass:
774 return emitMemberExpr(cast<MemberExpr>(e));
775 case Expr::CompoundLiteralExprClass:
776 return emitCompoundLiteralLValue(cast<CompoundLiteralExpr>(e));
777 case Expr::BinaryOperatorClass:
778 return emitBinaryOperatorLValue(cast<BinaryOperator>(e));
779 case Expr::CompoundAssignOperatorClass: {
780 QualType ty = e->getType();
781 if (ty->getAs<AtomicType>()) {
783 "CompoundAssignOperator with AtomicType");
784 return LValue();
785 }
786 if (!ty->isAnyComplexType())
787 return emitCompoundAssignmentLValue(cast<CompoundAssignOperator>(e));
788
789 return emitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(e));
790 }
791 case Expr::CallExprClass:
792 case Expr::CXXMemberCallExprClass:
793 case Expr::CXXOperatorCallExprClass:
794 case Expr::UserDefinedLiteralClass:
795 return emitCallExprLValue(cast<CallExpr>(e));
796 case Expr::ParenExprClass:
797 return emitLValue(cast<ParenExpr>(e)->getSubExpr());
798 case Expr::DeclRefExprClass:
799 return emitDeclRefLValue(cast<DeclRefExpr>(e));
800 case Expr::CStyleCastExprClass:
801 case Expr::CXXStaticCastExprClass:
802 case Expr::CXXDynamicCastExprClass:
803 case Expr::ImplicitCastExprClass:
804 return emitCastLValue(cast<CastExpr>(e));
805 case Expr::MaterializeTemporaryExprClass:
806 return emitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(e));
807 }
808}
809
810static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt) {
811 SmallString<256> buffer;
812 llvm::raw_svector_ostream out(buffer);
813 out << name << cnt;
814 return std::string(out.str());
815}
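// Illustrative note (not part of the original file): getVersionedTmpName
// simply appends the counter, so successive calls yield
//
//   getVersionedTmpName("ref.tmp", 0) == "ref.tmp0"
//   getVersionedTmpName("ref.tmp", 1) == "ref.tmp1"
//
// mirroring the temporary names produced by classic LLVM codegen.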
816
817std::string CIRGenFunction::getCounterRefTmpAsString() {
818 return getVersionedTmpName("ref.tmp", counterRefTmp++);
819}
820
821std::string CIRGenFunction::getCounterAggTmpAsString() {
822 return getVersionedTmpName("agg.tmp", counterAggTmp++);
823}
824
825void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address destPtr,
826 QualType ty) {
827 // Ignore empty classes in C++.
829 if (const auto *rd = ty->getAsCXXRecordDecl(); rd && rd->isEmpty())
830 return;
831
832 // Cast the dest ptr to the appropriate i8 pointer type.
833 if (builder.isInt8Ty(destPtr.getElementType())) {
834 cgm.errorNYI(loc, "Cast the dest ptr to the appropriate i8 pointer type");
835 }
836
837 // Get size and alignment info for this aggregate.
838 const CharUnits size = getContext().getTypeSizeInChars(ty);
839 if (size.isZero()) {
840 // But note that getTypeInfo returns 0 for a VLA.
841 if (isa<VariableArrayType>(getContext().getAsArrayType(ty))) {
842 cgm.errorNYI(loc,
843 "emitNullInitialization for zero size VariableArrayType");
844 } else {
845 return;
846 }
847 }
848
849 // If the type contains a pointer to data member we can't memset it to zero.
850 // Instead, create a null constant and copy it to the destination.
851 // TODO: there are other patterns besides zero that we can usefully memset,
852 // like -1, which happens to be the pattern used by member-pointers.
853 if (!cgm.getTypes().isZeroInitializable(ty)) {
854 cgm.errorNYI(loc, "type is not zero initializable");
855 }
856
857 // In LLVM Codegen: otherwise, just memset the whole thing to zero using
858 // Builder.CreateMemSet. In CIR just emit a store of #cir.zero to the
859 // respective address.
860 // Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
861 const mlir::Value zeroValue = builder.getNullValue(convertType(ty), loc);
862 builder.createStore(loc, zeroValue, destPtr);
863}
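// Illustrative note (not part of the original file): for a zero-initialized
// aggregate such as
//
//   struct S { int a; double b; };
//   S s{};
//
// the code above emits a single store of a #cir.zero constant of the
// converted record type into the alloca for 's', rather than the memset that
// classic LLVM codegen would produce.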
864
865// TODO(cir): should be shared with LLVM codegen.
866bool CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *ce) {
867 const Expr *e = ce->getSubExpr();
868
869 if (ce->getCastKind() == CK_UncheckedDerivedToBase)
870 return false;
871
872 if (isa<CXXThisExpr>(e->IgnoreParens())) {
873 // We always assume that 'this' is never null.
874 return false;
875 }
876
877 if (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(ce)) {
878 // And that glvalue casts are never null.
879 if (ice->isGLValue())
880 return false;
881 }
882
883 return true;
884}
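// Illustrative note (not part of the original file): examples of the decision
// above, assuming the usual Clang cast kinds:
//
//   this->baseMember        // CK_UncheckedDerivedToBase -> no null check
//   Base &r = *d;           // glvalue cast              -> no null check
//   Base *p = derivedPtr;   // prvalue derived-to-base pointer cast; the
//                           //   pointer may be null, so a null check is
//                           //   needed before applying any base offset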
885
886/// Computes the length of an array in elements, as well as the base
887/// element type and a properly-typed first element pointer.
888mlir::Value
889CIRGenFunction::emitArrayLength(const clang::ArrayType *origArrayType,
890 QualType &baseType, Address &addr) {
891 const clang::ArrayType *arrayType = origArrayType;
892
893 // If it's a VLA, we have to load the stored size. Note that
894 // this is the size of the VLA in bytes, not its size in elements.
895 if (isa<VariableArrayType>(arrayType)) {
897 cgm.errorNYI(*currSrcLoc, "VLAs");
898 return builder.getConstInt(*currSrcLoc, SizeTy, 0);
899 }
900
901 uint64_t countFromCLAs = 1;
902 QualType eltType;
903
904 auto cirArrayType = mlir::dyn_cast<cir::ArrayType>(addr.getElementType());
905
906 while (cirArrayType) {
907 assert(isa<ConstantArrayType>(arrayType));
908 countFromCLAs *= cirArrayType.getSize();
909 eltType = arrayType->getElementType();
910
911 cirArrayType =
912 mlir::dyn_cast<cir::ArrayType>(cirArrayType.getElementType());
913
914 arrayType = getContext().getAsArrayType(arrayType->getElementType());
915 assert((!cirArrayType || arrayType) &&
916 "CIR and Clang types are out-of-sync");
917 }
918
919 if (arrayType) {
920 // From this point onwards, the Clang array type has been emitted
921 // as some other type (probably a packed struct). Compute the array
922 // size, and just emit the 'begin' expression as a bitcast.
923 cgm.errorNYI(*currSrcLoc, "length for non-array underlying types");
924 }
925
926 baseType = eltType;
927 return builder.getConstInt(*currSrcLoc, SizeTy, countFromCLAs);
928}
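// Illustrative note (not part of the original file): for a constant array
// such as
//
//   int a[2][3];
//
// the loop above walks the nested cir::ArrayType levels, so countFromCLAs
// becomes 2 * 3 = 6, baseType ends up as 'int', and the returned value is a
// SizeTy constant holding 6.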
929
930mlir::Value CIRGenFunction::emitAlignmentAssumption(
931 mlir::Value ptrValue, QualType ty, SourceLocation loc,
932 SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue) {
934 return cir::AssumeAlignedOp::create(builder, getLoc(assumptionLoc), ptrValue,
935 alignment, offsetValue);
936}
937
938mlir::Value CIRGenFunction::emitAlignmentAssumption(
939 mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc,
940 int64_t alignment, mlir::Value offsetValue) {
941 QualType ty = expr->getType();
942 SourceLocation loc = expr->getExprLoc();
943 return emitAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment,
944 offsetValue);
945}
946
947// TODO(cir): Most of this function can be shared between CIRGen
948// and traditional LLVM codegen
949void CIRGenFunction::emitVariablyModifiedType(QualType type) {
950 assert(type->isVariablyModifiedType() &&
951 "Must pass variably modified type to EmitVLASizes!");
952
953 // We're going to walk down into the type and look for VLA
954 // expressions.
955 do {
956 assert(type->isVariablyModifiedType());
957
958 const Type *ty = type.getTypePtr();
959 switch (ty->getTypeClass()) {
960 case Type::CountAttributed:
961 case Type::PackIndexing:
962 case Type::ArrayParameter:
963 case Type::HLSLAttributedResource:
964 case Type::HLSLInlineSpirv:
965 case Type::PredefinedSugar:
966 cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
967 break;
968
969#define TYPE(Class, Base)
970#define ABSTRACT_TYPE(Class, Base)
971#define NON_CANONICAL_TYPE(Class, Base)
972#define DEPENDENT_TYPE(Class, Base) case Type::Class:
973#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
974#include "clang/AST/TypeNodes.inc"
975 llvm_unreachable(
976 "dependent type must be resolved before the CIR codegen");
977
978 // These types are never variably-modified.
979 case Type::Builtin:
980 case Type::Complex:
981 case Type::Vector:
982 case Type::ExtVector:
983 case Type::ConstantMatrix:
984 case Type::Record:
985 case Type::Enum:
986 case Type::Using:
987 case Type::TemplateSpecialization:
988 case Type::ObjCTypeParam:
989 case Type::ObjCObject:
990 case Type::ObjCInterface:
991 case Type::ObjCObjectPointer:
992 case Type::BitInt:
993 llvm_unreachable("type class is never variably-modified!");
994
995 case Type::Adjusted:
996 type = cast<clang::AdjustedType>(ty)->getAdjustedType();
997 break;
998
999 case Type::Decayed:
1000 type = cast<clang::DecayedType>(ty)->getPointeeType();
1001 break;
1002
1003 case Type::Pointer:
1004 type = cast<clang::PointerType>(ty)->getPointeeType();
1005 break;
1006
1007 case Type::BlockPointer:
1008 type = cast<clang::BlockPointerType>(ty)->getPointeeType();
1009 break;
1010
1011 case Type::LValueReference:
1012 case Type::RValueReference:
1013 type = cast<clang::ReferenceType>(ty)->getPointeeType();
1014 break;
1015
1016 case Type::MemberPointer:
1017 type = cast<clang::MemberPointerType>(ty)->getPointeeType();
1018 break;
1019
1020 case Type::ConstantArray:
1021 case Type::IncompleteArray:
1022 // Losing element qualification here is fine.
1023 type = cast<clang::ArrayType>(ty)->getElementType();
1024 break;
1025
1026 case Type::VariableArray: {
1027 cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType VLA");
1028 break;
1029 }
1030
1031 case Type::FunctionProto:
1032 case Type::FunctionNoProto:
1033 type = cast<clang::FunctionType>(ty)->getReturnType();
1034 break;
1035
1036 case Type::Paren:
1037 case Type::TypeOf:
1038 case Type::UnaryTransform:
1039 case Type::Attributed:
1040 case Type::BTFTagAttributed:
1041 case Type::SubstTemplateTypeParm:
1042 case Type::MacroQualified:
1043 // Keep walking after single level desugaring.
1044 type = type.getSingleStepDesugaredType(getContext());
1045 break;
1046
1047 case Type::Typedef:
1048 case Type::Decltype:
1049 case Type::Auto:
1050 case Type::DeducedTemplateSpecialization:
1051 // Stop walking: nothing to do.
1052 return;
1053
1054 case Type::TypeOfExpr:
1055 // Stop walking: emit typeof expression.
1056 emitIgnoredExpr(cast<clang::TypeOfExprType>(ty)->getUnderlyingExpr());
1057 return;
1058
1059 case Type::Atomic:
1060 type = cast<clang::AtomicType>(ty)->getValueType();
1061 break;
1062
1063 case Type::Pipe:
1064 type = cast<clang::PipeType>(ty)->getElementType();
1065 break;
1066 }
1067 } while (type->isVariablyModifiedType());
1068}
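// Illustrative note (not part of the original file): for a variably modified
// type such as
//
//   void f(int n) { int (*p)[n]; /* ... */ }
//
// the walk above peels Pointer -> VariableArray; the VariableArray case is
// where the VLA size expression would be emitted once VLAs are implemented
// (currently reported as NYI).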
1069
1070Address CIRGenFunction::emitVAListRef(const Expr *e) {
1071 if (getContext().getBuiltinVaListType()->isArrayType())
1072 return emitPointerWithAlignment(e);
1073 return emitLValue(e).getAddress();
1074}
1075
1076} // namespace clang::CIRGen