clang 22.0.0git
CIRGenFunction.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Internal per-function state used for AST-to-ClangIR code gen
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
14
15#include "CIRGenCXXABI.h"
16#include "CIRGenCall.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/Location.h"
19#include "clang/AST/ExprCXX.h"
22
23#include <cassert>
24
25namespace clang::CIRGen {
26
// Constructor tail: wires the type cache and builder, and registers this
// CGF with the EH cleanup stack so cleanups can refer back to it.
// NOTE(review): the first signature line is missing from this extraction.
28 bool suppressNewContext)
29 : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
30 ehStack.setCGF(this);
31}
32
34
35// This is copied from clang/lib/CodeGen/CodeGenFunction.cpp
// Classifies a canonical type into the CIR evaluation kind (scalar,
// complex, or aggregate) used to choose the emission path; atomic types
// are unwrapped to their value type and re-classified.
// NOTE(review): the function signature line is missing from this extraction.
37 type = type.getCanonicalType();
38 while (true) {
39 switch (type->getTypeClass()) {
40#define TYPE(name, parent)
41#define ABSTRACT_TYPE(name, parent)
42#define NON_CANONICAL_TYPE(name, parent) case Type::name:
43#define DEPENDENT_TYPE(name, parent) case Type::name:
44#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
45#include "clang/AST/TypeNodes.inc"
46 llvm_unreachable("non-canonical or dependent type in IR-generation");
47
48 case Type::Auto:
49 case Type::DeducedTemplateSpecialization:
50 llvm_unreachable("undeduced type in IR-generation");
51
52 // Various scalar types.
53 case Type::Builtin:
54 case Type::Pointer:
55 case Type::BlockPointer:
56 case Type::LValueReference:
57 case Type::RValueReference:
58 case Type::MemberPointer:
59 case Type::Vector:
60 case Type::ExtVector:
61 case Type::ConstantMatrix:
62 case Type::FunctionProto:
63 case Type::FunctionNoProto:
64 case Type::Enum:
65 case Type::ObjCObjectPointer:
66 case Type::Pipe:
67 case Type::BitInt:
68 case Type::HLSLAttributedResource:
69 case Type::HLSLInlineSpirv:
70 return cir::TEK_Scalar;
71
72 // Complexes.
73 case Type::Complex:
74 return cir::TEK_Complex;
75
76 // Arrays, records, and Objective-C objects.
77 case Type::ConstantArray:
78 case Type::IncompleteArray:
79 case Type::VariableArray:
80 case Type::Record:
81 case Type::ObjCObject:
82 case Type::ObjCInterface:
83 case Type::ArrayParameter:
84 return cir::TEK_Aggregate;
85
86 // We operate on atomic values according to their underlying type.
87 case Type::Atomic:
88 type = cast<AtomicType>(type)->getValueType();
89 continue;
90 }
91 llvm_unreachable("unknown type kind!");
92 }
93}
94
// Delegates to CIRGenTypes to lower a clang QualType to its in-memory CIR
// type. NOTE(review): the signature line is missing from this extraction.
96 return cgm.getTypes().convertTypeForMem(t);
97}
98
// Delegates to CIRGenTypes to lower a clang QualType to its CIR value type.
// NOTE(review): the signature line is missing from this extraction.
100 return cgm.getTypes().convertType(t);
101}
102
// Maps a valid clang SourceLocation to an MLIR FileLineColLoc via the
// presumed location; otherwise falls back to the currently tracked
// currSrcLoc. NOTE(review): the signature and the SourceManager line are
// missing from this extraction.
104 // Some AST nodes might contain invalid source locations (e.g.
105 // CXXDefaultArgExpr), workaround that to still get something out.
106 if (srcLoc.isValid()) {
108 PresumedLoc pLoc = sm.getPresumedLoc(srcLoc);
109 StringRef filename = pLoc.getFilename();
110 return mlir::FileLineColLoc::get(builder.getStringAttr(filename),
111 pLoc.getLine(), pLoc.getColumn());
112 }
113 // Do our best...
114 assert(currSrcLoc && "expected to inherit some source location");
115 return *currSrcLoc;
116}
117
118mlir::Location CIRGenFunction::getLoc(SourceRange srcLoc) {
119 // Some AST nodes might contain invalid source locations (e.g.
120 // CXXDefaultArgExpr), workaround that to still get something out.
121 if (srcLoc.isValid()) {
122 mlir::Location beg = getLoc(srcLoc.getBegin());
123 mlir::Location end = getLoc(srcLoc.getEnd());
124 SmallVector<mlir::Location, 2> locs = {beg, end};
125 mlir::Attribute metadata;
126 return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
127 }
128 if (currSrcLoc) {
129 return *currSrcLoc;
130 }
131 // We're brave, but time to give up.
132 return builder.getUnknownLoc();
133}
134
135mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) {
136 SmallVector<mlir::Location, 2> locs = {lhs, rhs};
137 mlir::Attribute metadata;
138 return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
139}
140
141bool CIRGenFunction::containsLabel(const Stmt *s, bool ignoreCaseStmts) {
142 // Null statement, not a label!
143 if (!s)
144 return false;
145
146 // If this is a label, we have to emit the code, consider something like:
147 // if (0) { ... foo: bar(); } goto foo;
148 //
149 // TODO: If anyone cared, we could track __label__'s, since we know that you
150 // can't jump to one from outside their declared region.
151 if (isa<LabelStmt>(s))
152 return true;
153
154 // If this is a case/default statement, and we haven't seen a switch, we
155 // have to emit the code.
156 if (isa<SwitchCase>(s) && !ignoreCaseStmts)
157 return true;
158
159 // If this is a switch statement, we want to ignore case statements when we
160 // recursively process the sub-statements of the switch. If we haven't
161 // encountered a switch statement, we treat case statements like labels, but
162 // if we are processing a switch statement, case statements are expected.
163 if (isa<SwitchStmt>(s))
164 ignoreCaseStmts = true;
165
166 // Scan subexpressions for verboten labels.
167 return std::any_of(s->child_begin(), s->child_end(),
168 [=](const Stmt *subStmt) {
169 return containsLabel(subStmt, ignoreCaseStmts);
170 });
171}
172
173/// If the specified expression does not fold to a constant, or if it does but
174/// contains a label, return false. If it constant folds return true and set
175/// the boolean result in Result.
176bool CIRGenFunction::constantFoldsToBool(const Expr *cond, bool &resultBool,
177 bool allowLabels) {
178 llvm::APSInt resultInt;
179 if (!constantFoldsToSimpleInteger(cond, resultInt, allowLabels))
180 return false;
181
182 resultBool = resultInt.getBoolValue();
183 return true;
184}
185
186/// If the specified expression does not fold to a constant, or if it does
187/// fold but contains a label, return false. If it constant folds, return
188/// true and set the folded value.
/// NOTE(review): the first signature line is missing from this extraction
/// (the visible parameters are resultInt and allowLabels; the condition
/// expression parameter is presumably named `cond` — confirm upstream).
190 llvm::APSInt &resultInt,
191 bool allowLabels) {
192 // FIXME: Rename and handle conversion of other evaluatable things
193 // to bool.
194 Expr::EvalResult result;
195 if (!cond->EvaluateAsInt(result, getContext()))
196 return false; // Not foldable, not integer or not fully evaluatable.
197
198 llvm::APSInt intValue = result.Val.getInt();
199 if (!allowLabels && containsLabel(cond))
200 return false; // Contains a label.
201
202 resultInt = intValue;
203 return true;
204}
205
206void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
207 CharUnits alignment) {
208 if (!type->isVoidType()) {
209 mlir::Value addr = emitAlloca("__retval", convertType(type), loc, alignment,
210 /*insertIntoFnEntryBlock=*/false);
211 fnRetAlloca = addr;
212 returnValue = Address(addr, alignment);
213 }
214}
215
216void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
217 mlir::Location loc, CharUnits alignment,
218 bool isParam) {
219 assert(isa<NamedDecl>(var) && "Needs a named decl");
220 assert(!symbolTable.count(var) && "not supposed to be available just yet");
221
222 auto allocaOp = addrVal.getDefiningOp<cir::AllocaOp>();
223 assert(allocaOp && "expected cir::AllocaOp");
224
225 if (isParam)
226 allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
227 if (ty->isReferenceType() || ty.isConstQualified())
228 allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext()));
229
230 symbolTable.insert(var, allocaOp);
231}
232
// LexicalScope cleanup: runs pending scope cleanups and emits the proper
// terminator — a yield for nested scopes, an (implicit) return for the
// function-level scope — threading control through the cleanup and return
// blocks as needed. NOTE(review): the method signature line and one line
// inside `applyCleanup` are missing from this extraction.
234 CIRGenBuilderTy &builder = cgf.builder;
235 LexicalScope *localScope = cgf.curLexScope;
236
237 auto applyCleanup = [&]() {
238 if (performCleanup) {
239 // ApplyDebugLocation
241 forceCleanup();
242 }
243 };
244
245 if (returnBlock != nullptr) {
246 // Write out the return block, which loads the value from `__retval` and
247 // issues the `cir.return`.
248 mlir::OpBuilder::InsertionGuard guard(builder);
249 builder.setInsertionPointToEnd(returnBlock);
250 (void)emitReturn(*returnLoc);
251 }
252
253 auto insertCleanupAndLeave = [&](mlir::Block *insPt) {
254 mlir::OpBuilder::InsertionGuard guard(builder);
255 builder.setInsertionPointToEnd(insPt);
256
257 // If we still don't have a cleanup block, it means that `applyCleanup`
258 // below might be able to get us one.
259 mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
260
261 // Leverage and defers to RunCleanupsScope's dtor and scope handling.
262 applyCleanup();
263
264 // If we now have one after `applyCleanup`, hook it up properly.
265 if (!cleanupBlock && localScope->getCleanupBlock(builder)) {
266 cleanupBlock = localScope->getCleanupBlock(builder);
267 builder.create<cir::BrOp>(insPt->back().getLoc(), cleanupBlock);
268 if (!cleanupBlock->mightHaveTerminator()) {
269 mlir::OpBuilder::InsertionGuard guard(builder);
270 builder.setInsertionPointToEnd(cleanupBlock);
271 builder.create<cir::YieldOp>(localScope->endLoc);
272 }
273 }
274
275 if (localScope->depth == 0) {
276 // Reached the end of the function.
277 if (returnBlock != nullptr) {
278 if (returnBlock->getUses().empty()) {
279 returnBlock->erase();
280 } else {
281 // Thread return block via cleanup block.
282 if (cleanupBlock) {
283 for (mlir::BlockOperand &blockUse : returnBlock->getUses()) {
284 cir::BrOp brOp = mlir::cast<cir::BrOp>(blockUse.getOwner());
285 brOp.setSuccessor(cleanupBlock);
286 }
287 }
288
289 builder.create<cir::BrOp>(*returnLoc, returnBlock);
290 return;
291 }
292 }
293 emitImplicitReturn();
294 return;
295 }
296
297 // End of any local scope != function
298 // Ternary ops have to deal with matching arms for yielding types
299 // and do return a value, it must do its own cir.yield insertion.
300 if (!localScope->isTernary() && !insPt->mightHaveTerminator()) {
301 !retVal ? builder.create<cir::YieldOp>(localScope->endLoc)
302 : builder.create<cir::YieldOp>(localScope->endLoc, retVal);
303 }
304 };
305
306 // If a cleanup block has been created at some point, branch to it
307 // and set the insertion point to continue at the cleanup block.
308 // Terminators are then inserted either in the cleanup block or
309 // inline in this current block.
310 mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
311 if (cleanupBlock)
312 insertCleanupAndLeave(cleanupBlock);
313
314 // Now deal with any pending block wrap up like implicit end of
315 // scope.
316
317 mlir::Block *curBlock = builder.getBlock();
318 if (isGlobalInit() && !curBlock)
319 return;
320 if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
321 return;
322
323 // Get rid of any empty block at the end of the scope.
324 bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
325 if (!entryBlock && curBlock->empty()) {
326 curBlock->erase();
327 if (returnBlock != nullptr && returnBlock->getUses().empty())
328 returnBlock->erase();
329 return;
330 }
331
332 // If there's a cleanup block, branch to it, nothing else to do.
333 if (cleanupBlock) {
334 builder.create<cir::BrOp>(curBlock->back().getLoc(), cleanupBlock);
335 return;
336 }
337
338 // No pre-existent cleanup block, emit cleanup code and yield/return.
339 insertCleanupAndLeave(curBlock);
340}
341
342cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
343 CIRGenBuilderTy &builder = cgf.getBuilder();
344
345 if (!cgf.curFn.getFunctionType().hasVoidReturn()) {
346 // Load the value from `__retval` and return it via the `cir.return` op.
347 auto value = builder.create<cir::LoadOp>(
348 loc, cgf.curFn.getFunctionType().getReturnType(), *cgf.fnRetAlloca);
349 return builder.create<cir::ReturnOp>(loc,
350 llvm::ArrayRef(value.getResult()));
351 }
352 return builder.create<cir::ReturnOp>(loc);
353}
354
355// This is copied from CodeGenModule::MayDropFunctionReturn. This is a
356// candidate for sharing between CIRGen and CodeGen.
357static bool mayDropFunctionReturn(const ASTContext &astContext,
358 QualType returnType) {
359 // We can't just discard the return value for a record type with a complex
360 // destructor or a non-trivially copyable type.
361 if (const auto *classDecl = returnType->getAsCXXRecordDecl())
362 return classDecl->hasTrivialDestructor();
363 return returnType.isTriviallyCopyableType(astContext);
364}
365
// Emits the terminator for a function whose body falls off the end: a trap
// (at -O0) or `cir.unreachable` for non-void C++ functions where that is UB,
// otherwise an ordinary return. NOTE(review): one interior line (original
// 385) is missing from this extraction.
366void CIRGenFunction::LexicalScope::emitImplicitReturn() {
367 CIRGenBuilderTy &builder = cgf.getBuilder();
368 LexicalScope *localScope = cgf.curLexScope;
369
370 const auto *fd = cast<clang::FunctionDecl>(cgf.curGD.getDecl());
371
372 // In C++, flowing off the end of a non-void function is always undefined
373 // behavior. In C, flowing off the end of a non-void function is undefined
374 // behavior only if the non-existent return value is used by the caller.
375 // That influences whether the terminating op is trap, unreachable, or
376 // return.
377 if (cgf.getLangOpts().CPlusPlus && !fd->hasImplicitReturnZero() &&
378 !cgf.sawAsmBlock && !fd->getReturnType()->isVoidType() &&
379 builder.getInsertionBlock()) {
380 bool shouldEmitUnreachable =
381 cgf.cgm.getCodeGenOpts().StrictReturn ||
382 !mayDropFunctionReturn(fd->getASTContext(), fd->getReturnType());
383
384 if (shouldEmitUnreachable) {
386 if (cgf.cgm.getCodeGenOpts().OptimizationLevel == 0)
387 builder.create<cir::TrapOp>(localScope->endLoc);
388 else
389 builder.create<cir::UnreachableOp>(localScope->endLoc);
390 builder.clearInsertionPoint();
391 return;
392 }
393 }
394
395 (void)emitReturn(localScope->endLoc);
396}
397
// Function prologue: binds curFn, declares all parameters in the symbol
// table (alloca + store of the incoming value), creates the `__retval`
// slot for non-void returns, and runs the C++ instance/lambda 'this'
// setup. NOTE(review): several original lines (including the first
// signature line) are missing from this doxygen extraction.
399 cir::FuncOp fn, cir::FuncType funcType,
401 SourceLocation startLoc) {
402 assert(!curFn &&
403 "CIRGenFunction can only be used for one function at a time");
404
405 curFn = fn;
406
407 const Decl *d = gd.getDecl();
408 curCodeDecl = d;
409 const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
411
412 prologueCleanupDepth = ehStack.stable_begin();
413
414 mlir::Block *entryBB = &fn.getBlocks().front();
415 builder.setInsertionPointToStart(entryBB);
416
417 // TODO(cir): this should live in `emitFunctionProlog
418 // Declare all the function arguments in the symbol table.
419 for (const auto nameValue : llvm::zip(args, entryBB->getArguments())) {
420 const VarDecl *paramVar = std::get<0>(nameValue);
421 mlir::Value paramVal = std::get<1>(nameValue);
422 CharUnits alignment = getContext().getDeclAlign(paramVar);
423 mlir::Location paramLoc = getLoc(paramVar->getSourceRange());
424 paramVal.setLoc(paramLoc);
425
426 mlir::Value addrVal =
427 emitAlloca(cast<NamedDecl>(paramVar)->getName(),
428 convertType(paramVar->getType()), paramLoc, alignment,
429 /*insertIntoFnEntryBlock=*/true);
430
431 declare(addrVal, paramVar, paramVar->getType(), paramLoc, alignment,
432 /*isParam=*/true);
433
434 setAddrOfLocalVar(paramVar, Address(addrVal, alignment));
435
436 bool isPromoted = isa<ParmVarDecl>(paramVar) &&
437 cast<ParmVarDecl>(paramVar)->isKNRPromoted();
439 if (isPromoted)
440 cgm.errorNYI(fd->getSourceRange(), "Function argument demotion");
441
442 // Location of the store to the param storage tracked as beginning of
443 // the function body.
444 mlir::Location fnBodyBegin = getLoc(fd->getBody()->getBeginLoc());
445 builder.CIRBaseBuilderTy::createStore(fnBodyBegin, paramVal, addrVal);
446 }
447 assert(builder.getInsertionBlock() && "Should be valid");
448
449 // When the current function is not void, create an address to store the
450 // result value.
451 if (!returnType->isVoidType())
452 emitAndUpdateRetAlloca(returnType, getLoc(fd->getBody()->getEndLoc()),
453 getContext().getTypeAlignInChars(returnType));
454
455 if (isa_and_nonnull<CXXMethodDecl>(d) &&
456 cast<CXXMethodDecl>(d)->isInstance()) {
457 cgm.getCXXABI().emitInstanceFunctionProlog(loc, *this);
458
459 const auto *md = cast<CXXMethodDecl>(d);
460 if (md->getParent()->isLambda() && md->getOverloadedOperator() == OO_Call) {
461 // We're in a lambda.
462 curFn.setLambda(true);
463
464 // Figure out the captures.
465 md->getParent()->getCaptureFields(lambdaCaptureFields,
468 // If the lambda captures the object referred to by '*this' - either by
469 // value or by reference, make sure CXXThisValue points to the correct
470 // object.
471
472 // Get the lvalue for the field (which is a copy of the enclosing object
473 // or contains the address of the enclosing object).
474 LValue thisFieldLValue =
476 if (!lambdaThisCaptureField->getType()->isPointerType()) {
477 // If the enclosing object was captured by value, just use its
478 // address. Sign this pointer.
479 cxxThisValue = thisFieldLValue.getPointer();
480 } else {
481 // Load the lvalue pointed to by the field, since '*this' was captured
482 // by reference.
484 emitLoadOfLValue(thisFieldLValue, SourceLocation()).getValue();
485 }
486 }
487 for (auto *fd : md->getParent()->fields()) {
488 if (fd->hasCapturedVLAType())
489 cgm.errorNYI(loc, "lambda captured VLA type");
490 }
491 } else {
492 // Not in a lambda; just use 'this' from the method.
493 // FIXME: Should we generate a new load for each use of 'this'? The fast
494 // register allocator would be happier...
496 }
497
500 }
501}
502
// Function epilogue: pops any cleanups pushed for the parameters before
// control reaches the return block. NOTE(review): the signature line and
// the cleanup-popping statement are missing from this extraction.
504 // Pop any cleanups that might have been associated with the
505 // parameters. Do this in whatever block we're currently in; it's
506 // important to do this before we enter the return block or return
507 // edges will be *really* confused.
508 // TODO(cir): Use prologueCleanupDepth here.
509 bool hasCleanups = ehStack.stable_begin() != prologueCleanupDepth;
510 if (hasCleanups) {
512 // FIXME(cir): should we clearInsertionPoint? breaks many testcases
514 }
515}
516
// Emit the statements of a function body. Compound statements reuse the
// already-created function-level scope (no extra scope is opened); any
// other statement form is emitted in the current scope.
// NOTE(review): one interior line (original 519) is missing from this
// extraction.
517mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
518 // We start with function level scope for variables.
520
521 if (const CompoundStmt *block = dyn_cast<CompoundStmt>(body))
522 return emitCompoundStmtWithoutScope(*block);
523
524 return emitStmt(body, /*useCurrentScope=*/true);
525}
526
527static void eraseEmptyAndUnusedBlocks(cir::FuncOp func) {
528 // Remove any leftover blocks that are unreachable and empty, since they do
529 // not represent unreachable code useful for warnings nor anything deemed
530 // useful in general.
531 SmallVector<mlir::Block *> blocksToDelete;
532 for (mlir::Block &block : func.getBlocks()) {
533 if (block.empty() && block.getUses().empty())
534 blocksToDelete.push_back(&block);
535 }
536 for (mlir::Block *block : blocksToDelete)
537 block->erase();
538}
539
// Drives code generation for one function definition: sets up locations and
// the entry block, builds the argument list, runs startFunction, dispatches
// on the declaration kind (dtor/ctor/CUDA kernel/lambda invoker/defaulted
// assignment/plain body), verifies the emitted body, and finishes the
// function. Returns nullptr on failure. NOTE(review): several original
// lines are missing from this doxygen extraction.
540cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
541 cir::FuncType funcType) {
542 const auto funcDecl = cast<FunctionDecl>(gd.getDecl());
543 curGD = gd;
544
545 SourceLocation loc = funcDecl->getLocation();
546 Stmt *body = funcDecl->getBody();
547 SourceRange bodyRange =
548 body ? body->getSourceRange() : funcDecl->getLocation();
549
550 SourceLocRAIIObject fnLoc{*this, loc.isValid() ? getLoc(loc)
551 : builder.getUnknownLoc()};
552
553 auto validMLIRLoc = [&](clang::SourceLocation clangLoc) {
554 return clangLoc.isValid() ? getLoc(clangLoc) : builder.getUnknownLoc();
555 };
556 const mlir::Location fusedLoc = mlir::FusedLoc::get(
558 {validMLIRLoc(bodyRange.getBegin()), validMLIRLoc(bodyRange.getEnd())});
559 mlir::Block *entryBB = fn.addEntryBlock();
560
561 FunctionArgList args;
562 QualType retTy = buildFunctionArgList(gd, args);
563
564 // Create a scope in the symbol table to hold variable declarations.
566 {
567 LexicalScope lexScope(*this, fusedLoc, entryBB);
568
569 startFunction(gd, retTy, fn, funcType, args, loc, bodyRange.getBegin());
570
571 if (isa<CXXDestructorDecl>(funcDecl)) {
572 emitDestructorBody(args);
573 } else if (isa<CXXConstructorDecl>(funcDecl)) {
575 } else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice &&
576 funcDecl->hasAttr<CUDAGlobalAttr>()) {
577 getCIRGenModule().errorNYI(bodyRange, "CUDA kernel");
578 } else if (isa<CXXMethodDecl>(funcDecl) &&
579 cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker()) {
580 // The lambda static invoker function is special, because it forwards or
581 // clones the body of the function call operator (but is actually
582 // static).
584 } else if (funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) &&
585 (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() ||
586 cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator())) {
587 // Implicit copy-assignment gets the same special treatment as implicit
588 // copy-constructors.
590 } else if (body) {
591 if (mlir::failed(emitFunctionBody(body))) {
592 return nullptr;
593 }
594 } else {
595 // Anything without a body should have been handled above.
596 llvm_unreachable("no definition for normal function");
597 }
598
599 if (mlir::failed(fn.verifyBody()))
600 return nullptr;
601
602 finishFunction(bodyRange.getEnd());
603 }
604
606 return fn;
607}
608
// Emits the body of the current constructor: delegates complete→base where
// the ABI allows it, otherwise emits the ctor prologue (base/member inits)
// followed by the body statement. NOTE(review): the signature line and a
// few interior lines are missing from this extraction.
611 const auto *ctor = cast<CXXConstructorDecl>(curGD.getDecl());
612 CXXCtorType ctorType = curGD.getCtorType();
613
614 assert((cgm.getTarget().getCXXABI().hasConstructorVariants() ||
615 ctorType == Ctor_Complete) &&
616 "can only generate complete ctor for this ABI");
617
618 if (ctorType == Ctor_Complete && isConstructorDelegationValid(ctor) &&
619 cgm.getTarget().getCXXABI().hasConstructorVariants()) {
620 emitDelegateCXXConstructorCall(ctor, Ctor_Base, args, ctor->getEndLoc());
621 return;
622 }
623
624 const FunctionDecl *definition = nullptr;
625 Stmt *body = ctor->getBody(definition);
626 assert(definition == ctor && "emitting wrong constructor body");
627
628 if (isa_and_nonnull<CXXTryStmt>(body)) {
629 cgm.errorNYI(ctor->getSourceRange(), "emitConstructorBody: try body");
630 return;
631 }
632
635
636 // TODO: in restricted cases, we can emit the vbase initializers of a
637 // complete ctor and then delegate to the base ctor.
638
639 // Emit the constructor prologue, i.e. the base and member initializers.
640 emitCtorPrologue(ctor, ctorType, args);
641
642 // TODO(cir): propagate this result via mlir::logical result. Just unreachable
643 // now just to have it handled.
644 if (mlir::failed(emitStmt(body, true))) {
645 cgm.errorNYI(ctor->getSourceRange(),
646 "emitConstructorBody: emit body statement failed.");
647 return;
648 }
649}
650
651/// Emits the body of the current destructor.
/// Complete variants delegate to the base variant (unless the body is a
/// function-try-block); base variants emit the body statement itself.
/// NOTE(review): the signature line and several interior lines are missing
/// from this doxygen extraction.
653 const CXXDestructorDecl *dtor = cast<CXXDestructorDecl>(curGD.getDecl());
654 CXXDtorType dtorType = curGD.getDtorType();
655
656 // For an abstract class, non-base destructors are never used (and can't
657 // be emitted in general, because vbase dtors may not have been validated
658 // by Sema), but the Itanium ABI doesn't make them optional and Clang may
659 // in fact emit references to them from other compilations, so emit them
660 // as functions containing a trap instruction.
661 if (dtorType != Dtor_Base && dtor->getParent()->isAbstract()) {
662 cgm.errorNYI(dtor->getSourceRange(), "abstract base class destructors");
663 return;
664 }
665
666 Stmt *body = dtor->getBody();
668
669 // The call to operator delete in a deleting destructor happens
670 // outside of the function-try-block, which means it's always
671 // possible to delegate the destructor body to the complete
672 // destructor. Do so.
673 if (dtorType == Dtor_Deleting) {
674 cgm.errorNYI(dtor->getSourceRange(), "deleting destructor");
675 return;
676 }
677
678 // If the body is a function-try-block, enter the try before
679 // anything else.
680 const bool isTryBody = isa_and_nonnull<CXXTryStmt>(body);
681 if (isTryBody)
682 cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
683
686
687 // If this is the complete variant, just invoke the base variant;
688 // the epilogue will destruct the virtual bases. But we can't do
689 // this optimization if the body is a function-try-block, because
690 // we'd introduce *two* handler blocks. In the Microsoft ABI, we
691 // always delegate because we might not have a definition in this TU.
692 switch (dtorType) {
693 case Dtor_Unified:
694 llvm_unreachable("not expecting a unified dtor");
695 case Dtor_Comdat:
696 llvm_unreachable("not expecting a COMDAT");
697 case Dtor_Deleting:
698 llvm_unreachable("already handled deleting case");
699
700 case Dtor_Complete:
701 assert((body || getTarget().getCXXABI().isMicrosoft()) &&
702 "can't emit a dtor without a body for non-Microsoft ABIs");
703
705
706 if (!isTryBody) {
708 emitCXXDestructorCall(dtor, Dtor_Base, /*forVirtualBase=*/false,
709 /*delegating=*/false, loadCXXThisAddress(), thisTy);
710 break;
711 }
712
713 // Fallthrough: act like we're in the base variant.
714 [[fallthrough]];
715
716 case Dtor_Base:
717 assert(body);
718
721
722 if (isTryBody) {
723 cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
724 } else if (body) {
725 (void)emitStmt(body, /*useCurrentScope=*/true);
726 } else {
727 assert(dtor->isImplicit() && "bodyless dtor not implicit");
728 // nothing to do besides what's in the epilogue
729 }
730 // -fapple-kext must inline any call to this dtor into
731 // the caller's body.
733
734 break;
735 }
736
738
739 // Exit the try if applicable.
740 if (isTryBody)
741 cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
742}
743
744/// Given a value of type T* that may not be to a complete object, construct
745/// an l-value with the natural pointee alignment of T.
/// NOTE(review): the signature line is missing from this extraction.
747 QualType ty) {
748 // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps
749 // assert on the result type first.
750 LValueBaseInfo baseInfo;
752 CharUnits align = cgm.getNaturalTypeAlignment(ty, &baseInfo);
753 return makeAddrLValue(Address(val, align), ty, baseInfo);
754}
755
// Builds an l-value for `val` at the natural alignment of `ty`, using the
// in-memory CIR type for the address element type.
// NOTE(review): the signature line is missing from this extraction.
757 QualType ty) {
758 LValueBaseInfo baseInfo;
759 CharUnits alignment = cgm.getNaturalTypeAlignment(ty, &baseInfo);
760 Address addr(val, convertTypeForMem(ty), alignment);
762 return makeAddrLValue(addr, ty, baseInfo);
763}
764
// Builds the FunctionArgList for `gd`: the implicit 'this' for instance
// methods, the declared parameters, and any implicit structor params; the
// declared return type is returned. NOTE(review): the first signature line
// is missing from this extraction.
766 FunctionArgList &args) {
767 const auto *fd = cast<FunctionDecl>(gd.getDecl());
768 QualType retTy = fd->getReturnType();
769
770 const auto *md = dyn_cast<CXXMethodDecl>(fd);
771 if (md && md->isInstance()) {
772 if (cgm.getCXXABI().hasThisReturn(gd))
773 cgm.errorNYI(fd->getSourceRange(), "this return");
774 else if (cgm.getCXXABI().hasMostDerivedReturn(gd))
775 cgm.errorNYI(fd->getSourceRange(), "most derived return");
776 cgm.getCXXABI().buildThisParam(*this, args);
777 }
778
779 if (const auto *cd = dyn_cast<CXXConstructorDecl>(fd))
780 if (cd->getInheritedConstructor())
781 cgm.errorNYI(fd->getSourceRange(),
782 "buildFunctionArgList: inherited constructor");
783
784 for (auto *param : fd->parameters())
785 args.push_back(param);
786
787 if (md && (isa<CXXConstructorDecl>(md) || isa<CXXDestructorDecl>(md)))
788 cgm.getCXXABI().addImplicitStructorParams(*this, retTy, args);
789
790 return retTy;
791}
792
793/// Emit code to compute a designator that specifies the location
794/// of the expression.
795/// FIXME: document this function better.
/// NOTE(review): the signature line and most per-case `return emit...`
/// lines are missing from this doxygen extraction; only the case labels
/// survive. Kept byte-identical.
797 // FIXME: ApplyDebugLocation DL(*this, e);
798 switch (e->getStmtClass()) {
799 default:
801 std::string("l-value not implemented for '") +
802 e->getStmtClassName() + "'");
803 return LValue();
804 case Expr::ArraySubscriptExprClass:
806 case Expr::UnaryOperatorClass:
808 case Expr::StringLiteralClass:
810 case Expr::MemberExprClass:
812 case Expr::CompoundLiteralExprClass:
814 case Expr::BinaryOperatorClass:
816 case Expr::CompoundAssignOperatorClass: {
817 QualType ty = e->getType();
818 if (ty->getAs<AtomicType>()) {
819 cgm.errorNYI(e->getSourceRange(),
820 "CompoundAssignOperator with AtomicType");
821 return LValue();
822 }
823 if (!ty->isAnyComplexType())
825
827 }
828 case Expr::CallExprClass:
829 case Expr::CXXMemberCallExprClass:
830 case Expr::CXXOperatorCallExprClass:
831 case Expr::UserDefinedLiteralClass:
833 case Expr::ParenExprClass:
834 return emitLValue(cast<ParenExpr>(e)->getSubExpr());
835 case Expr::DeclRefExprClass:
837 case Expr::CStyleCastExprClass:
838 case Expr::CXXStaticCastExprClass:
839 case Expr::CXXDynamicCastExprClass:
840 case Expr::ImplicitCastExprClass:
842 case Expr::MaterializeTemporaryExprClass:
844 }
845}
846
847static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt) {
848 SmallString<256> buffer;
849 llvm::raw_svector_ostream out(buffer);
850 out << name << cnt;
851 return std::string(out.str());
852}
853
857
861
// Zero-initializes the object at destPtr. Unlike LLVM codegen's memset,
// CIR emits a store of a #cir.zero constant. Empty C++ classes and
// zero-sized non-VLA types are skipped. NOTE(review): one interior line
// (original 865) is missing from this extraction.
862void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address destPtr,
863 QualType ty) {
864 // Ignore empty classes in C++.
866 if (const auto *rd = ty->getAsCXXRecordDecl(); rd && rd->isEmpty())
867 return;
868
869 // Cast the dest ptr to the appropriate i8 pointer type.
870 if (builder.isInt8Ty(destPtr.getElementType())) {
871 cgm.errorNYI(loc, "Cast the dest ptr to the appropriate i8 pointer type");
872 }
873
874 // Get size and alignment info for this aggregate.
875 const CharUnits size = getContext().getTypeSizeInChars(ty);
876 if (size.isZero()) {
877 // But note that getTypeInfo returns 0 for a VLA.
878 if (isa<VariableArrayType>(getContext().getAsArrayType(ty))) {
879 cgm.errorNYI(loc,
880 "emitNullInitialization for zero size VariableArrayType");
881 } else {
882 return;
883 }
884 }
885
886 // If the type contains a pointer to data member we can't memset it to zero.
887 // Instead, create a null constant and copy it to the destination.
888 // TODO: there are other patterns besides zero that we can usefully memset,
889 // like -1, which happens to be the pattern used by member-pointers.
890 if (!cgm.getTypes().isZeroInitializable(ty)) {
891 cgm.errorNYI(loc, "type is not zero initializable");
892 }
893
894 // In LLVM Codegen: otherwise, just memset the whole thing to zero using
895 // Builder.CreateMemSet. In CIR just emit a store of #cir.zero to the
896 // respective address.
897 // Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
898 const mlir::Value zeroValue = builder.getNullValue(convertType(ty), loc);
899 builder.createStore(loc, zeroValue, destPtr);
900}
901
902// TODO(cir): should be shared with LLVM codegen.
// Decides whether a class cast needs a null check on its operand: casts of
// 'this', glvalue implicit casts, and unchecked derived-to-base casts never
// do. NOTE(review): the signature line is missing from this extraction.
904 const Expr *e = ce->getSubExpr();
905
906 if (ce->getCastKind() == CK_UncheckedDerivedToBase)
907 return false;
908
909 if (isa<CXXThisExpr>(e->IgnoreParens())) {
910 // We always assume that 'this' is never null.
911 return false;
912 }
913
914 if (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(ce)) {
915 // And that glvalue casts are never null.
916 if (ice->isGLValue())
917 return false;
918 }
919
920 return true;
921}
922
923/// Computes the length of an array in elements, as well as the base
924/// element type and a properly-typed first element pointer.
/// NOTE(review): the signature line, the VLA check, and one loop line are
/// missing from this doxygen extraction; kept byte-identical.
925mlir::Value
927 QualType &baseType, Address &addr) {
928 const clang::ArrayType *arrayType = origArrayType;
929
930 // If it's a VLA, we have to load the stored size. Note that
931 // this is the size of the VLA in bytes, not its size in elements.
934 cgm.errorNYI(*currSrcLoc, "VLAs");
935 return builder.getConstInt(*currSrcLoc, SizeTy, 0);
936 }
937
938 uint64_t countFromCLAs = 1;
939 QualType eltType;
940
941 auto cirArrayType = mlir::dyn_cast<cir::ArrayType>(addr.getElementType());
942
943 while (cirArrayType) {
945 countFromCLAs *= cirArrayType.getSize();
946 eltType = arrayType->getElementType();
947
948 cirArrayType =
949 mlir::dyn_cast<cir::ArrayType>(cirArrayType.getElementType());
950
951 arrayType = getContext().getAsArrayType(arrayType->getElementType());
952 assert((!cirArrayType || arrayType) &&
953 "CIR and Clang types are out-of-sync");
954 }
955
956 if (arrayType) {
957 // From this point onwards, the Clang array type has been emitted
958 // as some other type (probably a packed struct). Compute the array
959 // size, and just emit the 'begin' expression as a bitcast.
960 cgm.errorNYI(*currSrcLoc, "length for non-array underlying types");
961 }
962
963 baseType = eltType;
964 return builder.getConstInt(*currSrcLoc, SizeTy, countFromCLAs);
965}
966
// Emits a cir.assume_aligned op recording that ptrValue is aligned to
// `alignment` (optionally offset by offsetValue).
// NOTE(review): the signature's first line is missing from this extraction.
968 mlir::Value ptrValue, QualType ty, SourceLocation loc,
969 SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue) {
971 return cir::AssumeAlignedOp::create(builder, getLoc(assumptionLoc), ptrValue,
972 alignment, offsetValue);
973}
974
976 mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc,
977 int64_t alignment, mlir::Value offsetValue) {
978 QualType ty = expr->getType();
979 SourceLocation loc = expr->getExprLoc();
980 return emitAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment,
981 offsetValue);
982}
983
984// TODO(cir): Most of this function can be shared between CIRGen
985// and traditional LLVM codegen
987 assert(type->isVariablyModifiedType() &&
988 "Must pass variably modified type to EmitVLASizes!");
989
990 // We're going to walk down into the type and look for VLA
991 // expressions.
992 do {
993 assert(type->isVariablyModifiedType());
994
995 const Type *ty = type.getTypePtr();
996 switch (ty->getTypeClass()) {
997 case Type::CountAttributed:
998 case Type::PackIndexing:
999 case Type::ArrayParameter:
1000 case Type::HLSLAttributedResource:
1001 case Type::HLSLInlineSpirv:
1002 case Type::PredefinedSugar:
1003 cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
1004 break;
1005
1006#define TYPE(Class, Base)
1007#define ABSTRACT_TYPE(Class, Base)
1008#define NON_CANONICAL_TYPE(Class, Base)
1009#define DEPENDENT_TYPE(Class, Base) case Type::Class:
1010#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
1011#include "clang/AST/TypeNodes.inc"
1012 llvm_unreachable(
1013 "dependent type must be resolved before the CIR codegen");
1014
1015 // These types are never variably-modified.
1016 case Type::Builtin:
1017 case Type::Complex:
1018 case Type::Vector:
1019 case Type::ExtVector:
1020 case Type::ConstantMatrix:
1021 case Type::Record:
1022 case Type::Enum:
1023 case Type::Using:
1024 case Type::TemplateSpecialization:
1025 case Type::ObjCTypeParam:
1026 case Type::ObjCObject:
1027 case Type::ObjCInterface:
1028 case Type::ObjCObjectPointer:
1029 case Type::BitInt:
1030 llvm_unreachable("type class is never variably-modified!");
1031
1032 case Type::Adjusted:
1033 type = cast<clang::AdjustedType>(ty)->getAdjustedType();
1034 break;
1035
1036 case Type::Decayed:
1037 type = cast<clang::DecayedType>(ty)->getPointeeType();
1038 break;
1039
1040 case Type::Pointer:
1041 type = cast<clang::PointerType>(ty)->getPointeeType();
1042 break;
1043
1044 case Type::BlockPointer:
1045 type = cast<clang::BlockPointerType>(ty)->getPointeeType();
1046 break;
1047
1048 case Type::LValueReference:
1049 case Type::RValueReference:
1050 type = cast<clang::ReferenceType>(ty)->getPointeeType();
1051 break;
1052
1053 case Type::MemberPointer:
1054 type = cast<clang::MemberPointerType>(ty)->getPointeeType();
1055 break;
1056
1057 case Type::ConstantArray:
1058 case Type::IncompleteArray:
1059 // Losing element qualification here is fine.
1060 type = cast<clang::ArrayType>(ty)->getElementType();
1061 break;
1062
1063 case Type::VariableArray: {
1064 cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType VLA");
1065 break;
1066 }
1067
1068 case Type::FunctionProto:
1069 case Type::FunctionNoProto:
1070 type = cast<clang::FunctionType>(ty)->getReturnType();
1071 break;
1072
1073 case Type::Paren:
1074 case Type::TypeOf:
1075 case Type::UnaryTransform:
1076 case Type::Attributed:
1077 case Type::BTFTagAttributed:
1078 case Type::SubstTemplateTypeParm:
1079 case Type::MacroQualified:
1080 // Keep walking after single level desugaring.
1081 type = type.getSingleStepDesugaredType(getContext());
1082 break;
1083
1084 case Type::Typedef:
1085 case Type::Decltype:
1086 case Type::Auto:
1087 case Type::DeducedTemplateSpecialization:
1088 // Stop walking: nothing to do.
1089 return;
1090
1091 case Type::TypeOfExpr:
1092 // Stop walking: emit typeof expression.
1093 emitIgnoredExpr(cast<clang::TypeOfExprType>(ty)->getUnderlyingExpr());
1094 return;
1095
1096 case Type::Atomic:
1097 type = cast<clang::AtomicType>(ty)->getValueType();
1098 break;
1099
1100 case Type::Pipe:
1101 type = cast<clang::PipeType>(ty)->getElementType();
1102 break;
1103 }
1104 } while (type->isVariablyModifiedType());
1105}
1106
1108 if (getContext().getBuiltinVaListType()->isArrayType())
1109 return emitPointerWithAlignment(e);
1110 return emitLValue(e).getAddress();
1111}
1112
1113} // namespace clang::CIRGen
Defines the clang::Expr interface and subclasses for C++ expressions.
__device__ __2f16 float __ockl_bool s
APSInt & getInt()
Definition APValue.h:489
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:188
SourceManager & getSourceManager()
Definition ASTContext.h:798
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3720
mlir::Type getElementType() const
Definition Address.h:101
void forceCleanup()
Force the emission of cleanups now, instead of waiting until this object is destroyed.
static bool isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor)
Checks whether the given constructor is a valid subject for the complete-to-base constructor delegati...
mlir::Type convertType(clang::QualType t)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
EHScopeStack::stable_iterator prologueCleanupDepth
The cleanup depth enclosing all the cleanups associated with the parameters.
cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn, cir::FuncType funcType)
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not point to a complete object, construct an l-value with the natural
LValue emitMemberExpr(const MemberExpr *e)
const TargetInfo & getTarget() const
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
const clang::Decl * curFuncDecl
LValue emitLValueForLambdaField(const FieldDecl *field)
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e)
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
LValue emitStringLiteralLValue(const StringLiteral *e)
void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, const FunctionArgList &args, clang::SourceLocation loc)
mlir::Value emitArrayLength(const clang::ArrayType *arrayType, QualType &baseType, Address &addr)
Computes the length of an array in elements, as well as the base element type and a properly-typed fi...
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
void emitImplicitAssignmentOperatorBody(FunctionArgList &args)
mlir::Type convertTypeForMem(QualType t)
clang::QualType buildFunctionArgList(clang::GlobalDecl gd, FunctionArgList &args)
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, FunctionArgList &args)
This routine generates necessary code to initialize base classes and non-static data members belongin...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
Address returnValue
The temporary alloca to hold the return value.
void finishFunction(SourceLocation endLoc)
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
clang::FieldDecl * lambdaThisCaptureField
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
void emitConstructorBody(FunctionArgList &args)
LValue emitCallExprLValue(const clang::CallExpr *e)
cir::FuncOp curFn
The function for which code is currently being generated.
bool shouldNullCheckClassCastValue(const CastExpr *ce)
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
void startFunction(clang::GlobalDecl gd, clang::QualType returnType, cir::FuncOp fn, cir::FuncType funcType, FunctionArgList args, clang::SourceLocation loc, clang::SourceLocation startLoc)
Emit code for the start of a function.
unsigned counterRefTmp
Hold counters for incrementally naming temporaries.
mlir::MLIRContext & getMLIRContext()
void emitDestructorBody(FunctionArgList &args)
Emits the body of the current destructor.
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
void emitLambdaStaticInvokeBody(const CXXMethodDecl *md)
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
clang::ASTContext & getContext() const
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
Set the address of a local variable.
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth)
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
Type for representing both the decl and type of parameters to a function.
Definition CIRGenCall.h:191
Address getAddress() const
mlir::Value getPointer() const
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:56
Represents a C++ destructor within a class.
Definition DeclCXX.h:2869
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition DeclCXX.h:2255
QualType getFunctionObjectParameterType() const
Definition DeclCXX.h:2279
bool isAbstract() const
Determine whether this class has a pure virtual function.
Definition DeclCXX.h:1221
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition DeclCXX.h:1186
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3610
CastKind getCastKind() const
Definition Expr.h:3654
Expr * getSubExpr()
Definition Expr.h:3660
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1720
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition DeclBase.h:593
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks,...
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3081
QualType getType() const
Definition Expr.h:144
Represents a function declaration or definition.
Definition Decl.h:1999
Stmt * getBody(const FunctionDecl *&Definition) const
Retrieve the body (definition) of the function.
Definition Decl.cpp:3271
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4490
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition Expr.h:3787
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
unsigned getLine() const
Return the presumed line number of this location.
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2867
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
This class handles loading and caching of source files into memory.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location that a SourceLocation specifies.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition Stmt.h:85
StmtClass getStmtClass() const
Definition Stmt.h:1472
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:334
const char * getStmtClassName() const
Definition Stmt.cpp:87
bool isVoidType() const
Definition TypeBase.h:8878
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isAnyComplexType() const
Definition TypeBase.h:8657
TypeClass getTypeClass() const
Definition TypeBase.h:2385
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9098
QualType getType() const
Definition Decl.h:722
Represents a variable declaration or definition.
Definition Decl.h:925
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:2190
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt)
static bool mayDropFunctionReturn(const ASTContext &astContext, QualType returnType)
static void eraseEmptyAndUnusedBlocks(cir::FuncOp func)
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
@ Ctor_Base
Base object ctor.
Definition ABI.h:26
@ Ctor_Complete
Complete object ctor.
Definition ABI.h:25
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
CXXDtorType
C++ destructor types.
Definition ABI.h:34
@ Dtor_Comdat
The COMDAT used for dtors.
Definition ABI.h:38
@ Dtor_Unified
GCC-style unified dtor.
Definition ABI.h:39
@ Dtor_Base
Base object dtor.
Definition ABI.h:37
@ Dtor_Complete
Complete object dtor.
Definition ABI.h:36
@ Dtor_Deleting
Deleting dtor.
Definition ABI.h:35
U cast(CodeGen::Address addr)
Definition Address.h:327
static bool vtableInitialization()
static bool constructABIArgDirectExtend()
static bool dtorCleanups()
static bool runCleanupsScope()
static bool emitTypeCheck()
static bool generateDebugInfo()
static bool incrementProfileCounter()
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
mlir::Block * getCleanupBlock(mlir::OpBuilder &builder)
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647