clang 23.0.0git
CIRGenFunction.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Internal per-function state used for AST-to-ClangIR code gen
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
14
15#include "CIRGenCXXABI.h"
16#include "CIRGenCall.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/Location.h"
19#include "clang/AST/Attr.h"
20#include "clang/AST/ExprCXX.h"
23#include "llvm/IR/FPEnv.h"
24
25#include <cassert>
26
27namespace clang::CIRGen {
28
30 bool suppressNewContext)
31 : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
32 ehStack.setCGF(this);
33}
34
36
37// This is copied from clang/lib/CodeGen/CodeGenFunction.cpp
39 type = type.getCanonicalType();
40 while (true) {
41 switch (type->getTypeClass()) {
42#define TYPE(name, parent)
43#define ABSTRACT_TYPE(name, parent)
44#define NON_CANONICAL_TYPE(name, parent) case Type::name:
45#define DEPENDENT_TYPE(name, parent) case Type::name:
46#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
47#include "clang/AST/TypeNodes.inc"
48 llvm_unreachable("non-canonical or dependent type in IR-generation");
49
50 case Type::Auto:
51 case Type::DeducedTemplateSpecialization:
52 llvm_unreachable("undeduced type in IR-generation");
53
54 // Various scalar types.
55 case Type::Builtin:
56 case Type::Pointer:
57 case Type::BlockPointer:
58 case Type::LValueReference:
59 case Type::RValueReference:
60 case Type::MemberPointer:
61 case Type::Vector:
62 case Type::ExtVector:
63 case Type::ConstantMatrix:
64 case Type::FunctionProto:
65 case Type::FunctionNoProto:
66 case Type::Enum:
67 case Type::ObjCObjectPointer:
68 case Type::Pipe:
69 case Type::BitInt:
70 case Type::OverflowBehavior:
71 case Type::HLSLAttributedResource:
72 case Type::HLSLInlineSpirv:
73 return cir::TEK_Scalar;
74
75 // Complexes.
76 case Type::Complex:
77 return cir::TEK_Complex;
78
79 // Arrays, records, and Objective-C objects.
80 case Type::ConstantArray:
81 case Type::IncompleteArray:
82 case Type::VariableArray:
83 case Type::Record:
84 case Type::ObjCObject:
85 case Type::ObjCInterface:
86 case Type::ArrayParameter:
87 return cir::TEK_Aggregate;
88
89 // We operate on atomic values according to their underlying type.
90 case Type::Atomic:
91 type = cast<AtomicType>(type)->getValueType();
92 continue;
93 }
94 llvm_unreachable("unknown type kind!");
95 }
96}
97
99 return cgm.getTypes().convertTypeForMem(t);
100}
101
103 return cgm.getTypes().convertType(t);
104}
105
107 // Some AST nodes might contain invalid source locations (e.g.
108 // CXXDefaultArgExpr), workaround that to still get something out.
109 if (srcLoc.isValid()) {
111 PresumedLoc pLoc = sm.getPresumedLoc(srcLoc);
112 StringRef filename = pLoc.getFilename();
113 return mlir::FileLineColLoc::get(builder.getStringAttr(filename),
114 pLoc.getLine(), pLoc.getColumn());
115 }
116 // We expect to have a currSrcLoc set, so we assert here, but it isn't
117 // critical for the correctness of compilation, so in non-assert builds
118 // we fallback on using an unknown location.
119 assert(currSrcLoc && "expected to inherit some source location");
120 if (currSrcLoc)
121 return *currSrcLoc;
122 // We're brave, but time to give up.
123 return builder.getUnknownLoc();
124}
125
126mlir::Location CIRGenFunction::getLoc(SourceRange srcLoc) {
127 // Some AST nodes might contain invalid source locations (e.g.
128 // CXXDefaultArgExpr), workaround that to still get something out.
129 if (srcLoc.isValid()) {
130 mlir::Location beg = getLoc(srcLoc.getBegin());
131 mlir::Location end = getLoc(srcLoc.getEnd());
132 SmallVector<mlir::Location, 2> locs = {beg, end};
133 mlir::Attribute metadata;
134 return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
135 }
136 // We expect to have a currSrcLoc set, so we assert here, but it isn't
137 // critical for the correctness of compilation, so in non-assert builds
138 // we fallback on using an unknown location.
139 assert(currSrcLoc && "expected to inherit some source location");
140 if (currSrcLoc)
141 return *currSrcLoc;
142 // We're brave, but time to give up.
143 return builder.getUnknownLoc();
144}
145
146mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) {
147 SmallVector<mlir::Location, 2> locs = {lhs, rhs};
148 mlir::Attribute metadata;
149 return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
150}
151
152bool CIRGenFunction::containsLabel(const Stmt *s, bool ignoreCaseStmts) {
153 // Null statement, not a label!
154 if (!s)
155 return false;
156
157 // If this is a label, we have to emit the code, consider something like:
158 // if (0) { ... foo: bar(); } goto foo;
159 //
160 // TODO: If anyone cared, we could track __label__'s, since we know that you
161 // can't jump to one from outside their declared region.
162 if (isa<LabelStmt>(s))
163 return true;
164
165 // If this is a case/default statement, and we haven't seen a switch, we
166 // have to emit the code.
167 if (isa<SwitchCase>(s) && !ignoreCaseStmts)
168 return true;
169
170 // If this is a switch statement, we want to ignore case statements when we
171 // recursively process the sub-statements of the switch. If we haven't
172 // encountered a switch statement, we treat case statements like labels, but
173 // if we are processing a switch statement, case statements are expected.
174 if (isa<SwitchStmt>(s))
175 ignoreCaseStmts = true;
176
177 // Scan subexpressions for verboten labels.
178 return std::any_of(s->child_begin(), s->child_end(),
179 [=](const Stmt *subStmt) {
180 return containsLabel(subStmt, ignoreCaseStmts);
181 });
182}
183
184/// If the specified expression does not fold to a constant, or if it does but
185/// contains a label, return false. If it constant folds return true and set
186/// the boolean result in Result.
187bool CIRGenFunction::constantFoldsToBool(const Expr *cond, bool &resultBool,
188 bool allowLabels) {
189 llvm::APSInt resultInt;
190 if (!constantFoldsToSimpleInteger(cond, resultInt, allowLabels))
191 return false;
192
193 resultBool = resultInt.getBoolValue();
194 return true;
195}
196
197/// If the specified expression does not fold to a constant, or if it does
198/// fold but contains a label, return false. If it constant folds, return
199/// true and set the folded value.
201 llvm::APSInt &resultInt,
202 bool allowLabels) {
203 // FIXME: Rename and handle conversion of other evaluatable things
204 // to bool.
205 Expr::EvalResult result;
206 if (!cond->EvaluateAsInt(result, getContext()))
207 return false; // Not foldable, not integer or not fully evaluatable.
208
209 llvm::APSInt intValue = result.Val.getInt();
210 if (!allowLabels && containsLabel(cond))
211 return false; // Contains a label.
212
213 resultInt = intValue;
214 return true;
215}
216
217void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
218 CharUnits alignment) {
219 if (!type->isVoidType()) {
220 mlir::Value addr = emitAlloca("__retval", convertType(type), loc, alignment,
221 /*insertIntoFnEntryBlock=*/false);
222 fnRetAlloca = addr;
223 returnValue = Address(addr, alignment);
224 }
225}
226
227void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
228 mlir::Location loc, CharUnits alignment,
229 bool isParam) {
230 assert(isa<NamedDecl>(var) && "Needs a named decl");
231 assert(!symbolTable.count(var) && "not supposed to be available just yet");
232
233 auto allocaOp = addrVal.getDefiningOp<cir::AllocaOp>();
234 assert(allocaOp && "expected cir::AllocaOp");
235
236 if (isParam)
237 allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
238 if (ty->isReferenceType() || ty.isConstQualified())
239 allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext()));
240
241 symbolTable.insert(var, allocaOp);
242}
243
245 CIRGenBuilderTy &builder = cgf.builder;
246 LexicalScope *localScope = cgf.curLexScope;
247
248 auto applyCleanup = [&]() {
249 if (performCleanup) {
250 // ApplyDebugLocation
252 forceCleanup();
253 }
254 };
255
256 // Cleanup are done right before codegen resumes a scope. This is where
257 // objects are destroyed. Process all return blocks.
258 // TODO(cir): Handle returning from a switch statement through a cleanup
259 // block. We can't simply jump to the cleanup block, because the cleanup block
260 // is not part of the case region. Either reemit all cleanups in the return
261 // block or wait for MLIR structured control flow to support early exits.
263 for (mlir::Block *retBlock : localScope->getRetBlocks()) {
264 mlir::OpBuilder::InsertionGuard guard(builder);
265 builder.setInsertionPointToEnd(retBlock);
266 retBlocks.push_back(retBlock);
267 mlir::Location retLoc = localScope->getRetLoc(retBlock);
268 emitReturn(retLoc);
269 }
270
271 auto insertCleanupAndLeave = [&](mlir::Block *insPt) {
272 mlir::OpBuilder::InsertionGuard guard(builder);
273 builder.setInsertionPointToEnd(insPt);
274
275 // If we still don't have a cleanup block, it means that `applyCleanup`
276 // below might be able to get us one.
277 mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
278
279 // Leverage and defers to RunCleanupsScope's dtor and scope handling.
280 applyCleanup();
281
282 mlir::Block *currentBlock = builder.getBlock();
283
284 // If we now have one after `applyCleanup`, hook it up properly.
285 if (!cleanupBlock && localScope->getCleanupBlock(builder)) {
286 cleanupBlock = localScope->getCleanupBlock(builder);
287 cir::BrOp::create(builder, insPt->back().getLoc(), cleanupBlock);
288 if (!cleanupBlock->mightHaveTerminator()) {
289 mlir::OpBuilder::InsertionGuard guard(builder);
290 builder.setInsertionPointToEnd(cleanupBlock);
291 cir::YieldOp::create(builder, localScope->endLoc);
292 }
293 }
294
295 if (localScope->depth == 0) {
296 // Reached the end of the function.
297 // Special handling only for single return block case
298 if (localScope->getRetBlocks().size() == 1) {
299 mlir::Block *retBlock = localScope->getRetBlocks()[0];
300 mlir::Location retLoc = localScope->getRetLoc(retBlock);
301 if (retBlock->getUses().empty()) {
302 retBlock->erase();
303 } else {
304 // Thread return block via cleanup block.
305 if (cleanupBlock) {
306 for (mlir::BlockOperand &blockUse : retBlock->getUses()) {
307 cir::BrOp brOp = mlir::cast<cir::BrOp>(blockUse.getOwner());
308 brOp.setSuccessor(cleanupBlock);
309 }
310 }
311
312 cir::BrOp::create(builder, retLoc, retBlock);
313 return;
314 }
315 }
316 emitImplicitReturn();
317 return;
318 }
319
320 // End of any local scope != function
321 // Ternary ops have to deal with matching arms for yielding types
322 // and do return a value, it must do its own cir.yield insertion.
323 if (!localScope->isTernary() && !currentBlock->mightHaveTerminator()) {
324 !retVal ? cir::YieldOp::create(builder, localScope->endLoc)
325 : cir::YieldOp::create(builder, localScope->endLoc, retVal);
326 }
327 };
328
329 // If a cleanup block has been created at some point, branch to it
330 // and set the insertion point to continue at the cleanup block.
331 // Terminators are then inserted either in the cleanup block or
332 // inline in this current block.
333 mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
334 if (cleanupBlock)
335 insertCleanupAndLeave(cleanupBlock);
336
337 // Now deal with any pending block wrap up like implicit end of
338 // scope.
339
340 mlir::Block *curBlock = builder.getBlock();
341 if (isGlobalInit() && !curBlock)
342 return;
343 if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
344 return;
345
346 // Get rid of any empty block at the end of the scope.
347 bool isEntryBlock = builder.getInsertionBlock()->isEntryBlock();
348 if (!isEntryBlock && curBlock->empty()) {
349 curBlock->erase();
350 for (mlir::Block *retBlock : retBlocks) {
351 if (retBlock->getUses().empty())
352 retBlock->erase();
353 }
354 // The empty block was created by a terminator (return/break/continue)
355 // and is now erased. If there are pending cleanup scopes (from variables
356 // with destructors), we need to pop them and ensure the containing scope
357 // block gets a proper terminator (e.g. cir.yield). Without this, the
358 // cleanup-scope-op popping that would otherwise happen in
359 // ~RunCleanupsScope leaves the scope block without a terminator.
360 if (hasPendingCleanups()) {
361 builder.setInsertionPointToEnd(entryBlock);
362 insertCleanupAndLeave(entryBlock);
363 }
364 return;
365 }
366
367 // If there's a cleanup block, branch to it, nothing else to do.
368 if (cleanupBlock) {
369 cir::BrOp::create(builder, curBlock->back().getLoc(), cleanupBlock);
370 return;
371 }
372
373 // No pre-existent cleanup block, emit cleanup code and yield/return.
374 insertCleanupAndLeave(curBlock);
375}
376
/// Emit a cir.return at \p loc for the current function, loading the value
/// stored in the `__retval` slot when the function returns non-void.
cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
  CIRGenBuilderTy &builder = cgf.getBuilder();

  // Returns are only emitted inside cir.func bodies.
  auto fn = dyn_cast<cir::FuncOp>(cgf.curFn);
  assert(fn && "emitReturn from non-function");

  // If we are on a coroutine, add the coro_end builtin call.
  if (fn.getCoroutine())
    cgf.emitCoroEndBuiltinCall(loc,
                               builder.getNullPtr(builder.getVoidPtrTy(), loc));
  if (!fn.getFunctionType().hasVoidReturn()) {
    // Load the value from `__retval` and return it via the `cir.return` op.
    auto value = cir::LoadOp::create(
        builder, loc, fn.getFunctionType().getReturnType(), *cgf.fnRetAlloca);
    return cir::ReturnOp::create(builder, loc,
                                 llvm::ArrayRef(value.getResult()));
  }
  // Void return: no operand on the cir.return.
  return cir::ReturnOp::create(builder, loc);
}
396
397// This is copied from CodeGenModule::MayDropFunctionReturn. This is a
398// candidate for sharing between CIRGen and CodeGen.
399static bool mayDropFunctionReturn(const ASTContext &astContext,
400 QualType returnType) {
401 // We can't just discard the return value for a record type with a complex
402 // destructor or a non-trivially copyable type.
403 if (const auto *classDecl = returnType->getAsCXXRecordDecl())
404 return classDecl->hasTrivialDestructor();
405 return returnType.isTriviallyCopyableType(astContext);
406}
407
408static bool previousOpIsNonYieldingCleanup(mlir::Block *block) {
409 if (block->empty())
410 return false;
411 mlir::Operation *op = &block->back();
412 auto cleanupScopeOp = mlir::dyn_cast<cir::CleanupScopeOp>(op);
413 if (!cleanupScopeOp)
414 return false;
415
416 // Check whether the body region of the cleanup scope exits via cir.yield.
417 // Exits via cir.return or cir.goto do not fall through to the operation
418 // following the cleanup scope, and exits via break, continue, and resume
419 // are not expected here.
420 for (mlir::Block &bodyBlock : cleanupScopeOp.getBodyRegion()) {
421 if (bodyBlock.mightHaveTerminator()) {
422 if (mlir::isa<cir::YieldOp>(bodyBlock.getTerminator()))
423 return false;
424 assert(!mlir::isa<cir::BreakOp>(bodyBlock.getTerminator()) &&
425 !mlir::isa<cir::ContinueOp>(bodyBlock.getTerminator()) &&
426 !mlir::isa<cir::ResumeOp>(bodyBlock.getTerminator()));
427 }
428 }
429 return true;
430}
431
432void CIRGenFunction::LexicalScope::emitImplicitReturn() {
433 CIRGenBuilderTy &builder = cgf.getBuilder();
434 LexicalScope *localScope = cgf.curLexScope;
435
436 const auto *fd = cast<clang::FunctionDecl>(cgf.curGD.getDecl());
437
438 // In C++, flowing off the end of a non-void function is always undefined
439 // behavior. In C, flowing off the end of a non-void function is undefined
440 // behavior only if the non-existent return value is used by the caller.
441 // That influences whether the terminating op is trap, unreachable, or
442 // return.
443 if (cgf.getLangOpts().CPlusPlus && !fd->hasImplicitReturnZero() &&
444 !cgf.sawAsmBlock && !fd->getReturnType()->isVoidType() &&
445 builder.getInsertionBlock() &&
446 !previousOpIsNonYieldingCleanup(builder.getInsertionBlock())) {
447 bool shouldEmitUnreachable =
448 cgf.cgm.getCodeGenOpts().StrictReturn ||
449 !mayDropFunctionReturn(fd->getASTContext(), fd->getReturnType());
450
451 if (shouldEmitUnreachable) {
453 if (cgf.cgm.getCodeGenOpts().OptimizationLevel == 0)
454 cir::TrapOp::create(builder, localScope->endLoc);
455 else
456 cir::UnreachableOp::create(builder, localScope->endLoc);
457 builder.clearInsertionPoint();
458 return;
459 }
460 }
461
462 (void)emitReturn(localScope->endLoc);
463}
464
466 LexicalScope *scope = this;
467 while (scope) {
468 if (scope->isTry())
469 return scope->getTry();
470 scope = scope->parentScope;
471 }
472 return nullptr;
473}
474
475/// An argument came in as a promoted argument; demote it back to its
476/// declared type.
477static mlir::Value emitArgumentDemotion(CIRGenFunction &cgf, const VarDecl *var,
478 mlir::Value value) {
479 mlir::Type ty = cgf.convertType(var->getType());
480
481 // This can happen with promotions that actually don't change the
482 // underlying type, like the enum promotions.
483 if (value.getType() == ty)
484 return value;
485
486 assert((mlir::isa<cir::IntType>(ty) || cir::isAnyFloatingPointType(ty)) &&
487 "unexpected promotion type");
488
489 if (mlir::isa<cir::IntType>(ty))
490 return cgf.getBuilder().CIRBaseBuilderTy::createIntCast(value, ty);
491
492 return cgf.getBuilder().createFloatingCast(value, ty);
493}
494
496 mlir::Block *entryBB,
497 const FunctionDecl *fd,
498 SourceLocation bodyBeginLoc) {
499 // Naked functions don't have prologues.
500 if (fd && fd->hasAttr<NakedAttr>()) {
501 cgm.errorNYI(bodyBeginLoc, "naked function decl");
502 }
503
504 // Declare all the function arguments in the symbol table.
505 for (const auto nameValue : llvm::zip(args, entryBB->getArguments())) {
506 const VarDecl *paramVar = std::get<0>(nameValue);
507 mlir::Value paramVal = std::get<1>(nameValue);
508 CharUnits alignment = getContext().getDeclAlign(paramVar);
509 mlir::Location paramLoc = getLoc(paramVar->getSourceRange());
510 paramVal.setLoc(paramLoc);
511
512 mlir::Value addrVal =
513 emitAlloca(cast<NamedDecl>(paramVar)->getName(),
514 convertType(paramVar->getType()), paramLoc, alignment,
515 /*insertIntoFnEntryBlock=*/true);
516
517 declare(addrVal, paramVar, paramVar->getType(), paramLoc, alignment,
518 /*isParam=*/true);
519
520 setAddrOfLocalVar(paramVar, Address(addrVal, alignment));
521
522 bool isPromoted = isa<ParmVarDecl>(paramVar) &&
523 cast<ParmVarDecl>(paramVar)->isKNRPromoted();
525 if (isPromoted)
526 paramVal = emitArgumentDemotion(*this, paramVar, paramVal);
527
528 // Location of the store to the param storage tracked as beginning of
529 // the function body.
530 mlir::Location fnBodyBegin = getLoc(bodyBeginLoc);
531 builder.CIRBaseBuilderTy::createStore(fnBodyBegin, paramVal, addrVal);
532 }
533 assert(builder.getInsertionBlock() && "Should be valid");
534}
535
537 cir::FuncOp fn, cir::FuncType funcType,
539 SourceLocation startLoc) {
540 assert(!curFn &&
541 "CIRGenFunction can only be used for one function at a time");
542
543 curFn = fn;
544
545 const Decl *d = gd.getDecl();
546
547 didCallStackSave = false;
548 curCodeDecl = d;
549 const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
550 curFuncDecl = (d ? d->getNonClosureContext() : nullptr);
551
552 prologueCleanupDepth = ehStack.stable_begin();
553
554 mlir::Block *entryBB = &fn.getBlocks().front();
555 builder.setInsertionPointToStart(entryBB);
556
557 // Determine the function body begin location for the prolog.
558 // If fd is null or has no body, use startLoc as fallback.
559 SourceLocation bodyBeginLoc = startLoc;
560 if (fd) {
561 if (Stmt *body = fd->getBody())
562 bodyBeginLoc = body->getBeginLoc();
563 else
564 bodyBeginLoc = fd->getLocation();
565 }
566
567 emitFunctionProlog(args, entryBB, fd, bodyBeginLoc);
568
569 // When the current function is not void, create an address to store the
570 // result value.
571 if (!returnType->isVoidType()) {
572 // Determine the function body end location.
573 // If fd is null or has no body, use loc as fallback.
574 SourceLocation bodyEndLoc = loc;
575 if (fd) {
576 if (Stmt *body = fd->getBody())
577 bodyEndLoc = body->getEndLoc();
578 else
579 bodyEndLoc = fd->getLocation();
580 }
581 emitAndUpdateRetAlloca(returnType, getLoc(bodyEndLoc),
582 getContext().getTypeAlignInChars(returnType));
583 }
584
585 if (isa_and_nonnull<CXXMethodDecl>(d) &&
586 cast<CXXMethodDecl>(d)->isInstance()) {
587 cgm.getCXXABI().emitInstanceFunctionProlog(loc, *this);
588
589 const auto *md = cast<CXXMethodDecl>(d);
590 if (md->getParent()->isLambda() && md->getOverloadedOperator() == OO_Call) {
591 // We're in a lambda.
592 auto fn = dyn_cast<cir::FuncOp>(curFn);
593 assert(fn && "lambda in non-function region");
594 fn.setLambda(true);
595
596 // Figure out the captures.
597 md->getParent()->getCaptureFields(lambdaCaptureFields,
600 // If the lambda captures the object referred to by '*this' - either by
601 // value or by reference, make sure CXXThisValue points to the correct
602 // object.
603
604 // Get the lvalue for the field (which is a copy of the enclosing object
605 // or contains the address of the enclosing object).
606 LValue thisFieldLValue =
608 if (!lambdaThisCaptureField->getType()->isPointerType()) {
609 // If the enclosing object was captured by value, just use its
610 // address. Sign this pointer.
611 cxxThisValue = thisFieldLValue.getPointer();
612 } else {
613 // Load the lvalue pointed to by the field, since '*this' was captured
614 // by reference.
616 emitLoadOfLValue(thisFieldLValue, SourceLocation()).getValue();
617 }
618 }
619 for (auto *fd : md->getParent()->fields()) {
620 if (fd->hasCapturedVLAType())
621 cgm.errorNYI(loc, "lambda captured VLA type");
622 }
623 } else {
624 // Not in a lambda; just use 'this' from the method.
625 // FIXME: Should we generate a new load for each use of 'this'? The fast
626 // register allocator would be happier...
628 }
629
632 }
633}
634
636 for (cir::BlockAddressOp &blockAddress : cgm.unresolvedBlockAddressToLabel) {
637 cir::LabelOp labelOp =
638 cgm.lookupBlockAddressInfo(blockAddress.getBlockAddrInfo());
639 assert(labelOp && "expected cir.labelOp to already be emitted");
640 cgm.updateResolvedBlockAddress(blockAddress, labelOp);
641 }
642 cgm.unresolvedBlockAddressToLabel.clear();
643}
644
647 return;
650 mlir::OpBuilder::InsertionGuard guard(builder);
651 builder.setInsertionPointToEnd(indirectGotoBlock);
652 for (auto &[blockAdd, labelOp] : cgm.blockAddressToLabel) {
653 succesors.push_back(labelOp->getBlock());
654 rangeOperands.push_back(labelOp->getBlock()->getArguments());
655 }
656 cir::IndirectBrOp::create(builder, builder.getUnknownLoc(),
657 indirectGotoBlock->getArgument(0), false,
658 rangeOperands, succesors);
659 cgm.blockAddressToLabel.clear();
660}
661
663 // Resolve block address-to-label mappings, then emit the indirect branch
664 // with the corresponding targets.
667
668 // If a label address was taken but no indirect goto was used, we can't remove
669 // the block argument here. Instead, we mark the 'indirectbr' op
670 // as poison so that the cleanup can be deferred to lowering, since the
671 // verifier doesn't allow the 'indirectbr' target address to be null.
672 if (indirectGotoBlock && indirectGotoBlock->hasNoPredecessors()) {
673 auto indrBr = cast<cir::IndirectBrOp>(indirectGotoBlock->front());
674 indrBr.setPoison(true);
675 }
676
677 // Pop any cleanups that might have been associated with the
678 // parameters. Do this in whatever block we're currently in; it's
679 // important to do this before we enter the return block or return
680 // edges will be *really* confused.
681 // TODO(cir): Use prologueCleanupDepth here.
682 bool hasCleanups = ehStack.stable_begin() != prologueCleanupDepth;
683 if (hasCleanups) {
685 // FIXME(cir): should we clearInsertionPoint? breaks many testcases
687 }
688}
689
690mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
691 // We start with function level scope for variables.
693
694 if (const CompoundStmt *block = dyn_cast<CompoundStmt>(body))
695 return emitCompoundStmtWithoutScope(*block);
696
697 return emitStmt(body, /*useCurrentScope=*/true);
698}
699
700static void eraseEmptyAndUnusedBlocks(cir::FuncOp func) {
701 // Remove any leftover blocks that are unreachable and empty, since they do
702 // not represent unreachable code useful for warnings nor anything deemed
703 // useful in general.
704 SmallVector<mlir::Block *> blocksToDelete;
705 for (mlir::Block &block : func.getBlocks()) {
706 if (block.empty() && block.getUses().empty())
707 blocksToDelete.push_back(&block);
708 }
709 for (mlir::Block *block : blocksToDelete)
710 block->erase();
711}
712
713cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
714 cir::FuncType funcType) {
715 const auto *funcDecl = cast<FunctionDecl>(gd.getDecl());
716 curGD = gd;
717
718 if (funcDecl->isInlineBuiltinDeclaration()) {
719 // When generating code for a builtin with an inline declaration, use a
720 // mangled name to hold the actual body, while keeping an external
721 // declaration in case the function pointer is referenced somewhere.
722 std::string fdInlineName = (cgm.getMangledName(funcDecl) + ".inline").str();
723 cir::FuncOp clone =
724 mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
725 if (!clone) {
726 mlir::OpBuilder::InsertionGuard guard(builder);
727 builder.setInsertionPoint(fn);
728 clone = cir::FuncOp::create(builder, fn.getLoc(), fdInlineName,
729 fn.getFunctionType());
730 clone.setLinkage(cir::GlobalLinkageKind::InternalLinkage);
731 clone.setSymVisibility("private");
732 clone.setInlineKind(cir::InlineKind::AlwaysInline);
733 }
734 fn.setLinkage(cir::GlobalLinkageKind::ExternalLinkage);
735 fn.setSymVisibility("private");
736 fn = clone;
737 } else {
738 // Detect the unusual situation where an inline version is shadowed by a
739 // non-inline version. In that case we should pick the external one
740 // everywhere. That's GCC behavior too.
741 for (const FunctionDecl *pd = funcDecl->getPreviousDecl(); pd;
742 pd = pd->getPreviousDecl()) {
743 if (LLVM_UNLIKELY(pd->isInlineBuiltinDeclaration())) {
744 std::string inlineName = funcDecl->getName().str() + ".inline";
745 if (auto inlineFn = mlir::cast_or_null<cir::FuncOp>(
746 cgm.getGlobalValue(inlineName))) {
747 // Replace all uses of the .inline function with the regular function
748 // FIXME: This performs a linear walk over the module. Introduce some
749 // caching here.
750 if (inlineFn
751 .replaceAllSymbolUses(fn.getSymNameAttr(), cgm.getModule())
752 .failed())
753 llvm_unreachable("Failed to replace inline builtin symbol uses");
754 inlineFn.erase();
755 }
756 break;
757 }
758 }
759 }
760
761 SourceLocation loc = funcDecl->getLocation();
762 Stmt *body = funcDecl->getBody();
763 SourceRange bodyRange =
764 body ? body->getSourceRange() : funcDecl->getLocation();
765
766 SourceLocRAIIObject fnLoc{*this, loc.isValid() ? getLoc(loc)
767 : builder.getUnknownLoc()};
768
769 auto validMLIRLoc = [&](clang::SourceLocation clangLoc) {
770 return clangLoc.isValid() ? getLoc(clangLoc) : builder.getUnknownLoc();
771 };
772 const mlir::Location fusedLoc = mlir::FusedLoc::get(
774 {validMLIRLoc(bodyRange.getBegin()), validMLIRLoc(bodyRange.getEnd())});
775 mlir::Block *entryBB = fn.addEntryBlock();
776
777 FunctionArgList args;
778 QualType retTy = buildFunctionArgList(gd, args);
779
780 // Create a scope in the symbol table to hold variable declarations.
782 {
783 LexicalScope lexScope(*this, fusedLoc, entryBB);
784
785 // Emit the standard function prologue.
786 startFunction(gd, retTy, fn, funcType, args, loc, bodyRange.getBegin());
787
788 // Save parameters for coroutine function.
789 if (body && isa_and_nonnull<CoroutineBodyStmt>(body))
790 llvm::append_range(fnArgs, funcDecl->parameters());
791
792 if (isa<CXXDestructorDecl>(funcDecl)) {
793 emitDestructorBody(args);
794 } else if (isa<CXXConstructorDecl>(funcDecl)) {
796 } else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice &&
797 funcDecl->hasAttr<CUDAGlobalAttr>()) {
798 cgm.getCUDARuntime().emitDeviceStub(*this, fn, args);
799 } else if (isa<CXXMethodDecl>(funcDecl) &&
800 cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker()) {
801 // The lambda static invoker function is special, because it forwards or
802 // clones the body of the function call operator (but is actually
803 // static).
805 } else if (funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) &&
806 (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() ||
807 cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator())) {
808 // Implicit copy-assignment gets the same special treatment as implicit
809 // copy-constructors.
811 } else if (body) {
812 // Emit standard function body.
813 if (mlir::failed(emitFunctionBody(body))) {
814 return nullptr;
815 }
816 } else {
817 // Anything without a body should have been handled above.
818 llvm_unreachable("no definition for normal function");
819 }
820
821 if (mlir::failed(fn.verifyBody()))
822 return nullptr;
823
824 finishFunction(bodyRange.getEnd());
825 }
826
828 return fn;
829}
830
833 const auto *ctor = cast<CXXConstructorDecl>(curGD.getDecl());
834 CXXCtorType ctorType = curGD.getCtorType();
835
836 assert((cgm.getTarget().getCXXABI().hasConstructorVariants() ||
837 ctorType == Ctor_Complete) &&
838 "can only generate complete ctor for this ABI");
839
840 cgm.setCXXSpecialMemberAttr(cast<cir::FuncOp>(curFn), ctor);
841
842 if (ctorType == Ctor_Complete && isConstructorDelegationValid(ctor) &&
843 cgm.getTarget().getCXXABI().hasConstructorVariants()) {
844 emitDelegateCXXConstructorCall(ctor, Ctor_Base, args, ctor->getEndLoc());
845 return;
846 }
847
848 const FunctionDecl *definition = nullptr;
849 Stmt *body = ctor->getBody(definition);
850 assert(definition == ctor && "emitting wrong constructor body");
851
852 if (isa_and_nonnull<CXXTryStmt>(body)) {
853 cgm.errorNYI(ctor->getSourceRange(), "emitConstructorBody: try body");
854 return;
855 }
856
859
860 // TODO: in restricted cases, we can emit the vbase initializers of a
861 // complete ctor and then delegate to the base ctor.
862
863 // Emit the constructor prologue, i.e. the base and member initializers.
864 emitCtorPrologue(ctor, ctorType, args);
865
866 // TODO(cir): propagate this result via mlir::logical result. Just unreachable
867 // now just to have it handled.
868 if (mlir::failed(emitStmt(body, true))) {
869 cgm.errorNYI(ctor->getSourceRange(),
870 "emitConstructorBody: emit body statement failed.");
871 return;
872 }
873}
874
875/// Emits the body of the current destructor.
// NOTE(review): this rendering dropped the signature line (orig. 876); per the
// member index this is void CIRGenFunction::emitDestructorBody(FunctionArgList
// &args). Several interior lines (895, 902, 905, 907, 920, 936, 944, 947, 960,
// 962, 974) are also missing — presumably assert(!cir::MissingFeatures::...)
// guards and the declaration of `thisTy` — TODO confirm against upstream.
 877  const CXXDestructorDecl *dtor = cast<CXXDestructorDecl>(curGD.getDecl());
 878  CXXDtorType dtorType = curGD.getDtorType();
 879
 880  cgm.setCXXSpecialMemberAttr(cast<cir::FuncOp>(curFn), dtor);
 881
 882  // For an abstract class, non-base destructors are never used (and can't
 883  // be emitted in general, because vbase dtors may not have been validated
 884  // by Sema), but the Itanium ABI doesn't make them optional and Clang may
 885  // in fact emit references to them from other compilations, so emit them
 886  // as functions containing a trap instruction.
 887  if (dtorType != Dtor_Base && dtor->getParent()->isAbstract()) {
 888    SourceLocation loc =
 889        dtor->hasBody() ? dtor->getBody()->getBeginLoc() : dtor->getLocation();
 890    emitTrap(getLoc(loc), true);
 891    return;
 892  }
 893
 894  Stmt *body = dtor->getBody();
 896
 897  // The call to operator delete in a deleting destructor happens
 898  // outside of the function-try-block, which means it's always
 899  // possible to delegate the destructor body to the complete
 900  // destructor.  Do so.
 901  if (dtorType == Dtor_Deleting || dtorType == Dtor_VectorDeleting) {
 903    cgm.errorNYI(dtor->getSourceRange(), "emitConditionalArrayDtorCall");
 904    RunCleanupsScope dtorEpilogue(*this);
// `thisTy` is used below but its declaration line was dropped by the rendering;
// presumably QualType thisTy = dtor->getFunctionObjectParameterType(); —
// TODO confirm.
 906    if (haveInsertPoint()) {
 908      emitCXXDestructorCall(dtor, Dtor_Complete, /*forVirtualBase=*/false,
 909                            /*delegating=*/false, loadCXXThisAddress(), thisTy);
 910    }
 911    return;
 912  }
 913
 914  // If the body is a function-try-block, enter the try before
 915  // anything else.
 916  const bool isTryBody = isa_and_nonnull<CXXTryStmt>(body);
 917  if (isTryBody)
 918    cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
 919
 921
 922  // Enter the epilogue cleanups.
 923  RunCleanupsScope dtorEpilogue(*this);
 924
 925  // If this is the complete variant, just invoke the base variant;
 926  // the epilogue will destruct the virtual bases.  But we can't do
 927  // this optimization if the body is a function-try-block, because
 928  // we'd introduce *two* handler blocks.  In the Microsoft ABI, we
 929  // always delegate because we might not have a definition in this TU.
 930  switch (dtorType) {
 931  case Dtor_Unified:
 932    llvm_unreachable("not expecting a unified dtor");
 933  case Dtor_Comdat:
 934    llvm_unreachable("not expecting a COMDAT");
 935  case Dtor_Deleting:
 937    llvm_unreachable("already handled deleting case");
 938
 939  case Dtor_Complete:
 940    assert((body || getTarget().getCXXABI().isMicrosoft()) &&
 941           "can't emit a dtor without a body for non-Microsoft ABIs");
 942
 943    // Enter the cleanup scopes for virtual bases.
 945
 946    if (!isTryBody) {
// Complete variant delegates to the base variant (the epilogue handles
// virtual bases); see comment above the switch.
 948      emitCXXDestructorCall(dtor, Dtor_Base, /*forVirtualBase=*/false,
 949                            /*delegating=*/false, loadCXXThisAddress(), thisTy);
 950      break;
 951    }
 952
 953    // Fallthrough: act like we're in the base variant.
 954    [[fallthrough]];
 955
 956  case Dtor_Base:
 957    assert(body);
 958
 959    // Enter the cleanup scopes for fields and non-virtual bases.
 961
 963
 964    if (isTryBody) {
 965      cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
 966    } else if (body) {
 967      (void)emitStmt(body, /*useCurrentScope=*/true);
 968    } else {
 969      assert(dtor->isImplicit() && "bodyless dtor not implicit");
 970      // nothing to do besides what's in the epilogue
 971    }
 972    // -fapple-kext must inline any call to this dtor into
 973    // the caller's body.
 975
 976    break;
 977  }
 978
 979  // Jump out through the epilogue cleanups.
 980  dtorEpilogue.forceCleanup();
 981
 982  // Exit the try if applicable.
 983  if (isTryBody)
 984    cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
 985}
986
987/// Given a value of type T* that may not be to a complete object, construct
988/// an l-value with the natural pointee alignment of T.
// NOTE(review): the signature line (orig. 989) was dropped by this rendering;
// per the member index it is LValue CIRGenFunction::
// makeNaturalAlignPointeeAddrLValue(mlir::Value val, QualType ty).
 990                                                         QualType ty) {
 991  // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps
 992  // assert on the result type first.
 993  LValueBaseInfo baseInfo;
// Uses the natural (pointee) alignment of `ty`; line 994 is missing from
// this rendering.
 995  CharUnits align = cgm.getNaturalTypeAlignment(ty, &baseInfo);
 996  return makeAddrLValue(Address(val, align), ty, baseInfo);
 997}
998
// Build an LValue for `val` of type `ty` at the type's natural alignment,
// using the memory representation of the type for the address element type.
// NOTE(review): the signature start (orig. 999) was dropped by this rendering;
// per the member index it is LValue CIRGenFunction::makeNaturalAlignAddrLValue(
// mlir::Value val, QualType ty).
1000                                                  QualType ty) {
1001  LValueBaseInfo baseInfo;
1002  CharUnits alignment = cgm.getNaturalTypeAlignment(ty, &baseInfo);
1003  Address addr(val, convertTypeForMem(ty), alignment);
1005  return makeAddrLValue(addr, ty, baseInfo);
1006}
1007
1008// Map the LangOption for exception behavior into the corresponding enum in
1009// the IR.
// NOTE(review): the function-name line (orig. 1011) and the case labels
// (orig. 1013/1015/1017/1019) were dropped by this rendering. Judging by the
// return values and the LangOptions::FPExceptionModeKind enumerators listed
// in the index (FPE_Ignore, FPE_MayTrap, FPE_Strict, FPE_Default), the switch
// presumably maps each mode to the matching llvm::fp value — TODO confirm.
1010static llvm::fp::ExceptionBehavior
1012  switch (kind) {
1014    return llvm::fp::ebIgnore;
1016    return llvm::fp::ebMayTrap;
1018    return llvm::fp::ebStrict;
1020    llvm_unreachable("expected explicitly initialized exception behavior");
1021  }
1022  llvm_unreachable("unsupported FP exception behavior");
1023}
1024
// Collect the parameter list for the function `gd` into `args` (the implicit
// 'this' for instance methods, the declared parameters, and any implicit
// structor parameters required by the C++ ABI) and return the declared
// return type.
// NOTE(review): the signature start (orig. 1025) was dropped by this
// rendering; per the member index it is clang::QualType
// CIRGenFunction::buildFunctionArgList(clang::GlobalDecl gd, ...).
1026                                                   FunctionArgList &args) {
1027  const auto *fd = cast<FunctionDecl>(gd.getDecl());
1028  QualType retTy = fd->getReturnType();
1029
1030  const auto *md = dyn_cast<CXXMethodDecl>(fd);
1031  if (md && md->isInstance()) {
// ABIs where constructors/assignment return 'this' or the most-derived
// pointer are not implemented yet; only report, then still build 'this'.
1032    if (cgm.getCXXABI().hasThisReturn(gd))
1033      cgm.errorNYI(fd->getSourceRange(), "this return");
1034    else if (cgm.getCXXABI().hasMostDerivedReturn(gd))
1035      cgm.errorNYI(fd->getSourceRange(), "most derived return");
1036    cgm.getCXXABI().buildThisParam(*this, args);
1037  }
1038
1039  if (const auto *cd = dyn_cast<CXXConstructorDecl>(fd))
1040    if (cd->getInheritedConstructor())
1041      cgm.errorNYI(fd->getSourceRange(),
1042                   "buildFunctionArgList: inherited constructor");
1043
1044  for (auto *param : fd->parameters())
1045    args.push_back(param);
1046
// Constructors/destructors may need extra ABI-specific parameters (e.g. VTT).
1047  if (md && (isa<CXXConstructorDecl>(md) || isa<CXXDestructorDecl>(md)))
1048    cgm.getCXXABI().addImplicitStructorParams(*this, retTy, args);
1049
1050  return retTy;
1051}
1052
1053/// Emit code to compute a designator that specifies the location
1054/// of the expression.
1055/// FIXME: document this function better.
// NOTE(review): the signature line (orig. 1056) was dropped by this rendering;
// per the member index it is LValue CIRGenFunction::emitLValue(const
// clang::Expr *e). Most `return emitXxxLValue(...)` lines under the case
// labels below (orig. 1060, 1065, 1067, ...) were also dropped — each bare
// `case` here presumably dispatched to the matching emit*LValue helper listed
// in the member index — TODO confirm against upstream.
1057  // FIXME: ApplyDebugLocation DL(*this, e);
1058  switch (e->getStmtClass()) {
1059  default:
1061                 std::string("l-value not implemented for '") +
1062                     e->getStmtClassName() + "'");
1063    return LValue();
1064  case Expr::ConditionalOperatorClass:
1066  case Expr::BinaryConditionalOperatorClass:
1068  case Expr::ArraySubscriptExprClass:
1070  case Expr::ExtVectorElementExprClass:
1072  case Expr::UnaryOperatorClass:
1074  case Expr::StringLiteralClass:
1076  case Expr::MemberExprClass:
1078  case Expr::CompoundLiteralExprClass:
1080  case Expr::PredefinedExprClass:
1082  case Expr::BinaryOperatorClass:
1084  case Expr::CompoundAssignOperatorClass: {
1085    QualType ty = e->getType();
1086    if (ty->getAs<AtomicType>()) {
1087      cgm.errorNYI(e->getSourceRange(),
1088                   "CompoundAssignOperator with AtomicType");
1089      return LValue();
1090    }
// Non-complex compound assignments and complex ones take different paths;
// the return lines (orig. 1092, 1094) are missing from this rendering.
1091    if (!ty->isAnyComplexType())
1093
1095  }
1096  case Expr::CallExprClass:
1097  case Expr::CXXMemberCallExprClass:
1098  case Expr::CXXOperatorCallExprClass:
1099  case Expr::UserDefinedLiteralClass:
1101  case Expr::ExprWithCleanupsClass: {
// Run the expression's cleanups in a dedicated scope, then return the
// inner lvalue.
1102    const auto *cleanups = cast<ExprWithCleanups>(e);
1103    RunCleanupsScope scope(*this);
1104    LValue lv = emitLValue(cleanups->getSubExpr());
1106    return lv;
1107  }
1108  case Expr::CXXDefaultArgExprClass: {
1109    auto *dae = cast<CXXDefaultArgExpr>(e);
1110    CXXDefaultArgExprScope scope(*this, dae);
1111    return emitLValue(dae->getExpr());
1112  }
1113  case Expr::CXXTypeidExprClass:
1115  case Expr::ParenExprClass:
1116    return emitLValue(cast<ParenExpr>(e)->getSubExpr());
1117  case Expr::GenericSelectionExprClass:
1118    return emitLValue(cast<GenericSelectionExpr>(e)->getResultExpr());
1119  case Expr::DeclRefExprClass:
1121  case Expr::ImplicitCastExprClass:
1122  case Expr::CStyleCastExprClass:
1123  case Expr::CXXStaticCastExprClass:
1124  case Expr::CXXDynamicCastExprClass:
1125  case Expr::CXXReinterpretCastExprClass:
1126  case Expr::CXXConstCastExprClass:
1127    // TODO(cir): The above list is missing CXXFunctionalCastExprClass,
1128    // CXXAddrSpaceCastExprClass, and ObjCBridgedCastExprClass.
1129    return emitCastLValue(cast<CastExpr>(e));
1130  case Expr::MaterializeTemporaryExprClass:
1132  case Expr::OpaqueValueExprClass:
1134  case Expr::ChooseExprClass:
1135    return emitLValue(cast<ChooseExpr>(e)->getChosenSubExpr());
1136  case Expr::SubstNonTypeTemplateParmExprClass:
1137    return emitLValue(cast<SubstNonTypeTemplateParmExpr>(e)->getReplacement());
1138  }
1139}
1140
1141static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt) {
1142 SmallString<256> buffer;
1143 llvm::raw_svector_ostream out(buffer);
1144 out << name << cnt;
1145 return std::string(out.str());
1146}
1147
// Produce the next "ref.tmp<N>" name using the per-function counterRefTmp
// counter (see "Hold counters for incrementally naming temporaries" in the
// class). NOTE(review): the signature line (orig. 1148) was dropped by this
// rendering.
1149  return getVersionedTmpName("ref.tmp", counterRefTmp++);
1150}
1151
// Produce the next "agg.tmp<N>" name using the per-function counterAggTmp
// counter. NOTE(review): the signature line (orig. 1152) was dropped by this
// rendering.
1153  return getVersionedTmpName("agg.tmp", counterAggTmp++);
1154}
1155
1156void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address destPtr,
1157 QualType ty) {
1158 // Ignore empty classes in C++.
1159 if (getLangOpts().CPlusPlus)
1160 if (const auto *rd = ty->getAsCXXRecordDecl(); rd && rd->isEmpty())
1161 return;
1162
1163 // Cast the dest ptr to the appropriate i8 pointer type.
1164 if (builder.isInt8Ty(destPtr.getElementType())) {
1165 cgm.errorNYI(loc, "Cast the dest ptr to the appropriate i8 pointer type");
1166 }
1167
1168 // Get size and alignment info for this aggregate.
1169 const CharUnits size = getContext().getTypeSizeInChars(ty);
1170 if (size.isZero()) {
1171 // But note that getTypeInfo returns 0 for a VLA.
1172 if (isa<VariableArrayType>(getContext().getAsArrayType(ty))) {
1173 cgm.errorNYI(loc,
1174 "emitNullInitialization for zero size VariableArrayType");
1175 } else {
1176 return;
1177 }
1178 }
1179
1180 // If the type contains a pointer to data member we can't memset it to zero.
1181 // Instead, create a null constant and copy it to the destination.
1182 // TODO: there are other patterns besides zero that we can usefully memset,
1183 // like -1, which happens to be the pattern used by member-pointers.
1184 if (!cgm.getTypes().isZeroInitializable(ty)) {
1185 cgm.errorNYI(loc, "type is not zero initializable");
1186 }
1187
1188 // In LLVM Codegen: otherwise, just memset the whole thing to zero using
1189 // Builder.CreateMemSet. In CIR just emit a store of #cir.zero to the
1190 // respective address.
1191 // Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
1192 const mlir::Value zeroValue = builder.getNullValue(convertType(ty), loc);
1193 builder.createStore(loc, zeroValue, destPtr);
1194}
1195
// RAII constructor taking an expression: derive the FP options in effect for
// `e` from the language options and delegate to ConstructorHelper.
// NOTE(review): the signature start (orig. 1196) was dropped by this
// rendering; per the member index this is
// CIRGenFunction::CIRGenFPOptionsRAII::CIRGenFPOptionsRAII(CIRGenFunction
// &cgf, const clang::Expr *e).
1197                                                  const clang::Expr *e)
1198    : cgf(cgf) {
1199  ConstructorHelper(e->getFPFeaturesInEffect(cgf.getLangOpts()));
1200}
1201
// RAII constructor taking explicit FP options; delegates to
// ConstructorHelper. NOTE(review): the signature start (orig. 1202) was
// dropped by this rendering.
1203                                                  FPOptions fpFeatures)
1204    : cgf(cgf) {
1205  ConstructorHelper(fpFeatures);
1206}
1207
// Save the current FP state (features, constrained exception/rounding
// defaults) so the destructor can restore it, then install `fpFeatures` as
// the current state. NOTE(review): several lines are missing from this
// rendering (orig. 1220, 1226, 1231, 1241) — presumably
// assert(!cir::MissingFeatures::...) guards and the start of the
// newExceptionBehavior initializer — TODO confirm against upstream.
1208 void CIRGenFunction::CIRGenFPOptionsRAII::ConstructorHelper(
1209    FPOptions fpFeatures) {
1210  oldFPFeatures = cgf.curFPFeatures;
1211  cgf.curFPFeatures = fpFeatures;
1212
1213  oldExcept = cgf.builder.getDefaultConstrainedExcept();
1214  oldRounding = cgf.builder.getDefaultConstrainedRounding();
1215
// Fast path: nothing changed, so nothing to override.
1216  if (oldFPFeatures == fpFeatures)
1217    return;
1218
1219  // TODO(cir): create guard to restore fast math configurations.
1221
1222  [[maybe_unused]] llvm::RoundingMode newRoundingBehavior =
1223      fpFeatures.getRoundingMode();
1224  // TODO(cir): override rounding behaviour once FM configs are guarded.
1225  [[maybe_unused]] llvm::fp::ExceptionBehavior newExceptionBehavior =
1227          fpFeatures.getExceptionMode()));
1228  // TODO(cir): override exception behaviour once FM configs are guarded.
1229
1230  // TODO(cir): override FP flags once FM configs are guarded.
1232
1233  assert((cgf.curFuncDecl == nullptr || cgf.builder.getIsFPConstrained() ||
1234          isa<CXXConstructorDecl>(cgf.curFuncDecl) ||
1235          isa<CXXDestructorDecl>(cgf.curFuncDecl) ||
1236          (newExceptionBehavior == llvm::fp::ebIgnore &&
1237           newRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
1238         "FPConstrained should be enabled on entire function");
1239
1240  // TODO(cir): mark CIR function with fast math attributes.
1242}
1243
// RAII destructor: restore the FP state saved by ConstructorHelper.
// NOTE(review): the signature line (orig. 1244) was dropped by this
// rendering (CIRGenFunction::CIRGenFPOptionsRAII::~CIRGenFPOptionsRAII()).
1245  cgf.curFPFeatures = oldFPFeatures;
1246  cgf.builder.setDefaultConstrainedExcept(oldExcept);
1247  cgf.builder.setDefaultConstrainedRounding(oldRounding);
1248}
1249
1250// TODO(cir): should be shared with LLVM codegen.
// Returns true if a class cast needs a runtime null check on its operand.
// Unchecked derived-to-base casts, casts of 'this', and glvalue implicit
// casts never need one. NOTE(review): the signature line (orig. 1251) was
// dropped by this rendering; per the member index it is bool
// CIRGenFunction::shouldNullCheckClassCastValue(const CastExpr *ce).
1252  const Expr *e = ce->getSubExpr();
1253
1254  if (ce->getCastKind() == CK_UncheckedDerivedToBase)
1255    return false;
1256
1257  if (isa<CXXThisExpr>(e->IgnoreParens())) {
1258    // We always assume that 'this' is never null.
1259    return false;
1260  }
1261
1262  if (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(ce)) {
1263    // And that glvalue casts are never null.
1264    if (ice->isGLValue())
1265      return false;
1266  }
1267
1268  return true;
1269}
1270
1271/// Computes the length of an array in elements, as well as the base
1272/// element type and a properly-typed first element pointer.
// NOTE(review): the signature continuation (orig. 1274) was dropped by this
// rendering; per the member index it is CIRGenFunction::emitArrayLength(
// const clang::ArrayType *arrayType, QualType &baseType, Address &addr).
// The VLA guard line (orig. 1280) and an assert (orig. 1292) are also
// missing.
1273 mlir::Value
1275                                 QualType &baseType, Address &addr) {
1276  const clang::ArrayType *arrayType = origArrayType;
1277
1278  // If it's a VLA, we have to load the stored size.  Note that
1279  // this is the size of the VLA in bytes, not its size in elements.
1281    assert(!cir::MissingFeatures::vlas());
1282    cgm.errorNYI(*currSrcLoc, "VLAs");
1283    return builder.getConstInt(*currSrcLoc, sizeTy, 0);
1284  }
1285
// Multiply out the sizes of all nested constant-length array levels while
// walking the CIR array type and the Clang array type in lock-step.
1286  uint64_t countFromCLAs = 1;
1287  QualType eltType;
1288
1289  auto cirArrayType = mlir::dyn_cast<cir::ArrayType>(addr.getElementType());
1290
1291  while (cirArrayType) {
1293    countFromCLAs *= cirArrayType.getSize();
1294    eltType = arrayType->getElementType();
1295
1296    cirArrayType =
1297        mlir::dyn_cast<cir::ArrayType>(cirArrayType.getElementType());
1298
1299    arrayType = getContext().getAsArrayType(arrayType->getElementType());
1300    assert((!cirArrayType || arrayType) &&
1301           "CIR and Clang types are out-of-sync");
1302  }
1303
1304  if (arrayType) {
1305    // From this point onwards, the Clang array type has been emitted
1306    // as some other type (probably a packed struct). Compute the array
1307    // size, and just emit the 'begin' expression as a bitcast.
1308    cgm.errorNYI(*currSrcLoc, "length for non-array underlying types");
1309  }
1310
1311  baseType = eltType;
1312  return builder.getConstInt(*currSrcLoc, sizeTy, countFromCLAs);
1313}
1314
// Lazily create the shared block used as the target of indirect gotos; it
// takes a single void* block argument. NOTE(review): the signature line
// (orig. 1315) and two interior lines (orig. 1317, 1321 — presumably the
// early-return check of indirectGotoBlock and its assignment) were dropped
// by this rendering — TODO confirm against upstream.
1316  // If we already made the indirect branch for indirect goto, return its block.
1318    return;
1319
1320  mlir::OpBuilder::InsertionGuard guard(builder);
1322      builder.createBlock(builder.getBlock()->getParent(), {}, {voidPtrTy},
1323                          {builder.getUnknownLoc()});
1324}
1325
// Emit a cir.assume_aligned op stating that `ptrValue` is aligned to
// `alignment` (with optional `offsetValue`). NOTE(review): the signature
// start (orig. 1326) and a line before the return (orig. 1329 — presumably a
// sanitizer-related assert) were dropped by this rendering.
1327    mlir::Value ptrValue, QualType ty, SourceLocation loc,
1328    SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue) {
1330  return cir::AssumeAlignedOp::create(builder, getLoc(assumptionLoc), ptrValue,
1331                                      alignment, offsetValue);
1332}
1333
// Convenience overload: derive the type and source location from `expr`
// and forward to the primary emitAlignmentAssumption overload.
// NOTE(review): the signature start (orig. 1334) was dropped by this
// rendering.
1335    mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc,
1336    int64_t alignment, mlir::Value offsetValue) {
1337  QualType ty = expr->getType();
1338  SourceLocation loc = expr->getExprLoc();
1339  return emitAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment,
1340                                 offsetValue);
1341}
1342
// QualType overload of getVLASize: look up the VariableArrayType and
// delegate. NOTE(review): the signature line (orig. 1343) was dropped by
// this rendering.
1344  const VariableArrayType *vla =
1345      cgm.getASTContext().getAsVariableArrayType(type);
1346  assert(vla && "type was not a variable array type!");
1347  return getVLASize(vla);
1348}
1349
// Walk all nested VLA levels, multiplying the previously-recorded size of
// each level (from vlaSizeMap) into a single total element count; returns
// the count and the innermost non-VLA element type.
// NOTE(review): the signature lines (orig. 1350-1351) were dropped by this
// rendering; per the member index this is CIRGenFunction::getVLASize(const
// VariableArrayType *type). The multiply's trailing argument line (orig.
// 1370) is also missing — presumably an overflow-behavior flag given the
// UB comment below — TODO confirm.
1352  // The number of elements so far; always size_t.
1353  mlir::Value numElements;
1354
1355  QualType elementType;
1356  do {
1357    elementType = type->getElementType();
1358    mlir::Value vlaSize = vlaSizeMap[type->getSizeExpr()];
1359    assert(vlaSize && "no size for VLA!");
1360    assert(vlaSize.getType() == sizeTy);
1361
1362    if (!numElements) {
1363      numElements = vlaSize;
1364    } else {
1365      // It's undefined behavior if this wraps around, so mark it that way.
1366      // FIXME: Teach -fsanitize=undefined to trap this.
1367
1368      numElements =
1369          builder.createMul(numElements.getLoc(), numElements, vlaSize,
1371    }
1372  } while ((type = getContext().getAsVariableArrayType(elementType)));
1373
1374  assert(numElements && "Undefined elements number");
1375  return {numElements, elementType};
1376}
1377
// Return the element count for a single VLA dimension (previously recorded
// in vlaSizeMap by emitVariablyModifiedType) together with its element type.
// NOTE(review): the signature lines (orig. 1378-1379) were dropped by this
// rendering; per the member index this is CIRGenFunction::getVLAElements1D(
// const VariableArrayType *vla).
1380  mlir::Value vlaSize = vlaSizeMap[vla->getSizeExpr()];
1381  assert(vlaSize && "no size for VLA!");
1382  assert(vlaSize.getType() == sizeTy);
1383  return {vlaSize, vla->getElementType()};
1384}
1385
1386// TODO(cir): Most of this function can be shared between CIRGen
1387// and traditional LLVM codegen
// Walk down a variably-modified type, emitting and recording (in vlaSizeMap)
// the size expression of every VLA level encountered, and desugaring other
// type sugar one step at a time. NOTE(review): the signature line (orig.
// 1388) was dropped by this rendering; per the member index it is void
// CIRGenFunction::emitVariablyModifiedType(QualType ty). Lines 1468 and 1478
// are also missing — presumably the cast of `ty` to VariableArrayType (the
// `vat` used below) and an assert — TODO confirm against upstream.
1389  assert(type->isVariablyModifiedType() &&
1390         "Must pass variably modified type to EmitVLASizes!");
1391
1392  // We're going to walk down into the type and look for VLA
1393  // expressions.
1394  do {
1395    assert(type->isVariablyModifiedType());
1396
1397    const Type *ty = type.getTypePtr();
1398    switch (ty->getTypeClass()) {
1399    case Type::CountAttributed:
1400    case Type::PackIndexing:
1401    case Type::ArrayParameter:
1402    case Type::HLSLAttributedResource:
1403    case Type::HLSLInlineSpirv:
1404    case Type::PredefinedSugar:
1405      cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
1406      break;
1407
1408#define TYPE(Class, Base)
1409#define ABSTRACT_TYPE(Class, Base)
1410#define NON_CANONICAL_TYPE(Class, Base)
1411#define DEPENDENT_TYPE(Class, Base) case Type::Class:
1412#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
1413#include "clang/AST/TypeNodes.inc"
1414      llvm_unreachable(
1415          "dependent type must be resolved before the CIR codegen");
1416
1417    // These types are never variably-modified.
1418    case Type::Builtin:
1419    case Type::Complex:
1420    case Type::Vector:
1421    case Type::ExtVector:
1422    case Type::ConstantMatrix:
1423    case Type::Record:
1424    case Type::Enum:
1425    case Type::Using:
1426    case Type::TemplateSpecialization:
1427    case Type::ObjCTypeParam:
1428    case Type::ObjCObject:
1429    case Type::ObjCInterface:
1430    case Type::ObjCObjectPointer:
1431    case Type::BitInt:
1432    case Type::OverflowBehavior:
1433      llvm_unreachable("type class is never variably-modified!");
1434
1435    case Type::Adjusted:
1436      type = cast<clang::AdjustedType>(ty)->getAdjustedType();
1437      break;
1438
1439    case Type::Decayed:
1440      type = cast<clang::DecayedType>(ty)->getPointeeType();
1441      break;
1442
1443    case Type::Pointer:
1444      type = cast<clang::PointerType>(ty)->getPointeeType();
1445      break;
1446
1447    case Type::BlockPointer:
1448      type = cast<clang::BlockPointerType>(ty)->getPointeeType();
1449      break;
1450
1451    case Type::LValueReference:
1452    case Type::RValueReference:
1453      type = cast<clang::ReferenceType>(ty)->getPointeeType();
1454      break;
1455
1456    case Type::MemberPointer:
1457      type = cast<clang::MemberPointerType>(ty)->getPointeeType();
1458      break;
1459
1460    case Type::ConstantArray:
1461    case Type::IncompleteArray:
1462      // Losing element qualification here is fine.
1463      type = cast<clang::ArrayType>(ty)->getElementType();
1464      break;
1465
1466    case Type::VariableArray: {
1467      // Losing element qualification here is fine.
1469
1470      // Unknown size indication requires no size computation.
1471      // Otherwise, evaluate and record it.
1472      if (const Expr *sizeExpr = vat->getSizeExpr()) {
1473        // It's possible that we might have emitted this already,
1474        // e.g. with a typedef and a pointer to it.
1475        mlir::Value &entry = vlaSizeMap[sizeExpr];
1476        if (!entry) {
1477          mlir::Value size = emitScalarExpr(sizeExpr);
1479
1480          // Always zexting here would be wrong if it weren't
1481          // undefined behavior to have a negative bound.
1482          // FIXME: What about when size's type is larger than size_t?
1483          entry = builder.createIntCast(size, sizeTy);
1484        }
1485      }
1486      type = vat->getElementType();
1487      break;
1488    }
1489
1490    case Type::FunctionProto:
1491    case Type::FunctionNoProto:
1492      type = cast<clang::FunctionType>(ty)->getReturnType();
1493      break;
1494
1495    case Type::Paren:
1496    case Type::TypeOf:
1497    case Type::UnaryTransform:
1498    case Type::Attributed:
1499    case Type::BTFTagAttributed:
1500    case Type::SubstTemplateTypeParm:
1501    case Type::MacroQualified:
1502      // Keep walking after single level desugaring.
1503      type = type.getSingleStepDesugaredType(getContext());
1504      break;
1505
1506    case Type::Typedef:
1507    case Type::Decltype:
1508    case Type::Auto:
1509    case Type::DeducedTemplateSpecialization:
1510      // Stop walking: nothing to do.
1511      return;
1512
1513    case Type::TypeOfExpr:
1514      // Stop walking: emit typeof expression.
1515      emitIgnoredExpr(cast<clang::TypeOfExprType>(ty)->getUnderlyingExpr());
1516      return;
1517
1518    case Type::Atomic:
1519      type = cast<clang::AtomicType>(ty)->getValueType();
1520      break;
1521
1522    case Type::Pipe:
1523      type = cast<clang::PipeType>(ty)->getElementType();
1524      break;
1525    }
1526  } while (type->isVariablyModifiedType());
1527}
1528
// Build a "reference" to a va_list: the address when the builtin va_list is
// an array type (it decays to a pointer), otherwise the lvalue address of
// the expression. NOTE(review): the signature line (orig. 1529) was dropped
// by this rendering; per the member index it is Address
// CIRGenFunction::emitVAListRef(const Expr *e).
1530  if (getContext().getBuiltinVaListType()->isArrayType())
1531    return emitPointerWithAlignment(e);
1532  return emitLValue(e).getAddress();
1533}
1534
1535} // namespace clang::CIRGen
Defines the clang::Expr interface and subclasses for C++ expressions.
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
__device__ __2f16 float __ockl_bool s
APSInt & getInt()
Definition APValue.h:508
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
SourceManager & getSourceManager()
Definition ASTContext.h:859
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3772
QualType getElementType() const
Definition TypeBase.h:3784
mlir::Type getElementType() const
Definition Address.h:123
mlir::Value createFloatingCast(mlir::Value v, mlir::Type destType)
CIRGenFPOptionsRAII(CIRGenFunction &cgf, FPOptions FPFeatures)
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
void forceCleanup(ArrayRef< mlir::Value * > valuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
bool hasPendingCleanups() const
Whether there are any pending cleanups that have been pushed since this scope was entered.
static bool isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor)
Checks whether the given constructor is a valid subject for the complete-to-base constructor delegati...
void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB, const FunctionDecl *fd, SourceLocation bodyBeginLoc)
Emit the function prologue: declare function arguments in the symbol table.
mlir::Type convertType(clang::QualType t)
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
EHScopeStack::stable_iterator prologueCleanupDepth
The cleanup depth enclosing all the cleanups associated with the parameters.
cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn, cir::FuncType funcType)
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural pointee alignment of T.
LValue emitMemberExpr(const MemberExpr *e)
const TargetInfo & getTarget() const
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
const clang::Decl * curFuncDecl
LValue emitLValueForLambdaField(const FieldDecl *field)
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
llvm::DenseMap< const Expr *, mlir::Value > vlaSizeMap
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e)
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, const FunctionArgList &args, clang::SourceLocation loc)
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
mlir::Value emitArrayLength(const clang::ArrayType *arrayType, QualType &baseType, Address &addr)
Computes the length of an array in elements, as well as the base element type and a properly-typed fi...
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
mlir::Block * indirectGotoBlock
IndirectBranch - The first time an indirect goto is seen we create a block reserved for the indirect ...
mlir::Operation * curFn
The current function or global initializer that is generated code for.
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void enterDtorCleanups(const CXXDestructorDecl *dtor, CXXDtorType type)
Enter the cleanups necessary to complete the given phase of destruction for a destructor.
llvm::SmallVector< const ParmVarDecl * > fnArgs
Save Parameter Decl for coroutine.
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
void emitImplicitAssignmentOperatorBody(FunctionArgList &args)
mlir::Type convertTypeForMem(QualType t)
clang::QualType buildFunctionArgList(clang::GlobalDecl gd, FunctionArgList &args)
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, FunctionArgList &args)
This routine generates necessary code to initialize base classes and non-static data members belongin...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
Address returnValue
The temporary alloca to hold the return value.
void finishFunction(SourceLocation endLoc)
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
clang::FieldDecl * lambdaThisCaptureField
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
void emitConstructorBody(FunctionArgList &args)
LValue emitCallExprLValue(const clang::CallExpr *e)
bool haveInsertPoint() const
True if an insertion point is defined.
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth, ArrayRef< mlir::Value * > valuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
bool shouldNullCheckClassCastValue(const CastExpr *ce)
CIRGenBuilderTy & getBuilder()
bool didCallStackSave
Whether a cir.stacksave operation has been added.
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
void startFunction(clang::GlobalDecl gd, clang::QualType returnType, cir::FuncOp fn, cir::FuncType funcType, FunctionArgList args, clang::SourceLocation loc, clang::SourceLocation startLoc)
Emit code for the start of a function.
unsigned counterRefTmp
Hold counters for incrementally naming temporaries.
mlir::MLIRContext & getMLIRContext()
void emitDestructorBody(FunctionArgList &args)
Emits the body of the current destructor.
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
LValue emitCXXTypeidLValue(const CXXTypeidExpr *e)
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
LValue emitPredefinedLValue(const PredefinedExpr *e)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
void emitLambdaStaticInvokeBody(const CXXMethodDecl *md)
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
Set the address of a local variable.
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
Type for representing both the decl and type of parameters to a function.
Definition CIRGenCall.h:193
Address getAddress() const
mlir::Value getPointer() const
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
Represents a C++ destructor within a class.
Definition DeclCXX.h:2876
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition DeclCXX.h:2262
QualType getFunctionObjectParameterType() const
Definition DeclCXX.h:2286
bool isAbstract() const
Determine whether this class has a pure virtual function.
Definition DeclCXX.h:1221
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition DeclCXX.h:1186
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
CastKind getCastKind() const
Definition Expr.h:3723
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1741
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition DeclBase.h:593
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks,...
SourceLocation getLocation() const
Definition DeclBase.h:439
bool hasAttr() const
Definition DeclBase.h:577
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Returns the set of floating point options that apply to this expression.
Definition Expr.cpp:3989
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
QualType getType() const
Definition Expr.h:144
LangOptions::FPExceptionModeKind getExceptionMode() const
RoundingMode getRoundingMode() const
Represents a function declaration or definition.
Definition Decl.h:2015
Stmt * getBody(const FunctionDecl *&Definition) const
Retrieve the body (definition) of the function.
Definition Decl.cpp:3280
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4550
bool hasBody(const FunctionDecl *&Definition) const
Returns true if the function has a body.
Definition Decl.cpp:3200
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition Expr.h:3856
FPExceptionModeKind
Possible floating point exception behavior.
@ FPE_Default
Used internally to represent initial unspecified value.
@ FPE_Strict
Strictly preserve the floating-point exception semantics.
@ FPE_MayTrap
Transformations do not cause new exceptions but may hide some.
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
unsigned getLine() const
Return the presumed line number of this location.
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2912
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
This class handles loading and caching of source files into memory.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location that a SourceLocation specifies.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition Stmt.h:86
StmtClass getStmtClass() const
Definition Stmt.h:1494
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
const char * getStmtClassName() const
Definition Stmt.cpp:86
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:355
bool isVoidType() const
Definition TypeBase.h:9034
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isAnyComplexType() const
Definition TypeBase.h:8803
TypeClass getTypeClass() const
Definition TypeBase.h:2433
const T * getAs() const
Member-template getAs<specific type>.
Definition TypeBase.h:9261
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:2202
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:4016
Expr * getSizeExpr() const
Definition TypeBase.h:4030
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
static bool previousOpIsNonYieldingCleanup(mlir::Block *block)
static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt)
static bool mayDropFunctionReturn(const ASTContext &astContext, QualType returnType)
static mlir::Value emitArgumentDemotion(CIRGenFunction &cgf, const VarDecl *var, mlir::Value value)
An argument came in as a promoted argument; demote it back to its declared type.
static void eraseEmptyAndUnusedBlocks(cir::FuncOp func)
static llvm::fp::ExceptionBehavior toConstrainedExceptMd(LangOptions::FPExceptionModeKind kind)
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
@ Ctor_Base
Base object ctor.
Definition ABI.h:26
@ Ctor_Complete
Complete object ctor.
Definition ABI.h:25
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
CXXDtorType
C++ destructor types.
Definition ABI.h:34
@ Dtor_VectorDeleting
Vector deleting dtor.
Definition ABI.h:40
@ Dtor_Comdat
The COMDAT used for dtors.
Definition ABI.h:38
@ Dtor_Unified
GCC-style unified dtor.
Definition ABI.h:39
@ Dtor_Base
Base object dtor.
Definition ABI.h:37
@ Dtor_Complete
Complete object dtor.
Definition ABI.h:36
@ Dtor_Deleting
Deleting dtor.
Definition ABI.h:35
U cast(CodeGen::Address addr)
Definition Address.h:327
static bool fastMathFuncAttributes()
static bool vtableInitialization()
static bool constructABIArgDirectExtend()
static bool runCleanupsScope()
static bool emitTypeCheck()
static bool fastMathGuard()
static bool fastMathFlags()
static bool generateDebugInfo()
static bool cleanupWithPreservedValues()
static bool incrementProfileCounter()
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
llvm::ArrayRef< mlir::Block * > getRetBlocks()
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
mlir::Block * getCleanupBlock(mlir::OpBuilder &builder)
mlir::Location getRetLoc(mlir::Block *b)
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650