clang 23.0.0git
CIRGenFunction.cpp
Go to the documentation of this file.
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Internal per-function state used for AST-to-ClangIR code gen
//
//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
14
15#include "CIRGenCXXABI.h"
16#include "CIRGenCall.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/Location.h"
19#include "clang/AST/Attr.h"
20#include "clang/AST/ExprCXX.h"
23#include "llvm/IR/FPEnv.h"
24
25#include <cassert>
26
27namespace clang::CIRGen {
28
30 bool suppressNewContext)
31 : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
32 ehStack.setCGF(this);
33}
34
36
37// This is copied from clang/lib/CodeGen/CodeGenFunction.cpp
39 type = type.getCanonicalType();
40 while (true) {
41 switch (type->getTypeClass()) {
42#define TYPE(name, parent)
43#define ABSTRACT_TYPE(name, parent)
44#define NON_CANONICAL_TYPE(name, parent) case Type::name:
45#define DEPENDENT_TYPE(name, parent) case Type::name:
46#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
47#include "clang/AST/TypeNodes.inc"
48 llvm_unreachable("non-canonical or dependent type in IR-generation");
49
50 case Type::Auto:
51 case Type::DeducedTemplateSpecialization:
52 llvm_unreachable("undeduced type in IR-generation");
53
54 // Various scalar types.
55 case Type::Builtin:
56 case Type::Pointer:
57 case Type::BlockPointer:
58 case Type::LValueReference:
59 case Type::RValueReference:
60 case Type::MemberPointer:
61 case Type::Vector:
62 case Type::ExtVector:
63 case Type::ConstantMatrix:
64 case Type::FunctionProto:
65 case Type::FunctionNoProto:
66 case Type::Enum:
67 case Type::ObjCObjectPointer:
68 case Type::Pipe:
69 case Type::BitInt:
70 case Type::OverflowBehavior:
71 case Type::HLSLAttributedResource:
72 case Type::HLSLInlineSpirv:
73 return cir::TEK_Scalar;
74
75 // Complexes.
76 case Type::Complex:
77 return cir::TEK_Complex;
78
79 // Arrays, records, and Objective-C objects.
80 case Type::ConstantArray:
81 case Type::IncompleteArray:
82 case Type::VariableArray:
83 case Type::Record:
84 case Type::ObjCObject:
85 case Type::ObjCInterface:
86 case Type::ArrayParameter:
87 return cir::TEK_Aggregate;
88
89 // We operate on atomic values according to their underlying type.
90 case Type::Atomic:
91 type = cast<AtomicType>(type)->getValueType();
92 continue;
93 }
94 llvm_unreachable("unknown type kind!");
95 }
96}
97
99 return cgm.getTypes().convertTypeForMem(t);
100}
101
103 return cgm.getTypes().convertType(t);
104}
105
107 // Some AST nodes might contain invalid source locations (e.g.
108 // CXXDefaultArgExpr), workaround that to still get something out.
109 if (srcLoc.isValid()) {
111 PresumedLoc pLoc = sm.getPresumedLoc(srcLoc);
112 StringRef filename = pLoc.getFilename();
113 return mlir::FileLineColLoc::get(builder.getStringAttr(filename),
114 pLoc.getLine(), pLoc.getColumn());
115 }
116 // We expect to have a currSrcLoc set, so we assert here, but it isn't
117 // critical for the correctness of compilation, so in non-assert builds
118 // we fallback on using an unknown location.
119 assert(currSrcLoc && "expected to inherit some source location");
120 if (currSrcLoc)
121 return *currSrcLoc;
122 // We're brave, but time to give up.
123 return builder.getUnknownLoc();
124}
125
126mlir::Location CIRGenFunction::getLoc(SourceRange srcLoc) {
127 // Some AST nodes might contain invalid source locations (e.g.
128 // CXXDefaultArgExpr), workaround that to still get something out.
129 if (srcLoc.isValid()) {
130 mlir::Location beg = getLoc(srcLoc.getBegin());
131 mlir::Location end = getLoc(srcLoc.getEnd());
132 SmallVector<mlir::Location, 2> locs = {beg, end};
133 mlir::Attribute metadata;
134 return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
135 }
136 // We expect to have a currSrcLoc set, so we assert here, but it isn't
137 // critical for the correctness of compilation, so in non-assert builds
138 // we fallback on using an unknown location.
139 assert(currSrcLoc && "expected to inherit some source location");
140 if (currSrcLoc)
141 return *currSrcLoc;
142 // We're brave, but time to give up.
143 return builder.getUnknownLoc();
144}
145
146mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) {
147 SmallVector<mlir::Location, 2> locs = {lhs, rhs};
148 mlir::Attribute metadata;
149 return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
150}
151
152bool CIRGenFunction::containsLabel(const Stmt *s, bool ignoreCaseStmts) {
153 // Null statement, not a label!
154 if (!s)
155 return false;
156
157 // If this is a label, we have to emit the code, consider something like:
158 // if (0) { ... foo: bar(); } goto foo;
159 //
160 // TODO: If anyone cared, we could track __label__'s, since we know that you
161 // can't jump to one from outside their declared region.
162 if (isa<LabelStmt>(s))
163 return true;
164
165 // If this is a case/default statement, and we haven't seen a switch, we
166 // have to emit the code.
167 if (isa<SwitchCase>(s) && !ignoreCaseStmts)
168 return true;
169
170 // If this is a switch statement, we want to ignore case statements when we
171 // recursively process the sub-statements of the switch. If we haven't
172 // encountered a switch statement, we treat case statements like labels, but
173 // if we are processing a switch statement, case statements are expected.
174 if (isa<SwitchStmt>(s))
175 ignoreCaseStmts = true;
176
177 // Scan subexpressions for verboten labels.
178 return std::any_of(s->child_begin(), s->child_end(),
179 [=](const Stmt *subStmt) {
180 return containsLabel(subStmt, ignoreCaseStmts);
181 });
182}
183
184/// If the specified expression does not fold to a constant, or if it does but
185/// contains a label, return false. If it constant folds return true and set
186/// the boolean result in Result.
187bool CIRGenFunction::constantFoldsToBool(const Expr *cond, bool &resultBool,
188 bool allowLabels) {
189 llvm::APSInt resultInt;
190 if (!constantFoldsToSimpleInteger(cond, resultInt, allowLabels))
191 return false;
192
193 resultBool = resultInt.getBoolValue();
194 return true;
195}
196
197/// If the specified expression does not fold to a constant, or if it does
198/// fold but contains a label, return false. If it constant folds, return
199/// true and set the folded value.
201 llvm::APSInt &resultInt,
202 bool allowLabels) {
203 // FIXME: Rename and handle conversion of other evaluatable things
204 // to bool.
205 Expr::EvalResult result;
206 if (!cond->EvaluateAsInt(result, getContext()))
207 return false; // Not foldable, not integer or not fully evaluatable.
208
209 llvm::APSInt intValue = result.Val.getInt();
210 if (!allowLabels && containsLabel(cond))
211 return false; // Contains a label.
212
213 resultInt = intValue;
214 return true;
215}
216
217void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
218 CharUnits alignment) {
219 if (!type->isVoidType()) {
220 mlir::Value addr = emitAlloca("__retval", convertType(type), loc, alignment,
221 /*insertIntoFnEntryBlock=*/false);
222 fnRetAlloca = addr;
223 returnValue = Address(addr, alignment);
224 }
225}
226
227void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
228 mlir::Location loc, CharUnits alignment,
229 bool isParam) {
230 assert(isa<NamedDecl>(var) && "Needs a named decl");
231 assert(!symbolTable.count(var) && "not supposed to be available just yet");
232
233 auto allocaOp = addrVal.getDefiningOp<cir::AllocaOp>();
234 assert(allocaOp && "expected cir::AllocaOp");
235
236 if (isParam)
237 allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
238 if (ty->isReferenceType() || ty.isConstQualified())
239 allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext()));
240
241 symbolTable.insert(var, allocaOp);
242}
243
245 CIRGenBuilderTy &builder = cgf.builder;
246 LexicalScope *localScope = cgf.curLexScope;
247
248 auto applyCleanup = [&]() {
249 if (performCleanup) {
250 // ApplyDebugLocation
252 forceCleanup();
253 }
254 };
255
256 // Cleanup are done right before codegen resumes a scope. This is where
257 // objects are destroyed. Process all return blocks.
258 // TODO(cir): Handle returning from a switch statement through a cleanup
259 // block. We can't simply jump to the cleanup block, because the cleanup block
260 // is not part of the case region. Either reemit all cleanups in the return
261 // block or wait for MLIR structured control flow to support early exits.
263 for (mlir::Block *retBlock : localScope->getRetBlocks()) {
264 mlir::OpBuilder::InsertionGuard guard(builder);
265 builder.setInsertionPointToEnd(retBlock);
266 retBlocks.push_back(retBlock);
267 mlir::Location retLoc = localScope->getRetLoc(retBlock);
268 emitReturn(retLoc);
269 }
270
271 auto insertCleanupAndLeave = [&](mlir::Block *insPt) {
272 mlir::OpBuilder::InsertionGuard guard(builder);
273 builder.setInsertionPointToEnd(insPt);
274
275 // If we still don't have a cleanup block, it means that `applyCleanup`
276 // below might be able to get us one.
277 mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
278
279 // Leverage and defers to RunCleanupsScope's dtor and scope handling.
280 applyCleanup();
281
282 mlir::Block *currentBlock = builder.getBlock();
283
284 // If we now have one after `applyCleanup`, hook it up properly.
285 if (!cleanupBlock && localScope->getCleanupBlock(builder)) {
286 cleanupBlock = localScope->getCleanupBlock(builder);
287 cir::BrOp::create(builder, insPt->back().getLoc(), cleanupBlock);
288 if (!cleanupBlock->mightHaveTerminator()) {
289 mlir::OpBuilder::InsertionGuard guard(builder);
290 builder.setInsertionPointToEnd(cleanupBlock);
291 cir::YieldOp::create(builder, localScope->endLoc);
292 }
293 }
294
295 if (localScope->depth == 0) {
296 // Reached the end of the function.
297 // Special handling only for single return block case
298 if (localScope->getRetBlocks().size() == 1) {
299 mlir::Block *retBlock = localScope->getRetBlocks()[0];
300 mlir::Location retLoc = localScope->getRetLoc(retBlock);
301 if (retBlock->getUses().empty()) {
302 retBlock->erase();
303 } else {
304 // Thread return block via cleanup block.
305 if (cleanupBlock) {
306 for (mlir::BlockOperand &blockUse : retBlock->getUses()) {
307 cir::BrOp brOp = mlir::cast<cir::BrOp>(blockUse.getOwner());
308 brOp.setSuccessor(cleanupBlock);
309 }
310 }
311
312 cir::BrOp::create(builder, retLoc, retBlock);
313 return;
314 }
315 }
316 emitImplicitReturn();
317 return;
318 }
319
320 // End of any local scope != function
321 // Ternary ops have to deal with matching arms for yielding types
322 // and do return a value, it must do its own cir.yield insertion.
323 if (!localScope->isTernary() && !currentBlock->mightHaveTerminator()) {
324 !retVal ? cir::YieldOp::create(builder, localScope->endLoc)
325 : cir::YieldOp::create(builder, localScope->endLoc, retVal);
326 }
327 };
328
329 // If a cleanup block has been created at some point, branch to it
330 // and set the insertion point to continue at the cleanup block.
331 // Terminators are then inserted either in the cleanup block or
332 // inline in this current block.
333 mlir::Block *cleanupBlock = localScope->getCleanupBlock(builder);
334 if (cleanupBlock)
335 insertCleanupAndLeave(cleanupBlock);
336
337 // Now deal with any pending block wrap up like implicit end of
338 // scope.
339
340 mlir::Block *curBlock = builder.getBlock();
341 if (isGlobalInit() && !curBlock)
342 return;
343 if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
344 return;
345
346 // Get rid of any empty block at the end of the scope.
347 bool entryBlock = builder.getInsertionBlock()->isEntryBlock();
348 if (!entryBlock && curBlock->empty()) {
349 curBlock->erase();
350 for (mlir::Block *retBlock : retBlocks) {
351 if (retBlock->getUses().empty())
352 retBlock->erase();
353 }
354 return;
355 }
356
357 // If there's a cleanup block, branch to it, nothing else to do.
358 if (cleanupBlock) {
359 cir::BrOp::create(builder, curBlock->back().getLoc(), cleanupBlock);
360 return;
361 }
362
363 // No pre-existent cleanup block, emit cleanup code and yield/return.
364 insertCleanupAndLeave(curBlock);
365}
366
367cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
368 CIRGenBuilderTy &builder = cgf.getBuilder();
369
370 auto fn = dyn_cast<cir::FuncOp>(cgf.curFn);
371 assert(fn && "emitReturn from non-function");
372
373 // If we are on a coroutine, add the coro_end builtin call.
374 if (fn.getCoroutine())
375 cgf.emitCoroEndBuiltinCall(loc,
376 builder.getNullPtr(builder.getVoidPtrTy(), loc));
377 if (!fn.getFunctionType().hasVoidReturn()) {
378 // Load the value from `__retval` and return it via the `cir.return` op.
379 auto value = cir::LoadOp::create(
380 builder, loc, fn.getFunctionType().getReturnType(), *cgf.fnRetAlloca);
381 return cir::ReturnOp::create(builder, loc,
382 llvm::ArrayRef(value.getResult()));
383 }
384 return cir::ReturnOp::create(builder, loc);
385}
386
387// This is copied from CodeGenModule::MayDropFunctionReturn. This is a
388// candidate for sharing between CIRGen and CodeGen.
389static bool mayDropFunctionReturn(const ASTContext &astContext,
390 QualType returnType) {
391 // We can't just discard the return value for a record type with a complex
392 // destructor or a non-trivially copyable type.
393 if (const auto *classDecl = returnType->getAsCXXRecordDecl())
394 return classDecl->hasTrivialDestructor();
395 return returnType.isTriviallyCopyableType(astContext);
396}
397
398static bool previousOpIsNonYieldingCleanup(mlir::Block *block) {
399 if (block->empty())
400 return false;
401 mlir::Operation *op = &block->back();
402 auto cleanupScopeOp = mlir::dyn_cast<cir::CleanupScopeOp>(op);
403 if (!cleanupScopeOp)
404 return false;
405
406 // Check whether the body region of the cleanup scope exits via cir.yield.
407 // Exits via cir.return or cir.goto do not fall through to the operation
408 // following the cleanup scope, and exits via break, continue, and resume
409 // are not expected here.
410 for (mlir::Block &bodyBlock : cleanupScopeOp.getBodyRegion()) {
411 if (bodyBlock.mightHaveTerminator()) {
412 if (mlir::isa<cir::YieldOp>(bodyBlock.getTerminator()))
413 return false;
414 assert(!mlir::isa<cir::BreakOp>(bodyBlock.getTerminator()) &&
415 !mlir::isa<cir::ContinueOp>(bodyBlock.getTerminator()) &&
416 !mlir::isa<cir::ResumeOp>(bodyBlock.getTerminator()));
417 }
418 }
419 return true;
420}
421
422void CIRGenFunction::LexicalScope::emitImplicitReturn() {
423 CIRGenBuilderTy &builder = cgf.getBuilder();
424 LexicalScope *localScope = cgf.curLexScope;
425
426 const auto *fd = cast<clang::FunctionDecl>(cgf.curGD.getDecl());
427
428 // In C++, flowing off the end of a non-void function is always undefined
429 // behavior. In C, flowing off the end of a non-void function is undefined
430 // behavior only if the non-existent return value is used by the caller.
431 // That influences whether the terminating op is trap, unreachable, or
432 // return.
433 if (cgf.getLangOpts().CPlusPlus && !fd->hasImplicitReturnZero() &&
434 !cgf.sawAsmBlock && !fd->getReturnType()->isVoidType() &&
435 builder.getInsertionBlock() &&
436 !previousOpIsNonYieldingCleanup(builder.getInsertionBlock())) {
437 bool shouldEmitUnreachable =
438 cgf.cgm.getCodeGenOpts().StrictReturn ||
439 !mayDropFunctionReturn(fd->getASTContext(), fd->getReturnType());
440
441 if (shouldEmitUnreachable) {
443 if (cgf.cgm.getCodeGenOpts().OptimizationLevel == 0)
444 cir::TrapOp::create(builder, localScope->endLoc);
445 else
446 cir::UnreachableOp::create(builder, localScope->endLoc);
447 builder.clearInsertionPoint();
448 return;
449 }
450 }
451
452 (void)emitReturn(localScope->endLoc);
453}
454
456 LexicalScope *scope = this;
457 while (scope) {
458 if (scope->isTry())
459 return scope->getTry();
460 scope = scope->parentScope;
461 }
462 return nullptr;
463}
464
465/// An argument came in as a promoted argument; demote it back to its
466/// declared type.
467static mlir::Value emitArgumentDemotion(CIRGenFunction &cgf, const VarDecl *var,
468 mlir::Value value) {
469 mlir::Type ty = cgf.convertType(var->getType());
470
471 // This can happen with promotions that actually don't change the
472 // underlying type, like the enum promotions.
473 if (value.getType() == ty)
474 return value;
475
476 assert((mlir::isa<cir::IntType>(ty) || cir::isAnyFloatingPointType(ty)) &&
477 "unexpected promotion type");
478
479 if (mlir::isa<cir::IntType>(ty))
480 return cgf.getBuilder().CIRBaseBuilderTy::createIntCast(value, ty);
481
482 return cgf.getBuilder().createFloatingCast(value, ty);
483}
484
486 mlir::Block *entryBB,
487 const FunctionDecl *fd,
488 SourceLocation bodyBeginLoc) {
489 // Naked functions don't have prologues.
490 if (fd && fd->hasAttr<NakedAttr>()) {
491 cgm.errorNYI(bodyBeginLoc, "naked function decl");
492 }
493
494 // Declare all the function arguments in the symbol table.
495 for (const auto nameValue : llvm::zip(args, entryBB->getArguments())) {
496 const VarDecl *paramVar = std::get<0>(nameValue);
497 mlir::Value paramVal = std::get<1>(nameValue);
498 CharUnits alignment = getContext().getDeclAlign(paramVar);
499 mlir::Location paramLoc = getLoc(paramVar->getSourceRange());
500 paramVal.setLoc(paramLoc);
501
502 mlir::Value addrVal =
503 emitAlloca(cast<NamedDecl>(paramVar)->getName(),
504 convertType(paramVar->getType()), paramLoc, alignment,
505 /*insertIntoFnEntryBlock=*/true);
506
507 declare(addrVal, paramVar, paramVar->getType(), paramLoc, alignment,
508 /*isParam=*/true);
509
510 setAddrOfLocalVar(paramVar, Address(addrVal, alignment));
511
512 bool isPromoted = isa<ParmVarDecl>(paramVar) &&
513 cast<ParmVarDecl>(paramVar)->isKNRPromoted();
515 if (isPromoted)
516 paramVal = emitArgumentDemotion(*this, paramVar, paramVal);
517
518 // Location of the store to the param storage tracked as beginning of
519 // the function body.
520 mlir::Location fnBodyBegin = getLoc(bodyBeginLoc);
521 builder.CIRBaseBuilderTy::createStore(fnBodyBegin, paramVal, addrVal);
522 }
523 assert(builder.getInsertionBlock() && "Should be valid");
524}
525
527 cir::FuncOp fn, cir::FuncType funcType,
529 SourceLocation startLoc) {
530 assert(!curFn &&
531 "CIRGenFunction can only be used for one function at a time");
532
533 curFn = fn;
534
535 const Decl *d = gd.getDecl();
536
537 didCallStackSave = false;
538 curCodeDecl = d;
539 const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
540 curFuncDecl = (d ? d->getNonClosureContext() : nullptr);
541
542 prologueCleanupDepth = ehStack.stable_begin();
543
544 mlir::Block *entryBB = &fn.getBlocks().front();
545 builder.setInsertionPointToStart(entryBB);
546
547 // Determine the function body begin location for the prolog.
548 // If fd is null or has no body, use startLoc as fallback.
549 SourceLocation bodyBeginLoc = startLoc;
550 if (fd) {
551 if (Stmt *body = fd->getBody())
552 bodyBeginLoc = body->getBeginLoc();
553 else
554 bodyBeginLoc = fd->getLocation();
555 }
556
557 emitFunctionProlog(args, entryBB, fd, bodyBeginLoc);
558
559 // When the current function is not void, create an address to store the
560 // result value.
561 if (!returnType->isVoidType()) {
562 // Determine the function body end location.
563 // If fd is null or has no body, use loc as fallback.
564 SourceLocation bodyEndLoc = loc;
565 if (fd) {
566 if (Stmt *body = fd->getBody())
567 bodyEndLoc = body->getEndLoc();
568 else
569 bodyEndLoc = fd->getLocation();
570 }
571 emitAndUpdateRetAlloca(returnType, getLoc(bodyEndLoc),
572 getContext().getTypeAlignInChars(returnType));
573 }
574
575 if (isa_and_nonnull<CXXMethodDecl>(d) &&
576 cast<CXXMethodDecl>(d)->isInstance()) {
577 cgm.getCXXABI().emitInstanceFunctionProlog(loc, *this);
578
579 const auto *md = cast<CXXMethodDecl>(d);
580 if (md->getParent()->isLambda() && md->getOverloadedOperator() == OO_Call) {
581 // We're in a lambda.
582 auto fn = dyn_cast<cir::FuncOp>(curFn);
583 assert(fn && "lambda in non-function region");
584 fn.setLambda(true);
585
586 // Figure out the captures.
587 md->getParent()->getCaptureFields(lambdaCaptureFields,
590 // If the lambda captures the object referred to by '*this' - either by
591 // value or by reference, make sure CXXThisValue points to the correct
592 // object.
593
594 // Get the lvalue for the field (which is a copy of the enclosing object
595 // or contains the address of the enclosing object).
596 LValue thisFieldLValue =
598 if (!lambdaThisCaptureField->getType()->isPointerType()) {
599 // If the enclosing object was captured by value, just use its
600 // address. Sign this pointer.
601 cxxThisValue = thisFieldLValue.getPointer();
602 } else {
603 // Load the lvalue pointed to by the field, since '*this' was captured
604 // by reference.
606 emitLoadOfLValue(thisFieldLValue, SourceLocation()).getValue();
607 }
608 }
609 for (auto *fd : md->getParent()->fields()) {
610 if (fd->hasCapturedVLAType())
611 cgm.errorNYI(loc, "lambda captured VLA type");
612 }
613 } else {
614 // Not in a lambda; just use 'this' from the method.
615 // FIXME: Should we generate a new load for each use of 'this'? The fast
616 // register allocator would be happier...
618 }
619
622 }
623}
624
626 for (cir::BlockAddressOp &blockAddress : cgm.unresolvedBlockAddressToLabel) {
627 cir::LabelOp labelOp =
628 cgm.lookupBlockAddressInfo(blockAddress.getBlockAddrInfo());
629 assert(labelOp && "expected cir.labelOp to already be emitted");
630 cgm.updateResolvedBlockAddress(blockAddress, labelOp);
631 }
632 cgm.unresolvedBlockAddressToLabel.clear();
633}
634
637 return;
640 mlir::OpBuilder::InsertionGuard guard(builder);
641 builder.setInsertionPointToEnd(indirectGotoBlock);
642 for (auto &[blockAdd, labelOp] : cgm.blockAddressToLabel) {
643 succesors.push_back(labelOp->getBlock());
644 rangeOperands.push_back(labelOp->getBlock()->getArguments());
645 }
646 cir::IndirectBrOp::create(builder, builder.getUnknownLoc(),
647 indirectGotoBlock->getArgument(0), false,
648 rangeOperands, succesors);
649 cgm.blockAddressToLabel.clear();
650}
651
653 // Resolve block address-to-label mappings, then emit the indirect branch
654 // with the corresponding targets.
657
658 // If a label address was taken but no indirect goto was used, we can't remove
659 // the block argument here. Instead, we mark the 'indirectbr' op
660 // as poison so that the cleanup can be deferred to lowering, since the
661 // verifier doesn't allow the 'indirectbr' target address to be null.
662 if (indirectGotoBlock && indirectGotoBlock->hasNoPredecessors()) {
663 auto indrBr = cast<cir::IndirectBrOp>(indirectGotoBlock->front());
664 indrBr.setPoison(true);
665 }
666
667 // Pop any cleanups that might have been associated with the
668 // parameters. Do this in whatever block we're currently in; it's
669 // important to do this before we enter the return block or return
670 // edges will be *really* confused.
671 // TODO(cir): Use prologueCleanupDepth here.
672 bool hasCleanups = ehStack.stable_begin() != prologueCleanupDepth;
673 if (hasCleanups) {
675 // FIXME(cir): should we clearInsertionPoint? breaks many testcases
677 }
678}
679
680mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
681 // We start with function level scope for variables.
683
684 if (const CompoundStmt *block = dyn_cast<CompoundStmt>(body))
685 return emitCompoundStmtWithoutScope(*block);
686
687 return emitStmt(body, /*useCurrentScope=*/true);
688}
689
690static void eraseEmptyAndUnusedBlocks(cir::FuncOp func) {
691 // Remove any leftover blocks that are unreachable and empty, since they do
692 // not represent unreachable code useful for warnings nor anything deemed
693 // useful in general.
694 SmallVector<mlir::Block *> blocksToDelete;
695 for (mlir::Block &block : func.getBlocks()) {
696 if (block.empty() && block.getUses().empty())
697 blocksToDelete.push_back(&block);
698 }
699 for (mlir::Block *block : blocksToDelete)
700 block->erase();
701}
702
703cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
704 cir::FuncType funcType) {
705 const auto *funcDecl = cast<FunctionDecl>(gd.getDecl());
706 curGD = gd;
707
708 if (funcDecl->isInlineBuiltinDeclaration()) {
709 // When generating code for a builtin with an inline declaration, use a
710 // mangled name to hold the actual body, while keeping an external
711 // declaration in case the function pointer is referenced somewhere.
712 std::string fdInlineName = (cgm.getMangledName(funcDecl) + ".inline").str();
713 cir::FuncOp clone =
714 mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
715 if (!clone) {
716 mlir::OpBuilder::InsertionGuard guard(builder);
717 builder.setInsertionPoint(fn);
718 clone = cir::FuncOp::create(builder, fn.getLoc(), fdInlineName,
719 fn.getFunctionType());
720 clone.setLinkage(cir::GlobalLinkageKind::InternalLinkage);
721 clone.setSymVisibility("private");
722 clone.setInlineKind(cir::InlineKind::AlwaysInline);
723 }
724 fn.setLinkage(cir::GlobalLinkageKind::ExternalLinkage);
725 fn.setSymVisibility("private");
726 fn = clone;
727 } else {
728 // Detect the unusual situation where an inline version is shadowed by a
729 // non-inline version. In that case we should pick the external one
730 // everywhere. That's GCC behavior too.
731 for (const FunctionDecl *pd = funcDecl->getPreviousDecl(); pd;
732 pd = pd->getPreviousDecl()) {
733 if (LLVM_UNLIKELY(pd->isInlineBuiltinDeclaration())) {
734 std::string inlineName = funcDecl->getName().str() + ".inline";
735 if (auto inlineFn = mlir::cast_or_null<cir::FuncOp>(
736 cgm.getGlobalValue(inlineName))) {
737 // Replace all uses of the .inline function with the regular function
738 // FIXME: This performs a linear walk over the module. Introduce some
739 // caching here.
740 if (inlineFn
741 .replaceAllSymbolUses(fn.getSymNameAttr(), cgm.getModule())
742 .failed())
743 llvm_unreachable("Failed to replace inline builtin symbol uses");
744 inlineFn.erase();
745 }
746 break;
747 }
748 }
749 }
750
751 SourceLocation loc = funcDecl->getLocation();
752 Stmt *body = funcDecl->getBody();
753 SourceRange bodyRange =
754 body ? body->getSourceRange() : funcDecl->getLocation();
755
756 SourceLocRAIIObject fnLoc{*this, loc.isValid() ? getLoc(loc)
757 : builder.getUnknownLoc()};
758
759 auto validMLIRLoc = [&](clang::SourceLocation clangLoc) {
760 return clangLoc.isValid() ? getLoc(clangLoc) : builder.getUnknownLoc();
761 };
762 const mlir::Location fusedLoc = mlir::FusedLoc::get(
764 {validMLIRLoc(bodyRange.getBegin()), validMLIRLoc(bodyRange.getEnd())});
765 mlir::Block *entryBB = fn.addEntryBlock();
766
767 FunctionArgList args;
768 QualType retTy = buildFunctionArgList(gd, args);
769
770 // Create a scope in the symbol table to hold variable declarations.
772 {
773 LexicalScope lexScope(*this, fusedLoc, entryBB);
774
775 // Emit the standard function prologue.
776 startFunction(gd, retTy, fn, funcType, args, loc, bodyRange.getBegin());
777
778 // Save parameters for coroutine function.
779 if (body && isa_and_nonnull<CoroutineBodyStmt>(body))
780 llvm::append_range(fnArgs, funcDecl->parameters());
781
782 if (isa<CXXDestructorDecl>(funcDecl)) {
783 emitDestructorBody(args);
784 } else if (isa<CXXConstructorDecl>(funcDecl)) {
786 } else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice &&
787 funcDecl->hasAttr<CUDAGlobalAttr>()) {
788 cgm.getCUDARuntime().emitDeviceStub(*this, fn, args);
789 } else if (isa<CXXMethodDecl>(funcDecl) &&
790 cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker()) {
791 // The lambda static invoker function is special, because it forwards or
792 // clones the body of the function call operator (but is actually
793 // static).
795 } else if (funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) &&
796 (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() ||
797 cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator())) {
798 // Implicit copy-assignment gets the same special treatment as implicit
799 // copy-constructors.
801 } else if (body) {
802 // Emit standard function body.
803 if (mlir::failed(emitFunctionBody(body))) {
804 return nullptr;
805 }
806 } else {
807 // Anything without a body should have been handled above.
808 llvm_unreachable("no definition for normal function");
809 }
810
811 if (mlir::failed(fn.verifyBody()))
812 return nullptr;
813
814 finishFunction(bodyRange.getEnd());
815 }
816
818 return fn;
819}
820
823 const auto *ctor = cast<CXXConstructorDecl>(curGD.getDecl());
824 CXXCtorType ctorType = curGD.getCtorType();
825
826 assert((cgm.getTarget().getCXXABI().hasConstructorVariants() ||
827 ctorType == Ctor_Complete) &&
828 "can only generate complete ctor for this ABI");
829
830 cgm.setCXXSpecialMemberAttr(cast<cir::FuncOp>(curFn), ctor);
831
832 if (ctorType == Ctor_Complete && isConstructorDelegationValid(ctor) &&
833 cgm.getTarget().getCXXABI().hasConstructorVariants()) {
834 emitDelegateCXXConstructorCall(ctor, Ctor_Base, args, ctor->getEndLoc());
835 return;
836 }
837
838 const FunctionDecl *definition = nullptr;
839 Stmt *body = ctor->getBody(definition);
840 assert(definition == ctor && "emitting wrong constructor body");
841
842 if (isa_and_nonnull<CXXTryStmt>(body)) {
843 cgm.errorNYI(ctor->getSourceRange(), "emitConstructorBody: try body");
844 return;
845 }
846
849
850 // TODO: in restricted cases, we can emit the vbase initializers of a
851 // complete ctor and then delegate to the base ctor.
852
853 // Emit the constructor prologue, i.e. the base and member initializers.
854 emitCtorPrologue(ctor, ctorType, args);
855
856 // TODO(cir): propagate this result via mlir::logical result. Just unreachable
857 // now just to have it handled.
858 if (mlir::failed(emitStmt(body, true))) {
859 cgm.errorNYI(ctor->getSourceRange(),
860 "emitConstructorBody: emit body statement failed.");
861 return;
862 }
863}
864
865/// Emits the body of the current destructor.
867 const CXXDestructorDecl *dtor = cast<CXXDestructorDecl>(curGD.getDecl());
868 CXXDtorType dtorType = curGD.getDtorType();
869
870 cgm.setCXXSpecialMemberAttr(cast<cir::FuncOp>(curFn), dtor);
871
872 // For an abstract class, non-base destructors are never used (and can't
873 // be emitted in general, because vbase dtors may not have been validated
874 // by Sema), but the Itanium ABI doesn't make them optional and Clang may
875 // in fact emit references to them from other compilations, so emit them
876 // as functions containing a trap instruction.
877 if (dtorType != Dtor_Base && dtor->getParent()->isAbstract()) {
878 SourceLocation loc =
879 dtor->hasBody() ? dtor->getBody()->getBeginLoc() : dtor->getLocation();
880 emitTrap(getLoc(loc), true);
881 return;
882 }
883
884 Stmt *body = dtor->getBody();
886
887 // The call to operator delete in a deleting destructor happens
888 // outside of the function-try-block, which means it's always
889 // possible to delegate the destructor body to the complete
890 // destructor. Do so.
891 if (dtorType == Dtor_Deleting || dtorType == Dtor_VectorDeleting) {
893 cgm.errorNYI(dtor->getSourceRange(), "emitConditionalArrayDtorCall");
894 RunCleanupsScope dtorEpilogue(*this);
896 if (haveInsertPoint()) {
898 emitCXXDestructorCall(dtor, Dtor_Complete, /*forVirtualBase=*/false,
899 /*delegating=*/false, loadCXXThisAddress(), thisTy);
900 }
901 return;
902 }
903
904 // If the body is a function-try-block, enter the try before
905 // anything else.
906 const bool isTryBody = isa_and_nonnull<CXXTryStmt>(body);
907 if (isTryBody)
908 cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
909
911
912 // Enter the epilogue cleanups.
913 RunCleanupsScope dtorEpilogue(*this);
914
915 // If this is the complete variant, just invoke the base variant;
916 // the epilogue will destruct the virtual bases. But we can't do
917 // this optimization if the body is a function-try-block, because
918 // we'd introduce *two* handler blocks. In the Microsoft ABI, we
919 // always delegate because we might not have a definition in this TU.
920 switch (dtorType) {
921 case Dtor_Unified:
922 llvm_unreachable("not expecting a unified dtor");
923 case Dtor_Comdat:
924 llvm_unreachable("not expecting a COMDAT");
925 case Dtor_Deleting:
927 llvm_unreachable("already handled deleting case");
928
929 case Dtor_Complete:
930 assert((body || getTarget().getCXXABI().isMicrosoft()) &&
931 "can't emit a dtor without a body for non-Microsoft ABIs");
932
933 // Enter the cleanup scopes for virtual bases.
935
936 if (!isTryBody) {
938 emitCXXDestructorCall(dtor, Dtor_Base, /*forVirtualBase=*/false,
939 /*delegating=*/false, loadCXXThisAddress(), thisTy);
940 break;
941 }
942
943 // Fallthrough: act like we're in the base variant.
944 [[fallthrough]];
945
946 case Dtor_Base:
947 assert(body);
948
949 // Enter the cleanup scopes for fields and non-virtual bases.
951
953
954 if (isTryBody) {
955 cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
956 } else if (body) {
957 (void)emitStmt(body, /*useCurrentScope=*/true);
958 } else {
959 assert(dtor->isImplicit() && "bodyless dtor not implicit");
960 // nothing to do besides what's in the epilogue
961 }
962 // -fapple-kext must inline any call to this dtor into
963 // the caller's body.
965
966 break;
967 }
968
969 // Jump out through the epilogue cleanups.
970 dtorEpilogue.forceCleanup();
971
972 // Exit the try if applicable.
973 if (isTryBody)
974 cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
975}
976
977/// Given a value of type T* that may not be to a complete object, construct
978/// an l-vlaue withi the natural pointee alignment of T.
980 QualType ty) {
981 // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps
982 // assert on the result type first.
983 LValueBaseInfo baseInfo;
985 CharUnits align = cgm.getNaturalTypeAlignment(ty, &baseInfo);
986 return makeAddrLValue(Address(val, align), ty, baseInfo);
987}
988
990 QualType ty) {
991 LValueBaseInfo baseInfo;
992 CharUnits alignment = cgm.getNaturalTypeAlignment(ty, &baseInfo);
993 Address addr(val, convertTypeForMem(ty), alignment);
995 return makeAddrLValue(addr, ty, baseInfo);
996}
997
998// Map the LangOption for exception behavior into the corresponding enum in
999// the IR.
1000static llvm::fp::ExceptionBehavior
1002 switch (kind) {
1004 return llvm::fp::ebIgnore;
1006 return llvm::fp::ebMayTrap;
1008 return llvm::fp::ebStrict;
1010 llvm_unreachable("expected explicitly initialized exception behavior");
1011 }
1012 llvm_unreachable("unsupported FP exception behavior");
1013}
1014
1016 FunctionArgList &args) {
1017 const auto *fd = cast<FunctionDecl>(gd.getDecl());
1018 QualType retTy = fd->getReturnType();
1019
1020 const auto *md = dyn_cast<CXXMethodDecl>(fd);
1021 if (md && md->isInstance()) {
1022 if (cgm.getCXXABI().hasThisReturn(gd))
1023 cgm.errorNYI(fd->getSourceRange(), "this return");
1024 else if (cgm.getCXXABI().hasMostDerivedReturn(gd))
1025 cgm.errorNYI(fd->getSourceRange(), "most derived return");
1026 cgm.getCXXABI().buildThisParam(*this, args);
1027 }
1028
1029 if (const auto *cd = dyn_cast<CXXConstructorDecl>(fd))
1030 if (cd->getInheritedConstructor())
1031 cgm.errorNYI(fd->getSourceRange(),
1032 "buildFunctionArgList: inherited constructor");
1033
1034 for (auto *param : fd->parameters())
1035 args.push_back(param);
1036
1037 if (md && (isa<CXXConstructorDecl>(md) || isa<CXXDestructorDecl>(md)))
1038 cgm.getCXXABI().addImplicitStructorParams(*this, retTy, args);
1039
1040 return retTy;
1041}
1042
1043/// Emit code to compute a designator that specifies the location
1044/// of the expression.
1045/// FIXME: document this function better.
1047 // FIXME: ApplyDebugLocation DL(*this, e);
1048 switch (e->getStmtClass()) {
1049 default:
1051 std::string("l-value not implemented for '") +
1052 e->getStmtClassName() + "'");
1053 return LValue();
1054 case Expr::ConditionalOperatorClass:
1056 case Expr::BinaryConditionalOperatorClass:
1058 case Expr::ArraySubscriptExprClass:
1060 case Expr::ExtVectorElementExprClass:
1062 case Expr::UnaryOperatorClass:
1064 case Expr::StringLiteralClass:
1066 case Expr::MemberExprClass:
1068 case Expr::CompoundLiteralExprClass:
1070 case Expr::PredefinedExprClass:
1072 case Expr::BinaryOperatorClass:
1074 case Expr::CompoundAssignOperatorClass: {
1075 QualType ty = e->getType();
1076 if (ty->getAs<AtomicType>()) {
1077 cgm.errorNYI(e->getSourceRange(),
1078 "CompoundAssignOperator with AtomicType");
1079 return LValue();
1080 }
1081 if (!ty->isAnyComplexType())
1083
1085 }
1086 case Expr::CallExprClass:
1087 case Expr::CXXMemberCallExprClass:
1088 case Expr::CXXOperatorCallExprClass:
1089 case Expr::UserDefinedLiteralClass:
1091 case Expr::ExprWithCleanupsClass: {
1092 const auto *cleanups = cast<ExprWithCleanups>(e);
1093 RunCleanupsScope scope(*this);
1094 LValue lv = emitLValue(cleanups->getSubExpr());
1096 return lv;
1097 }
1098 case Expr::CXXDefaultArgExprClass: {
1099 auto *dae = cast<CXXDefaultArgExpr>(e);
1100 CXXDefaultArgExprScope scope(*this, dae);
1101 return emitLValue(dae->getExpr());
1102 }
1103 case Expr::CXXTypeidExprClass:
1105 case Expr::ParenExprClass:
1106 return emitLValue(cast<ParenExpr>(e)->getSubExpr());
1107 case Expr::GenericSelectionExprClass:
1108 return emitLValue(cast<GenericSelectionExpr>(e)->getResultExpr());
1109 case Expr::DeclRefExprClass:
1111 case Expr::ImplicitCastExprClass:
1112 case Expr::CStyleCastExprClass:
1113 case Expr::CXXStaticCastExprClass:
1114 case Expr::CXXDynamicCastExprClass:
1115 case Expr::CXXReinterpretCastExprClass:
1116 case Expr::CXXConstCastExprClass:
1117 // TODO(cir): The above list is missing CXXFunctionalCastExprClass,
1118 // CXXAddrSpaceCastExprClass, and ObjCBridgedCastExprClass.
1119 return emitCastLValue(cast<CastExpr>(e));
1120 case Expr::MaterializeTemporaryExprClass:
1122 case Expr::OpaqueValueExprClass:
1124 case Expr::ChooseExprClass:
1125 return emitLValue(cast<ChooseExpr>(e)->getChosenSubExpr());
1126 case Expr::SubstNonTypeTemplateParmExprClass:
1127 return emitLValue(cast<SubstNonTypeTemplateParmExpr>(e)->getReplacement());
1128 }
1129}
1130
1131static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt) {
1132 SmallString<256> buffer;
1133 llvm::raw_svector_ostream out(buffer);
1134 out << name << cnt;
1135 return std::string(out.str());
1136}
1137
1139 return getVersionedTmpName("ref.tmp", counterRefTmp++);
1140}
1141
1143 return getVersionedTmpName("agg.tmp", counterAggTmp++);
1144}
1145
1146void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address destPtr,
1147 QualType ty) {
1148 // Ignore empty classes in C++.
1149 if (getLangOpts().CPlusPlus)
1150 if (const auto *rd = ty->getAsCXXRecordDecl(); rd && rd->isEmpty())
1151 return;
1152
1153 // Cast the dest ptr to the appropriate i8 pointer type.
1154 if (builder.isInt8Ty(destPtr.getElementType())) {
1155 cgm.errorNYI(loc, "Cast the dest ptr to the appropriate i8 pointer type");
1156 }
1157
1158 // Get size and alignment info for this aggregate.
1159 const CharUnits size = getContext().getTypeSizeInChars(ty);
1160 if (size.isZero()) {
1161 // But note that getTypeInfo returns 0 for a VLA.
1162 if (isa<VariableArrayType>(getContext().getAsArrayType(ty))) {
1163 cgm.errorNYI(loc,
1164 "emitNullInitialization for zero size VariableArrayType");
1165 } else {
1166 return;
1167 }
1168 }
1169
1170 // If the type contains a pointer to data member we can't memset it to zero.
1171 // Instead, create a null constant and copy it to the destination.
1172 // TODO: there are other patterns besides zero that we can usefully memset,
1173 // like -1, which happens to be the pattern used by member-pointers.
1174 if (!cgm.getTypes().isZeroInitializable(ty)) {
1175 cgm.errorNYI(loc, "type is not zero initializable");
1176 }
1177
1178 // In LLVM Codegen: otherwise, just memset the whole thing to zero using
1179 // Builder.CreateMemSet. In CIR just emit a store of #cir.zero to the
1180 // respective address.
1181 // Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
1182 const mlir::Value zeroValue = builder.getNullValue(convertType(ty), loc);
1183 builder.createStore(loc, zeroValue, destPtr);
1184}
1185
1187 const clang::Expr *e)
1188 : cgf(cgf) {
1189 ConstructorHelper(e->getFPFeaturesInEffect(cgf.getLangOpts()));
1190}
1191
1193 FPOptions fpFeatures)
1194 : cgf(cgf) {
1195 ConstructorHelper(fpFeatures);
1196}
1197
1198void CIRGenFunction::CIRGenFPOptionsRAII::ConstructorHelper(
1199 FPOptions fpFeatures) {
1200 oldFPFeatures = cgf.curFPFeatures;
1201 cgf.curFPFeatures = fpFeatures;
1202
1203 oldExcept = cgf.builder.getDefaultConstrainedExcept();
1204 oldRounding = cgf.builder.getDefaultConstrainedRounding();
1205
1206 if (oldFPFeatures == fpFeatures)
1207 return;
1208
1209 // TODO(cir): create guard to restore fast math configurations.
1211
1212 [[maybe_unused]] llvm::RoundingMode newRoundingBehavior =
1213 fpFeatures.getRoundingMode();
1214 // TODO(cir): override rounding behaviour once FM configs are guarded.
1215 [[maybe_unused]] llvm::fp::ExceptionBehavior newExceptionBehavior =
1217 fpFeatures.getExceptionMode()));
1218 // TODO(cir): override exception behaviour once FM configs are guarded.
1219
1220 // TODO(cir): override FP flags once FM configs are guarded.
1222
1223 assert((cgf.curFuncDecl == nullptr || cgf.builder.getIsFPConstrained() ||
1224 isa<CXXConstructorDecl>(cgf.curFuncDecl) ||
1225 isa<CXXDestructorDecl>(cgf.curFuncDecl) ||
1226 (newExceptionBehavior == llvm::fp::ebIgnore &&
1227 newRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
1228 "FPConstrained should be enabled on entire function");
1229
1230 // TODO(cir): mark CIR function with fast math attributes.
1232}
1233
1235 cgf.curFPFeatures = oldFPFeatures;
1236 cgf.builder.setDefaultConstrainedExcept(oldExcept);
1237 cgf.builder.setDefaultConstrainedRounding(oldRounding);
1238}
1239
1240// TODO(cir): should be shared with LLVM codegen.
1242 const Expr *e = ce->getSubExpr();
1243
1244 if (ce->getCastKind() == CK_UncheckedDerivedToBase)
1245 return false;
1246
1247 if (isa<CXXThisExpr>(e->IgnoreParens())) {
1248 // We always assume that 'this' is never null.
1249 return false;
1250 }
1251
1252 if (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(ce)) {
1253 // And that glvalue casts are never null.
1254 if (ice->isGLValue())
1255 return false;
1256 }
1257
1258 return true;
1259}
1260
1261/// Computes the length of an array in elements, as well as the base
1262/// element type and a properly-typed first element pointer.
1263mlir::Value
1265 QualType &baseType, Address &addr) {
1266 const clang::ArrayType *arrayType = origArrayType;
1267
1268 // If it's a VLA, we have to load the stored size. Note that
1269 // this is the size of the VLA in bytes, not its size in elements.
1272 cgm.errorNYI(*currSrcLoc, "VLAs");
1273 return builder.getConstInt(*currSrcLoc, sizeTy, 0);
1274 }
1275
1276 uint64_t countFromCLAs = 1;
1277 QualType eltType;
1278
1279 auto cirArrayType = mlir::dyn_cast<cir::ArrayType>(addr.getElementType());
1280
1281 while (cirArrayType) {
1283 countFromCLAs *= cirArrayType.getSize();
1284 eltType = arrayType->getElementType();
1285
1286 cirArrayType =
1287 mlir::dyn_cast<cir::ArrayType>(cirArrayType.getElementType());
1288
1289 arrayType = getContext().getAsArrayType(arrayType->getElementType());
1290 assert((!cirArrayType || arrayType) &&
1291 "CIR and Clang types are out-of-sync");
1292 }
1293
1294 if (arrayType) {
1295 // From this point onwards, the Clang array type has been emitted
1296 // as some other type (probably a packed struct). Compute the array
1297 // size, and just emit the 'begin' expression as a bitcast.
1298 cgm.errorNYI(*currSrcLoc, "length for non-array underlying types");
1299 }
1300
1301 baseType = eltType;
1302 return builder.getConstInt(*currSrcLoc, sizeTy, countFromCLAs);
1303}
1304
1306 // If we already made the indirect branch for indirect goto, return its block.
1308 return;
1309
1310 mlir::OpBuilder::InsertionGuard guard(builder);
1312 builder.createBlock(builder.getBlock()->getParent(), {}, {voidPtrTy},
1313 {builder.getUnknownLoc()});
1314}
1315
1317 mlir::Value ptrValue, QualType ty, SourceLocation loc,
1318 SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue) {
1320 return cir::AssumeAlignedOp::create(builder, getLoc(assumptionLoc), ptrValue,
1321 alignment, offsetValue);
1322}
1323
1325 mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc,
1326 int64_t alignment, mlir::Value offsetValue) {
1327 QualType ty = expr->getType();
1328 SourceLocation loc = expr->getExprLoc();
1329 return emitAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment,
1330 offsetValue);
1331}
1332
1334 const VariableArrayType *vla =
1335 cgm.getASTContext().getAsVariableArrayType(type);
1336 assert(vla && "type was not a variable array type!");
1337 return getVLASize(vla);
1338}
1339
1342 // The number of elements so far; always size_t.
1343 mlir::Value numElements;
1344
1345 QualType elementType;
1346 do {
1347 elementType = type->getElementType();
1348 mlir::Value vlaSize = vlaSizeMap[type->getSizeExpr()];
1349 assert(vlaSize && "no size for VLA!");
1350 assert(vlaSize.getType() == sizeTy);
1351
1352 if (!numElements) {
1353 numElements = vlaSize;
1354 } else {
1355 // It's undefined behavior if this wraps around, so mark it that way.
1356 // FIXME: Teach -fsanitize=undefined to trap this.
1357
1358 numElements =
1359 builder.createMul(numElements.getLoc(), numElements, vlaSize,
1361 }
1362 } while ((type = getContext().getAsVariableArrayType(elementType)));
1363
1364 assert(numElements && "Undefined elements number");
1365 return {numElements, elementType};
1366}
1367
1370 mlir::Value vlaSize = vlaSizeMap[vla->getSizeExpr()];
1371 assert(vlaSize && "no size for VLA!");
1372 assert(vlaSize.getType() == sizeTy);
1373 return {vlaSize, vla->getElementType()};
1374}
1375
1376// TODO(cir): Most of this function can be shared between CIRGen
1377// and traditional LLVM codegen
1379 assert(type->isVariablyModifiedType() &&
1380 "Must pass variably modified type to EmitVLASizes!");
1381
1382 // We're going to walk down into the type and look for VLA
1383 // expressions.
1384 do {
1385 assert(type->isVariablyModifiedType());
1386
1387 const Type *ty = type.getTypePtr();
1388 switch (ty->getTypeClass()) {
1389 case Type::CountAttributed:
1390 case Type::PackIndexing:
1391 case Type::ArrayParameter:
1392 case Type::HLSLAttributedResource:
1393 case Type::HLSLInlineSpirv:
1394 case Type::PredefinedSugar:
1395 cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
1396 break;
1397
1398#define TYPE(Class, Base)
1399#define ABSTRACT_TYPE(Class, Base)
1400#define NON_CANONICAL_TYPE(Class, Base)
1401#define DEPENDENT_TYPE(Class, Base) case Type::Class:
1402#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
1403#include "clang/AST/TypeNodes.inc"
1404 llvm_unreachable(
1405 "dependent type must be resolved before the CIR codegen");
1406
1407 // These types are never variably-modified.
1408 case Type::Builtin:
1409 case Type::Complex:
1410 case Type::Vector:
1411 case Type::ExtVector:
1412 case Type::ConstantMatrix:
1413 case Type::Record:
1414 case Type::Enum:
1415 case Type::Using:
1416 case Type::TemplateSpecialization:
1417 case Type::ObjCTypeParam:
1418 case Type::ObjCObject:
1419 case Type::ObjCInterface:
1420 case Type::ObjCObjectPointer:
1421 case Type::BitInt:
1422 case Type::OverflowBehavior:
1423 llvm_unreachable("type class is never variably-modified!");
1424
1425 case Type::Adjusted:
1426 type = cast<clang::AdjustedType>(ty)->getAdjustedType();
1427 break;
1428
1429 case Type::Decayed:
1430 type = cast<clang::DecayedType>(ty)->getPointeeType();
1431 break;
1432
1433 case Type::Pointer:
1434 type = cast<clang::PointerType>(ty)->getPointeeType();
1435 break;
1436
1437 case Type::BlockPointer:
1438 type = cast<clang::BlockPointerType>(ty)->getPointeeType();
1439 break;
1440
1441 case Type::LValueReference:
1442 case Type::RValueReference:
1443 type = cast<clang::ReferenceType>(ty)->getPointeeType();
1444 break;
1445
1446 case Type::MemberPointer:
1447 type = cast<clang::MemberPointerType>(ty)->getPointeeType();
1448 break;
1449
1450 case Type::ConstantArray:
1451 case Type::IncompleteArray:
1452 // Losing element qualification here is fine.
1453 type = cast<clang::ArrayType>(ty)->getElementType();
1454 break;
1455
1456 case Type::VariableArray: {
1457 // Losing element qualification here is fine.
1459
1460 // Unknown size indication requires no size computation.
1461 // Otherwise, evaluate and record it.
1462 if (const Expr *sizeExpr = vat->getSizeExpr()) {
1463 // It's possible that we might have emitted this already,
1464 // e.g. with a typedef and a pointer to it.
1465 mlir::Value &entry = vlaSizeMap[sizeExpr];
1466 if (!entry) {
1467 mlir::Value size = emitScalarExpr(sizeExpr);
1469
1470 // Always zexting here would be wrong if it weren't
1471 // undefined behavior to have a negative bound.
1472 // FIXME: What about when size's type is larger than size_t?
1473 entry = builder.createIntCast(size, sizeTy);
1474 }
1475 }
1476 type = vat->getElementType();
1477 break;
1478 }
1479
1480 case Type::FunctionProto:
1481 case Type::FunctionNoProto:
1482 type = cast<clang::FunctionType>(ty)->getReturnType();
1483 break;
1484
1485 case Type::Paren:
1486 case Type::TypeOf:
1487 case Type::UnaryTransform:
1488 case Type::Attributed:
1489 case Type::BTFTagAttributed:
1490 case Type::SubstTemplateTypeParm:
1491 case Type::MacroQualified:
1492 // Keep walking after single level desugaring.
1493 type = type.getSingleStepDesugaredType(getContext());
1494 break;
1495
1496 case Type::Typedef:
1497 case Type::Decltype:
1498 case Type::Auto:
1499 case Type::DeducedTemplateSpecialization:
1500 // Stop walking: nothing to do.
1501 return;
1502
1503 case Type::TypeOfExpr:
1504 // Stop walking: emit typeof expression.
1505 emitIgnoredExpr(cast<clang::TypeOfExprType>(ty)->getUnderlyingExpr());
1506 return;
1507
1508 case Type::Atomic:
1509 type = cast<clang::AtomicType>(ty)->getValueType();
1510 break;
1511
1512 case Type::Pipe:
1513 type = cast<clang::PipeType>(ty)->getElementType();
1514 break;
1515 }
1516 } while (type->isVariablyModifiedType());
1517}
1518
1520 if (getContext().getBuiltinVaListType()->isArrayType())
1521 return emitPointerWithAlignment(e);
1522 return emitLValue(e).getAddress();
1523}
1524
1525} // namespace clang::CIRGen
Defines the clang::Expr interface and subclasses for C++ expressions.
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
__device__ __2f16 float __ockl_bool s
APSInt & getInt()
Definition APValue.h:508
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
SourceManager & getSourceManager()
Definition ASTContext.h:858
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3730
QualType getElementType() const
Definition TypeBase.h:3742
mlir::Type getElementType() const
Definition Address.h:123
mlir::Value createFloatingCast(mlir::Value v, mlir::Type destType)
CIRGenFPOptionsRAII(CIRGenFunction &cgf, FPOptions FPFeatures)
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
void forceCleanup(ArrayRef< mlir::Value * > valuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
static bool isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor)
Checks whether the given constructor is a valid subject for the complete-to-base constructor delegati...
void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB, const FunctionDecl *fd, SourceLocation bodyBeginLoc)
Emit the function prologue: declare function arguments in the symbol table.
mlir::Type convertType(clang::QualType t)
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
EHScopeStack::stable_iterator prologueCleanupDepth
The cleanup depth enclosing all the cleanups associated with the parameters.
cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn, cir::FuncType funcType)
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-vlaue withi the natural...
LValue emitMemberExpr(const MemberExpr *e)
const TargetInfo & getTarget() const
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
const clang::Decl * curFuncDecl
LValue emitLValueForLambdaField(const FieldDecl *field)
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
llvm::DenseMap< const Expr *, mlir::Value > vlaSizeMap
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e)
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, const FunctionArgList &args, clang::SourceLocation loc)
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
mlir::Value emitArrayLength(const clang::ArrayType *arrayType, QualType &baseType, Address &addr)
Computes the length of an array in elements, as well as the base element type and a properly-typed fi...
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
mlir::Block * indirectGotoBlock
IndirectBranch - The first time an indirect goto is seen we create a block reserved for the indirect ...
mlir::Operation * curFn
The current function or global initializer that is generated code for.
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void enterDtorCleanups(const CXXDestructorDecl *dtor, CXXDtorType type)
Enter the cleanups necessary to complete the given phase of destruction for a destructor.
llvm::SmallVector< const ParmVarDecl * > fnArgs
Save Parameter Decl for coroutine.
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
void emitImplicitAssignmentOperatorBody(FunctionArgList &args)
mlir::Type convertTypeForMem(QualType t)
clang::QualType buildFunctionArgList(clang::GlobalDecl gd, FunctionArgList &args)
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, FunctionArgList &args)
This routine generates necessary code to initialize base classes and non-static data members belongin...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
Address returnValue
The temporary alloca to hold the return value.
void finishFunction(SourceLocation endLoc)
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
clang::FieldDecl * lambdaThisCaptureField
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
void emitConstructorBody(FunctionArgList &args)
LValue emitCallExprLValue(const clang::CallExpr *e)
bool haveInsertPoint() const
True if an insertion point is defined.
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth, ArrayRef< mlir::Value * > valuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
bool shouldNullCheckClassCastValue(const CastExpr *ce)
CIRGenBuilderTy & getBuilder()
bool didCallStackSave
Whether a cir.stacksave operation has been added.
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
void startFunction(clang::GlobalDecl gd, clang::QualType returnType, cir::FuncOp fn, cir::FuncType funcType, FunctionArgList args, clang::SourceLocation loc, clang::SourceLocation startLoc)
Emit code for the start of a function.
unsigned counterRefTmp
Hold counters for incrementally naming temporaries.
mlir::MLIRContext & getMLIRContext()
void emitDestructorBody(FunctionArgList &args)
Emits the body of the current destructor.
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
LValue emitCXXTypeidLValue(const CXXTypeidExpr *e)
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
LValue emitPredefinedLValue(const PredefinedExpr *e)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
void emitLambdaStaticInvokeBody(const CXXMethodDecl *md)
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
Set the address of a local variable.
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
Type for representing both the decl and type of parameters to a function.
Definition CIRGenCall.h:193
Address getAddress() const
mlir::Value getPointer() const
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
Represents a C++ destructor within a class.
Definition DeclCXX.h:2876
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition DeclCXX.h:2262
QualType getFunctionObjectParameterType() const
Definition DeclCXX.h:2286
bool isAbstract() const
Determine whether this class has a pure virtual function.
Definition DeclCXX.h:1221
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition DeclCXX.h:1186
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
CastKind getCastKind() const
Definition Expr.h:3723
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1732
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition DeclBase.h:593
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks,...
SourceLocation getLocation() const
Definition DeclBase.h:439
bool hasAttr() const
Definition DeclBase.h:577
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Returns the set of floating point options that apply to this expression.
Definition Expr.cpp:3971
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
QualType getType() const
Definition Expr.h:144
LangOptions::FPExceptionModeKind getExceptionMode() const
RoundingMode getRoundingMode() const
Represents a function declaration or definition.
Definition Decl.h:2000
Stmt * getBody(const FunctionDecl *&Definition) const
Retrieve the body (definition) of the function.
Definition Decl.cpp:3280
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4550
bool hasBody(const FunctionDecl *&Definition) const
Returns true if the function has a body.
Definition Decl.cpp:3200
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition Expr.h:3856
FPExceptionModeKind
Possible floating point exception behavior.
@ FPE_Default
Used internally to represent initial unspecified value.
@ FPE_Strict
Strictly preserve the floating-point exception semantics.
@ FPE_MayTrap
Transformations do not cause new exceptions but may hide some.
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
unsigned getLine() const
Return the presumed line number of this location.
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2911
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
This class handles loading and caching of source files into memory.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location that a SourceLocation specifies.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition Stmt.h:86
StmtClass getStmtClass() const
Definition Stmt.h:1485
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
const char * getStmtClassName() const
Definition Stmt.cpp:86
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:355
bool isVoidType() const
Definition TypeBase.h:8991
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isAnyComplexType() const
Definition TypeBase.h:8760
TypeClass getTypeClass() const
Definition TypeBase.h:2391
const T * getAs() const
Member-template getAs<specific type>.
Definition TypeBase.h:9218
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:2202
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3974
Expr * getSizeExpr() const
Definition TypeBase.h:3988
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
static bool previousOpIsNonYieldingCleanup(mlir::Block *block)
static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt)
static bool mayDropFunctionReturn(const ASTContext &astContext, QualType returnType)
static mlir::Value emitArgumentDemotion(CIRGenFunction &cgf, const VarDecl *var, mlir::Value value)
An argument came in as a promoted argument; demote it back to its declared type.
static void eraseEmptyAndUnusedBlocks(cir::FuncOp func)
static llvm::fp::ExceptionBehavior toConstrainedExceptMd(LangOptions::FPExceptionModeKind kind)
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
@ Ctor_Base
Base object ctor.
Definition ABI.h:26
@ Ctor_Complete
Complete object ctor.
Definition ABI.h:25
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
CXXDtorType
C++ destructor types.
Definition ABI.h:34
@ Dtor_VectorDeleting
Vector deleting dtor.
Definition ABI.h:40
@ Dtor_Comdat
The COMDAT used for dtors.
Definition ABI.h:38
@ Dtor_Unified
GCC-style unified dtor.
Definition ABI.h:39
@ Dtor_Base
Base object dtor.
Definition ABI.h:37
@ Dtor_Complete
Complete object dtor.
Definition ABI.h:36
@ Dtor_Deleting
Deleting dtor.
Definition ABI.h:35
U cast(CodeGen::Address addr)
Definition Address.h:327
static bool fastMathFuncAttributes()
static bool vtableInitialization()
static bool constructABIArgDirectExtend()
static bool runCleanupsScope()
static bool emitTypeCheck()
static bool fastMathGuard()
static bool fastMathFlags()
static bool generateDebugInfo()
static bool cleanupWithPreservedValues()
static bool incrementProfileCounter()
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
llvm::ArrayRef< mlir::Block * > getRetBlocks()
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
mlir::Block * getCleanupBlock(mlir::OpBuilder &builder)
mlir::Location getRetLoc(mlir::Block *b)
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650