clang 23.0.0git
CIRGenFunction.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Internal per-function state used for AST-to-ClangIR code gen
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
14
15#include "CIRGenCXXABI.h"
16#include "CIRGenCall.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/Location.h"
19#include "clang/AST/Attr.h"
20#include "clang/AST/ExprCXX.h"
23#include "llvm/ADT/ScopeExit.h"
24#include "llvm/IR/FPEnv.h"
25
26#include <cassert>
27
28namespace clang::CIRGen {
29
31 bool suppressNewContext)
32 : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
33 ehStack.setCGF(this);
34}
35
37
38// This is copied from clang/lib/CodeGen/CodeGenFunction.cpp
40 type = type.getCanonicalType();
41 while (true) {
42 switch (type->getTypeClass()) {
43#define TYPE(name, parent)
44#define ABSTRACT_TYPE(name, parent)
45#define NON_CANONICAL_TYPE(name, parent) case Type::name:
46#define DEPENDENT_TYPE(name, parent) case Type::name:
47#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
48#include "clang/AST/TypeNodes.inc"
49 llvm_unreachable("non-canonical or dependent type in IR-generation");
50
51 case Type::Auto:
52 case Type::DeducedTemplateSpecialization:
53 llvm_unreachable("undeduced type in IR-generation");
54
55 // Various scalar types.
56 case Type::Builtin:
57 case Type::Pointer:
58 case Type::BlockPointer:
59 case Type::LValueReference:
60 case Type::RValueReference:
61 case Type::MemberPointer:
62 case Type::Vector:
63 case Type::ExtVector:
64 case Type::ConstantMatrix:
65 case Type::FunctionProto:
66 case Type::FunctionNoProto:
67 case Type::Enum:
68 case Type::ObjCObjectPointer:
69 case Type::Pipe:
70 case Type::BitInt:
71 case Type::OverflowBehavior:
72 case Type::HLSLAttributedResource:
73 case Type::HLSLInlineSpirv:
74 return cir::TEK_Scalar;
75
76 // Complexes.
77 case Type::Complex:
78 return cir::TEK_Complex;
79
80 // Arrays, records, and Objective-C objects.
81 case Type::ConstantArray:
82 case Type::IncompleteArray:
83 case Type::VariableArray:
84 case Type::Record:
85 case Type::ObjCObject:
86 case Type::ObjCInterface:
87 case Type::ArrayParameter:
88 return cir::TEK_Aggregate;
89
90 // We operate on atomic values according to their underlying type.
91 case Type::Atomic:
92 type = cast<AtomicType>(type)->getValueType();
93 continue;
94 }
95 llvm_unreachable("unknown type kind!");
96 }
97}
98
100 return cgm.getTypes().convertTypeForMem(t);
101}
102
104 return cgm.getTypes().convertType(t);
105}
106
108 // Some AST nodes might contain invalid source locations (e.g.
109 // CXXDefaultArgExpr), workaround that to still get something out.
110 if (srcLoc.isValid()) {
112 PresumedLoc pLoc = sm.getPresumedLoc(srcLoc);
113 StringRef filename = pLoc.getFilename();
114 return mlir::FileLineColLoc::get(builder.getStringAttr(filename),
115 pLoc.getLine(), pLoc.getColumn());
116 }
117 // We expect to have a currSrcLoc set, so we assert here, but it isn't
118 // critical for the correctness of compilation, so in non-assert builds
119 // we fallback on using an unknown location.
120 assert(currSrcLoc && "expected to inherit some source location");
121 if (currSrcLoc)
122 return *currSrcLoc;
123 // We're brave, but time to give up.
124 return builder.getUnknownLoc();
125}
126
127mlir::Location CIRGenFunction::getLoc(SourceRange srcLoc) {
128 // Some AST nodes might contain invalid source locations (e.g.
129 // CXXDefaultArgExpr), workaround that to still get something out.
130 if (srcLoc.isValid()) {
131 mlir::Location beg = getLoc(srcLoc.getBegin());
132 mlir::Location end = getLoc(srcLoc.getEnd());
133 SmallVector<mlir::Location, 2> locs = {beg, end};
134 mlir::Attribute metadata;
135 return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
136 }
137 // We expect to have a currSrcLoc set, so we assert here, but it isn't
138 // critical for the correctness of compilation, so in non-assert builds
139 // we fallback on using an unknown location.
140 assert(currSrcLoc && "expected to inherit some source location");
141 if (currSrcLoc)
142 return *currSrcLoc;
143 // We're brave, but time to give up.
144 return builder.getUnknownLoc();
145}
146
147mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) {
148 SmallVector<mlir::Location, 2> locs = {lhs, rhs};
149 mlir::Attribute metadata;
150 return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
151}
152
153bool CIRGenFunction::containsLabel(const Stmt *s, bool ignoreCaseStmts) {
154 // Null statement, not a label!
155 if (!s)
156 return false;
157
158 // If this is a label, we have to emit the code, consider something like:
159 // if (0) { ... foo: bar(); } goto foo;
160 //
161 // TODO: If anyone cared, we could track __label__'s, since we know that you
162 // can't jump to one from outside their declared region.
163 if (isa<LabelStmt>(s))
164 return true;
165
166 // If this is a case/default statement, and we haven't seen a switch, we
167 // have to emit the code.
168 if (isa<SwitchCase>(s) && !ignoreCaseStmts)
169 return true;
170
171 // If this is a switch statement, we want to ignore case statements when we
172 // recursively process the sub-statements of the switch. If we haven't
173 // encountered a switch statement, we treat case statements like labels, but
174 // if we are processing a switch statement, case statements are expected.
175 if (isa<SwitchStmt>(s))
176 ignoreCaseStmts = true;
177
178 // Scan subexpressions for verboten labels.
179 return std::any_of(s->child_begin(), s->child_end(),
180 [=](const Stmt *subStmt) {
181 return containsLabel(subStmt, ignoreCaseStmts);
182 });
183}
184
185/// If the specified expression does not fold to a constant, or if it does but
186/// contains a label, return false. If it constant folds return true and set
187/// the boolean result in Result.
188bool CIRGenFunction::constantFoldsToBool(const Expr *cond, bool &resultBool,
189 bool allowLabels) {
190 llvm::APSInt resultInt;
191 if (!constantFoldsToSimpleInteger(cond, resultInt, allowLabels))
192 return false;
193
194 resultBool = resultInt.getBoolValue();
195 return true;
196}
197
198/// If the specified expression does not fold to a constant, or if it does
199/// fold but contains a label, return false. If it constant folds, return
200/// true and set the folded value.
202 llvm::APSInt &resultInt,
203 bool allowLabels) {
204 // FIXME: Rename and handle conversion of other evaluatable things
205 // to bool.
206 Expr::EvalResult result;
207 if (!cond->EvaluateAsInt(result, getContext()))
208 return false; // Not foldable, not integer or not fully evaluatable.
209
210 llvm::APSInt intValue = result.Val.getInt();
211 if (!allowLabels && containsLabel(cond))
212 return false; // Contains a label.
213
214 resultInt = intValue;
215 return true;
216}
217
218void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
219 CharUnits alignment) {
220 if (!type->isVoidType()) {
221 mlir::Value addr = emitAlloca("__retval", convertType(type), loc, alignment,
222 /*insertIntoFnEntryBlock=*/false);
223 fnRetAlloca = addr;
224 returnValue = Address(addr, alignment);
225 }
226}
227
228void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
229 mlir::Location loc, CharUnits alignment,
230 bool isParam) {
231 assert(isa<NamedDecl>(var) && "Needs a named decl");
232 assert(!symbolTable.count(var) && "not supposed to be available just yet");
233
234 auto allocaOp = addrVal.getDefiningOp<cir::AllocaOp>();
235 assert(allocaOp && "expected cir::AllocaOp");
236
237 if (isParam)
238 allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
239 if (ty->isReferenceType() || ty.isConstQualified())
240 allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext()));
241
242 symbolTable.insert(var, allocaOp);
243}
244
246 CIRGenBuilderTy &builder = cgf.builder;
247 LexicalScope *localScope = cgf.curLexScope;
248
249 // Process all return blocks — emit cir.return ops.
250 // TODO(cir): Handle returning from a switch statement through a cleanup
251 // block. We can't simply jump to the cleanup block, because the cleanup block
252 // is not part of the case region. Either reemit all cleanups in the return
253 // block or wait for MLIR structured control flow to support early exits.
255 for (mlir::Block *retBlock : localScope->getRetBlocks()) {
256 mlir::OpBuilder::InsertionGuard guard(builder);
257 builder.setInsertionPointToEnd(retBlock);
258 retBlocks.push_back(retBlock);
259 mlir::Location retLoc = localScope->getRetLoc(retBlock);
260 emitReturn(retLoc);
261 }
262
263 // Pop cleanup scopes from the EH stack. In CIR, this emits cleanup code
264 // into the cleanup regions of cir.cleanup.scope ops — no CFG-level cleanup
265 // blocks or branches are needed.
266 if (performCleanup) {
268 forceCleanup();
269 }
270
271 mlir::Block *curBlock = builder.getBlock();
272 if (isGlobalInit() && !curBlock)
273 return;
274 if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
275 return;
276
277 // Get rid of any empty block at the end of the scope. An empty non-entry
278 // block is created when a terminator (return/break/continue) is followed
279 // by unreachable code.
280 bool isEntryBlock = builder.getInsertionBlock()->isEntryBlock();
281 if (!isEntryBlock && curBlock->empty()) {
282 curBlock->erase();
283 for (mlir::Block *retBlock : retBlocks) {
284 if (retBlock->getUses().empty())
285 retBlock->erase();
286 }
287 return;
288 }
289
290 if (localScope->depth == 0) {
291 // Reached the end of the function.
292 if (localScope->getRetBlocks().size() == 1) {
293 mlir::Block *retBlock = localScope->getRetBlocks()[0];
294 mlir::Location retLoc = localScope->getRetLoc(retBlock);
295 if (retBlock->getUses().empty()) {
296 retBlock->erase();
297 } else {
298 cir::BrOp::create(builder, retLoc, retBlock);
299 return;
300 }
301 }
302 emitImplicitReturn();
303 return;
304 }
305
306 // End of any local scope != function.
307 // Ternary ops have to deal with matching arms for yielding types
308 // and do return a value, it must do its own cir.yield insertion.
309 if (!localScope->isTernary() && !curBlock->mightHaveTerminator()) {
310 !retVal ? cir::YieldOp::create(builder, localScope->endLoc)
311 : cir::YieldOp::create(builder, localScope->endLoc, retVal);
312 }
313}
314
315cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
316 CIRGenBuilderTy &builder = cgf.getBuilder();
317
318 auto fn = dyn_cast<cir::FuncOp>(cgf.curFn);
319 assert(fn && "emitReturn from non-function");
320
321 // If we are on a coroutine, add the coro_end builtin call.
322 if (fn.getCoroutine())
323 cgf.emitCoroEndBuiltinCall(loc,
324 builder.getNullPtr(builder.getVoidPtrTy(), loc));
325 if (!fn.getFunctionType().hasVoidReturn()) {
326 // Load the value from `__retval` and return it via the `cir.return` op.
327 auto value = cir::LoadOp::create(
328 builder, loc, fn.getFunctionType().getReturnType(), *cgf.fnRetAlloca);
329 return cir::ReturnOp::create(builder, loc,
330 llvm::ArrayRef(value.getResult()));
331 }
332 return cir::ReturnOp::create(builder, loc);
333}
334
335// This is copied from CodeGenModule::MayDropFunctionReturn. This is a
336// candidate for sharing between CIRGen and CodeGen.
337static bool mayDropFunctionReturn(const ASTContext &astContext,
338 QualType returnType) {
339 // We can't just discard the return value for a record type with a complex
340 // destructor or a non-trivially copyable type.
341 if (const auto *classDecl = returnType->getAsCXXRecordDecl())
342 return classDecl->hasTrivialDestructor();
343 return returnType.isTriviallyCopyableType(astContext);
344}
345
346static bool previousOpIsNonYieldingCleanup(mlir::Block *block) {
347 if (block->empty())
348 return false;
349 mlir::Operation *op = &block->back();
350 auto cleanupScopeOp = mlir::dyn_cast<cir::CleanupScopeOp>(op);
351 if (!cleanupScopeOp)
352 return false;
353
354 // Check whether the body region of the cleanup scope exits via cir.yield.
355 // Exits via cir.return or cir.goto do not fall through to the operation
356 // following the cleanup scope, and exits via break, continue, and resume
357 // are not expected here.
358 for (mlir::Block &bodyBlock : cleanupScopeOp.getBodyRegion()) {
359 if (bodyBlock.mightHaveTerminator()) {
360 if (mlir::isa<cir::YieldOp>(bodyBlock.getTerminator()))
361 return false;
362 assert(!mlir::isa<cir::BreakOp>(bodyBlock.getTerminator()) &&
363 !mlir::isa<cir::ContinueOp>(bodyBlock.getTerminator()) &&
364 !mlir::isa<cir::ResumeOp>(bodyBlock.getTerminator()));
365 }
366 }
367 return true;
368}
369
/// Emit the terminator for a function body that falls off the end without an
/// explicit return statement.
void CIRGenFunction::LexicalScope::emitImplicitReturn() {
  CIRGenBuilderTy &builder = cgf.getBuilder();
  LexicalScope *localScope = cgf.curLexScope;

  const auto *fd = cast<clang::FunctionDecl>(cgf.curGD.getDecl());

  // In C++, flowing off the end of a non-void function is always undefined
  // behavior. In C, flowing off the end of a non-void function is undefined
  // behavior only if the non-existent return value is used by the caller.
  // That influences whether the terminating op is trap, unreachable, or
  // return.
  if (cgf.getLangOpts().CPlusPlus && !fd->hasImplicitReturnZero() &&
      !cgf.sawAsmBlock && !fd->getReturnType()->isVoidType() &&
      builder.getInsertionBlock() &&
      !previousOpIsNonYieldingCleanup(builder.getInsertionBlock())) {
    // The return may only be dropped when the StrictReturn codegen option is
    // off AND the return type is droppable per mayDropFunctionReturn (trivial
    // destructor / trivially copyable).
    bool shouldEmitUnreachable =
        cgf.cgm.getCodeGenOpts().StrictReturn ||
        !mayDropFunctionReturn(fd->getASTContext(), fd->getReturnType());

    if (shouldEmitUnreachable) {
      // At -O0 emit a trap so the failure is observable at runtime; at higher
      // optimization levels mark the point unreachable instead.
      if (cgf.cgm.getCodeGenOpts().OptimizationLevel == 0)
        cir::TrapOp::create(builder, localScope->endLoc);
      else
        cir::UnreachableOp::create(builder, localScope->endLoc);
      // No code may follow a trap/unreachable terminator.
      builder.clearInsertionPoint();
      return;
    }
  }

  // Otherwise emit an ordinary (implicit) return at the scope's end location.
  (void)emitReturn(localScope->endLoc);
}
402
404 LexicalScope *scope = this;
405 while (scope) {
406 if (scope->isTry())
407 return scope->getTry();
408 scope = scope->parentScope;
409 }
410 return nullptr;
411}
412
413/// An argument came in as a promoted argument; demote it back to its
414/// declared type.
415static mlir::Value emitArgumentDemotion(CIRGenFunction &cgf, const VarDecl *var,
416 mlir::Value value) {
417 mlir::Type ty = cgf.convertType(var->getType());
418
419 // This can happen with promotions that actually don't change the
420 // underlying type, like the enum promotions.
421 if (value.getType() == ty)
422 return value;
423
424 assert((mlir::isa<cir::IntType>(ty) || cir::isAnyFloatingPointType(ty)) &&
425 "unexpected promotion type");
426
427 if (mlir::isa<cir::IntType>(ty))
428 return cgf.getBuilder().CIRBaseBuilderTy::createIntCast(value, ty);
429
430 return cgf.getBuilder().createFloatingCast(value, ty);
431}
432
434 mlir::Block *entryBB,
435 const FunctionDecl *fd,
436 SourceLocation bodyBeginLoc) {
437 // Naked functions don't have prologues.
438 if (fd && fd->hasAttr<NakedAttr>()) {
439 cgm.errorNYI(bodyBeginLoc, "naked function decl");
440 }
441
442 // Declare all the function arguments in the symbol table.
443 for (const auto nameValue : llvm::zip(args, entryBB->getArguments())) {
444 const VarDecl *paramVar = std::get<0>(nameValue);
445 mlir::Value paramVal = std::get<1>(nameValue);
446 CharUnits alignment = getContext().getDeclAlign(paramVar);
447 mlir::Location paramLoc = getLoc(paramVar->getSourceRange());
448 paramVal.setLoc(paramLoc);
449
450 mlir::Value addrVal =
451 emitAlloca(cast<NamedDecl>(paramVar)->getName(),
452 convertType(paramVar->getType()), paramLoc, alignment,
453 /*insertIntoFnEntryBlock=*/true);
454
455 declare(addrVal, paramVar, paramVar->getType(), paramLoc, alignment,
456 /*isParam=*/true);
457
458 setAddrOfLocalVar(paramVar, Address(addrVal, alignment));
459
460 bool isPromoted = isa<ParmVarDecl>(paramVar) &&
461 cast<ParmVarDecl>(paramVar)->isKNRPromoted();
463 if (isPromoted)
464 paramVal = emitArgumentDemotion(*this, paramVar, paramVal);
465
466 // Location of the store to the param storage tracked as beginning of
467 // the function body.
468 mlir::Location fnBodyBegin = getLoc(bodyBeginLoc);
469 builder.CIRBaseBuilderTy::createStore(fnBodyBegin, paramVal, addrVal);
470 }
471 assert(builder.getInsertionBlock() && "Should be valid");
472}
473
475 cir::FuncOp fn, cir::FuncType funcType,
477 SourceLocation startLoc) {
478 assert(!curFn &&
479 "CIRGenFunction can only be used for one function at a time");
480
481 curFn = fn;
482
483 const Decl *d = gd.getDecl();
484
485 didCallStackSave = false;
486 curCodeDecl = d;
487 const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
488 curFuncDecl = (d ? d->getNonClosureContext() : nullptr);
489
490 prologueCleanupDepth = ehStack.stable_begin();
491
492 mlir::Block *entryBB = &fn.getBlocks().front();
493 builder.setInsertionPointToStart(entryBB);
494
495 // Determine the function body begin location for the prolog.
496 // If fd is null or has no body, use startLoc as fallback.
497 SourceLocation bodyBeginLoc = startLoc;
498 if (fd) {
499 if (Stmt *body = fd->getBody())
500 bodyBeginLoc = body->getBeginLoc();
501 else
502 bodyBeginLoc = fd->getLocation();
503 }
504
505 emitFunctionProlog(args, entryBB, fd, bodyBeginLoc);
506
507 // When the current function is not void, create an address to store the
508 // result value.
509 if (!returnType->isVoidType()) {
510 // Determine the function body end location.
511 // If fd is null or has no body, use loc as fallback.
512 SourceLocation bodyEndLoc = loc;
513 if (fd) {
514 if (Stmt *body = fd->getBody())
515 bodyEndLoc = body->getEndLoc();
516 else
517 bodyEndLoc = fd->getLocation();
518 }
519 emitAndUpdateRetAlloca(returnType, getLoc(bodyEndLoc),
520 getContext().getTypeAlignInChars(returnType));
521 }
522
523 if (isa_and_nonnull<CXXMethodDecl>(d) &&
524 cast<CXXMethodDecl>(d)->isInstance()) {
525 cgm.getCXXABI().emitInstanceFunctionProlog(loc, *this);
526
527 const auto *md = cast<CXXMethodDecl>(d);
528 if (md->getParent()->isLambda() && md->getOverloadedOperator() == OO_Call) {
529 // We're in a lambda.
530 auto fn = dyn_cast<cir::FuncOp>(curFn);
531 assert(fn && "lambda in non-function region");
532 fn.setLambda(true);
533
534 // Figure out the captures.
535 md->getParent()->getCaptureFields(lambdaCaptureFields,
538 // If the lambda captures the object referred to by '*this' - either by
539 // value or by reference, make sure CXXThisValue points to the correct
540 // object.
541
542 // Get the lvalue for the field (which is a copy of the enclosing object
543 // or contains the address of the enclosing object).
544 LValue thisFieldLValue =
546 if (!lambdaThisCaptureField->getType()->isPointerType()) {
547 // If the enclosing object was captured by value, just use its
548 // address. Sign this pointer.
549 cxxThisValue = thisFieldLValue.getPointer();
550 } else {
551 // Load the lvalue pointed to by the field, since '*this' was captured
552 // by reference.
554 emitLoadOfLValue(thisFieldLValue, SourceLocation()).getValue();
555 }
556 }
557 for (auto *fd : md->getParent()->fields()) {
558 if (fd->hasCapturedVLAType())
559 cgm.errorNYI(loc, "lambda captured VLA type");
560 }
561 } else {
562 // Not in a lambda; just use 'this' from the method.
563 // FIXME: Should we generate a new load for each use of 'this'? The fast
564 // register allocator would be happier...
566 }
567
570 }
571}
572
574 for (cir::BlockAddressOp &blockAddress : cgm.unresolvedBlockAddressToLabel) {
575 cir::LabelOp labelOp =
576 cgm.lookupBlockAddressInfo(blockAddress.getBlockAddrInfo());
577 assert(labelOp && "expected cir.labelOp to already be emitted");
578 cgm.updateResolvedBlockAddress(blockAddress, labelOp);
579 }
580 cgm.unresolvedBlockAddressToLabel.clear();
581}
582
585 return;
588 mlir::OpBuilder::InsertionGuard guard(builder);
589 builder.setInsertionPointToEnd(indirectGotoBlock);
590 for (auto &[blockAdd, labelOp] : cgm.blockAddressToLabel) {
591 succesors.push_back(labelOp->getBlock());
592 rangeOperands.push_back(labelOp->getBlock()->getArguments());
593 }
594 cir::IndirectBrOp::create(builder, builder.getUnknownLoc(),
595 indirectGotoBlock->getArgument(0), false,
596 rangeOperands, succesors);
597 cgm.blockAddressToLabel.clear();
598}
599
601 // Resolve block address-to-label mappings, then emit the indirect branch
602 // with the corresponding targets.
605
606 // If a label address was taken but no indirect goto was used, we can't remove
607 // the block argument here. Instead, we mark the 'indirectbr' op
608 // as poison so that the cleanup can be deferred to lowering, since the
609 // verifier doesn't allow the 'indirectbr' target address to be null.
610 if (indirectGotoBlock && indirectGotoBlock->hasNoPredecessors()) {
611 auto indrBr = cast<cir::IndirectBrOp>(indirectGotoBlock->front());
612 indrBr.setPoison(true);
613 }
614
615 // Pop any cleanups that might have been associated with the
616 // parameters. Do this in whatever block we're currently in; it's
617 // important to do this before we enter the return block or return
618 // edges will be *really* confused.
619 // TODO(cir): Use prologueCleanupDepth here.
620 bool hasCleanups = ehStack.stable_begin() != prologueCleanupDepth;
621 if (hasCleanups) {
623 // FIXME(cir): should we clearInsertionPoint? breaks many testcases
625 }
626
627 assert(deferredConditionalCleanupStack.empty() &&
628 "deferred conditional cleanups were not consumed by a "
629 "FullExprCleanupScope");
630}
631
/// Emit the statements making up the current function's body.
///
/// A CompoundStmt body is emitted without opening an additional lexical
/// scope, since the function-level scope is already active; any other
/// statement kind is emitted within the current scope.
mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
  // We start with function level scope for variables.

  if (const CompoundStmt *block = dyn_cast<CompoundStmt>(body))
    return emitCompoundStmtWithoutScope(*block);

  return emitStmt(body, /*useCurrentScope=*/true);
}
641
642static void eraseEmptyAndUnusedBlocks(cir::FuncOp func) {
643 // Remove any leftover blocks that are unreachable and empty, since they do
644 // not represent unreachable code useful for warnings nor anything deemed
645 // useful in general.
646 SmallVector<mlir::Block *> blocksToDelete;
647 for (mlir::Block &block : func.getBlocks()) {
648 if (block.empty() && block.getUses().empty())
649 blocksToDelete.push_back(&block);
650 }
651 for (mlir::Block *block : blocksToDelete)
652 block->erase();
653}
654
655cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
656 cir::FuncType funcType) {
657 const auto *funcDecl = cast<FunctionDecl>(gd.getDecl());
658 curGD = gd;
659
660 if (funcDecl->isInlineBuiltinDeclaration()) {
661 // When generating code for a builtin with an inline declaration, use a
662 // mangled name to hold the actual body, while keeping an external
663 // declaration in case the function pointer is referenced somewhere.
664 std::string fdInlineName = (cgm.getMangledName(funcDecl) + ".inline").str();
665 cir::FuncOp clone =
666 mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
667 if (!clone) {
668 mlir::OpBuilder::InsertionGuard guard(builder);
669 builder.setInsertionPoint(fn);
670 clone = cir::FuncOp::create(builder, fn.getLoc(), fdInlineName,
671 fn.getFunctionType());
672 clone.setLinkage(cir::GlobalLinkageKind::InternalLinkage);
673 clone.setSymVisibility("private");
674 clone.setInlineKind(cir::InlineKind::AlwaysInline);
675 }
676 fn.setLinkage(cir::GlobalLinkageKind::ExternalLinkage);
677 fn.setSymVisibility("private");
678 fn = clone;
679 } else {
680 // Detect the unusual situation where an inline version is shadowed by a
681 // non-inline version. In that case we should pick the external one
682 // everywhere. That's GCC behavior too.
683 for (const FunctionDecl *pd = funcDecl->getPreviousDecl(); pd;
684 pd = pd->getPreviousDecl()) {
685 if (LLVM_UNLIKELY(pd->isInlineBuiltinDeclaration())) {
686 std::string inlineName = funcDecl->getName().str() + ".inline";
687 if (auto inlineFn = mlir::cast_or_null<cir::FuncOp>(
688 cgm.getGlobalValue(inlineName))) {
689 // Replace all uses of the .inline function with the regular function
690 // FIXME: This performs a linear walk over the module. Introduce some
691 // caching here.
692 if (inlineFn
693 .replaceAllSymbolUses(fn.getSymNameAttr(), cgm.getModule())
694 .failed())
695 llvm_unreachable("Failed to replace inline builtin symbol uses");
696 inlineFn.erase();
697 }
698 break;
699 }
700 }
701 }
702
703 SourceLocation loc = funcDecl->getLocation();
704 Stmt *body = funcDecl->getBody();
705 SourceRange bodyRange =
706 body ? body->getSourceRange() : funcDecl->getLocation();
707
708 SourceLocRAIIObject fnLoc{*this, loc.isValid() ? getLoc(loc)
709 : builder.getUnknownLoc()};
710
711 auto validMLIRLoc = [&](clang::SourceLocation clangLoc) {
712 return clangLoc.isValid() ? getLoc(clangLoc) : builder.getUnknownLoc();
713 };
714 const mlir::Location fusedLoc = mlir::FusedLoc::get(
716 {validMLIRLoc(bodyRange.getBegin()), validMLIRLoc(bodyRange.getEnd())});
717 mlir::Block *entryBB = fn.addEntryBlock();
718
719 FunctionArgList args;
720 QualType retTy = buildFunctionArgList(gd, args);
721
722 // Create a scope in the symbol table to hold variable declarations.
724 {
725 LexicalScope lexScope(*this, fusedLoc, entryBB);
726
727 // Emit the standard function prologue.
728 startFunction(gd, retTy, fn, funcType, args, loc, bodyRange.getBegin());
729
730 // Save parameters for coroutine function.
731 if (body && isa_and_nonnull<CoroutineBodyStmt>(body))
732 llvm::append_range(fnArgs, funcDecl->parameters());
733
734 if (isa<CXXDestructorDecl>(funcDecl)) {
735 emitDestructorBody(args);
736 } else if (isa<CXXConstructorDecl>(funcDecl)) {
738 } else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice &&
739 funcDecl->hasAttr<CUDAGlobalAttr>()) {
740 cgm.getCUDARuntime().emitDeviceStub(*this, fn, args);
741 } else if (isa<CXXMethodDecl>(funcDecl) &&
742 cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker()) {
743 // The lambda static invoker function is special, because it forwards or
744 // clones the body of the function call operator (but is actually
745 // static).
747 } else if (funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) &&
748 (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() ||
749 cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator())) {
750 // Implicit copy-assignment gets the same special treatment as implicit
751 // copy-constructors.
753 } else if (body) {
754 // Emit standard function body.
755 if (mlir::failed(emitFunctionBody(body))) {
756 return nullptr;
757 }
758 } else {
759 // Anything without a body should have been handled above.
760 llvm_unreachable("no definition for normal function");
761 }
762
763 if (mlir::failed(fn.verifyBody()))
764 return nullptr;
765
766 finishFunction(bodyRange.getEnd());
767 }
768
770 return fn;
771}
772
775 const auto *ctor = cast<CXXConstructorDecl>(curGD.getDecl());
776 CXXCtorType ctorType = curGD.getCtorType();
777
778 assert((cgm.getTarget().getCXXABI().hasConstructorVariants() ||
779 ctorType == Ctor_Complete) &&
780 "can only generate complete ctor for this ABI");
781
782 cgm.setCXXSpecialMemberAttr(cast<cir::FuncOp>(curFn), ctor);
783
784 if (ctorType == Ctor_Complete && isConstructorDelegationValid(ctor) &&
785 cgm.getTarget().getCXXABI().hasConstructorVariants()) {
786 emitDelegateCXXConstructorCall(ctor, Ctor_Base, args, ctor->getEndLoc());
787 return;
788 }
789
790 const FunctionDecl *definition = nullptr;
791 Stmt *body = ctor->getBody(definition);
792 assert(definition == ctor && "emitting wrong constructor body");
793
794 bool isTryBody = isa_and_nonnull<CXXTryStmt>(body);
795
796 // A type that handles the emission of the constructor body, that can be
797 // called directly for cases where we don't have a try-body, or passed to
798 // emitCXXTryStmt.
799 struct ctorTryBodyEmitter final : cxxTryBodyEmitter {
800 const CXXConstructorDecl *ctor = nullptr;
801 CXXCtorType ctorType;
802 FunctionArgList &args;
803 Stmt *emitterBody = nullptr;
804 ctorTryBodyEmitter(const CXXConstructorDecl *ctor, CXXCtorType ctorType,
805 FunctionArgList &args, bool isTryBody, Stmt *b)
806 : ctor(ctor), ctorType(ctorType), args(args),
807 emitterBody(isTryBody ? cast<CXXTryStmt>(b)->getTryBlock() : b) {}
808 ~ctorTryBodyEmitter() override = default;
809
810 mlir::LogicalResult operator()(CIRGenFunction &cgf) override {
813
814 //// TODO: in restricted cases, we can emit the vbase initializers of a
815 //// complete ctor and then delegate to the base ctor.
816
817 cgf.emitCtorPrologue(ctor, ctorType, args);
818 return cgf.emitStmt(emitterBody, /*useCurrentScope=*/true);
819 }
820 };
821
822 ctorTryBodyEmitter emitter{ctor, ctorType, args, isTryBody, body};
823 mlir::LogicalResult bodyRes =
824 isTryBody ? emitCXXTryStmt(*cast<CXXTryStmt>(body), emitter)
825 : emitter(*this);
826
827 // TODO(cir): propagate this result via mlir::logical result. Just
828 // unreachable now just to have it handled.
829 if (bodyRes.failed())
830 cgm.errorNYI(ctor->getSourceRange(),
831 "emitConstructorBody: emit body statement failed.");
832}
833
834/// Emits the body of the current destructor.
836 const CXXDestructorDecl *dtor = cast<CXXDestructorDecl>(curGD.getDecl());
837 CXXDtorType dtorType = curGD.getDtorType();
838
839 cgm.setCXXSpecialMemberAttr(cast<cir::FuncOp>(curFn), dtor);
840
841 // For an abstract class, non-base destructors are never used (and can't
842 // be emitted in general, because vbase dtors may not have been validated
843 // by Sema), but the Itanium ABI doesn't make them optional and Clang may
844 // in fact emit references to them from other compilations, so emit them
845 // as functions containing a trap instruction.
846 if (dtorType != Dtor_Base && dtor->getParent()->isAbstract()) {
847 SourceLocation loc =
848 dtor->hasBody() ? dtor->getBody()->getBeginLoc() : dtor->getLocation();
849 emitTrap(getLoc(loc), true);
850 return;
851 }
852
853 Stmt *body = dtor->getBody();
855
856 // The call to operator delete in a deleting destructor happens
857 // outside of the function-try-block, which means it's always
858 // possible to delegate the destructor body to the complete
859 // destructor. Do so.
860 if (dtorType == Dtor_Deleting || dtorType == Dtor_VectorDeleting) {
862 cgm.errorNYI(dtor->getSourceRange(), "emitConditionalArrayDtorCall");
863 RunCleanupsScope dtorEpilogue(*this);
865 if (haveInsertPoint()) {
867 emitCXXDestructorCall(dtor, Dtor_Complete, /*forVirtualBase=*/false,
868 /*delegating=*/false, loadCXXThisAddress(), thisTy);
869 }
870 return;
871 }
872
873 // If the body is a function-try-block, enter the try before
874 // anything else.
875 const bool isTryBody = isa_and_nonnull<CXXTryStmt>(body);
876 if (isTryBody)
877 cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
878
880
881 // Enter the epilogue cleanups.
882 RunCleanupsScope dtorEpilogue(*this);
883
884 // If this is the complete variant, just invoke the base variant;
885 // the epilogue will destruct the virtual bases. But we can't do
886 // this optimization if the body is a function-try-block, because
887 // we'd introduce *two* handler blocks. In the Microsoft ABI, we
888 // always delegate because we might not have a definition in this TU.
889 switch (dtorType) {
890 case Dtor_Unified:
891 llvm_unreachable("not expecting a unified dtor");
892 case Dtor_Comdat:
893 llvm_unreachable("not expecting a COMDAT");
894 case Dtor_Deleting:
896 llvm_unreachable("already handled deleting case");
897
898 case Dtor_Complete:
899 assert((body || getTarget().getCXXABI().isMicrosoft()) &&
900 "can't emit a dtor without a body for non-Microsoft ABIs");
901
902 // Enter the cleanup scopes for virtual bases.
904
905 if (!isTryBody) {
907 emitCXXDestructorCall(dtor, Dtor_Base, /*forVirtualBase=*/false,
908 /*delegating=*/false, loadCXXThisAddress(), thisTy);
909 break;
910 }
911
912 // Fallthrough: act like we're in the base variant.
913 [[fallthrough]];
914
915 case Dtor_Base:
916 assert(body);
917
918 // Enter the cleanup scopes for fields and non-virtual bases.
920
922
923 if (isTryBody) {
924 cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
925 } else if (body) {
926 (void)emitStmt(body, /*useCurrentScope=*/true);
927 } else {
928 assert(dtor->isImplicit() && "bodyless dtor not implicit");
929 // nothing to do besides what's in the epilogue
930 }
931 // -fapple-kext must inline any call to this dtor into
932 // the caller's body.
934
935 break;
936 }
937
938 // Jump out through the epilogue cleanups.
939 dtorEpilogue.forceCleanup();
940
941 // Exit the try if applicable.
942 if (isTryBody)
943 cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
944}
945
 946/// Given a value of type T* that may not point to a complete object, construct
 947/// an l-value with the natural pointee alignment of T.
949 QualType ty) {
950 // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps
951 // assert on the result type first.
952 LValueBaseInfo baseInfo;
954 CharUnits align = cgm.getNaturalTypeAlignment(ty, &baseInfo);
955 return makeAddrLValue(Address(val, align), ty, baseInfo);
956}
957
959 QualType ty) {
960 LValueBaseInfo baseInfo;
961 CharUnits alignment = cgm.getNaturalTypeAlignment(ty, &baseInfo);
962 Address addr(val, convertTypeForMem(ty), alignment);
964 return makeAddrLValue(addr, ty, baseInfo);
965}
966
967// Map the LangOption for exception behavior into the corresponding enum in
968// the IR.
969static llvm::fp::ExceptionBehavior
971 switch (kind) {
973 return llvm::fp::ebIgnore;
975 return llvm::fp::ebMayTrap;
977 return llvm::fp::ebStrict;
979 llvm_unreachable("expected explicitly initialized exception behavior");
980 }
981 llvm_unreachable("unsupported FP exception behavior");
982}
983
985 FunctionArgList &args) {
986 const auto *fd = cast<FunctionDecl>(gd.getDecl());
987 QualType retTy = fd->getReturnType();
988
989 const auto *md = dyn_cast<CXXMethodDecl>(fd);
990 if (md && md->isInstance()) {
991 if (cgm.getCXXABI().hasThisReturn(gd))
992 cgm.errorNYI(fd->getSourceRange(), "this return");
993 else if (cgm.getCXXABI().hasMostDerivedReturn(gd))
994 cgm.errorNYI(fd->getSourceRange(), "most derived return");
995 cgm.getCXXABI().buildThisParam(*this, args);
996 }
997
998 bool passedParams = true;
999 if (const auto *cd = dyn_cast<CXXConstructorDecl>(fd))
1000 if (auto inherited = cd->getInheritedConstructor())
1001 passedParams =
1002 getTypes().inheritingCtorHasParams(inherited, gd.getCtorType());
1003
1004 if (passedParams) {
1005 for (auto *param : fd->parameters()) {
1006 args.push_back(param);
1007 if (param->hasAttr<PassObjectSizeAttr>())
1008 cgm.errorNYI(param->getSourceRange(), "pass-object-size attribute");
1009 }
1010 }
1011
1012 if (md && (isa<CXXConstructorDecl>(md) || isa<CXXDestructorDecl>(md)))
1013 cgm.getCXXABI().addImplicitStructorParams(*this, retTy, args);
1014
1015 return retTy;
1016}
1017
1019 // Initializing an aggregate temporary in C++11: T{...}.
1020 if (!e->isGLValue())
1021 return emitAggExprToLValue(e);
1022
1023 // An lvalue initializer list must be initializing a reference.
1024 assert(e->isTransparent() && "non-transparent glvalue init list");
1025 return emitLValue(e->getInit(0));
1026}
1027
1028static std::variant<LValue, RValue>
1030 bool forLValue, AggValueSlot slot) {
1032 SmallVector<OVMD> opaques;
1033 llvm::scope_exit opaque_cleanup{
1034 [&]() { llvm::for_each(opaques, [&](OVMD &o) { o.unbind(cgf); }); }};
1035
1036 // Find the result expression, if any.
1037 const Expr *resultExpr = e->getResultExpr();
1038 std::variant<LValue, RValue> result;
1039
1040 for (const Expr *semantic : e->semantics()) {
1041 // If this semantic expression is an opaque value, bind it
1042 // to the result of its source expression.
1043 if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
1044
1045 // Skip unique OVEs.
1046 if (ov->isUnique()) {
1047 // FIXME: This doesn't really affect anything, but I cannot find a test
1048 // for this, so leave an ErrorNYI here until we can find one.
1049 cgf.cgm.errorNYI(e->getSourceRange(),
1050 "emitPseudoObjectExpr skipped for uniqueness");
1051 assert(ov != resultExpr &&
1052 "A unique OVE cannot be used as the result expression");
1053 continue;
1054 }
1055
1056 // If this is the result expression, we may need to evaluate
1057 // directly into the slot.
1058 OVMD opaqueData;
1059 if (ov == resultExpr && ov->isPRValue() && !forLValue &&
1061 cgf.cgm.errorNYI(e->getSourceRange(),
1062 "emitPseudoObjectExpr for RValue & aggregate kind");
1063 } else {
1064 opaqueData = OVMD::bind(cgf, ov, ov->getSourceExpr());
1065
1066 // If this is the result, also evaluate the result now.
1067 if (ov == resultExpr) {
1068 // FIXME: This doesn't really affect anything, but I cannot find a
1069 // test for this, so leave an ErrorNYI here until we can find one.
1070 cgf.cgm.errorNYI(e->getSourceRange(),
1071 "emitPseudoObjectExpr as result");
1072 if (forLValue)
1073 result = cgf.emitLValue(ov);
1074 else
1075 cgf.cgm.errorNYI(e->getSourceRange(),
1076 "emitPseudoObjectExpr as an RValue");
1077 }
1078 }
1079 opaques.push_back(opaqueData);
1080 } else if (semantic == resultExpr) {
1081 // Otherwise, if the expression is the result, evaluate it
1082 // and remember the result.
1083 if (forLValue)
1084 result = cgf.emitLValue(semantic);
1085 else
1086 cgf.cgm.errorNYI(
1087 e->getSourceRange(),
1088 "emitPseudoObjectExpr as an RValue, when semantic is result");
1089 } else {
1090 // FIXME: best I can tell, this is only reachable as an r-value, so this
1091 // isn't properly tested.
1092 cgf.cgm.errorNYI(e->getSourceRange(),
1093 "emitPseudoObjectExpr as an ignored value");
1094 // Otherwise, evaluate the expression in an ignored context.
1095 cgf.emitIgnoredExpr(semantic);
1096 }
1097 }
1098
1099 return result;
1100}
1101
1103 return std::get<LValue>(emitPseudoObjectExpr(*this, e, /*forLValue=*/true,
1105}
1106
1107/// Emit code to compute a designator that specifies the location
1108/// of the expression.
1109/// FIXME: document this function better.
1111 // FIXME: ApplyDebugLocation DL(*this, e);
1112 switch (e->getStmtClass()) {
1113 default:
1115 std::string("l-value not implemented for '") +
1116 e->getStmtClassName() + "'");
1117 return LValue();
1118 case Expr::ConditionalOperatorClass:
1120 case Expr::BinaryConditionalOperatorClass:
1122 case Expr::ArraySubscriptExprClass:
1124 case Expr::ExtVectorElementExprClass:
1126 case Expr::UnaryOperatorClass:
1128 case Expr::StringLiteralClass:
1130 case Expr::MemberExprClass:
1132 case Expr::CompoundLiteralExprClass:
1134 case Expr::PredefinedExprClass:
1136 case Expr::BinaryOperatorClass:
1138 case Expr::CompoundAssignOperatorClass: {
1139 QualType ty = e->getType();
1140 if (ty->getAs<AtomicType>()) {
1141 cgm.errorNYI(e->getSourceRange(),
1142 "CompoundAssignOperator with AtomicType");
1143 return LValue();
1144 }
1145 if (!ty->isAnyComplexType())
1147
1149 }
1150 case Expr::CallExprClass:
1151 case Expr::CXXMemberCallExprClass:
1152 case Expr::CXXOperatorCallExprClass:
1153 case Expr::UserDefinedLiteralClass:
1155 case Expr::ExprWithCleanupsClass: {
1156 const auto *cleanups = cast<ExprWithCleanups>(e);
1157 RunCleanupsScope scope(*this);
1158 LValue lv = emitLValue(cleanups->getSubExpr());
1160 return lv;
1161 }
1162 case Expr::CXXDefaultArgExprClass: {
1163 auto *dae = cast<CXXDefaultArgExpr>(e);
1164 CXXDefaultArgExprScope scope(*this, dae);
1165 return emitLValue(dae->getExpr());
1166 }
1167 case Expr::CXXTypeidExprClass:
1169 case Expr::ParenExprClass:
1170 return emitLValue(cast<ParenExpr>(e)->getSubExpr());
1171 case Expr::GenericSelectionExprClass:
1172 return emitLValue(cast<GenericSelectionExpr>(e)->getResultExpr());
1173 case Expr::DeclRefExprClass:
1175 case Expr::ImplicitCastExprClass:
1176 case Expr::CStyleCastExprClass:
1177 case Expr::CXXStaticCastExprClass:
1178 case Expr::CXXDynamicCastExprClass:
1179 case Expr::CXXReinterpretCastExprClass:
1180 case Expr::CXXConstCastExprClass:
1181 case Expr::CXXFunctionalCastExprClass:
1182 // TODO(cir): The above list is missing
1183 // CXXAddrSpaceCastExprClass, and ObjCBridgedCastExprClass.
1184 return emitCastLValue(cast<CastExpr>(e));
1185 case Expr::MaterializeTemporaryExprClass:
1187 case Expr::OpaqueValueExprClass:
1189 case Expr::ChooseExprClass:
1190 return emitLValue(cast<ChooseExpr>(e)->getChosenSubExpr());
1191 case Expr::SubstNonTypeTemplateParmExprClass:
1192 return emitLValue(cast<SubstNonTypeTemplateParmExpr>(e)->getReplacement());
1193 case Expr::InitListExprClass:
1195 case Expr::PseudoObjectExprClass:
1197 }
1198}
1199
1200static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt) {
1201 SmallString<256> buffer;
1202 llvm::raw_svector_ostream out(buffer);
1203 out << name << cnt;
1204 return std::string(out.str());
1205}
1206
1208 return getVersionedTmpName("ref.tmp", counterRefTmp++);
1209}
1210
1212 return getVersionedTmpName("agg.tmp", counterAggTmp++);
1213}
1214
1215void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address destPtr,
1216 QualType ty) {
1217 // Ignore empty classes in C++.
1218 if (getLangOpts().CPlusPlus)
1219 if (const auto *rd = ty->getAsCXXRecordDecl(); rd && rd->isEmpty())
1220 return;
1221
1222 // Cast the dest ptr to the appropriate i8 pointer type.
1223 if (builder.isInt8Ty(destPtr.getElementType())) {
1224 cgm.errorNYI(loc, "Cast the dest ptr to the appropriate i8 pointer type");
1225 }
1226
1227 // Get size and alignment info for this aggregate.
1228 const CharUnits size = getContext().getTypeSizeInChars(ty);
1229 if (size.isZero()) {
1230 // But note that getTypeInfo returns 0 for a VLA.
1231 if (isa<VariableArrayType>(getContext().getAsArrayType(ty))) {
1232 cgm.errorNYI(loc,
1233 "emitNullInitialization for zero size VariableArrayType");
1234 } else {
1235 return;
1236 }
1237 }
1238
1239 // If the type contains a pointer to data member we can't memset it to zero.
1240 // Instead, create a null constant and copy it to the destination.
1241 // TODO: there are other patterns besides zero that we can usefully memset,
1242 // like -1, which happens to be the pattern used by member-pointers.
1243 if (!cgm.getTypes().isZeroInitializable(ty)) {
1244 cgm.errorNYI(loc, "type is not zero initializable");
1245 }
1246
1247 // In LLVM Codegen: otherwise, just memset the whole thing to zero using
1248 // Builder.CreateMemSet. In CIR just emit a store of #cir.zero to the
1249 // respective address.
1250 // Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
1251 const mlir::Value zeroValue = builder.getNullValue(convertType(ty), loc);
1252 builder.createStore(loc, zeroValue, destPtr);
1253}
1254
1256 const clang::Expr *e)
1257 : cgf(cgf) {
1258 ConstructorHelper(e->getFPFeaturesInEffect(cgf.getLangOpts()));
1259}
1260
1262 FPOptions fpFeatures)
1263 : cgf(cgf) {
1264 ConstructorHelper(fpFeatures);
1265}
1266
1267void CIRGenFunction::CIRGenFPOptionsRAII::ConstructorHelper(
1268 FPOptions fpFeatures) {
1269 oldFPFeatures = cgf.curFPFeatures;
1270 cgf.curFPFeatures = fpFeatures;
1271
1272 oldExcept = cgf.builder.getDefaultConstrainedExcept();
1273 oldRounding = cgf.builder.getDefaultConstrainedRounding();
1274
1275 if (oldFPFeatures == fpFeatures)
1276 return;
1277
1278 // TODO(cir): create guard to restore fast math configurations.
1280
1281 [[maybe_unused]] llvm::RoundingMode newRoundingBehavior =
1282 fpFeatures.getRoundingMode();
1283 // TODO(cir): override rounding behaviour once FM configs are guarded.
1284 [[maybe_unused]] llvm::fp::ExceptionBehavior newExceptionBehavior =
1286 fpFeatures.getExceptionMode()));
1287 // TODO(cir): override exception behaviour once FM configs are guarded.
1288
1289 // TODO(cir): override FP flags once FM configs are guarded.
1291
1292 assert((cgf.curFuncDecl == nullptr || cgf.builder.getIsFPConstrained() ||
1293 isa<CXXConstructorDecl>(cgf.curFuncDecl) ||
1294 isa<CXXDestructorDecl>(cgf.curFuncDecl) ||
1295 (newExceptionBehavior == llvm::fp::ebIgnore &&
1296 newRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
1297 "FPConstrained should be enabled on entire function");
1298
1299 // TODO(cir): mark CIR function with fast math attributes.
1301}
1302
1304 cgf.curFPFeatures = oldFPFeatures;
1305 cgf.builder.setDefaultConstrainedExcept(oldExcept);
1306 cgf.builder.setDefaultConstrainedRounding(oldRounding);
1307}
1308
1309// TODO(cir): should be shared with LLVM codegen.
1311 const Expr *e = ce->getSubExpr();
1312
1313 if (ce->getCastKind() == CK_UncheckedDerivedToBase)
1314 return false;
1315
1316 if (isa<CXXThisExpr>(e->IgnoreParens())) {
1317 // We always assume that 'this' is never null.
1318 return false;
1319 }
1320
1321 if (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(ce)) {
1322 // And that glvalue casts are never null.
1323 if (ice->isGLValue())
1324 return false;
1325 }
1326
1327 return true;
1328}
1329
1330/// Computes the length of an array in elements, as well as the base
1331/// element type and a properly-typed first element pointer.
1332mlir::Value
1334 QualType &baseType, Address &addr) {
1335 const clang::ArrayType *arrayType = origArrayType;
1336
1337 // If it's a VLA, we have to load the stored size. Note that
1338 // this is the size of the VLA in bytes, not its size in elements.
1339 mlir::Value numVLAElements = nullptr;
1342
1343 // Walk into all VLAs. This doesn't require changes to addr,
1344 // which has type T* where T is the first non-VLA element type.
1345 do {
1346 QualType elementType = arrayType->getElementType();
1347 arrayType = getContext().getAsArrayType(elementType);
1348
1349 // If we only have VLA components, 'addr' requires no adjustment.
1350 if (!arrayType) {
1351 baseType = elementType;
1352 return numVLAElements;
1353 }
1355
1356 // We get out here only if we find a constant array type
1357 // inside the VLA.
1358 }
1359
1360 // Classic codegen emits an all-zero inbounds GEP to convert addr from
1361 // [M x [N x T]]* to T*. CIR doesn't need this because callers handle
1362 // the array-to-element pointer conversion themselves (via array_to_ptrdecay
1363 // casts, ptr_bitcast, or manual array type peeling).
1364
1365 uint64_t countFromCLAs = 1;
1366 QualType eltType;
1367
1368 auto cirArrayType = mlir::dyn_cast<cir::ArrayType>(addr.getElementType());
1369
1370 while (cirArrayType) {
1372 countFromCLAs *= cirArrayType.getSize();
1373 eltType = arrayType->getElementType();
1374
1375 cirArrayType =
1376 mlir::dyn_cast<cir::ArrayType>(cirArrayType.getElementType());
1377
1378 arrayType = getContext().getAsArrayType(arrayType->getElementType());
1379 assert((!cirArrayType || arrayType) &&
1380 "CIR and Clang types are out-of-sync");
1381 }
1382
1383 if (arrayType) {
1384 // From this point onwards, the Clang array type has been emitted
1385 // as some other type (probably a packed struct). Compute the array
1386 // size, and just emit the 'begin' expression as a bitcast.
1387 cgm.errorNYI(*currSrcLoc, "length for non-array underlying types");
1388 }
1389
1390 baseType = eltType;
1391
1392 mlir::Value numElements =
1393 builder.getConstInt(*currSrcLoc, sizeTy, countFromCLAs);
1394
1395 // If we had any VLA dimensions, factor them in.
1396 if (numVLAElements)
1397 numElements =
1398 builder.createMul(numVLAElements.getLoc(), numVLAElements, numElements,
1400
1401 return numElements;
1402}
1403
1405 // If we already made the indirect branch for indirect goto, return its block.
1407 return;
1408
1409 mlir::OpBuilder::InsertionGuard guard(builder);
1411 builder.createBlock(builder.getBlock()->getParent(), {}, {voidPtrTy},
1412 {builder.getUnknownLoc()});
1413}
1414
1416 mlir::Value ptrValue, QualType ty, SourceLocation loc,
1417 SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue) {
1419 return cir::AssumeAlignedOp::create(builder, getLoc(assumptionLoc), ptrValue,
1420 alignment, offsetValue);
1421}
1422
1424 mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc,
1425 int64_t alignment, mlir::Value offsetValue) {
1426 QualType ty = expr->getType();
1427 SourceLocation loc = expr->getExprLoc();
1428 return emitAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment,
1429 offsetValue);
1430}
1431
1433 const VariableArrayType *vla =
1434 cgm.getASTContext().getAsVariableArrayType(type);
1435 assert(vla && "type was not a variable array type!");
1436 return getVLASize(vla);
1437}
1438
1441 // The number of elements so far; always size_t.
1442 mlir::Value numElements;
1443
1444 QualType elementType;
1445 do {
1446 elementType = type->getElementType();
1447 mlir::Value vlaSize = vlaSizeMap[type->getSizeExpr()];
1448 assert(vlaSize && "no size for VLA!");
1449 assert(vlaSize.getType() == sizeTy);
1450
1451 if (!numElements) {
1452 numElements = vlaSize;
1453 } else {
1454 // It's undefined behavior if this wraps around, so mark it that way.
1455 // FIXME: Teach -fsanitize=undefined to trap this.
1456
1457 numElements =
1458 builder.createMul(numElements.getLoc(), numElements, vlaSize,
1460 }
1461 } while ((type = getContext().getAsVariableArrayType(elementType)));
1462
1463 assert(numElements && "Undefined elements number");
1464 return {numElements, elementType};
1465}
1466
1469 mlir::Value vlaSize = vlaSizeMap[vla->getSizeExpr()];
1470 assert(vlaSize && "no size for VLA!");
1471 assert(vlaSize.getType() == sizeTy);
1472 return {vlaSize, vla->getElementType()};
1473}
1474
1475// TODO(cir): Most of this function can be shared between CIRGen
1476// and traditional LLVM codegen
1478 assert(type->isVariablyModifiedType() &&
1479 "Must pass variably modified type to EmitVLASizes!");
1480
1481 // We're going to walk down into the type and look for VLA
1482 // expressions.
1483 do {
1484 assert(type->isVariablyModifiedType());
1485
1486 const Type *ty = type.getTypePtr();
1487 switch (ty->getTypeClass()) {
1488 case Type::CountAttributed:
1489 case Type::PackIndexing:
1490 case Type::ArrayParameter:
1491 case Type::HLSLAttributedResource:
1492 case Type::HLSLInlineSpirv:
1493 case Type::PredefinedSugar:
1494 cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
1495 break;
1496
1497#define TYPE(Class, Base)
1498#define ABSTRACT_TYPE(Class, Base)
1499#define NON_CANONICAL_TYPE(Class, Base)
1500#define DEPENDENT_TYPE(Class, Base) case Type::Class:
1501#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
1502#include "clang/AST/TypeNodes.inc"
1503 llvm_unreachable(
1504 "dependent type must be resolved before the CIR codegen");
1505
1506 // These types are never variably-modified.
1507 case Type::Builtin:
1508 case Type::Complex:
1509 case Type::Vector:
1510 case Type::ExtVector:
1511 case Type::ConstantMatrix:
1512 case Type::Record:
1513 case Type::Enum:
1514 case Type::Using:
1515 case Type::TemplateSpecialization:
1516 case Type::ObjCTypeParam:
1517 case Type::ObjCObject:
1518 case Type::ObjCInterface:
1519 case Type::ObjCObjectPointer:
1520 case Type::BitInt:
1521 case Type::OverflowBehavior:
1522 llvm_unreachable("type class is never variably-modified!");
1523
1524 case Type::Adjusted:
1525 type = cast<clang::AdjustedType>(ty)->getAdjustedType();
1526 break;
1527
1528 case Type::Decayed:
1529 type = cast<clang::DecayedType>(ty)->getPointeeType();
1530 break;
1531
1532 case Type::Pointer:
1533 type = cast<clang::PointerType>(ty)->getPointeeType();
1534 break;
1535
1536 case Type::BlockPointer:
1537 type = cast<clang::BlockPointerType>(ty)->getPointeeType();
1538 break;
1539
1540 case Type::LValueReference:
1541 case Type::RValueReference:
1542 type = cast<clang::ReferenceType>(ty)->getPointeeType();
1543 break;
1544
1545 case Type::MemberPointer:
1546 type = cast<clang::MemberPointerType>(ty)->getPointeeType();
1547 break;
1548
1549 case Type::ConstantArray:
1550 case Type::IncompleteArray:
1551 // Losing element qualification here is fine.
1552 type = cast<clang::ArrayType>(ty)->getElementType();
1553 break;
1554
1555 case Type::VariableArray: {
1556 // Losing element qualification here is fine.
1558
1559 // Unknown size indication requires no size computation.
1560 // Otherwise, evaluate and record it.
1561 if (const Expr *sizeExpr = vat->getSizeExpr()) {
1562 // It's possible that we might have emitted this already,
1563 // e.g. with a typedef and a pointer to it.
1564 mlir::Value &entry = vlaSizeMap[sizeExpr];
1565 if (!entry) {
1566 mlir::Value size = emitScalarExpr(sizeExpr);
1568
1569 // Always zexting here would be wrong if it weren't
1570 // undefined behavior to have a negative bound.
1571 // FIXME: What about when size's type is larger than size_t?
1572 entry = builder.createIntCast(size, sizeTy);
1573 }
1574 }
1575 type = vat->getElementType();
1576 break;
1577 }
1578
1579 case Type::FunctionProto:
1580 case Type::FunctionNoProto:
1581 type = cast<clang::FunctionType>(ty)->getReturnType();
1582 break;
1583
1584 case Type::Paren:
1585 case Type::TypeOf:
1586 case Type::UnaryTransform:
1587 case Type::Attributed:
1588 case Type::BTFTagAttributed:
1589 case Type::SubstTemplateTypeParm:
1590 case Type::MacroQualified:
1591 // Keep walking after single level desugaring.
1592 type = type.getSingleStepDesugaredType(getContext());
1593 break;
1594
1595 case Type::Typedef:
1596 case Type::Decltype:
1597 case Type::Auto:
1598 case Type::DeducedTemplateSpecialization:
1599 // Stop walking: nothing to do.
1600 return;
1601
1602 case Type::TypeOfExpr:
1603 // Stop walking: emit typeof expression.
1604 emitIgnoredExpr(cast<clang::TypeOfExprType>(ty)->getUnderlyingExpr());
1605 return;
1606
1607 case Type::Atomic:
1608 type = cast<clang::AtomicType>(ty)->getValueType();
1609 break;
1610
1611 case Type::Pipe:
1612 type = cast<clang::PipeType>(ty)->getElementType();
1613 break;
1614 }
1615 } while (type->isVariablyModifiedType());
1616}
1617
1619 if (getContext().getBuiltinVaListType()->isArrayType())
1620 return emitPointerWithAlignment(e);
1621 return emitLValue(e).getAddress();
1622}
1623
1624} // namespace clang::CIRGen
Defines the clang::Expr interface and subclasses for C++ expressions.
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
APSInt & getInt()
Definition APValue.h:508
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
SourceManager & getSourceManager()
Definition ASTContext.h:859
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3772
QualType getElementType() const
Definition TypeBase.h:3784
mlir::Type getElementType() const
Definition Address.h:123
An aggregate value slot.
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
mlir::Value createFloatingCast(mlir::Value v, mlir::Type destType)
CIRGenFPOptionsRAII(CIRGenFunction &cgf, FPOptions FPFeatures)
A non-RAII class containing all the information about a bound opaque value.
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
void forceCleanup(ArrayRef< mlir::Value * > valuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
static bool isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor)
Checks whether the given constructor is a valid subject for the complete-to-base constructor delegati...
void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB, const FunctionDecl *fd, SourceLocation bodyBeginLoc)
Emit the function prologue: declare function arguments in the symbol table.
mlir::Type convertType(clang::QualType t)
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
EHScopeStack::stable_iterator prologueCleanupDepth
The cleanup depth enclosing all the cleanups associated with the parameters.
cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn, cir::FuncType funcType)
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-vlaue withi the natural...
LValue emitMemberExpr(const MemberExpr *e)
const TargetInfo & getTarget() const
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
const clang::Decl * curFuncDecl
LValue emitLValueForLambdaField(const FieldDecl *field)
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
llvm::DenseMap< const Expr *, mlir::Value > vlaSizeMap
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e)
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, const FunctionArgList &args, clang::SourceLocation loc)
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
mlir::Value emitArrayLength(const clang::ArrayType *arrayType, QualType &baseType, Address &addr)
Computes the length of an array in elements, as well as the base element type and a properly-typed fi...
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
mlir::Block * indirectGotoBlock
IndirectBranch - The first time an indirect goto is seen we create a block reserved for the indirect ...
mlir::Operation * curFn
The current function or global initializer that is generated code for.
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void enterDtorCleanups(const CXXDestructorDecl *dtor, CXXDtorType type)
Enter the cleanups necessary to complete the given phase of destruction for a destructor.
llvm::SmallVector< const ParmVarDecl * > fnArgs
Save Parameter Decl for coroutine.
llvm::SmallVector< PendingCleanupEntry > deferredConditionalCleanupStack
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
void emitImplicitAssignmentOperatorBody(FunctionArgList &args)
mlir::Type convertTypeForMem(QualType t)
mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s, cxxTryBodyEmitter &bodyCallback)
clang::QualType buildFunctionArgList(clang::GlobalDecl gd, FunctionArgList &args)
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, FunctionArgList &args)
This routine generates necessary code to initialize base classes and non-static data members belongin...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
LValue emitAggExprToLValue(const Expr *e)
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
Address returnValue
The temporary alloca to hold the return value.
static bool hasAggregateEvaluationKind(clang::QualType type)
void finishFunction(SourceLocation endLoc)
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
clang::FieldDecl * lambdaThisCaptureField
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
void emitConstructorBody(FunctionArgList &args)
LValue emitCallExprLValue(const clang::CallExpr *e)
bool haveInsertPoint() const
True if an insertion point is defined.
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
LValue emitPseudoObjectLValue(const PseudoObjectExpr *E)
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth, ArrayRef< mlir::Value * > valuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
bool shouldNullCheckClassCastValue(const CastExpr *ce)
CIRGenBuilderTy & getBuilder()
bool didCallStackSave
Whether a cir.stacksave operation has been added.
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
void startFunction(clang::GlobalDecl gd, clang::QualType returnType, cir::FuncOp fn, cir::FuncType funcType, FunctionArgList args, clang::SourceLocation loc, clang::SourceLocation startLoc)
Emit code for the start of a function.
unsigned counterRefTmp
Hold counters for incrementally naming temporaries.
mlir::MLIRContext & getMLIRContext()
void emitDestructorBody(FunctionArgList &args)
Emits the body of the current destructor.
LValue emitInitListLValue(const InitListExpr *e)
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
LValue emitCXXTypeidLValue(const CXXTypeidExpr *e)
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
LValue emitPredefinedLValue(const PredefinedExpr *e)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
void emitLambdaStaticInvokeBody(const CXXMethodDecl *md)
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
Set the address of a local variable.
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression, depending on how va_list is defined.
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
bool inheritingCtorHasParams(const InheritedConstructor &inherited, CXXCtorType type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited constructor.
Type for representing both the decl and type of parameters to a function.
Definition CIRGenCall.h:193
Address getAddress() const
mlir::Value getPointer() const
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
Represents a C++ constructor within a class.
Definition DeclCXX.h:2624
Represents a C++ destructor within a class.
Definition DeclCXX.h:2889
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition DeclCXX.h:2275
QualType getFunctionObjectParameterType() const
Definition DeclCXX.h:2299
bool isAbstract() const
Determine whether this class has a pure virtual function.
Definition DeclCXX.h:1221
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition DeclCXX.h:1186
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit casts that have some representation in the source code (ExplicitCastExpr's derived classes).
Definition Expr.h:3679
CastKind getCastKind() const
Definition Expr.h:3723
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1741
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition DeclBase.h:601
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks, lambdas, etc.
SourceLocation getLocation() const
Definition DeclBase.h:447
bool hasAttr() const
Definition DeclBase.h:585
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer, using any crazy technique that we want to.
bool isGLValue() const
Definition Expr.h:287
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Returns the set of floating point options that apply to this expression.
Definition Expr.cpp:3989
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool isPRValue() const
Definition Expr.h:285
QualType getType() const
Definition Expr.h:144
LangOptions::FPExceptionModeKind getExceptionMode() const
RoundingMode getRoundingMode() const
Represents a function declaration or definition.
Definition Decl.h:2015
Stmt * getBody(const FunctionDecl *&Definition) const
Retrieve the body (definition) of the function.
Definition Decl.cpp:3281
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4551
bool hasBody(const FunctionDecl *&Definition) const
Returns true if the function has a body.
Definition Decl.cpp:3201
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
CXXCtorType getCtorType() const
Definition GlobalDecl.h:108
const Decl * getDecl() const
Definition GlobalDecl.h:106
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct representation in the original source code.
Definition Expr.h:3856
Describes an C or C++ initializer list.
Definition Expr.h:5302
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic, and whose semantics are that of the sole contained initializer)?
Definition Expr.cpp:2462
const Expr * getInit(unsigned Init) const
Definition Expr.h:5356
FPExceptionModeKind
Possible floating point exception behavior.
@ FPE_Default
Used internally to represent initial unspecified value.
@ FPE_Strict
Strictly preserve the floating-point exception semantics.
@ FPE_MayTrap
Transformations do not cause new exceptions but may hide some.
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
unsigned getLine() const
Return the presumed line number of this location.
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition Expr.h:6803
Expr * getResultExpr()
Return the result-bearing expression, or null if there is none.
Definition Expr.h:6851
ArrayRef< Expr * > semantics()
Definition Expr.h:6875
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2914
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
This class handles loading and caching of source files into memory.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location that the given SourceLocation specifies.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition Stmt.h:86
StmtClass getStmtClass() const
Definition Stmt.h:1494
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpreted by SourceManager.
Definition Stmt.cpp:343
const char * getStmtClassName() const
Definition Stmt.cpp:86
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:355
bool isVoidType() const
Definition TypeBase.h:9034
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or because it is the injected-class-name type of a class template or class template partial specialization.
Definition Type.h:26
bool isAnyComplexType() const
Definition TypeBase.h:8803
TypeClass getTypeClass() const
Definition TypeBase.h:2433
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9261
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:2203
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:4016
Expr * getSizeExpr() const
Definition TypeBase.h:4030
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of an object with static storage duration.
static bool previousOpIsNonYieldingCleanup(mlir::Block *block)
static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt)
static bool mayDropFunctionReturn(const ASTContext &astContext, QualType returnType)
static mlir::Value emitArgumentDemotion(CIRGenFunction &cgf, const VarDecl *var, mlir::Value value)
An argument came in as a promoted argument; demote it back to its declared type.
static void eraseEmptyAndUnusedBlocks(cir::FuncOp func)
static std::variant< LValue, RValue > emitPseudoObjectExpr(CIRGenFunction &cgf, const PseudoObjectExpr *e, bool forLValue, AggValueSlot slot)
static llvm::fp::ExceptionBehavior toConstrainedExceptMd(LangOptions::FPExceptionModeKind kind)
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
@ Ctor_Base
Base object ctor.
Definition ABI.h:26
@ Ctor_Complete
Complete object ctor.
Definition ABI.h:25
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
CXXDtorType
C++ destructor types.
Definition ABI.h:34
@ Dtor_VectorDeleting
Vector deleting dtor.
Definition ABI.h:40
@ Dtor_Comdat
The COMDAT used for dtors.
Definition ABI.h:38
@ Dtor_Unified
GCC-style unified dtor.
Definition ABI.h:39
@ Dtor_Base
Base object dtor.
Definition ABI.h:37
@ Dtor_Complete
Complete object dtor.
Definition ABI.h:36
@ Dtor_Deleting
Deleting dtor.
Definition ABI.h:35
U cast(CodeGen::Address addr)
Definition Address.h:327
static bool fastMathFuncAttributes()
static bool vtableInitialization()
static bool constructABIArgDirectExtend()
static bool runCleanupsScope()
static bool emitTypeCheck()
static bool fastMathGuard()
static bool fastMathFlags()
static bool generateDebugInfo()
static bool cleanupWithPreservedValues()
static bool incrementProfileCounter()
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
llvm::ArrayRef< mlir::Block * > getRetBlocks()
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
mlir::Location getRetLoc(mlir::Block *b)
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650