clang 23.0.0git
CIRGenFunction.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Internal per-function state used for AST-to-ClangIR code gen
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
14
15#include "CIRGenCXXABI.h"
16#include "CIRGenCall.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/Location.h"
19#include "clang/AST/Attr.h"
20#include "clang/AST/ExprCXX.h"
23#include "llvm/ADT/ScopeExit.h"
24#include "llvm/IR/FPEnv.h"
25
26#include <cassert>
27
28namespace clang::CIRGen {
29
// NOTE(review): the constructor's signature line is missing from this view
// (extraction artifact); this fragment is the tail of the
// CIRGenFunction::CIRGenFunction(...) parameter list, member-initializer
// list, and body — confirm the full signature upstream.
31 bool suppressNewContext)
32 : CIRGenTypeCache(cgm), cgm{cgm}, builder(builder) {
// Register this function's state with the EH cleanup stack so pushed
// cleanups can refer back to it.
33 ehStack.setCGF(this);
34}
35
37
38// This is copied from clang/lib/CodeGen/CodeGenFunction.cpp
// NOTE(review): the function's signature line is missing from this view
// (extraction artifact). The body classifies a canonical QualType into
// cir::TEK_Scalar / cir::TEK_Complex / cir::TEK_Aggregate — presumably
// CIRGenFunction::getEvaluationKind(QualType type); confirm upstream.
40 type = type.getCanonicalType();
41 while (true) {
42 switch (type->getTypeClass()) {
// The TypeNodes.inc expansion routes every non-canonical or dependent type
// class to the unreachable below; only canonical classes reach the explicit
// cases that follow.
43#define TYPE(name, parent)
44#define ABSTRACT_TYPE(name, parent)
45#define NON_CANONICAL_TYPE(name, parent) case Type::name:
46#define DEPENDENT_TYPE(name, parent) case Type::name:
47#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
48#include "clang/AST/TypeNodes.inc"
49 llvm_unreachable("non-canonical or dependent type in IR-generation");
50
51 case Type::Auto:
52 case Type::DeducedTemplateSpecialization:
53 llvm_unreachable("undeduced type in IR-generation");
54
55 // Various scalar types.
56 case Type::Builtin:
57 case Type::Pointer:
58 case Type::BlockPointer:
59 case Type::LValueReference:
60 case Type::RValueReference:
61 case Type::MemberPointer:
62 case Type::Vector:
63 case Type::ExtVector:
64 case Type::ConstantMatrix:
65 case Type::FunctionProto:
66 case Type::FunctionNoProto:
67 case Type::Enum:
68 case Type::ObjCObjectPointer:
69 case Type::Pipe:
70 case Type::BitInt:
71 case Type::OverflowBehavior:
72 case Type::HLSLAttributedResource:
73 case Type::HLSLInlineSpirv:
74 return cir::TEK_Scalar;
75
76 // Complexes.
77 case Type::Complex:
78 return cir::TEK_Complex;
79
80 // Arrays, records, and Objective-C objects.
81 case Type::ConstantArray:
82 case Type::IncompleteArray:
83 case Type::VariableArray:
84 case Type::Record:
85 case Type::ObjCObject:
86 case Type::ObjCInterface:
87 case Type::ArrayParameter:
88 return cir::TEK_Aggregate;
89
// Atomics are evaluated as their underlying value type: strip the wrapper
// and re-classify on the next loop iteration.
90 // We operate on atomic values according to their underlying type.
91 case Type::Atomic:
92 type = cast<AtomicType>(type)->getValueType();
93 continue;
94 }
95 llvm_unreachable("unknown type kind!");
96 }
97}
98
// Convert a Clang type to the CIR type used for in-memory storage
// (delegates to CIRGenTypes). NOTE(review): the signature line is missing
// from this view — presumably CIRGenFunction::convertTypeForMem(QualType t);
// confirm upstream.
100 return cgm.getTypes().convertTypeForMem(t);
101}
102
// Convert a Clang type to its CIR representation (delegates to CIRGenTypes).
// NOTE(review): the signature line is missing from this view — presumably
// CIRGenFunction::convertType(QualType t); confirm upstream.
104 return cgm.getTypes().convertType(t);
105}
106
// NOTE(review): the signature line is missing from this view — presumably
// mlir::Location CIRGenFunction::getLoc(SourceLocation srcLoc). The line
// binding `sm` (a SourceManager reference) is also missing; confirm upstream.
108 // Some AST nodes might contain invalid source locations (e.g.
109 // CXXDefaultArgExpr), workaround that to still get something out.
110 if (srcLoc.isValid()) {
// Map the clang location to file/line/column and build an MLIR location.
112 PresumedLoc pLoc = sm.getPresumedLoc(srcLoc);
113 StringRef filename = pLoc.getFilename();
114 return mlir::FileLineColLoc::get(builder.getStringAttr(filename),
115 pLoc.getLine(), pLoc.getColumn());
116 }
117 // We expect to have a currSrcLoc set, so we assert here, but it isn't
118 // critical for the correctness of compilation, so in non-assert builds
119 // we fallback on using an unknown location.
120 assert(currSrcLoc && "expected to inherit some source location");
121 if (currSrcLoc)
122 return *currSrcLoc;
123 // We're brave, but time to give up.
124 return builder.getUnknownLoc();
125}
126
127mlir::Location CIRGenFunction::getLoc(SourceRange srcLoc) {
128 // Some AST nodes might contain invalid source locations (e.g.
129 // CXXDefaultArgExpr), workaround that to still get something out.
130 if (srcLoc.isValid()) {
131 mlir::Location beg = getLoc(srcLoc.getBegin());
132 mlir::Location end = getLoc(srcLoc.getEnd());
133 SmallVector<mlir::Location, 2> locs = {beg, end};
134 mlir::Attribute metadata;
135 return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
136 }
137 // We expect to have a currSrcLoc set, so we assert here, but it isn't
138 // critical for the correctness of compilation, so in non-assert builds
139 // we fallback on using an unknown location.
140 assert(currSrcLoc && "expected to inherit some source location");
141 if (currSrcLoc)
142 return *currSrcLoc;
143 // We're brave, but time to give up.
144 return builder.getUnknownLoc();
145}
146
147mlir::Location CIRGenFunction::getLoc(mlir::Location lhs, mlir::Location rhs) {
148 SmallVector<mlir::Location, 2> locs = {lhs, rhs};
149 mlir::Attribute metadata;
150 return mlir::FusedLoc::get(locs, metadata, &getMLIRContext());
151}
152
153bool CIRGenFunction::containsLabel(const Stmt *s, bool ignoreCaseStmts) {
154 // Null statement, not a label!
155 if (!s)
156 return false;
157
158 // If this is a label, we have to emit the code, consider something like:
159 // if (0) { ... foo: bar(); } goto foo;
160 //
161 // TODO: If anyone cared, we could track __label__'s, since we know that you
162 // can't jump to one from outside their declared region.
163 if (isa<LabelStmt>(s))
164 return true;
165
166 // If this is a case/default statement, and we haven't seen a switch, we
167 // have to emit the code.
168 if (isa<SwitchCase>(s) && !ignoreCaseStmts)
169 return true;
170
171 // If this is a switch statement, we want to ignore case statements when we
172 // recursively process the sub-statements of the switch. If we haven't
173 // encountered a switch statement, we treat case statements like labels, but
174 // if we are processing a switch statement, case statements are expected.
175 if (isa<SwitchStmt>(s))
176 ignoreCaseStmts = true;
177
178 // Scan subexpressions for verboten labels.
179 return std::any_of(s->child_begin(), s->child_end(),
180 [=](const Stmt *subStmt) {
181 return containsLabel(subStmt, ignoreCaseStmts);
182 });
183}
184
185/// If the specified expression does not fold to a constant, or if it does but
186/// contains a label, return false. If it constant folds return true and set
187/// the boolean result in Result.
188bool CIRGenFunction::constantFoldsToBool(const Expr *cond, bool &resultBool,
189 bool allowLabels) {
190 llvm::APSInt resultInt;
191 if (!constantFoldsToSimpleInteger(cond, resultInt, allowLabels))
192 return false;
193
194 resultBool = resultInt.getBoolValue();
195 return true;
196}
197
198/// If the specified expression does not fold to a constant, or if it does
199/// fold but contains a label, return false. If it constant folds, return
200/// true and set the folded value.
// NOTE(review): the first line of the signature is missing from this view —
// presumably `bool CIRGenFunction::constantFoldsToSimpleInteger(const Expr
// *cond,`; confirm upstream.
202 llvm::APSInt &resultInt,
203 bool allowLabels) {
204 // FIXME: Rename and handle conversion of other evaluatable things
205 // to bool.
206 Expr::EvalResult result;
207 if (!cond->EvaluateAsInt(result, getContext()))
208 return false; // Not foldable, not integer or not fully evaluatable.
209
210 llvm::APSInt intValue = result.Val.getInt();
// A label inside the folded expression still has to be emitted, so the fold
// is rejected unless the caller explicitly allows labels.
211 if (!allowLabels && containsLabel(cond))
212 return false; // Contains a label.
213
214 resultInt = intValue;
215 return true;
216}
217
218void CIRGenFunction::emitAndUpdateRetAlloca(QualType type, mlir::Location loc,
219 CharUnits alignment) {
220 if (!type->isVoidType()) {
221 mlir::Value addr = emitAlloca("__retval", convertType(type), loc, alignment,
222 /*insertIntoFnEntryBlock=*/false);
223 fnRetAlloca = addr;
224 returnValue = Address(addr, alignment);
225 }
226}
227
228void CIRGenFunction::declare(mlir::Value addrVal, const Decl *var, QualType ty,
229 mlir::Location loc, CharUnits alignment,
230 bool isParam) {
231 assert(isa<NamedDecl>(var) && "Needs a named decl");
232 assert(!symbolTable.count(var) && "not supposed to be available just yet");
233
234 auto allocaOp = addrVal.getDefiningOp<cir::AllocaOp>();
235 assert(allocaOp && "expected cir::AllocaOp");
236
237 if (isParam)
238 allocaOp.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
239 if (ty->isReferenceType() || ty.isConstQualified())
240 allocaOp.setConstantAttr(mlir::UnitAttr::get(&getMLIRContext()));
241
242 symbolTable.insert(var, allocaOp);
243}
244
// NOTE(review): the signature line is missing from this view; this is the
// scope-exit routine of CIRGenFunction::LexicalScope (presumably
// LexicalScope::cleanup) — confirm upstream. The declaration of the local
// `retBlocks` container used below is also missing; confirm.
246 CIRGenBuilderTy &builder = cgf.builder;
247 LexicalScope *localScope = cgf.curLexScope;
248
249 // Process all return blocks — emit cir.return ops.
250 // TODO(cir): Handle returning from a switch statement through a cleanup
251 // block. We can't simply jump to the cleanup block, because the cleanup block
252 // is not part of the case region. Either reemit all cleanups in the return
253 // block or wait for MLIR structured control flow to support early exits.
255 for (mlir::Block *retBlock : localScope->getRetBlocks()) {
256 mlir::OpBuilder::InsertionGuard guard(builder);
257 builder.setInsertionPointToEnd(retBlock);
258 retBlocks.push_back(retBlock);
259 mlir::Location retLoc = localScope->getRetLoc(retBlock);
260 emitReturn(retLoc);
261 }
262
263 // Pop cleanup scopes from the EH stack. In CIR, this emits cleanup code
264 // into the cleanup regions of cir.cleanup.scope ops — no CFG-level cleanup
265 // blocks or branches are needed.
266 if (performCleanup) {
268 forceCleanup();
269 }
270
// Nothing more to do if the current block already has a terminator (or no
// block exists at all, which can happen for global initializers).
271 mlir::Block *curBlock = builder.getBlock();
272 if (isGlobalInit() && !curBlock)
273 return;
274 if (curBlock->mightHaveTerminator() && curBlock->getTerminator())
275 return;
276
277 // Get rid of any empty block at the end of the scope. An empty non-entry
278 // block is created when a terminator (return/break/continue) is followed
279 // by unreachable code.
280 bool isEntryBlock = builder.getInsertionBlock()->isEntryBlock();
281 if (!isEntryBlock && curBlock->empty()) {
282 curBlock->erase();
// Return blocks that ended up unused are dead as well.
283 for (mlir::Block *retBlock : retBlocks) {
284 if (retBlock->getUses().empty())
285 retBlock->erase();
286 }
287 return;
288 }
289
290 if (localScope->depth == 0) {
291 // Reached the end of the function.
292 if (localScope->getRetBlocks().size() == 1) {
293 mlir::Block *retBlock = localScope->getRetBlocks()[0];
294 mlir::Location retLoc = localScope->getRetLoc(retBlock);
295 if (retBlock->getUses().empty()) {
296 retBlock->erase();
297 } else {
298 cir::BrOp::create(builder, retLoc, retBlock);
299 return;
300 }
301 }
302 emitImplicitReturn();
303 return;
304 }
305
306 // End of any local scope != function.
307 // Ternary ops have to deal with matching arms for yielding types
308 // and do return a value, it must do its own cir.yield insertion.
309 if (!localScope->isTernary() && !curBlock->mightHaveTerminator()) {
310 !retVal ? cir::YieldOp::create(builder, localScope->endLoc)
311 : cir::YieldOp::create(builder, localScope->endLoc, retVal);
312 }
313}
314
315cir::ReturnOp CIRGenFunction::LexicalScope::emitReturn(mlir::Location loc) {
316 CIRGenBuilderTy &builder = cgf.getBuilder();
317
318 auto fn = dyn_cast<cir::FuncOp>(cgf.curFn);
319 assert(fn && "emitReturn from non-function");
320
321 if (!fn.getFunctionType().hasVoidReturn()) {
322 // Load the value from `__retval` and return it via the `cir.return` op.
323 auto value = cir::LoadOp::create(
324 builder, loc, fn.getFunctionType().getReturnType(), *cgf.fnRetAlloca);
325 return cir::ReturnOp::create(builder, loc,
326 llvm::ArrayRef(value.getResult()));
327 }
328 return cir::ReturnOp::create(builder, loc);
329}
330
331// This is copied from CodeGenModule::MayDropFunctionReturn. This is a
332// candidate for sharing between CIRGen and CodeGen.
333static bool mayDropFunctionReturn(const ASTContext &astContext,
334 QualType returnType) {
335 // We can't just discard the return value for a record type with a complex
336 // destructor or a non-trivially copyable type.
337 if (const auto *classDecl = returnType->getAsCXXRecordDecl())
338 return classDecl->hasTrivialDestructor();
339 return returnType.isTriviallyCopyableType(astContext);
340}
341
342static bool previousOpIsNonYieldingCleanup(mlir::Block *block) {
343 if (block->empty())
344 return false;
345 mlir::Operation *op = &block->back();
346 auto cleanupScopeOp = mlir::dyn_cast<cir::CleanupScopeOp>(op);
347 if (!cleanupScopeOp)
348 return false;
349
350 // Check whether the body region of the cleanup scope exits via cir.yield.
351 // Exits via cir.return or cir.goto do not fall through to the operation
352 // following the cleanup scope, and exits via break, continue, and resume
353 // are not expected here.
354 for (mlir::Block &bodyBlock : cleanupScopeOp.getBodyRegion()) {
355 if (bodyBlock.mightHaveTerminator()) {
356 if (mlir::isa<cir::YieldOp>(bodyBlock.getTerminator()))
357 return false;
358 assert(!mlir::isa<cir::BreakOp>(bodyBlock.getTerminator()) &&
359 !mlir::isa<cir::ContinueOp>(bodyBlock.getTerminator()) &&
360 !mlir::isa<cir::ResumeOp>(bodyBlock.getTerminator()));
361 }
362 }
363 return true;
364}
365
366void CIRGenFunction::LexicalScope::emitImplicitReturn() {
367 CIRGenBuilderTy &builder = cgf.getBuilder();
368 LexicalScope *localScope = cgf.curLexScope;
369
370 const auto *fd = cast<clang::FunctionDecl>(cgf.curGD.getDecl());
371
372 // In C++, flowing off the end of a non-void function is always undefined
373 // behavior. In C, flowing off the end of a non-void function is undefined
374 // behavior only if the non-existent return value is used by the caller.
375 // That influences whether the terminating op is trap, unreachable, or
376 // return.
377 if (cgf.getLangOpts().CPlusPlus && !fd->hasImplicitReturnZero() &&
378 !cgf.sawAsmBlock && !fd->getReturnType()->isVoidType() &&
379 builder.getInsertionBlock() &&
380 !previousOpIsNonYieldingCleanup(builder.getInsertionBlock())) {
381 bool shouldEmitUnreachable =
382 cgf.cgm.getCodeGenOpts().StrictReturn ||
383 !mayDropFunctionReturn(fd->getASTContext(), fd->getReturnType());
384
385 if (shouldEmitUnreachable) {
// NOTE(review): one line is missing from this view at the start of this
// branch (extraction artifact) — confirm upstream before editing.
// At -O0 emit a trap (debuggable); otherwise emit unreachable so the
// optimizer can exploit the UB.
387 if (cgf.cgm.getCodeGenOpts().OptimizationLevel == 0)
388 cir::TrapOp::create(builder, localScope->endLoc);
389 else
390 cir::UnreachableOp::create(builder, localScope->endLoc);
// Everything after a trap/unreachable is dead; drop the insertion point.
391 builder.clearInsertionPoint();
392 return;
393 }
394 }
395
396 (void)emitReturn(localScope->endLoc);
397}
398
// NOTE(review): the signature line is missing from this view (extraction
// artifact); this fragment walks the enclosing LexicalScope chain via
// parentScope and returns the innermost scope's try construct, or nullptr
// when no enclosing scope is a try — confirm the exact name upstream.
400 LexicalScope *scope = this;
401 while (scope) {
402 if (scope->isTry())
403 return scope->getTry();
404 scope = scope->parentScope;
405 }
406 return nullptr;
407}
408
409/// An argument came in as a promoted argument; demote it back to its
410/// declared type.
411static mlir::Value emitArgumentDemotion(CIRGenFunction &cgf, const VarDecl *var,
412 mlir::Value value) {
413 mlir::Type ty = cgf.convertType(var->getType());
414
415 // This can happen with promotions that actually don't change the
416 // underlying type, like the enum promotions.
417 if (value.getType() == ty)
418 return value;
419
420 assert((mlir::isa<cir::IntType>(ty) || cir::isAnyFloatingPointType(ty)) &&
421 "unexpected promotion type");
422
423 if (mlir::isa<cir::IntType>(ty))
424 return cgf.getBuilder().CIRBaseBuilderTy::createIntCast(value, ty);
425
426 return cgf.getBuilder().createFloatingCast(value, ty);
427}
428
// NOTE(review): the first line of the signature is missing from this view
// (extraction artifact) — presumably
// `void CIRGenFunction::emitFunctionProlog(const FunctionArgList &args,`;
// confirm upstream.
430 mlir::Block *entryBB,
431 const FunctionDecl *fd,
432 SourceLocation bodyBeginLoc) {
433 // Naked functions don't have prologues.
434 if (fd && fd->hasAttr<NakedAttr>()) {
435 cgm.errorNYI(bodyBeginLoc, "naked function decl");
436 }
437
438 // Declare all the function arguments in the symbol table.
439 for (const auto nameValue : llvm::zip(args, entryBB->getArguments())) {
440 const VarDecl *paramVar = std::get<0>(nameValue);
441 mlir::Value paramVal = std::get<1>(nameValue);
442 CharUnits alignment = getContext().getDeclAlign(paramVar);
443 mlir::Location paramLoc = getLoc(paramVar->getSourceRange());
444 paramVal.setLoc(paramLoc);
445
// Create a stack slot for the parameter in the function entry block and
// register it in the symbol table / local-var map.
446 mlir::Value addrVal =
447 emitAlloca(cast<NamedDecl>(paramVar)->getName(),
448 convertType(paramVar->getType()), paramLoc, alignment,
449 /*insertIntoFnEntryBlock=*/true);
450
451 declare(addrVal, paramVar, paramVar->getType(), paramLoc, alignment,
452 /*isParam=*/true);
453
454 setAddrOfLocalVar(paramVar, Address(addrVal, alignment));
455
// K&R-promoted parameters arrive widened; demote them back to the declared
// type before storing.
456 bool isPromoted = isa<ParmVarDecl>(paramVar) &&
457 cast<ParmVarDecl>(paramVar)->isKNRPromoted();
// NOTE(review): one line is missing from this view here (extraction
// artifact) — confirm upstream.
459 if (isPromoted)
460 paramVal = emitArgumentDemotion(*this, paramVar, paramVal);
461
462 // Location of the store to the param storage tracked as beginning of
463 // the function body.
464 mlir::Location fnBodyBegin = getLoc(bodyBeginLoc);
465 builder.CIRBaseBuilderTy::createStore(fnBodyBegin, paramVal, addrVal);
466 }
467 assert(builder.getInsertionBlock() && "Should be valid");
468}
469
// NOTE(review): the first lines of the signature are missing from this view
// (extraction artifact) — this is CIRGenFunction::startFunction(...); only
// the tail of the parameter list is visible below. Several single statements
// inside the body are also missing (flagged inline). Confirm upstream.
471 cir::FuncOp fn, cir::FuncType funcType,
473 SourceLocation startLoc) {
474 assert(!curFn &&
475 "CIRGenFunction can only be used for one function at a time");
476
477 curFn = fn;
478
479 const Decl *d = gd.getDecl();
480
481 didCallStackSave = false;
482 curCodeDecl = d;
483 const auto *fd = dyn_cast_or_null<FunctionDecl>(d);
484 curFuncDecl = (d ? d->getNonClosureContext() : nullptr);
485
// Remember the EH-stack depth at entry so the epilogue can tell whether any
// cleanups were pushed afterwards.
486 prologueCleanupDepth = ehStack.stable_begin();
487
488 mlir::Block *entryBB = &fn.getBlocks().front();
489 builder.setInsertionPointToStart(entryBB);
490
491 // Determine the function body begin location for the prolog.
492 // If fd is null or has no body, use startLoc as fallback.
493 SourceLocation bodyBeginLoc = startLoc;
494 if (fd) {
495 if (Stmt *body = fd->getBody())
496 bodyBeginLoc = body->getBeginLoc();
497 else
498 bodyBeginLoc = fd->getLocation();
499 }
500
501 emitFunctionProlog(args, entryBB, fd, bodyBeginLoc);
502
503 // When the current function is not void, create an address to store the
504 // result value.
505 if (!returnType->isVoidType()) {
506 // Determine the function body end location.
507 // If fd is null or has no body, use loc as fallback.
508 SourceLocation bodyEndLoc = loc;
509 if (fd) {
510 if (Stmt *body = fd->getBody())
511 bodyEndLoc = body->getEndLoc();
512 else
513 bodyEndLoc = fd->getLocation();
514 }
515 emitAndUpdateRetAlloca(returnType, getLoc(bodyEndLoc),
516 getContext().getTypeAlignInChars(returnType));
517
518 // If this is an implicit-return-zero function, initialize the return
519 // value. This mirrors the implicit-return-zero handling in classic
520 // codegen's EmitFunctionProlog (CGCall.cpp). It is done here, after
521 // emitAndUpdateRetAlloca, because in CIR the return slot is created
522 // after the prolog (the opposite of classic codegen, where ReturnValue
523 // is set up before EmitFunctionProlog runs).
524 // TODO(cir): Align prolog handling with classic codegen.
525 if (fd && fd->hasImplicitReturnZero()) {
526 mlir::Type cirRetTy = convertType(returnType.getUnqualifiedType());
527 mlir::Location bodyBeginMLIRLoc = getLoc(bodyBeginLoc);
528 mlir::Value zero = builder.getNullValue(cirRetTy, bodyBeginMLIRLoc);
529 builder.CIRBaseBuilderTy::createStore(bodyBeginMLIRLoc, zero,
530 returnValue.getPointer());
531 }
532 }
533
534 if (isa_and_nonnull<CXXMethodDecl>(d) &&
535 cast<CXXMethodDecl>(d)->isInstance()) {
536 cgm.getCXXABI().emitInstanceFunctionProlog(loc, *this);
537
538 const auto *md = cast<CXXMethodDecl>(d);
539 if (md->getParent()->isLambda() && md->getOverloadedOperator() == OO_Call) {
540 // We're in a lambda.
541 auto fn = dyn_cast<cir::FuncOp>(curFn);
542 assert(fn && "lambda in non-function region");
543 fn.setLambda(true);
544
545 // Figure out the captures.
546 md->getParent()->getCaptureFields(lambdaCaptureFields,
// NOTE(review): the continuation of this call and the condition guarding
// the '*this'-capture handling below are missing from this view — confirm
// upstream.
549 // If the lambda captures the object referred to by '*this' - either by
550 // value or by reference, make sure CXXThisValue points to the correct
551 // object.
552
553 // Get the lvalue for the field (which is a copy of the enclosing object
554 // or contains the address of the enclosing object).
555 LValue thisFieldLValue =
// NOTE(review): the initializer expression for thisFieldLValue is missing
// from this view — confirm upstream.
557 if (!lambdaThisCaptureField->getType()->isPointerType()) {
558 // If the enclosing object was captured by value, just use its
559 // address. Sign this pointer.
560 cxxThisValue = thisFieldLValue.getPointer();
561 } else {
562 // Load the lvalue pointed to by the field, since '*this' was captured
563 // by reference.
// NOTE(review): the assignment target line (presumably `cxxThisValue =`) is
// missing from this view — confirm upstream.
565 emitLoadOfLValue(thisFieldLValue, SourceLocation()).getValue();
566 }
567 }
568 for (auto *fd : md->getParent()->fields()) {
569 if (fd->hasCapturedVLAType())
570 cgm.errorNYI(loc, "lambda captured VLA type");
571 }
572 } else {
573 // Not in a lambda; just use 'this' from the method.
574 // FIXME: Should we generate a new load for each use of 'this'? The fast
575 // register allocator would be happier...
// NOTE(review): the statement loading 'this' is missing from this view —
// confirm upstream.
577 }
578
581 }
582
583 // If any of the arguments have a variably modified type, make sure to
584 // emit the type size, but only if the function is not naked. Naked functions
585 // have no prolog to run this evaluation.
586 if (!fd || !fd->hasAttr<NakedAttr>()) {
587 for (const VarDecl *vd : args) {
588 // Dig out the type as written from ParmVarDecls; it's unclear whether
589 // the standard (C99 6.9.1p10) requires this, but we're following the
590 // precedent set by gcc.
591 QualType ty;
592 if (const auto *pvd = dyn_cast<ParmVarDecl>(vd))
593 ty = pvd->getOriginalType();
594 else
595 ty = vd->getType();
596 if (ty->isVariablyModifiedType())
// NOTE(review): the statement emitting the variably-modified type size is
// missing from this view — confirm upstream.
598 }
599 }
600}
601
// NOTE(review): the signature line is missing from this view (extraction
// artifact); this routine resolves each pending cir.blockaddress to its
// already-emitted cir.label and then clears the pending list — confirm the
// exact name upstream.
603 for (cir::BlockAddressOp &blockAddress : cgm.unresolvedBlockAddressToLabel) {
604 cir::LabelOp labelOp =
605 cgm.lookupBlockAddressInfo(blockAddress.getBlockAddrInfo());
606 assert(labelOp && "expected cir.labelOp to already be emitted");
607 cgm.updateResolvedBlockAddress(blockAddress, labelOp);
608 }
609 cgm.unresolvedBlockAddressToLabel.clear();
610}
611
// NOTE(review): the signature, the early-exit condition for the `return`
// below, and the declarations of `succesors` / `rangeOperands` are missing
// from this view (extraction artifact) — confirm upstream before editing.
// This fragment emits the cir.indirectbr for the shared indirect-goto block,
// listing every address-taken label as a successor.
614 return;
617 mlir::OpBuilder::InsertionGuard guard(builder);
618 builder.setInsertionPointToEnd(indirectGotoBlock);
619 for (auto &[blockAdd, labelOp] : cgm.blockAddressToLabel) {
620 succesors.push_back(labelOp->getBlock());
621 rangeOperands.push_back(labelOp->getBlock()->getArguments());
622 }
623 cir::IndirectBrOp::create(builder, builder.getUnknownLoc(),
624 indirectGotoBlock->getArgument(0), false,
625 rangeOperands, succesors);
626 cgm.blockAddressToLabel.clear();
627}
628
// NOTE(review): the signature line is missing from this view — given the
// call site in generateCode, this is presumably
// CIRGenFunction::finishFunction(SourceLocation endLoc); confirm upstream.
630 // Resolve block address-to-label mappings, then emit the indirect branch
631 // with the corresponding targets.
// NOTE(review): the statements performing that resolution are missing from
// this view (extraction artifact).
634
635 // If a label address was taken but no indirect goto was used, we can't remove
636 // the block argument here. Instead, we mark the 'indirectbr' op
637 // as poison so that the cleanup can be deferred to lowering, since the
638 // verifier doesn't allow the 'indirectbr' target address to be null.
639 if (indirectGotoBlock && indirectGotoBlock->hasNoPredecessors()) {
640 auto indrBr = cast<cir::IndirectBrOp>(indirectGotoBlock->front());
641 indrBr.setPoison(true);
642 }
643
644 // Pop any cleanups that might have been associated with the
645 // parameters. Do this in whatever block we're currently in; it's
646 // important to do this before we enter the return block or return
647 // edges will be *really* confused.
648 // TODO(cir): Use prologueCleanupDepth here.
649 bool hasCleanups = ehStack.stable_begin() != prologueCleanupDepth;
650 if (hasCleanups) {
// NOTE(review): the cleanup-popping statement inside this branch is missing
// from this view (extraction artifact) — confirm upstream.
652 // FIXME(cir): should we clearInsertionPoint? breaks many testcases
654 }
655
656 assert(deferredConditionalCleanupStack.empty() &&
657 "deferred conditional cleanups were not consumed by a "
658 "FullExprCleanupScope");
659}
660
661mlir::LogicalResult CIRGenFunction::emitFunctionBody(const clang::Stmt *body) {
662 // We start with function level scope for variables.
// NOTE(review): the statement establishing that scope is missing from this
// view (extraction artifact) — confirm upstream.
664
// A compound statement at the top level shares the function-level scope
// rather than opening its own.
665 if (const CompoundStmt *block = dyn_cast<CompoundStmt>(body))
666 return emitCompoundStmtWithoutScope(*block);
667
668 return emitStmt(body, /*useCurrentScope=*/true);
669}
670
671static void eraseEmptyAndUnusedBlocks(cir::FuncOp func) {
672 // Remove any leftover blocks that are unreachable and empty, since they do
673 // not represent unreachable code useful for warnings nor anything deemed
674 // useful in general.
675 SmallVector<mlir::Block *> blocksToDelete;
676 for (mlir::Block &block : func.getBlocks()) {
677 if (block.empty() && block.getUses().empty())
678 blocksToDelete.push_back(&block);
679 }
680 for (mlir::Block *block : blocksToDelete)
681 block->erase();
682}
683
684cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
685 cir::FuncType funcType) {
// NOTE(review): several single lines are missing from this view (extraction
// artifact) — most visibly the emission statements inside the constructor,
// lambda-static-invoker, and defaulted-assignment branches below. Confirm
// against the upstream file before editing.
686 const auto *funcDecl = cast<FunctionDecl>(gd.getDecl());
687 curGD = gd;
688
689 if (funcDecl->isInlineBuiltinDeclaration()) {
690 // When generating code for a builtin with an inline declaration, use a
691 // mangled name to hold the actual body, while keeping an external
692 // declaration in case the function pointer is referenced somewhere.
693 std::string fdInlineName = (cgm.getMangledName(funcDecl) + ".inline").str();
694 cir::FuncOp clone =
695 mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
696 if (!clone) {
697 mlir::OpBuilder::InsertionGuard guard(builder);
698 builder.setInsertionPoint(fn);
699 clone = cir::FuncOp::create(builder, fn.getLoc(), fdInlineName,
700 fn.getFunctionType());
701 cgm.insertGlobalSymbol(clone);
702 clone.setLinkage(cir::GlobalLinkageKind::InternalLinkage);
703 clone.setSymVisibility("private");
704 clone.setInlineKind(cir::InlineKind::AlwaysInline);
705 }
706 fn.setLinkage(cir::GlobalLinkageKind::ExternalLinkage);
707 fn.setSymVisibility("private");
708 fn = clone;
709 } else {
710 // Detect the unusual situation where an inline version is shadowed by a
711 // non-inline version. In that case we should pick the external one
712 // everywhere. That's GCC behavior too.
713 for (const FunctionDecl *pd = funcDecl->getPreviousDecl(); pd;
714 pd = pd->getPreviousDecl()) {
715 if (LLVM_UNLIKELY(pd->isInlineBuiltinDeclaration())) {
716 std::string inlineName = funcDecl->getName().str() + ".inline";
717 if (auto inlineFn = mlir::cast_or_null<cir::FuncOp>(
718 cgm.getGlobalValue(inlineName))) {
719 // Replace all uses of the .inline function with the regular function
720 // FIXME: This performs a linear walk over the module. Introduce some
721 // caching here.
722 if (inlineFn
723 .replaceAllSymbolUses(fn.getSymNameAttr(), cgm.getModule())
724 .failed())
725 llvm_unreachable("Failed to replace inline builtin symbol uses");
726 cgm.eraseGlobalSymbol(inlineFn);
727 inlineFn.erase();
728 }
729 break;
730 }
731 }
732 }
733
734 SourceLocation loc = funcDecl->getLocation();
735 Stmt *body = funcDecl->getBody();
736 SourceRange bodyRange =
737 body ? body->getSourceRange() : funcDecl->getLocation();
738
739 SourceLocRAIIObject fnLoc{*this, loc.isValid() ? getLoc(loc)
740 : builder.getUnknownLoc()};
741
// Invalid clang locations degrade to builder.getUnknownLoc().
742 auto validMLIRLoc = [&](clang::SourceLocation clangLoc) {
743 return clangLoc.isValid() ? getLoc(clangLoc) : builder.getUnknownLoc();
744 };
745 const mlir::Location fusedLoc = mlir::FusedLoc::get(
747 {validMLIRLoc(bodyRange.getBegin()), validMLIRLoc(bodyRange.getEnd())});
748 mlir::Block *entryBB = fn.addEntryBlock();
749
750 FunctionArgList args;
751 QualType retTy = buildFunctionArgList(gd, args);
752
753 // Create a scope in the symbol table to hold variable declarations.
755 {
756 LexicalScope lexScope(*this, fusedLoc, entryBB);
757
758 // Emit the standard function prologue.
759 startFunction(gd, retTy, fn, funcType, args, loc, bodyRange.getBegin());
760
761 // Save parameters for coroutine function.
762 if (body && isa_and_nonnull<CoroutineBodyStmt>(body))
763 llvm::append_range(fnArgs, funcDecl->parameters());
764
765 if (isa<CXXDestructorDecl>(funcDecl)) {
766 emitDestructorBody(args);
767 } else if (isa<CXXConstructorDecl>(funcDecl)) {
// NOTE(review): the constructor-body emission statement is missing from
// this view — confirm upstream.
769 } else if (getLangOpts().CUDA && !getLangOpts().CUDAIsDevice &&
770 funcDecl->hasAttr<CUDAGlobalAttr>()) {
771 cgm.getCUDARuntime().emitDeviceStub(*this, fn, args);
772 } else if (isa<CXXMethodDecl>(funcDecl) &&
773 cast<CXXMethodDecl>(funcDecl)->isLambdaStaticInvoker()) {
774 // The lambda static invoker function is special, because it forwards or
775 // clones the body of the function call operator (but is actually
776 // static).
// NOTE(review): the invoker-body emission statement is missing from this
// view — confirm upstream.
778 } else if (funcDecl->isDefaulted() && isa<CXXMethodDecl>(funcDecl) &&
779 (cast<CXXMethodDecl>(funcDecl)->isCopyAssignmentOperator() ||
780 cast<CXXMethodDecl>(funcDecl)->isMoveAssignmentOperator())) {
781 // Implicit copy-assignment gets the same special treatment as implicit
782 // copy-constructors.
// NOTE(review): the assignment-operator emission statement is missing from
// this view — confirm upstream.
784 } else if (body) {
785 // Emit standard function body.
786 if (mlir::failed(emitFunctionBody(body))) {
787 return nullptr;
788 }
789 } else {
790 // Anything without a body should have been handled above.
791 llvm_unreachable("no definition for normal function");
792 }
793
794 if (mlir::failed(fn.verifyBody()))
795 return nullptr;
796
797 finishFunction(bodyRange.getEnd());
798 }
799
801 return fn;
802}
803
// NOTE(review): the function signature is missing from this view — this is
// presumably void CIRGenFunction::emitConstructorBody(FunctionArgList &args);
// confirm upstream.
806 const auto *ctor = cast<CXXConstructorDecl>(curGD.getDecl());
807 CXXCtorType ctorType = curGD.getCtorType();
808
809 assert((cgm.getTarget().getCXXABI().hasConstructorVariants() ||
810 ctorType == Ctor_Complete) &&
811 "can only generate complete ctor for this ABI");
812
813 cgm.setCXXSpecialMemberAttr(cast<cir::FuncOp>(curFn), ctor);
814
// A complete ctor that can validly delegate is emitted as a call to the base
// variant instead of duplicating the body.
815 if (ctorType == Ctor_Complete && isConstructorDelegationValid(ctor) &&
816 cgm.getTarget().getCXXABI().hasConstructorVariants()) {
817 emitDelegateCXXConstructorCall(ctor, Ctor_Base, args, ctor->getEndLoc());
818 return;
819 }
820
821 const FunctionDecl *definition = nullptr;
822 Stmt *body = ctor->getBody(definition);
823 assert(definition == ctor && "emitting wrong constructor body");
824
825 bool isTryBody = isa_and_nonnull<CXXTryStmt>(body);
826
827 // A type that handles the emission of the constructor body, that can be
828 // called directly for cases where we don't have a try-body, or passed to
829 // emitCXXTryStmt.
830 struct ctorTryBodyEmitter final : cxxTryBodyEmitter {
831 const CXXConstructorDecl *ctor = nullptr;
832 CXXCtorType ctorType;
833 FunctionArgList &args;
834 Stmt *emitterBody = nullptr;
835 ctorTryBodyEmitter(const CXXConstructorDecl *ctor, CXXCtorType ctorType,
836 FunctionArgList &args, bool isTryBody, Stmt *b)
837 : ctor(ctor), ctorType(ctorType), args(args),
838 emitterBody(isTryBody ? cast<CXXTryStmt>(b)->getTryBlock() : b) {}
839 ~ctorTryBodyEmitter() override = default;
840
841 mlir::LogicalResult operator()(CIRGenFunction &cgf) override {
// NOTE(review): two lines are missing from this view at the start of this
// operator (extraction artifact) — confirm upstream before editing.
844
845 //// TODO: in restricted cases, we can emit the vbase initializers of a
846 //// complete ctor and then delegate to the base ctor.
847
848 cgf.emitCtorPrologue(ctor, ctorType, args);
849 return cgf.emitStmt(emitterBody, /*useCurrentScope=*/true);
850 }
851 };
852
853 ctorTryBodyEmitter emitter{ctor, ctorType, args, isTryBody, body};
854 mlir::LogicalResult bodyRes =
855 isTryBody ? emitCXXTryStmt(*cast<CXXTryStmt>(body), emitter)
856 : emitter(*this);
857
858 // TODO(cir): propagate this result via mlir::logical result. Just
859 // unreachable now just to have it handled.
860 if (bodyRes.failed())
861 cgm.errorNYI(ctor->getSourceRange(),
862 "emitConstructorBody: emit body statement failed.");
863}
864
/// Emits the body of the current destructor.
  // curGD identifies both the destructor declaration and which variant
  // (base/complete/deleting) is currently being emitted.
  const CXXDestructorDecl *dtor = cast<CXXDestructorDecl>(curGD.getDecl());
  CXXDtorType dtorType = curGD.getDtorType();

  cgm.setCXXSpecialMemberAttr(cast<cir::FuncOp>(curFn), dtor);

  // For an abstract class, non-base destructors are never used (and can't
  // be emitted in general, because vbase dtors may not have been validated
  // by Sema), but the Itanium ABI doesn't make them optional and Clang may
  // in fact emit references to them from other compilations, so emit them
  // as functions containing a trap instruction.
  if (dtorType != Dtor_Base && dtor->getParent()->isAbstract()) {
    // Prefer the body's begin location for the trap when a body exists.
    SourceLocation loc =
        dtor->hasBody() ? dtor->getBody()->getBeginLoc() : dtor->getLocation();
    emitTrap(getLoc(loc), true);
    return;
  }

  Stmt *body = dtor->getBody();

  // The call to operator delete in a deleting destructor happens
  // outside of the function-try-block, which means it's always
  // possible to delegate the destructor body to the complete
  // destructor. Do so.
  if (dtorType == Dtor_Deleting || dtorType == Dtor_VectorDeleting) {
    cgm.errorNYI(dtor->getSourceRange(), "emitConditionalArrayDtorCall");
    RunCleanupsScope dtorEpilogue(*this);
    if (haveInsertPoint()) {
      // NOTE(review): thisTy is presumably the dtor's object parameter type;
      // its initialization is not visible in this copy -- verify upstream.
      emitCXXDestructorCall(dtor, Dtor_Complete, /*forVirtualBase=*/false,
                            /*delegating=*/false, loadCXXThisAddress(), thisTy);
    }
    return;
  }

  // If the body is a function-try-block, enter the try before
  // anything else.
  const bool isTryBody = isa_and_nonnull<CXXTryStmt>(body);
  if (isTryBody)
    cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");

  // Enter the epilogue cleanups.
  RunCleanupsScope dtorEpilogue(*this);

  // If this is the complete variant, just invoke the base variant;
  // the epilogue will destruct the virtual bases. But we can't do
  // this optimization if the body is a function-try-block, because
  // we'd introduce *two* handler blocks. In the Microsoft ABI, we
  // always delegate because we might not have a definition in this TU.
  switch (dtorType) {
  case Dtor_Unified:
    llvm_unreachable("not expecting a unified dtor");
  case Dtor_Comdat:
    llvm_unreachable("not expecting a COMDAT");
  case Dtor_Deleting:
    // Handled (and returned from) before the switch.
    llvm_unreachable("already handled deleting case");

  case Dtor_Complete:
    assert((body || getTarget().getCXXABI().isMicrosoft()) &&
           "can't emit a dtor without a body for non-Microsoft ABIs");

    // Enter the cleanup scopes for virtual bases.

    if (!isTryBody) {
      // Delegate the rest of destruction to the base variant.
      emitCXXDestructorCall(dtor, Dtor_Base, /*forVirtualBase=*/false,
                            /*delegating=*/false, loadCXXThisAddress(), thisTy);
      break;
    }

    // Fallthrough: act like we're in the base variant.
    [[fallthrough]];

  case Dtor_Base:
    assert(body);

    // Enter the cleanup scopes for fields and non-virtual bases.

    if (isTryBody) {
      cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
    } else if (body) {
      (void)emitStmt(body, /*useCurrentScope=*/true);
    } else {
      assert(dtor->isImplicit() && "bodyless dtor not implicit");
      // nothing to do besides what's in the epilogue
    }
    // -fapple-kext must inline any call to this dtor into
    // the caller's body.

    break;
  }

  // Jump out through the epilogue cleanups.
  dtorEpilogue.forceCleanup();

  // Exit the try if applicable.
  if (isTryBody)
    cgm.errorNYI(dtor->getSourceRange(), "function-try-block destructor");
}
976
/// Given a value of type T* that may not be to a complete object, construct
/// an l-value with the natural pointee alignment of T.
                                                              QualType ty) {
  // FIXME(cir): is it safe to assume Op->getResult(0) is valid? Perhaps
  // assert on the result type first.
  // NOTE(review): `val` comes from the (elided) signature line; it is the
  // pointer value whose pointee is being addressed -- verify upstream.
  LValueBaseInfo baseInfo;
  // Natural alignment of the pointee type, not of any particular object.
  CharUnits align = cgm.getNaturalTypeAlignment(ty, &baseInfo);
  return makeAddrLValue(Address(val, align), ty, baseInfo);
}
988
                                                      QualType ty) {
  // Build an l-value for `val` (from the elided signature line) using the
  // natural alignment of `ty` and the memory representation of the type.
  LValueBaseInfo baseInfo;
  CharUnits alignment = cgm.getNaturalTypeAlignment(ty, &baseInfo);
  Address addr(val, convertTypeForMem(ty), alignment);

  return makeAddrLValue(addr, ty, baseInfo);
}
997
// Map the LangOption for exception behavior into the corresponding enum in
// the IR.
static llvm::fp::ExceptionBehavior
  // NOTE(review): the function-name line and the LangOptions::FPE_* case
  // labels were lost from this copy; each return below pairs with one of
  // the FPE_* modes -- verify against upstream.
  switch (kind) {
    return llvm::fp::ebIgnore;
    return llvm::fp::ebMayTrap;
    return llvm::fp::ebStrict;
    // A default/unset mode must have been resolved before codegen.
    llvm_unreachable("expected explicitly initialized exception behavior");
  }
  llvm_unreachable("unsupported FP exception behavior");
}
1014
                                                 FunctionArgList &args) {
  // Builds the argument list for the function identified by `gd`, adding the
  // implicit `this` parameter, pass_object_size companions, and any ABI
  // structor parameters. Returns the declared return type.
  const auto *fd = cast<FunctionDecl>(gd.getDecl());
  QualType retTy = fd->getReturnType();

  // Instance methods get an implicit `this` parameter first.
  const auto *md = dyn_cast<CXXMethodDecl>(fd);
  if (md && md->isInstance()) {
    if (cgm.getCXXABI().hasThisReturn(gd))
      cgm.errorNYI(fd->getSourceRange(), "this return");
    else if (cgm.getCXXABI().hasMostDerivedReturn(gd))
      cgm.errorNYI(fd->getSourceRange(), "most derived return");
    cgm.getCXXABI().buildThisParam(*this, args);
  }

  // Inheriting constructors may not pass along the inherited parameters.
  bool passedParams = true;
  if (const auto *cd = dyn_cast<CXXConstructorDecl>(fd))
    if (auto inherited = cd->getInheritedConstructor())
      passedParams =
          getTypes().inheritingCtorHasParams(inherited, gd.getCtorType());

  if (passedParams) {
    for (auto *param : fd->parameters()) {
      args.push_back(param);
      if (!param->hasAttr<PassObjectSizeAttr>())
        continue;

      // pass_object_size parameters get a trailing implicit size_t argument,
      // remembered in sizeArguments for later lookup.
      auto *implicit = ImplicitParamDecl::Create(
          getContext(), param->getDeclContext(), param->getLocation(),
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamKind::Other);
      sizeArguments[param] = implicit;
      args.push_back(implicit);
    }
  }

  // Constructors/destructors may need extra ABI-specific parameters (e.g.
  // the VTT) appended by the C++ ABI.
  if (md && (isa<CXXConstructorDecl>(md) || isa<CXXDestructorDecl>(md)))
    cgm.getCXXABI().addImplicitStructorParams(*this, retTy, args);

  return retTy;
}
1054
  // Initializing an aggregate temporary in C++11: T{...}.
  if (!e->isGLValue())
    return emitAggExprToLValue(e);

  // An lvalue initializer list must be initializing a reference.
  assert(e->isTransparent() && "non-transparent glvalue init list");
  // A transparent init list is semantically its single initializer.
  return emitLValue(e->getInit(0));
}
1064
static std::variant<LValue, RValue>
                     bool forLValue, AggValueSlot slot) {
  // Emits a PseudoObjectExpr by walking its semantic expressions, binding
  // OpaqueValueExprs as they appear and evaluating the designated result
  // expression either as an l-value or an r-value.
  SmallVector<OVMD> opaques;
  // Unbind every bound opaque value on exit, whatever path we take.
  llvm::scope_exit opaque_cleanup{
      [&]() { llvm::for_each(opaques, [&](OVMD &o) { o.unbind(cgf); }); }};

  // Find the result expression, if any.
  const Expr *resultExpr = e->getResultExpr();
  std::variant<LValue, RValue> result;

  for (const Expr *semantic : e->semantics()) {
    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {

      // Skip unique OVEs.
      if (ov->isUnique()) {
        // FIXME: This doesn't really affect anything, but I cannot find a test
        // for this, so leave an ErrorNYI here until we can find one.
        cgf.cgm.errorNYI(e->getSourceRange(),
                         "emitPseudoObjectExpr skipped for uniqueness");
        assert(ov != resultExpr &&
               "A unique OVE cannot be used as the result expression");
        continue;
      }

      // If this is the result expression, we may need to evaluate
      // directly into the slot.
      OVMD opaqueData;
      if (ov == resultExpr && ov->isPRValue() && !forLValue &&
        cgf.cgm.errorNYI(e->getSourceRange(),
                         "emitPseudoObjectExpr for RValue & aggregate kind");
      } else {
        opaqueData = OVMD::bind(cgf, ov, ov->getSourceExpr());

        // If this is the result, also evaluate the result now.
        if (ov == resultExpr) {
          // FIXME: This doesn't really affect anything, but I cannot find a
          // test for this, so leave an ErrorNYI here until we can find one.
          cgf.cgm.errorNYI(e->getSourceRange(),
                           "emitPseudoObjectExpr as result");
          if (forLValue)
            result = cgf.emitLValue(ov);
          else
            cgf.cgm.errorNYI(e->getSourceRange(),
                             "emitPseudoObjectExpr as an RValue");
        }
      }
      opaques.push_back(opaqueData);
    } else if (semantic == resultExpr) {
      // Otherwise, if the expression is the result, evaluate it
      // and remember the result.
      if (forLValue)
        result = cgf.emitLValue(semantic);
      else
        cgf.cgm.errorNYI(
            e->getSourceRange(),
            "emitPseudoObjectExpr as an RValue, when semantic is result");
    } else {
      // FIXME: best I can tell, this is only reachable as an r-value, so this
      // isn't properly tested.
      cgf.cgm.errorNYI(e->getSourceRange(),
                       "emitPseudoObjectExpr as an ignored value");
      // Otherwise, evaluate the expression in an ignored context.
      cgf.emitIgnoredExpr(semantic);
    }
  }

  return result;
}
1138
  // Evaluate the pseudo-object as an l-value; the LValue alternative of the
  // variant is guaranteed when forLValue is true.
  return std::get<LValue>(emitPseudoObjectExpr(*this, e, /*forLValue=*/true,
}
1143
/// Emit code to compute a designator that specifies the location
/// of the expression.
/// FIXME: document this function better.
  // FIXME: ApplyDebugLocation DL(*this, e);
  // Dispatch on statement class; each case forwards to a dedicated emitter.
  // NOTE(review): several per-case return lines were lost from this copy.
  switch (e->getStmtClass()) {
  default:
      std::string("l-value not implemented for '") +
      e->getStmtClassName() + "'");
    return LValue();
  case Expr::ConditionalOperatorClass:
  case Expr::BinaryConditionalOperatorClass:
  case Expr::ArraySubscriptExprClass:
  case Expr::ExtVectorElementExprClass:
  case Expr::UnaryOperatorClass:
  case Expr::StringLiteralClass:
  case Expr::MemberExprClass:
  case Expr::CompoundLiteralExprClass:
  case Expr::PredefinedExprClass:
  case Expr::BinaryOperatorClass:
  case Expr::CompoundAssignOperatorClass: {
    QualType ty = e->getType();
    if (ty->getAs<AtomicType>()) {
      cgm.errorNYI(e->getSourceRange(),
                   "CompoundAssignOperator with AtomicType");
      return LValue();
    }
    // Complex compound assignments take a separate path from scalar ones.
    if (!ty->isAnyComplexType())

  }
  case Expr::CallExprClass:
  case Expr::CXXMemberCallExprClass:
  case Expr::CXXOperatorCallExprClass:
  case Expr::UserDefinedLiteralClass:
  case Expr::ExprWithCleanupsClass: {
    const auto *cleanups = cast<ExprWithCleanups>(e);
    // Run the expression's cleanups before returning the l-value.
    RunCleanupsScope scope(*this);
    LValue lv = emitLValue(cleanups->getSubExpr());
    return lv;
  }
  case Expr::CXXDefaultArgExprClass: {
    auto *dae = cast<CXXDefaultArgExpr>(e);
    CXXDefaultArgExprScope scope(*this, dae);
    return emitLValue(dae->getExpr());
  }
  case Expr::CXXTypeidExprClass:
  case Expr::ParenExprClass:
    return emitLValue(cast<ParenExpr>(e)->getSubExpr());
  case Expr::GenericSelectionExprClass:
    return emitLValue(cast<GenericSelectionExpr>(e)->getResultExpr());
  case Expr::DeclRefExprClass:
  case Expr::ImplicitCastExprClass:
  case Expr::CStyleCastExprClass:
  case Expr::CXXStaticCastExprClass:
  case Expr::CXXDynamicCastExprClass:
  case Expr::CXXReinterpretCastExprClass:
  case Expr::CXXConstCastExprClass:
  case Expr::CXXFunctionalCastExprClass:
    // TODO(cir): The above list is missing
    // CXXAddrSpaceCastExprClass, and ObjCBridgedCastExprClass.
    return emitCastLValue(cast<CastExpr>(e));
  case Expr::MaterializeTemporaryExprClass:
  case Expr::OpaqueValueExprClass:
  case Expr::ChooseExprClass:
    return emitLValue(cast<ChooseExpr>(e)->getChosenSubExpr());
  case Expr::SubstNonTypeTemplateParmExprClass:
    return emitLValue(cast<SubstNonTypeTemplateParmExpr>(e)->getReplacement());
  case Expr::InitListExprClass:
  case Expr::PseudoObjectExprClass:
  case Expr::CXXDefaultInitExprClass: {
    auto *die = cast<CXXDefaultInitExpr>(e);
    CXXDefaultInitExprScope scope(*this, die);
    return emitLValue(die->getExpr());
  }
  }
}
1241
1242static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt) {
1243 SmallString<256> buffer;
1244 llvm::raw_svector_ostream out(buffer);
1245 out << name << cnt;
1246 return std::string(out.str());
1247}
1248
  // Next unique name for a reference temporary; bumps counterRefTmp.
  return getVersionedTmpName("ref.tmp", counterRefTmp++);
}
1252
  // Next unique name for an aggregate temporary; bumps counterAggTmp.
  return getVersionedTmpName("agg.tmp", counterAggTmp++);
}
1256
1257void CIRGenFunction::emitNullInitialization(mlir::Location loc, Address destPtr,
1258 QualType ty) {
1259 // Ignore empty classes in C++.
1260 if (getLangOpts().CPlusPlus)
1261 if (const auto *rd = ty->getAsCXXRecordDecl(); rd && rd->isEmpty())
1262 return;
1263
1264 // Cast the dest ptr to the appropriate i8 pointer type.
1265 if (builder.isInt8Ty(destPtr.getElementType())) {
1266 cgm.errorNYI(loc, "Cast the dest ptr to the appropriate i8 pointer type");
1267 }
1268
1269 // Get size and alignment info for this aggregate.
1270 const CharUnits size = getContext().getTypeSizeInChars(ty);
1271 if (size.isZero()) {
1272 // But note that getTypeInfo returns 0 for a VLA.
1273 if (isa<VariableArrayType>(getContext().getAsArrayType(ty))) {
1274 cgm.errorNYI(loc,
1275 "emitNullInitialization for zero size VariableArrayType");
1276 } else {
1277 return;
1278 }
1279 }
1280
1281 // If the type contains a pointer to data member we can't memset it to zero.
1282 // Instead, create a null constant and copy it to the destination.
1283 // TODO: there are other patterns besides zero that we can usefully memset,
1284 // like -1, which happens to be the pattern used by member-pointers.
1285 if (!cgm.getTypes().isZeroInitializable(ty)) {
1286 cgm.errorNYI(loc, "type is not zero initializable");
1287 }
1288
1289 // In LLVM Codegen: otherwise, just memset the whole thing to zero using
1290 // Builder.CreateMemSet. In CIR just emit a store of #cir.zero to the
1291 // respective address.
1292 // Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
1293 const mlir::Value zeroValue = builder.getNullValue(convertType(ty), loc);
1294 builder.createStore(loc, zeroValue, destPtr);
1295}
1296
                                                 const clang::Expr *e)
    : cgf(cgf) {
  // Scope the FP environment to the options in effect for this expression.
  ConstructorHelper(e->getFPFeaturesInEffect(cgf.getLangOpts()));
}
1302
                                                 FPOptions fpFeatures)
    : cgf(cgf) {
  // Scope the FP environment to an explicitly supplied option set.
  ConstructorHelper(fpFeatures);
}
1308
void CIRGenFunction::CIRGenFPOptionsRAII::ConstructorHelper(
    FPOptions fpFeatures) {
  // Save the current FP state so the destructor can restore it, then install
  // the new feature set on the function.
  oldFPFeatures = cgf.curFPFeatures;
  cgf.curFPFeatures = fpFeatures;

  oldExcept = cgf.builder.getDefaultConstrainedExcept();
  oldRounding = cgf.builder.getDefaultConstrainedRounding();

  // Nothing else to do when the options did not actually change.
  if (oldFPFeatures == fpFeatures)
    return;

  // TODO(cir): create guard to restore fast math configurations.

  [[maybe_unused]] llvm::RoundingMode newRoundingBehavior =
      fpFeatures.getRoundingMode();
  // TODO(cir): override rounding behaviour once FM configs are guarded.
  [[maybe_unused]] llvm::fp::ExceptionBehavior newExceptionBehavior =
      fpFeatures.getExceptionMode()));
  // TODO(cir): override exception behaviour once FM configs are guarded.

  // TODO(cir): override FP flags once FM configs are guarded.

  // Constrained mode must cover the whole function; ctors/dtors are exempt
  // because their prologue/epilogue code is compiler-generated.
  assert((cgf.curFuncDecl == nullptr || cgf.builder.getIsFPConstrained() ||
          isa<CXXConstructorDecl>(cgf.curFuncDecl) ||
          isa<CXXDestructorDecl>(cgf.curFuncDecl) ||
          (newExceptionBehavior == llvm::fp::ebIgnore &&
           newRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
         "FPConstrained should be enabled on entire function");

  // TODO(cir): mark CIR function with fast math attributes.
}
1344
  // Restore the FP state captured in ConstructorHelper.
  cgf.curFPFeatures = oldFPFeatures;
  cgf.builder.setDefaultConstrainedExcept(oldExcept);
  cgf.builder.setDefaultConstrainedRounding(oldRounding);
}
1350
// TODO(cir): should be shared with LLVM codegen.
  // Returns true when the source of a class cast may be null and therefore
  // needs an explicit null check before adjusting the pointer.
  const Expr *e = ce->getSubExpr();

  // Unchecked derived-to-base casts are guaranteed non-null by construction.
  if (ce->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(e->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(ce)) {
    // And that glvalue casts are never null.
    if (ice->isGLValue())
      return false;
  }

  return true;
}
1371
/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
mlir::Value
                                QualType &baseType, Address &addr) {
  const clang::ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size. Note that
  // this is the size of the VLA in bytes, not its size in elements.
  mlir::Value numVLAElements = nullptr;

  // Walk into all VLAs. This doesn't require changes to addr,
  // which has type T* where T is the first non-VLA element type.
  do {
    QualType elementType = arrayType->getElementType();
    arrayType = getContext().getAsArrayType(elementType);

    // If we only have VLA components, 'addr' requires no adjustment.
    if (!arrayType) {
      baseType = elementType;
      return numVLAElements;
    }

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // Classic codegen emits an all-zero inbounds GEP to convert addr from
  // [M x [N x T]]* to T*. CIR doesn't need this because callers handle
  // the array-to-element pointer conversion themselves (via array_to_ptrdecay
  // casts, ptr_bitcast, or manual array type peeling).

  // Multiply out every constant-array dimension visible in the CIR type.
  uint64_t countFromCLAs = 1;
  QualType eltType;

  auto cirArrayType = mlir::dyn_cast<cir::ArrayType>(addr.getElementType());

  while (cirArrayType) {
    countFromCLAs *= cirArrayType.getSize();
    eltType = arrayType->getElementType();

    cirArrayType =
        mlir::dyn_cast<cir::ArrayType>(cirArrayType.getElementType());

    // Peel the corresponding Clang array level in lockstep.
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!cirArrayType || arrayType) &&
           "CIR and Clang types are out-of-sync");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    cgm.errorNYI(*currSrcLoc, "length for non-array underlying types");
  }

  baseType = eltType;

  mlir::Value numElements =
      builder.getConstInt(*currSrcLoc, sizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements =
        builder.createMul(numVLAElements.getLoc(), numVLAElements, numElements,

  return numElements;
}
1445
1447 // If we already made the indirect branch for indirect goto, return its block.
1449 return;
1450
1451 mlir::OpBuilder::InsertionGuard guard(builder);
1453 builder.createBlock(builder.getBlock()->getParent(), {}, {voidPtrTy},
1454 {builder.getUnknownLoc()});
1455}
1456
    mlir::Value ptrValue, QualType ty, SourceLocation loc,
    SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue) {
  // Lower the assumption directly to a cir.assume_aligned operation; the
  // optimizer can exploit the alignment without a runtime check.
  return cir::AssumeAlignedOp::create(builder, getLoc(assumptionLoc), ptrValue,
                                      alignment, offsetValue);
}
1464
    mlir::Value ptrValue, const Expr *expr, SourceLocation assumptionLoc,
    int64_t alignment, mlir::Value offsetValue) {
  // Convenience overload: derive type and location from the expression and
  // delegate to the primary overload above.
  QualType ty = expr->getType();
  SourceLocation loc = expr->getExprLoc();
  return emitAlignmentAssumption(ptrValue, ty, loc, assumptionLoc, alignment,
                                 offsetValue);
}
1473
  // QualType convenience wrapper: the caller guarantees `type` is a VLA.
  const VariableArrayType *vla =
      cgm.getASTContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}
1480
  // The number of elements so far; always size_t.
  mlir::Value numElements;

  // Walk nested VLA dimensions, multiplying the per-dimension sizes that
  // were recorded in vlaSizeMap when the type was emitted.
  QualType elementType;
  do {
    elementType = type->getElementType();
    mlir::Value vlaSize = vlaSizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize.getType() == sizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.

      numElements =
          builder.createMul(numElements.getLoc(), numElements, vlaSize,
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  assert(numElements && "Undefined elements number");
  return {numElements, elementType};
}
1508
  // Single-dimension query: just look up the previously emitted size.
  mlir::Value vlaSize = vlaSizeMap[vla->getSizeExpr()];
  assert(vlaSize && "no size for VLA!");
  assert(vlaSize.getType() == sizeTy);
  return {vlaSize, vla->getElementType()};
}
1516
// TODO(cir): Most of this function can be shared between CIRGen
// and traditional LLVM codegen
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {
    case Type::CountAttributed:
    case Type::PackIndexing:
    case Type::ArrayParameter:
    case Type::HLSLAttributedResource:
    case Type::HLSLInlineSpirv:
    case Type::PredefinedSugar:
      cgm.errorNYI("CIRGenFunction::emitVariablyModifiedType");
      break;

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable(
          "dependent type must be resolved before the CIR codegen");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::Record:
    case Type::Enum:
    case Type::Using:
    case Type::TemplateSpecialization:
    case Type::ObjCTypeParam:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
    case Type::BitInt:
    case Type::OverflowBehavior:
      llvm_unreachable("type class is never variably-modified!");

    // The cases below each step one level into the type and continue the
    // walk on the component that can carry the variable modification.
    case Type::Adjusted:
      type = cast<clang::AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<clang::DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<clang::PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<clang::BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<clang::ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<clang::MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<clang::ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      // NOTE(review): the declaration of `vat` was lost from this copy;
      // it is presumably the VariableArrayType cast of `ty` -- verify.

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *sizeExpr = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        mlir::Value &entry = vlaSizeMap[sizeExpr];
        if (!entry) {
          mlir::Value size = emitScalarExpr(sizeExpr);

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          // FIXME: What about when size's type is larger than size_t?
          entry = builder.createIntCast(size, sizeTy);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<clang::FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::BTFTagAttributed:
    case Type::SubstTemplateTypeParm:
    case Type::MacroQualified:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      emitIgnoredExpr(cast<clang::TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<clang::AtomicType>(ty)->getValueType();
      break;

    case Type::Pipe:
      type = cast<clang::PipeType>(ty)->getElementType();
      break;
    }
  } while (type->isVariablyModifiedType());
}
1659
  // When the target's va_list is an array type, the expression decays to a
  // pointer; otherwise take the address of the va_list l-value directly.
  if (getContext().getBuiltinVaListType()->isArrayType())
    return emitPointerWithAlignment(e);
  return emitLValue(e).getAddress();
}
1665
1666} // namespace clang::CIRGen
Defines the clang::Expr interface and subclasses for C++ expressions.
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
APSInt & getInt()
Definition APValue.h:508
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:227
SourceManager & getSourceManager()
Definition ASTContext.h:866
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3777
QualType getElementType() const
Definition TypeBase.h:3789
mlir::Type getElementType() const
Definition Address.h:123
An aggregate value slot.
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
mlir::Value createFloatingCast(mlir::Value v, mlir::Type destType)
CIRGenFPOptionsRAII(CIRGenFunction &cgf, FPOptions FPFeatures)
A non-RAII class containing all the information about a bound opaque value.
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
void forceCleanup(ArrayRef< mlir::Value * > valuesToReload={})
Force the emission of cleanups now, instead of waiting until this object is destroyed.
static bool isConstructorDelegationValid(const clang::CXXConstructorDecl *ctor)
Checks whether the given constructor is a valid subject for the complete-to-base constructor delegati...
void emitFunctionProlog(const FunctionArgList &args, mlir::Block *entryBB, const FunctionDecl *fd, SourceLocation bodyBeginLoc)
Emit the function prologue: declare function arguments in the symbol table.
mlir::Type convertType(clang::QualType t)
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
EHScopeStack::stable_iterator prologueCleanupDepth
The cleanup depth enclosing all the cleanups associated with the parameters.
cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn, cir::FuncType funcType)
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void emitVariablyModifiedType(QualType ty)
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
VlaSizePair getVLASize(const VariableArrayType *type)
Returns an MLIR::Value+QualType pair that corresponds to the size, in non-variably-sized elements,...
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-value with the natural pointee alignment of T.
LValue emitMemberExpr(const MemberExpr *e)
const TargetInfo & getTarget() const
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
const clang::Decl * curFuncDecl
LValue emitLValueForLambdaField(const FieldDecl *field)
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
llvm::DenseMap< const Expr *, mlir::Value > vlaSizeMap
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e)
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
bool constantFoldsToBool(const clang::Expr *cond, bool &resultBool, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does but contains a label,...
void emitDelegateCXXConstructorCall(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, const FunctionArgList &args, clang::SourceLocation loc)
VlaSizePair getVLAElements1D(const VariableArrayType *vla)
Return the number of elements for a single dimension for the given array type.
mlir::Value emitArrayLength(const clang::ArrayType *arrayType, QualType &baseType, Address &addr)
Computes the length of an array in elements, as well as the base element type and a properly-typed fi...
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
llvm::ScopedHashTableScope< const clang::Decl *, mlir::Value > SymTableScopeTy
mlir::Block * indirectGotoBlock
IndirectBranch - The first time an indirect goto is seen we create a block reserved for the indirect ...
mlir::Operation * curFn
The current function or global initializer that is generated code for.
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void enterDtorCleanups(const CXXDestructorDecl *dtor, CXXDtorType type)
Enter the cleanups necessary to complete the given phase of destruction for a destructor.
llvm::SmallVector< const ParmVarDecl * > fnArgs
Save Parameter Decl for coroutine.
llvm::SmallVector< PendingCleanupEntry > deferredConditionalCleanupStack
std::optional< mlir::Value > fnRetAlloca
The compiler-generated variable that holds the return value.
void emitImplicitAssignmentOperatorBody(FunctionArgList &args)
mlir::Type convertTypeForMem(QualType t)
mlir::LogicalResult emitCXXTryStmt(const clang::CXXTryStmt &s, cxxTryBodyEmitter &bodyCallback)
clang::QualType buildFunctionArgList(clang::GlobalDecl gd, FunctionArgList &args)
void emitCtorPrologue(const clang::CXXConstructorDecl *ctor, clang::CXXCtorType ctorType, FunctionArgList &args)
This routine generates necessary code to initialize base classes and non-static data members belongin...
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
LValue emitAggExprToLValue(const Expr *e)
LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e)
Address returnValue
The temporary alloca to hold the return value.
static bool hasAggregateEvaluationKind(clang::QualType type)
void finishFunction(SourceLocation endLoc)
mlir::LogicalResult emitFunctionBody(const clang::Stmt *body)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
clang::FieldDecl * lambdaThisCaptureField
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
void emitConstructorBody(FunctionArgList &args)
LValue emitCallExprLValue(const clang::CallExpr *e)
bool haveInsertPoint() const
True if an insertion point is defined.
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
llvm::SmallDenseMap< const ParmVarDecl *, const ImplicitParamDecl * > sizeArguments
If a ParmVarDecl had the pass_object_size attribute, this will contain a mapping from said ParmVarDec...
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
LValue emitPseudoObjectLValue(const PseudoObjectExpr *E)
void popCleanupBlocks(EHScopeStack::stable_iterator oldCleanupStackDepth, ArrayRef< mlir::Value * > valuesToReload={})
Takes the old cleanup stack size and emits the cleanup blocks that have been added.
bool shouldNullCheckClassCastValue(const CastExpr *ce)
CIRGenBuilderTy & getBuilder()
bool didCallStackSave
Whether a cir.stacksave operation has been added.
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
void startFunction(clang::GlobalDecl gd, clang::QualType returnType, cir::FuncOp fn, cir::FuncType funcType, FunctionArgList args, clang::SourceLocation loc, clang::SourceLocation startLoc)
Emit code for the start of a function.
unsigned counterRefTmp
Hold counters for incrementally naming temporaries.
mlir::MLIRContext & getMLIRContext()
void emitDestructorBody(FunctionArgList &args)
Emits the body of the current destructor.
LValue emitInitListLValue(const InitListExpr *e)
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
LValue emitCXXTypeidLValue(const CXXTypeidExpr *e)
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
LValue emitPredefinedLValue(const PredefinedExpr *e)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
void emitLambdaStaticInvokeBody(const CXXMethodDecl *md)
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
void setAddrOfLocalVar(const clang::VarDecl *vd, Address addr)
Set the address of a local variable.
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::LogicalResult emitCompoundStmtWithoutScope(const clang::CompoundStmt &s, Address *lastValue=nullptr, AggValueSlot slot=AggValueSlot::ignored())
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
bool inheritingCtorHasParams(const InheritedConstructor &inherited, CXXCtorType type)
Determine if a C++ inheriting constructor should have parameters matching those of its inherited cons...
Type for representing both the decl and type of parameters to a function.
Definition CIRGenCall.h:193
Address getAddress() const
mlir::Value getPointer() const
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
Represents a C++ constructor within a class.
Definition DeclCXX.h:2620
Represents a C++ destructor within a class.
Definition DeclCXX.h:2882
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition DeclCXX.h:2271
QualType getFunctionObjectParameterType() const
Definition DeclCXX.h:2295
bool isAbstract() const
Determine whether this class has a pure virtual function.
Definition DeclCXX.h:1221
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition DeclCXX.h:1186
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3679
CastKind getCastKind() const
Definition Expr.h:3723
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
CompoundStmt - This represents a group of statements like { stmt stmt }.
Definition Stmt.h:1746
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition DeclBase.h:601
Decl * getNonClosureContext()
Find the innermost non-closure ancestor of this declaration, walking up through blocks,...
SourceLocation getLocation() const
Definition DeclBase.h:447
bool hasAttr() const
Definition DeclBase.h:585
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isGLValue() const
Definition Expr.h:287
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Returns the set of floating point options that apply to this expression.
Definition Expr.cpp:3989
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool isPRValue() const
Definition Expr.h:285
QualType getType() const
Definition Expr.h:144
LangOptions::FPExceptionModeKind getExceptionMode() const
RoundingMode getRoundingMode() const
Represents a function declaration or definition.
Definition Decl.h:2018
Stmt * getBody(const FunctionDecl *&Definition) const
Retrieve the body (definition) of the function.
Definition Decl.cpp:3274
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4544
bool hasBody(const FunctionDecl *&Definition) const
Returns true if the function has a body.
Definition Decl.cpp:3194
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
CXXCtorType getCtorType() const
Definition GlobalDecl.h:108
const Decl * getDecl() const
Definition GlobalDecl.h:106
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition Expr.h:3856
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, const IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition Decl.cpp:5597
Describes an C or C++ initializer list.
Definition Expr.h:5302
bool isTransparent() const
Is this a transparent initializer list (that is, an InitListExpr that is purely syntactic,...
Definition Expr.cpp:2462
const Expr * getInit(unsigned Init) const
Definition Expr.h:5354
FPExceptionModeKind
Possible floating point exception behavior.
@ FPE_Default
Used internally to represent initial unspecified value.
@ FPE_Strict
Strictly preserve the floating-point exception semantics.
@ FPE_MayTrap
Transformations do not cause new exceptions but may hide some.
@ FPE_Ignore
Assume that floating-point exceptions are masked.
Represents an unpacked "presumed" location which can be presented to the user.
unsigned getColumn() const
Return the presumed column number of this location.
const char * getFilename() const
Return the presumed filename of this location.
unsigned getLine() const
Return the presumed line number of this location.
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition Expr.h:6805
Expr * getResultExpr()
Return the result-bearing expression, or null if there is none.
Definition Expr.h:6853
ArrayRef< Expr * > semantics()
Definition Expr.h:6877
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2961
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Definition TypeBase.h:8530
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
This class handles loading and caching of source files into memory.
PresumedLoc getPresumedLoc(SourceLocation Loc, bool UseLineDirectives=true) const
Returns the "presumed" location of a SourceLocation specifies.
A trivial tuple used to represent a source range.
SourceLocation getEnd() const
SourceLocation getBegin() const
Stmt - This represents one statement.
Definition Stmt.h:86
StmtClass getStmtClass() const
Definition Stmt.h:1499
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
const char * getStmtClassName() const
Definition Stmt.cpp:86
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:355
bool isVoidType() const
Definition TypeBase.h:9039
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isAnyComplexType() const
Definition TypeBase.h:8808
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2855
TypeClass getTypeClass() const
Definition TypeBase.h:2438
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9266
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:924
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:2169
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:4021
Expr * getSizeExpr() const
Definition TypeBase.h:4035
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
static bool previousOpIsNonYieldingCleanup(mlir::Block *block)
static std::string getVersionedTmpName(llvm::StringRef name, unsigned cnt)
static bool mayDropFunctionReturn(const ASTContext &astContext, QualType returnType)
static mlir::Value emitArgumentDemotion(CIRGenFunction &cgf, const VarDecl *var, mlir::Value value)
An argument came in as a promoted argument; demote it back to its declared type.
static void eraseEmptyAndUnusedBlocks(cir::FuncOp func)
static std::variant< LValue, RValue > emitPseudoObjectExpr(CIRGenFunction &cgf, const PseudoObjectExpr *e, bool forLValue, AggValueSlot slot)
static llvm::fp::ExceptionBehavior toConstrainedExceptMd(LangOptions::FPExceptionModeKind kind)
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
@ Ctor_Base
Base object ctor.
Definition ABI.h:26
@ Ctor_Complete
Complete object ctor.
Definition ABI.h:25
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
CXXDtorType
C++ destructor types.
Definition ABI.h:34
@ Dtor_VectorDeleting
Vector deleting dtor.
Definition ABI.h:40
@ Dtor_Comdat
The COMDAT used for dtors.
Definition ABI.h:38
@ Dtor_Unified
GCC-style unified dtor.
Definition ABI.h:39
@ Dtor_Base
Base object dtor.
Definition ABI.h:37
@ Dtor_Complete
Complete object dtor.
Definition ABI.h:36
@ Dtor_Deleting
Deleting dtor.
Definition ABI.h:35
U cast(CodeGen::Address addr)
Definition Address.h:327
@ Other
Other implicit parameter.
Definition Decl.h:1763
static bool fastMathFuncAttributes()
static bool vtableInitialization()
static bool constructABIArgDirectExtend()
static bool runCleanupsScope()
static bool emitTypeCheck()
static bool fastMathGuard()
static bool fastMathFlags()
static bool generateDebugInfo()
static bool cleanupWithPreservedValues()
static bool incrementProfileCounter()
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
llvm::ArrayRef< mlir::Block * > getRetBlocks()
LexicalScope(CIRGenFunction &cgf, mlir::Location loc, mlir::Block *eb)
mlir::Location getRetLoc(mlir::Block *b)
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650