clang 23.0.0git
CIRGenExprCXX.cpp
Go to the documentation of this file.
1//===--- CIRGenExprCXX.cpp - Emit CIR Code for C++ expressions ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code dealing with code generation of C++ expressions
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenCXXABI.h"
15#include "CIRGenFunction.h"
16
17#include "clang/AST/CharUnits.h"
18#include "clang/AST/DeclCXX.h"
19#include "clang/AST/ExprCXX.h"
20#include "clang/AST/ExprObjC.h"
23#include "llvm/Support/TrailingObjects.h"
24
25using namespace clang;
26using namespace clang::CIRGen;
27
28namespace {
29struct MemberCallInfo {
30 RequiredArgs reqArgs;
31 // Number of prefix arguments for the call. Ignores the `this` pointer.
32 unsigned prefixSize;
33};
34} // namespace
35
37 CIRGenFunction &cgf, const CXXMethodDecl *md, mlir::Value thisPtr,
38 mlir::Value implicitParam, QualType implicitParamTy, const CallExpr *ce,
39 CallArgList &args, CallArgList *rtlArgs) {
40 assert(ce == nullptr || isa<CXXMemberCallExpr>(ce) ||
42 assert(md->isInstance() &&
43 "Trying to emit a member or operator call expr on a static method!");
44
45 // Push the this ptr.
46 const CXXRecordDecl *rd =
48 args.add(RValue::get(thisPtr), cgf.getTypes().deriveThisType(rd, md));
49
50 // If there is an implicit parameter (e.g. VTT), emit it.
51 if (implicitParam) {
52 args.add(RValue::get(implicitParam), implicitParamTy);
53 }
54
55 const auto *fpt = md->getType()->castAs<FunctionProtoType>();
56 RequiredArgs required =
58 unsigned prefixSize = args.size() - 1;
59
60 // Add the rest of the call args
61 if (rtlArgs) {
62 // Special case: if the caller emitted the arguments right-to-left already
63 // (prior to emitting the *this argument), we're done. This happens for
64 // assignment operators.
65 args.addFrom(*rtlArgs);
66 } else if (ce) {
67 // Special case: skip first argument of CXXOperatorCall (it is "this").
68 unsigned argsToSkip = isa<CXXOperatorCallExpr>(ce) ? 1 : 0;
69 cgf.emitCallArgs(args, fpt, drop_begin(ce->arguments(), argsToSkip),
70 ce->getDirectCallee());
71 } else {
72 assert(
73 fpt->getNumParams() == 0 &&
74 "No CallExpr specified for function with non-zero number of arguments");
75 }
76
77 // return {required, prefixSize};
78 return {required, prefixSize};
79}
80
84 const BinaryOperator *bo =
86 const Expr *baseExpr = bo->getLHS();
87 const Expr *memFnExpr = bo->getRHS();
88
89 const auto *mpt = memFnExpr->getType()->castAs<MemberPointerType>();
90 const auto *fpt = mpt->getPointeeType()->castAs<FunctionProtoType>();
91
92 // Emit the 'this' pointer.
93 Address thisAddr = Address::invalid();
94 if (bo->getOpcode() == BO_PtrMemI)
95 thisAddr = emitPointerWithAlignment(baseExpr);
96 else
97 thisAddr = emitLValue(baseExpr).getAddress();
98
100
101 // Get the member function pointer.
102 mlir::Value memFnPtr = emitScalarExpr(memFnExpr);
103
104 // Resolve the member function pointer to the actual callee and adjust the
105 // "this" pointer for call.
106 mlir::Location loc = getLoc(ce->getExprLoc());
107 auto [/*mlir::Value*/ calleePtr, /*mlir::Value*/ adjustedThis] =
108 builder.createGetMethod(loc, memFnPtr, thisAddr.getPointer());
109
110 // Prepare the call arguments.
111 CallArgList argsList;
112 argsList.add(RValue::get(adjustedThis), getContext().VoidPtrTy);
113 emitCallArgs(argsList, fpt, ce->arguments());
114
116
117 // Build the call.
118 CIRGenCallee callee(fpt, calleePtr.getDefiningOp());
120 return emitCall(cgm.getTypes().arrangeCXXMethodCall(argsList, fpt, required,
121 /*PrefixSize=*/0),
122 callee, returnValue, argsList, nullptr, loc);
123}
124
127 bool hasQualifier, NestedNameSpecifier qualifier, bool isArrow,
128 const Expr *base) {
130
131 // Compute the object pointer.
132 bool canUseVirtualCall = md->isVirtual() && !hasQualifier;
133 const CXXMethodDecl *devirtualizedMethod = nullptr;
135
136 // Note on trivial assignment
137 // --------------------------
138 // Classic codegen avoids generating the trivial copy/move assignment operator
139 // when it isn't necessary, choosing instead to just produce IR with an
140 // equivalent effect. We have chosen not to do that in CIR, instead emitting
141 // trivial copy/move assignment operators and allowing later transformations
142 // to optimize them away if appropriate.
143
144 // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
145 // operator before the LHS.
146 CallArgList rtlArgStorage;
147 CallArgList *rtlArgs = nullptr;
148 if (auto *oce = dyn_cast<CXXOperatorCallExpr>(ce)) {
149 if (oce->isAssignmentOp()) {
150 rtlArgs = &rtlArgStorage;
151 emitCallArgs(*rtlArgs, md->getType()->castAs<FunctionProtoType>(),
152 drop_begin(ce->arguments(), 1), ce->getDirectCallee(),
153 /*ParamsToSkip*/ 0);
154 }
155 }
156
157 LValue thisPtr;
158 if (isArrow) {
159 LValueBaseInfo baseInfo;
161 Address thisValue = emitPointerWithAlignment(base, &baseInfo);
162 thisPtr = makeAddrLValue(thisValue, base->getType(), baseInfo);
163 } else {
164 thisPtr = emitLValue(base);
165 }
166
167 if (isa<CXXConstructorDecl>(md)) {
168 cgm.errorNYI(ce->getSourceRange(),
169 "emitCXXMemberOrOperatorMemberCallExpr: constructor call");
170 return RValue::get(nullptr);
171 }
172
173 if ((md->isTrivial() || (md->isDefaulted() && md->getParent()->isUnion())) &&
175 return RValue::get(nullptr);
176
177 // Compute the function type we're calling
178 const CXXMethodDecl *calleeDecl =
179 devirtualizedMethod ? devirtualizedMethod : md;
180 const CIRGenFunctionInfo *fInfo = nullptr;
181 if (const auto *dtor = dyn_cast<CXXDestructorDecl>(calleeDecl))
182 fInfo = &cgm.getTypes().arrangeCXXStructorDeclaration(
184 else
185 fInfo = &cgm.getTypes().arrangeCXXMethodDeclaration(calleeDecl);
186
187 cir::FuncType ty = cgm.getTypes().getFunctionType(*fInfo);
188
191
192 // C++ [class.virtual]p12:
193 // Explicit qualification with the scope operator (5.1) suppresses the
194 // virtual call mechanism.
195 //
196 // We also don't emit a virtual call if the base expression has a record type
197 // because then we know what the type is.
198 bool useVirtualCall = canUseVirtualCall && !devirtualizedMethod;
199
200 if (const auto *dtor = dyn_cast<CXXDestructorDecl>(calleeDecl)) {
201 assert(ce->arg_begin() == ce->arg_end() &&
202 "Destructor shouldn't have explicit parameters");
203 assert(returnValue.isNull() && "Destructor shouldn't have return value");
204 if (useVirtualCall) {
205 cgm.getCXXABI().emitVirtualDestructorCall(*this, dtor, Dtor_Complete,
206 thisPtr.getAddress(),
208 } else {
209 GlobalDecl globalDecl(dtor, Dtor_Complete);
210 CIRGenCallee callee;
212 if (!devirtualizedMethod) {
214 cgm.getAddrOfCXXStructor(globalDecl, fInfo, ty), globalDecl);
215 } else {
216 cgm.errorNYI(ce->getSourceRange(), "devirtualized destructor call");
217 return RValue::get(nullptr);
218 }
219
220 QualType thisTy =
221 isArrow ? base->getType()->getPointeeType() : base->getType();
222 // CIRGen does not pass CallOrInvoke here (different from OG LLVM codegen)
223 // because in practice it always null even in OG.
224 emitCXXDestructorCall(globalDecl, callee, thisPtr.getPointer(), thisTy,
225 /*implicitParam=*/nullptr,
226 /*implicitParamTy=*/QualType(), ce);
227 }
228 return RValue::get(nullptr);
229 }
230
231 CIRGenCallee callee;
232 if (useVirtualCall) {
233 callee = CIRGenCallee::forVirtual(ce, md, thisPtr.getAddress(), ty);
234 } else {
236 if (getLangOpts().AppleKext) {
237 cgm.errorNYI(ce->getSourceRange(),
238 "emitCXXMemberOrOperatorMemberCallExpr: AppleKext");
239 return RValue::get(nullptr);
240 }
241
242 callee = CIRGenCallee::forDirect(cgm.getAddrOfFunction(calleeDecl, ty),
243 GlobalDecl(calleeDecl));
244 }
245
246 if (md->isVirtual()) {
247 Address newThisAddr =
248 cgm.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
249 *this, calleeDecl, thisPtr.getAddress(), useVirtualCall);
250 thisPtr.setAddress(newThisAddr);
251 }
252
254 calleeDecl, callee, returnValue, thisPtr.getPointer(),
255 /*ImplicitParam=*/nullptr, QualType(), ce, rtlArgs);
256}
257
258RValue
260 const CXXMethodDecl *md,
262 assert(md->isInstance() &&
263 "Trying to emit a member call expr on a static method!");
265 e, md, returnValue, /*HasQualifier=*/false, /*Qualifier=*/std::nullopt,
266 /*IsArrow=*/false, e->getArg(0));
267}
268
271 // Emit as a device kernel call if CUDA device code is to be generated.
272 if (!getLangOpts().HIP && getLangOpts().CUDAIsDevice)
273 cgm.errorNYI("CUDA Device side kernel call");
274 return cgm.getCUDARuntime().emitCUDAKernelCallExpr(*this, expr, returnValue);
275}
276
278 const CXXMethodDecl *md, const CIRGenCallee &callee,
279 ReturnValueSlot returnValue, mlir::Value thisPtr, mlir::Value implicitParam,
280 QualType implicitParamTy, const CallExpr *ce, CallArgList *rtlArgs) {
281 const auto *fpt = md->getType()->castAs<FunctionProtoType>();
282 CallArgList args;
283 MemberCallInfo callInfo = commonBuildCXXMemberOrOperatorCall(
284 *this, md, thisPtr, implicitParam, implicitParamTy, ce, args, rtlArgs);
285 auto &fnInfo = cgm.getTypes().arrangeCXXMethodCall(
286 args, fpt, callInfo.reqArgs, callInfo.prefixSize);
287 assert((ce || currSrcLoc) && "expected source location");
288 mlir::Location loc = ce ? getLoc(ce->getExprLoc()) : *currSrcLoc;
290 return emitCall(fnInfo, callee, returnValue, args, nullptr, loc);
291}
292
294 Address destPtr,
295 const CXXRecordDecl *base) {
296 if (base->isEmpty())
297 return;
298
299 const ASTRecordLayout &layout = cgf.getContext().getASTRecordLayout(base);
300 CharUnits nvSize = layout.getNonVirtualSize();
301
302 // We cannot simply zero-initialize the entire base sub-object if vbptrs are
303 // present, they are initialized by the most derived class before calling the
304 // constructor.
306 stores.emplace_back(CharUnits::Zero(), nvSize);
307
308 // Each store is split by the existence of a vbptr.
309 // TODO(cir): This only needs handling for the MS CXXABI.
311
312 // If the type contains a pointer to data member we can't memset it to zero.
313 // Instead, create a null constant and copy it to the destination.
314 // TODO: there are other patterns besides zero that we can usefully memset,
315 // like -1, which happens to be the pattern used by member-pointers.
316 // TODO: isZeroInitializable can be over-conservative in the case where a
317 // virtual base contains a member pointer.
318 mlir::TypedAttr nullConstantForBase = cgf.cgm.emitNullConstantForBase(base);
319 if (!cgf.getBuilder().isNullValue(nullConstantForBase)) {
320 cgf.cgm.errorNYI(
321 base->getSourceRange(),
322 "emitNullBaseClassInitialization: base constant is not null");
323 } else {
324 // Otherwise, just memset the whole thing to zero. This is legal
325 // because in LLVM, all default initializers (other than the ones we just
326 // handled above) are guaranteed to have a bit pattern of all zeros.
327 // TODO(cir): When the MS CXXABI is supported, we will need to iterate over
328 // `stores` and create a separate memset for each one. For now, we know that
329 // there will only be one store and it will begin at offset zero, so that
330 // simplifies this code considerably.
331 assert(stores.size() == 1 && "Expected only one store");
332 assert(stores[0].first == CharUnits::Zero() &&
333 "Expected store to begin at offset zero");
334 CIRGenBuilderTy builder = cgf.getBuilder();
335 mlir::Location loc = cgf.getLoc(base->getBeginLoc());
336 builder.createStore(loc, builder.getConstant(loc, nullConstantForBase),
337 destPtr);
338 }
339}
340
342 AggValueSlot dest) {
343 assert(!dest.isIgnored() && "Must have a destination!");
344 const CXXConstructorDecl *cd = e->getConstructor();
345
346 // If we require zero initialization before (or instead of) calling the
347 // constructor, as can be the case with a non-user-provided default
348 // constructor, emit the zero initialization now, unless destination is
349 // already zeroed.
350 if (e->requiresZeroInitialization() && !dest.isZeroed()) {
351 switch (e->getConstructionKind()) {
355 e->getType());
356 break;
360 cd->getParent());
361 break;
362 }
363 }
364
365 // If this is a call to a trivial default constructor, do nothing.
366 if (cd->isTrivial() && cd->isDefaultConstructor())
367 return;
368
369 // Elide the constructor if we're constructing from a temporary
370 if (getLangOpts().ElideConstructors && e->isElidable()) {
371 // FIXME: This only handles the simplest case, where the source object is
372 // passed directly as the first argument to the constructor. This
373 // should also handle stepping through implicit casts and conversion
374 // sequences which involve two steps, with a conversion operator
375 // follwed by a converting constructor.
376 const Expr *srcObj = e->getArg(0);
377 assert(srcObj->isTemporaryObject(getContext(), cd->getParent()));
378 assert(
379 getContext().hasSameUnqualifiedType(e->getType(), srcObj->getType()));
380 emitAggExpr(srcObj, dest);
381 return;
382 }
383
384 if (const ArrayType *arrayType = getContext().getAsArrayType(e->getType())) {
386 emitCXXAggrConstructorCall(cd, arrayType, dest.getAddress(), e, false);
387 } else {
388
390 bool forVirtualBase = false;
391 bool delegating = false;
392
393 switch (e->getConstructionKind()) {
396 break;
398 // We should be emitting a constructor; GlobalDecl will assert this
399 type = curGD.getCtorType();
400 delegating = true;
401 break;
403 forVirtualBase = true;
404 [[fallthrough]];
406 type = Ctor_Base;
407 break;
408 }
409
410 emitCXXConstructorCall(cd, type, forVirtualBase, delegating, dest, e);
411 }
412}
413
415 const CXXNewExpr *e) {
416 if (!e->isArray())
417 return CharUnits::Zero();
418
419 // No cookie is required if the operator new[] being used is the
420 // reserved placement operator new[].
422 return CharUnits::Zero();
423
424 return cgf.cgm.getCXXABI().getArrayCookieSize(e);
425}
426
427static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e,
428 unsigned minElements,
429 mlir::Value &numElements,
430 mlir::Value &sizeWithoutCookie) {
432 mlir::Location loc = cgf.getLoc(e->getSourceRange());
433
434 if (!e->isArray()) {
436 sizeWithoutCookie = cgf.getBuilder().getConstant(
437 loc, cir::IntAttr::get(cgf.sizeTy, typeSize.getQuantity()));
438 return sizeWithoutCookie;
439 }
440
441 // The width of size_t.
442 unsigned sizeWidth = cgf.cgm.getDataLayout().getTypeSizeInBits(cgf.sizeTy);
443
444 // The number of elements can be have an arbitrary integer type;
445 // essentially, we need to multiply it by a constant factor, add a
446 // cookie size, and verify that the result is representable as a
447 // size_t. That's just a gloss, though, and it's wrong in one
448 // important way: if the count is negative, it's an error even if
449 // the cookie size would bring the total size >= 0.
450 //
451 // If the array size is constant, Sema will have prevented negative
452 // values and size overflow.
453
454 // Compute the constant factor.
455 llvm::APInt arraySizeMultiplier(sizeWidth, 1);
456 while (const ConstantArrayType *cat =
458 type = cat->getElementType();
459 arraySizeMultiplier *= cat->getSize();
460 }
461
463 llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
464 typeSizeMultiplier *= arraySizeMultiplier;
465
466 // Figure out the cookie size.
467 llvm::APInt cookieSize(sizeWidth,
468 calculateCookiePadding(cgf, e).getQuantity());
469
470 // This will be a size_t.
471 mlir::Value size;
472
473 // Emit the array size expression.
474 // We multiply the size of all dimensions for NumElements.
475 // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
476 const Expr *arraySize = *e->getArraySize();
477 mlir::Attribute constNumElements =
478 ConstantEmitter(cgf.cgm, &cgf)
479 .tryEmitAbstract(arraySize, arraySize->getType());
480 if (constNumElements) {
481 // Get an APInt from the constant
482 const llvm::APInt &count =
483 mlir::cast<cir::IntAttr>(constNumElements).getValue();
484
485 [[maybe_unused]] unsigned numElementsWidth = count.getBitWidth();
486 bool hasAnyOverflow = false;
487
488 // The equivalent code in CodeGen/CGExprCXX.cpp handles these cases as
489 // overflow, but that should never happen. The size argument is implicitly
490 // cast to a size_t, so it can never be negative and numElementsWidth will
491 // always equal sizeWidth.
492 assert(!count.isNegative() && "Expected non-negative array size");
493 assert(numElementsWidth == sizeWidth &&
494 "Expected a size_t array size constant");
495
496 // Okay, compute a count at the right width.
497 llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
498
499 // Scale numElements by that. This might overflow, but we don't
500 // care because it only overflows if allocationSize does too, and
501 // if that overflows then we shouldn't use this.
502 // This emits a constant that may not be used, but we can't tell here
503 // whether it will be needed or not.
504 numElements =
505 cgf.getBuilder().getConstInt(loc, adjustedCount * arraySizeMultiplier);
506
507 // Compute the size before cookie, and track whether it overflowed.
508 bool overflow;
509 llvm::APInt allocationSize =
510 adjustedCount.umul_ov(typeSizeMultiplier, overflow);
511
512 // Sema prevents us from hitting this case
513 assert(!overflow && "Overflow in array allocation size");
514
515 // Add in the cookie, and check whether it's overflowed.
516 if (cookieSize != 0) {
517 // Save the current size without a cookie. This shouldn't be
518 // used if there was overflow
519 sizeWithoutCookie = cgf.getBuilder().getConstInt(
520 loc, allocationSize.zextOrTrunc(sizeWidth));
521
522 allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
523 hasAnyOverflow |= overflow;
524 }
525
526 // On overflow, produce a -1 so operator new will fail
527 if (hasAnyOverflow) {
528 size =
529 cgf.getBuilder().getConstInt(loc, llvm::APInt::getAllOnes(sizeWidth));
530 } else {
531 size = cgf.getBuilder().getConstInt(loc, allocationSize);
532 }
533 } else {
534 // Create a value for the variable number of elements
535 numElements = cgf.emitScalarExpr(*e->getArraySize());
536 auto numElementsType = mlir::cast<cir::IntType>(numElements.getType());
537 [[maybe_unused]] unsigned numElementsWidth = numElementsType.getWidth();
538
539 // We might need check for overflow.
540
541 mlir::Value hasOverflow;
542 // Classic codegen checks for the size variable being signed, having a
543 // smaller width than size_t, and having a larger width than size_t.
544 // However, the AST implicitly casts the size variable to size_t so none of
545 // these conditions will ever be met.
546 assert(
547 !(*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType() &&
548 (numElementsWidth == sizeWidth) &&
549 (numElements.getType() == cgf.sizeTy) &&
550 "Expected array size to be implicitly cast to size_t!");
551
552 // There are up to three conditions we need to test for:
553 // 1) if minElements > 0, we need to check whether numElements is smaller
554 // than that.
555 // 2) we need to compute
556 // sizeWithoutCookie := numElements * typeSizeMultiplier
557 // and check whether it overflows; and
558 // 3) if we need a cookie, we need to compute
559 // size := sizeWithoutCookie + cookieSize
560 // and check whether it overflows.
561
562 if (minElements) {
563 // Don't allow allocation of fewer elements than we have initializers.
564 if (!hasOverflow) {
565 // FIXME: Avoid creating this twice. It may happen above.
566 mlir::Value minElementsV = cgf.getBuilder().getConstInt(
567 loc, llvm::APInt(sizeWidth, minElements));
568 hasOverflow = cgf.getBuilder().createCompare(loc, cir::CmpOpKind::lt,
569 numElements, minElementsV);
570 }
571 }
572
573 size = numElements;
574
575 // Multiply by the type size if necessary. This multiplier
576 // includes all the factors for nested arrays.
577 //
578 // This step also causes numElements to be scaled up by the
579 // nested-array factor if necessary. Overflow on this computation
580 // can be ignored because the result shouldn't be used if
581 // allocation fails.
582 if (typeSizeMultiplier != 1) {
583 mlir::Value tsmV = cgf.getBuilder().getConstInt(loc, typeSizeMultiplier);
584 auto mulOp = cir::BinOpOverflowOp::create(
585 cgf.getBuilder(), loc, mlir::cast<cir::IntType>(cgf.sizeTy),
586 cir::BinOpOverflowKind::Mul, size, tsmV);
587
588 if (hasOverflow)
589 hasOverflow =
590 cgf.getBuilder().createOr(loc, hasOverflow, mulOp.getOverflow());
591 else
592 hasOverflow = mulOp.getOverflow();
593
594 size = mulOp.getResult();
595
596 // Also scale up numElements by the array size multiplier.
597 if (arraySizeMultiplier != 1) {
598 // If the base element type size is 1, then we can re-use the
599 // multiply we just did.
600 if (typeSize.isOne()) {
601 assert(arraySizeMultiplier == typeSizeMultiplier);
602 numElements = size;
603
604 // Otherwise we need a separate multiply.
605 } else {
606 mlir::Value asmV =
607 cgf.getBuilder().getConstInt(loc, arraySizeMultiplier);
608 numElements = cgf.getBuilder().createMul(loc, numElements, asmV);
609 }
610 }
611 } else {
612 // numElements doesn't need to be scaled.
613 assert(arraySizeMultiplier == 1);
614 }
615
616 // Add in the cookie size if necessary.
617 if (cookieSize != 0) {
618 sizeWithoutCookie = size;
619 mlir::Value cookieSizeV = cgf.getBuilder().getConstInt(loc, cookieSize);
620 auto addOp = cir::BinOpOverflowOp::create(
621 cgf.getBuilder(), loc, mlir::cast<cir::IntType>(cgf.sizeTy),
622 cir::BinOpOverflowKind::Add, size, cookieSizeV);
623
624 if (hasOverflow)
625 hasOverflow =
626 cgf.getBuilder().createOr(loc, hasOverflow, addOp.getOverflow());
627 else
628 hasOverflow = addOp.getOverflow();
629
630 size = addOp.getResult();
631 }
632
633 // If we had any possibility of dynamic overflow, make a select to
634 // overwrite 'size' with an all-ones value, which should cause
635 // operator new to throw.
636 if (hasOverflow) {
637 mlir::Value allOnes =
638 cgf.getBuilder().getConstInt(loc, llvm::APInt::getAllOnes(sizeWidth));
639 size = cgf.getBuilder().createSelect(loc, hasOverflow, allOnes, size);
640 }
641 }
642
643 if (cookieSize == 0)
644 sizeWithoutCookie = size;
645 else
646 assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");
647
648 return size;
649}
650
651/// Emit a call to an operator new or operator delete function, as implicitly
652/// created by new-expressions and delete-expressions.
654 const FunctionDecl *calleeDecl,
655 const FunctionProtoType *calleeType,
656 const CallArgList &args) {
657 cir::CIRCallOpInterface callOrTryCall;
658 cir::FuncOp calleePtr = cgf.cgm.getAddrOfFunction(calleeDecl);
659 CIRGenCallee callee =
660 CIRGenCallee::forDirect(calleePtr, GlobalDecl(calleeDecl));
661 RValue rv =
662 cgf.emitCall(cgf.cgm.getTypes().arrangeFreeFunctionCall(args, calleeType),
663 callee, ReturnValueSlot(), args, &callOrTryCall);
664
665 /// C++1y [expr.new]p10:
666 /// [In a new-expression,] an implementation is allowed to omit a call
667 /// to a replaceable global allocation function.
668 ///
669 /// We model such elidable calls with the 'builtin' attribute.
671 return rv;
672}
673
675 const CallExpr *callExpr,
677 CallArgList args;
678 emitCallArgs(args, type, callExpr->arguments());
679 // Find the allocation or deallocation function that we're calling.
680 ASTContext &astContext = getContext();
681 assert(op == OO_New || op == OO_Delete);
683
684 clang::DeclContextLookupResult lookupResult =
685 astContext.getTranslationUnitDecl()->lookup(name);
686 for (const NamedDecl *decl : lookupResult) {
687 if (const auto *funcDecl = dyn_cast<FunctionDecl>(decl)) {
688 if (astContext.hasSameType(funcDecl->getType().getTypePtr(), type)) {
689 if (sanOpts.has(SanitizerKind::AllocToken)) {
690 // TODO: Set !alloc_token metadata.
692 cgm.errorNYI("Alloc token sanitizer not yet supported!");
693 }
694
695 // Emit the call to operator new/delete.
696 return emitNewDeleteCall(*this, funcDecl, type, args);
697 }
698 }
699 }
700
701 llvm_unreachable("predeclared global operator new/delete is missing");
702}
703
704namespace {
705template <typename Traits> struct PlacementArg {
706 typename Traits::RValueTy argValue;
707 QualType argType;
708};
709
710/// A cleanup to call the given 'operator delete' function upon abnormal
711/// exit from a new expression. Templated on a traits type that deals with
712/// ensuring that the arguments dominate the cleanup if necessary.
713template <typename Traits>
714class CallDeleteDuringNew final
715 : public EHScopeStack::Cleanup,
716 private llvm::TrailingObjects<CallDeleteDuringNew<Traits>,
717 PlacementArg<Traits>> {
718 using TrailingObj =
719 llvm::TrailingObjects<CallDeleteDuringNew<Traits>, PlacementArg<Traits>>;
720 friend TrailingObj;
721 using TrailingObj::getTrailingObjects;
722
723 /// Type used to hold llvm::Value*s.
724 typedef typename Traits::ValueTy ValueTy;
725 /// Type used to hold RValues.
726 typedef typename Traits::RValueTy RValueTy;
727
728 unsigned numPlacementArgs : 30;
729 LLVM_PREFERRED_TYPE(AlignedAllocationMode)
730 unsigned passAlignmentToPlacementDelete : 1;
731 const FunctionDecl *operatorDelete;
732 ValueTy ptr;
733 ValueTy allocSize;
734 CharUnits allocAlign;
735
736 PlacementArg<Traits> *getPlacementArgs() { return getTrailingObjects(); }
737
738 void setPlacementArg(unsigned i, RValueTy argValue, QualType argType) {
739 assert(i < numPlacementArgs && "index out of range");
740 getPlacementArgs()[i] = {argValue, argType};
741 }
742
743public:
744 static size_t getExtraSize(size_t numPlacementArgs) {
745 return TrailingObj::template additionalSizeToAlloc<PlacementArg<Traits>>(
746 numPlacementArgs);
747 }
748
749 CallDeleteDuringNew(size_t numPlacementArgs,
750 const FunctionDecl *operatorDelete, ValueTy ptr,
751 ValueTy allocSize,
752 const ImplicitAllocationParameters &iap,
753 CharUnits allocAlign, const CallArgList *newArgs,
754 unsigned numNonPlacementArgs, CIRGenFunction *cgf,
755 mlir::Location loc)
756 : numPlacementArgs(numPlacementArgs),
757 passAlignmentToPlacementDelete(isAlignedAllocation(iap.PassAlignment)),
758 operatorDelete(operatorDelete), ptr(ptr), allocSize(allocSize),
759 allocAlign(allocAlign) {
760 for (unsigned i = 0, n = numPlacementArgs; i != n; ++i) {
761 const CallArg &arg = (*newArgs)[i + numNonPlacementArgs];
762 setPlacementArg(i, arg.getRValue(*cgf, loc), arg.ty);
763 }
764 }
765
766 void emit(CIRGenFunction &cgf, Flags flags) override {
767 const auto *fpt = operatorDelete->getType()->castAs<FunctionProtoType>();
768 CallArgList deleteArgs;
769
770 unsigned firstNonTypeArg = 0;
771 TypeAwareAllocationMode typeAwareDeallocation = TypeAwareAllocationMode::No;
773
774 // The first argument after type-identity parameter (if any) is always
775 // a void* (or C* for a destroying operator delete for class type C).
776 deleteArgs.add(Traits::get(cgf, ptr), fpt->getParamType(firstNonTypeArg));
777
778 // Figure out what other parameters we should be implicitly passing.
779 UsualDeleteParams params;
780 if (numPlacementArgs) {
781 // A placement deallocation function is implicitly passed an alignment
782 // if the placement allocation function was, but is never passed a size.
783 params.Alignment =
784 alignedAllocationModeFromBool(passAlignmentToPlacementDelete);
785 params.TypeAwareDelete = typeAwareDeallocation;
787 } else {
788 // For a non-placement new-expression, 'operator delete' can take a
789 // size and/or an alignment if it has the right parameters.
790 params = operatorDelete->getUsualDeleteParams();
791 }
792
793 assert(!params.DestroyingDelete &&
794 "should not call destroying delete in a new-expression");
795
796 // The second argument can be a std::size_t (for non-placement delete).
797 if (params.Size)
798 deleteArgs.add(Traits::get(cgf, allocSize),
799 cgf.getContext().getSizeType());
800
801 // The next (second or third) argument can be a std::align_val_t, which
802 // is an enum whose underlying type is std::size_t.
803 // FIXME: Use the right type as the parameter type. Note that in a call
804 // to operator delete(size_t, ...), we may not have it available.
805 if (isAlignedAllocation(params.Alignment))
806 cgf.cgm.errorNYI("CallDeleteDuringNew: aligned allocation");
807
808 // Pass the rest of the arguments, which must match exactly.
809 for (unsigned i = 0; i != numPlacementArgs; ++i) {
810 auto arg = getPlacementArgs()[i];
811 deleteArgs.add(Traits::get(cgf, arg.argValue), arg.argType);
812 }
813
814 // Call 'operator delete'.
815 emitNewDeleteCall(cgf, operatorDelete, fpt, deleteArgs);
816 }
817};
818} // namespace
819
820/// Enter a cleanup to call 'operator delete' if the initializer in a
821/// new-expression throws.
823 Address newPtr, mlir::Value allocSize,
824 CharUnits allocAlign,
825 const CallArgList &newArgs) {
826 unsigned numNonPlacementArgs = e->getNumImplicitArgs();
827
828 // If we're not inside a conditional branch, then the cleanup will
829 // dominate and we can do the easier (and more efficient) thing.
830 if (!cgf.isInConditionalBranch()) {
831 struct DirectCleanupTraits {
832 typedef mlir::Value ValueTy;
833 typedef RValue RValueTy;
834 static RValue get(CIRGenFunction &, ValueTy v) { return RValue::get(v); }
835 static RValue get(CIRGenFunction &, RValueTy v) { return v; }
836 };
837
838 typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;
839
841 cgf.ehStack.pushCleanupWithExtra<DirectCleanup>(
843 newPtr.getPointer(), allocSize, e->implicitAllocationParameters(),
844 allocAlign, &newArgs, numNonPlacementArgs, &cgf,
845 cgf.getLoc(e->getSourceRange()));
846
847 return;
848 }
849
850 cgf.cgm.errorNYI(e->getSourceRange(),
851 "enterNewDeleteCleanup: conditional branch");
852}
853
854static void storeAnyExprIntoOneUnit(CIRGenFunction &cgf, const Expr *init,
855 QualType allocType, Address newPtr,
856 AggValueSlot::Overlap_t mayOverlap) {
857 // FIXME: Refactor with emitExprAsInit.
858 switch (cgf.getEvaluationKind(allocType)) {
859 case cir::TEK_Scalar:
860 cgf.emitScalarInit(init, cgf.getLoc(init->getSourceRange()),
861 cgf.makeAddrLValue(newPtr, allocType), false);
862 return;
863 case cir::TEK_Complex:
864 cgf.emitComplexExprIntoLValue(init, cgf.makeAddrLValue(newPtr, allocType),
865 /*isInit*/ true);
866 return;
867 case cir::TEK_Aggregate: {
871 newPtr, allocType.getQualifiers(), AggValueSlot::IsDestructed,
873 cgf.emitAggExpr(init, slot);
874 return;
875 }
876 }
877 llvm_unreachable("bad evaluation kind");
878}
879
// Emits the initializer for an array new-expression: first any explicit
// init-list / paren-list elements, then the array filler (or a memset
// zero-initialization) for the remaining elements.
// NOTE(review): the leading line of this definition (return type and
// qualified name) was elided by extraction -- presumably
// `void CIRGenFunction::emitNewArrayInitializer(`; confirm upstream.
881 const CXXNewExpr *e, QualType elementType, mlir::Type elementTy,
882 Address beginPtr, mlir::Value numElements,
883 mlir::Value allocSizeWithoutCookie) {
884 // If we have a type with trivial initialization and no initializer,
885 // there's nothing to do.
886 if (!e->hasInitializer())
887 return;
888
// Cursor pointing at the next array element to initialize.
889 Address curPtr = beginPtr;
890
// Number of elements consumed by the explicit init list (0 if none).
891 unsigned initListElements = 0;
892
893 const Expr *init = e->getInitializer();
// endOfInit would track progress for a partial-destruction cleanup; it
// stays invalid because that cleanup path is NYI below.
894 Address endOfInit = Address::invalid();
895 QualType::DestructionKind dtorKind = elementType.isDestructedType();
// NOTE(review): source line 896 is elided in this listing -- confirm
// upstream what statement belongs here.
897
898 // Attempt to perform zero-initialization using memset.
// Returns false when the element type is not zero-initializable (e.g.
// pointer-to-data-member under Itanium, per the FIXME below).
899 auto tryMemsetInitialization = [&]() -> bool {
900 mlir::Location loc = numElements.getLoc();
901
902 // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
903 // we can initialize with a memset to -1.
904 if (!cgm.getTypes().isZeroInitializable(elementType))
905 return false;
906
907 // Optimization: since zero initialization will just set the memory
908 // to all zeroes, generate a single memset to do it in one shot.
909
910 // Subtract out the size of any elements we've already initialized.
911 auto remainingSize = allocSizeWithoutCookie;
912 if (initListElements) {
913 // We know this can't overflow; we check this when doing the allocation.
914 unsigned initializedSize =
915 getContext().getTypeSizeInChars(elementType).getQuantity() *
916 initListElements;
917 cir::ConstantOp initSizeOp =
918 builder.getConstInt(loc, remainingSize.getType(), initializedSize);
919 remainingSize = builder.createSub(loc, remainingSize, initSizeOp);
920 }
921
922 // Create the memset.
923 mlir::Value castOp =
924 builder.createPtrBitcast(curPtr.getPointer(), cgm.voidTy);
925 builder.createMemSet(loc, castOp, builder.getConstInt(loc, cgm.uInt8Ty, 0),
926 remainingSize);
927 return true;
928 };
929
// Classify the initializer: braced list, C++20 paren list, string
// literal, or @encode; only one of these (or none) will be non-null.
930 const InitListExpr *ile = dyn_cast<InitListExpr>(init);
931 const CXXParenListInitExpr *cplie = nullptr;
932 const StringLiteral *sl = nullptr;
933 const ObjCEncodeExpr *ocee = nullptr;
934 const Expr *ignoreParen = nullptr;
935 if (!ile) {
936 ignoreParen = init->IgnoreParenImpCasts();
937 cplie = dyn_cast<CXXParenListInitExpr>(ignoreParen);
938 sl = dyn_cast<StringLiteral>(ignoreParen);
939 ocee = dyn_cast<ObjCEncodeExpr>(ignoreParen);
940 }
941 // If the initializer is an initializer list, first do the explicit elements.
942 if (ile || cplie || sl || ocee) {
943 // Initializing from a (braced) string literal is a special case; the init
944 // list element does not initialize a (single) array element.
945 if ((ile && ile->isStringLiteralInit()) || sl || ocee) {
946 cgm.errorNYI(ile->getSourceRange(),
947 "emitNewArrayInitializer: string literal init");
948 return;
949 }
950
951 ArrayRef<const Expr *> initExprs =
952 ile ? ile->inits() : cplie->getInitExprs();
953 initListElements = initExprs.size();
954
955 // If this is a multi-dimensional array new, we will initialize multiple
956 // elements with each init list element.
957 QualType allocType = e->getAllocatedType();
958 if (const ConstantArrayType *cat = dyn_cast_or_null<ConstantArrayType>(
959 allocType->getAsArrayTypeUnsafe())) {
960 (void)cat;
961 cgm.errorNYI(ile->getSourceRange(),
962 "emitNewArrayInitializer: constant array init");
963 return;
964 }
965
966 // Enter a partial-destruction Cleanup if necessary.
967 if (dtorKind) {
968 cgm.errorNYI(ile->getSourceRange(),
969 "emitNewArrayInitializer: init requires dtor");
970 return;
971 }
972
973 CharUnits elementSize = getContext().getTypeSizeInChars(elementType);
974 CharUnits startAlign = curPtr.getAlignment();
975 unsigned i = 0;
// Emit each explicit element in order, advancing curPtr by one element
// per iteration with the correct alignment at that offset.
976 for (const Expr *ie : initExprs) {
977 // Tell the cleanup that it needs to destroy up to this
978 // element. TODO: some of these stores can be trivially
979 // observed to be unnecessary.
980 if (endOfInit.isValid()) {
981 cgm.errorNYI(ie->getSourceRange(),
982 "emitNewArrayInitializer: update dtor cleanup ptr");
983 return;
984 }
985 // FIXME: If the last initializer is an incomplete initializer list for
986 // an array, and we have an array filler, we can fold together the two
987 // initialization loops.
988 storeAnyExprIntoOneUnit(*this, ie, ie->getType(), curPtr,
// NOTE(review): source line 989 (the trailing argument of this call,
// likely the AggValueSlot overlap flag) is elided -- confirm upstream.
990 mlir::Location loc = getLoc(ie->getExprLoc());
991 mlir::Value castOp = builder.createPtrBitcast(
992 curPtr.getPointer(), convertTypeForMem(allocType));
993 mlir::Value offsetOp = builder.getSignedInt(loc, 1, /*width=*/32);
994 mlir::Value dataPtr = builder.createPtrStride(loc, castOp, offsetOp);
995 curPtr = Address(dataPtr, curPtr.getElementType(),
996 startAlign.alignmentAtOffset((++i) * elementSize));
997 }
998
999 // The remaining elements are filled with the array filler expression.
1000 init = ile ? ile->getArrayFiller() : cplie->getArrayFiller();
1001
1002 // Extract the initializer for the individual array elements by pulling
1003 // out the array filler from all the nested initializer lists. This avoids
1004 // generating a nested loop for the initialization.
1005 while (init && init->getType()->isConstantArrayType()) {
1006 auto *subIle = dyn_cast<InitListExpr>(init);
1007 if (!subIle)
1008 break;
1009 assert(subIle->getNumInits() == 0 && "explicit inits in array filler?");
1010 init = subIle->getArrayFiller();
1011 }
1012
1013 // Switch back to initializing one base element at a time.
1014 curPtr = curPtr.withElementType(builder, beginPtr.getElementType());
1015 }
1016
1017 // If all elements have already been initialized, skip any further
1018 // initialization.
1019 auto constOp = mlir::dyn_cast<cir::ConstantOp>(numElements.getDefiningOp());
1020 if (constOp) {
1021 auto constIntAttr = mlir::dyn_cast<cir::IntAttr>(constOp.getValue());
1022 // Just skip out if the constant count is zero.
1023 if (constIntAttr && constIntAttr.getUInt() <= initListElements)
1024 return;
1025 }
1026
1027 assert(init && "have trailing elements to initialize but no initializer");
1028
1029 // If this is a constructor call, try to optimize it out, and failing that
1030 // emit a single loop to initialize all remaining elements.
1031 if (const CXXConstructExpr *cce = dyn_cast<CXXConstructExpr>(init)) {
1032 CXXConstructorDecl *ctor = cce->getConstructor();
1033 if (ctor->isTrivial()) {
1034 // If new expression did not specify value-initialization, then there
1035 // is no initialization.
1036 if (!cce->requiresZeroInitialization())
1037 return;
1038
1039 cgm.errorNYI(cce->getSourceRange(),
1040 "emitNewArrayInitializer: trivial ctor zero-init");
1041 return;
1042 }
1043
1044 cgm.errorNYI(cce->getSourceRange(),
1045 "emitNewArrayInitializer: ctor initializer");
1046 return;
1047 }
1048
1049 // If this is value-initialization, we can usually use memset.
1050 if (isa<ImplicitValueInitExpr>(init)) {
1051 if (tryMemsetInitialization())
1052 return;
1053 cgm.errorNYI(init->getSourceRange(),
1054 "emitNewArrayInitializer: implicit value init");
1055 return;
1056 }
1057
1058 cgm.errorNYI(init->getSourceRange(),
1059 "emitNewArrayInitializer: unsupported initializer");
1060 return;
1061}
1062
// Dispatches initialization of a new-expression's storage: array news go
// through emitNewArrayInitializer; scalar news with an initializer store
// it directly into the single allocated unit.
// NOTE(review): the leading line of this definition was elided by
// extraction; per this file's doxygen index it is
// `static void emitNewInitializer(CIRGenFunction &cgf, const CXXNewExpr *e,`.
1064 QualType elementType, mlir::Type elementTy,
1065 Address newPtr, mlir::Value numElements,
1066 mlir::Value allocSizeWithoutCookie) {
// NOTE(review): source line 1067 is elided here -- confirm upstream.
1068 if (e->isArray()) {
1069 cgf.emitNewArrayInitializer(e, elementType, elementTy, newPtr, numElements,
1070 allocSizeWithoutCookie);
1071 } else if (const Expr *init = e->getInitializer()) {
1072 storeAnyExprIntoOneUnit(cgf, init, e->getAllocatedType(), newPtr,
// NOTE(review): source line 1073 (the trailing overlap argument of this
// call) is elided -- confirm upstream.
1074 }
1075}
1076
// Emits a call to the given destructor on `thisVal`, forwarding any
// implicit parameter (e.g. a VTT) and using `ce` (when present) only as
// the source-location provider for the call.
// NOTE(review): the leading line of this definition (return type and
// qualified name) was elided by extraction -- confirm upstream.
1078 GlobalDecl dtor, const CIRGenCallee &callee, mlir::Value thisVal,
1079 QualType thisTy, mlir::Value implicitParam, QualType implicitParamTy,
1080 const CallExpr *ce) {
1081 const CXXMethodDecl *dtorDecl = cast<CXXMethodDecl>(dtor.getDecl());
1082
// Sanity-check that the static type of `this` matches the destructor's
// class; mixing these up would emit a call with the wrong ABI layout.
1083 assert(!thisTy.isNull());
1084 assert(thisTy->getAsCXXRecordDecl() == dtorDecl->getParent() &&
1085 "Pointer/Object mixup");
1086
// NOTE(review): source line 1087 is elided here -- confirm upstream.
1088
// Build the argument list (`this`, optional implicit param, then any
// explicit args from `ce`) via the shared member-call helper.
1089 CallArgList args;
1090 commonBuildCXXMemberOrOperatorCall(*this, dtorDecl, thisVal, implicitParam,
1091 implicitParamTy, ce, args, nullptr);
1092 assert((ce || dtor.getDecl()) && "expected source location provider");
// NOTE(review): source line 1093 is elided here -- confirm upstream.
1094 return emitCall(cgm.getTypes().arrangeCXXStructorDeclaration(dtor), callee,
1095 ReturnValueSlot(), args, nullptr,
1096 ce ? getLoc(ce->getExprLoc())
1097 : getLoc(dtor.getDecl()->getSourceRange()));
1098}
1099
// Emits a pseudo-destructor call (e.g. `p->~int()`); the only effect is
// evaluating the base expression, so the result is a null RValue.
// NOTE(review): the leading lines of this definition (return type and
// qualified name) were elided by extraction -- confirm upstream.
1102 QualType destroyedType = expr->getDestroyedType();
1103 if (destroyedType.hasStrongOrWeakObjCLifetime()) {
// NOTE(review): source line 1104 is elided here -- confirm upstream.
1105 cgm.errorNYI(expr->getExprLoc(),
1106 "emitCXXPseudoDestructorExpr: Objective-C lifetime is NYI");
1107 } else {
1108 // C++ [expr.pseudo]p1:
1109 // The result shall only be used as the operand for the function call
1110 // operator (), and the result of such a call has type void. The only
1111 // effect is the evaluation of the postfix-expression before the dot or
1112 // arrow.
1113 emitIgnoredExpr(expr->getBase());
1114 }
1115
1116 return RValue::get(nullptr);
1117}
1118
1119namespace {
1120/// Calls the given 'operator delete' on a single object.
1121struct CallObjectDelete final : EHScopeStack::Cleanup {
1122 mlir::Value ptr;
1123 const FunctionDecl *operatorDelete;
1124 QualType elementType;
1125
1126 CallObjectDelete(mlir::Value ptr, const FunctionDecl *operatorDelete,
1127 QualType elementType)
1128 : ptr(ptr), operatorDelete(operatorDelete), elementType(elementType) {}
1129
1130 void emit(CIRGenFunction &cgf, Flags flags) override {
1131 cgf.emitDeleteCall(operatorDelete, ptr, elementType);
1132 }
1133};
1134} // namespace
1135
1136 /// Emit the code for deleting a single object: run the destructor (a
/// virtual dtor delegates the whole deletion to the ABI), then call the
/// usual 'operator delete', with an EH cleanup so delete runs even if the
/// dtor throws. Callers are expected to have emitted any null check.
// NOTE(review): the leading line of this definition was elided by
// extraction; per this file's doxygen index it is
// `static void emitObjectDelete(CIRGenFunction &cgf, const CXXDeleteExpr *de,`.
1138 Address ptr, QualType elementType) {
1139 // C++11 [expr.delete]p3:
1140 // If the static type of the object to be deleted is different from its
1141 // dynamic type, the static type shall be a base class of the dynamic type
1142 // of the object to be deleted and the static type shall have a virtual
1143 // destructor or the behavior is undefined.
// NOTE(review): source line 1144 is elided here -- confirm upstream.
1145
1146 const FunctionDecl *operatorDelete = de->getOperatorDelete();
// Destroying operator delete takes over the entire operation and is
// handled before we get here (see emitCXXDeleteExpr).
1147 assert(!operatorDelete->isDestroyingOperatorDelete());
1148
1149 // Find the destructor for the type, if applicable. If the
1150 // destructor is virtual, we'll just emit the vcall and return.
1151 const CXXDestructorDecl *dtor = nullptr;
1152 if (const auto *rd = elementType->getAsCXXRecordDecl()) {
1153 if (rd->hasDefinition() && !rd->hasTrivialDestructor()) {
1154 dtor = rd->getDestructor();
1155
1156 if (dtor->isVirtual()) {
// NOTE(review): source line 1157 is elided here -- confirm upstream.
1158 cgf.cgm.getCXXABI().emitVirtualObjectDelete(cgf, de, ptr, elementType,
1159 dtor);
1160 return;
1161 }
1162 }
1163 }
1164
1165 // Make sure that we call delete even if the dtor throws.
1166 // This doesn't have to be a conditional cleanup because we're going
1167 // to pop it off in a second.
1168 cgf.ehStack.pushCleanup<CallObjectDelete>(
1169 NormalAndEHCleanup, ptr.getPointer(), operatorDelete, elementType);
1170
1171 if (dtor) {
// NOTE(review): source line 1172 (the head of the destructor-call
// statement these arguments belong to) is elided -- confirm upstream.
1173 /*ForVirtualBase=*/false,
1174 /*Delegating=*/false, ptr, elementType);
1175 } else if (elementType.getObjCLifetime()) {
// NOTE(review): source line 1176 is elided here -- confirm upstream.
1177 cgf.cgm.errorNYI(de->getSourceRange(), "emitObjectDelete: ObjCLifetime");
1178 }
1179
1180 // In traditional LLVM codegen null checks are emitted to save a delete call.
1181 // In CIR we optimize for size by default, the null check should be added into
1182 // this function callers.
// NOTE(review): source line 1183 is elided here -- confirm upstream.
1184
// Popping the cleanup emits the pending 'operator delete' call now.
1185 cgf.popCleanupBlock();
1186}
1187
// Emits a delete-expression: evaluates the operand, handles the
// destroying-delete and array-delete forms (currently NYI), and otherwise
// delegates single-object deletion to emitObjectDelete.
// NOTE(review): the leading line of this definition (return type and
// qualified name) was elided by extraction -- confirm upstream.
1189 const Expr *arg = e->getArgument();
// NOTE(review): source line 1190 (presumably the emission of `ptr` from
// `arg`, which is used below) is elided -- confirm upstream.
1191
1192 // Null check the pointer.
1193 //
1194 // We could avoid this null check if we can determine that the object
1195 // destruction is trivial and doesn't require an array cookie; we can
1196 // unconditionally perform the operator delete call in that case. For now, we
1197 // assume that deleted pointers are null rarely enough that it's better to
1198 // keep the branch. This might be worth revisiting for a -O0 code size win.
1199 //
1200 // CIR note: emit the code size friendly by default for now, such as mentioned
1201 // in `emitObjectDelete`.
// NOTE(review): source line 1202 is elided here -- confirm upstream.
1203 QualType deleteTy = e->getDestroyedType();
1204
1205 // A destroying operator delete overrides the entire operation of the
1206 // delete expression.
// NOTE(review): source line 1207 (the `if (...)` guard this block and
// its closing brace belong to) is elided -- confirm upstream.
1208 cgm.errorNYI(e->getSourceRange(),
1209 "emitCXXDeleteExpr: destroying operator delete");
1210 return;
1211 }
1212
1213 // We might be deleting a pointer to array.
1214 deleteTy = getContext().getBaseElementType(deleteTy);
1215 ptr = ptr.withElementType(builder, convertTypeForMem(deleteTy));
1216
1217 if (e->isArrayForm() &&
1218 cgm.getASTContext().getTargetInfo().emitVectorDeletingDtors(
1219 cgm.getASTContext().getLangOpts())) {
1220 cgm.errorNYI(e->getSourceRange(),
1221 "emitCXXDeleteExpr: emitVectorDeletingDtors");
1222 }
1223
1224 if (e->isArrayForm()) {
// NOTE(review): source line 1225 is elided here -- confirm upstream.
1226 cgm.errorNYI(e->getSourceRange(), "emitCXXDeleteExpr: array delete");
1227 return;
1228 } else {
1229 emitObjectDelete(*this, e, ptr, deleteTy);
1230 }
1231}
1232
// Emits a new-expression: computes the allocation size (including any
// array cookie), calls the allocation function, optionally enters an
// 'operator delete' EH cleanup around initialization, initializes the
// storage, and returns the pointer to the constructed object(s).
// NOTE(review): the leading line of this definition (return type and
// qualified name) was elided by extraction -- confirm upstream.
1234 // The element type being allocated.
// NOTE(review): source line 1235 (presumably the definition of
// `allocType`, used throughout below) is elided -- confirm upstream.
1236
1237 // 1. Build a call to the allocation function.
1238 FunctionDecl *allocator = e->getOperatorNew();
1239
1240 // If there is a brace-initializer, cannot allocate fewer elements than inits.
1241 unsigned minElements = 0;
1242 if (e->isArray() && e->hasInitializer()) {
1243 const InitListExpr *ile = dyn_cast<InitListExpr>(e->getInitializer());
1244 if (ile && ile->isStringLiteralInit())
1245 minElements =
// NOTE(review): source line 1246 (the expression these member accesses
// apply to) is elided -- confirm upstream.
1247 ->getSize()
1248 .getZExtValue();
1249 else if (ile)
1250 minElements = ile->getNumInits();
1251 }
1252
// numElements / allocSizeWithoutCookie are out-parameters filled in by
// emitCXXNewAllocSize; allocSize includes the array cookie if any.
1253 mlir::Value numElements = nullptr;
1254 mlir::Value allocSizeWithoutCookie = nullptr;
1255 mlir::Value allocSize = emitCXXNewAllocSize(
1256 *this, e, minElements, numElements, allocSizeWithoutCookie);
1257 CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);
1258
1259 // Emit the allocation call.
1260 Address allocation = Address::invalid();
1261 CallArgList allocatorArgs;
1262 if (allocator->isReservedGlobalPlacementOperator()) {
1263 // If the allocator is a global placement operator, just
1264 // "inline" it directly.
1265 assert(e->getNumPlacementArgs() == 1);
1266 const Expr *arg = *e->placement_arguments().begin();
1267
1268 LValueBaseInfo baseInfo;
1269 allocation = emitPointerWithAlignment(arg, &baseInfo);
1270
1271 // The pointer expression will, in many cases, be an opaque void*.
1272 // In these cases, discard the computed alignment and use the
1273 // formal alignment of the allocated type.
1274 if (baseInfo.getAlignmentSource() != AlignmentSource::Decl)
1275 allocation = allocation.withAlignment(allocAlign);
1276
1277 // Set up allocatorArgs for the call to operator delete if it's not
1278 // the reserved global operator.
1279 if (e->getOperatorDelete() &&
// NOTE(review): source line 1280 (the continuation of this condition)
// is elided -- confirm upstream.
1281 cgm.errorNYI(e->getSourceRange(),
1282 "emitCXXNewExpr: reserved placement new with delete");
1283 }
1284 } else {
1285 const FunctionProtoType *allocatorType =
1286 allocator->getType()->castAs<FunctionProtoType>();
1287 unsigned paramsToSkip = 0;
1288
1289 // The allocation size is the first argument.
1290 QualType sizeType = getContext().getSizeType();
1291 allocatorArgs.add(RValue::get(allocSize), sizeType);
1292 ++paramsToSkip;
1293
1294 if (allocSize != allocSizeWithoutCookie) {
1295 CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
1296 allocAlign = std::max(allocAlign, cookieAlign);
1297 }
1298
1299 // The allocation alignment may be passed as the second argument.
1300 if (e->passAlignment()) {
1301 cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: pass alignment");
1302 }
1303
1304 // FIXME: Why do we not pass a CalleeDecl here?
1305 emitCallArgs(allocatorArgs, allocatorType, e->placement_arguments(),
1306 AbstractCallee(), paramsToSkip);
1307 RValue rv =
1308 emitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
1309
1310 // Set !heapallocsite metadata on the call to operator new.
// NOTE(review): source line 1311 is elided here -- confirm upstream.
1312
1313 // If this was a call to a global replaceable allocation function that does
1314 // not take an alignment argument, the allocator is known to produce storage
1315 // that's suitably aligned for any object that fits, up to a known
1316 // threshold. Otherwise assume it's suitably aligned for the allocated type.
1317 CharUnits allocationAlign = allocAlign;
1318 if (!e->passAlignment() &&
1319 allocator->isReplaceableGlobalAllocationFunction()) {
1320 const TargetInfo &target = cgm.getASTContext().getTargetInfo();
1321 unsigned allocatorAlign = llvm::bit_floor(std::min<uint64_t>(
1322 target.getNewAlign(), getContext().getTypeSize(allocType)));
1323 allocationAlign = std::max(
1324 allocationAlign, getContext().toCharUnitsFromBits(allocatorAlign));
1325 }
1326
1327 mlir::Value allocPtr = rv.getValue();
1328 allocation = Address(
1329 allocPtr, mlir::cast<cir::PointerType>(allocPtr.getType()).getPointee(),
1330 allocationAlign);
1331 }
1332
1333 // Emit a null check on the allocation result if the allocation
1334 // function is allowed to return null (because it has a non-throwing
1335 // exception spec or is the reserved placement new) and we have an
1336 // interesting initializer will be running sanitizers on the initialization.
1337 bool nullCheck = e->shouldNullCheckAllocation() &&
1338 (!allocType.isPODType(getContext()) || e->hasInitializer());
// NOTE(review): source line 1339 is elided here -- confirm upstream.
1340 if (nullCheck)
1341 cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: null check");
1342
1343 // If there's an operator delete, enter a cleanup to call it if an
1344 // exception is thrown. If we do this, we'll be creating the result pointer
1345 // inside a cleanup scope, either with a bitcast or an offset based on the
1346 // array cookie size. However, we need to return that pointer from outside
1347 // the cleanup scope, so we need to store it in a temporary variable.
1348 bool useNewDeleteCleanup =
1349 e->getOperatorDelete() &&
// NOTE(review): source line 1350 (the continuation of this condition)
// is elided -- confirm upstream.
1351 EHScopeStack::stable_iterator operatorDeleteCleanup;
// Placeholder op marking where initialization completed, used below to
// deactivate the cleanup; erased once the cleanup is deactivated.
1352 mlir::Operation *cleanupDominator = nullptr;
1353 if (useNewDeleteCleanup) {
// NOTE(review): source line 1354 is elided here -- confirm upstream.
1355 enterNewDeleteCleanup(*this, e, allocation, allocSize, allocAlign,
1356 allocatorArgs);
1357 operatorDeleteCleanup = ehStack.stable_begin();
1358 cleanupDominator =
1359 cir::UnreachableOp::create(builder, getLoc(e->getSourceRange()))
1360 .getOperation();
1361 }
1362
// A size difference means a cookie was added; let the ABI write it and
// advance the allocation past it.
1363 if (allocSize != allocSizeWithoutCookie) {
1364 assert(e->isArray());
1365 allocation = cgm.getCXXABI().initializeArrayCookie(
1366 *this, allocation, numElements, e, allocType);
1367 }
1368
1369 mlir::Type elementTy;
1370 if (e->isArray()) {
1371 // For array new, use the allocated type to handle multidimensional arrays
1372 // correctly
1373 elementTy = convertTypeForMem(e->getAllocatedType());
1374 } else {
1375 elementTy = convertTypeForMem(allocType);
1376 }
1377 Address result = builder.createElementBitCast(getLoc(e->getSourceRange()),
1378 allocation, elementTy);
1379
1380 // If we're inside a new delete cleanup, store the result pointer.
1381 Address resultPtr = Address::invalid();
1382 if (useNewDeleteCleanup) {
1383 resultPtr =
1384 createTempAlloca(builder.getPointerTo(elementTy), result.getAlignment(),
1385 getLoc(e->getSourceRange()), "__new_result");
1386 builder.createStore(getLoc(e->getSourceRange()), result.getPointer(),
1387 resultPtr);
1388 }
1389
1390 // Passing pointer through launder.invariant.group to avoid propagation of
1391 // vptrs information which may be included in previous type.
1392 // To not break LTO with different optimizations levels, we do it regardless
1393 // of optimization level.
1394 if (cgm.getCodeGenOpts().StrictVTablePointers &&
1395 allocator->isReservedGlobalPlacementOperator())
1396 cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: strict vtable pointers");
1397
// NOTE(review): source line 1398 is elided here -- confirm upstream.
1399
1400 emitNewInitializer(*this, e, allocType, elementTy, result, numElements,
1401 allocSizeWithoutCookie);
1402
1403 // Deactivate the 'operator delete' cleanup if we finished
1404 // initialization.
1405 if (useNewDeleteCleanup) {
1406 assert(operatorDeleteCleanup.isValid());
1407 assert(resultPtr.isValid());
1408 deactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
1409 cleanupDominator->erase();
// Reload the result from the temporary so the returned pointer is
// defined outside the cleanup scope.
1410 cir::LoadOp loadResult =
1411 builder.createLoad(getLoc(e->getSourceRange()), resultPtr);
1412 result = result.withPointer(loadResult.getResult());
1413 }
1414
// NOTE(review): source line 1415 is elided here -- confirm upstream.
1416
1417 return result.getPointer();
1418}
1419
// Emits a call to a usual 'operator delete': passes the pointer, then
// optionally the size and alignment arguments, per the delete function's
// usual-deallocation parameter profile.
// NOTE(review): the leading line of this definition was elided by
// extraction; per this file's doxygen index it is
// `void CIRGenFunction::emitDeleteCall(const FunctionDecl *deleteFD,`.
1421 mlir::Value ptr, QualType deleteTy) {
// NOTE(review): source line 1422 is elided here -- confirm upstream.
1423
1424 const auto *deleteFTy = deleteFD->getType()->castAs<FunctionProtoType>();
1425 CallArgList deleteArgs;
1426
// Walk the delete function's parameters in declaration order, consuming
// one prototype parameter per argument we add.
1427 UsualDeleteParams params = deleteFD->getUsualDeleteParams();
1428 auto paramTypeIt = deleteFTy->param_type_begin();
1429
1430 // Pass std::type_identity tag if present
// NOTE(review): source line 1431 (the `if` guard for this NYI call) is
// elided -- confirm upstream.
1432 cgm.errorNYI(deleteFD->getSourceRange(),
1433 "emitDeleteCall: type aware delete");
1434
1435 // Pass the pointer itself.
1436 QualType argTy = *paramTypeIt++;
1437 mlir::Value deletePtr =
1438 builder.createBitcast(ptr.getLoc(), ptr, convertType(argTy));
1439 deleteArgs.add(RValue::get(deletePtr), argTy);
1440
1441 // Pass the std::destroying_delete tag if present.
1442 if (params.DestroyingDelete)
1443 cgm.errorNYI(deleteFD->getSourceRange(),
1444 "emitDeleteCall: destroying delete");
1445
1446 // Pass the size if the delete function has a size_t parameter.
1447 if (params.Size) {
1448 QualType sizeType = *paramTypeIt++;
1449 CharUnits deleteTypeSize = getContext().getTypeSizeInChars(deleteTy);
1450 assert(mlir::isa<cir::IntType>(convertType(sizeType)) &&
1451 "expected cir::IntType");
1452 cir::ConstantOp size = builder.getConstInt(
1453 *currSrcLoc, convertType(sizeType), deleteTypeSize.getQuantity());
1454
1455 deleteArgs.add(RValue::get(size), sizeType);
1456 }
1457
1458 // Pass the alignment if the delete function has an align_val_t parameter.
1459 if (isAlignedAllocation(params.Alignment))
1460 cgm.errorNYI(deleteFD->getSourceRange(),
1461 "emitDeleteCall: aligned allocation");
1462
// By now every prototype parameter must have been matched.
1463 assert(paramTypeIt == deleteFTy->param_type_end() &&
1464 "unknown parameter to usual delete function");
1465
1466 // Emit the call to delete.
1467 emitNewDeleteCall(*this, deleteFD, deleteFTy, deleteArgs);
1468}
1469
// Emits the result of a dynamic_cast that is statically known to fail:
// a null pointer for pointer casts, and a call to std::bad_cast (plus an
// unreachable-successor block) for reference casts.
// NOTE(review): the leading line of this definition was elided by
// extraction; per this file's doxygen index it is
// `static mlir::Value emitDynamicCastToNull(CIRGenFunction &cgf,`.
1471 mlir::Location loc, QualType destTy) {
1472 mlir::Type destCIRTy = cgf.convertType(destTy);
1473 assert(mlir::isa<cir::PointerType>(destCIRTy) &&
1474 "result of dynamic_cast should be a ptr");
1475
// Non-pointer destination means a reference cast, which must throw.
1476 if (!destTy->isPointerType()) {
1477 mlir::Region *currentRegion = cgf.getBuilder().getBlock()->getParent();
1478 /// C++ [expr.dynamic.cast]p9:
1479 /// A failed cast to reference type throws std::bad_cast
1480 cgf.cgm.getCXXABI().emitBadCastCall(cgf, loc);
1481
1482 // The call to bad_cast will terminate the current block. Create a new block
1483 // to hold any follow up code.
1484 cgf.getBuilder().createBlock(currentRegion, currentRegion->end());
1485 }
1486
// The null result is still materialized so the expression has a value.
1487 return cgf.getBuilder().getNullPtr(destCIRTy, loc);
1488}
1489
// Emits a dynamic_cast expression: classifies the cast (to void*, to a
// pointer, or to a reference), handles the statically-null case, and
// otherwise delegates to the C++ ABI implementation.
// NOTE(review): the leading line of this definition (return type,
// qualified name, and presumably a `thisAddr` parameter used below) was
// elided by extraction -- confirm upstream.
1491 const CXXDynamicCastExpr *dce) {
1492 mlir::Location loc = getLoc(dce->getSourceRange());
1493
1494 cgm.emitExplicitCastExprType(dce, this);
1495 QualType destTy = dce->getTypeAsWritten();
1496 QualType srcTy = dce->getSubExpr()->getType();
1497
1498 // C++ [expr.dynamic.cast]p7:
1499 // If T is "pointer to cv void," then the result is a pointer to the most
1500 // derived object pointed to by v.
1501 bool isDynCastToVoid = destTy->isVoidPointerType();
1502 bool isRefCast = destTy->isReferenceType();
1503
// Strip the pointer/reference layer to get the record types the ABI
// implementation needs.
1504 QualType srcRecordTy;
1505 QualType destRecordTy;
1506 if (isDynCastToVoid) {
1507 srcRecordTy = srcTy->getPointeeType();
1508 // No destRecordTy.
1509 } else if (const PointerType *destPTy = destTy->getAs<PointerType>()) {
1510 srcRecordTy = srcTy->castAs<PointerType>()->getPointeeType();
1511 destRecordTy = destPTy->getPointeeType();
1512 } else {
1513 srcRecordTy = srcTy;
1514 destRecordTy = destTy->castAs<ReferenceType>()->getPointeeType();
1515 }
1516
1517 assert(srcRecordTy->isRecordType() && "source type must be a record type!");
// NOTE(review): source line 1518 is elided here -- confirm upstream.
1519
// A cast known to fail at compile time short-circuits to the null path.
1520 if (dce->isAlwaysNull())
1521 return emitDynamicCastToNull(*this, loc, destTy);
1522
1523 auto destCirTy = mlir::cast<cir::PointerType>(convertType(destTy));
1524 return cgm.getCXXABI().emitDynamicCast(*this, loc, srcRecordTy, destRecordTy,
1525 destCirTy, isRefCast, thisAddr);
1526}
1527
// Emits typeid() on a polymorphic glvalue: evaluates the operand to get
// its address, optionally emits a null check that calls bad_typeid, and
// asks the C++ ABI to load the type_info pointer through the vtable.
1528static mlir::Value emitCXXTypeidFromVTable(CIRGenFunction &cgf, const Expr *e,
1529 mlir::Type typeInfoPtrTy,
1530 bool hasNullCheck) {
1531 Address thisPtr = cgf.emitLValue(e).getAddress();
1532 QualType srcType = e->getType();
1533
1534 // C++ [class.cdtor]p4:
1535 // If the operand of typeid refers to the object under construction or
1536 // destruction and the static type of the operand is neither the constructor
1537 // or destructor's class nor one of its bases, the behavior is undefined.
// NOTE(review): source line 1538 is elided here -- confirm upstream.
1539
1540 if (hasNullCheck && cgf.cgm.getCXXABI().shouldTypeidBeNullChecked(srcType)) {
1541 mlir::Value isThisNull =
1542 cgf.getBuilder().createPtrIsNull(thisPtr.getPointer());
1543 // We don't really care about the value, we just want to make sure the
1544 // 'true' side calls bad-type-id.
1545 cir::IfOp::create(
1546 cgf.getBuilder(), cgf.getLoc(e->getSourceRange()), isThisNull,
1547 /*withElseRegion=*/false, [&](mlir::OpBuilder &, mlir::Location loc) {
1548 cgf.cgm.getCXXABI().emitBadTypeidCall(cgf, loc);
1549 });
1550 }
1551
1552 return cgf.cgm.getCXXABI().emitTypeid(cgf, srcType, thisPtr, typeInfoPtrTy);
1553}
1554
// Emits a typeid expression: the polymorphic-glvalue case goes through
// the vtable (emitCXXTypeidFromVTable); otherwise the result is the
// address of the static RTTI descriptor, cast to the result type.
// NOTE(review): the leading line of this definition (return type and
// qualified name) was elided by extraction -- confirm upstream.
1556 mlir::Location loc = getLoc(e->getSourceRange());
1557 mlir::Type resultType = cir::PointerType::get(convertType(e->getType()));
// NOTE(review): source line 1558 (the head of the conditional this line
// completes, presumably selecting the type operand) is elided -- confirm
// upstream.
1559 : e->getExprOperand()->getType();
1560
1561 // If the non-default global var address space is not default, we need to do
1562 // an address-space cast here.
// NOTE(review): source line 1563 is elided here -- confirm upstream.
1564
1565 // C++ [expr.typeid]p2:
1566 // When typeid is applied to a glvalue expression whose type is a
1567 // polymorphic class type, the result refers to a std::type_info object
1568 // representing the type of the most derived object (that is, the dynamic
1569 // type) to which the glvalue refers.
1570 // If the operand is already most derived object, no need to look up vtable.
1571 if (!e->isTypeOperand() && e->isPotentiallyEvaluated() &&
// NOTE(review): source line 1572 (the continuation of this condition)
// is elided -- confirm upstream.
1573 return emitCXXTypeidFromVTable(*this, e->getExprOperand(), resultType,
1574 e->hasNullCheck());
1575
1576 auto typeInfo =
1577 cast<cir::GlobalViewAttr>(cgm.getAddrOfRTTIDescriptor(loc, ty));
1578 // `getAddrOfRTTIDescriptor` lies to us and always gives us a uint8ptr as its
1579 // type, however we need the value of the actual global to call the
1580 // get-global-op, so look it up here.
1581 auto typeInfoGlobal =
1582 cast<cir::GlobalOp>(cgm.getGlobalValue(typeInfo.getSymbol().getValue()));
1583 auto getTypeInfo = cir::GetGlobalOp::create(
1584 builder, loc, builder.getPointerTo(typeInfoGlobal.getSymType()),
1585 typeInfoGlobal.getSymName());
1586 // The ABI is just generating these sometimes as ptr to u8, but they are
1587 // simply a representation of the type_info. So we have to cast this, if
1588 // necessary (createBitcast is a noop if the types match).
1589 return builder.createBitcast(getTypeInfo, resultType);
1590}
static void emit(Program &P, llvm::SmallVectorImpl< std::byte > &Code, const T &Val, bool &Success)
Helper to write bytecode and bail out if 32-bit offsets become invalid.
static void emitNewInitializer(CIRGenFunction &cgf, const CXXNewExpr *e, QualType elementType, mlir::Type elementTy, Address newPtr, mlir::Value numElements, mlir::Value allocSizeWithoutCookie)
static mlir::Value emitCXXTypeidFromVTable(CIRGenFunction &cgf, const Expr *e, mlir::Type typeInfoPtrTy, bool hasNullCheck)
static void emitObjectDelete(CIRGenFunction &cgf, const CXXDeleteExpr *de, Address ptr, QualType elementType)
Emit the code for deleting a single object.
static void emitNullBaseClassInitialization(CIRGenFunction &cgf, Address destPtr, const CXXRecordDecl *base)
static void enterNewDeleteCleanup(CIRGenFunction &cgf, const CXXNewExpr *e, Address newPtr, mlir::Value allocSize, CharUnits allocAlign, const CallArgList &newArgs)
Enter a cleanup to call 'operator delete' if the initializer in a new-expression throws.
static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e, unsigned minElements, mlir::Value &numElements, mlir::Value &sizeWithoutCookie)
static void storeAnyExprIntoOneUnit(CIRGenFunction &cgf, const Expr *init, QualType allocType, Address newPtr, AggValueSlot::Overlap_t mayOverlap)
static CharUnits calculateCookiePadding(CIRGenFunction &cgf, const CXXNewExpr *e)
static mlir::Value emitDynamicCastToNull(CIRGenFunction &cgf, mlir::Location loc, QualType destTy)
static MemberCallInfo commonBuildCXXMemberOrOperatorCall(CIRGenFunction &cgf, const CXXMethodDecl *md, mlir::Value thisPtr, mlir::Value implicitParam, QualType implicitParamTy, const CallExpr *ce, CallArgList &args, CallArgList *rtlArgs)
static RValue emitNewDeleteCall(CIRGenFunction &cgf, const FunctionDecl *calleeDecl, const FunctionProtoType *calleeType, const CallArgList &args)
Emit a call to an operator new or operator delete function, as implicitly created by new-expressions ...
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
Defines the clang::Expr interface and subclasses for C++ expressions.
Defines an enumeration for C++ overloaded operators.
static QualType getPointeeType(const MemRegion *R)
cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr)
mlir::Value createOr(mlir::Location loc, mlir::Value lhs, mlir::Value rhs)
mlir::Value createPtrIsNull(mlir::Value ptr)
cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc)
cir::CmpOp createCompare(mlir::Location loc, cir::CmpOpKind kind, mlir::Value lhs, mlir::Value rhs)
mlir::Value createSelect(mlir::Location loc, mlir::Value condition, mlir::Value trueValue, mlir::Value falseValue)
mlir::Value createMul(mlir::Location loc, mlir::Value lhs, mlir::Value rhs, OverflowBehavior ob=OverflowBehavior::None)
llvm::TypeSize getTypeSizeInBits(mlir::Type ty) const
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
TranslationUnitDecl * getTranslationUnitDecl() const
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
DeclarationNameTable DeclarationNames
Definition ASTContext.h:801
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
static bool hasSameType(QualType T1, QualType T2)
Determine whether the given types T1 and T2 are equivalent.
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object...
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3730
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
Expr * getRHS() const
Definition Expr.h:4093
Opcode getOpcode() const
Definition Expr.h:4086
Address withPointer(mlir::Value newPtr) const
Return address with different pointer, but same element type and alignment.
Definition Address.h:81
mlir::Value getPointer() const
Definition Address.h:96
mlir::Type getElementType() const
Definition Address.h:123
static Address invalid()
Definition Address.h:74
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
clang::CharUnits getAlignment() const
Definition Address.h:136
Address withAlignment(clang::CharUnits newAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:87
bool isValid() const
Definition Address.h:75
An aggregate value slot.
IsZeroed_t isZeroed() const
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
bool isNullValue(mlir::Attribute attr) const
cir::StoreOp createStore(mlir::Location loc, mlir::Value val, Address dst, bool isVolatile=false, mlir::IntegerAttr align={}, cir::SyncScopeKindAttr scope={}, cir::MemOrderAttr order={})
cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal)
virtual void emitVirtualObjectDelete(CIRGenFunction &cgf, const CXXDeleteExpr *de, Address ptr, QualType elementType, const CXXDestructorDecl *dtor)=0
virtual const clang::CXXRecordDecl * getThisArgumentTypeForMethod(const clang::CXXMethodDecl *md)
Get the type of the implicit "this" parameter used by a method.
virtual bool shouldTypeidBeNullChecked(QualType srcTy)=0
virtual mlir::Value emitTypeid(CIRGenFunction &cgf, QualType srcTy, Address thisPtr, mlir::Type typeInfoPtrTy)=0
virtual void emitBadCastCall(CIRGenFunction &cgf, mlir::Location loc)=0
virtual CharUnits getArrayCookieSize(const CXXNewExpr *e)
Returns the extra size required in order to store the array cookie for the given new-expression.
static CIRGenCallee forDirect(mlir::Operation *funcPtr, const CIRGenCalleeInfo &abstractInfo=CIRGenCalleeInfo())
Definition CIRGenCall.h:92
static CIRGenCallee forVirtual(const clang::CallExpr *ce, clang::GlobalDecl md, Address addr, cir::FuncType fTy)
Definition CIRGenCall.h:154
An abstract representation of regular/ObjC call/message targets.
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
RValue emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *ce, ReturnValueSlot returnValue)
void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr, QualType deleteTy)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
RValue emitCXXMemberOrOperatorCall(const clang::CXXMethodDecl *md, const CIRGenCallee &callee, ReturnValueSlot returnValue, mlir::Value thisPtr, mlir::Value implicitParam, clang::QualType implicitParamTy, const clang::CallExpr *ce, CallArgList *rtlArgs)
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
RValue emitCUDAKernelCallExpr(const CUDAKernelCallExpr *expr, ReturnValueSlot returnValue)
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType, mlir::Type elementTy, Address beginPtr, mlir::Value numElements, mlir::Value allocSizeWithoutCookie)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
mlir::Value emitCXXTypeidExpr(const CXXTypeidExpr *e)
mlir::Type convertTypeForMem(QualType t)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
mlir::Value emitCXXNewExpr(const CXXNewExpr *e)
Address returnValue
The temporary alloca to hold the return value.
void emitCXXConstructorCall(const clang::CXXConstructorDecl *d, clang::CXXCtorType type, bool forVirtualBase, bool delegating, AggValueSlot thisAVS, const clang::CXXConstructExpr *e)
void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit=false)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
void deactivateCleanupBlock(EHScopeStack::stable_iterator cleanup, mlir::Operation *dominatingIP)
Deactivates the given cleanup block.
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
CIRGenBuilderTy & getBuilder()
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
RValue emitNewOrDeleteBuiltinCall(const FunctionProtoType *type, const CallExpr *callExpr, OverloadedOperatorKind op)
void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType, Address arrayBegin, const CXXConstructExpr *e, bool newPointerIsChecked, bool zeroInitialize=false)
Emit a loop to call a particular constructor for each of several members of an array.
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
void emitCXXDeleteExpr(const CXXDeleteExpr *e)
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
mlir::TypedAttr emitNullConstantForBase(const CXXRecordDecl *record)
Return a null constant appropriate for zero-initializing a base class with the given type.
cir::FuncOp getAddrOfFunction(clang::GlobalDecl gd, mlir::Type funcType=nullptr, bool forVTable=false, bool dontDefer=false, ForDefinition_t isForDefinition=NotForDefinition)
Return the address of the given function.
const cir::CIRDataLayout getDataLayout() const
CIRGenCXXABI & getCXXABI() const
const CIRGenFunctionInfo & arrangeFreeFunctionCall(const CallArgList &args, const FunctionType *fnType)
clang::CanQualType deriveThisType(const clang::CXXRecordDecl *rd, const clang::CXXMethodDecl *md)
Derives the 'this' type for CIRGen purposes, i.e.
void addFrom(const CallArgList &other)
Add all the arguments from another CallArgList to this one.
Definition CIRGenCall.h:248
void add(RValue rvalue, clang::QualType type)
Definition CIRGenCall.h:239
mlir::Attribute tryEmitAbstract(const Expr *e, QualType destType)
Information for lazily generating a cleanup.
A saved depth on the scope stack.
T * pushCleanupWithExtra(CleanupKind kind, size_t n, As... a)
Push a cleanup with non-constant storage requirements on the stack.
AlignmentSource getAlignmentSource() const
Address getAddress() const
mlir::Value getPointer() const
void setAddress(Address address)
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
A class for recording the number of arguments that a function signature requires.
static RequiredArgs getFromProtoWithExtraSlots(const clang::FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional...
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:260
Represents a call to a CUDA kernel function.
Definition ExprCXX.h:235
Represents a call to a C++ constructor.
Definition ExprCXX.h:1549
bool isElidable() const
Whether this construction is elidable.
Definition ExprCXX.h:1618
Expr * getArg(unsigned Arg)
Return the specified argument.
Definition ExprCXX.h:1692
bool requiresZeroInitialization() const
Whether this construction first requires zero-initialization before the initializer is called.
Definition ExprCXX.h:1651
CXXConstructorDecl * getConstructor() const
Get the constructor that this expression will (ultimately) call.
Definition ExprCXX.h:1612
CXXConstructionKind getConstructionKind() const
Determine whether this constructor is actually constructing a base class (rather than a complete obje...
Definition ExprCXX.h:1660
Represents a C++ constructor within a class.
Definition DeclCXX.h:2611
bool isDefaultConstructor() const
Whether this constructor is a default constructor (C++ [class.ctor]p5), which can be used to default-...
Definition DeclCXX.cpp:3017
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition ExprCXX.h:2627
FunctionDecl * getOperatorDelete() const
Definition ExprCXX.h:2666
bool isArrayForm() const
Definition ExprCXX.h:2653
QualType getDestroyedType() const
Retrieve the type being destroyed.
Definition ExprCXX.cpp:338
Represents a C++ destructor within a class.
Definition DeclCXX.h:2876
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition ExprCXX.h:482
bool isAlwaysNull() const
isAlwaysNull - Return whether the result of the dynamic_cast is proven to always be null.
Definition ExprCXX.cpp:838
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:180
SourceLocation getExprLoc() const LLVM_READONLY
Definition ExprCXX.h:221
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2136
bool isVirtual() const
Definition DeclCXX.h:2191
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition DeclCXX.h:2262
bool isInstance() const
Definition DeclCXX.h:2163
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition ExprCXX.h:2356
bool isArray() const
Definition ExprCXX.h:2465
llvm::iterator_range< arg_iterator > placement_arguments()
Definition ExprCXX.h:2573
QualType getAllocatedType() const
Definition ExprCXX.h:2435
unsigned getNumImplicitArgs() const
Definition ExprCXX.h:2512
std::optional< Expr * > getArraySize()
This might return std::nullopt even if isArray() returns true, since there might not be an array size...
Definition ExprCXX.h:2470
ImplicitAllocationParameters implicitAllocationParameters() const
Provides the full set of information about expected implicit parameters in this call.
Definition ExprCXX.h:2563
bool hasInitializer() const
Whether this new-expression has any initializer at all.
Definition ExprCXX.h:2525
bool shouldNullCheckAllocation() const
True if the allocation result needs to be null-checked.
Definition ExprCXX.cpp:326
bool passAlignment() const
Indicates whether the required alignment should be implicitly passed to the allocation function.
Definition ExprCXX.h:2552
FunctionDecl * getOperatorDelete() const
Definition ExprCXX.h:2462
unsigned getNumPlacementArgs() const
Definition ExprCXX.h:2495
SourceRange getSourceRange() const
Definition ExprCXX.h:2611
FunctionDecl * getOperatorNew() const
Definition ExprCXX.h:2460
Expr * getInitializer()
The initializer of this new-expression.
Definition ExprCXX.h:2534
A call to an overloaded operator written using operator syntax.
Definition ExprCXX.h:85
Represents a list-initialization with parenthesis.
Definition ExprCXX.h:5142
MutableArrayRef< Expr * > getInitExprs()
Definition ExprCXX.h:5182
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition ExprCXX.h:2746
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition DeclCXX.h:1186
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition ExprCXX.h:849
bool isTypeOperand() const
Definition ExprCXX.h:885
QualType getTypeOperand(const ASTContext &Context) const
Retrieves the type operand of this typeid() expression after various required adjustments (removing r...
Definition ExprCXX.cpp:161
Expr * getExprOperand() const
Definition ExprCXX.h:896
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprCXX.h:903
bool isMostDerived(const ASTContext &Context) const
Best-effort check if the expression operand refers to a most derived object.
Definition ExprCXX.cpp:149
bool isPotentiallyEvaluated() const
Determine whether this typeid has a type operand which is potentially evaluated, per C++11 [expr....
Definition ExprCXX.cpp:134
bool hasNullCheck() const
Whether this is of a form like "typeid(*ptr)" that can throw a std::bad_typeid if a pointer is a null...
Definition ExprCXX.cpp:200
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3150
arg_iterator arg_begin()
Definition Expr.h:3203
arg_iterator arg_end()
Definition Expr.h:3206
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3129
Expr * getCallee()
Definition Expr.h:3093
arg_range arguments()
Definition Expr.h:3198
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
bool isOne() const
isOne - Test whether the quantity equals one.
Definition CharUnits.h:125
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition CharUnits.h:53
Represents the canonical version of C arrays with a specified constant size.
Definition TypeBase.h:3768
The results of name lookup within a DeclContext.
Definition DeclBase.h:1382
lookup_result lookup(DeclarationName Name) const
lookup - Find the declarations (if any) with the given Name in this context.
virtual SourceRange getSourceRange() const LLVM_READONLY
Source range that this declaration covers.
Definition DeclBase.h:427
DeclarationName getCXXOperatorName(OverloadedOperatorKind Op)
Get the name of the overloadable C++ operator corresponding to Op.
The name of a declaration.
QualType getTypeAsWritten() const
getTypeAsWritten - Returns the type that this expression is casting to, as written in the source code...
Definition Expr.h:3958
This represents one expression.
Definition Expr.h:112
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3090
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool isTemporaryObject(ASTContext &Ctx, const CXXRecordDecl *TempTy) const
Determine whether the result of this expression is a temporary object of the given class type.
Definition Expr.cpp:3253
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
Represents a function declaration or definition.
Definition Decl.h:2000
bool isDestroyingOperatorDelete() const
Determine whether this is a destroying operator delete.
Definition Decl.cpp:3552
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Definition Decl.h:2377
UsualDeleteParams getUsualDeleteParams() const
Definition Decl.cpp:3568
bool isReservedGlobalPlacementOperator() const
Determines whether this operator new or delete is one of the reserved global placement operators: voi...
Definition Decl.cpp:3404
bool isDefaulted() const
Whether this function is defaulted.
Definition Decl.h:2385
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4550
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5315
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
Describes an C or C++ initializer list.
Definition Expr.h:5302
bool isStringLiteralInit() const
Is this an initializer for an array of characters, initialized by a string literal or an @encode?
Definition Expr.cpp:2448
unsigned getNumInits() const
Definition Expr.h:5332
Expr * getArrayFiller()
If this initializer list initializes an array with more elements than there are initializers in the l...
Definition Expr.h:5404
ArrayRef< Expr * > inits()
Definition Expr.h:5352
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3661
QualType getPointeeType() const
Definition TypeBase.h:3679
This represents a decl that may have a name.
Definition Decl.h:274
Represents a C++ nested name specifier, such as "\::std::vector<int>::".
ObjCEncodeExpr, used for @encode in Objective-C.
Definition ExprObjC.h:407
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3336
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8428
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1444
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1551
bool isPODType(const ASTContext &Context) const
Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
Definition Type.cpp:2738
bool hasStrongOrWeakObjCLifetime() const
Definition TypeBase.h:1452
Base for LValueReferenceType and RValueReferenceType.
Definition TypeBase.h:3581
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1802
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4894
bool isUnion() const
Definition Decl.h:3928
Exposes information about the current target.
Definition TargetInfo.h:226
unsigned getNewAlign() const
Return the largest alignment for which a suitably-sized allocation with 'operator new(size_t)' is gua...
Definition TargetInfo.h:766
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Decl.h:3547
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantArrayType() const
Definition TypeBase.h:8728
bool isVoidPointerType() const
Definition Type.cpp:713
bool isPointerType() const
Definition TypeBase.h:8625
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9285
bool isReferenceType() const
Definition TypeBase.h:8649
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
const ArrayType * getAsArrayTypeUnsafe() const
A variant of getAs<> for array types which silently discards qualifiers from the outermost type.
Definition TypeBase.h:9271
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9218
bool isRecordType() const
Definition TypeBase.h:8752
QualType getType() const
Definition Decl.h:723
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
const internal::VariadicDynCastAllOfMatcher< Stmt, CallExpr > callExpr
Matches call expressions.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const internal::VariadicAllOfMatcher< Decl > decl
Matches declarations.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
The JSON file list parser is used to communicate input to InstallAPI.
OverloadedOperatorKind
Enumeration specifying the different kinds of C++ overloaded operators.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
@ Ctor_Base
Base object ctor.
Definition ABI.h:26
@ Ctor_Complete
Complete object ctor.
Definition ABI.h:25
bool isa(CodeGen::Address addr)
Definition Address.h:330
AlignedAllocationMode alignedAllocationModeFromBool(bool IsAligned)
Definition ExprCXX.h:2270
bool isAlignedAllocation(AlignedAllocationMode Mode)
Definition ExprCXX.h:2266
AlignedAllocationMode
Definition ExprCXX.h:2264
@ Dtor_Complete
Complete object dtor.
Definition ABI.h:36
bool isTypeAwareAllocation(TypeAwareAllocationMode Mode)
Definition ExprCXX.h:2254
TypeAwareAllocationMode
Definition ExprCXX.h:2252
U cast(CodeGen::Address addr)
Definition Address.h:327
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
static bool objCLifetime()
static bool addressSpace()
static bool devirtualizeDestructor()
static bool aggValueSlotGC()
static bool devirtualizeMemberFunction()
static bool deleteArray()
static bool emitTypeCheck()
static bool cleanupDeactivationScope()
static bool opCallMustTail()
static bool typeAwareAllocation()
static bool exprNewNullCheck()
static bool attributeBuiltin()
static bool emitNullCheckForDeleteCalls()
static bool generateDebugInfo()
clang::CharUnits getSizeAlign() const
The parameters to pass to a usual operator delete.
Definition ExprCXX.h:2345
TypeAwareAllocationMode TypeAwareDelete
Definition ExprCXX.h:2346
AlignedAllocationMode Alignment
Definition ExprCXX.h:2349