clang 23.0.0git
CIRGenExprCXX.cpp
Go to the documentation of this file.
1//===--- CIRGenExprCXX.cpp - Emit CIR Code for C++ expressions ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code dealing with code generation of C++ expressions
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenCXXABI.h"
15#include "CIRGenFunction.h"
16
17#include "clang/AST/CharUnits.h"
18#include "clang/AST/DeclCXX.h"
19#include "clang/AST/ExprCXX.h"
20#include "clang/AST/ExprObjC.h"
23#include "llvm/ADT/Sequence.h"
24#include "llvm/Support/TrailingObjects.h"
25
26using namespace clang;
27using namespace clang::CIRGen;
28
namespace {
/// Bookkeeping produced while building the argument list for a C++ member or
/// operator call; both fields are forwarded to arrangeCXXMethodCall when the
/// call is arranged.
struct MemberCallInfo {
  // Which of the call's arguments are required (as opposed to variadic).
  RequiredArgs reqArgs;
  // Number of prefix arguments for the call. Ignores the `this` pointer.
  unsigned prefixSize;
};
} // namespace
36
// Builds the shared argument list for a C++ member or operator call: pushes
// `this`, an optional implicit parameter (e.g. the VTT), then the remaining
// call arguments (either pre-emitted right-to-left args, the call expr's
// args, or none). Returns the {required args, prefix size} pair the caller
// needs to arrange the call.
// NOTE(review): this scrape is missing the helper's declaration line
// (name/return type) and several continuation lines, marked below; the name
// commonBuildCXXMemberOrOperatorCall is confirmed by the call site in
// emitCXXMemberOrOperatorCall.
    CIRGenFunction &cgf, const CXXMethodDecl *md, mlir::Value thisPtr,
    mlir::Value implicitParam, QualType implicitParamTy, const CallExpr *ce,
    CallArgList &args, CallArgList *rtlArgs) {
  // Either no call expression (implicit calls) or a member/operator call
  // expression is expected here.
  assert(ce == nullptr || isa<CXXMemberCallExpr>(ce) ||
  // NOTE(review): assert continuation line missing from scrape.
  assert(md->isInstance() &&
         "Trying to emit a member or operator call expr on a static method!");

  // Push the this ptr.
  const CXXRecordDecl *rd =
  // NOTE(review): initializer line missing from scrape.
  args.add(RValue::get(thisPtr), cgf.getTypes().deriveThisType(rd, md));

  // If there is an implicit parameter (e.g. VTT), emit it.
  if (implicitParam) {
    args.add(RValue::get(implicitParam), implicitParamTy);
  }

  const auto *fpt = md->getType()->castAs<FunctionProtoType>();
  RequiredArgs required =
  // NOTE(review): initializer line missing from scrape.
  // Everything pushed so far except `this` counts as a prefix argument.
  unsigned prefixSize = args.size() - 1;

  // Add the rest of the call args
  if (rtlArgs) {
    // Special case: if the caller emitted the arguments right-to-left already
    // (prior to emitting the *this argument), we're done. This happens for
    // assignment operators.
    args.addFrom(*rtlArgs);
  } else if (ce) {
    // Special case: skip first argument of CXXOperatorCall (it is "this").
    unsigned argsToSkip = isa<CXXOperatorCallExpr>(ce) ? 1 : 0;
    cgf.emitCallArgs(args, fpt, drop_begin(ce->arguments(), argsToSkip),
                     ce->getDirectCallee());
  } else {
    assert(
        fpt->getNumParams() == 0 &&
        "No CallExpr specified for function with non-zero number of arguments");
  }

  return {required, prefixSize};
}
81
// Emits a call through a pointer-to-member-function (the `.*` / `->*`
// operators): evaluates the object and the member-function pointer, resolves
// the pointer to a concrete callee plus adjusted `this` via cir.get_method,
// and emits an indirect call.
// NOTE(review): the scrape is missing this function's signature and a few
// lines, marked below — including the declaration of `required`, which is
// used in the final emitCall.
  const BinaryOperator *bo =
  // NOTE(review): initializer line missing from scrape.
  const Expr *baseExpr = bo->getLHS();
  const Expr *memFnExpr = bo->getRHS();

  const auto *mpt = memFnExpr->getType()->castAs<MemberPointerType>();
  const auto *fpt = mpt->getPointeeType()->castAs<FunctionProtoType>();

  // Emit the 'this' pointer.
  Address thisAddr = Address::invalid();
  if (bo->getOpcode() == BO_PtrMemI)
    // `->*`: the base is already a pointer; just compute its address.
    thisAddr = emitPointerWithAlignment(baseExpr);
  else
    // `.*`: take the address of the base lvalue.
    thisAddr = emitLValue(baseExpr).getAddress();


  // Get the member function pointer.
  mlir::Value memFnPtr = emitScalarExpr(memFnExpr);

  // Resolve the member function pointer to the actual callee and adjust the
  // "this" pointer for call.
  mlir::Location loc = getLoc(ce->getExprLoc());
  auto [/*mlir::Value*/ calleePtr, /*mlir::Value*/ adjustedThis] =
      builder.createGetMethod(loc, memFnPtr, thisAddr.getPointer());

  // Prepare the call arguments.
  CallArgList argsList;
  argsList.add(RValue::get(adjustedThis), getContext().VoidPtrTy);
  emitCallArgs(argsList, fpt, ce->arguments());

  // NOTE(review): the line declaring `required` is missing from this scrape.

  // Build the call.
  CIRGenCallee callee(fpt, calleePtr.getDefiningOp());
  return emitCall(cgm.getTypes().arrangeCXXMethodCall(argsList, fpt, required,
                                                      /*PrefixSize=*/0),
                  callee, returnValue, argsList, nullptr, loc);
}
125
// Emits a C++ member or operator member call: attempts simple
// devirtualization, evaluates RHS-first for assignment operators (C++17),
// computes `this`, then dispatches either a destructor call or a
// direct/virtual method call.
// NOTE(review): the scrape is missing the first signature line(s) and several
// continuation lines, marked below.
    bool hasQualifier, NestedNameSpecifier qualifier, bool isArrow,
    const Expr *base) {

  // Compute the object pointer.
  // A qualified call (obj.Base::f()) always suppresses virtual dispatch.
  bool canUseVirtualCall = md->isVirtual() && !hasQualifier;
  const CXXMethodDecl *devirtualizedMethod = nullptr;
  // TODO: This devirtualization logic should be hoisted to the AST layer so it
  // can be shared with classic codegen (see CGExprCXX.cpp).
  if (canUseVirtualCall &&
      md->getDevirtualizedMethod(base, getLangOpts().AppleKext)) {
    const CXXRecordDecl *bestDynamicDecl = base->getBestDynamicClassType();
    devirtualizedMethod = md->getCorrespondingMethodInClass(bestDynamicDecl);
    assert(devirtualizedMethod);
    const CXXRecordDecl *devirtualizedClass = devirtualizedMethod->getParent();
    const Expr *inner = base->IgnoreParenBaseCasts();
    // Record type of an expression, looking through pointers for `->` bases.
    auto getCXXRecord = [](const Expr *e) -> const CXXRecordDecl * {
      QualType t = e->getType();
      if (t->isRecordType())
        return t->getAsCXXRecordDecl();
      return t->getPointeeCXXRecordDecl();
    };
    if (devirtualizedMethod->getReturnType().getCanonicalType() !=
    // NOTE(review): the right-hand side of this comparison is missing from
    // the scrape.
      // If the return types are not the same, this might be a case where more
      // code needs to run to compensate for it. For example, the derived
      // method might return a type that inherits from the return type of MD
      // and has a prefix.
      // For now we just avoid devirtualizing these covariant cases.
      devirtualizedMethod = nullptr;
    else if (getCXXRecord(inner) == devirtualizedClass)
      // If the class of the Inner expression is where the dynamic method
      // is defined, build the this pointer from it.
      base = inner;
    else if (getCXXRecord(base) != devirtualizedClass) {
      // If the method is defined in a class that is not the best dynamic
      // one or the one of the full expression, we would have to build
      // a derived-to-base cast to compute the correct this pointer, but
      // we don't have support for that yet, so do a virtual call.
      devirtualizedMethod = nullptr;
    }
  }

  // Note on trivial assignment
  // --------------------------
  // Classic codegen avoids generating the trivial copy/move assignment operator
  // when it isn't necessary, choosing instead to just produce IR with an
  // equivalent effect. We have chosen not to do that in CIR, instead emitting
  // trivial copy/move assignment operators and allowing later transformations
  // to optimize them away if appropriate.

  // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
  // operator before the LHS.
  CallArgList rtlArgStorage;
  CallArgList *rtlArgs = nullptr;
  if (auto *oce = dyn_cast<CXXOperatorCallExpr>(ce)) {
    if (oce->isAssignmentOp()) {
      rtlArgs = &rtlArgStorage;
      emitCallArgs(*rtlArgs, md->getType()->castAs<FunctionProtoType>(),
                   drop_begin(ce->arguments(), 1), ce->getDirectCallee(),
                   /*ParamsToSkip*/ 0);
    }
  }

  LValue thisPtr;
  if (isArrow) {
    LValueBaseInfo baseInfo;
    // NOTE(review): one line is missing from the scrape here.
    Address thisValue = emitPointerWithAlignment(base, &baseInfo);
    thisPtr = makeAddrLValue(thisValue, base->getType(), baseInfo);
  } else {
    thisPtr = emitLValue(base);
  }

  if (isa<CXXConstructorDecl>(md)) {
    cgm.errorNYI(ce->getSourceRange(),
                 "emitCXXMemberOrOperatorMemberCallExpr: constructor call");
    return RValue::get(nullptr);
  }

  if ((md->isTrivial() || (md->isDefaulted() && md->getParent()->isUnion())) &&
  // NOTE(review): the rest of this condition is missing from the scrape.
    return RValue::get(nullptr);

  // Compute the function type we're calling
  const CXXMethodDecl *calleeDecl =
      devirtualizedMethod ? devirtualizedMethod : md;
  const CIRGenFunctionInfo *fInfo = nullptr;
  if (const auto *dtor = dyn_cast<CXXDestructorDecl>(calleeDecl))
    fInfo = &cgm.getTypes().arrangeCXXStructorDeclaration(
    // NOTE(review): argument line missing from the scrape.
  else
    fInfo = &cgm.getTypes().arrangeCXXMethodDeclaration(calleeDecl);

  cir::FuncType ty = cgm.getTypes().getFunctionType(*fInfo);


  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  bool useVirtualCall = canUseVirtualCall && !devirtualizedMethod;

  if (const auto *dtor = dyn_cast<CXXDestructorDecl>(calleeDecl)) {
    assert(ce->arg_begin() == ce->arg_end() &&
           "Destructor shouldn't have explicit parameters");
    assert(returnValue.isNull() && "Destructor shouldn't have return value");
    if (useVirtualCall) {
      cgm.getCXXABI().emitVirtualDestructorCall(*this, dtor, Dtor_Complete,
                                                thisPtr.getAddress(),
      // NOTE(review): trailing argument line missing from the scrape.
    } else {
      GlobalDecl globalDecl(dtor, Dtor_Complete);
      CIRGenCallee callee;
      // NOTE(review): one line is missing from the scrape here.
      if (!devirtualizedMethod) {
      // NOTE(review): the `callee = CIRGenCallee::forDirect(` line appears to
      // be missing from the scrape.
            cgm.getAddrOfCXXStructor(globalDecl, fInfo, ty), globalDecl);
      } else {
        callee = CIRGenCallee::forDirect(cgm.getAddrOfFunction(globalDecl, ty),
                                         globalDecl);
      }

      QualType thisTy =
          isArrow ? base->getType()->getPointeeType() : base->getType();
      // CIRGen does not pass CallOrInvoke here (different from OG LLVM codegen)
      // because in practice it always null even in OG.
      emitCXXDestructorCall(globalDecl, callee, thisPtr.getPointer(), thisTy,
                            /*implicitParam=*/nullptr,
                            /*implicitParamTy=*/QualType(), ce);
    }
    return RValue::get(nullptr);
  }

  CIRGenCallee callee;
  if (useVirtualCall) {
    callee = CIRGenCallee::forVirtual(ce, md, thisPtr.getAddress(), ty);
  } else {
    // NOTE(review): one line is missing from the scrape here.
    if (getLangOpts().AppleKext) {
      cgm.errorNYI(ce->getSourceRange(),
                   "emitCXXMemberOrOperatorMemberCallExpr: AppleKext");
      return RValue::get(nullptr);
    }

    callee = CIRGenCallee::forDirect(cgm.getAddrOfFunction(calleeDecl, ty),
                                     GlobalDecl(calleeDecl));
  }

  // Virtual methods may need the `this` pointer adjusted to the vtable's
  // expected subobject, even for a direct (devirtualized/qualified) call.
  if (md->isVirtual()) {
    Address newThisAddr =
        cgm.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
            *this, calleeDecl, thisPtr.getAddress(), useVirtualCall);
    thisPtr.setAddress(newThisAddr);
  }

  // NOTE(review): the `return emitCXXMemberOrOperatorCall(` line is missing
  // from the scrape.
      calleeDecl, callee, returnValue, thisPtr.getPointer(),
      /*ImplicitParam=*/nullptr, QualType(), ce, rtlArgs);
}
292
// Entry point delegating to emitCXXMemberOrOperatorMemberCallExpr with no
// qualifier and a non-arrow base. Since `e->getArg(0)` is used as the base
// object, this is presumably the operator-call form (where argument 0 is the
// implicit object) — confirm against upstream; the signature lines are
// missing from this scrape.
RValue
    const CXXMethodDecl *md,
  assert(md->isInstance() &&
         "Trying to emit a member call expr on a static method!");
  // NOTE(review): the delegating-call line is missing from the scrape.
      e, md, returnValue, /*HasQualifier=*/false, /*Qualifier=*/std::nullopt,
      /*IsArrow=*/false, e->getArg(0));
}
303
// NOTE(review): this function's signature lines are missing from the scrape;
// the body is a CUDA kernel-launch emitter taking `expr` and `returnValue`.
  // Emit as a device kernel call if CUDA device code is to be generated.
  if (!getLangOpts().HIP && getLangOpts().CUDAIsDevice)
    // Device-side kernel launch is not implemented in CIRGen yet.
    cgm.errorNYI("CUDA Device side kernel call");
  return cgm.getCUDARuntime().emitCUDAKernelCallExpr(*this, expr, returnValue);
}
311
// Emits the actual member/operator call once the callee and `this` value are
// known: builds the argument list via commonBuildCXXMemberOrOperatorCall,
// arranges the call, and emits it.
// NOTE(review): the scrape is missing this function's first signature line
// and one line before the final emitCall.
    const CXXMethodDecl *md, const CIRGenCallee &callee,
    ReturnValueSlot returnValue, mlir::Value thisPtr, mlir::Value implicitParam,
    QualType implicitParamTy, const CallExpr *ce, CallArgList *rtlArgs) {
  const auto *fpt = md->getType()->castAs<FunctionProtoType>();
  CallArgList args;
  MemberCallInfo callInfo = commonBuildCXXMemberOrOperatorCall(
      *this, md, thisPtr, implicitParam, implicitParamTy, ce, args, rtlArgs);
  auto &fnInfo = cgm.getTypes().arrangeCXXMethodCall(
      args, fpt, callInfo.reqArgs, callInfo.prefixSize);
  // With no call expression, an enclosing emitter must already have recorded
  // a source location for us to reuse.
  assert((ce || currSrcLoc) && "expected source location");
  mlir::Location loc = ce ? getLoc(ce->getExprLoc()) : *currSrcLoc;
  // NOTE(review): one line is missing from the scrape here.
  return emitCall(fnInfo, callee, returnValue, args, nullptr, loc);
}
327
// Zero-initializes the storage of a base-class subobject at `destPtr` before
// its constructor runs, by storing a null constant covering the base's
// non-virtual size.
// NOTE(review): the scrape is missing the function's first signature line,
// the declaration of `stores`, and one other line, marked below.
                                           Address destPtr,
                                           const CXXRecordDecl *base) {
  // Empty bases occupy no storage of their own; nothing to initialize.
  if (base->isEmpty())
    return;

  const ASTRecordLayout &layout = cgf.getContext().getASTRecordLayout(base);
  CharUnits nvSize = layout.getNonVirtualSize();

  // We cannot simply zero-initialize the entire base sub-object if vbptrs are
  // present, they are initialized by the most derived class before calling the
  // constructor.
  // NOTE(review): the declaration of `stores` is missing from the scrape.
  stores.emplace_back(CharUnits::Zero(), nvSize);

  // Each store is split by the existence of a vbptr.
  // TODO(cir): This only needs handling for the MS CXXABI.
  // NOTE(review): one line is missing from the scrape here.

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  // TODO: isZeroInitializable can be over-conservative in the case where a
  // virtual base contains a member pointer.
  mlir::TypedAttr nullConstantForBase = cgf.cgm.emitNullConstantForBase(base);
  if (!cgf.getBuilder().isNullValue(nullConstantForBase)) {
    cgf.cgm.errorNYI(
        base->getSourceRange(),
        "emitNullBaseClassInitialization: base constant is not null");
  } else {
    // Otherwise, just memset the whole thing to zero.  This is legal
    // because in LLVM, all default initializers (other than the ones we just
    // handled above) are guaranteed to have a bit pattern of all zeros.
    // TODO(cir): When the MS CXXABI is supported, we will need to iterate over
    // `stores` and create a separate memset for each one. For now, we know that
    // there will only be one store and it will begin at offset zero, so that
    // simplifies this code considerably.
    assert(stores.size() == 1 && "Expected only one store");
    assert(stores[0].first == CharUnits::Zero() &&
           "Expected store to begin at offset zero");
    CIRGenBuilderTy builder = cgf.getBuilder();
    mlir::Location loc = cgf.getLoc(base->getBeginLoc());
    builder.createStore(loc, builder.getConstant(loc, nullConstantForBase),
                        destPtr);
  }
}
375
// Emits a CXXConstructExpr into `dest`: performs any required
// zero-initialization first, skips trivial default constructors, elides
// construction from a temporary when permitted, then emits either an
// array-of-elements constructor loop or a single constructor call.
// NOTE(review): the scrape is missing this function's signature line and the
// `case` labels of both switch statements, marked below.
                                          AggValueSlot dest) {
  assert(!dest.isIgnored() && "Must have a destination!");
  const CXXConstructorDecl *cd = e->getConstructor();

  // If we require zero initialization before (or instead of) calling the
  // constructor, as can be the case with a non-user-provided default
  // constructor, emit the zero initialization now, unless destination is
  // already zeroed.
  if (e->requiresZeroInitialization() && !dest.isZeroed()) {
    switch (e->getConstructionKind()) {
    // NOTE(review): the case labels and the start of the
    // zero-initialization call are missing from the scrape.
                           e->getType());
      break;
    // NOTE(review): the case labels and the start of the base-class
    // zero-initialization call are missing from the scrape.
                          cd->getParent());
      break;
    }
  }

  // If this is a call to a trivial default constructor, do nothing.
  if (cd->isTrivial() && cd->isDefaultConstructor())
    return;

  // Elide the constructor if we're constructing from a temporary
  if (getLangOpts().ElideConstructors && e->isElidable()) {
    // FIXME: This only handles the simplest case, where the source object is
    //        passed directly as the first argument to the constructor. This
    //        should also handle stepping through implicit casts and conversion
    //        sequences which involve two steps, with a conversion operator
    //        follwed by a converting constructor.
    const Expr *srcObj = e->getArg(0);
    assert(srcObj->isTemporaryObject(getContext(), cd->getParent()));
    assert(
        getContext().hasSameUnqualifiedType(e->getType(), srcObj->getType()));
    emitAggExpr(srcObj, dest);
    return;
  }

  if (const ArrayType *arrayType = getContext().getAsArrayType(e->getType())) {
    // NOTE(review): one line is missing from the scrape here.
    emitCXXAggrConstructorCall(cd, arrayType, dest.getAddress(), e, false);
  } else {
    // NOTE(review): the declarations of `type` (constructor kind) are partly
    // missing from the scrape.
    bool forVirtualBase = false;
    bool delegating = false;

    switch (e->getConstructionKind()) {
    // NOTE(review): case labels missing from the scrape throughout this
    // switch.
      break;
      // We should be emitting a constructor; GlobalDecl will assert this
      type = curGD.getCtorType();
      delegating = true;
      break;
      forVirtualBase = true;
      [[fallthrough]];
      type = Ctor_Base;
      break;
    }

    emitCXXConstructorCall(cd, type, forVirtualBase, delegating, dest, e);
  }
}
448
// Computes the padding (array cookie) that operator new[] must allocate in
// front of the elements so the matching delete[] can recover the element
// count; zero for non-array news and for reserved placement operator new[].
// NOTE(review): the scrape is missing the function's name line and the `if`
// condition guarding the second early return.
                                        const CXXNewExpr *e) {
  // Non-array news never carry a cookie.
  if (!e->isArray())
    return CharUnits::Zero();

  // No cookie is required if the operator new[] being used is the
  // reserved placement operator new[].
  // NOTE(review): the `if` condition line is missing from the scrape.
    return CharUnits::Zero();

  // Otherwise, the C++ ABI decides how big the cookie must be.
  return cgf.cgm.getCXXABI().getArrayCookieSize(e);
}
461
/// Computes the total allocation size for a new-expression: element count
/// times element size, plus any array cookie, with overflow checking that
/// forces the size to all-ones (so operator new fails) when the computation
/// overflows. For array news, also sets \p numElements and
/// \p sizeWithoutCookie for the caller.
/// NOTE(review): a few lines are missing from this scrape (the declarations
/// of `type` and `typeSize`, and parts of two statements), marked below.
static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e,
                                       unsigned minElements,
                                       mlir::Value &numElements,
                                       mlir::Value &sizeWithoutCookie) {
  // NOTE(review): the declaration of `type` is missing from the scrape.
  mlir::Location loc = cgf.getLoc(e->getSourceRange());

  if (!e->isArray()) {
    // NOTE(review): the declaration of `typeSize` is missing from the scrape.
    sizeWithoutCookie = cgf.getBuilder().getConstant(
        loc, cir::IntAttr::get(cgf.sizeTy, typeSize.getQuantity()));
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = cgf.cgm.getDataLayout().getTypeSizeInBits(cgf.sizeTy);

  // The number of elements can be have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  //
  // If the array size is constant, Sema will have prevented negative
  // values and size overflow.

  // Compute the constant factor.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *cat =
  // NOTE(review): the loop condition's continuation line is missing from the
  // scrape.
    type = cat->getElementType();
    arraySizeMultiplier *= cat->getSize();
  }

  // NOTE(review): the line computing `typeSize` (for the element type) is
  // missing from the scrape.
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         calculateCookiePadding(cgf, e).getQuantity());

  // This will be a size_t.
  mlir::Value size;

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  const Expr *arraySize = *e->getArraySize();
  mlir::Attribute constNumElements =
      ConstantEmitter(cgf.cgm, &cgf)
          .tryEmitAbstract(arraySize, arraySize->getType());
  if (constNumElements) {
    // Get an APInt from the constant
    const llvm::APInt &count =
        mlir::cast<cir::IntAttr>(constNumElements).getValue();

    [[maybe_unused]] unsigned numElementsWidth = count.getBitWidth();
    bool hasAnyOverflow = false;

    // The equivalent code in CodeGen/CGExprCXX.cpp handles these cases as
    // overflow, but that should never happen. The size argument is implicitly
    // cast to a size_t, so it can never be negative and numElementsWidth will
    // always equal sizeWidth. However, sometimes in operator-new, it seems that
    // `numElements` might remain an 'int', so we have to support smaller than
    // that. We immediately do the zextOrTrunc below (which should really only
    // do zext, since our assert handles the trunc), but it will make sure the
    // width is correct.
    assert(!count.isNegative() && "Expected non-negative array size");
    assert(numElementsWidth <= sizeWidth &&
           "Expected a size_t array size constant");

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does too, and
    // if that overflows then we shouldn't use this.
    // This emits a constant that may not be used, but we can't tell here
    // whether it will be needed or not.
    numElements =
        cgf.getBuilder().getConstInt(loc, adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize =
        adjustedCount.umul_ov(typeSizeMultiplier, overflow);

    // Sema prevents us from hitting this case
    assert(!overflow && "Overflow in array allocation size");

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow
      sizeWithoutCookie = cgf.getBuilder().getConstInt(
          loc, allocationSize.zextOrTrunc(sizeWidth));

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail
    if (hasAnyOverflow) {
      size =
          cgf.getBuilder().getConstInt(loc, llvm::APInt::getAllOnes(sizeWidth));
    } else {
      size = cgf.getBuilder().getConstInt(loc, allocationSize);
    }
  } else {
    // Create a value for the variable number of elements
    numElements = cgf.emitScalarExpr(*e->getArraySize());
    auto numElementsType = mlir::cast<cir::IntType>(numElements.getType());
    unsigned numElementsWidth = numElementsType.getWidth();

    // The number of elements can have an arbitrary integer type;
    // essentially, we need to multiply it by a constant factor, add a
    // cookie size, and verify that the result is representable as a
    // size_t.  That's just a gloss, though, and it's wrong in one
    // important way: if the count is negative, it's an error even if
    // the cookie size would bring the total size >= 0.
    bool isSigned =
        (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();

    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //   numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //   than that.
    // 4) we need to compute
    //   sizeWithoutCookie := numElements * typeSizeMultiplier
    //   and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //   size := sizeWithoutCookie + cookieSize
    //   and check whether it overflows.

    mlir::Value hasOverflow;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold =
          llvm::APInt::getOneBitSet(numElementsWidth, sizeWidth);

      // Use an unsigned comparison regardless of the sign of numElements.
      mlir::Value unsignedNumElements = numElements;
      if (isSigned)
        unsignedNumElements = cgf.getBuilder().createIntCast(
            numElements, cgf.getBuilder().getUIntNTy(numElementsWidth));

      mlir::Value thresholdV =
          cgf.getBuilder().getConstInt(loc, threshold, /*isUnsigned=*/true);
      hasOverflow = cgf.getBuilder().createCompare(
          loc, cir::CmpOpKind::ge, unsignedNumElements, thresholdV);
      numElements = cgf.getBuilder().createIntCast(
          unsignedNumElements, mlir::cast<cir::IntType>(cgf.sizeTy));

      // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = cgf.getBuilder().createIntCast(
            numElements, cgf.getBuilder().getSIntNTy(sizeWidth));

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here. But at
      // least in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = cgf.getBuilder().createCompare(
            loc, cir::CmpOpKind::lt, numElements,
            cgf.getBuilder().getConstInt(loc, numElements.getType(),
                                         minElements));

      numElements = cgf.getBuilder().createIntCast(
          numElements, mlir::cast<cir::IntType>(cgf.sizeTy));

      // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = cgf.getBuilder().createIntCast(
          numElements, mlir::cast<cir::IntType>(cgf.sizeTy));
    }

    assert(numElements.getType() == cgf.sizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        mlir::Value minElementsV = cgf.getBuilder().getConstInt(
            loc, llvm::APInt(sizeWidth, minElements));
        hasOverflow = cgf.getBuilder().createCompare(loc, cir::CmpOpKind::lt,
                                                     numElements, minElementsV);
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        mlir::Value minElementsV = cgf.getBuilder().getConstInt(
            loc, llvm::APInt(sizeWidth, minElements));
        hasOverflow = cgf.getBuilder().createOr(
            loc, hasOverflow,
            cgf.getBuilder().createCompare(loc, cir::CmpOpKind::lt, numElements,
                                           minElementsV));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      mlir::Value tsmV = cgf.getBuilder().getConstInt(loc, typeSizeMultiplier);
      auto mulOp = cir::MulOverflowOp::create(
          cgf.getBuilder(), loc, mlir::cast<cir::IntType>(cgf.sizeTy), size,
          tsmV);

      if (hasOverflow)
        hasOverflow =
            cgf.getBuilder().createOr(loc, hasOverflow, mulOp.getOverflow());
      else
        hasOverflow = mulOp.getOverflow();

      size = mulOp.getResult();

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

          // Otherwise we need a separate multiply.
        } else {
          mlir::Value asmV =
              cgf.getBuilder().getConstInt(loc, arraySizeMultiplier);
          numElements = cgf.getBuilder().createMul(loc, numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;
      mlir::Value cookieSizeV = cgf.getBuilder().getConstInt(loc, cookieSize);
      auto addOp = cir::AddOverflowOp::create(
          cgf.getBuilder(), loc, mlir::cast<cir::IntType>(cgf.sizeTy), size,
          cookieSizeV);

      if (hasOverflow)
        hasOverflow =
            cgf.getBuilder().createOr(loc, hasOverflow, addOp.getOverflow());
      else
        hasOverflow = addOp.getOverflow();

      size = addOp.getResult();
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow) {
      mlir::Value allOnes =
          cgf.getBuilder().getConstInt(loc, llvm::APInt::getAllOnes(sizeWidth));
      size = cgf.getBuilder().createSelect(loc, hasOverflow, allOnes, size);
    }
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}
747
/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
/// NOTE(review): the scrape is missing the line carrying this helper's
/// name/return type (it is called as emitNewDeleteCall elsewhere in the
/// file and returns RValue).
                                 const FunctionDecl *calleeDecl,
                                 const FunctionProtoType *calleeType,
                                 const CallArgList &args) {
  cir::CIRCallOpInterface callOrTryCall;
  cir::FuncOp calleePtr = cgf.cgm.getAddrOfFunction(calleeDecl);
  CIRGenCallee callee =
      CIRGenCallee::forDirect(calleePtr, GlobalDecl(calleeDecl));
  RValue rv =
      cgf.emitCall(cgf.cgm.getTypes().arrangeFreeFunctionCall(args, calleeType),
                   callee, ReturnValueSlot(), args, &callOrTryCall);

  /// C++1y [expr.new]p10:
  ///   [In a new-expression,] an implementation is allowed to omit a call
  ///   to a replaceable global allocation function.
  ///
  /// We model such elidable calls with the 'builtin' attribute.
  // NOTE(review): gating the 'builtin' marking on the callee *having* the
  // nobuiltin attribute looks surprising (one might expect a negation) —
  // confirm against the upstream source; a `!` may have been lost in this
  // scrape.
  if (calleeDecl->isReplaceableGlobalAllocationFunction() && calleePtr &&
      calleePtr->hasAttr(cir::CIRDialect::getNoBuiltinAttrName())) {
    callOrTryCall->setAttr(cir::CIRDialect::getBuiltinAttrName(),
                           mlir::UnitAttr::get(callOrTryCall->getContext()));
  }

  return rv;
}
775
// Emits a call to a usual global operator new/delete by looking up the
// declaration in the translation unit and dispatching to emitNewDeleteCall.
// NOTE(review): the scrape is missing this function's signature line(s), the
// declaration of the lookup `name`, and one line inside the sanitizer check.
                                        const CallExpr *callExpr,
  CallArgList args;
  emitCallArgs(args, type, callExpr->arguments());
  // Find the allocation or deallocation function that we're calling.
  ASTContext &astContext = getContext();
  assert(op == OO_New || op == OO_Delete);
  // NOTE(review): the declaration of `name` is missing from the scrape.

  clang::DeclContextLookupResult lookupResult =
      astContext.getTranslationUnitDecl()->lookup(name);
  for (const NamedDecl *decl : lookupResult) {
    if (const auto *funcDecl = dyn_cast<FunctionDecl>(decl)) {
      // Match the exact function type so overloads (e.g. sized/aligned
      // variants) resolve to the intended declaration.
      if (astContext.hasSameType(funcDecl->getType().getTypePtr(), type)) {
        if (sanOpts.has(SanitizerKind::AllocToken)) {
          // TODO: Set !alloc_token metadata.
          // NOTE(review): one line is missing from the scrape here.
          cgm.errorNYI("Alloc token sanitizer not yet supported!");
        }

        // Emit the call to operator new/delete.
        return emitNewDeleteCall(*this, funcDecl, type, args);
      }
    }
  }

  // Sema guarantees the usual global allocation functions are declared.
  llvm_unreachable("predeclared global operator new/delete is missing");
}
805
namespace {
/// One deferred placement argument (value + type), stored after the cleanup
/// object via TrailingObjects so it can be re-passed to a placement
/// operator delete.
template <typename Traits> struct PlacementArg {
  typename Traits::RValueTy argValue;
  QualType argType;
};

/// A cleanup to call the given 'operator delete' function upon abnormal
/// exit from a new expression. Templated on a traits type that deals with
/// ensuring that the arguments dominate the cleanup if necessary.
template <typename Traits>
class CallDeleteDuringNew final
    : public EHScopeStack::Cleanup,
      private llvm::TrailingObjects<CallDeleteDuringNew<Traits>,
                                    PlacementArg<Traits>> {
  using TrailingObj =
      llvm::TrailingObjects<CallDeleteDuringNew<Traits>, PlacementArg<Traits>>;
  friend TrailingObj;
  using TrailingObj::getTrailingObjects;

  /// Type used to hold llvm::Value*s.
  typedef typename Traits::ValueTy ValueTy;
  /// Type used to hold RValues.
  typedef typename Traits::RValueTy RValueTy;

  // Count of trailing placement args; 30 bits so it packs with the flag below.
  unsigned numPlacementArgs : 30;
  LLVM_PREFERRED_TYPE(AlignedAllocationMode)
  unsigned passAlignmentToPlacementDelete : 1;
  const FunctionDecl *operatorDelete;
  // The allocated pointer handed back to operator delete on failure.
  ValueTy ptr;
  ValueTy allocSize;
  CharUnits allocAlign;

  PlacementArg<Traits> *getPlacementArgs() { return getTrailingObjects(); }

public:
  /// Records placement argument \p i so it can be re-passed to the placement
  /// operator delete if the initializer throws.
  void setPlacementArg(unsigned i, RValueTy argValue, QualType argType) {
    assert(i < numPlacementArgs && "index out of range");
    getPlacementArgs()[i] = {argValue, argType};
  }

  /// Extra bytes needed for the trailing placement-argument array.
  static size_t getExtraSize(size_t numPlacementArgs) {
    return TrailingObj::template additionalSizeToAlloc<PlacementArg<Traits>>(
        numPlacementArgs);
  }

  CallDeleteDuringNew(size_t numPlacementArgs,
                      const FunctionDecl *operatorDelete, ValueTy ptr,
                      ValueTy allocSize,
                      const ImplicitAllocationParameters &iap,
                      CharUnits allocAlign)
      : numPlacementArgs(numPlacementArgs),
        passAlignmentToPlacementDelete(isAlignedAllocation(iap.PassAlignment)),
        operatorDelete(operatorDelete), ptr(ptr), allocSize(allocSize),
        allocAlign(allocAlign) {}

  // Emits the 'operator delete' call that undoes the allocation when the
  // new-expression's initializer throws.
  void emit(CIRGenFunction &cgf, Flags flags) override {
    const auto *fpt = operatorDelete->getType()->castAs<FunctionProtoType>();
    CallArgList deleteArgs;

    unsigned firstNonTypeArg = 0;
    TypeAwareAllocationMode typeAwareDeallocation = TypeAwareAllocationMode::No;
    // NOTE(review): one line is missing from the scrape here.

    // The first argument after type-identity parameter (if any) is always
    // a void* (or C* for a destroying operator delete for class type C).
    deleteArgs.add(Traits::get(cgf, ptr), fpt->getParamType(firstNonTypeArg));

    // Figure out what other parameters we should be implicitly passing.
    UsualDeleteParams params;
    if (numPlacementArgs) {
      // A placement deallocation function is implicitly passed an alignment
      // if the placement allocation function was, but is never passed a size.
      params.Alignment =
          alignedAllocationModeFromBool(passAlignmentToPlacementDelete);
      params.TypeAwareDelete = typeAwareDeallocation;
      // NOTE(review): one line is missing from the scrape here.
    } else {
      // For a non-placement new-expression, 'operator delete' can take a
      // size and/or an alignment if it has the right parameters.
      params = operatorDelete->getUsualDeleteParams();
    }

    assert(!params.DestroyingDelete &&
           "should not call destroying delete in a new-expression");

    // The second argument can be a std::size_t (for non-placement delete).
    if (params.Size)
      deleteArgs.add(Traits::get(cgf, allocSize),
                     cgf.getContext().getSizeType());

    // The next (second or third) argument can be a std::align_val_t, which
    // is an enum whose underlying type is std::size_t.
    // FIXME: Use the right type as the parameter type. Note that in a call
    // to operator delete(size_t, ...), we may not have it available.
    if (isAlignedAllocation(params.Alignment))
      cgf.cgm.errorNYI("CallDeleteDuringNew: aligned allocation");

    // Pass the rest of the arguments, which must match exactly.
    for (unsigned i = 0; i != numPlacementArgs; ++i) {
      auto arg = getPlacementArgs()[i];
      deleteArgs.add(Traits::get(cgf, arg.argValue), arg.argType);
    }

    // Call 'operator delete'.
    emitNewDeleteCall(cgf, operatorDelete, fpt, deleteArgs);
  }
};
} // namespace
914
915/// Enter a cleanup to call 'operator delete' if the initializer in a
916/// new-expression throws.
// NOTE(review): the function head (rendered line 917) and several interior
// lines (935, 937, 951, 967, 974, 977, 989) are elided in this listing; the
// visible code starts mid-signature.
918 Address newPtr, mlir::Value allocSize,
919 CharUnits allocAlign,
920 const CallArgList &newArgs) {
// Placement args in newArgs come after the implicit args (size, etc.).
921 unsigned numNonPlacementArgs = e->getNumImplicitArgs();
922
923 // If we're not inside a conditional branch, then the cleanup will
924 // dominate and we can do the easier (and more efficient) thing.
925 if (!cgf.isInConditionalBranch()) {
// Direct case: hold SSA values / RValues in the cleanup as-is.
926 struct DirectCleanupTraits {
927 typedef mlir::Value ValueTy;
928 typedef RValue RValueTy;
929 static RValue get(CIRGenFunction &, ValueTy v) { return RValue::get(v); }
930 static RValue get(CIRGenFunction &, RValueTy v) { return v; }
931 };
932
933 typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;
934
936 DirectCleanup *cleanup = cgf.ehStack.pushCleanupWithExtra<DirectCleanup>(
938 newPtr.getPointer(), allocSize, e->implicitAllocationParameters(),
939 allocAlign);
940 for (auto i : llvm::seq<unsigned>(0, e->getNumPlacementArgs())) {
941 const CallArg &arg = newArgs[i + numNonPlacementArgs];
942 cleanup->setPlacementArg(
943 i, arg.getRValue(cgf, cgf.getLoc(e->getSourceRange())), arg.ty);
944 }
945
946 return;
947 }
948
949 // Otherwise, we need to save all this stuff.
// Spill a value to a temporary alloca so the conditional cleanup can
// reload it later; the cleanup stores the alloca's raw pointer.
950 auto saveValue = [&](mlir::Value value) -> mlir::Value {
952 cgf.cgm.getDataLayout().getABITypeAlign(value.getType()));
953 Address alloca = cgf.createTempAlloca(value.getType(), align,
954 value.getLoc(), "cond-cleanup.save");
955 cgf.getBuilder().createStore(value.getLoc(), value, alloca);
956 return alloca.emitRawPointer();
957 };
958
959 mlir::Value savedNewPtr = saveValue(newPtr.getPointer());
960 mlir::Value savedAllocSize = saveValue(allocSize);
961
// Conditional case: the cleanup holds alloca pointers and reloads the
// saved values when (and if) it runs.
962 struct ConditionalCleanupTraits {
963 typedef mlir::Value ValueTy;
964 typedef mlir::Value RValueTy;
965 static RValue get(CIRGenFunction &cgf, ValueTy v) {
966 auto alloca = v.getDefiningOp<cir::AllocaOp>();
968 alloca.getLoc(), alloca.getAllocaType(), alloca,
969 llvm::MaybeAlign(alloca.getAlignment()));
970 }
971 };
972 typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;
973
975 ConditionalCleanup *cleanup =
976 cgf.ehStack.pushCleanupWithExtra<ConditionalCleanup>(
978 savedNewPtr, savedAllocSize, e->implicitAllocationParameters(),
979 allocAlign);
980 for (auto i : llvm::seq<unsigned>(0, e->getNumPlacementArgs())) {
981 const CallArg &arg = newArgs[i + numNonPlacementArgs];
982 cleanup->setPlacementArg(
983 i,
984 saveValue(
985 arg.getRValue(cgf, cgf.getLoc(e->getSourceRange())).getValue()),
986 arg.ty);
987 }
988
990}
991
/// Store the result of a single-unit initializer expression \p init into the
/// freshly allocated storage at \p newPtr, dispatching on the evaluation kind
/// (scalar / complex / aggregate) of \p allocType.
992static void storeAnyExprIntoOneUnit(CIRGenFunction &cgf, const Expr *init,
993 QualType allocType, Address newPtr,
994 AggValueSlot::Overlap_t mayOverlap) {
995 // FIXME: Refactor with emitExprAsInit.
996 switch (cgf.getEvaluationKind(allocType)) {
997 case cir::TEK_Scalar:
998 cgf.emitScalarInit(init, cgf.getLoc(init->getSourceRange()),
999 cgf.makeAddrLValue(newPtr, allocType), false);
1000 return;
1001 case cir::TEK_Complex:
1002 cgf.emitComplexExprIntoLValue(init, cgf.makeAddrLValue(newPtr, allocType),
1003 /*isInit*/ true);
1004 return;
1005 case cir::TEK_Aggregate: {
// NOTE(review): the AggValueSlot construction is partially elided in this
// listing (rendered lines 1006-1008 and 1010 are missing).
1009 newPtr, allocType.getQualifiers(), AggValueSlot::IsDestructed,
1011 cgf.emitAggExpr(init, slot);
1012 return;
1013 }
1014 }
1015 llvm_unreachable("bad evaluation kind");
1016}
1017
/// Emit the initializer for an array new-expression: explicit init-list /
/// string-literal elements first, then the array filler or a constructor
/// loop for the remaining elements, using a single memset where the element
/// type is zero-initializable. (The signature head, rendered line 1018, is
/// elided in this listing; the call site in emitNewInitializer shows this is
/// CIRGenFunction::emitNewArrayInitializer.)
1019 const CXXNewExpr *e, QualType elementType, mlir::Type elementTy,
1020 Address beginPtr, mlir::Value numElements,
1021 mlir::Value allocSizeWithoutCookie) {
1022 // If we have a type with trivial initialization and no initializer,
1023 // there's nothing to do.
1024 if (!e->hasInitializer())
1025 return;
1026
1027 Address curPtr = beginPtr;
1028
1029 unsigned initListElements = 0;
1030
1031 const Expr *init = e->getInitializer();
1032 Address endOfInit = Address::invalid();
1033 QualType::DestructionKind dtorKind = elementType.isDestructedType();
1035
1036 // Attempt to perform zero-initialization using memset.
// Returns false if the element type cannot be zero-initialized by memset;
// otherwise memsets everything past the already-initialized prefix.
1037 auto tryMemsetInitialization = [&]() -> bool {
1038 mlir::Location loc = numElements.getLoc();
1039
1040 // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
1041 // we can initialize with a memset to -1.
1042 if (!cgm.getTypes().isZeroInitializable(elementType))
1043 return false;
1044
1045 // Optimization: since zero initialization will just set the memory
1046 // to all zeroes, generate a single memset to do it in one shot.
1047
1048 // Subtract out the size of any elements we've already initialized.
1049 auto remainingSize = allocSizeWithoutCookie;
1050 if (initListElements) {
1051 // We know this can't overflow; we check this when doing the allocation.
1052 unsigned initializedSize =
1053 getContext().getTypeSizeInChars(elementType).getQuantity() *
1054 initListElements;
1055 cir::ConstantOp initSizeOp =
1056 builder.getConstInt(loc, remainingSize.getType(), initializedSize);
1057 remainingSize = builder.createSub(loc, remainingSize, initSizeOp);
1058 }
1059
1060 // Create the memset.
1061 mlir::Value castOp =
1062 builder.createPtrBitcast(curPtr.getPointer(), cgm.voidTy);
1063 builder.createMemSet(loc, castOp, builder.getConstInt(loc, cgm.uInt8Ty, 0),
1064 remainingSize);
1065 return true;
1066 };
1067
// Classify the initializer: braced list, paren list, string literal, or
// ObjC @encode (the latter three only after stripping parens/casts).
1068 const InitListExpr *ile = dyn_cast<InitListExpr>(init);
1069 const CXXParenListInitExpr *cplie = nullptr;
1070 const StringLiteral *sl = nullptr;
1071 const ObjCEncodeExpr *ocee = nullptr;
1072 const Expr *ignoreParen = nullptr;
1073 if (!ile) {
1074 ignoreParen = init->IgnoreParenImpCasts();
1075 cplie = dyn_cast<CXXParenListInitExpr>(ignoreParen);
1076 sl = dyn_cast<StringLiteral>(ignoreParen);
1077 ocee = dyn_cast<ObjCEncodeExpr>(ignoreParen);
1078 }
1079 // If the initializer is an initializer list, first do the explicit elements.
1080 if (ile || cplie || sl || ocee) {
1081 // Initializing from a (braced) string literal is a special case; the init
1082 // list element does not initialize a (single) array element.
1083 if ((ile && ile->isStringLiteralInit()) || sl || ocee) {
1084 if (!ile)
1085 init = ignoreParen;
1086
1087 // Initialize the initial portion of length equal to that of the string
1088 // literal. The allocation must be for at least this much; we emitted a
1089 // check for that earlier. Since we intend to use a cir.copy here, we must
1090 // introduce a cast to the string-literal-size here, so that cir.copy does
1091 // the right thing.
1092
1093 const Expr *initExpr = ile ? ile->getInit(0) : init;
1094 mlir::Type initExprTy = convertType(initExpr->getType());
1095 Address coercedPtr = curPtr.withElementType(builder, initExprTy);
1096
// NOTE(review): the AggValueSlot construction is partially elided here
// (rendered lines 1097 and 1099-1102 are missing from this listing).
1098 coercedPtr, elementType.getQualifiers(), AggValueSlot::IsDestructed,
1103 emitAggExpr(initExpr, slot);
1104
1105 // Move past these elements.
1106 initListElements =
1108 ->getZExtSize();
1109
1110 bool alreadyInitedAll = false;
1111 auto constElts = numElements.getDefiningOp<cir::ConstantOp>();
1112 if (constElts) {
1113 int64_t constVal = getZExtIntValueFromConstOp(numElements);
1114 alreadyInitedAll = (constVal == initListElements);
1115 }
1116
1117 // Init the rest with memset, unless we've already done everything.
1118 if (!alreadyInitedAll) {
1119 mlir::Location initLoc = cgm.getLoc(init->getSourceRange());
1120 mlir::Value initListElementsOp = builder.getUnsignedInt(
1121 initLoc, initListElements,
1122 getContext().getTypeSize(getContext().getSizeType()));
1123 curPtr = curPtr.withPointer(builder.createPtrStride(
1124 initLoc, curPtr.getPointer(), initListElementsOp));
1125
1126 bool ok = tryMemsetInitialization();
1127 (void)ok;
1128 assert(ok && "couldn't memset character type?");
1129 }
1130 return;
1131 }
1132
1133 ArrayRef<const Expr *> initExprs =
1134 ile ? ile->inits() : cplie->getInitExprs();
1135 initListElements = initExprs.size();
1136
1137 // If this is a multi-dimensional array new, we will initialize multiple
1138 // elements with each init list element.
1139 QualType allocType = e->getAllocatedType();
1140 if (const ConstantArrayType *cat = dyn_cast_or_null<ConstantArrayType>(
1141 allocType->getAsArrayTypeUnsafe())) {
1142 elementTy = convertTypeForMem(allocType);
1143 curPtr = curPtr.withElementType(builder, elementTy);
1144 initListElements *= getContext().getConstantArrayElementCount(cat);
1145 }
1146
1147 // Enter a partial-destruction Cleanup if necessary.
1148 if (dtorKind) {
1149 cgm.errorNYI(ile->getSourceRange(),
1150 "emitNewArrayInitializer: init requires dtor");
1151 return;
1152 }
1153
// Emit each explicit element in turn, bumping curPtr by one element per
// iteration and recomputing the alignment at the new offset.
1154 CharUnits elementSize = getContext().getTypeSizeInChars(elementType);
1155 CharUnits startAlign = curPtr.getAlignment();
1156 unsigned i = 0;
1157 for (const Expr *ie : initExprs) {
1158 // Tell the cleanup that it needs to destroy up to this
1159 // element. TODO: some of these stores can be trivially
1160 // observed to be unnecessary.
1161 if (endOfInit.isValid()) {
1162 cgm.errorNYI(ie->getSourceRange(),
1163 "emitNewArrayInitializer: update dtor cleanup ptr");
1164 return;
1165 }
1166 // FIXME: If the last initializer is an incomplete initializer list for
1167 // an array, and we have an array filler, we can fold together the two
1168 // initialization loops.
1169 storeAnyExprIntoOneUnit(*this, ie, ie->getType(), curPtr,
1171 mlir::Location loc = getLoc(ie->getExprLoc());
1172 mlir::Value castOp = builder.createPtrBitcast(
1173 curPtr.getPointer(), convertTypeForMem(allocType));
1174 mlir::Value offsetOp = builder.getSignedInt(loc, 1, /*width=*/32);
1175 mlir::Value dataPtr = builder.createPtrStride(loc, castOp, offsetOp);
1176 curPtr = Address(dataPtr, curPtr.getElementType(),
1177 startAlign.alignmentAtOffset((++i) * elementSize));
1178 }
1179
1180 // The remaining elements are filled with the array filler expression.
1181 init = ile ? ile->getArrayFiller() : cplie->getArrayFiller();
1182
1183 // Extract the initializer for the individual array elements by pulling
1184 // out the array filler from all the nested initializer lists. This avoids
1185 // generating a nested loop for the initialization.
1186 while (init && init->getType()->isConstantArrayType()) {
1187 auto *subIle = dyn_cast<InitListExpr>(init);
1188 if (!subIle)
1189 break;
1190 assert(subIle->getNumInits() == 0 && "explicit inits in array filler?");
1191 init = subIle->getArrayFiller();
1192 }
1193
1194 // Switch back to initializing one base element at a time.
1195 curPtr = curPtr.withElementType(builder, beginPtr.getElementType());
1196 }
1197
1198 // If all elements have already been initialized, skip any further
1199 // initialization.
1200 auto constOp = mlir::dyn_cast<cir::ConstantOp>(numElements.getDefiningOp());
1201 if (constOp) {
1202 auto constIntAttr = mlir::dyn_cast<cir::IntAttr>(constOp.getValue());
1203 // Just skip out if the constant count is zero.
1204 if (constIntAttr && constIntAttr.getUInt() <= initListElements)
1205 return;
1206 }
1207
1208 assert(init && "have trailing elements to initialize but no initializer");
1209
1210 // If this is a constructor call, try to optimize it out, and failing that
1211 // emit a single loop to initialize all remaining elements.
1212 if (const CXXConstructExpr *cce = dyn_cast<CXXConstructExpr>(init)) {
1213 CXXConstructorDecl *ctor = cce->getConstructor();
1214 if (ctor->isTrivial()) {
1215 // If new expression did not specify value-initialization, then there
1216 // is no initialization.
1217 if (!cce->requiresZeroInitialization())
1218 return;
1219
1220 cgm.errorNYI(cce->getSourceRange(),
1221 "emitNewArrayInitializer: trivial ctor zero-init");
1222 return;
1223 }
1224
1225 // Store the new Cleanup position for irregular Cleanups.
1226 //
1227 // FIXME: Share this cleanup with the constructor call emission rather than
1228 // having it create a cleanup of its own.
1229 if (endOfInit.isValid())
1230 builder.createStore(getLoc(e->getSourceRange()), curPtr.emitRawPointer(),
1231 endOfInit);
1232
1233 mlir::Type initType = convertType(cce->getType());
1234 // Emit a constructor call loop to initialize the remaining elements.
1235 if (initListElements) {
1236 // If the number of elements is a constant, we will have already gotten
1237 // the constant op above. Here we use it to get the number of remaining
1238 // elements as a new constant.
1239 if (constOp) {
1240 auto constIntAttr = mlir::cast<cir::IntAttr>(constOp.getValue());
1241 uint64_t numRemainingElements =
1242 constIntAttr.getUInt() - initListElements;
1243 numElements =
1244 builder.getConstInt(getLoc(e->getSourceRange()),
1245 numElements.getType(), numRemainingElements);
1246 // Currently, the AST gives us a pointer to the element type here
1247 // rather than an array. That's inconsistent with what it does
1248 // without an explicit initializer list, so we need to create an
1249 // array type here. That will decay back to a pointer when we lower
1250 // the cir.array.ctor op, but we need an array type for the initial
1251 // representation.
1252 if (!mlir::isa<cir::ArrayType>(initType))
1253 initType = cir::ArrayType::get(initType, numRemainingElements);
1254 } else {
1255 cgm.errorNYI(e->getSourceRange(),
1256 "emitNewArrayInitializer: numRemainingElements with "
1257 "non-constant count");
1258 return;
1259 }
1260 }
1261
1262 curPtr = curPtr.withElementType(builder, initType);
1263 emitCXXAggrConstructorCall(ctor, numElements, curPtr, cce,
1264 /*newPointerIsChecked=*/true,
1265 cce->requiresZeroInitialization());
1266 if (getContext().getTargetInfo().emitVectorDeletingDtors(
1267 getContext().getLangOpts())) {
1268 cgm.errorNYI(e->getSourceRange(),
1269 "emitNewArrayInitializer: emitVectorDeletingDtors");
1270 }
1271 return;
1272 }
1273
1274 // If this is value-initialization, we can usually use memset.
1275 ImplicitValueInitExpr ivie(elementType);
1276 if (isa<ImplicitValueInitExpr>(init)) {
1277 if (tryMemsetInitialization())
1278 return;
1279 // Switch to an ImplicitValueInitExpr for the element type. This handles
1280 // only one case: multidimensional array new of pointers to members. In
1281 // all other cases, we already have an initializer for the array element.
1282 init = &ivie;
1283 }
1284
1285 // At this point we should have found an initializer for the individual
1286 // elements of the array.
1287 assert(getContext().hasSameUnqualifiedType(elementType, init->getType()) &&
1288 "got wrong type of element to initialize");
1289
1290 // If we have a struct whose every field is value-initialized, we can
1291 // usually use memset.
1292 if (auto *ile = dyn_cast<InitListExpr>(init)) {
1293
1294 // If we have an empty initializer list, we can usually use memset.
1295 if (ile->getNumInits() == 0 && tryMemsetInitialization())
1296 return;
1297
1298 if (const auto *rtype = ile->getType()->getAsCanonical<RecordType>()) {
1299 if (rtype->getDecl()->isStruct()) {
// Count bases + named fields; then subtract the ones that are
// explicitly (non-value-) initialized. If everything remaining is
// value-initialized, a memset suffices.
1300 const RecordDecl *rd = rtype->getDecl()->getDefinitionOrSelf();
1301 unsigned numElements = 0;
1302 if (auto *cxxrd = dyn_cast<CXXRecordDecl>(rd))
1303 numElements = cxxrd->getNumBases();
1304 for (FieldDecl *field : rd->fields())
1305 if (!field->isUnnamedBitField())
1306 ++numElements;
1307 // FIXME: Recurse into nested InitListExprs.
1308 if (ile->getNumInits() == numElements)
1309 for (unsigned i = 0, e = ile->getNumInits(); i != e; ++i)
1310 if (!isa<ImplicitValueInitExpr>(ile->getInit(i)))
1311 --numElements;
1312 if (ile->getNumInits() == numElements && tryMemsetInitialization())
1313 return;
1314 }
1315 }
1316 }
1317
1318 // The rest of this has to go through the rest of the initializer, generating
1319 // a loop with cleanups/destruction/etc. See the test
1320 // 'check_array_value_init'(currently disabled) in
1321 // CodeGenCXX/new-array-init.cpp when we get more of this implemented.
1322 cgm.errorNYI(init->getSourceRange(),
1323 "emitNewArrayInitializer: unsupported initializer");
1324 return;
1325}
1326
/// Dispatch the new-expression initializer: array news go through
/// emitNewArrayInitializer, single objects through storeAnyExprIntoOneUnit.
/// (The function head, rendered line 1327, is elided in this listing.)
1328 QualType elementType, mlir::Type elementTy,
1329 Address newPtr, mlir::Value numElements,
1330 mlir::Value allocSizeWithoutCookie) {
1332 if (e->isArray()) {
1333 cgf.emitNewArrayInitializer(e, elementType, elementTy, newPtr, numElements,
1334 allocSizeWithoutCookie);
1335 } else if (const Expr *init = e->getInitializer()) {
// NOTE(review): the trailing overlap argument (rendered line 1337) is
// elided in this listing.
1336 storeAnyExprIntoOneUnit(cgf, init, e->getAllocatedType(), newPtr,
1338 }
1339}
1340
/// Emit a call to the destructor described by \p dtor through \p callee,
/// passing \p thisVal (and an optional implicit parameter such as a VTT);
/// the source location comes from \p ce when present, else from the decl.
/// (The function head, rendered line 1341, is elided in this listing.)
1342 GlobalDecl dtor, const CIRGenCallee &callee, mlir::Value thisVal,
1343 QualType thisTy, mlir::Value implicitParam, QualType implicitParamTy,
1344 const CallExpr *ce) {
1345 const CXXMethodDecl *dtorDecl = cast<CXXMethodDecl>(dtor.getDecl());
1346
1347 assert(!thisTy.isNull());
1348 assert(thisTy->getAsCXXRecordDecl() == dtorDecl->getParent() &&
1349 "Pointer/Object mixup");
1350
1352
// Build the argument list ('this', implicit param, then explicit args).
1353 CallArgList args;
1354 commonBuildCXXMemberOrOperatorCall(*this, dtorDecl, thisVal, implicitParam,
1355 implicitParamTy, ce, args, nullptr);
1356 assert((ce || dtor.getDecl()) && "expected source location provider");
1358 return emitCall(cgm.getTypes().arrangeCXXStructorDeclaration(dtor), callee,
1359 ReturnValueSlot(), args, nullptr,
1360 ce ? getLoc(ce->getExprLoc())
1361 : getLoc(dtor.getDecl()->getSourceRange()));
1362}
1363
/// Emit a pseudo-destructor call (e.g. p->~int()): for non-ObjC-lifetime
/// types this has no effect beyond evaluating the base expression.
/// (The function head, rendered lines 1364-1365, is elided in this listing.)
1366 QualType destroyedType = expr->getDestroyedType();
1367 if (destroyedType.hasStrongOrWeakObjCLifetime()) {
1369 cgm.errorNYI(expr->getExprLoc(),
1370 "emitCXXPseudoDestructorExpr: Objective-C lifetime is NYI");
1371 } else {
1372 // C++ [expr.pseudo]p1:
1373 // The result shall only be used as the operand for the function call
1374 // operator (), and the result of such a call has type void. The only
1375 // effect is the evaluation of the postfix-expression before the dot or
1376 // arrow.
1377 emitIgnoredExpr(expr->getBase());
1378 }
1379
// A pseudo-destructor call yields void; return an empty RValue.
1380 return RValue::get(nullptr);
1381}
1382
1383namespace {
1384/// Calls the given 'operator delete' on a single object.
1385struct CallObjectDelete final : EHScopeStack::Cleanup {
// Pointer to the object being deleted.
1386 mlir::Value ptr;
// The 'operator delete' to invoke when the cleanup fires.
1387 const FunctionDecl *operatorDelete;
// Static type of the deleted object (drives sized/aligned delete args).
1388 QualType elementType;
1389
1390 CallObjectDelete(mlir::Value ptr, const FunctionDecl *operatorDelete,
1391 QualType elementType)
1392 : ptr(ptr), operatorDelete(operatorDelete), elementType(elementType) {}
1393
// Cleanup body: delegate to the common delete-call emission.
1394 void emit(CIRGenFunction &cgf, Flags flags) override {
1395 cgf.emitDeleteCall(operatorDelete, ptr, elementType);
1396 }
1397};
1398} // namespace
1399
1400/// Emit the code for deleting a single object via a destroying operator
1401/// delete. If the element type has a non-virtual destructor, Ptr has already
1402/// been converted to the type of the parameter of 'operator delete'. Otherwise
1403/// Ptr points to an object of the static type.
// NOTE(review): the function head (rendered line 1404) is elided in this
// listing; the visible code starts mid-signature.
1405 const CXXDeleteExpr *de, Address ptr,
1406 QualType elementType) {
// A virtual destructor means the destroying delete must be dispatched
// through the vtable; the ABI handles that case.
1407 const CXXDestructorDecl *dtor =
1408 elementType->getAsCXXRecordDecl()->getDestructor();
1409 if (dtor && dtor->isVirtual()) {
1410 cgf.cgm.getCXXABI().emitVirtualObjectDelete(cgf, de, ptr, elementType,
1411 dtor);
1412 return;
1413 }
1414
// Non-virtual case: the destroying delete itself destroys the object.
1415 cgf.emitDeleteCall(de->getOperatorDelete(), ptr.getPointer(), elementType);
1416}
1417
1418/// Emit the code for deleting a single object.
// NOTE(review): the function head (rendered line 1419) is elided in this
// listing; the visible code starts mid-signature.
1420 Address ptr, QualType elementType) {
1421 // C++11 [expr.delete]p3:
1422 // If the static type of the object to be deleted is different from its
1423 // dynamic type, the static type shall be a base class of the dynamic type
1424 // of the object to be deleted and the static type shall have a virtual
1425 // destructor or the behavior is undefined.
1427
1428 const FunctionDecl *operatorDelete = de->getOperatorDelete();
1429 assert(!operatorDelete->isDestroyingOperatorDelete());
1430
1431 // Find the destructor for the type, if applicable. If the
1432 // destructor is virtual, we'll just emit the vcall and return.
1433 const CXXDestructorDecl *dtor = nullptr;
1434 if (const auto *rd = elementType->getAsCXXRecordDecl()) {
1435 if (rd->hasDefinition() && !rd->hasTrivialDestructor()) {
1436 dtor = rd->getDestructor();
1437
1438 if (dtor->isVirtual()) {
1440 cgf.cgm.getCXXABI().emitVirtualObjectDelete(cgf, de, ptr, elementType,
1441 dtor);
1442 return;
1443 }
1444 }
1445 }
1446
1447 // Make sure that we call delete even if the dtor throws.
1448 // This doesn't have to be a conditional cleanup because we're going
1449 // to pop it off in a second.
1450 cgf.ehStack.pushCleanup<CallObjectDelete>(
1451 NormalAndEHCleanup, ptr.getPointer(), operatorDelete, elementType);
1452
1453 if (dtor) {
// NOTE(review): the destructor-call statement head (rendered line 1454)
// is elided in this listing; only its argument lines are visible.
1455 /*ForVirtualBase=*/false,
1456 /*Delegating=*/false, ptr, elementType);
1457 } else if (elementType.getObjCLifetime()) {
1459 cgf.cgm.errorNYI(de->getSourceRange(), "emitObjectDelete: ObjCLifetime");
1460 }
1461
// Popping the cleanup here emits the 'operator delete' call on the normal
// path (and leaves the EH path covered while the dtor ran).
1462 cgf.popCleanupBlock();
1463}
1464
/// Emit a C++ delete-expression: null-check the operand, then either emit a
/// cir.delete.array op (array form) or a single-object delete.
/// (The function head and the pointer-evaluation line, rendered lines 1465
/// and 1467, are elided in this listing.)
1466 const Expr *arg = e->getArgument();
1468
1469 // Null check the pointer.
1470 //
1471 // We could avoid this null check if we can determine that the object
1472 // destruction is trivial and doesn't require an array cookie; we can
1473 // unconditionally perform the operator delete call in that case. For now, we
1474 // assume that deleted pointers are null rarely enough that it's better to
1475 // keep the branch. This might be worth revisiting for a -O0 code size win.
1477 cir::YieldOp thenYield;
1478 mlir::Value notNull = builder.createPtrIsNotNull(ptr.getPointer());
1479 cir::IfOp::create(builder, getLoc(e->getExprLoc()), notNull,
1480 /*withElseRegion=*/false,
1481 /*thenBuilder=*/
1482 [&](mlir::OpBuilder &b, mlir::Location loc) {
1483 thenYield = builder.createYield(loc);
1484 });
1485
1486 // Emit the rest of the CIR inside the if-op's then region, but restore the
1487 // insertion point to the point after the if when this function returns.
1488 mlir::OpBuilder::InsertionGuard guard(builder);
1489 builder.setInsertionPoint(thenYield);
1490
1491 QualType deleteTy = e->getDestroyedType();
1492
1493 // A destroying operator delete overrides the entire operation of the
1494 // delete expression.
// NOTE(review): the condition line (rendered line 1495) is elided in this
// listing; only the body of that if is visible.
1496 emitDestroyingObjectDelete(*this, e, ptr, deleteTy);
1497 return;
1498 }
1499
1500 // We might be deleting a pointer to array.
1501 deleteTy = getContext().getBaseElementType(deleteTy);
1502 ptr = ptr.withElementType(builder, convertTypeForMem(deleteTy));
1503
1504 if (e->isArrayForm() &&
1505 cgm.getASTContext().getTargetInfo().emitVectorDeletingDtors(
1506 cgm.getASTContext().getLangOpts())) {
1507 cgm.errorNYI(e->getSourceRange(),
1508 "emitCXXDeleteExpr: emitVectorDeletingDtors");
1509 }
1510
1511 if (e->isArrayForm()) {
// Array delete is lowered to a single cir.delete.array op carrying the
// delete function symbol, its usual-delete parameters, and (when the
// element type has a non-trivial dtor) the element destructor symbol.
1512 const FunctionDecl *operatorDelete = e->getOperatorDelete();
1513 cir::FuncOp operatorDeleteFn = cgm.getAddrOfFunction(operatorDelete);
1514 auto deleteFn =
1515 mlir::FlatSymbolRefAttr::get(operatorDeleteFn.getSymNameAttr());
1516 UsualDeleteParams udp = operatorDelete->getUsualDeleteParams();
1517 auto deleteParams = cir::UsualDeleteParamsAttr::get(
1518 builder.getContext(), udp.Size, isAlignedAllocation(udp.Alignment),
1520
1521 mlir::FlatSymbolRefAttr elementDtor;
1522 bool hasThrowingDtor = false;
1523 if (const auto *rd = deleteTy->getAsCXXRecordDecl()) {
1524 if (rd->hasDefinition() && !rd->hasTrivialDestructor()) {
1525 const CXXDestructorDecl *dtor = rd->getDestructor();
1526 if (dtor->getType()->castAs<FunctionProtoType>()->canThrow())
1527 hasThrowingDtor = true;
1528 cir::FuncOp dtorFn =
1529 cgm.getAddrOfCXXStructor(GlobalDecl(dtor, Dtor_Complete));
1530 elementDtor = mlir::FlatSymbolRefAttr::get(builder.getContext(),
1531 dtorFn.getSymNameAttr());
1532 }
1533 }
1534
1535 cir::DeleteArrayOp::create(builder, ptr.getPointer().getLoc(),
1536 ptr.getPointer(), deleteFn, deleteParams,
1537 elementDtor, hasThrowingDtor);
1538 } else {
1539 emitObjectDelete(*this, e, ptr, deleteTy);
1540 }
1541}
1542
/// Emit a C++ new-expression: compute the allocation size, call the
/// allocation function, optionally null-check the result, enter an
/// 'operator delete' EH cleanup around initialization, initialize the
/// storage (including any array cookie), and return the resulting pointer.
/// (The function head, rendered line 1543, is elided in this listing;
/// presumably this is CIRGenFunction::emitCXXNewExpr — confirm against the
/// full source.)
1544 // The element type being allocated.
1546
1547 // 1. Build a call to the allocation function.
1548 FunctionDecl *allocator = e->getOperatorNew();
1549
1550 // If there is a brace-initializer, cannot allocate fewer elements than inits.
1551 unsigned minElements = 0;
1552 if (e->isArray() && e->hasInitializer()) {
1553 const Expr *init = e->getInitializer();
1554 const InitListExpr *ile = dyn_cast<InitListExpr>(init);
1555 const CXXParenListInitExpr *cplie = dyn_cast<CXXParenListInitExpr>(init);
1556 const Expr *ignoreParen = init->IgnoreParenImpCasts();
1557 if ((ile && ile->isStringLiteralInit()) ||
1558 isa<StringLiteral>(ignoreParen) || isa<ObjCEncodeExpr>(ignoreParen)) {
// NOTE(review): the start of this initializer expression (rendered
// line 1560) is elided in this listing.
1559 minElements =
1561 ->getSize()
1562 .getZExtValue();
1563 } else if (ile || cplie) {
1564 minElements = ile ? ile->getNumInits() : cplie->getInitExprs().size();
1565 }
1566 }
1567
1568 mlir::Value numElements = nullptr;
1569 mlir::Value allocSizeWithoutCookie = nullptr;
1570 mlir::Value allocSize = emitCXXNewAllocSize(
1571 *this, e, minElements, numElements, allocSizeWithoutCookie);
1572 CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);
1573
1574 // Emit the allocation call.
1575 Address allocation = Address::invalid();
1576 CallArgList allocatorArgs;
1577 if (allocator->isReservedGlobalPlacementOperator()) {
1578 // If the allocator is a global placement operator, just
1579 // "inline" it directly.
1580 assert(e->getNumPlacementArgs() == 1);
1581 const Expr *arg = *e->placement_arguments().begin();
1582
1583 LValueBaseInfo baseInfo;
1584 allocation = emitPointerWithAlignment(arg, &baseInfo);
1585
1586 // The pointer expression will, in many cases, be an opaque void*.
1587 // In these cases, discard the computed alignment and use the
1588 // formal alignment of the allocated type.
1589 if (baseInfo.getAlignmentSource() != AlignmentSource::Decl)
1590 allocation = allocation.withAlignment(allocAlign);
1591
1592 // Set up allocatorArgs for the call to operator delete if it's not
1593 // the reserved global operator.
1594 if (e->getOperatorDelete() &&
1596 cgm.errorNYI(e->getSourceRange(),
1597 "emitCXXNewExpr: reserved placement new with delete");
1598 }
1599 } else {
1600 const FunctionProtoType *allocatorType =
1601 allocator->getType()->castAs<FunctionProtoType>();
1602 unsigned paramsToSkip = 0;
1603
1604 // The allocation size is the first argument.
1605 QualType sizeType = getContext().getSizeType();
1606 allocatorArgs.add(RValue::get(allocSize), sizeType);
1607 ++paramsToSkip;
1608
1609 if (allocSize != allocSizeWithoutCookie) {
1610 CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
1611 allocAlign = std::max(allocAlign, cookieAlign);
1612 }
1613
1614 // The allocation alignment may be passed as the second argument.
1615 if (e->passAlignment()) {
1616 cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: pass alignment");
1617 }
1618
1619 // FIXME: Why do we not pass a CalleeDecl here?
1620 emitCallArgs(allocatorArgs, allocatorType, e->placement_arguments(),
1621 AbstractCallee(), paramsToSkip);
1622 RValue rv =
1623 emitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
1624
1625 // Set !heapallocsite metadata on the call to operator new.
1627
1628 // If this was a call to a global replaceable allocation function that does
1629 // not take an alignment argument, the allocator is known to produce storage
1630 // that's suitably aligned for any object that fits, up to a known
1631 // threshold. Otherwise assume it's suitably aligned for the allocated type.
1632 CharUnits allocationAlign = allocAlign;
1633 if (!e->passAlignment() &&
1634 allocator->isReplaceableGlobalAllocationFunction()) {
1635 const TargetInfo &target = cgm.getASTContext().getTargetInfo();
1636 unsigned allocatorAlign = llvm::bit_floor(std::min<uint64_t>(
1637 target.getNewAlign(), getContext().getTypeSize(allocType)));
1638 allocationAlign = std::max(
1639 allocationAlign, getContext().toCharUnitsFromBits(allocatorAlign));
1640 }
1641
1642 mlir::Value allocPtr = rv.getValue();
1643 allocation = Address(
1644 allocPtr, mlir::cast<cir::PointerType>(allocPtr.getType()).getPointee(),
1645 allocationAlign);
1646 }
1647
1648 // Emit a null check on the allocation result if the allocation
1649 // function is allowed to return null (because it has a non-throwing
1650 // exception spec or is the reserved placement new) and we have an
1651 // interesting initializer or will be running sanitizers on the
1652 // initialization.
1652 bool nullCheck = e->shouldNullCheckAllocation() &&
1653 (!allocType.isPODType(getContext()) || e->hasInitializer());
1654
1655 // If there's an operator delete, enter a cleanup to call it if an
1656 // exception is thrown. If we do this, we'll be creating the result pointer
1657 // inside a cleanup scope, either with a bitcast or an offset based on the
1658 // array cookie size. However, we need to return that pointer from outside
1659 // the cleanup scope, so we need to store it in a temporary variable.
1660 bool useNewDeleteCleanup =
1661 e->getOperatorDelete() &&
1663
1664 mlir::Type elementTy;
1665 // For array new, use the allocated type to handle multidimensional arrays
1666 // correctly.
1667 if (e->isArray())
1668 elementTy = convertTypeForMem(e->getAllocatedType());
1669 else
1670 elementTy = convertTypeForMem(allocType);
1671
1672 // Lambda that emits the init sequence: cleanup setup, cookie init,
1673 // bitcast + initializer, and cleanup deactivation.
1674 Address result = Address::invalid();
1675 Address resultPtr = Address::invalid();
1676 auto emitInit = [&]() {
1677 EHScopeStack::stable_iterator operatorDeleteCleanup;
1678 mlir::Operation *cleanupDominator = nullptr;
1679 if (useNewDeleteCleanup) {
1681 enterNewDeleteCleanup(*this, e, allocation, allocSize, allocAlign,
1682 allocatorArgs);
1683 operatorDeleteCleanup = ehStack.stable_begin();
// Placeholder op marking the point up to which the cleanup is active;
// erased once the cleanup is deactivated below.
1684 cleanupDominator =
1685 cir::UnreachableOp::create(builder, getLoc(e->getSourceRange()))
1686 .getOperation();
1687 resultPtr = createTempAlloca(builder.getPointerTo(elementTy),
1688 allocation.getAlignment(),
1689 getLoc(e->getSourceRange()), "__new_result");
1690 }
1691
1692 if (allocSize != allocSizeWithoutCookie) {
1693 assert(e->isArray());
1694 allocation = cgm.getCXXABI().initializeArrayCookie(
1695 *this, allocation, numElements, e, allocType);
1696 }
1697
1698 result = builder.createElementBitCast(getLoc(e->getSourceRange()),
1699 allocation, elementTy);
1700
1701 // Store the result pointer before initialization so that it is available
1702 // to the cleanup if the initializer throws.
1703 if (resultPtr.isValid())
1704 builder.createStore(getLoc(e->getSourceRange()), result.getPointer(),
1705 resultPtr);
1706
1707 // Passing pointer through launder.invariant.group to avoid propagation of
1708 // vptrs information which may be included in previous type. To not break
1709 // LTO with different optimizations levels, we do it regardless of
1710 // optimization level.
1711 if (cgm.getCodeGenOpts().StrictVTablePointers &&
1712 allocator->isReservedGlobalPlacementOperator())
1713 cgm.errorNYI(e->getSourceRange(),
1714 "emitCXXNewExpr: strict vtable pointers");
1715
1717
1718 emitNewInitializer(*this, e, allocType, elementTy, result, numElements,
1719 allocSizeWithoutCookie);
1720
1721 // Deactivate the 'operator delete' cleanup if we finished
1722 // initialization.
1723 if (useNewDeleteCleanup) {
1724 deactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
1725 cleanupDominator->erase();
1726 cir::LoadOp loadResult =
1727 builder.createLoad(getLoc(e->getSourceRange()), resultPtr);
1728 result = result.withPointer(loadResult.getResult());
1729 }
1730 };
1731
// When null-checking, all initialization happens inside the 'then' region
// of a cir.if guarded by "allocation != null".
1732 cir::IfOp nullCheckOp;
1733 if (nullCheck) {
1734 mlir::Value isNotNull = builder.createPtrIsNotNull(allocation.getPointer());
1735 nullCheckOp =
1736 cir::IfOp::create(builder, getLoc(e->getSourceRange()), isNotNull,
1737 /*withElseRegion=*/false,
1738 /*thenBuilder=*/
1739 [&](mlir::OpBuilder &, mlir::Location loc) {
1740 emitInit();
1741 builder.createYield(loc);
1742 });
1743 } else {
1744 emitInit();
1745 }
1746
1747 mlir::Value resultValue = result.getPointer();
1748
1749 if (nullCheck) {
1750 mlir::Type resultTy = resultValue.getType();
1751
1752 // If we needed a NewDeleteCleanup, allocation may have been modified
1753 // inside the cir.if (e.g. by cookie adjustment). Use the result stored
1754 // in the alloca instead, since the alloca dominates this point.
1755 mlir::Value trueVal;
1756 if (useNewDeleteCleanup) {
1757 trueVal = builder.createLoad(getLoc(e->getSourceRange()), resultPtr)
1758 .getResult();
1759 } else {
1760 trueVal = allocation.getPointer();
1761 }
1762 if (trueVal.getType() != resultTy)
1763 trueVal = builder.createBitcast(trueVal, resultTy);
1764 mlir::Value nullPtr =
1765 builder.getNullPtr(resultTy, getLoc(e->getSourceRange())).getResult();
// Select the real pointer or null depending on the original null check.
1766 resultValue =
1767 builder.createSelect(getLoc(e->getSourceRange()),
1768 nullCheckOp.getCondition(), trueVal, nullPtr);
1769 }
1770
1771 return resultValue;
1772}
1773
// CIRGenFunction::emitDeleteCall — emits a call to a "usual" operator delete
// `deleteFD` for the object of type `deleteTy` at `ptr`.  Walks the delete
// function's prototype in order, adding each implicit argument the prototype
// declares: the pointer, an optional std::destroying_delete_t tag, an
// optional size_t, and (NYI) an optional std::align_val_t.
// NOTE(review): the first line of the signature (original line 1774) and the
// opening brace were lost in extraction.
1775  mlir::Value ptr, QualType deleteTy)
1777
1778  const auto *deleteFTy = deleteFD->getType()->castAs<FunctionProtoType>();
1779  CallArgList deleteArgs;
1780
// Determine which optional usual-delete parameters this declaration has;
// paramTypeIter is advanced once per argument we add, in prototype order.
1781  UsualDeleteParams params = deleteFD->getUsualDeleteParams();
1782  auto paramTypeIter = deleteFTy->param_type_begin();
1783
1784  // Pass std::type_identity tag if present
// NOTE(review): the guarding `if` for the type-aware-allocation case
// (original line 1785) was dropped by extraction; this errorNYI is
// conditional in the real source.
1786    cgm.errorNYI(deleteFD->getSourceRange(),
1787                 "emitDeleteCall: type aware delete");
1788
1789  // Pass the pointer itself.
// The pointer is bitcast to the exact parameter type (usually void*) so the
// call argument matches the prototype.
1790  QualType argTy = *paramTypeIter;
1791  std::advance(paramTypeIter, 1);
1792  mlir::Value deletePtr =
1793      builder.createBitcast(ptr.getLoc(), ptr, convertType(argTy));
1794  deleteArgs.add(RValue::get(deletePtr), argTy);
1795
1796  // Pass the std::destroying_delete tag if present.
// The tag is an empty aggregate; a temporary is materialized just to have an
// address to pass.
1797  if (params.DestroyingDelete) {
1798    QualType tagType = *paramTypeIter;
1799    std::advance(paramTypeIter, 1);
1800    Address tagAddr =
1801        createMemTemp(tagType, ptr.getLoc(), "destroying.delete.tag");
1802    deleteArgs.add(RValue::getAggregate(tagAddr), tagType);
1803  }
1804
1805  // Pass the size if the delete function has a size_t parameter.
// The size is the static size of the deleted type, emitted as a constant of
// the prototype's size_t type.
1806  if (params.Size) {
1807    QualType sizeType = *paramTypeIter;
1808    std::advance(paramTypeIter, 1);
1809    CharUnits deleteTypeSize = getContext().getTypeSizeInChars(deleteTy);
1810    assert(mlir::isa<cir::IntType>(convertType(sizeType)) &&
1811           "expected cir::IntType");
1812    cir::ConstantOp size = builder.getConstInt(
1813        *currSrcLoc, convertType(sizeType), deleteTypeSize.getQuantity());
1814
1815    deleteArgs.add(RValue::get(size), sizeType);
1816  }
1817
1818  // Pass the alignment if the delete function has an align_val_t parameter.
1819  if (isAlignedAllocation(params.Alignment))
1820    cgm.errorNYI(deleteFD->getSourceRange(),
1821                 "emitDeleteCall: aligned allocation");
1822
// Sanity check: every prototype parameter must have been consumed above.
1823  assert(paramTypeIter == deleteFTy->param_type_end() &&
1824         "unknown parameter to usual delete function");
1825
1826  // Emit the call to delete.
1827  emitNewDeleteCall(*this, deleteFD, deleteFTy, deleteArgs);
1828}
1829
// emitDynamicCastToNull — emits the result of a dynamic_cast that is
// statically known to be null.  For a pointer destination this is just a
// null pointer; for a reference destination the cast must throw
// std::bad_cast instead (C++ [expr.dynamic.cast]p9).
// NOTE(review): the first line of the signature (original line 1830,
// `static mlir::Value emitDynamicCastToNull(CIRGenFunction &cgf,`) was lost
// in extraction.
1831  mlir::Location loc, QualType destTy)
1832  mlir::Type destCIRTy = cgf.convertType(destTy);
1833  assert(mlir::isa<cir::PointerType>(destCIRTy) &&
1834         "result of dynamic_cast should be a ptr");
1835
// Non-pointer destination means reference destination: emit the bad_cast
// call, which never returns.
1836  if (!destTy->isPointerType()) {
1837    mlir::Region *currentRegion = cgf.getBuilder().getBlock()->getParent();
1838    /// C++ [expr.dynamic.cast]p9:
1839    ///   A failed cast to reference type throws std::bad_cast
1840    cgf.cgm.getCXXABI().emitBadCastCall(cgf, loc);
1841
1842    // The call to bad_cast will terminate the current block. Create a new block
1843    // to hold any follow up code.
1844    cgf.getBuilder().createBlock(currentRegion, currentRegion->end());
1845  }
1846
// A null pointer value is returned even on the reference path so the caller
// has a value of the expected CIR type (the code after bad_cast is dead).
1847  return cgf.getBuilder().getNullPtr(destCIRTy, loc);
1848}
1849
// CIRGenFunction::emitDynamicCast — emits a C++ dynamic_cast expression.
// Classifies the cast (to void*, pointer-to-record, or reference-to-record),
// short-circuits casts proven always-null, and otherwise delegates to the
// C++ ABI's dynamic-cast lowering.
// NOTE(review): the first line of the signature (original line 1850) was
// lost in extraction; `thisAddr` used below is declared there.
1851  const CXXDynamicCastExpr *dce)
1852  mlir::Location loc = getLoc(dce->getSourceRange());
1853
1854  cgm.emitExplicitCastExprType(dce, this);
1855  QualType destTy = dce->getTypeAsWritten();
1856  QualType srcTy = dce->getSubExpr()->getType();
1857
1858  // C++ [expr.dynamic.cast]p7:
1859  //   If T is "pointer to cv void," then the result is a pointer to the most
1860  //   derived object pointed to by v.
1861  bool isDynCastToVoid = destTy->isVoidPointerType();
1862  bool isRefCast = destTy->isReferenceType();
1863
// Peel pointer/reference sugar off both sides to get the record types the
// ABI lowering needs.  dynamic_cast to void* has no destination record.
1864  QualType srcRecordTy;
1865  QualType destRecordTy;
1866  if (isDynCastToVoid) {
1867    srcRecordTy = srcTy->getPointeeType();
1868    // No destRecordTy.
1869  } else if (const PointerType *destPTy = destTy->getAs<PointerType>()) {
1870    srcRecordTy = srcTy->castAs<PointerType>()->getPointeeType();
1871    destRecordTy = destPTy->getPointeeType();
1872  } else {
1873    srcRecordTy = srcTy;
1874    destRecordTy = destTy->castAs<ReferenceType>()->getPointeeType();
1875  }
1876
1877  assert(srcRecordTy->isRecordType() && "source type must be a record type!");
// NOTE(review): original line 1878 (another assert/missing-feature line) was
// dropped by extraction.
1879
// Sema can prove some casts null (e.g. casting between unrelated final
// classes); emit the constant-null / bad_cast form directly.
1880  if (dce->isAlwaysNull())
1881    return emitDynamicCastToNull(*this, loc, destTy);
1882
1883  auto destCirTy = mlir::cast<cir::PointerType>(convertType(destTy));
1884  return cgm.getCXXABI().emitDynamicCast(*this, loc, srcRecordTy, destRecordTy,
1885                                         destCirTy, isRefCast, thisAddr);
1886}
1887
// emitCXXTypeidFromVTable — emits typeid() applied to a polymorphic glvalue,
// which must read the type_info pointer out of the object's vtable at
// runtime.  When the operand can be a null pointer (e.g. `typeid(*p)`),
// optionally emits an ABI null check that calls std::bad_typeid.
1888static mlir::Value emitCXXTypeidFromVTable(CIRGenFunction &cgf, const Expr *e,
1889                                           mlir::Type typeInfoPtrTy,
1890                                           bool hasNullCheck) {
1891  Address thisPtr = cgf.emitLValue(e).getAddress();
1892  QualType srcType = e->getType();
1893
1894  // C++ [class.cdtor]p4:
1895  //   If the operand of typeid refers to the object under construction or
1896  //   destruction and the static type of the operand is neither the constructor
1897  //   or destructor's class nor one of its bases, the behavior is undefined.
// NOTE(review): original line 1898 was dropped by extraction.
1899
// Only emit the null check when both the expression form requires one and
// the ABI says typeid of this source type must be null-checked.
1900  if (hasNullCheck && cgf.cgm.getCXXABI().shouldTypeidBeNullChecked(srcType)) {
1901    mlir::Value isThisNull =
1902        cgf.getBuilder().createPtrIsNull(thisPtr.getPointer());
1903    // We don't really care about the value, we just want to make sure the
1904    // 'true' side calls bad-type-id.
1905    cir::IfOp::create(
1906        cgf.getBuilder(), cgf.getLoc(e->getSourceRange()), isThisNull,
1907        /*withElseRegion=*/false, [&](mlir::OpBuilder &, mlir::Location loc) {
1908          cgf.cgm.getCXXABI().emitBadTypeidCall(cgf, loc);
1909        });
1910  }
1911
// Delegate the actual vtable load to the ABI lowering.
1912  return cgf.cgm.getCXXABI().emitTypeid(cgf, srcType, thisPtr, typeInfoPtrTy);
1913}
1914
// CIRGenFunction::emitCXXTypeidExpr — emits a typeid() expression.  For a
// potentially-evaluated glvalue of polymorphic type the result comes from
// the vtable (emitCXXTypeidFromVTable); otherwise it is the address of the
// statically-known RTTI descriptor global.
// NOTE(review): the signature line (original 1915) and the first half of the
// `ty` ternary (original 1918, the isTypeOperand branch) were dropped by
// extraction.
1916  mlir::Location loc = getLoc(e->getSourceRange());
1917  mlir::Type resultType = cir::PointerType::get(convertType(e->getType()));
1919                                : e->getExprOperand()->getType();
1920
1921  // If the non-default global var address space is not default, we need to do
1922  // an address-space cast here.
// NOTE(review): original line 1923 was dropped by extraction.
1924
1925  // C++ [expr.typeid]p2:
1926  //   When typeid is applied to a glvalue expression whose type is a
1927  //   polymorphic class type, the result refers to a std::type_info object
1928  //   representing the type of the most derived object (that is, the dynamic
1929  //   type) to which the glvalue refers.
1930  // If the operand is already most derived object, no need to look up vtable.
// NOTE(review): the polymorphic-class condition continuing this `if`
// (original line 1932) was dropped by extraction.
1931  if (!e->isTypeOperand() && e->isPotentiallyEvaluated() &&
1933    return emitCXXTypeidFromVTable(*this, e->getExprOperand(), resultType,
1934                                   e->hasNullCheck());
1935
1936  auto typeInfo =
1937      cast<cir::GlobalViewAttr>(cgm.getAddrOfRTTIDescriptor(loc, ty));
1938  // `getAddrOfRTTIDescriptor` lies to us and always gives us a uint8ptr as its
1939  // type, however we need the value of the actual global to call the
1940  // get-global-op, so look it up here.
1941  auto typeInfoGlobal =
1942      cast<cir::GlobalOp>(cgm.getGlobalValue(typeInfo.getSymbol().getValue()));
1943  auto getTypeInfo = cir::GetGlobalOp::create(
1944      builder, loc, builder.getPointerTo(typeInfoGlobal.getSymType()),
1945      typeInfoGlobal.getSymName());
1946  // The ABI is just generating these sometimes as ptr to u8, but they are
1947  // simply a representation of the type_info. So we have to cast this, if
1948  // necessary (createBitcast is a noop if the types match).
1949  return builder.createBitcast(getTypeInfo, resultType);
1950}
static void emit(Program &P, llvm::SmallVectorImpl< std::byte > &Code, const T &Val, bool &Success)
Helper to write bytecode and bail out if 32-bit offsets become invalid.
static CXXRecordDecl * getCXXRecord(const Expr *E)
static void emitNewInitializer(CIRGenFunction &cgf, const CXXNewExpr *e, QualType elementType, mlir::Type elementTy, Address newPtr, mlir::Value numElements, mlir::Value allocSizeWithoutCookie)
static mlir::Value emitCXXTypeidFromVTable(CIRGenFunction &cgf, const Expr *e, mlir::Type typeInfoPtrTy, bool hasNullCheck)
static void emitObjectDelete(CIRGenFunction &cgf, const CXXDeleteExpr *de, Address ptr, QualType elementType)
Emit the code for deleting a single object.
static void emitNullBaseClassInitialization(CIRGenFunction &cgf, Address destPtr, const CXXRecordDecl *base)
static void enterNewDeleteCleanup(CIRGenFunction &cgf, const CXXNewExpr *e, Address newPtr, mlir::Value allocSize, CharUnits allocAlign, const CallArgList &newArgs)
Enter a cleanup to call 'operator delete' if the initializer in a new-expression throws.
static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e, unsigned minElements, mlir::Value &numElements, mlir::Value &sizeWithoutCookie)
static void emitDestroyingObjectDelete(CIRGenFunction &cgf, const CXXDeleteExpr *de, Address ptr, QualType elementType)
Emit the code for deleting a single object via a destroying operator delete.
static void storeAnyExprIntoOneUnit(CIRGenFunction &cgf, const Expr *init, QualType allocType, Address newPtr, AggValueSlot::Overlap_t mayOverlap)
static CharUnits calculateCookiePadding(CIRGenFunction &cgf, const CXXNewExpr *e)
static mlir::Value emitDynamicCastToNull(CIRGenFunction &cgf, mlir::Location loc, QualType destTy)
static MemberCallInfo commonBuildCXXMemberOrOperatorCall(CIRGenFunction &cgf, const CXXMethodDecl *md, mlir::Value thisPtr, mlir::Value implicitParam, QualType implicitParamTy, const CallExpr *ce, CallArgList &args, CallArgList *rtlArgs)
static RValue emitNewDeleteCall(CIRGenFunction &cgf, const FunctionDecl *calleeDecl, const FunctionProtoType *calleeType, const CallArgList &args)
Emit a call to an operator new or operator delete function, as implicitly created by new-expressions ...
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
Defines the clang::Expr interface and subclasses for C++ expressions.
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code, ArrayRef< tooling::Range > Ranges, StringRef FileName="<stdin>")
Clean up any erroneous/redundant code in the given Ranges in Code.
Defines an enumeration for C++ overloaded operators.
static QualType getPointeeType(const MemRegion *R)
__device__ __2f16 b
cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr)
mlir::Value createOr(mlir::Location loc, mlir::Value lhs, mlir::Value rhs)
mlir::Value createPtrIsNull(mlir::Value ptr)
cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc)
mlir::Value createIntCast(mlir::Value src, mlir::Type newTy)
cir::CmpOp createCompare(mlir::Location loc, cir::CmpOpKind kind, mlir::Value lhs, mlir::Value rhs)
mlir::Value createSelect(mlir::Location loc, mlir::Value condition, mlir::Value trueValue, mlir::Value falseValue)
mlir::Value createMul(mlir::Location loc, mlir::Value lhs, mlir::Value rhs, OverflowBehavior ob=OverflowBehavior::None)
llvm::Align getABITypeAlign(mlir::Type ty) const
llvm::TypeSize getTypeSizeInBits(mlir::Type ty) const
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:227
TranslationUnitDecl * getTranslationUnitDecl() const
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
DeclarationNameTable DeclarationNames
Definition ASTContext.h:809
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
static bool hasSameType(QualType T1, QualType T2)
Determine whether the given types T1 and T2 are equivalent.
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
uint64_t getConstantArrayElementCount(const ConstantArrayType *CA) const
Return number of constant array elements.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object...
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3777
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
Expr * getRHS() const
Definition Expr.h:4093
Opcode getOpcode() const
Definition Expr.h:4086
Address withPointer(mlir::Value newPtr) const
Return address with different pointer, but same element type and alignment.
Definition Address.h:81
mlir::Value getPointer() const
Definition Address.h:96
mlir::Type getElementType() const
Definition Address.h:123
static Address invalid()
Definition Address.h:74
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
clang::CharUnits getAlignment() const
Definition Address.h:136
Address withAlignment(clang::CharUnits newAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:87
bool isValid() const
Definition Address.h:75
mlir::Value emitRawPointer() const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:110
An aggregate value slot.
IsZeroed_t isZeroed() const
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
cir::IntType getSIntNTy(int n)
cir::LoadOp createAlignedLoad(mlir::Location loc, mlir::Type ty, mlir::Value ptr, llvm::MaybeAlign align)
bool isNullValue(mlir::Attribute attr) const
cir::StoreOp createStore(mlir::Location loc, mlir::Value val, Address dst, bool isVolatile=false, mlir::IntegerAttr align={}, cir::SyncScopeKindAttr scope={}, cir::MemOrderAttr order={})
cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal)
cir::IntType getUIntNTy(int n)
virtual void emitVirtualObjectDelete(CIRGenFunction &cgf, const CXXDeleteExpr *de, Address ptr, QualType elementType, const CXXDestructorDecl *dtor)=0
virtual const clang::CXXRecordDecl * getThisArgumentTypeForMethod(const clang::CXXMethodDecl *md)
Get the type of the implicit "this" parameter used by a method.
virtual bool shouldTypeidBeNullChecked(QualType srcTy)=0
virtual mlir::Value emitTypeid(CIRGenFunction &cgf, QualType srcTy, Address thisPtr, mlir::Type typeInfoPtrTy)=0
virtual void emitBadCastCall(CIRGenFunction &cgf, mlir::Location loc)=0
virtual CharUnits getArrayCookieSize(const CXXNewExpr *e)
Returns the extra size required in order to store the array cookie for the given new-expression.
static CIRGenCallee forDirect(mlir::Operation *funcPtr, const CIRGenCalleeInfo &abstractInfo=CIRGenCalleeInfo())
Definition CIRGenCall.h:92
static CIRGenCallee forVirtual(const clang::CallExpr *ce, clang::GlobalDecl md, Address addr, cir::FuncType fTy)
Definition CIRGenCall.h:154
An abstract representation of regular/ObjC call/message targets.
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
void initFullExprCleanup()
Set up the last cleanup that was pushed as a conditional full-expression cleanup.
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
RValue emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *ce, ReturnValueSlot returnValue)
void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr, QualType deleteTy)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
RValue emitCXXMemberOrOperatorCall(const clang::CXXMethodDecl *md, const CIRGenCallee &callee, ReturnValueSlot returnValue, mlir::Value thisPtr, mlir::Value implicitParam, clang::QualType implicitParamTy, const clang::CallExpr *ce, CallArgList *rtlArgs)
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
RValue emitCUDAKernelCallExpr(const CUDAKernelCallExpr *expr, ReturnValueSlot returnValue)
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType, mlir::Type elementTy, Address beginPtr, mlir::Value numElements, mlir::Value allocSizeWithoutCookie)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
static int64_t getZExtIntValueFromConstOp(mlir::Value val)
Get zero-extended integer from a mlir::Value that is an int constant or a constant op.
mlir::Value emitCXXTypeidExpr(const CXXTypeidExpr *e)
mlir::Type convertTypeForMem(QualType t)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
mlir::Value emitCXXNewExpr(const CXXNewExpr *e)
Address returnValue
The temporary alloca to hold the return value.
void emitCXXConstructorCall(const clang::CXXConstructorDecl *d, clang::CXXCtorType type, bool forVirtualBase, bool delegating, AggValueSlot thisAVS, const clang::CXXConstructExpr *e)
void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit=false)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
void deactivateCleanupBlock(EHScopeStack::stable_iterator cleanup, mlir::Operation *dominatingIP)
Deactivates the given cleanup block.
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
CIRGenBuilderTy & getBuilder()
void popCleanupBlock(bool forDeactivation=false)
Pop a cleanup block from the stack.
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
RValue emitNewOrDeleteBuiltinCall(const FunctionProtoType *type, const CallExpr *callExpr, OverloadedOperatorKind op)
void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType, Address arrayBegin, const CXXConstructExpr *e, bool newPointerIsChecked, bool zeroInitialize=false)
Emit a loop to call a particular constructor for each of several members of an array.
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
void emitCXXDeleteExpr(const CXXDeleteExpr *e)
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignmen and cast it to the defa...
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
mlir::TypedAttr emitNullConstantForBase(const CXXRecordDecl *record)
Return a null constant appropriate for zero-initializing a base class with the given type.
cir::FuncOp getAddrOfFunction(clang::GlobalDecl gd, mlir::Type funcType=nullptr, bool forVTable=false, bool dontDefer=false, ForDefinition_t isForDefinition=NotForDefinition)
Return the address of the given function.
const cir::CIRDataLayout getDataLayout() const
CIRGenCXXABI & getCXXABI() const
const CIRGenFunctionInfo & arrangeFreeFunctionCall(const CallArgList &args, const FunctionType *fnType)
clang::CanQualType deriveThisType(const clang::CXXRecordDecl *rd, const clang::CXXMethodDecl *md)
Derives the 'this' type for CIRGen purposes, i.e.
void addFrom(const CallArgList &other)
Add all the arguments from another CallArgList to this one.
Definition CIRGenCall.h:248
void add(RValue rvalue, clang::QualType type)
Definition CIRGenCall.h:239
mlir::Attribute tryEmitAbstract(const Expr *e, QualType destType)
Information for lazily generating a cleanup.
A saved depth on the scope stack.
T * pushCleanupWithExtra(CleanupKind kind, size_t n, As... a)
Push a cleanup with non-constant storage requirements on the stack.
AlignmentSource getAlignmentSource() const
Address getAddress() const
mlir::Value getPointer() const
void setAddress(Address address)
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
A class for recording the number of arguments that a function signature requires.
static RequiredArgs getFromProtoWithExtraSlots(const clang::FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional...
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:260
Represents a call to a CUDA kernel function.
Definition ExprCXX.h:238
Represents a call to a C++ constructor.
Definition ExprCXX.h:1552
bool isElidable() const
Whether this construction is elidable.
Definition ExprCXX.h:1621
Expr * getArg(unsigned Arg)
Return the specified argument.
Definition ExprCXX.h:1695
bool requiresZeroInitialization() const
Whether this construction first requires zero-initialization before the initializer is called.
Definition ExprCXX.h:1654
CXXConstructorDecl * getConstructor() const
Get the constructor that this expression will (ultimately) call.
Definition ExprCXX.h:1615
CXXConstructionKind getConstructionKind() const
Determine whether this constructor is actually constructing a base class (rather than a complete obje...
Definition ExprCXX.h:1663
Represents a C++ constructor within a class.
Definition DeclCXX.h:2620
bool isDefaultConstructor() const
Whether this constructor is a default constructor (C++ [class.ctor]p5), which can be used to default-...
Definition DeclCXX.cpp:3051
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition ExprCXX.h:2630
FunctionDecl * getOperatorDelete() const
Definition ExprCXX.h:2669
bool isArrayForm() const
Definition ExprCXX.h:2656
QualType getDestroyedType() const
Retrieve the type being destroyed.
Definition ExprCXX.cpp:338
Represents a C++ destructor within a class.
Definition DeclCXX.h:2882
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition ExprCXX.h:485
bool isAlwaysNull() const
isAlwaysNull - Return whether the result of the dynamic_cast is proven to always be null.
Definition ExprCXX.cpp:840
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:183
SourceLocation getExprLoc() const LLVM_READONLY
Definition ExprCXX.h:224
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2132
bool isVirtual() const
Definition DeclCXX.h:2187
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition DeclCXX.h:2271
bool isInstance() const
Definition DeclCXX.h:2159
CXXMethodDecl * getDevirtualizedMethod(const Expr *Base, bool IsAppleKext)
If it's possible to devirtualize a call to this method, return the called function.
Definition DeclCXX.cpp:2526
CXXMethodDecl * getCorrespondingMethodInClass(const CXXRecordDecl *RD, bool MayBeBase=false)
Find the method in RD that corresponds to this one.
Definition DeclCXX.cpp:2472
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition ExprCXX.h:2359
bool isArray() const
Definition ExprCXX.h:2468
llvm::iterator_range< arg_iterator > placement_arguments()
Definition ExprCXX.h:2576
QualType getAllocatedType() const
Definition ExprCXX.h:2438
unsigned getNumImplicitArgs() const
Definition ExprCXX.h:2515
std::optional< Expr * > getArraySize()
This might return std::nullopt even if isArray() returns true, since there might not be an array size...
Definition ExprCXX.h:2473
ImplicitAllocationParameters implicitAllocationParameters() const
Provides the full set of information about expected implicit parameters in this call.
Definition ExprCXX.h:2566
bool hasInitializer() const
Whether this new-expression has any initializer at all.
Definition ExprCXX.h:2528
bool shouldNullCheckAllocation() const
True if the allocation result needs to be null-checked.
Definition ExprCXX.cpp:326
bool passAlignment() const
Indicates whether the required alignment should be implicitly passed to the allocation function.
Definition ExprCXX.h:2555
FunctionDecl * getOperatorDelete() const
Definition ExprCXX.h:2465
unsigned getNumPlacementArgs() const
Definition ExprCXX.h:2498
SourceRange getSourceRange() const
Definition ExprCXX.h:2614
FunctionDecl * getOperatorNew() const
Definition ExprCXX.h:2463
Expr * getInitializer()
The initializer of this new-expression.
Definition ExprCXX.h:2537
A call to an overloaded operator written using operator syntax.
Definition ExprCXX.h:85
Represents a list-initialization with parenthesis.
Definition ExprCXX.h:5141
MutableArrayRef< Expr * > getInitExprs()
Definition ExprCXX.h:5181
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition ExprCXX.h:2749
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition DeclCXX.h:1186
CXXDestructorDecl * getDestructor() const
Returns the destructor decl for this class.
Definition DeclCXX.cpp:2131
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition ExprCXX.h:852
bool isTypeOperand() const
Definition ExprCXX.h:888
QualType getTypeOperand(const ASTContext &Context) const
Retrieves the type operand of this typeid() expression after various required adjustments (removing r...
Definition ExprCXX.cpp:161
Expr * getExprOperand() const
Definition ExprCXX.h:899
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprCXX.h:906
bool isMostDerived(const ASTContext &Context) const
Best-effort check if the expression operand refers to a most derived object.
Definition ExprCXX.cpp:149
bool isPotentiallyEvaluated() const
Determine whether this typeid has a type operand which is potentially evaluated, per C++11 [expr....
Definition ExprCXX.cpp:134
bool hasNullCheck() const
Whether this is of a form like "typeid(*ptr)" that can throw a std::bad_typeid if a pointer is a null...
Definition ExprCXX.cpp:200
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3150
arg_iterator arg_begin()
Definition Expr.h:3203
arg_iterator arg_end()
Definition Expr.h:3206
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3129
Expr * getCallee()
Definition Expr.h:3093
arg_range arguments()
Definition Expr.h:3198
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
bool isOne() const
isOne - Test whether the quantity equals one.
Definition CharUnits.h:125
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition CharUnits.h:53
Represents the canonical version of C arrays with a specified constant size.
Definition TypeBase.h:3815
The results of name lookup within a DeclContext.
Definition DeclBase.h:1395
lookup_result lookup(DeclarationName Name) const
lookup - Find the declarations (if any) with the given Name in this context.
virtual SourceRange getSourceRange() const LLVM_READONLY
Source range that this declaration covers.
Definition DeclBase.h:435
DeclarationName getCXXOperatorName(OverloadedOperatorKind Op)
Get the name of the overloadable C++ operator corresponding to Op.
The name of a declaration.
QualType getTypeAsWritten() const
getTypeAsWritten - Returns the type that this expression is casting to, as written in the source code...
Definition Expr.h:3958
This represents one expression.
Definition Expr.h:112
const CXXRecordDecl * getBestDynamicClassType() const
For an expression of class type or pointer to class type, return the most derived class decl the expr...
Definition Expr.cpp:69
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3090
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
Expr * IgnoreParenBaseCasts() LLVM_READONLY
Skip past any parentheses and derived-to-base casts until reaching a fixed point.
Definition Expr.cpp:3112
bool isTemporaryObject(ASTContext &Ctx, const CXXRecordDecl *TempTy) const
Determine whether the result of this expression is a temporary object of the given class type.
Definition Expr.cpp:3253
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3178
Represents a function declaration or definition.
Definition Decl.h:2018
bool isDestroyingOperatorDelete() const
Determine whether this is a destroying operator delete.
Definition Decl.cpp:3546
QualType getReturnType() const
Definition Decl.h:2863
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Definition Decl.h:2395
bool isReplaceableGlobalAllocationFunction(UnsignedOrNone *AlignmentParam=nullptr, bool *IsNothrow=nullptr) const
Determines whether this function is one of the replaceable global allocation functions: void *operato...
Definition Decl.h:2612
UsualDeleteParams getUsualDeleteParams() const
Definition Decl.cpp:3562
bool isReservedGlobalPlacementOperator() const
Determines whether this operator new or delete is one of the reserved global placement operators: voi...
Definition Decl.cpp:3398
bool isDefaulted() const
Whether this function is defaulted.
Definition Decl.h:2403
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4544
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5362
CanThrowResult canThrow() const
Determine whether this function type has a non-throwing exception specification.
Definition Type.cpp:3967
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
Represents an implicitly-generated value initialization of an object of a given type.
Definition Expr.h:6058
Describes an C or C++ initializer list.
Definition Expr.h:5302
bool isStringLiteralInit() const
Is this an initializer for an array of characters, initialized by a string literal or an @encode?
Definition Expr.cpp:2448
unsigned getNumInits() const
Definition Expr.h:5332
Expr * getArrayFiller()
If this initializer list initializes an array with more elements than there are initializers in the l...
Definition Expr.h:5402
const Expr * getInit(unsigned Init) const
Definition Expr.h:5354
ArrayRef< Expr * > inits() const
Definition Expr.h:5352
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3708
QualType getPointeeType() const
Definition TypeBase.h:3726
This represents a decl that may have a name.
Definition Decl.h:274
Represents a C++ nested name specifier, such as "\::std::vector<int>::".
ObjCEncodeExpr, used for @encode in Objective-C.
Definition ExprObjC.h:441
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3383
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8476
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1449
QualType getCanonicalType() const
Definition TypeBase.h:8488
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1556
bool isPODType(const ASTContext &Context) const
Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
Definition Type.cpp:2788
bool hasStrongOrWeakObjCLifetime() const
Definition TypeBase.h:1457
Represents a struct/union/class.
Definition Decl.h:4343
field_range fields() const
Definition Decl.h:4546
RecordDecl * getDefinitionOrSelf() const
Definition Decl.h:4531
Base for LValueReferenceType and RValueReferenceType.
Definition TypeBase.h:3628
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1802
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4888
bool isUnion() const
Definition Decl.h:3946
Exposes information about the current target.
Definition TargetInfo.h:227
unsigned getNewAlign() const
Return the largest alignment for which a suitably-sized allocation with 'operator new(size_t)' is gua...
Definition TargetInfo.h:767
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Decl.h:3565
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantArrayType() const
Definition TypeBase.h:8776
bool isVoidPointerType() const
Definition Type.cpp:749
bool isPointerType() const
Definition TypeBase.h:8673
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9333
bool isReferenceType() const
Definition TypeBase.h:8697
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1958
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:789
const ArrayType * getAsArrayTypeUnsafe() const
A variant of getAs<> for array types which silently discards qualifiers from the outermost type.
Definition TypeBase.h:9319
const T * getAsCanonical() const
If this type is canonically the specified type, return its canonical type cast to that specified type...
Definition TypeBase.h:2976
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9266
bool isRecordType() const
Definition TypeBase.h:8800
QualType getType() const
Definition Decl.h:723
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
const internal::VariadicDynCastAllOfMatcher< Stmt, CallExpr > callExpr
Matches call expressions.
const AstTypeMatcher< TagType > tagType
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const internal::VariadicAllOfMatcher< Decl > decl
Matches declarations.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
The JSON file list parser is used to communicate input to InstallAPI.
OverloadedOperatorKind
Enumeration specifying the different kinds of C++ overloaded operators.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
@ Ctor_Base
Base object ctor.
Definition ABI.h:26
@ Ctor_Complete
Complete object ctor.
Definition ABI.h:25
bool isa(CodeGen::Address addr)
Definition Address.h:330
AlignedAllocationMode alignedAllocationModeFromBool(bool IsAligned)
Definition ExprCXX.h:2273
bool isAlignedAllocation(AlignedAllocationMode Mode)
Definition ExprCXX.h:2269
AlignedAllocationMode
Definition ExprCXX.h:2267
@ Dtor_Complete
Complete object dtor.
Definition ABI.h:36
bool isTypeAwareAllocation(TypeAwareAllocationMode Mode)
Definition ExprCXX.h:2257
TypeAwareAllocationMode
Definition ExprCXX.h:2255
U cast(CodeGen::Address addr)
Definition Address.h:327
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
static bool objCLifetime()
static bool addressSpace()
static bool devirtualizeDestructor()
static bool aggValueSlotGC()
static bool deleteArray()
static bool emitTypeCheck()
static bool cleanupDeactivationScope()
static bool opCallMustTail()
static bool typeAwareAllocation()
static bool emitNullCheckForDeleteCalls()
static bool generateDebugInfo()
clang::CharUnits getSizeAlign() const
The parameters to pass to a usual operator delete.
Definition ExprCXX.h:2348
TypeAwareAllocationMode TypeAwareDelete
Definition ExprCXX.h:2349
AlignedAllocationMode Alignment
Definition ExprCXX.h:2352