clang 23.0.0git
CIRGenExprCXX.cpp
Go to the documentation of this file.
1//===--- CIRGenExprCXX.cpp - Emit CIR Code for C++ expressions ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code dealing with code generation of C++ expressions
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenCXXABI.h"
15#include "CIRGenFunction.h"
16
17#include "clang/AST/CharUnits.h"
18#include "clang/AST/DeclCXX.h"
19#include "clang/AST/ExprCXX.h"
20#include "clang/AST/ExprObjC.h"
23#include "llvm/Support/TrailingObjects.h"
24
25using namespace clang;
26using namespace clang::CIRGen;
27
// File-local result type for setting up the argument list of a C++ member or
// operator call: carries the prototype's required-argument info plus how many
// prefix arguments were pushed before the explicit ones.
28namespace {
29struct MemberCallInfo {
 // Arguments required by the callee's prototype (drives variadic handling
 // when the call is arranged).
30 RequiredArgs reqArgs;
31 // Number of prefix arguments for the call. Ignores the `this` pointer.
32 unsigned prefixSize;
33};
34} // namespace
35
// Shared helper for member/operator calls: pushes the `this` pointer, an
// optional implicit parameter (e.g. the VTT), and the explicit call arguments
// into `args`, returning the RequiredArgs and prefix-argument count needed to
// arrange the call.
// NOTE(review): this rendering is missing the function's signature line and
// the initializer expressions at original lines 41, 47 and 57 — verify
// against the upstream file.
 37 CIRGenFunction &cgf, const CXXMethodDecl *md, mlir::Value thisPtr,
 38 mlir::Value implicitParam, QualType implicitParamTy, const CallExpr *ce,
 39 CallArgList &args, CallArgList *rtlArgs) {
 40 assert(ce == nullptr || isa<CXXMemberCallExpr>(ce) ||
 42 assert(md->isInstance() &&
 43 "Trying to emit a member or operator call expr on a static method!");
 44
 45 // Push the this ptr.
 46 const CXXRecordDecl *rd =
 48 args.add(RValue::get(thisPtr), cgf.getTypes().deriveThisType(rd, md));
 49
 50 // If there is an implicit parameter (e.g. VTT), emit it.
 51 if (implicitParam) {
 52 args.add(RValue::get(implicitParam), implicitParamTy);
 53 }
 54
 55 const auto *fpt = md->getType()->castAs<FunctionProtoType>();
 56 RequiredArgs required =
 // Everything pushed so far except `this` counts as a prefix argument.
 58 unsigned prefixSize = args.size() - 1;
 59
 60 // Add the rest of the call args
 61 if (rtlArgs) {
 62 // Special case: if the caller emitted the arguments right-to-left already
 63 // (prior to emitting the *this argument), we're done. This happens for
 64 // assignment operators.
 65 args.addFrom(*rtlArgs);
 66 } else if (ce) {
 67 // Special case: skip first argument of CXXOperatorCall (it is "this").
 68 unsigned argsToSkip = isa<CXXOperatorCallExpr>(ce) ? 1 : 0;
 69 cgf.emitCallArgs(args, fpt, drop_begin(ce->arguments(), argsToSkip),
 70 ce->getDirectCallee());
 71 } else {
 72 assert(
 73 fpt->getNumParams() == 0 &&
 74 "No CallExpr specified for function with non-zero number of arguments");
 75 }
 76
 78 return {required, prefixSize};
 79}
80
// Emit a call through a pointer-to-member-function expression
// (`obj.*memfn(...)` / `ptr->*memfn(...)`): evaluates the object address,
// resolves the member pointer to a callee and adjusted `this` via
// cir.get_method, then emits the call.
// NOTE(review): the function's signature and a few statements are missing
// from this rendering (original lines ~81-85, 99, 115 and 119 — presumably
// the `bo` initializer, and the `required` args computation) — verify
// against the upstream file.
 84 const BinaryOperator *bo =
 86 const Expr *baseExpr = bo->getLHS();
 87 const Expr *memFnExpr = bo->getRHS();
 88
 89 const auto *mpt = memFnExpr->getType()->castAs<MemberPointerType>();
 90 const auto *fpt = mpt->getPointeeType()->castAs<FunctionProtoType>();
 91
 92 // Emit the 'this' pointer.
 93 Address thisAddr = Address::invalid();
 // `->*` gives the object pointer directly; `.*` requires taking the
 // lvalue's address.
 94 if (bo->getOpcode() == BO_PtrMemI)
 95 thisAddr = emitPointerWithAlignment(baseExpr);
 96 else
 97 thisAddr = emitLValue(baseExpr).getAddress();
 98
 100
 101 // Get the member function pointer.
 102 mlir::Value memFnPtr = emitScalarExpr(memFnExpr);
 103
 104 // Resolve the member function pointer to the actual callee and adjust the
 105 // "this" pointer for call.
 106 mlir::Location loc = getLoc(ce->getExprLoc());
 107 auto [/*mlir::Value*/ calleePtr, /*mlir::Value*/ adjustedThis] =
 108 builder.createGetMethod(loc, memFnPtr, thisAddr.getPointer());
 109
 110 // Prepare the call arguments.
 111 CallArgList argsList;
 112 argsList.add(RValue::get(adjustedThis), getContext().VoidPtrTy);
 113 emitCallArgs(argsList, fpt, ce->arguments());
 114
 116
 117 // Build the call.
 118 CIRGenCallee callee(fpt, calleePtr.getDefiningOp());
 120 return emitCall(cgm.getTypes().arrangeCXXMethodCall(argsList, fpt, required,
 121 /*PrefixSize=*/0),
 122 callee, returnValue, argsList, nullptr, loc);
 123}
124
// Emit a C++ member call or operator-member call: computes the `this`
// object, decides between virtual and direct dispatch (including the
// destructor special case), and emits the call. Unlike classic codegen, CIR
// deliberately emits trivial copy/move assignment operators (see the note
// below).
// NOTE(review): the function's signature and several statements are missing
// from this rendering (original lines ~125-126, 129, 134, 160, 174, 183,
// 189-190, 207, 211, 213, 235, 253) — verify against the upstream file.
 127 bool hasQualifier, NestedNameSpecifier qualifier, bool isArrow,
 128 const Expr *base) {
 130
 131 // Compute the object pointer.
 // A virtual call is only a candidate when the callee is virtual and the
 // call is not explicitly qualified (see [class.virtual]p12 below).
 132 bool canUseVirtualCall = md->isVirtual() && !hasQualifier;
 133 const CXXMethodDecl *devirtualizedMethod = nullptr;
 135
 136 // Note on trivial assignment
 137 // --------------------------
 138 // Classic codegen avoids generating the trivial copy/move assignment operator
 139 // when it isn't necessary, choosing instead to just produce IR with an
 140 // equivalent effect. We have chosen not to do that in CIR, instead emitting
 141 // trivial copy/move assignment operators and allowing later transformations
 142 // to optimize them away if appropriate.
 143
 144 // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
 145 // operator before the LHS.
 146 CallArgList rtlArgStorage;
 147 CallArgList *rtlArgs = nullptr;
 148 if (auto *oce = dyn_cast<CXXOperatorCallExpr>(ce)) {
 149 if (oce->isAssignmentOp()) {
 150 rtlArgs = &rtlArgStorage;
 151 emitCallArgs(*rtlArgs, md->getType()->castAs<FunctionProtoType>(),
 152 drop_begin(ce->arguments(), 1), ce->getDirectCallee(),
 153 /*ParamsToSkip*/ 0);
 154 }
 155 }
 156
 157 LValue thisPtr;
 158 if (isArrow) {
 159 LValueBaseInfo baseInfo;
 161 Address thisValue = emitPointerWithAlignment(base, &baseInfo);
 162 thisPtr = makeAddrLValue(thisValue, base->getType(), baseInfo);
 163 } else {
 164 thisPtr = emitLValue(base);
 165 }
 166
 167 if (isa<CXXConstructorDecl>(md)) {
 168 cgm.errorNYI(ce->getSourceRange(),
 169 "emitCXXMemberOrOperatorMemberCallExpr: constructor call");
 170 return RValue::get(nullptr);
 171 }
 172
 // Trivial/defaulted-union members may need no call at all.
 // NOTE(review): the rest of this condition (original line 174) is missing
 // from this rendering.
 173 if ((md->isTrivial() || (md->isDefaulted() && md->getParent()->isUnion())) &&
 175 return RValue::get(nullptr);
 176
 177 // Compute the function type we're calling
 178 const CXXMethodDecl *calleeDecl =
 179 devirtualizedMethod ? devirtualizedMethod : md;
 180 const CIRGenFunctionInfo *fInfo = nullptr;
 181 if (const auto *dtor = dyn_cast<CXXDestructorDecl>(calleeDecl))
 182 fInfo = &cgm.getTypes().arrangeCXXStructorDeclaration(
 184 else
 185 fInfo = &cgm.getTypes().arrangeCXXMethodDeclaration(calleeDecl);
 186
 187 cir::FuncType ty = cgm.getTypes().getFunctionType(*fInfo);
 188
 191
 192 // C++ [class.virtual]p12:
 193 // Explicit qualification with the scope operator (5.1) suppresses the
 194 // virtual call mechanism.
 195 //
 196 // We also don't emit a virtual call if the base expression has a record type
 197 // because then we know what the type is.
 198 bool useVirtualCall = canUseVirtualCall && !devirtualizedMethod;
 199
 200 if (const auto *dtor = dyn_cast<CXXDestructorDecl>(calleeDecl)) {
 201 assert(ce->arg_begin() == ce->arg_end() &&
 202 "Destructor shouldn't have explicit parameters");
 203 assert(returnValue.isNull() && "Destructor shouldn't have return value");
 204 if (useVirtualCall) {
 205 cgm.getCXXABI().emitVirtualDestructorCall(*this, dtor, Dtor_Complete,
 206 thisPtr.getAddress(),
 208 } else {
 209 GlobalDecl globalDecl(dtor, Dtor_Complete);
 210 CIRGenCallee callee;
 212 if (!devirtualizedMethod) {
 214 cgm.getAddrOfCXXStructor(globalDecl, fInfo, ty), globalDecl);
 215 } else {
 216 cgm.errorNYI(ce->getSourceRange(), "devirtualized destructor call");
 217 return RValue::get(nullptr);
 218 }
 219
 220 QualType thisTy =
 221 isArrow ? base->getType()->getPointeeType() : base->getType();
 222 // CIRGen does not pass CallOrInvoke here (different from OG LLVM codegen)
 223 // because in practice it is always null even in OG.
 224 emitCXXDestructorCall(globalDecl, callee, thisPtr.getPointer(), thisTy,
 225 /*implicitParam=*/nullptr,
 226 /*implicitParamTy=*/QualType(), ce);
 227 }
 228 return RValue::get(nullptr);
 229 }
 230
 231 CIRGenCallee callee;
 232 if (useVirtualCall) {
 233 callee = CIRGenCallee::forVirtual(ce, md, thisPtr.getAddress(), ty);
 234 } else {
 236 if (getLangOpts().AppleKext) {
 237 cgm.errorNYI(ce->getSourceRange(),
 238 "emitCXXMemberOrOperatorMemberCallExpr: AppleKext");
 239 return RValue::get(nullptr);
 240 }
 241
 242 callee = CIRGenCallee::forDirect(cgm.getAddrOfFunction(calleeDecl, ty),
 243 GlobalDecl(calleeDecl));
 244 }
 245
 // Virtual methods may need a `this` adjustment even for direct calls
 // (e.g. when devirtualized); delegate to the C++ ABI.
 246 if (md->isVirtual()) {
 247 Address newThisAddr =
 248 cgm.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
 249 *this, calleeDecl, thisPtr.getAddress(), useVirtualCall);
 250 thisPtr.setAddress(newThisAddr);
 251 }
 252
 254 calleeDecl, callee, returnValue, thisPtr.getPointer(),
 255 /*ImplicitParam=*/nullptr, QualType(), ce, rtlArgs);
 256}
257
// Emit a plain member call expression (`obj.method(...)`), forwarding to the
// member-or-operator path with no qualifier and `this` taken from arg 0.
// NOTE(review): parts of the signature and the forwarded call (original
// lines 259, 261 and 264) are missing from this rendering — verify against
// the upstream file.
258RValue
 260 const CXXMethodDecl *md,
 262 assert(md->isInstance() &&
 263 "Trying to emit a member call expr on a static method!");
 265 e, md, returnValue, /*HasQualifier=*/false, /*Qualifier=*/std::nullopt,
 266 /*IsArrow=*/false, e->getArg(0));
 267}
268
// Emit a CUDA kernel-launch expression (`kernel<<<...>>>(...)`) by
// delegating to the CUDA runtime emitter; device-side launches are not yet
// implemented (errorNYI).
// NOTE(review): the function's signature lines (original 269-270) are
// missing from this rendering.
 271 // Emit as a device kernel call if CUDA device code is to be generated.
 272 if (!getLangOpts().HIP && getLangOpts().CUDAIsDevice)
 273 cgm.errorNYI("CUDA Device side kernel call");
 274 return cgm.getCUDARuntime().emitCUDAKernelCallExpr(*this, expr, returnValue);
 275}
276
// Emit a call to a (non-destructor) C++ member or operator function through
// an already-resolved callee: builds the argument list via
// commonBuildCXXMemberOrOperatorCall, arranges the call, and emits it.
// NOTE(review): the signature's first line (original 277) and original line
// 289 are missing from this rendering.
 278 const CXXMethodDecl *md, const CIRGenCallee &callee,
 279 ReturnValueSlot returnValue, mlir::Value thisPtr, mlir::Value implicitParam,
 280 QualType implicitParamTy, const CallExpr *ce, CallArgList *rtlArgs) {
 281 const auto *fpt = md->getType()->castAs<FunctionProtoType>();
 282 CallArgList args;
 283 MemberCallInfo callInfo = commonBuildCXXMemberOrOperatorCall(
 284 *this, md, thisPtr, implicitParam, implicitParamTy, ce, args, rtlArgs);
 285 auto &fnInfo = cgm.getTypes().arrangeCXXMethodCall(
 286 args, fpt, callInfo.reqArgs, callInfo.prefixSize);
 // Without a CallExpr we fall back to the function's current source
 // location, which must have been set by the caller.
 287 assert((ce || currSrcLoc) && "expected source location");
 288 mlir::Location loc = ce ? getLoc(ce->getExprLoc()) : *currSrcLoc;
 290 return emitCall(fnInfo, callee, returnValue, args, nullptr, loc);
 291}
292
// Zero-initialize the non-virtual portion of a base-class subobject at
// `destPtr`. Currently only handles the simple case of a single all-zero
// store; bases whose null constant is not all-zero (e.g. containing member
// pointers) are NYI.
// NOTE(review): the function's signature line (original 293) and the
// declaration of `stores` (original lines 305, 310) are missing from this
// rendering — verify against the upstream file.
 294 Address destPtr,
 295 const CXXRecordDecl *base) {
 // Empty bases occupy no storage worth initializing.
 296 if (base->isEmpty())
 297 return;
 298
 299 const ASTRecordLayout &layout = cgf.getContext().getASTRecordLayout(base);
 300 CharUnits nvSize = layout.getNonVirtualSize();
 301
 302 // We cannot simply zero-initialize the entire base sub-object if vbptrs are
 303 // present, they are initialized by the most derived class before calling the
 304 // constructor.
 306 stores.emplace_back(CharUnits::Zero(), nvSize);
 307
 308 // Each store is split by the existence of a vbptr.
 309 // TODO(cir): This only needs handling for the MS CXXABI.
 311
 312 // If the type contains a pointer to data member we can't memset it to zero.
 313 // Instead, create a null constant and copy it to the destination.
 314 // TODO: there are other patterns besides zero that we can usefully memset,
 315 // like -1, which happens to be the pattern used by member-pointers.
 316 // TODO: isZeroInitializable can be over-conservative in the case where a
 317 // virtual base contains a member pointer.
 318 mlir::TypedAttr nullConstantForBase = cgf.cgm.emitNullConstantForBase(base);
 319 if (!cgf.getBuilder().isNullValue(nullConstantForBase)) {
 320 cgf.cgm.errorNYI(
 321 base->getSourceRange(),
 322 "emitNullBaseClassInitialization: base constant is not null");
 323 } else {
 324 // Otherwise, just memset the whole thing to zero. This is legal
 325 // because in LLVM, all default initializers (other than the ones we just
 326 // handled above) are guaranteed to have a bit pattern of all zeros.
 327 // TODO(cir): When the MS CXXABI is supported, we will need to iterate over
 328 // `stores` and create a separate memset for each one. For now, we know that
 329 // there will only be one store and it will begin at offset zero, so that
 330 // simplifies this code considerably.
 331 assert(stores.size() == 1 && "Expected only one store");
 332 assert(stores[0].first == CharUnits::Zero() &&
 333 "Expected store to begin at offset zero");
 334 CIRGenBuilderTy builder = cgf.getBuilder();
 335 mlir::Location loc = cgf.getLoc(base->getBeginLoc());
 336 builder.createStore(loc, builder.getConstant(loc, nullConstantForBase),
 337 destPtr);
 338 }
 339}
340
// Emit a CXXConstructExpr into the given aggregate slot: optional zero
// initialization, trivial-default-constructor elision, temporary elision,
// array construction, and finally the plain constructor call.
// NOTE(review): the function's signature line (original 341) and the switch
// case labels / a few statements (original lines 352-354, 357-359, 385, 389,
// 394-395, 397, 402, 405) are missing from this rendering — verify against
// the upstream file.
 342 AggValueSlot dest) {
 343 assert(!dest.isIgnored() && "Must have a destination!");
 344 const CXXConstructorDecl *cd = e->getConstructor();
 345
 346 // If we require zero initialization before (or instead of) calling the
 347 // constructor, as can be the case with a non-user-provided default
 348 // constructor, emit the zero initialization now, unless destination is
 349 // already zeroed.
 350 if (e->requiresZeroInitialization() && !dest.isZeroed()) {
 351 switch (e->getConstructionKind()) {
 355 e->getType());
 356 break;
 360 cd->getParent());
 361 break;
 362 }
 363 }
 364
 365 // If this is a call to a trivial default constructor, do nothing.
 366 if (cd->isTrivial() && cd->isDefaultConstructor())
 367 return;
 368
 369 // Elide the constructor if we're constructing from a temporary
 370 if (getLangOpts().ElideConstructors && e->isElidable()) {
 371 // FIXME: This only handles the simplest case, where the source object is
 372 // passed directly as the first argument to the constructor. This
 373 // should also handle stepping through implicit casts and conversion
 374 // sequences which involve two steps, with a conversion operator
 375 // followed by a converting constructor.
 376 const Expr *srcObj = e->getArg(0);
 377 assert(srcObj->isTemporaryObject(getContext(), cd->getParent()));
 378 assert(
 379 getContext().hasSameUnqualifiedType(e->getType(), srcObj->getType()));
 // Copy/move-initialize directly from the source aggregate instead of
 // calling the constructor.
 380 emitAggExpr(srcObj, dest);
 381 return;
 382 }
 383
 384 if (const ArrayType *arrayType = getContext().getAsArrayType(e->getType())) {
 386 emitCXXAggrConstructorCall(cd, arrayType, dest.getAddress(), e, false);
 387 } else {
 388
 390 bool forVirtualBase = false;
 391 bool delegating = false;
 392
 // Pick the constructor variant (complete/base) and flags based on how
 // this object is being constructed.
 393 switch (e->getConstructionKind()) {
 396 break;
 398 // We should be emitting a constructor; GlobalDecl will assert this
 399 type = curGD.getCtorType();
 400 delegating = true;
 401 break;
 403 forVirtualBase = true;
 404 [[fallthrough]];
 406 type = Ctor_Base;
 407 break;
 408 }
 409
 410 emitCXXConstructorCall(cd, type, forVirtualBase, delegating, dest, e);
 411 }
 412}
413
// Compute the array cookie size (in CharUnits) a new-expression needs; zero
// for non-array news and for the reserved placement operator new[].
// NOTE(review): the function's signature line (original 414) and the
// placement-new check condition (original line 421) are missing from this
// rendering — verify against the upstream file.
 415 const CXXNewExpr *e) {
 416 if (!e->isArray())
 417 return CharUnits::Zero();
 418
 419 // No cookie is required if the operator new[] being used is the
 420 // reserved placement operator new[].
 422 return CharUnits::Zero();
 423
 // Otherwise the target C++ ABI decides how big the cookie is.
 424 return cgf.cgm.getCXXABI().getArrayCookieSize(e);
 425}
426
// Compute the total allocation size (a size_t value) for a C++ new
// expression. For array news, also produces `numElements` and
// `sizeWithoutCookie`, with overflow checking that yields an all-ones size
// (so operator new fails) when the computation would overflow.
// NOTE(review): a few declaration lines are missing from this rendering
// (original 431, 435, 457, 462 — presumably the `type`/`typeSize`
// declarations and the ConstantArrayType condition) — verify against the
// upstream file.
427static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e,
 428 unsigned minElements,
 429 mlir::Value &numElements,
 430 mlir::Value &sizeWithoutCookie) {
 432 mlir::Location loc = cgf.getLoc(e->getSourceRange());
 433
 // Scalar new: the size is just the allocated type's size, no cookie.
 434 if (!e->isArray()) {
 436 sizeWithoutCookie = cgf.getBuilder().getConstant(
 437 loc, cir::IntAttr::get(cgf.sizeTy, typeSize.getQuantity()));
 438 return sizeWithoutCookie;
 439 }
 440
 441 // The width of size_t.
 442 unsigned sizeWidth = cgf.cgm.getDataLayout().getTypeSizeInBits(cgf.sizeTy);
 443
 444 // The number of elements can have an arbitrary integer type;
 445 // essentially, we need to multiply it by a constant factor, add a
 446 // cookie size, and verify that the result is representable as a
 447 // size_t. That's just a gloss, though, and it's wrong in one
 448 // important way: if the count is negative, it's an error even if
 449 // the cookie size would bring the total size >= 0.
 450 //
 451 // If the array size is constant, Sema will have prevented negative
 452 // values and size overflow.
 453
 454 // Compute the constant factor.
 455 llvm::APInt arraySizeMultiplier(sizeWidth, 1);
 456 while (const ConstantArrayType *cat =
 458 type = cat->getElementType();
 459 arraySizeMultiplier *= cat->getSize();
 460 }
 461
 463 llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
 464 typeSizeMultiplier *= arraySizeMultiplier;
 465
 466 // Figure out the cookie size.
 467 llvm::APInt cookieSize(sizeWidth,
 468 calculateCookiePadding(cgf, e).getQuantity());
 469
 470 // This will be a size_t.
 471 mlir::Value size;
 472
 473 // Emit the array size expression.
 474 // We multiply the size of all dimensions for NumElements.
 475 // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
 476 const Expr *arraySize = *e->getArraySize();
 477 mlir::Attribute constNumElements =
 478 ConstantEmitter(cgf.cgm, &cgf)
 479 .tryEmitAbstract(arraySize, arraySize->getType());
 480 if (constNumElements) {
 481 // Get an APInt from the constant
 482 const llvm::APInt &count =
 483 mlir::cast<cir::IntAttr>(constNumElements).getValue();
 484
 485 [[maybe_unused]] unsigned numElementsWidth = count.getBitWidth();
 486 bool hasAnyOverflow = false;
 487
 488 // The equivalent code in CodeGen/CGExprCXX.cpp handles these cases as
 489 // overflow, but that should never happen. The size argument is implicitly
 490 // cast to a size_t, so it can never be negative and numElementsWidth will
 491 // always equal sizeWidth.
 492 assert(!count.isNegative() && "Expected non-negative array size");
 493 assert(numElementsWidth == sizeWidth &&
 494 "Expected a size_t array size constant");
 495
 496 // Okay, compute a count at the right width.
 497 llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
 498
 499 // Scale numElements by that. This might overflow, but we don't
 500 // care because it only overflows if allocationSize does too, and
 501 // if that overflows then we shouldn't use this.
 502 // This emits a constant that may not be used, but we can't tell here
 503 // whether it will be needed or not.
 504 numElements =
 505 cgf.getBuilder().getConstInt(loc, adjustedCount * arraySizeMultiplier);
 506
 507 // Compute the size before cookie, and track whether it overflowed.
 508 bool overflow;
 509 llvm::APInt allocationSize =
 510 adjustedCount.umul_ov(typeSizeMultiplier, overflow);
 511
 512 // Sema prevents us from hitting this case
 513 assert(!overflow && "Overflow in array allocation size");
 514
 515 // Add in the cookie, and check whether it's overflowed.
 516 if (cookieSize != 0) {
 517 // Save the current size without a cookie. This shouldn't be
 518 // used if there was overflow
 519 sizeWithoutCookie = cgf.getBuilder().getConstInt(
 520 loc, allocationSize.zextOrTrunc(sizeWidth));
 521
 522 allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
 523 hasAnyOverflow |= overflow;
 524 }
 525
 526 // On overflow, produce a -1 so operator new will fail
 527 if (hasAnyOverflow) {
 528 size =
 529 cgf.getBuilder().getConstInt(loc, llvm::APInt::getAllOnes(sizeWidth));
 530 } else {
 531 size = cgf.getBuilder().getConstInt(loc, allocationSize);
 532 }
 533 } else {
 534 // Create a value for the variable number of elements
 535 numElements = cgf.emitScalarExpr(*e->getArraySize());
 536 auto numElementsType = mlir::cast<cir::IntType>(numElements.getType());
 537 unsigned numElementsWidth = numElementsType.getWidth();
 538
 539 // The number of elements can have an arbitrary integer type;
 540 // essentially, we need to multiply it by a constant factor, add a
 541 // cookie size, and verify that the result is representable as a
 542 // size_t. That's just a gloss, though, and it's wrong in one
 543 // important way: if the count is negative, it's an error even if
 544 // the cookie size would bring the total size >= 0.
 545 bool isSigned =
 546 (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
 547
 548 // There are up to five conditions we need to test for:
 549 // 1) if isSigned, we need to check whether numElements is negative;
 550 // 2) if numElementsWidth > sizeWidth, we need to check whether
 551 // numElements is larger than something representable in size_t;
 552 // 3) if minElements > 0, we need to check whether numElements is smaller
 553 // than that.
 554 // 4) we need to compute
 555 // sizeWithoutCookie := numElements * typeSizeMultiplier
 556 // and check whether it overflows; and
 557 // 5) if we need a cookie, we need to compute
 558 // size := sizeWithoutCookie + cookieSize
 559 // and check whether it overflows.
 560
 561 mlir::Value hasOverflow;
 562
 563 // If numElementsWidth > sizeWidth, then one way or another, we're
 564 // going to have to do a comparison for (2), and this happens to
 565 // take care of (1), too.
 566 if (numElementsWidth > sizeWidth) {
 567 llvm::APInt threshold =
 568 llvm::APInt::getOneBitSet(numElementsWidth, sizeWidth);
 569
 570 // Use an unsigned comparison regardless of the sign of numElements.
 571 mlir::Value unsignedNumElements = numElements;
 572 if (isSigned)
 573 unsignedNumElements = cgf.getBuilder().createIntCast(
 574 numElements, cgf.getBuilder().getUIntNTy(numElementsWidth));
 575
 576 mlir::Value thresholdV =
 577 cgf.getBuilder().getConstInt(loc, threshold, /*isUnsigned=*/true);
 578 hasOverflow = cgf.getBuilder().createCompare(
 579 loc, cir::CmpOpKind::ge, unsignedNumElements, thresholdV);
 580 numElements = cgf.getBuilder().createIntCast(
 581 unsignedNumElements, mlir::cast<cir::IntType>(cgf.sizeTy));
 582
 583 // Otherwise, if we're signed, we want to sext up to size_t.
 584 } else if (isSigned) {
 585 if (numElementsWidth < sizeWidth)
 586 numElements = cgf.getBuilder().createIntCast(
 587 numElements, cgf.getBuilder().getSIntNTy(sizeWidth));
 588
 589 // If there's a non-1 type size multiplier, then we can do the
 590 // signedness check at the same time as we do the multiply
 591 // because a negative number times anything will cause an
 592 // unsigned overflow. Otherwise, we have to do it here. But at
 593 // least in this case, we can subsume the >= minElements check.
 594 if (typeSizeMultiplier == 1)
 595 hasOverflow = cgf.getBuilder().createCompare(
 596 loc, cir::CmpOpKind::lt, numElements,
 597 cgf.getBuilder().getConstInt(loc, numElements.getType(),
 598 minElements));
 599
 600 numElements = cgf.getBuilder().createIntCast(
 601 numElements, mlir::cast<cir::IntType>(cgf.sizeTy));
 602
 603 // Otherwise, zext up to size_t if necessary.
 604 } else if (numElementsWidth < sizeWidth) {
 605 numElements = cgf.getBuilder().createIntCast(
 606 numElements, mlir::cast<cir::IntType>(cgf.sizeTy));
 607 }
 608
 609 assert(numElements.getType() == cgf.sizeTy);
 610
 611 if (minElements) {
 612 // Don't allow allocation of fewer elements than we have initializers.
 613 if (!hasOverflow) {
 614 mlir::Value minElementsV = cgf.getBuilder().getConstInt(
 615 loc, llvm::APInt(sizeWidth, minElements));
 616 hasOverflow = cgf.getBuilder().createCompare(loc, cir::CmpOpKind::lt,
 617 numElements, minElementsV);
 618 } else if (numElementsWidth > sizeWidth) {
 619 // The other existing overflow subsumes this check.
 620 // We do an unsigned comparison, since any signed value < -1 is
 621 // taken care of either above or below.
 622 mlir::Value minElementsV = cgf.getBuilder().getConstInt(
 623 loc, llvm::APInt(sizeWidth, minElements));
 624 hasOverflow = cgf.getBuilder().createOr(
 625 loc, hasOverflow,
 626 cgf.getBuilder().createCompare(loc, cir::CmpOpKind::lt, numElements,
 627 minElementsV));
 628 }
 629 }
 630
 631 size = numElements;
 632
 633 // Multiply by the type size if necessary. This multiplier
 634 // includes all the factors for nested arrays.
 635 //
 636 // This step also causes numElements to be scaled up by the
 637 // nested-array factor if necessary. Overflow on this computation
 638 // can be ignored because the result shouldn't be used if
 639 // allocation fails.
 640 if (typeSizeMultiplier != 1) {
 641 mlir::Value tsmV = cgf.getBuilder().getConstInt(loc, typeSizeMultiplier);
 642 auto mulOp = cir::MulOverflowOp::create(
 643 cgf.getBuilder(), loc, mlir::cast<cir::IntType>(cgf.sizeTy), size,
 644 tsmV);
 645
 646 if (hasOverflow)
 647 hasOverflow =
 648 cgf.getBuilder().createOr(loc, hasOverflow, mulOp.getOverflow());
 649 else
 650 hasOverflow = mulOp.getOverflow();
 651
 652 size = mulOp.getResult();
 653
 654 // Also scale up numElements by the array size multiplier.
 655 if (arraySizeMultiplier != 1) {
 656 // If the base element type size is 1, then we can re-use the
 657 // multiply we just did.
 658 if (typeSize.isOne()) {
 659 assert(arraySizeMultiplier == typeSizeMultiplier);
 660 numElements = size;
 661
 662 // Otherwise we need a separate multiply.
 663 } else {
 664 mlir::Value asmV =
 665 cgf.getBuilder().getConstInt(loc, arraySizeMultiplier);
 666 numElements = cgf.getBuilder().createMul(loc, numElements, asmV);
 667 }
 668 }
 669 } else {
 670 // numElements doesn't need to be scaled.
 671 assert(arraySizeMultiplier == 1);
 672 }
 673
 674 // Add in the cookie size if necessary.
 675 if (cookieSize != 0) {
 676 sizeWithoutCookie = size;
 677 mlir::Value cookieSizeV = cgf.getBuilder().getConstInt(loc, cookieSize);
 678 auto addOp = cir::AddOverflowOp::create(
 679 cgf.getBuilder(), loc, mlir::cast<cir::IntType>(cgf.sizeTy), size,
 680 cookieSizeV);
 681
 682 if (hasOverflow)
 683 hasOverflow =
 684 cgf.getBuilder().createOr(loc, hasOverflow, addOp.getOverflow());
 685 else
 686 hasOverflow = addOp.getOverflow();
 687
 688 size = addOp.getResult();
 689 }
 690
 691 // If we had any possibility of dynamic overflow, make a select to
 692 // overwrite 'size' with an all-ones value, which should cause
 693 // operator new to throw.
 694 if (hasOverflow) {
 695 mlir::Value allOnes =
 696 cgf.getBuilder().getConstInt(loc, llvm::APInt::getAllOnes(sizeWidth));
 697 size = cgf.getBuilder().createSelect(loc, hasOverflow, allOnes, size);
 698 }
 699 }
 700
 701 if (cookieSize == 0)
 702 sizeWithoutCookie = size;
 703 else
 704 assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");
 705
 706 return size;
 707}
708
709/// Emit a call to an operator new or operator delete function, as implicitly
710/// created by new-expressions and delete-expressions.
// NOTE(review): the signature's first line (original 711) is missing from
// this rendering — verify against the upstream file.
 712 const FunctionDecl *calleeDecl,
 713 const FunctionProtoType *calleeType,
 714 const CallArgList &args) {
 715 cir::CIRCallOpInterface callOrTryCall;
 716 cir::FuncOp calleePtr = cgf.cgm.getAddrOfFunction(calleeDecl);
 717 CIRGenCallee callee =
 718 CIRGenCallee::forDirect(calleePtr, GlobalDecl(calleeDecl));
 719 RValue rv =
 720 cgf.emitCall(cgf.cgm.getTypes().arrangeFreeFunctionCall(args, calleeType),
 721 callee, ReturnValueSlot(), args, &callOrTryCall);
 722
 723 /// C++1y [expr.new]p10:
 724 /// [In a new-expression,] an implementation is allowed to omit a call
 725 /// to a replaceable global allocation function.
 726 ///
 727 /// We model such elidable calls with the 'builtin' attribute.
 // NOTE(review): confirm the polarity of the getNoBuiltinAttrName() check
 // against upstream — a negation may be missing from this rendering.
 728 if (calleeDecl->isReplaceableGlobalAllocationFunction() && calleePtr &&
 729 calleePtr->hasAttr(cir::CIRDialect::getNoBuiltinAttrName())) {
 730 callOrTryCall->setAttr(cir::CIRDialect::getBuiltinAttrName(),
 731 mlir::UnitAttr::get(callOrTryCall->getContext()));
 732 }
 733
 734 return rv;
 735}
736
// Emit a direct call to the predeclared global `operator new`/`operator
// delete`: looks the operator up in the translation unit by name and type,
// then emits a call to the matching declaration.
// NOTE(review): the signature's first line(s) (original ~737, 739) and the
// `name` declaration (original 745) are missing from this rendering — verify
// against the upstream file.
 738 const CallExpr *callExpr,
 740 CallArgList args;
 741 emitCallArgs(args, type, callExpr->arguments());
 742 // Find the allocation or deallocation function that we're calling.
 743 ASTContext &astContext = getContext();
 744 assert(op == OO_New || op == OO_Delete);
 746
 747 clang::DeclContextLookupResult lookupResult =
 748 astContext.getTranslationUnitDecl()->lookup(name);
 // Scan all declarations with the operator's name for one whose function
 // type matches exactly.
 749 for (const NamedDecl *decl : lookupResult) {
 750 if (const auto *funcDecl = dyn_cast<FunctionDecl>(decl)) {
 751 if (astContext.hasSameType(funcDecl->getType().getTypePtr(), type)) {
 752 if (sanOpts.has(SanitizerKind::AllocToken)) {
 753 // TODO: Set !alloc_token metadata.
 755 cgm.errorNYI("Alloc token sanitizer not yet supported!");
 756 }
 757
 758 // Emit the call to operator new/delete.
 759 return emitNewDeleteCall(*this, funcDecl, type, args);
 760 }
 761 }
 762 }
 763
 // The standard guarantees these operators are implicitly declared.
 764 llvm_unreachable("predeclared global operator new/delete is missing");
 765}
766
// File-local machinery for the EH cleanup that frees storage when a
// new-expression's initializer throws.
// NOTE(review): two statements in emit() are missing from this rendering
// (original lines 835 and 849 — presumably the type-aware allocation
// handling) — verify against the upstream file.
767namespace {
// One saved placement argument (value + type), stored as a trailing object.
768template <typename Traits> struct PlacementArg {
 769 typename Traits::RValueTy argValue;
 770 QualType argType;
 771};
 772
 773/// A cleanup to call the given 'operator delete' function upon abnormal
 774/// exit from a new expression. Templated on a traits type that deals with
 775/// ensuring that the arguments dominate the cleanup if necessary.
 776template <typename Traits>
 777class CallDeleteDuringNew final
 778 : public EHScopeStack::Cleanup,
 779 private llvm::TrailingObjects<CallDeleteDuringNew<Traits>,
 780 PlacementArg<Traits>> {
 781 using TrailingObj =
 782 llvm::TrailingObjects<CallDeleteDuringNew<Traits>, PlacementArg<Traits>>;
 783 friend TrailingObj;
 784 using TrailingObj::getTrailingObjects;
 785
 786 /// Type used to hold llvm::Value*s.
 787 typedef typename Traits::ValueTy ValueTy;
 788 /// Type used to hold RValues.
 789 typedef typename Traits::RValueTy RValueTy;
 790
 // Bitfields: 30 bits for the placement-arg count, 1 bit for whether the
 // alignment must be forwarded to the placement delete.
 791 unsigned numPlacementArgs : 30;
 792 LLVM_PREFERRED_TYPE(AlignedAllocationMode)
 793 unsigned passAlignmentToPlacementDelete : 1;
 794 const FunctionDecl *operatorDelete;
 795 ValueTy ptr;
 796 ValueTy allocSize;
 797 CharUnits allocAlign;
 798
 799 PlacementArg<Traits> *getPlacementArgs() { return getTrailingObjects(); }
 800
 801 void setPlacementArg(unsigned i, RValueTy argValue, QualType argType) {
 802 assert(i < numPlacementArgs && "index out of range");
 803 getPlacementArgs()[i] = {argValue, argType};
 804 }
 805
 806public:
 // Extra trailing storage needed when this cleanup is pushed with
 // pushCleanupWithExtra (one PlacementArg per placement argument).
 807 static size_t getExtraSize(size_t numPlacementArgs) {
 808 return TrailingObj::template additionalSizeToAlloc<PlacementArg<Traits>>(
 809 numPlacementArgs);
 810 }
 811
 812 CallDeleteDuringNew(size_t numPlacementArgs,
 813 const FunctionDecl *operatorDelete, ValueTy ptr,
 814 ValueTy allocSize,
 815 const ImplicitAllocationParameters &iap,
 816 CharUnits allocAlign, const CallArgList *newArgs,
 817 unsigned numNonPlacementArgs, CIRGenFunction *cgf,
 818 mlir::Location loc)
 819 : numPlacementArgs(numPlacementArgs),
 820 passAlignmentToPlacementDelete(isAlignedAllocation(iap.PassAlignment)),
 821 operatorDelete(operatorDelete), ptr(ptr), allocSize(allocSize),
 822 allocAlign(allocAlign) {
 // Copy the placement arguments (which follow the implicit args in the
 // new-expression's argument list) into the trailing storage.
 823 for (unsigned i = 0, n = numPlacementArgs; i != n; ++i) {
 824 const CallArg &arg = (*newArgs)[i + numNonPlacementArgs];
 825 setPlacementArg(i, arg.getRValue(*cgf, loc), arg.ty);
 826 }
 827 }
 828
 // Build and emit the 'operator delete' call when the cleanup fires.
 829 void emit(CIRGenFunction &cgf, Flags flags) override {
 830 const auto *fpt = operatorDelete->getType()->castAs<FunctionProtoType>();
 831 CallArgList deleteArgs;
 832
 833 unsigned firstNonTypeArg = 0;
 834 TypeAwareAllocationMode typeAwareDeallocation = TypeAwareAllocationMode::No;
 836
 837 // The first argument after type-identity parameter (if any) is always
 838 // a void* (or C* for a destroying operator delete for class type C).
 839 deleteArgs.add(Traits::get(cgf, ptr), fpt->getParamType(firstNonTypeArg));
 840
 841 // Figure out what other parameters we should be implicitly passing.
 842 UsualDeleteParams params;
 843 if (numPlacementArgs) {
 844 // A placement deallocation function is implicitly passed an alignment
 845 // if the placement allocation function was, but is never passed a size.
 846 params.Alignment =
 847 alignedAllocationModeFromBool(passAlignmentToPlacementDelete);
 848 params.TypeAwareDelete = typeAwareDeallocation;
 850 } else {
 851 // For a non-placement new-expression, 'operator delete' can take a
 852 // size and/or an alignment if it has the right parameters.
 853 params = operatorDelete->getUsualDeleteParams();
 854 }
 855
 856 assert(!params.DestroyingDelete &&
 857 "should not call destroying delete in a new-expression");
 858
 859 // The second argument can be a std::size_t (for non-placement delete).
 860 if (params.Size)
 861 deleteArgs.add(Traits::get(cgf, allocSize),
 862 cgf.getContext().getSizeType());
 863
 864 // The next (second or third) argument can be a std::align_val_t, which
 865 // is an enum whose underlying type is std::size_t.
 866 // FIXME: Use the right type as the parameter type. Note that in a call
 867 // to operator delete(size_t, ...), we may not have it available.
 868 if (isAlignedAllocation(params.Alignment))
 869 cgf.cgm.errorNYI("CallDeleteDuringNew: aligned allocation");
 870
 871 // Pass the rest of the arguments, which must match exactly.
 872 for (unsigned i = 0; i != numPlacementArgs; ++i) {
 873 auto arg = getPlacementArgs()[i];
 874 deleteArgs.add(Traits::get(cgf, arg.argValue), arg.argType);
 875 }
 876
 877 // Call 'operator delete'.
 878 emitNewDeleteCall(cgf, operatorDelete, fpt, deleteArgs);
 879 }
 880};
881} // namespace
882
883/// Enter a cleanup to call 'operator delete' if the initializer in a
884/// new-expression throws.
// NOTE(review): the signature's first line (original 885) and the cleanup
// kind / operator-delete arguments (original lines 903, 905) are missing
// from this rendering — verify against the upstream file.
 886 Address newPtr, mlir::Value allocSize,
 887 CharUnits allocAlign,
 888 const CallArgList &newArgs) {
 889 unsigned numNonPlacementArgs = e->getNumImplicitArgs();
 890
 891 // If we're not inside a conditional branch, then the cleanup will
 892 // dominate and we can do the easier (and more efficient) thing.
 893 if (!cgf.isInConditionalBranch()) {
 // Traits for the unconditional case: values already dominate the
 // cleanup, so they can be used directly.
 894 struct DirectCleanupTraits {
 895 typedef mlir::Value ValueTy;
 896 typedef RValue RValueTy;
 897 static RValue get(CIRGenFunction &, ValueTy v) { return RValue::get(v); }
 898 static RValue get(CIRGenFunction &, RValueTy v) { return v; }
 899 };
 900
 901 typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;
 902
 904 cgf.ehStack.pushCleanupWithExtra<DirectCleanup>(
 906 newPtr.getPointer(), allocSize, e->implicitAllocationParameters(),
 907 allocAlign, &newArgs, numNonPlacementArgs, &cgf,
 908 cgf.getLoc(e->getSourceRange()));
 909
 910 return;
 911 }
 912
 // Conditional case requires saving values across the branch; not yet
 // implemented in CIR.
 913 cgf.cgm.errorNYI(e->getSourceRange(),
 914 "enterNewDeleteCleanup: conditional branch");
 915}
916
// Store the result of evaluating a single initializer expression into one
// newly-allocated object at `newPtr`, dispatching on the CIR evaluation kind
// of `allocType` (scalar / complex / aggregate).
917 static void storeAnyExprIntoOneUnit(CIRGenFunction &cgf, const Expr *init,
918 QualType allocType, Address newPtr,
919 AggValueSlot::Overlap_t mayOverlap) {
920 // FIXME: Refactor with emitExprAsInit.
921 switch (cgf.getEvaluationKind(allocType)) {
922 case cir::TEK_Scalar:
923 cgf.emitScalarInit(init, cgf.getLoc(init->getSourceRange()),
924 cgf.makeAddrLValue(newPtr, allocType), false);
925 return;
926 case cir::TEK_Complex:
927 cgf.emitComplexExprIntoLValue(init, cgf.makeAddrLValue(newPtr, allocType),
928 /*isInit*/ true);
929 return;
930 case cir::TEK_Aggregate: {
// NOTE(review): orig. lines 931-933 and 935 (the AggValueSlot `slot`
// construction, e.g. AggValueSlot::forAddr(...)) were dropped by the
// scrape — `slot` below is otherwise undeclared. Restore from upstream.
934 newPtr, allocType.getQualifiers(), AggValueSlot::IsDestructed,
936 cgf.emitAggExpr(init, slot);
937 return;
938 }
939 }
940 llvm_unreachable("bad evaluation kind");
941}
942
// Emit the initialization for an array new-expression: explicit init-list
// elements first, then the remaining elements via constructor loop or memset.
// NOTE(review): the function header (orig. line 943,
// `void CIRGenFunction::emitNewArrayInitializer(`) was dropped by the
// HTML scrape — restore from upstream before compiling.
944 const CXXNewExpr *e, QualType elementType, mlir::Type elementTy,
945 Address beginPtr, mlir::Value numElements,
946 mlir::Value allocSizeWithoutCookie) {
947 // If we have a type with trivial initialization and no initializer,
948 // there's nothing to do.
949 if (!e->hasInitializer())
950 return;
951
952 Address curPtr = beginPtr;
953
954 unsigned initListElements = 0;
955
956 const Expr *init = e->getInitializer();
957 Address endOfInit = Address::invalid();
958 QualType::DestructionKind dtorKind = elementType.isDestructedType();
// NOTE(review): orig. line 959 (dropped by the scrape) is missing here.
960
961 // Attempt to perform zero-initialization using memset.
// Returns false when the element type is not zero-initializable; otherwise
// memsets everything after the already-initialized prefix and returns true.
962 auto tryMemsetInitialization = [&]() -> bool {
963 mlir::Location loc = numElements.getLoc();
964
965 // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
966 // we can initialize with a memset to -1.
967 if (!cgm.getTypes().isZeroInitializable(elementType))
968 return false;
969
970 // Optimization: since zero initialization will just set the memory
971 // to all zeroes, generate a single memset to do it in one shot.
972
973 // Subtract out the size of any elements we've already initialized.
974 auto remainingSize = allocSizeWithoutCookie;
975 if (initListElements) {
976 // We know this can't overflow; we check this when doing the allocation.
977 unsigned initializedSize =
978 getContext().getTypeSizeInChars(elementType).getQuantity() *
979 initListElements;
980 cir::ConstantOp initSizeOp =
981 builder.getConstInt(loc, remainingSize.getType(), initializedSize);
982 remainingSize = builder.createSub(loc, remainingSize, initSizeOp);
983 }
984
985 // Create the memset.
986 mlir::Value castOp =
987 builder.createPtrBitcast(curPtr.getPointer(), cgm.voidTy);
988 builder.createMemSet(loc, castOp, builder.getConstInt(loc, cgm.uInt8Ty, 0),
989 remainingSize);
990 return true;
991 };
992
// Classify the initializer: braced list, C++20 paren list, or (via
// IgnoreParenImpCasts) a string literal / @encode expression.
993 const InitListExpr *ile = dyn_cast<InitListExpr>(init);
994 const CXXParenListInitExpr *cplie = nullptr;
995 const StringLiteral *sl = nullptr;
996 const ObjCEncodeExpr *ocee = nullptr;
997 const Expr *ignoreParen = nullptr;
998 if (!ile) {
999 ignoreParen = init->IgnoreParenImpCasts();
1000 cplie = dyn_cast<CXXParenListInitExpr>(ignoreParen);
1001 sl = dyn_cast<StringLiteral>(ignoreParen);
1002 ocee = dyn_cast<ObjCEncodeExpr>(ignoreParen);
1003 }
1004 // If the initializer is an initializer list, first do the explicit elements.
1005 if (ile || cplie || sl || ocee) {
1006 // Initializing from a (braced) string literal is a special case; the init
1007 // list element does not initialize a (single) array element.
1008 if ((ile && ile->isStringLiteralInit()) || sl || ocee) {
1009 cgm.errorNYI(ile->getSourceRange(),
1010 "emitNewArrayInitializer: string literal init");
1011 return;
1012 }
1013
1014 ArrayRef<const Expr *> initExprs =
1015 ile ? ile->inits() : cplie->getInitExprs();
1016 initListElements = initExprs.size();
1017
1018 // If this is a multi-dimensional array new, we will initialize multiple
1019 // elements with each init list element.
1020 QualType allocType = e->getAllocatedType();
1021 if (const ConstantArrayType *cat = dyn_cast_or_null<ConstantArrayType>(
1022 allocType->getAsArrayTypeUnsafe())) {
1023 (void)cat;
1024 cgm.errorNYI(ile->getSourceRange(),
1025 "emitNewArrayInitializer: constant array init");
1026 return;
1027 }
1028
1029 // Enter a partial-destruction Cleanup if necessary.
1030 if (dtorKind) {
1031 cgm.errorNYI(ile->getSourceRange(),
1032 "emitNewArrayInitializer: init requires dtor");
1033 return;
1034 }
1035
// Emit the explicit elements one at a time, bumping curPtr past each.
1036 CharUnits elementSize = getContext().getTypeSizeInChars(elementType);
1037 CharUnits startAlign = curPtr.getAlignment();
1038 unsigned i = 0;
1039 for (const Expr *ie : initExprs) {
1040 // Tell the cleanup that it needs to destroy up to this
1041 // element. TODO: some of these stores can be trivially
1042 // observed to be unnecessary.
1043 if (endOfInit.isValid()) {
1044 cgm.errorNYI(ie->getSourceRange(),
1045 "emitNewArrayInitializer: update dtor cleanup ptr");
1046 return;
1047 }
1048 // FIXME: If the last initializer is an incomplete initializer list for
1049 // an array, and we have an array filler, we can fold together the two
1050 // initialization loops.
// NOTE(review): orig. line 1052 (dropped by the scrape — the trailing
// argument of this call, presumably the AggValueSlot overlap flag) is
// missing; restore from upstream.
1051 storeAnyExprIntoOneUnit(*this, ie, ie->getType(), curPtr,
1053 mlir::Location loc = getLoc(ie->getExprLoc());
1054 mlir::Value castOp = builder.createPtrBitcast(
1055 curPtr.getPointer(), convertTypeForMem(allocType));
1056 mlir::Value offsetOp = builder.getSignedInt(loc, 1, /*width=*/32);
1057 mlir::Value dataPtr = builder.createPtrStride(loc, castOp, offsetOp);
1058 curPtr = Address(dataPtr, curPtr.getElementType(),
1059 startAlign.alignmentAtOffset((++i) * elementSize));
1060 }
1061
1062 // The remaining elements are filled with the array filler expression.
1063 init = ile ? ile->getArrayFiller() : cplie->getArrayFiller();
1064
1065 // Extract the initializer for the individual array elements by pulling
1066 // out the array filler from all the nested initializer lists. This avoids
1067 // generating a nested loop for the initialization.
1068 while (init && init->getType()->isConstantArrayType()) {
1069 auto *subIle = dyn_cast<InitListExpr>(init);
1070 if (!subIle)
1071 break;
1072 assert(subIle->getNumInits() == 0 && "explicit inits in array filler?");
1073 init = subIle->getArrayFiller();
1074 }
1075
1076 // Switch back to initializing one base element at a time.
1077 curPtr = curPtr.withElementType(builder, beginPtr.getElementType());
1078 }
1079
1080 // If all elements have already been initialized, skip any further
1081 // initialization.
1082 auto constOp = mlir::dyn_cast<cir::ConstantOp>(numElements.getDefiningOp());
1083 if (constOp) {
1084 auto constIntAttr = mlir::dyn_cast<cir::IntAttr>(constOp.getValue());
1085 // Just skip out if the constant count is zero.
1086 if (constIntAttr && constIntAttr.getUInt() <= initListElements)
1087 return;
1088 }
1089
1090 assert(init && "have trailing elements to initialize but no initializer");
1091
1092 // If this is a constructor call, try to optimize it out, and failing that
1093 // emit a single loop to initialize all remaining elements.
1094 if (const CXXConstructExpr *cce = dyn_cast<CXXConstructExpr>(init)) {
1095 CXXConstructorDecl *ctor = cce->getConstructor();
1096 if (ctor->isTrivial()) {
1097 // If new expression did not specify value-initialization, then there
1098 // is no initialization.
1099 if (!cce->requiresZeroInitialization())
1100 return;
1101
1102 cgm.errorNYI(cce->getSourceRange(),
1103 "emitNewArrayInitializer: trivial ctor zero-init");
1104 return;
1105 }
1106
1107 // Store the new Cleanup position for irregular Cleanups.
1108 //
1109 // FIXME: Share this cleanup with the constructor call emission rather than
1110 // having it create a cleanup of its own.
1111 if (endOfInit.isValid())
1112 builder.createStore(getLoc(e->getSourceRange()), curPtr.emitRawPointer(),
1113 endOfInit);
1114
1115 mlir::Type initType = convertType(cce->getType());
1116 // Emit a constructor call loop to initialize the remaining elements.
1117 if (initListElements) {
1118 // If the number of elements is a constant, we will have already gotten
1119 // the constant op above. Here we use it to get the number of remaining
1120 // elements as a new constant.
1121 if (constOp) {
1122 auto constIntAttr = mlir::cast<cir::IntAttr>(constOp.getValue());
1123 uint64_t numRemainingElements =
1124 constIntAttr.getUInt() - initListElements;
1125 numElements =
1126 builder.getConstInt(getLoc(e->getSourceRange()),
1127 numElements.getType(), numRemainingElements);
1128 // Currently, the AST gives us a pointer to the element type here
1129 // rather than an array. That's inconsistent with what it does
1130 // without an explicit initializer list, so we need to create an
1131 // array type here. That will decay back to a pointer when we lower
1132 // the cir.array.ctor op, but we need an array type for the initial
1133 // representation.
1134 if (!mlir::isa<cir::ArrayType>(initType))
1135 initType = cir::ArrayType::get(initType, numRemainingElements);
1136 } else {
1137 cgm.errorNYI(e->getSourceRange(),
1138 "emitNewArrayInitializer: numRemainingElements with "
1139 "non-constant count");
1140 return;
1141 }
1142 }
1143
1144 curPtr = curPtr.withElementType(builder, initType);
1145 emitCXXAggrConstructorCall(ctor, numElements, curPtr, cce,
1146 /*newPointerIsChecked=*/true,
1147 cce->requiresZeroInitialization());
1148 if (getContext().getTargetInfo().emitVectorDeletingDtors(
1149 getContext().getLangOpts())) {
1150 cgm.errorNYI(e->getSourceRange(),
1151 "emitNewArrayInitializer: emitVectorDeletingDtors");
1152 }
1153 return;
1154 }
1155
1156 // If this is value-initialization, we can usually use memset.
1157 if (isa<ImplicitValueInitExpr>(init)) {
1158 if (tryMemsetInitialization())
1159 return;
1160 cgm.errorNYI(init->getSourceRange(),
1161 "emitNewArrayInitializer: implicit value init");
1162 return;
1163 }
1164
1165 cgm.errorNYI(init->getSourceRange(),
1166 "emitNewArrayInitializer: unsupported initializer");
1167 return;
1168}
1169
// Dispatch a new-expression's initialization: array news go through
// emitNewArrayInitializer; scalar news store the single initializer directly.
// NOTE(review): orig. lines 1170 (the `static void emitNewInitializer(`
// header), 1174, and 1180 (trailing argument of the call below) were dropped
// by the HTML scrape — restore from upstream before compiling. The symbol
// index gives the full signature:
//   static void emitNewInitializer(CIRGenFunction &cgf, const CXXNewExpr *e,
//                                  QualType elementType, mlir::Type elementTy,
//                                  Address newPtr, mlir::Value numElements,
//                                  mlir::Value allocSizeWithoutCookie)
1171 QualType elementType, mlir::Type elementTy,
1172 Address newPtr, mlir::Value numElements,
1173 mlir::Value allocSizeWithoutCookie) {
1175 if (e->isArray()) {
1176 cgf.emitNewArrayInitializer(e, elementType, elementTy, newPtr, numElements,
1177 allocSizeWithoutCookie);
1178 } else if (const Expr *init = e->getInitializer()) {
1179 storeAnyExprIntoOneUnit(cgf, init, e->getAllocatedType(), newPtr,
1181 }
1182}
1183
// Emit a call to a C++ destructor: builds the `this` (plus optional implicit,
// e.g. VTT) argument list and dispatches through emitCall with the structor's
// arranged function info.
// NOTE(review): orig. lines 1184 (the member-function header, presumably
// `RValue CIRGenFunction::emitCXXDestructorCall(`), 1194, and 1200 were
// dropped by the HTML scrape — restore from upstream before compiling.
1185 GlobalDecl dtor, const CIRGenCallee &callee, mlir::Value thisVal,
1186 QualType thisTy, mlir::Value implicitParam, QualType implicitParamTy,
1187 const CallExpr *ce) {
1188 const CXXMethodDecl *dtorDecl = cast<CXXMethodDecl>(dtor.getDecl());
1189
1190 assert(!thisTy.isNull());
1191 assert(thisTy->getAsCXXRecordDecl() == dtorDecl->getParent() &&
1192 "Pointer/Object mixup");
1193
1195
1196 CallArgList args;
1197 commonBuildCXXMemberOrOperatorCall(*this, dtorDecl, thisVal, implicitParam,
1198 implicitParamTy, ce, args, nullptr);
1199 assert((ce || dtor.getDecl()) && "expected source location provider");
// Use the call expression's location when available; otherwise fall back to
// the destructor declaration's source range.
1201 return emitCall(cgm.getTypes().arrangeCXXStructorDeclaration(dtor), callee,
1202 ReturnValueSlot(), args, nullptr,
1203 ce ? getLoc(ce->getExprLoc())
1204 : getLoc(dtor.getDecl()->getSourceRange()));
1205}
1206
// Emit a pseudo-destructor call (e.g. `p->~int()`): per [expr.pseudo]p1 the
// only effect is evaluating the base expression, so emit it for side effects
// and return a null RValue.
// NOTE(review): orig. lines 1207-1208 (the function header) and 1211 were
// dropped by the HTML scrape — restore from upstream before compiling.
1209 QualType destroyedType = expr->getDestroyedType();
1210 if (destroyedType.hasStrongOrWeakObjCLifetime()) {
1212 cgm.errorNYI(expr->getExprLoc(),
1213 "emitCXXPseudoDestructorExpr: Objective-C lifetime is NYI");
1214 } else {
1215 // C++ [expr.pseudo]p1:
1216 // The result shall only be used as the operand for the function call
1217 // operator (), and the result of such a call has type void. The only
1218 // effect is the evaluation of the postfix-expression before the dot or
1219 // arrow.
1220 emitIgnoredExpr(expr->getBase());
1221 }
1222
1223 return RValue::get(nullptr);
1224}
1225
1226namespace {
1227/// Calls the given 'operator delete' on a single object.
1228struct CallObjectDelete final : EHScopeStack::Cleanup {
1229 mlir::Value ptr;
1230 const FunctionDecl *operatorDelete;
1231 QualType elementType;
1232
1233 CallObjectDelete(mlir::Value ptr, const FunctionDecl *operatorDelete,
1234 QualType elementType)
1235 : ptr(ptr), operatorDelete(operatorDelete), elementType(elementType) {}
1236
1237 void emit(CIRGenFunction &cgf, Flags flags) override {
1238 cgf.emitDeleteCall(operatorDelete, ptr, elementType);
1239 }
1240};
1241} // namespace
1242
1243 /// Emit the code for deleting a single object.
// NOTE(review): orig. lines 1244 (the rest of the function header,
// `static void emitObjectDelete(CIRGenFunction &cgf, const CXXDeleteExpr *de,`
// per the symbol index), 1251, 1264, 1279, and 1283 were dropped by the HTML
// scrape — restore from upstream before compiling.
1245 Address ptr, QualType elementType) {
1246 // C++11 [expr.delete]p3:
1247 // If the static type of the object to be deleted is different from its
1248 // dynamic type, the static type shall be a base class of the dynamic type
1249 // of the object to be deleted and the static type shall have a virtual
1250 // destructor or the behavior is undefined.
1252
1253 const FunctionDecl *operatorDelete = de->getOperatorDelete();
1254 assert(!operatorDelete->isDestroyingOperatorDelete());
1255
1256 // Find the destructor for the type, if applicable. If the
1257 // destructor is virtual, we'll just emit the vcall and return.
1258 const CXXDestructorDecl *dtor = nullptr;
1259 if (const auto *rd = elementType->getAsCXXRecordDecl()) {
1260 if (rd->hasDefinition() && !rd->hasTrivialDestructor()) {
1261 dtor = rd->getDestructor();
1262
1263 if (dtor->isVirtual()) {
// Virtual destructor: the ABI emits the virtual delete and we are done.
1265 cgf.cgm.getCXXABI().emitVirtualObjectDelete(cgf, de, ptr, elementType,
1266 dtor);
1267 return;
1268 }
1269 }
1270 }
1271
1272 // Make sure that we call delete even if the dtor throws.
1273 // This doesn't have to a conditional cleanup because we're going
1274 // to pop it off in a second.
1275 cgf.ehStack.pushCleanup<CallObjectDelete>(
1276 NormalAndEHCleanup, ptr.getPointer(), operatorDelete, elementType);
1277
// NOTE(review): the dtor-call line (orig. 1279, presumably
// cgf.emitCXXDestructorCall(...)) is missing below due to the scrape.
1278 if (dtor) {
1280 /*ForVirtualBase=*/false,
1281 /*Delegating=*/false, ptr, elementType);
1282 } else if (elementType.getObjCLifetime()) {
1284 cgf.cgm.errorNYI(de->getSourceRange(), "emitObjectDelete: ObjCLifetime");
1285 }
1286
// Popping the cleanup runs the 'operator delete' call on the normal path.
1287 cgf.popCleanupBlock();
1288}
1289
// Emit a delete-expression: null-check the pointer, then either emit a
// cir.delete.array op (array form) or a single-object delete.
// NOTE(review): orig. lines 1290 (the member-function header,
// `void CIRGenFunction::emitCXXDeleteExpr(const CXXDeleteExpr *e) {`
// presumably), 1292, 1301, 1320, and 1345 were dropped by the HTML scrape —
// restore from upstream before compiling.
1291 const Expr *arg = e->getArgument();
1293
1294 // Null check the pointer.
1295 //
1296 // We could avoid this null check if we can determine that the object
1297 // destruction is trivial and doesn't require an array cookie; we can
1298 // unconditionally perform the operator delete call in that case. For now, we
1299 // assume that deleted pointers are null rarely enough that it's better to
1300 // keep the branch. This might be worth revisiting for a -O0 code size win.
1302 cir::YieldOp thenYield;
1303 mlir::Value notNull = builder.createPtrIsNotNull(ptr.getPointer());
1304 cir::IfOp::create(builder, getLoc(e->getExprLoc()), notNull,
1305 /*withElseRegion=*/false,
1306 /*thenBuilder=*/
1307 [&](mlir::OpBuilder &b, mlir::Location loc) {
1308 thenYield = builder.createYield(loc);
1309 });
1310
1311 // Emit the rest of the CIR inside the if-op's then region, but restore the
1312 // insertion point to the point after the if when this function returns.
1313 mlir::OpBuilder::InsertionGuard guard(builder);
1314 builder.setInsertionPoint(thenYield);
1315
1316 QualType deleteTy = e->getDestroyedType();
1317
1318 // A destroying operator delete overrides the entire operation of the
1319 // delete expression.
// NOTE(review): the condition line (orig. 1320) guarding this NYI branch is
// missing due to the scrape.
1321 cgm.errorNYI(e->getSourceRange(),
1322 "emitCXXDeleteExpr: destroying operator delete");
1323 return;
1324 }
1325
1326 // We might be deleting a pointer to array.
1327 deleteTy = getContext().getBaseElementType(deleteTy);
1328 ptr = ptr.withElementType(builder, convertTypeForMem(deleteTy));
1329
1330 if (e->isArrayForm() &&
1331 cgm.getASTContext().getTargetInfo().emitVectorDeletingDtors(
1332 cgm.getASTContext().getLangOpts())) {
1333 cgm.errorNYI(e->getSourceRange(),
1334 "emitCXXDeleteExpr: emitVectorDeletingDtors");
1335 }
1336
1337 if (e->isArrayForm()) {
// Array delete: package the usual-delete parameter info and an optional
// element destructor into a cir.delete.array op.
1338 const FunctionDecl *operatorDelete = e->getOperatorDelete();
1339 cir::FuncOp operatorDeleteFn = cgm.getAddrOfFunction(operatorDelete);
1340 auto deleteFn =
1341 mlir::FlatSymbolRefAttr::get(operatorDeleteFn.getSymNameAttr());
1342 UsualDeleteParams udp = operatorDelete->getUsualDeleteParams();
1343 auto deleteParams = cir::UsualDeleteParamsAttr::get(
1344 builder.getContext(), udp.Size, isAlignedAllocation(udp.Alignment),
1346
1347 mlir::FlatSymbolRefAttr elementDtor;
1348 if (const auto *rd = deleteTy->getAsCXXRecordDecl()) {
1349 if (rd->hasDefinition() && !rd->hasTrivialDestructor()) {
1350 const CXXDestructorDecl *dtor = rd->getDestructor();
1351 if (dtor->getType()->castAs<FunctionProtoType>()->canThrow())
1352 cgm.errorNYI(e->getSourceRange(),
1353 "emitCXXDeleteExpr: throwing destructor");
1354 cir::FuncOp dtorFn =
1355 cgm.getAddrOfCXXStructor(GlobalDecl(dtor, Dtor_Complete));
1356 elementDtor = mlir::FlatSymbolRefAttr::get(builder.getContext(),
1357 dtorFn.getSymNameAttr());
1358 }
1359 }
1360
1361 cir::DeleteArrayOp::create(builder, ptr.getPointer().getLoc(),
1362 ptr.getPointer(), deleteFn, deleteParams,
1363 elementDtor);
1364 } else {
1365 emitObjectDelete(*this, e, ptr, deleteTy);
1366 }
1367}
1368
// Emit a new-expression: compute the allocation size (plus any array cookie),
// call the allocation function, set up the exceptional 'operator delete'
// cleanup, run the initializer, then return the typed result pointer.
// NOTE(review): this doxygen-scraped view drops hyperlinked source lines; the
// member-function header (orig. line 1369) and lines 1371, 1382, 1416, 1447,
// 1475, 1486, 1490, 1534, and 1551 are missing from this block — restore
// from upstream before compiling.
1370 // The element type being allocated.
1372
1373 // 1. Build a call to the allocation function.
1374 FunctionDecl *allocator = e->getOperatorNew();
1375
1376 // If there is a brace-initializer, cannot allocate fewer elements than inits.
1377 unsigned minElements = 0;
1378 if (e->isArray() && e->hasInitializer()) {
1379 const InitListExpr *ile = dyn_cast<InitListExpr>(e->getInitializer());
1380 if (ile && ile->isStringLiteralInit())
1381 minElements =
1383 ->getSize()
1384 .getZExtValue();
1385 else if (ile)
1386 minElements = ile->getNumInits();
1387 }
1388
1389 mlir::Value numElements = nullptr;
1390 mlir::Value allocSizeWithoutCookie = nullptr;
1391 mlir::Value allocSize = emitCXXNewAllocSize(
1392 *this, e, minElements, numElements, allocSizeWithoutCookie);
1393 CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);
1394
1395 // Emit the allocation call.
1396 Address allocation = Address::invalid();
1397 CallArgList allocatorArgs;
1398 if (allocator->isReservedGlobalPlacementOperator()) {
1399 // If the allocator is a global placement operator, just
1400 // "inline" it directly.
1401 assert(e->getNumPlacementArgs() == 1);
1402 const Expr *arg = *e->placement_arguments().begin();
1403
1404 LValueBaseInfo baseInfo;
1405 allocation = emitPointerWithAlignment(arg, &baseInfo);
1406
1407 // The pointer expression will, in many cases, be an opaque void*.
1408 // In these cases, discard the computed alignment and use the
1409 // formal alignment of the allocated type.
1410 if (baseInfo.getAlignmentSource() != AlignmentSource::Decl)
1411 allocation = allocation.withAlignment(allocAlign);
1412
1413 // Set up allocatorArgs for the call to operator delete if it's not
1414 // the reserved global operator.
1415 if (e->getOperatorDelete() &&
1417 cgm.errorNYI(e->getSourceRange(),
1418 "emitCXXNewExpr: reserved placement new with delete");
1419 }
1420 } else {
1421 const FunctionProtoType *allocatorType =
1422 allocator->getType()->castAs<FunctionProtoType>();
1423 unsigned paramsToSkip = 0;
1424
1425 // The allocation size is the first argument.
1426 QualType sizeType = getContext().getSizeType();
1427 allocatorArgs.add(RValue::get(allocSize), sizeType);
1428 ++paramsToSkip;
1429
1430 if (allocSize != allocSizeWithoutCookie) {
1431 CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
1432 allocAlign = std::max(allocAlign, cookieAlign);
1433 }
1434
1435 // The allocation alignment may be passed as the second argument.
1436 if (e->passAlignment()) {
1437 cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: pass alignment");
1438 }
1439
1440 // FIXME: Why do we not pass a CalleeDecl here?
1441 emitCallArgs(allocatorArgs, allocatorType, e->placement_arguments(),
1442 AbstractCallee(), paramsToSkip);
1443 RValue rv =
1444 emitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
1445
1446 // Set !heapallocsite metadata on the call to operator new.
1448
1449 // If this was a call to a global replaceable allocation function that does
1450 // not take an alignment argument, the allocator is known to produce storage
1451 // that's suitably aligned for any object that fits, up to a known
1452 // threshold. Otherwise assume it's suitably aligned for the allocated type.
1453 CharUnits allocationAlign = allocAlign;
1454 if (!e->passAlignment() &&
1455 allocator->isReplaceableGlobalAllocationFunction()) {
1456 const TargetInfo &target = cgm.getASTContext().getTargetInfo();
1457 unsigned allocatorAlign = llvm::bit_floor(std::min<uint64_t>(
1458 target.getNewAlign(), getContext().getTypeSize(allocType)));
1459 allocationAlign = std::max(
1460 allocationAlign, getContext().toCharUnitsFromBits(allocatorAlign));
1461 }
1462
1463 mlir::Value allocPtr = rv.getValue();
1464 allocation = Address(
1465 allocPtr, mlir::cast<cir::PointerType>(allocPtr.getType()).getPointee(),
1466 allocationAlign);
1467 }
1468
1469 // Emit a null check on the allocation result if the allocation
1470 // function is allowed to return null (because it has a non-throwing
1471 // exception spec or is the reserved placement new) and we have an
1472 // interesting initializer will be running sanitizers on the initialization.
1473 bool nullCheck = e->shouldNullCheckAllocation() &&
1474 (!allocType.isPODType(getContext()) || e->hasInitializer());
1476 if (nullCheck)
1477 cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: null check");
1478
1479 // If there's an operator delete, enter a cleanup to call it if an
1480 // exception is thrown. If we do this, we'll be creating the result pointer
1481 // inside a cleanup scope, either with a bitcast or an offset based on the
1482 // array cookie size. However, we need to return that pointer from outside
1483 // the cleanup scope, so we need to store it in a temporary variable.
// NOTE(review): the second conjunct of this condition (orig. line 1486) is
// missing due to the scrape.
1484 bool useNewDeleteCleanup =
1485 e->getOperatorDelete() &&
1487 EHScopeStack::stable_iterator operatorDeleteCleanup;
1488 mlir::Operation *cleanupDominator = nullptr;
1489 if (useNewDeleteCleanup) {
1491 enterNewDeleteCleanup(*this, e, allocation, allocSize, allocAlign,
1492 allocatorArgs);
1493 operatorDeleteCleanup = ehStack.stable_begin();
// Placeholder op marking the dominating point for cleanup deactivation;
// erased once the cleanup is deactivated below.
1494 cleanupDominator =
1495 cir::UnreachableOp::create(builder, getLoc(e->getSourceRange()))
1496 .getOperation();
1497 }
1498
1499 if (allocSize != allocSizeWithoutCookie) {
1500 assert(e->isArray());
1501 allocation = cgm.getCXXABI().initializeArrayCookie(
1502 *this, allocation, numElements, e, allocType);
1503 }
1504
1505 mlir::Type elementTy;
1506 if (e->isArray()) {
1507 // For array new, use the allocated type to handle multidimensional arrays
1508 // correctly
1509 elementTy = convertTypeForMem(e->getAllocatedType());
1510 } else {
1511 elementTy = convertTypeForMem(allocType);
1512 }
1513 Address result = builder.createElementBitCast(getLoc(e->getSourceRange()),
1514 allocation, elementTy);
1515
1516 // If we're inside a new delete cleanup, store the result pointer.
1517 Address resultPtr = Address::invalid();
1518 if (useNewDeleteCleanup) {
1519 resultPtr =
1520 createTempAlloca(builder.getPointerTo(elementTy), result.getAlignment(),
1521 getLoc(e->getSourceRange()), "__new_result");
1522 builder.createStore(getLoc(e->getSourceRange()), result.getPointer(),
1523 resultPtr);
1524 }
1525
1526 // Passing pointer through launder.invariant.group to avoid propagation of
1527 // vptrs information which may be included in previous type.
1528 // To not break LTO with different optimizations levels, we do it regardless
1529 // of optimization level.
1530 if (cgm.getCodeGenOpts().StrictVTablePointers &&
1531 allocator->isReservedGlobalPlacementOperator())
1532 cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: strict vtable pointers");
1533
1535
1536 emitNewInitializer(*this, e, allocType, elementTy, result, numElements,
1537 allocSizeWithoutCookie);
1538
1539 // Deactivate the 'operator delete' cleanup if we finished
1540 // initialization.
1541 if (useNewDeleteCleanup) {
1542 assert(operatorDeleteCleanup.isValid());
1543 assert(resultPtr.isValid());
1544 deactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
1545 cleanupDominator->erase();
// Reload the result pointer from the temporary now that we are outside the
// cleanup scope.
1546 cir::LoadOp loadResult =
1547 builder.createLoad(getLoc(e->getSourceRange()), resultPtr);
1548 result = result.withPointer(loadResult.getResult());
1549 }
1550
1552
1553 return result.getPointer();
1554}
1555
// Emit a call to a usual 'operator delete': passes the pointer, then the
// optional size (std::size_t) and alignment (std::align_val_t) arguments
// according to the declaration's usual-delete parameter profile.
// NOTE(review): orig. lines 1556 (the member-function header, presumably
// `void CIRGenFunction::emitDeleteCall(const FunctionDecl *deleteFD,`),
// 1558, and 1567 (the condition guarding the type-aware NYI below) were
// dropped by the HTML scrape — restore from upstream before compiling.
1557 mlir::Value ptr, QualType deleteTy) {
1559
1560 const auto *deleteFTy = deleteFD->getType()->castAs<FunctionProtoType>();
1561 CallArgList deleteArgs;
1562
1563 UsualDeleteParams params = deleteFD->getUsualDeleteParams();
1564 auto paramTypeIt = deleteFTy->param_type_begin();
1565
1566 // Pass std::type_identity tag if present
1568 cgm.errorNYI(deleteFD->getSourceRange(),
1569 "emitDeleteCall: type aware delete");
1570
1571 // Pass the pointer itself.
1572 QualType argTy = *paramTypeIt++;
1573 mlir::Value deletePtr =
1574 builder.createBitcast(ptr.getLoc(), ptr, convertType(argTy));
1575 deleteArgs.add(RValue::get(deletePtr), argTy);
1576
1577 // Pass the std::destroying_delete tag if present.
1578 if (params.DestroyingDelete)
1579 cgm.errorNYI(deleteFD->getSourceRange(),
1580 "emitDeleteCall: destroying delete");
1581
1582 // Pass the size if the delete function has a size_t parameter.
1583 if (params.Size) {
1584 QualType sizeType = *paramTypeIt++;
1585 CharUnits deleteTypeSize = getContext().getTypeSizeInChars(deleteTy);
1586 assert(mlir::isa<cir::IntType>(convertType(sizeType)) &&
1587 "expected cir::IntType");
1588 cir::ConstantOp size = builder.getConstInt(
1589 *currSrcLoc, convertType(sizeType), deleteTypeSize.getQuantity());
1590
1591 deleteArgs.add(RValue::get(size), sizeType);
1592 }
1593
1594 // Pass the alignment if the delete function has an align_val_t parameter.
1595 if (isAlignedAllocation(params.Alignment))
1596 cgm.errorNYI(deleteFD->getSourceRange(),
1597 "emitDeleteCall: aligned allocation");
1598
1599 assert(paramTypeIt == deleteFTy->param_type_end() &&
1600 "unknown parameter to usual delete function");
1601
1602 // Emit the call to delete.
1603 emitNewDeleteCall(*this, deleteFD, deleteFTy, deleteArgs);
1604}
1605
// Emit the result of a dynamic_cast that is statically known to fail or be
// null: for reference casts, call std::bad_cast and start a fresh block;
// always produce a null pointer of the destination type.
// NOTE(review): orig. line 1606 (the function header,
// `static mlir::Value emitDynamicCastToNull(CIRGenFunction &cgf,` per the
// symbol index) was dropped by the HTML scrape — restore from upstream.
1607 mlir::Location loc, QualType destTy) {
1608 mlir::Type destCIRTy = cgf.convertType(destTy);
1609 assert(mlir::isa<cir::PointerType>(destCIRTy) &&
1610 "result of dynamic_cast should be a ptr");
1611
1612 if (!destTy->isPointerType()) {
1613 mlir::Region *currentRegion = cgf.getBuilder().getBlock()->getParent();
1614 /// C++ [expr.dynamic.cast]p9:
1615 /// A failed cast to reference type throws std::bad_cast
1616 cgf.cgm.getCXXABI().emitBadCastCall(cgf, loc);
1617
1618 // The call to bad_cast will terminate the current block. Create a new block
1619 // to hold any follow up code.
1620 cgf.getBuilder().createBlock(currentRegion, currentRegion->end());
1621 }
1622
1623 return cgf.getBuilder().getNullPtr(destCIRTy, loc);
1624}
1625
// Emit a dynamic_cast expression: classify the source/destination record
// types (pointer, reference, or cast-to-void*) and delegate to the C++ ABI.
// NOTE(review): orig. lines 1626 (the member-function header) and 1654 were
// dropped by the HTML scrape — restore from upstream before compiling.
1627 const CXXDynamicCastExpr *dce) {
1628 mlir::Location loc = getLoc(dce->getSourceRange());
1629
1630 cgm.emitExplicitCastExprType(dce, this);
1631 QualType destTy = dce->getTypeAsWritten();
1632 QualType srcTy = dce->getSubExpr()->getType();
1633
1634 // C++ [expr.dynamic.cast]p7:
1635 // If T is "pointer to cv void," then the result is a pointer to the most
1636 // derived object pointed to by v.
1637 bool isDynCastToVoid = destTy->isVoidPointerType();
1638 bool isRefCast = destTy->isReferenceType();
1639
1640 QualType srcRecordTy;
1641 QualType destRecordTy;
1642 if (isDynCastToVoid) {
1643 srcRecordTy = srcTy->getPointeeType();
1644 // No destRecordTy.
1645 } else if (const PointerType *destPTy = destTy->getAs<PointerType>()) {
1646 srcRecordTy = srcTy->castAs<PointerType>()->getPointeeType();
1647 destRecordTy = destPTy->getPointeeType();
1648 } else {
// Reference cast: the operand is a glvalue of record type.
1649 srcRecordTy = srcTy;
1650 destRecordTy = destTy->castAs<ReferenceType>()->getPointeeType();
1651 }
1652
1653 assert(srcRecordTy->isRecordType() && "source type must be a record type!");
1655
1656 if (dce->isAlwaysNull())
1657 return emitDynamicCastToNull(*this, loc, destTy);
1658
1659 auto destCirTy = mlir::cast<cir::PointerType>(convertType(destTy));
1660 return cgm.getCXXABI().emitDynamicCast(*this, loc, srcRecordTy, destRecordTy,
1661 destCirTy, isRefCast, thisAddr);
1662}
1663
// Emit typeid() applied to a glvalue of polymorphic class type: the
// type_info pointer is fetched through the object's vtable, with an optional
// null-pointer check that raises std::bad_typeid.
// NOTE(review): orig. line 1674 (dropped by the HTML scrape) is missing
// after the [class.cdtor] comment below — restore from upstream.
1664 static mlir::Value emitCXXTypeidFromVTable(CIRGenFunction &cgf, const Expr *e,
1665 mlir::Type typeInfoPtrTy,
1666 bool hasNullCheck) {
1667 Address thisPtr = cgf.emitLValue(e).getAddress();
1668 QualType srcType = e->getType();
1669
1670 // C++ [class.cdtor]p4:
1671 // If the operand of typeid refers to the object under construction or
1672 // destruction and the static type of the operand is neither the constructor
1673 // or destructor's class nor one of its bases, the behavior is undefined.
1675
1676 if (hasNullCheck && cgf.cgm.getCXXABI().shouldTypeidBeNullChecked(srcType)) {
1677 mlir::Value isThisNull =
1678 cgf.getBuilder().createPtrIsNull(thisPtr.getPointer());
1679 // We don't really care about the value, we just want to make sure the
1680 // 'true' side calls bad-type-id.
1681 cir::IfOp::create(
1682 cgf.getBuilder(), cgf.getLoc(e->getSourceRange()), isThisNull,
1683 /*withElseRegion=*/false, [&](mlir::OpBuilder &, mlir::Location loc) {
1684 cgf.cgm.getCXXABI().emitBadTypeidCall(cgf, loc);
1685 });
1686 }
1687
1688 return cgf.cgm.getCXXABI().emitTypeid(cgf, srcType, thisPtr, typeInfoPtrTy);
1689}
1690
// Emit a typeid expression: polymorphic glvalue operands go through the
// vtable lookup; otherwise the statically-known RTTI descriptor global is
// materialized and cast to the result pointer type.
// NOTE(review): orig. lines 1691 (the member-function header), 1694, 1699,
// and 1708 were dropped by the HTML scrape — restore from upstream before
// compiling (e.g. `ty` below is initialized on the missing line 1694).
1692 mlir::Location loc = getLoc(e->getSourceRange());
1693 mlir::Type resultType = cir::PointerType::get(convertType(e->getType()));
1695 : e->getExprOperand()->getType();
1696
1697 // If the non-default global var address space is not default, we need to do
1698 // an address-space cast here.
1700
1701 // C++ [expr.typeid]p2:
1702 // When typeid is applied to a glvalue expression whose type is a
1703 // polymorphic class type, the result refers to a std::type_info object
1704 // representing the type of the most derived object (that is, the dynamic
1705 // type) to which the glvalue refers.
1706 // If the operand is already most derived object, no need to look up vtable.
1707 if (!e->isTypeOperand() && e->isPotentiallyEvaluated() &&
1709 return emitCXXTypeidFromVTable(*this, e->getExprOperand(), resultType,
1710 e->hasNullCheck());
1711
1712 auto typeInfo =
1713 cast<cir::GlobalViewAttr>(cgm.getAddrOfRTTIDescriptor(loc, ty));
1714 // `getAddrOfRTTIDescriptor` lies to us and always gives us a uint8ptr as its
1715 // type, however we need the value of the actual global to call the
1716 // get-global-op, so look it up here.
1717 auto typeInfoGlobal =
1718 cast<cir::GlobalOp>(cgm.getGlobalValue(typeInfo.getSymbol().getValue()));
1719 auto getTypeInfo = cir::GetGlobalOp::create(
1720 builder, loc, builder.getPointerTo(typeInfoGlobal.getSymType()),
1721 typeInfoGlobal.getSymName());
1722 // The ABI is just generating these sometimes as ptr to u8, but they are
1723 // simply a representation of the type_info. So we have to cast this, if
1724 // necessary (createBitcast is a noop if the types match).
1725 return builder.createBitcast(getTypeInfo, resultType);
1726}
static void emit(Program &P, llvm::SmallVectorImpl< std::byte > &Code, const T &Val, bool &Success)
Helper to write bytecode and bail out if 32-bit offsets become invalid.
static void emitNewInitializer(CIRGenFunction &cgf, const CXXNewExpr *e, QualType elementType, mlir::Type elementTy, Address newPtr, mlir::Value numElements, mlir::Value allocSizeWithoutCookie)
static mlir::Value emitCXXTypeidFromVTable(CIRGenFunction &cgf, const Expr *e, mlir::Type typeInfoPtrTy, bool hasNullCheck)
static void emitObjectDelete(CIRGenFunction &cgf, const CXXDeleteExpr *de, Address ptr, QualType elementType)
Emit the code for deleting a single object.
static void emitNullBaseClassInitialization(CIRGenFunction &cgf, Address destPtr, const CXXRecordDecl *base)
static void enterNewDeleteCleanup(CIRGenFunction &cgf, const CXXNewExpr *e, Address newPtr, mlir::Value allocSize, CharUnits allocAlign, const CallArgList &newArgs)
Enter a cleanup to call 'operator delete' if the initializer in a new-expression throws.
static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e, unsigned minElements, mlir::Value &numElements, mlir::Value &sizeWithoutCookie)
static void storeAnyExprIntoOneUnit(CIRGenFunction &cgf, const Expr *init, QualType allocType, Address newPtr, AggValueSlot::Overlap_t mayOverlap)
static CharUnits calculateCookiePadding(CIRGenFunction &cgf, const CXXNewExpr *e)
static mlir::Value emitDynamicCastToNull(CIRGenFunction &cgf, mlir::Location loc, QualType destTy)
static MemberCallInfo commonBuildCXXMemberOrOperatorCall(CIRGenFunction &cgf, const CXXMethodDecl *md, mlir::Value thisPtr, mlir::Value implicitParam, QualType implicitParamTy, const CallExpr *ce, CallArgList &args, CallArgList *rtlArgs)
static RValue emitNewDeleteCall(CIRGenFunction &cgf, const FunctionDecl *calleeDecl, const FunctionProtoType *calleeType, const CallArgList &args)
Emit a call to an operator new or operator delete function, as implicitly created by new-expressions ...
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
Defines the clang::Expr interface and subclasses for C++ expressions.
Defines an enumeration for C++ overloaded operators.
static QualType getPointeeType(const MemRegion *R)
__device__ __2f16 b
cir::ConstantOp getConstant(mlir::Location loc, mlir::TypedAttr attr)
mlir::Value createOr(mlir::Location loc, mlir::Value lhs, mlir::Value rhs)
mlir::Value createPtrIsNull(mlir::Value ptr)
cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc)
mlir::Value createIntCast(mlir::Value src, mlir::Type newTy)
cir::CmpOp createCompare(mlir::Location loc, cir::CmpOpKind kind, mlir::Value lhs, mlir::Value rhs)
mlir::Value createSelect(mlir::Location loc, mlir::Value condition, mlir::Value trueValue, mlir::Value falseValue)
mlir::Value createMul(mlir::Location loc, mlir::Value lhs, mlir::Value rhs, OverflowBehavior ob=OverflowBehavior::None)
llvm::TypeSize getTypeSizeInBits(mlir::Type ty) const
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
TranslationUnitDecl * getTranslationUnitDecl() const
const ConstantArrayType * getAsConstantArrayType(QualType T) const
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
DeclarationNameTable DeclarationNames
Definition ASTContext.h:802
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
static bool hasSameType(QualType T1, QualType T2)
Determine whether the given types T1 and T2 are equivalent.
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object...
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3772
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:4041
Expr * getLHS() const
Definition Expr.h:4091
Expr * getRHS() const
Definition Expr.h:4093
Opcode getOpcode() const
Definition Expr.h:4086
Address withPointer(mlir::Value newPtr) const
Return address with different pointer, but same element type and alignment.
Definition Address.h:81
mlir::Value getPointer() const
Definition Address.h:96
mlir::Type getElementType() const
Definition Address.h:123
static Address invalid()
Definition Address.h:74
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
clang::CharUnits getAlignment() const
Definition Address.h:136
Address withAlignment(clang::CharUnits newAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:87
bool isValid() const
Definition Address.h:75
mlir::Value emitRawPointer() const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:110
An aggregate value slot.
IsZeroed_t isZeroed() const
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
cir::IntType getSIntNTy(int n)
bool isNullValue(mlir::Attribute attr) const
cir::StoreOp createStore(mlir::Location loc, mlir::Value val, Address dst, bool isVolatile=false, mlir::IntegerAttr align={}, cir::SyncScopeKindAttr scope={}, cir::MemOrderAttr order={})
cir::ConstantOp getConstInt(mlir::Location loc, llvm::APSInt intVal)
cir::IntType getUIntNTy(int n)
virtual void emitVirtualObjectDelete(CIRGenFunction &cgf, const CXXDeleteExpr *de, Address ptr, QualType elementType, const CXXDestructorDecl *dtor)=0
virtual const clang::CXXRecordDecl * getThisArgumentTypeForMethod(const clang::CXXMethodDecl *md)
Get the type of the implicit "this" parameter used by a method.
virtual bool shouldTypeidBeNullChecked(QualType srcTy)=0
virtual mlir::Value emitTypeid(CIRGenFunction &cgf, QualType srcTy, Address thisPtr, mlir::Type typeInfoPtrTy)=0
virtual void emitBadCastCall(CIRGenFunction &cgf, mlir::Location loc)=0
virtual CharUnits getArrayCookieSize(const CXXNewExpr *e)
Returns the extra size required in order to store the array cookie for the given new-expression.
static CIRGenCallee forDirect(mlir::Operation *funcPtr, const CIRGenCalleeInfo &abstractInfo=CIRGenCalleeInfo())
Definition CIRGenCall.h:92
static CIRGenCallee forVirtual(const clang::CallExpr *ce, clang::GlobalDecl md, Address addr, cir::FuncType fTy)
Definition CIRGenCall.h:154
An abstract representation of regular/ObjC call/message targets.
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
clang::GlobalDecl curGD
The GlobalDecl for the current function being compiled or the global variable currently being initial...
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
RValue emitCXXMemberPointerCallExpr(const CXXMemberCallExpr *ce, ReturnValueSlot returnValue)
void emitDeleteCall(const FunctionDecl *deleteFD, mlir::Value ptr, QualType deleteTy)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
RValue emitCXXMemberOrOperatorCall(const clang::CXXMethodDecl *md, const CIRGenCallee &callee, ReturnValueSlot returnValue, mlir::Value thisPtr, mlir::Value implicitParam, clang::QualType implicitParamTy, const clang::CallExpr *ce, CallArgList *rtlArgs)
void emitNullInitialization(mlir::Location loc, Address destPtr, QualType ty)
RValue emitCUDAKernelCallExpr(const CUDAKernelCallExpr *expr, ReturnValueSlot returnValue)
EHScopeStack ehStack
Tracks function scope overall cleanup handling.
void emitNewArrayInitializer(const CXXNewExpr *e, QualType elementType, mlir::Type elementTy, Address beginPtr, mlir::Value numElements, mlir::Value allocSizeWithoutCookie)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
mlir::Value emitCXXTypeidExpr(const CXXTypeidExpr *e)
mlir::Type convertTypeForMem(QualType t)
void emitCXXConstructExpr(const clang::CXXConstructExpr *e, AggValueSlot dest)
mlir::Value emitCXXNewExpr(const CXXNewExpr *e)
Address returnValue
The temporary alloca to hold the return value.
void emitCXXConstructorCall(const clang::CXXConstructorDecl *d, clang::CXXCtorType type, bool forVirtualBase, bool delegating, AggValueSlot thisAVS, const clang::CXXConstructExpr *e)
void emitScalarInit(const clang::Expr *init, mlir::Location loc, LValue lvalue, bool capturedByInit=false)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
void deactivateCleanupBlock(EHScopeStack::stable_iterator cleanup, mlir::Operation *dominatingIP)
Deactivates the given cleanup block.
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
CIRGenBuilderTy & getBuilder()
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
void emitCXXDestructorCall(const CXXDestructorDecl *dd, CXXDtorType type, bool forVirtualBase, bool delegating, Address thisAddr, QualType thisTy)
RValue emitNewOrDeleteBuiltinCall(const FunctionProtoType *type, const CallExpr *callExpr, OverloadedOperatorKind op)
void emitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, const clang::ArrayType *arrayType, Address arrayBegin, const CXXConstructExpr *e, bool newPointerIsChecked, bool zeroInitialize=false)
Emit a loop to call a particular constructor for each of several members of an array.
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
void emitCXXDeleteExpr(const CXXDeleteExpr *e)
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
mlir::TypedAttr emitNullConstantForBase(const CXXRecordDecl *record)
Return a null constant appropriate for zero-initializing a base class with the given type.
cir::FuncOp getAddrOfFunction(clang::GlobalDecl gd, mlir::Type funcType=nullptr, bool forVTable=false, bool dontDefer=false, ForDefinition_t isForDefinition=NotForDefinition)
Return the address of the given function.
const cir::CIRDataLayout getDataLayout() const
CIRGenCXXABI & getCXXABI() const
const CIRGenFunctionInfo & arrangeFreeFunctionCall(const CallArgList &args, const FunctionType *fnType)
clang::CanQualType deriveThisType(const clang::CXXRecordDecl *rd, const clang::CXXMethodDecl *md)
Derives the 'this' type for CIRGen purposes, i.e.
void addFrom(const CallArgList &other)
Add all the arguments from another CallArgList to this one.
Definition CIRGenCall.h:248
void add(RValue rvalue, clang::QualType type)
Definition CIRGenCall.h:239
mlir::Attribute tryEmitAbstract(const Expr *e, QualType destType)
Information for lazily generating a cleanup.
A saved depth on the scope stack.
T * pushCleanupWithExtra(CleanupKind kind, size_t n, As... a)
Push a cleanup with non-constant storage requirements on the stack.
AlignmentSource getAlignmentSource() const
Address getAddress() const
mlir::Value getPointer() const
void setAddress(Address address)
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
A class for recording the number of arguments that a function signature requires.
static RequiredArgs getFromProtoWithExtraSlots(const clang::FunctionProtoType *prototype, unsigned additional)
Compute the arguments required by the given formal prototype, given that there may be some additional...
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:260
Represents a call to a CUDA kernel function.
Definition ExprCXX.h:235
Represents a call to a C++ constructor.
Definition ExprCXX.h:1549
bool isElidable() const
Whether this construction is elidable.
Definition ExprCXX.h:1618
Expr * getArg(unsigned Arg)
Return the specified argument.
Definition ExprCXX.h:1692
bool requiresZeroInitialization() const
Whether this construction first requires zero-initialization before the initializer is called.
Definition ExprCXX.h:1651
CXXConstructorDecl * getConstructor() const
Get the constructor that this expression will (ultimately) call.
Definition ExprCXX.h:1612
CXXConstructionKind getConstructionKind() const
Determine whether this constructor is actually constructing a base class (rather than a complete obje...
Definition ExprCXX.h:1660
Represents a C++ constructor within a class.
Definition DeclCXX.h:2611
bool isDefaultConstructor() const
Whether this constructor is a default constructor (C++ [class.ctor]p5), which can be used to default-...
Definition DeclCXX.cpp:3017
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition ExprCXX.h:2627
FunctionDecl * getOperatorDelete() const
Definition ExprCXX.h:2666
bool isArrayForm() const
Definition ExprCXX.h:2653
QualType getDestroyedType() const
Retrieve the type being destroyed.
Definition ExprCXX.cpp:338
Represents a C++ destructor within a class.
Definition DeclCXX.h:2876
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition ExprCXX.h:482
bool isAlwaysNull() const
isAlwaysNull - Return whether the result of the dynamic_cast is proven to always be null.
Definition ExprCXX.cpp:838
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:180
SourceLocation getExprLoc() const LLVM_READONLY
Definition ExprCXX.h:221
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2136
bool isVirtual() const
Definition DeclCXX.h:2191
const CXXRecordDecl * getParent() const
Return the parent of this method declaration, which is the class in which this method is defined.
Definition DeclCXX.h:2262
bool isInstance() const
Definition DeclCXX.h:2163
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition ExprCXX.h:2356
bool isArray() const
Definition ExprCXX.h:2465
llvm::iterator_range< arg_iterator > placement_arguments()
Definition ExprCXX.h:2573
QualType getAllocatedType() const
Definition ExprCXX.h:2435
unsigned getNumImplicitArgs() const
Definition ExprCXX.h:2512
std::optional< Expr * > getArraySize()
This might return std::nullopt even if isArray() returns true, since there might not be an array size...
Definition ExprCXX.h:2470
ImplicitAllocationParameters implicitAllocationParameters() const
Provides the full set of information about expected implicit parameters in this call.
Definition ExprCXX.h:2563
bool hasInitializer() const
Whether this new-expression has any initializer at all.
Definition ExprCXX.h:2525
bool shouldNullCheckAllocation() const
True if the allocation result needs to be null-checked.
Definition ExprCXX.cpp:326
bool passAlignment() const
Indicates whether the required alignment should be implicitly passed to the allocation function.
Definition ExprCXX.h:2552
FunctionDecl * getOperatorDelete() const
Definition ExprCXX.h:2462
unsigned getNumPlacementArgs() const
Definition ExprCXX.h:2495
SourceRange getSourceRange() const
Definition ExprCXX.h:2611
FunctionDecl * getOperatorNew() const
Definition ExprCXX.h:2460
Expr * getInitializer()
The initializer of this new-expression.
Definition ExprCXX.h:2534
A call to an overloaded operator written using operator syntax.
Definition ExprCXX.h:85
Represents a list-initialization with parenthesis.
Definition ExprCXX.h:5142
MutableArrayRef< Expr * > getInitExprs()
Definition ExprCXX.h:5182
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition ExprCXX.h:2746
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition DeclCXX.h:1186
A C++ typeid expression (C++ [expr.typeid]), which gets the type_info that corresponds to the supplie...
Definition ExprCXX.h:849
bool isTypeOperand() const
Definition ExprCXX.h:885
QualType getTypeOperand(const ASTContext &Context) const
Retrieves the type operand of this typeid() expression after various required adjustments (removing r...
Definition ExprCXX.cpp:161
Expr * getExprOperand() const
Definition ExprCXX.h:896
SourceRange getSourceRange() const LLVM_READONLY
Definition ExprCXX.h:903
bool isMostDerived(const ASTContext &Context) const
Best-effort check if the expression operand refers to a most derived object.
Definition ExprCXX.cpp:149
bool isPotentiallyEvaluated() const
Determine whether this typeid has a type operand which is potentially evaluated, per C++11 [expr....
Definition ExprCXX.cpp:134
bool hasNullCheck() const
Whether this is of a form like "typeid(*ptr)" that can throw a std::bad_typeid if a pointer is a null...
Definition ExprCXX.cpp:200
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3150
arg_iterator arg_begin()
Definition Expr.h:3203
arg_iterator arg_end()
Definition Expr.h:3206
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3129
Expr * getCallee()
Definition Expr.h:3093
arg_range arguments()
Definition Expr.h:3198
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
bool isOne() const
isOne - Test whether the quantity equals one.
Definition CharUnits.h:125
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition CharUnits.h:53
Represents the canonical version of C arrays with a specified constant size.
Definition TypeBase.h:3810
The results of name lookup within a DeclContext.
Definition DeclBase.h:1382
lookup_result lookup(DeclarationName Name) const
lookup - Find the declarations (if any) with the given Name in this context.
virtual SourceRange getSourceRange() const LLVM_READONLY
Source range that this declaration covers.
Definition DeclBase.h:427
DeclarationName getCXXOperatorName(OverloadedOperatorKind Op)
Get the name of the overloadable C++ operator corresponding to Op.
The name of a declaration.
QualType getTypeAsWritten() const
getTypeAsWritten - Returns the type that this expression is casting to, as written in the source code...
Definition Expr.h:3958
This represents one expression.
Definition Expr.h:112
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3090
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3086
bool isTemporaryObject(ASTContext &Ctx, const CXXRecordDecl *TempTy) const
Determine whether the result of this expression is a temporary object of the given class type.
Definition Expr.cpp:3253
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
Represents a function declaration or definition.
Definition Decl.h:2015
bool isDestroyingOperatorDelete() const
Determine whether this is a destroying operator delete.
Definition Decl.cpp:3552
bool isTrivial() const
Whether this function is "trivial" in some specialized C++ senses.
Definition Decl.h:2392
bool isReplaceableGlobalAllocationFunction(UnsignedOrNone *AlignmentParam=nullptr, bool *IsNothrow=nullptr) const
Determines whether this function is one of the replaceable global allocation functions: void *operato...
Definition Decl.h:2609
UsualDeleteParams getUsualDeleteParams() const
Definition Decl.cpp:3568
bool isReservedGlobalPlacementOperator() const
Determines whether this operator new or delete is one of the reserved global placement operators: voi...
Definition Decl.cpp:3404
bool isDefaulted() const
Whether this function is defaulted.
Definition Decl.h:2400
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4550
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5357
CanThrowResult canThrow() const
Determine whether this function type has a non-throwing exception specification.
Definition Type.cpp:3918
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
Describes an C or C++ initializer list.
Definition Expr.h:5302
bool isStringLiteralInit() const
Is this an initializer for an array of characters, initialized by a string literal or an @encode?
Definition Expr.cpp:2448
unsigned getNumInits() const
Definition Expr.h:5332
Expr * getArrayFiller()
If this initializer list initializes an array with more elements than there are initializers in the l...
Definition Expr.h:5404
ArrayRef< Expr * > inits()
Definition Expr.h:5352
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3703
QualType getPointeeType() const
Definition TypeBase.h:3721
This represents a decl that may have a name.
Definition Decl.h:274
Represents a C++ nested name specifier, such as "\::std::vector<int>::".
ObjCEncodeExpr, used for @encode in Objective-C.
Definition ExprObjC.h:441
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3378
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8471
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1444
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1551
bool isPODType(const ASTContext &Context) const
Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
Definition Type.cpp:2739
bool hasStrongOrWeakObjCLifetime() const
Definition TypeBase.h:1452
Base for LValueReferenceType and RValueReferenceType.
Definition TypeBase.h:3623
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1802
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4894
bool isUnion() const
Definition Decl.h:3943
Exposes information about the current target.
Definition TargetInfo.h:227
unsigned getNewAlign() const
Return the largest alignment for which a suitably-sized allocation with 'operator new(size_t)' is gua...
Definition TargetInfo.h:767
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Decl.h:3562
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isConstantArrayType() const
Definition TypeBase.h:8771
bool isVoidPointerType() const
Definition Type.cpp:714
bool isPointerType() const
Definition TypeBase.h:8668
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9328
bool isReferenceType() const
Definition TypeBase.h:8692
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:754
const ArrayType * getAsArrayTypeUnsafe() const
A variant of getAs<> for array types which silently discards qualifiers from the outermost type.
Definition TypeBase.h:9314
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9261
bool isRecordType() const
Definition TypeBase.h:8795
QualType getType() const
Definition Decl.h:723
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
const internal::VariadicDynCastAllOfMatcher< Stmt, CallExpr > callExpr
Matches call expressions.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< ArrayType > arrayType
const internal::VariadicAllOfMatcher< Decl > decl
Matches declarations.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
The JSON file list parser is used to communicate input to InstallAPI.
OverloadedOperatorKind
Enumeration specifying the different kinds of C++ overloaded operators.
CXXCtorType
C++ constructor types.
Definition ABI.h:24
@ Ctor_Base
Base object ctor.
Definition ABI.h:26
@ Ctor_Complete
Complete object ctor.
Definition ABI.h:25
bool isa(CodeGen::Address addr)
Definition Address.h:330
AlignedAllocationMode alignedAllocationModeFromBool(bool IsAligned)
Definition ExprCXX.h:2270
bool isAlignedAllocation(AlignedAllocationMode Mode)
Definition ExprCXX.h:2266
AlignedAllocationMode
Definition ExprCXX.h:2264
@ Dtor_Complete
Complete object dtor.
Definition ABI.h:36
bool isTypeAwareAllocation(TypeAwareAllocationMode Mode)
Definition ExprCXX.h:2254
TypeAwareAllocationMode
Definition ExprCXX.h:2252
U cast(CodeGen::Address addr)
Definition Address.h:327
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)
static bool objCLifetime()
static bool addressSpace()
static bool devirtualizeDestructor()
static bool aggValueSlotGC()
static bool devirtualizeMemberFunction()
static bool deleteArray()
static bool emitTypeCheck()
static bool cleanupDeactivationScope()
static bool opCallMustTail()
static bool typeAwareAllocation()
static bool exprNewNullCheck()
static bool emitNullCheckForDeleteCalls()
static bool generateDebugInfo()
clang::CharUnits getSizeAlign() const
The parameters to pass to a usual operator delete.
Definition ExprCXX.h:2345
TypeAwareAllocationMode TypeAwareDelete
Definition ExprCXX.h:2346
AlignedAllocationMode Alignment
Definition ExprCXX.h:2349