clang 23.0.0git
CXXABILowering.cpp
Go to the documentation of this file.
1//==- CXXABILowering.cpp - lower C++ operations to target-specific ABI form -=//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include <deque>
10
11#include "PassDetail.h"
13
14#include "mlir/Dialect/OpenACC/OpenACCOpsDialect.h.inc"
15#include "mlir/Dialect/OpenMP/OpenMPOpsDialect.h.inc"
16#include "mlir/IR/PatternMatch.h"
17#include "mlir/Interfaces/DataLayoutInterfaces.h"
18#include "mlir/Pass/Pass.h"
19#include "mlir/Transforms/DialectConversion.h"
27
28#include "llvm/ADT/ScopeExit.h"
29#include "llvm/ADT/TypeSwitch.h"
30
31using namespace mlir;
32using namespace cir;
33
34namespace mlir {
35#define GEN_PASS_DEF_CXXABILOWERING
36#include "clang/CIR/Dialect/Passes.h.inc"
37} // namespace mlir
38
39namespace {
40// Check an attribute for legality. An attribute is only currently potentially
41// illegal if it contains a type, member pointers are our source of illegality
42// in regards to attributes.
43bool isCXXABIAttributeLegal(const mlir::TypeConverter &tc,
44 mlir::Attribute attr) {
45 // If we don't have an attribute, it can't have a type!
46 if (!attr)
47 return true;
48
49 // None of the OpenACC/OMP attributes contain a type of concern, so we can
50 // just treat them as legal.
51 if (isa<mlir::acc::OpenACCDialect, mlir::omp::OpenMPDialect>(
52 attr.getDialect()))
53 return true;
54
55 // These attributes either don't contain a type, or don't contain a type that
56 // can have a data member/method.
57 if (isa<mlir::DenseArrayAttr, mlir::FloatAttr, mlir::UnitAttr,
58 mlir::StringAttr, mlir::IntegerAttr, mlir::SymbolRefAttr,
59 cir::AnnotationAttr>(attr))
60 return true;
61
62 // Tablegen'ed always-legal attributes:
63 if (isa<
65#include "clang/CIR/Dialect/IR/CIRLowering.inc"
67 >(attr))
68 return true;
69
70 // Data Member and method are ALWAYS illegal.
71 if (isa<cir::DataMemberAttr, cir::MethodAttr>(attr))
72 return false;
73
74 return llvm::TypeSwitch<mlir::Attribute, bool>(attr)
75 // These attributes just have a type, so they are legal if their type is.
76 .Case<cir::ZeroAttr>(
77 [&tc](cir::ZeroAttr za) { return tc.isLegal(za.getType()); })
78 .Case<cir::PoisonAttr>(
79 [&tc](cir::PoisonAttr pa) { return tc.isLegal(pa.getType()); })
80 .Case<cir::UndefAttr>(
81 [&tc](cir::UndefAttr uda) { return tc.isLegal(uda.getType()); })
82 .Case<mlir::TypeAttr>(
83 [&tc](mlir::TypeAttr ta) { return tc.isLegal(ta.getValue()); })
84 .Case<cir::ConstPtrAttr>(
85 [&tc](cir::ConstPtrAttr cpa) { return tc.isLegal(cpa.getType()); })
86 .Case<cir::CXXCtorAttr>(
87 [&tc](cir::CXXCtorAttr ca) { return tc.isLegal(ca.getType()); })
88 .Case<cir::CXXDtorAttr>(
89 [&tc](cir::CXXDtorAttr da) { return tc.isLegal(da.getType()); })
90 .Case<cir::CXXAssignAttr>(
91 [&tc](cir::CXXAssignAttr aa) { return tc.isLegal(aa.getType()); })
92
93 // Collection attributes are legal if ALL of the attributes in them are
94 // also legal.
95 .Case<mlir::ArrayAttr>([&tc](mlir::ArrayAttr array) {
96 return llvm::all_of(array.getValue(), [&tc](mlir::Attribute attr) {
97 return isCXXABIAttributeLegal(tc, attr);
98 });
99 })
100 .Case<mlir::DictionaryAttr>([&tc](mlir::DictionaryAttr dict) {
101 return llvm::all_of(dict.getValue(), [&tc](mlir::NamedAttribute na) {
102 return isCXXABIAttributeLegal(tc, na.getValue());
103 });
104 })
105 // These attributes have sub-attributes that we should check for legality.
106 .Case<cir::ConstArrayAttr>([&tc](cir::ConstArrayAttr array) {
107 return tc.isLegal(array.getType()) &&
108 isCXXABIAttributeLegal(tc, array.getElts());
109 })
110 .Case<cir::GlobalViewAttr>([&tc](cir::GlobalViewAttr gva) {
111 return tc.isLegal(gva.getType()) &&
112 isCXXABIAttributeLegal(tc, gva.getIndices());
113 })
114 .Case<cir::VTableAttr>([&tc](cir::VTableAttr vta) {
115 return tc.isLegal(vta.getType()) &&
116 isCXXABIAttributeLegal(tc, vta.getData());
117 })
118 .Case<cir::TypeInfoAttr>([&tc](cir::TypeInfoAttr tia) {
119 return tc.isLegal(tia.getType()) &&
120 isCXXABIAttributeLegal(tc, tia.getData());
121 })
122 .Case<cir::DynamicCastInfoAttr>([&tc](cir::DynamicCastInfoAttr dcia) {
123 return isCXXABIAttributeLegal(tc, dcia.getSrcRtti()) &&
124 isCXXABIAttributeLegal(tc, dcia.getDestRtti()) &&
125 isCXXABIAttributeLegal(tc, dcia.getRuntimeFunc()) &&
126 isCXXABIAttributeLegal(tc, dcia.getBadCastFunc());
127 })
128 .Case<cir::ConstRecordAttr>([&tc](cir::ConstRecordAttr cra) {
129 return tc.isLegal(cra.getType()) &&
130 isCXXABIAttributeLegal(tc, cra.getMembers());
131 })
132 // We did an audit of all of our attributes (both in OpenACC and CIR), so
133 // it shouldn't be dangerous to consider everything we haven't considered
134 // 'illegal'. Any 'new' attributes will end up asserting in
135 // 'rewriteAttribute' to make sure we consider them here. Otherwise, we
136 // wouldn't discover a problematic new attribute until it contains a
137 // member/method.
138 .Default(false);
139}
140
141mlir::Attribute rewriteAttribute(const mlir::TypeConverter &tc,
142 mlir::MLIRContext *ctx, mlir::Attribute attr) {
143 // If the attribute is legal, there is no reason to rewrite it. This also
144 // filters out 'null' attributes.
145 if (isCXXABIAttributeLegal(tc, attr))
146 return attr;
147
148 // This switch needs to be kept in sync with the potentially-legal type switch
149 // from isCXXABIAttributeLegal. IF we miss any, this will end up causing
150 // verification/transformation issues later, often in the form of
151 // unrealized-conversion-casts.
152
153 return llvm::TypeSwitch<mlir::Attribute, mlir::Attribute>(attr)
154 // These attributes just have a type, so convert just the type.
155 .Case<cir::ZeroAttr>([&tc](cir::ZeroAttr za) {
156 return cir::ZeroAttr::get(tc.convertType(za.getType()));
157 })
158 .Case<cir::PoisonAttr>([&tc](cir::PoisonAttr pa) {
159 return cir::PoisonAttr::get(tc.convertType(pa.getType()));
160 })
161 .Case<cir::UndefAttr>([&tc](cir::UndefAttr uda) {
162 return cir::UndefAttr::get(tc.convertType(uda.getType()));
163 })
164 .Case<mlir::TypeAttr>([&tc](mlir::TypeAttr ta) {
165 return mlir::TypeAttr::get(tc.convertType(ta.getValue()));
166 })
167 .Case<cir::ConstPtrAttr>([&tc](cir::ConstPtrAttr cpa) {
168 return cir::ConstPtrAttr::get(tc.convertType(cpa.getType()),
169 cpa.getValue());
170 })
171 .Case<cir::CXXCtorAttr>([&tc](cir::CXXCtorAttr ca) {
172 return cir::CXXCtorAttr::get(tc.convertType(ca.getType()),
173 ca.getCtorKind(), ca.getIsTrivial());
174 })
175 .Case<cir::CXXDtorAttr>([&tc](cir::CXXDtorAttr da) {
176 return cir::CXXDtorAttr::get(tc.convertType(da.getType()),
177 da.getIsTrivial());
178 })
179 .Case<cir::CXXAssignAttr>([&tc](cir::CXXAssignAttr aa) {
180 return cir::CXXAssignAttr::get(tc.convertType(aa.getType()),
181 aa.getAssignKind(), aa.getIsTrivial());
182 })
183 // Collection attributes need to transform all of the attributes inside of
184 // them.
185 .Case<mlir::ArrayAttr>([&tc, ctx](mlir::ArrayAttr array) {
187 for (mlir::Attribute a : array.getValue())
188 elts.push_back(rewriteAttribute(tc, ctx, a));
189 return mlir::ArrayAttr::get(ctx, elts);
190 })
191 .Case<mlir::DictionaryAttr>([&tc, ctx](mlir::DictionaryAttr dict) {
193 for (mlir::NamedAttribute na : dict.getValue())
194 elts.emplace_back(na.getName(),
195 rewriteAttribute(tc, ctx, na.getValue()));
196
197 return mlir::DictionaryAttr::get(ctx, elts);
198 })
199 // These attributes have sub-attributes that need converting too.
200 .Case<cir::ConstArrayAttr>([&tc, ctx](cir::ConstArrayAttr array) {
201 return cir::ConstArrayAttr::get(
202 ctx, tc.convertType(array.getType()),
203 rewriteAttribute(tc, ctx, array.getElts()),
204 array.getTrailingZerosNum());
205 })
206 .Case<cir::GlobalViewAttr>([&tc, ctx](cir::GlobalViewAttr gva) {
207 return cir::GlobalViewAttr::get(
208 tc.convertType(gva.getType()), gva.getSymbol(),
209 mlir::cast<mlir::ArrayAttr>(
210 rewriteAttribute(tc, ctx, gva.getIndices())));
211 })
212 .Case<cir::VTableAttr>([&tc, ctx](cir::VTableAttr vta) {
213 return cir::VTableAttr::get(
214 tc.convertType(vta.getType()),
215 mlir::cast<mlir::ArrayAttr>(
216 rewriteAttribute(tc, ctx, vta.getData())));
217 })
218 .Case<cir::TypeInfoAttr>([&tc, ctx](cir::TypeInfoAttr tia) {
219 return cir::TypeInfoAttr::get(
220 tc.convertType(tia.getType()),
221 mlir::cast<mlir::ArrayAttr>(
222 rewriteAttribute(tc, ctx, tia.getData())));
223 })
224 .Case<cir::DynamicCastInfoAttr>([&tc,
225 ctx](cir::DynamicCastInfoAttr dcia) {
226 return cir::DynamicCastInfoAttr::get(
227 mlir::cast<cir::GlobalViewAttr>(
228 rewriteAttribute(tc, ctx, dcia.getSrcRtti())),
229 mlir::cast<cir::GlobalViewAttr>(
230 rewriteAttribute(tc, ctx, dcia.getDestRtti())),
231 dcia.getRuntimeFunc(), dcia.getBadCastFunc(), dcia.getOffsetHint());
232 })
233 .Case<cir::ConstRecordAttr>([&tc, ctx](cir::ConstRecordAttr cra) {
234 return cir::ConstRecordAttr::get(
235 ctx, tc.convertType(cra.getType()),
236 mlir::cast<mlir::ArrayAttr>(
237 rewriteAttribute(tc, ctx, cra.getMembers())));
238 })
239 .DefaultUnreachable("unrewritten illegal attribute kind");
240}
241
242#define GET_ABI_LOWERING_PATTERNS
243#include "clang/CIR/Dialect/IR/CIRLowering.inc"
244#undef GET_ABI_LOWERING_PATTERNS
245
/// The CXX ABI lowering pass. Derives from the tablegen'ed pass base; the
/// actual work happens in runOnOperation, defined out-of-line.
struct CXXABILoweringPass
    : public impl::CXXABILoweringBase<CXXABILoweringPass> {
  CXXABILoweringPass() = default;
  void runOnOperation() override;
};
251
252/// A generic ABI lowering rewrite pattern. This conversion pattern matches any
253/// CIR dialect operations with at least one operand or result of an
254/// ABI-dependent type. This conversion pattern rewrites the matched operation
255/// by replacing all its ABI-dependent operands and results with their
256/// lowered counterparts.
257class CIRGenericCXXABILoweringPattern : public mlir::ConversionPattern {
258public:
259 CIRGenericCXXABILoweringPattern(mlir::MLIRContext *context,
260 const mlir::TypeConverter &typeConverter)
261 : mlir::ConversionPattern(typeConverter, MatchAnyOpTypeTag(),
262 /*benefit=*/1, context) {}
263
264 mlir::LogicalResult
265 matchAndRewrite(mlir::Operation *op, llvm::ArrayRef<mlir::Value> operands,
266 mlir::ConversionPatternRewriter &rewriter) const override {
267 // Do not match on operations that have dedicated ABI lowering rewrite rules
268 if (llvm::isa<cir::AllocaOp, cir::BaseDataMemberOp, cir::BaseMethodOp,
269 cir::CastOp, cir::CmpOp, cir::ConstantOp, cir::DeleteArrayOp,
270 cir::DerivedDataMemberOp, cir::DerivedMethodOp, cir::FuncOp,
271 cir::GetMethodOp, cir::GetRuntimeMemberOp, cir::GlobalOp>(op))
272 return mlir::failure();
273
274 const mlir::TypeConverter *typeConverter = getTypeConverter();
275 assert(typeConverter &&
276 "CIRGenericCXXABILoweringPattern requires a type converter");
277 bool operandsAndResultsLegal = typeConverter->isLegal(op);
278 bool regionsLegal =
279 std::all_of(op->getRegions().begin(), op->getRegions().end(),
280 [typeConverter](mlir::Region &region) {
281 return typeConverter->isLegal(&region);
282 });
283 bool attrsLegal =
284 llvm::all_of(op->getAttrs(), [typeConverter](mlir::NamedAttribute na) {
285 return isCXXABIAttributeLegal(*typeConverter, na.getValue());
286 });
287
288 if (operandsAndResultsLegal && regionsLegal && attrsLegal) {
289 // The operation does not have any CXXABI-dependent operands or results,
290 // the match fails.
291 return mlir::failure();
292 }
293
294 mlir::OperationState loweredOpState(op->getLoc(), op->getName());
295 loweredOpState.addOperands(operands);
296 loweredOpState.addSuccessors(op->getSuccessors());
297
298 // Lower all attributes.
299 llvm::SmallVector<mlir::NamedAttribute> attrs;
300 for (const mlir::NamedAttribute &na : op->getAttrs())
301 attrs.push_back(
302 {na.getName(),
303 rewriteAttribute(*typeConverter, op->getContext(), na.getValue())});
304 loweredOpState.addAttributes(attrs);
305
306 // Lower all result types
307 llvm::SmallVector<mlir::Type> loweredResultTypes;
308 loweredResultTypes.reserve(op->getNumResults());
309 for (mlir::Type result : op->getResultTypes())
310 loweredResultTypes.push_back(typeConverter->convertType(result));
311 loweredOpState.addTypes(loweredResultTypes);
312
313 // Lower all regions
314 for (mlir::Region &region : op->getRegions()) {
315 mlir::Region *loweredRegion = loweredOpState.addRegion();
316 rewriter.inlineRegionBefore(region, *loweredRegion, loweredRegion->end());
317 if (mlir::failed(
318 rewriter.convertRegionTypes(loweredRegion, *getTypeConverter())))
319 return mlir::failure();
320 }
321
322 // Clone the operation with lowered operand types and result types
323 mlir::Operation *loweredOp = rewriter.create(loweredOpState);
324
325 rewriter.replaceOp(op, loweredOp);
326 return mlir::success();
327 }
328};
329
330} // namespace
331
332mlir::LogicalResult CIRAllocaOpABILowering::matchAndRewrite(
333 cir::AllocaOp op, OpAdaptor adaptor,
334 mlir::ConversionPatternRewriter &rewriter) const {
335 mlir::Type allocaPtrTy = op.getType();
336 mlir::Type allocaTy = op.getAllocaType();
337 mlir::Type loweredAllocaPtrTy = getTypeConverter()->convertType(allocaPtrTy);
338 mlir::Type loweredAllocaTy = getTypeConverter()->convertType(allocaTy);
339
340 cir::AllocaOp loweredOp = cir::AllocaOp::create(
341 rewriter, op.getLoc(), loweredAllocaPtrTy, loweredAllocaTy, op.getName(),
342 op.getAlignmentAttr(), /*dynAllocSize=*/adaptor.getDynAllocSize());
343 loweredOp.setInit(op.getInit());
344 loweredOp.setConstant(op.getConstant());
345 loweredOp.setAnnotationsAttr(op.getAnnotationsAttr());
346
347 rewriter.replaceOp(op, loweredOp);
348 return mlir::success();
349}
350
351mlir::LogicalResult CIRCastOpABILowering::matchAndRewrite(
352 cir::CastOp op, OpAdaptor adaptor,
353 mlir::ConversionPatternRewriter &rewriter) const {
354 mlir::Type srcTy = op.getSrc().getType();
355
356 if (mlir::isa<cir::DataMemberType, cir::MethodType>(srcTy)) {
357 switch (op.getKind()) {
358 case cir::CastKind::bitcast: {
359 mlir::Type destTy = getTypeConverter()->convertType(op.getType());
360 mlir::Value loweredResult;
361 if (mlir::isa<cir::DataMemberType>(srcTy))
362 loweredResult = lowerModule->getCXXABI().lowerDataMemberBitcast(
363 op, destTy, adaptor.getSrc(), rewriter);
364 else
365 loweredResult = lowerModule->getCXXABI().lowerMethodBitcast(
366 op, destTy, adaptor.getSrc(), rewriter);
367 rewriter.replaceOp(op, loweredResult);
368 return mlir::success();
369 }
370 case cir::CastKind::member_ptr_to_bool: {
371 mlir::Value loweredResult;
372 if (mlir::isa<cir::DataMemberType>(srcTy))
373 loweredResult = lowerModule->getCXXABI().lowerDataMemberToBoolCast(
374 op, adaptor.getSrc(), rewriter);
375 else
376 loweredResult = lowerModule->getCXXABI().lowerMethodToBoolCast(
377 op, adaptor.getSrc(), rewriter);
378 rewriter.replaceOp(op, loweredResult);
379 return mlir::success();
380 }
381 default:
382 break;
383 }
384 }
385
386 mlir::Value loweredResult = cir::CastOp::create(
387 rewriter, op.getLoc(), getTypeConverter()->convertType(op.getType()),
388 adaptor.getKind(), adaptor.getSrc());
389 rewriter.replaceOp(op, loweredResult);
390 return mlir::success();
391}
392
393// Helper function to lower a value for things like an initializer.
394static mlir::TypedAttr lowerInitialValue(const LowerModule *lowerModule,
395 const mlir::DataLayout &layout,
396 const mlir::TypeConverter &tc,
397 mlir::Type ty,
398 mlir::Attribute initVal) {
399 if (mlir::isa<cir::DataMemberType>(ty)) {
400 auto dataMemberVal = mlir::cast_if_present<cir::DataMemberAttr>(initVal);
401 return lowerModule->getCXXABI().lowerDataMemberConstant(dataMemberVal,
402 layout, tc);
403 }
404 if (mlir::isa<cir::MethodType>(ty)) {
405 auto methodVal = mlir::cast_if_present<cir::MethodAttr>(initVal);
406 return lowerModule->getCXXABI().lowerMethodConstant(methodVal, layout, tc);
407 }
408
409 if (auto arrTy = mlir::dyn_cast<cir::ArrayType>(ty)) {
410 auto loweredArrTy = mlir::cast<cir::ArrayType>(tc.convertType(arrTy));
411
412 if (!initVal)
413 return {};
414
415 if (auto zeroVal = mlir::dyn_cast_if_present<cir::ZeroAttr>(initVal))
416 return cir::ZeroAttr::get(loweredArrTy);
417
418 auto arrayVal = mlir::cast<cir::ConstArrayAttr>(initVal);
419
420 // String-literal arrays store their bytes as a StringAttr in `elts`. The
421 // backing i8 element type is never rewritten by the CXX ABI type
422 // converter, so the attribute is already legal and can be passed through
423 // unchanged.
424 if (mlir::isa<mlir::StringAttr>(arrayVal.getElts())) {
425 assert(loweredArrTy == arrTy &&
426 "string-literal array type should not change under CXX ABI");
427 return arrayVal;
428 }
429
430 auto arrayElts = mlir::cast<ArrayAttr>(arrayVal.getElts());
431 SmallVector<mlir::Attribute> loweredElements;
432 loweredElements.reserve(arrTy.getSize());
433 for (const mlir::Attribute &attr : arrayElts) {
434 auto typedAttr = cast<mlir::TypedAttr>(attr);
435 loweredElements.push_back(lowerInitialValue(
436 lowerModule, layout, tc, typedAttr.getType(), typedAttr));
437 }
438
439 return cir::ConstArrayAttr::get(
440 loweredArrTy, mlir::ArrayAttr::get(ty.getContext(), loweredElements),
441 arrayVal.getTrailingZerosNum());
442 }
443
444 if (auto recordTy = mlir::dyn_cast<cir::RecordType>(ty)) {
445 auto convertedTy = mlir::cast<cir::RecordType>(tc.convertType(recordTy));
446
447 if (auto recVal = mlir::dyn_cast_if_present<cir::ZeroAttr>(initVal))
448 return cir::ZeroAttr::get(convertedTy);
449
450 if (auto undefVal = mlir::dyn_cast_if_present<cir::UndefAttr>(initVal))
451 return cir::UndefAttr::get(convertedTy);
452
453 // This might not be possible from Clang directly, but we can get here with
454 // hand-written IR.
455 if (auto poisonVal = mlir::dyn_cast_if_present<cir::PoisonAttr>(initVal))
456 return cir::PoisonAttr::get(convertedTy);
457
458 if (auto recVal =
459 mlir::dyn_cast_if_present<cir::ConstRecordAttr>(initVal)) {
460 auto recordMembers = mlir::cast<ArrayAttr>(recVal.getMembers());
461
462 SmallVector<mlir::Attribute> loweredMembers;
463 loweredMembers.reserve(recordMembers.size());
464
465 for (const mlir::Attribute &attr : recordMembers) {
466 auto typedAttr = cast<mlir::TypedAttr>(attr);
467 loweredMembers.push_back(lowerInitialValue(
468 lowerModule, layout, tc, typedAttr.getType(), typedAttr));
469 }
470
471 return cir::ConstRecordAttr::get(
472 convertedTy, mlir::ArrayAttr::get(ty.getContext(), loweredMembers));
473 }
474
475 assert(!initVal && "Record init val type not handled");
476 return {};
477 }
478
479 // Pointers can contain record types, which can change.
480 if (auto ptrTy = mlir::dyn_cast<cir::PointerType>(ty)) {
481 auto convertedTy = mlir::cast<cir::PointerType>(tc.convertType(ptrTy));
482 // pointers don't change other than their types.
483
484 if (auto gva = mlir::dyn_cast_if_present<cir::GlobalViewAttr>(initVal))
485 return cir::GlobalViewAttr::get(convertedTy, gva.getSymbol(),
486 gva.getIndices());
487
488 auto constPtr = mlir::cast_if_present<cir::ConstPtrAttr>(initVal);
489 if (!constPtr)
490 return {};
491 return cir::ConstPtrAttr::get(convertedTy, constPtr.getValue());
492 }
493
494 assert(ty == tc.convertType(ty) &&
495 "cir.global or constant operand is not an CXXABI-dependent type");
496
497 // Every other type can be left alone.
498 return cast<mlir::TypedAttr>(initVal);
499}
500
501mlir::LogicalResult CIRConstantOpABILowering::matchAndRewrite(
502 cir::ConstantOp op, OpAdaptor adaptor,
503 mlir::ConversionPatternRewriter &rewriter) const {
504
505 mlir::DataLayout layout(op->getParentOfType<mlir::ModuleOp>());
506 mlir::TypedAttr newValue = lowerInitialValue(
507 lowerModule, layout, *getTypeConverter(), op.getType(), op.getValue());
508 rewriter.replaceOpWithNewOp<ConstantOp>(op, newValue);
509 return mlir::success();
510}
511
512mlir::LogicalResult CIRCmpOpABILowering::matchAndRewrite(
513 cir::CmpOp op, OpAdaptor adaptor,
514 mlir::ConversionPatternRewriter &rewriter) const {
515 mlir::Type type = op.getLhs().getType();
516
517 mlir::Value loweredResult;
518 if (mlir::isa<cir::DataMemberType>(type))
519 loweredResult = lowerModule->getCXXABI().lowerDataMemberCmp(
520 op, adaptor.getLhs(), adaptor.getRhs(), rewriter);
521 else if (mlir::isa<cir::MethodType>(type))
522 loweredResult = lowerModule->getCXXABI().lowerMethodCmp(
523 op, adaptor.getLhs(), adaptor.getRhs(), rewriter);
524 else
525 loweredResult = cir::CmpOp::create(
526 rewriter, op.getLoc(), getTypeConverter()->convertType(op.getType()),
527 adaptor.getKind(), adaptor.getLhs(), adaptor.getRhs());
528
529 rewriter.replaceOp(op, loweredResult);
530 return mlir::success();
531}
532
533mlir::LogicalResult CIRFuncOpABILowering::matchAndRewrite(
534 cir::FuncOp op, OpAdaptor adaptor,
535 mlir::ConversionPatternRewriter &rewriter) const {
536 cir::FuncType opFuncType = op.getFunctionType();
537 mlir::TypeConverter::SignatureConversion signatureConversion(
538 opFuncType.getNumInputs());
539
540 for (const auto &[i, argType] : llvm::enumerate(opFuncType.getInputs())) {
541 mlir::Type loweredArgType = getTypeConverter()->convertType(argType);
542 if (!loweredArgType)
543 return mlir::failure();
544 signatureConversion.addInputs(i, loweredArgType);
545 }
546
547 mlir::Type loweredResultType =
548 getTypeConverter()->convertType(opFuncType.getReturnType());
549 if (!loweredResultType)
550 return mlir::failure();
551
552 auto loweredFuncType =
553 cir::FuncType::get(signatureConversion.getConvertedTypes(),
554 loweredResultType, /*isVarArg=*/opFuncType.isVarArg());
555
556 // Create a new cir.func operation for the CXXABI-lowered function.
557 cir::FuncOp loweredFuncOp = rewriter.cloneWithoutRegions(op);
558 loweredFuncOp.setFunctionType(loweredFuncType);
559
561 for (const mlir::NamedAttribute &na : op->getAttrs())
562 attrs.push_back(
563 {na.getName(), rewriteAttribute(*getTypeConverter(), op->getContext(),
564 na.getValue())});
565
566 loweredFuncOp->setAttrs(attrs);
567
568 rewriter.inlineRegionBefore(op.getBody(), loweredFuncOp.getBody(),
569 loweredFuncOp.end());
570 if (mlir::failed(rewriter.convertRegionTypes(
571 &loweredFuncOp.getBody(), *getTypeConverter(), &signatureConversion)))
572 return mlir::failure();
573
574 rewriter.eraseOp(op);
575 return mlir::success();
576}
577
578mlir::LogicalResult CIRGlobalOpABILowering::matchAndRewrite(
579 cir::GlobalOp op, OpAdaptor adaptor,
580 mlir::ConversionPatternRewriter &rewriter) const {
581 mlir::Type ty = op.getSymType();
582 mlir::Type loweredTy = getTypeConverter()->convertType(ty);
583 if (!loweredTy)
584 return mlir::failure();
585
586 mlir::DataLayout layout(op->getParentOfType<mlir::ModuleOp>());
587
588 mlir::Attribute loweredInit = lowerInitialValue(
589 lowerModule, layout, *getTypeConverter(), ty, op.getInitialValueAttr());
590
591 auto newOp = mlir::cast<cir::GlobalOp>(rewriter.clone(*op.getOperation()));
592 newOp.setInitialValueAttr(loweredInit);
593 newOp.setSymType(loweredTy);
594 rewriter.replaceOp(op, newOp);
595 return mlir::success();
596}
597
598mlir::LogicalResult CIRBaseDataMemberOpABILowering::matchAndRewrite(
599 cir::BaseDataMemberOp op, OpAdaptor adaptor,
600 mlir::ConversionPatternRewriter &rewriter) const {
601 mlir::Value loweredResult = lowerModule->getCXXABI().lowerBaseDataMember(
602 op, adaptor.getSrc(), rewriter);
603 rewriter.replaceOp(op, loweredResult);
604 return mlir::success();
605}
606
607mlir::LogicalResult CIRBaseMethodOpABILowering::matchAndRewrite(
608 cir::BaseMethodOp op, OpAdaptor adaptor,
609 mlir::ConversionPatternRewriter &rewriter) const {
610 mlir::Value loweredResult =
611 lowerModule->getCXXABI().lowerBaseMethod(op, adaptor.getSrc(), rewriter);
612 rewriter.replaceOp(op, loweredResult);
613 return mlir::success();
614}
615
616mlir::LogicalResult CIRDeleteArrayOpABILowering::matchAndRewrite(
617 cir::DeleteArrayOp op, OpAdaptor adaptor,
618 mlir::ConversionPatternRewriter &rewriter) const {
619 mlir::FlatSymbolRefAttr deleteFn = op.getDeleteFnAttr();
620 mlir::Location loc = op->getLoc();
621 mlir::Value loweredAddress = adaptor.getAddress();
622
623 cir::UsualDeleteParamsAttr deleteParams = op.getDeleteParams();
624 bool cookieRequired = deleteParams.getSize() || op.getElementDtorAttr();
625
626 if (deleteParams.getTypeAwareDelete() || deleteParams.getDestroyingDelete() ||
627 deleteParams.getAlignment())
628 return rewriter.notifyMatchFailure(
629 op, "type-aware, destroying, or aligned delete not yet supported");
630
631 const CIRCXXABI &cxxABI = lowerModule->getCXXABI();
632 CIRBaseBuilderTy cirBuilder(rewriter);
633
634 // Read the array cookie (or compute the void* pointer for the
635 // non-cookie case) before creating the cleanup scope. The cookie read
636 // produces values that are needed by both the destruction loop in the
637 // body region (numElements for the array.dtor) and the operator
638 // delete[] call in the cleanup region (deletePtr / numElements for the
639 // total-size computation), so it must dominate both regions.
640 mlir::Value deletePtr;
641 mlir::Value numElements;
642 cir::PointerType ptrTy;
643 clang::CharUnits cookieSize;
644 mlir::DataLayout dl(op->getParentOfType<mlir::ModuleOp>());
645 unsigned ptrWidth =
646 lowerModule->getTarget().getPointerWidth(clang::LangAS::Default);
647 cir::IntType sizeTy = cirBuilder.getUIntNTy(ptrWidth);
648
649 if (cookieRequired) {
650 ptrTy = mlir::cast<cir::PointerType>(loweredAddress.getType());
651 cxxABI.readArrayCookie(loc, loweredAddress, dl, cirBuilder, numElements,
652 deletePtr, cookieSize);
653 } else {
654 deletePtr = cir::CastOp::create(rewriter, loc, cirBuilder.getVoidPtrTy(),
655 cir::CastKind::bitcast, loweredAddress);
656 }
657
658 // Create a cleanup scope to wrap the ArrayDtor operation (if needed) and
659 // call the array delete operator from the cleanup region. If no exceptions
660 // are thrown during the array dtor, the normal control flow will call the
661 // delete operator. The ArrayDtor operation will get its own cleanup region
662 // when it is expanded during LoweringPrepare. If an exception is thrown, the
663 // exception handling flow will be connected to the cleanup region here to
664 // call the delete operator on the exception path.
665 mlir::FlatSymbolRefAttr dtorFn = op.getElementDtorAttr();
666 cir::CleanupKind cleanupKind =
667 op.getDtorMayThrow() ? cir::CleanupKind::All : cir::CleanupKind::Normal;
668 cir::CleanupScopeOp::create(
669 rewriter, loc, cleanupKind,
670 /*bodyBuilder=*/
671 [&](mlir::OpBuilder &b, mlir::Location l) {
672 if (dtorFn) {
673 auto eltPtrTy = cir::PointerType::get(ptrTy.getPointee());
674 auto arrayDtor = cir::ArrayDtor::create(
675 b, l, loweredAddress, numElements,
676 [&](mlir::OpBuilder &bb, mlir::Location ll) {
677 mlir::Value arg =
678 bb.getInsertionBlock()->addArgument(eltPtrTy, ll);
679 auto dtorCall = cir::CallOp::create(
680 bb, ll, dtorFn, cir::VoidType(), mlir::ValueRange{arg});
681 if (!op.getDtorMayThrow())
682 dtorCall.setNothrowAttr(bb.getUnitAttr());
683 cir::YieldOp::create(bb, ll);
684 });
685 if (op.getDtorMayThrow())
686 arrayDtor.setDtorMayThrow(true);
687 }
688 cir::YieldOp::create(b, l);
689 },
690 /*cleanupBuilder=*/
691 [&](mlir::OpBuilder &b, mlir::Location l) {
693 callArgs.push_back(deletePtr);
694 if (deleteParams.getSize()) {
695 uint64_t eltSizeBytes = dl.getTypeSizeInBits(ptrTy.getPointee()) / 8;
696 auto eltSizeVal = cir::ConstantOp::create(
697 b, l, cir::IntAttr::get(sizeTy, eltSizeBytes));
698 mlir::Value allocSize =
699 cir::MulOp::create(b, l, sizeTy, eltSizeVal, numElements);
700 auto cookieSizeVal = cir::ConstantOp::create(
701 b, l, cir::IntAttr::get(sizeTy, cookieSize.getQuantity()));
702 allocSize =
703 cir::AddOp::create(b, l, sizeTy, allocSize, cookieSizeVal);
704 callArgs.push_back(allocSize);
705 }
706 auto deleteCall =
707 cir::CallOp::create(b, l, deleteFn, cir::VoidType(), callArgs);
708 // operator delete[] is implicitly nothrow per [basic.stc.dynamic],
709 // matching classic CodeGen's `nounwind` attribute on the call.
710 deleteCall.setNothrowAttr(b.getUnitAttr());
711 cir::YieldOp::create(b, l);
712 });
713
714 rewriter.eraseOp(op);
715 return mlir::success();
716}
717
718mlir::LogicalResult CIRDerivedDataMemberOpABILowering::matchAndRewrite(
719 cir::DerivedDataMemberOp op, OpAdaptor adaptor,
720 mlir::ConversionPatternRewriter &rewriter) const {
721 mlir::Value loweredResult = lowerModule->getCXXABI().lowerDerivedDataMember(
722 op, adaptor.getSrc(), rewriter);
723 rewriter.replaceOp(op, loweredResult);
724 return mlir::success();
725}
726
727mlir::LogicalResult CIRDerivedMethodOpABILowering::matchAndRewrite(
728 cir::DerivedMethodOp op, OpAdaptor adaptor,
729 mlir::ConversionPatternRewriter &rewriter) const {
730 mlir::Value loweredResult = lowerModule->getCXXABI().lowerDerivedMethod(
731 op, adaptor.getSrc(), rewriter);
732 rewriter.replaceOp(op, loweredResult);
733 return mlir::success();
734}
735
736mlir::LogicalResult CIRDynamicCastOpABILowering::matchAndRewrite(
737 cir::DynamicCastOp op, OpAdaptor adaptor,
738 mlir::ConversionPatternRewriter &rewriter) const {
739 mlir::Value loweredResult =
740 lowerModule->getCXXABI().lowerDynamicCast(op, rewriter);
741 rewriter.replaceOp(op, loweredResult);
742 return mlir::success();
743}
744
745mlir::LogicalResult CIRGetMethodOpABILowering::matchAndRewrite(
746 cir::GetMethodOp op, OpAdaptor adaptor,
747 mlir::ConversionPatternRewriter &rewriter) const {
748 mlir::Value callee;
749 mlir::Value thisArg;
750 lowerModule->getCXXABI().lowerGetMethod(
751 op, callee, thisArg, adaptor.getMethod(), adaptor.getObject(), rewriter);
752 rewriter.replaceOp(op, {callee, thisArg});
753 return mlir::success();
754}
755
756mlir::LogicalResult CIRGetRuntimeMemberOpABILowering::matchAndRewrite(
757 cir::GetRuntimeMemberOp op, OpAdaptor adaptor,
758 mlir::ConversionPatternRewriter &rewriter) const {
759 mlir::Type resTy = getTypeConverter()->convertType(op.getType());
760 mlir::Operation *newOp = lowerModule->getCXXABI().lowerGetRuntimeMember(
761 op, resTy, adaptor.getAddr(), adaptor.getMember(), rewriter);
762 rewriter.replaceOp(op, newOp);
763 return mlir::success();
764}
765
766mlir::LogicalResult CIRVTableGetTypeInfoOpABILowering::matchAndRewrite(
767 cir::VTableGetTypeInfoOp op, OpAdaptor adaptor,
768 mlir::ConversionPatternRewriter &rewriter) const {
769 mlir::Value loweredResult =
770 lowerModule->getCXXABI().lowerVTableGetTypeInfo(op, rewriter);
771 rewriter.replaceOp(op, loweredResult);
772 return mlir::success();
773}
774
775namespace {
776// A small type to handle type conversion for the the CXXABILoweringPass.
777// Even though this is a CIR-to-CIR pass, we are eliminating some CIR types.
778// Most importantly, this pass solves recursive type conversion problems by
779// keeping a call stack.
780class CIRABITypeConverter : public mlir::TypeConverter {
781
782 mlir::MLIRContext &context;
783
784 // Recursive structure detection.
785 // We store one entry per thread here, and rely on locking. This works the
786 // same way as the LLVM-IR lowering does it, which has a similar problem.
787 DenseMap<uint64_t, std::unique_ptr<SmallVector<cir::RecordType>>>
788 conversionCallStack;
789 llvm::sys::SmartRWMutex<true> callStackMutex;
790
791 // In order to let us 'change the names' back after the fact, we collect them
792 // along the way. They should only be added/accessed via the thread-safe
793 // functions below.
794 llvm::SmallVector<cir::RecordType> convertedRecordTypes;
795 llvm::sys::SmartRWMutex<true> recordTypeMutex;
796
  // This provides a stack for the RecordTypes being processed on the current
  // thread, which lets us solve recursive conversions. This implementation is
  // cribbed from the LLVMTypeConverter which solves a similar but not identical
  // problem.
  //
  // The returned reference stays valid after the map mutates because the map
  // stores unique_ptr-owned vectors (only the pointers move on rehash).
  SmallVector<cir::RecordType> &getCurrentThreadRecursiveStack() {
    {
      // Most of the time, the entry already exists in the map.
      // The reader lock is constructed deferred and only acquired when the
      // context is actually multithreaded, so single-threaded runs pay
      // nothing.
      std::shared_lock<decltype(callStackMutex)> lock(callStackMutex,
                                                      std::defer_lock);
      if (context.isMultithreadingEnabled())
        lock.lock();
      auto recursiveStack = conversionCallStack.find(llvm::get_threadid());
      if (recursiveStack != conversionCallStack.end())
        return *recursiveStack->second;
    }
    // (reader lock released here before we try to take the writer lock)

    // First time this thread gets here, we have to get an exclusive access to
    // insert in the map
    std::unique_lock<decltype(callStackMutex)> lock(callStackMutex);
    auto recursiveStackInserted = conversionCallStack.insert(
        std::make_pair(llvm::get_threadid(),
                       std::make_unique<SmallVector<cir::RecordType>>()));
    return *recursiveStackInserted.first->second;
  }
821
822 void addConvertedRecordType(cir::RecordType rt) {
823 std::unique_lock<decltype(recordTypeMutex)> lock(recordTypeMutex);
824 convertedRecordTypes.push_back(rt);
825 }
826
827 llvm::SmallVector<mlir::Type> convertRecordMemberTypes(cir::RecordType type) {
828 llvm::SmallVector<mlir::Type> loweredMemberTypes;
829 loweredMemberTypes.reserve(type.getNumElements());
830
831 if (mlir::failed(convertTypes(type.getMembers(), loweredMemberTypes)))
832 return {};
833
834 return loweredMemberTypes;
835 }
836
837 cir::RecordType convertRecordType(cir::RecordType type) {
838 // Unnamed record types can't be referred to recursively, so we can just
839 // convert this one. It also doesn't have uniqueness problems, so we can
840 // just do a conversion on it.
841 if (!type.getName())
842 return cir::RecordType::get(
843 type.getContext(), convertRecordMemberTypes(type), type.getPacked(),
844 type.getPadded(), type.getKind());
845
846 assert(!type.isIncomplete() || type.getMembers().empty());
847
848 // If the type has already been converted, we can just return, since there
849 // is nothing to do. Also, if it is incomplete, it can't have invalid
850 // members! So we can skip transforming it.
851 if (type.isIncomplete() || type.isABIConvertedRecord())
852 return type;
853
854 SmallVectorImpl<cir::RecordType> &recursiveStack =
855 getCurrentThreadRecursiveStack();
856
857 auto convertedType = cir::RecordType::get(
858 type.getContext(), type.getABIConvertedName(), type.getKind());
859
860 // This type has already been converted, just return it.
861 if (convertedType.isComplete())
862 return convertedType;
863
864 // We put the existing 'type' into the vector if we're in the process of
865 // converting it (and pop it when we're done). To prevent recursion,
866 // just return the 'incomplete' version, and the 'top level' version of this
867 // call will call 'complete' on it.
868 if (llvm::is_contained(recursiveStack, type))
869 return convertedType;
870
871 recursiveStack.push_back(type);
872 llvm::scope_exit popConvertingType(
873 [&recursiveStack]() { recursiveStack.pop_back(); });
874
875 SmallVector<mlir::Type> convertedMembers = convertRecordMemberTypes(type);
876
877 convertedType.complete(convertedMembers, type.getPacked(),
878 type.getPadded());
879 addConvertedRecordType(convertedType);
880 return convertedType;
881 }
882
883public:
884 CIRABITypeConverter(mlir::MLIRContext &ctx, mlir::DataLayout &dataLayout,
885 cir::LowerModule &lowerModule)
886 : context(ctx) {
887 addConversion([&](mlir::Type type) -> mlir::Type { return type; });
888 // This is necessary in order to convert CIR pointer types that are
889 // pointing to CIR types that we are lowering in this pass.
890 addConversion([&](cir::PointerType type) -> mlir::Type {
891 mlir::Type loweredPointeeType = convertType(type.getPointee());
892 if (!loweredPointeeType)
893 return {};
894 return cir::PointerType::get(type.getContext(), loweredPointeeType,
895 type.getAddrSpace());
896 });
897 addConversion([&](cir::ArrayType type) -> mlir::Type {
898 mlir::Type loweredElementType = convertType(type.getElementType());
899 if (!loweredElementType)
900 return {};
901 return cir::ArrayType::get(loweredElementType, type.getSize());
902 });
903
904 addConversion([&](cir::DataMemberType type) -> mlir::Type {
905 mlir::Type abiType =
906 lowerModule.getCXXABI().lowerDataMemberType(type, *this);
907 return convertType(abiType);
908 });
909 addConversion([&](cir::MethodType type) -> mlir::Type {
910 mlir::Type abiType = lowerModule.getCXXABI().lowerMethodType(type, *this);
911 return convertType(abiType);
912 });
913 // This is necessary in order to convert CIR function types that have
914 // argument or return types that use CIR types that we are lowering in
915 // this pass.
916 addConversion([&](cir::FuncType type) -> mlir::Type {
917 llvm::SmallVector<mlir::Type> loweredInputTypes;
918 loweredInputTypes.reserve(type.getNumInputs());
919 if (mlir::failed(convertTypes(type.getInputs(), loweredInputTypes)))
920 return {};
921
922 mlir::Type loweredReturnType = convertType(type.getReturnType());
923 if (!loweredReturnType)
924 return {};
925
926 return cir::FuncType::get(loweredInputTypes, loweredReturnType,
927 /*isVarArg=*/type.getVarArg());
928 });
929 addConversion([&](cir::RecordType type) -> mlir::Type {
930 return convertRecordType(type);
931 });
932 }
933
934 void restoreRecordTypeNames() {
935 std::unique_lock<decltype(recordTypeMutex)> lock(recordTypeMutex);
936
937 for (auto rt : convertedRecordTypes)
938 rt.removeABIConversionNamePrefix();
939 }
940};
941} // namespace
942
943static void
944populateCXXABIConversionTarget(mlir::ConversionTarget &target,
945 const mlir::TypeConverter &typeConverter) {
946 target.addLegalOp<mlir::ModuleOp>();
947
948 // The ABI lowering pass is interested in CIR operations with operands or
949 // results of CXXABI-dependent types, or CIR operations with regions whose
950 // block arguments are of CXXABI-dependent types.
951 target.addDynamicallyLegalDialect<cir::CIRDialect>(
952 [&typeConverter](mlir::Operation *op) {
953 if (!typeConverter.isLegal(op))
954 return false;
955
956 bool attrs = llvm::all_of(
957 op->getAttrs(), [&typeConverter](const mlir::NamedAttribute &a) {
958 return isCXXABIAttributeLegal(typeConverter, a.getValue());
959 });
960
961 return attrs &&
962 std::all_of(op->getRegions().begin(), op->getRegions().end(),
963 [&typeConverter](mlir::Region &region) {
964 return typeConverter.isLegal(&region);
965 });
966 });
967
968 target.addDynamicallyLegalDialect<mlir::acc::OpenACCDialect>(
969 [&typeConverter](mlir::Operation *op) {
970 if (!typeConverter.isLegal(op))
971 return false;
972
973 bool attrs = llvm::all_of(
974 op->getAttrs(), [&typeConverter](const mlir::NamedAttribute &a) {
975 return isCXXABIAttributeLegal(typeConverter, a.getValue());
976 });
977
978 return attrs &&
979 std::all_of(op->getRegions().begin(), op->getRegions().end(),
980 [&typeConverter](mlir::Region &region) {
981 return typeConverter.isLegal(&region);
982 });
983 });
984
985 // Some CIR ops needs special checking for legality
986 target.addDynamicallyLegalOp<cir::FuncOp>([&typeConverter](cir::FuncOp op) {
987 bool attrs = llvm::all_of(
988 op->getAttrs(), [&typeConverter](const mlir::NamedAttribute &a) {
989 return isCXXABIAttributeLegal(typeConverter, a.getValue());
990 });
991
992 return attrs && typeConverter.isLegal(op.getFunctionType());
993 });
994 target.addDynamicallyLegalOp<cir::GlobalOp>(
995 [&typeConverter](cir::GlobalOp op) {
996 return typeConverter.isLegal(op.getSymType());
997 });
998 // Operations that do not use any special types must be explicitly marked as
999 // illegal to trigger processing here.
1000 target.addIllegalOp<cir::DeleteArrayOp>();
1001 target.addIllegalOp<cir::DynamicCastOp>();
1002 target.addIllegalOp<cir::VTableGetTypeInfoOp>();
1003}
1004
1005//===----------------------------------------------------------------------===//
1006// The Pass
1007//===----------------------------------------------------------------------===//
1008
1009// The applyPartialConversion function traverses blocks in the dominance order,
// so it does not lower any operations that are not reachable from the
// operations passed in as arguments. Since we do need to lower such code in
// order to avoid verification errors, we cannot just pass the module op
1013// to applyPartialConversion. We must build a set of unreachable ops and
1014// explicitly add them, along with the module, to the vector we pass to
1015// applyPartialConversion.
1016//
1017// For instance, this CIR code:
1018//
1019// cir.func @foo(%arg0: !s32i) -> !s32i {
1020// %4 = cir.cast int_to_bool %arg0 : !s32i -> !cir.bool
1021// cir.if %4 {
1022// %5 = cir.const #cir.int<1> : !s32i
1023// cir.return %5 : !s32i
1024// } else {
1025// %5 = cir.const #cir.int<0> : !s32i
1026// cir.return %5 : !s32i
1027// }
1028// cir.return %arg0 : !s32i
1029// }
1030//
1031// contains an unreachable return operation (the last one). After the CXXABI
// pass it will be placed into an unreachable block. This will error because
// the types in the block will not have been converted, making the legalizer
// fail.
1034//
1035// In the future we may want to get rid of this function and use a DCE pass or
1036// something similar. But for now we need to guarantee the absence of the
1037// dialect verification errors. Note: We do the same in LowerToLLVM as well,
// this is a straight copy/paste including most of the comment. We might wish
1039// to combine these if we don't want to do a DCE pass/etc.
1040static void collectUnreachable(mlir::Operation *parent,
1042
1043 llvm::SmallVector<mlir::Block *> unreachableBlocks;
1044 parent->walk([&](mlir::Block *blk) { // check
1045 if (blk->hasNoPredecessors() && !blk->isEntryBlock())
1046 unreachableBlocks.push_back(blk);
1047 });
1048
1049 std::set<mlir::Block *> visited;
1050 for (mlir::Block *root : unreachableBlocks) {
1051 // We create a work list for each unreachable block.
1052 // Thus we traverse operations in some order.
1053 std::deque<mlir::Block *> workList;
1054 workList.push_back(root);
1055
1056 while (!workList.empty()) {
1057 mlir::Block *blk = workList.back();
1058 workList.pop_back();
1059 if (visited.count(blk))
1060 continue;
1061 visited.emplace(blk);
1062
1063 for (mlir::Operation &op : *blk)
1064 ops.push_back(&op);
1065
1066 for (mlir::Block *succ : blk->getSuccessors())
1067 workList.push_back(succ);
1068 }
1069 }
1070}
1071
1072void CXXABILoweringPass::runOnOperation() {
1073 auto mod = mlir::cast<mlir::ModuleOp>(getOperation());
1074 mlir::MLIRContext *ctx = mod.getContext();
1075
1076 std::unique_ptr<cir::LowerModule> lowerModule = cir::createLowerModule(mod);
1077 // If lower module is not available, skip the ABI lowering pass.
1078 if (!lowerModule) {
1079 mod.emitWarning("Cannot create a CIR lower module, skipping the ")
1080 << getName() << " pass";
1081 return;
1082 }
1083
1084 mlir::DataLayout dataLayout(mod);
1085 CIRABITypeConverter typeConverter(*ctx, dataLayout, *lowerModule);
1086
1087 mlir::RewritePatternSet patterns(ctx);
1088 patterns.add<CIRGenericCXXABILoweringPattern>(patterns.getContext(),
1089 typeConverter);
1090 patterns.add<
1091#define GET_ABI_LOWERING_PATTERNS_LIST
1092#include "clang/CIR/Dialect/IR/CIRLowering.inc"
1093#undef GET_ABI_LOWERING_PATTERNS_LIST
1094 >(patterns.getContext(), typeConverter, dataLayout, *lowerModule);
1095
1096 mlir::ConversionTarget target(*ctx);
1097 populateCXXABIConversionTarget(target, typeConverter);
1098
1099 llvm::SmallVector<mlir::Operation *> ops;
1100 ops.push_back(mod);
1101 collectUnreachable(mod, ops);
1102
1103 if (failed(mlir::applyPartialConversion(ops, target, std::move(patterns))))
1104 signalPassFailure();
1105
1106 typeConverter.restoreRecordTypeNames();
1107}
1108
1109std::unique_ptr<Pass> mlir::createCXXABILoweringPass() {
1110 return std::make_unique<CXXABILoweringPass>();
1111}
#define CXX_ABI_ALWAYS_LEGAL_ATTRS
static void collectUnreachable(mlir::Operation *parent, llvm::SmallVector< mlir::Operation * > &ops)
static void populateCXXABIConversionTarget(mlir::ConversionTarget &target, const mlir::TypeConverter &typeConverter)
static mlir::TypedAttr lowerInitialValue(const LowerModule *lowerModule, const mlir::DataLayout &layout, const mlir::TypeConverter &tc, mlir::Type ty, mlir::Attribute initVal)
__device__ __2f16 b
virtual mlir::Type lowerMethodType(cir::MethodType type, const mlir::TypeConverter &typeConverter) const =0
Lower the given member function pointer type to its ABI type.
void readArrayCookie(mlir::Location loc, mlir::Value elementPtr, const mlir::DataLayout &dataLayout, CIRBaseBuilderTy &builder, mlir::Value &numElements, mlir::Value &allocPtr, clang::CharUnits &cookieSize) const
Read the array cookie for a dynamically-allocated array whose first element is at elementPtr.
Definition CIRCXXABI.cpp:25
virtual mlir::TypedAttr lowerDataMemberConstant(cir::DataMemberAttr attr, const mlir::DataLayout &layout, const mlir::TypeConverter &typeConverter) const =0
Lower the given data member pointer constant to a constant of the ABI type.
virtual mlir::TypedAttr lowerMethodConstant(cir::MethodAttr attr, const mlir::DataLayout &layout, const mlir::TypeConverter &typeConverter) const =0
Lower the given member function pointer constant to a constant of the ABI type.
virtual mlir::Type lowerDataMemberType(cir::DataMemberType type, const mlir::TypeConverter &typeConverter) const =0
Lower the given data member pointer type to its ABI type.
CIRCXXABI & getCXXABI() const
Definition LowerModule.h:46
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
std::unique_ptr< LowerModule > createLowerModule(mlir::ModuleOp module)
const internal::VariadicAllOfMatcher< Attr > attr
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
StringRef getName(const HeaderType T)
Definition HeaderFile.h:38
RangeSelector callArgs(std::string ID)
bool isa(CodeGen::Address addr)
Definition Address.h:330
std::unique_ptr< Pass > createCXXABILoweringPass()
__DEVICE__ _Tp arg(const std::complex< _Tp > &__c)