clang 23.0.0git
CIRGenTypes.cpp
Go to the documentation of this file.
1#include "CIRGenTypes.h"
2
4#include "CIRGenModule.h"
5#include "mlir/IR/BuiltinTypes.h"
6
9#include "clang/AST/Type.h"
12
13#include <cassert>
14
15using namespace clang;
16using namespace clang::CIRGen;
17
19 : cgm(genModule), astContext(genModule.getASTContext()),
20 builder(cgm.getBuilder()), theCXXABI(cgm.getCXXABI()),
21 theABIInfo(cgm.getTargetCIRGenInfo().getABIInfo()) {}
22
24 for (auto i = functionInfos.begin(), e = functionInfos.end(); i != e;)
25 delete &*i++;
26}
27
28mlir::MLIRContext &CIRGenTypes::getMLIRContext() const {
29 return *builder.getContext();
30}
31
32/// Return true if the specified type in a function parameter or result position
33/// can be converted to a CIR type at this point. This boils down to being
34/// whether it is complete, as well as whether we've temporarily deferred
35/// expanding the type because we're in a recursive context.
37 // Some ABIs cannot have their member pointers represented in LLVM IR unless
38 // certain circumstances have been reached, but in CIR we represent member
39 // pointer types abstractly at this point so they are always convertible.
40 if (type->getAs<MemberPointerType>())
41 return true;
42
43 // If this isn't a tag type, we can convert it.
44 const TagType *tagType = type->getAs<TagType>();
45 if (!tagType)
46 return true;
47
48 // Function types involving incomplete class types are problematic in MLIR.
49 return !tagType->isIncompleteType();
50}
51
52/// Code to verify a given function type is complete, i.e. the return type and
53/// all of the parameter types are complete. Also check to see if we are in a
54/// RS_StructPointer context, and if so whether any struct types have been
55/// pended. If so, we don't want to ask the ABI lowering code to handle a type
56/// that cannot be converted to a CIR type.
59 return false;
60
61 if (const auto *fpt = dyn_cast<FunctionProtoType>(ft))
62 for (unsigned i = 0, e = fpt->getNumParams(); i != e; i++)
63 if (!isFuncParamTypeConvertible(fpt->getParamType(i)))
64 return false;
65
66 return true;
67}
68
69mlir::Type CIRGenTypes::convertFunctionTypeInternal(QualType qft) {
70 assert(qft.isCanonical());
72
73 // In classic codegen, if the function type depends on an incomplete type
74 // (e.g. a struct or enum), it cannot lower the function type due to ABI
75 // handling requirements and returns a placeholder. In CIR, ABI handling is
76 // deferred until after codegen, and record types are identified by name, so
77 // incomplete record type references in the function type will automatically
78 // see the complete type once the record is defined. We can always produce a
79 // proper function type here.
80
81 const CIRGenFunctionInfo *fi;
82 if (const auto *fpt = dyn_cast<FunctionProtoType>(ft)) {
85 } else {
89 }
90
91 mlir::Type resultType = getFunctionType(*fi);
92
93 return resultType;
94}
95
96// This is CIR's version of CodeGenTypes::addRecordTypeName. It isn't shareable
97// because CIR has different uniquing requirements.
99 StringRef suffix) {
100 llvm::SmallString<256> typeName;
101 llvm::raw_svector_ostream outStream(typeName);
102
103 PrintingPolicy policy = recordDecl->getASTContext().getPrintingPolicy();
107 policy.PrintAsCanonical = true;
108 policy.SuppressTagKeyword = true;
109
110 if (recordDecl->getIdentifier())
111 QualType(astContext.getCanonicalTagType(recordDecl))
112 .print(outStream, policy);
113 else if (auto *typedefNameDecl = recordDecl->getTypedefNameForAnonDecl())
114 typedefNameDecl->printQualifiedName(outStream, policy);
115 else
116 outStream << builder.getUniqueAnonRecordName();
117
118 if (!suffix.empty())
119 outStream << suffix;
120
121 return builder.getUniqueRecordName(std::string(typeName));
122}
123
124/// Return true if the specified type is already completely laid out.
126 const auto it = recordDeclTypes.find(ty);
127 return it != recordDeclTypes.end() && it->second.isComplete();
128}
129
130// We have multiple forms of this function that call each other, so we need to
131// declare one in advance.
132static bool
134 llvm::SmallPtrSetImpl<const RecordDecl *> &alreadyChecked);
135
136/// Return true if it is safe to convert the specified record decl to CIR and
137/// lay it out, false if doing so would cause us to get into a recursive
138/// compilation mess.
139static bool
141 llvm::SmallPtrSetImpl<const RecordDecl *> &alreadyChecked) {
142 // If we have already checked this type (maybe the same type is used by-value
143 // multiple times in multiple record fields, don't check again.
144 if (!alreadyChecked.insert(rd).second)
145 return true;
146
147 assert(rd->isCompleteDefinition() &&
148 "Expect RecordDecl to be CompleteDefinition");
149 const Type *key = cgt.getASTContext().getCanonicalTagType(rd).getTypePtr();
150
151 // If this type is already laid out, converting it is a noop.
152 if (cgt.isRecordLayoutComplete(key))
153 return true;
154
155 // If this type is currently being laid out, we can't recursively compile it.
156 if (cgt.isRecordBeingLaidOut(key))
157 return false;
158
159 // If this type would require laying out bases that are currently being laid
160 // out, don't do it. This includes virtual base classes which get laid out
161 // when a class is translated, even though they aren't embedded by-value into
162 // the class.
163 if (const CXXRecordDecl *crd = dyn_cast<CXXRecordDecl>(rd)) {
164 for (const clang::CXXBaseSpecifier &i : crd->bases())
165 if (!isSafeToConvert(i.getType()
166 ->castAs<RecordType>()
167 ->getDecl()
168 ->getDefinitionOrSelf(),
169 cgt, alreadyChecked))
170 return false;
171 }
172
173 // If this type would require laying out members that are currently being laid
174 // out, don't do it.
175 for (const FieldDecl *field : rd->fields())
176 if (!isSafeToConvert(field->getType(), cgt, alreadyChecked))
177 return false;
178
179 // If there are no problems, lets do it.
180 return true;
181}
182
183/// Return true if it is safe to convert this field type, which requires the
184/// record elements contained by-value to all be recursively safe to convert.
185static bool
187 llvm::SmallPtrSetImpl<const RecordDecl *> &alreadyChecked) {
188 // Strip off atomic type sugar.
189 if (const auto *at = qt->getAs<AtomicType>())
190 qt = at->getValueType();
191
192 // If this is a record, check it.
193 if (const auto *rd = qt->getAsRecordDecl())
194 return isSafeToConvert(rd, cgt, alreadyChecked);
195
196 // If this is an array, check the elements, which are embedded inline.
197 if (const auto *at = cgt.getASTContext().getAsArrayType(qt))
198 return isSafeToConvert(at->getElementType(), cgt, alreadyChecked);
199
200 // Otherwise, there is no concern about transforming this. We only care about
201 // things that are contained by-value in a record that can have another
202 // record as a member.
203 return true;
204}
205
206// Return true if it is safe to convert the specified record decl to CIR and lay
207// it out, false if doing so would cause us to get into a recursive compilation
208// mess.
209static bool isSafeToConvert(const RecordDecl *rd, CIRGenTypes &cgt) {
210 // If no records are being laid out, we can certainly do this one.
211 if (cgt.noRecordsBeingLaidOut())
212 return true;
213
215 return isSafeToConvert(rd, cgt, alreadyChecked);
216}
217
218/// Lay out a tagged decl type like struct or union.
220 // TagDecl's are not necessarily unique, instead use the (clang) type
221 // connected to the decl.
222 const Type *key = astContext.getCanonicalTagType(rd).getTypePtr();
223 cir::RecordType entry = recordDeclTypes[key];
224
225 // If we don't have an entry for this record yet, create one.
226 // We create an incomplete type initially. If `rd` is complete, we will
227 // add the members below.
228 if (!entry) {
229 auto name = getRecordTypeName(rd, "");
230 entry = builder.getIncompleteRecordTy(name, rd);
231 recordDeclTypes[key] = entry;
232 }
233
234 rd = rd->getDefinition();
235 if (!rd || !rd->isCompleteDefinition() || entry.isComplete())
236 return entry;
237
238 // If converting this type would cause us to infinitely loop, don't do it!
239 if (!isSafeToConvert(rd, *this)) {
240 deferredRecords.push_back(rd);
241 return entry;
242 }
243
244 // Okay, this is a definition of a type. Compile the implementation now.
245 bool insertResult = recordsBeingLaidOut.insert(key).second;
246 (void)insertResult;
247 assert(insertResult && "isSafeToCovert() should have caught this.");
248
249 // Force conversion of non-virtual base classes recursively.
250 if (const auto *cxxRecordDecl = dyn_cast<CXXRecordDecl>(rd)) {
251 for (const auto &base : cxxRecordDecl->bases()) {
252 if (base.isVirtual())
253 continue;
254 convertRecordDeclType(base.getType()->castAsRecordDecl());
255 }
256 }
257
258 // Layout fields.
259 std::unique_ptr<CIRGenRecordLayout> layout = computeRecordLayout(rd, &entry);
260 recordDeclTypes[key] = entry;
261 cirGenRecordLayouts[key] = std::move(layout);
262
263 // We're done laying out this record.
264 bool eraseResult = recordsBeingLaidOut.erase(key);
265 (void)eraseResult;
266 assert(eraseResult && "record not in RecordsBeingLaidOut set?");
267
268 // If we're done converting the outer-most record, then convert any deferred
269 // records as well.
270 if (recordsBeingLaidOut.empty())
271 while (!deferredRecords.empty())
272 convertRecordDeclType(deferredRecords.pop_back_val());
273
274 return entry;
275}
276
278 type = astContext.getCanonicalType(type);
279 const Type *ty = type.getTypePtr();
280
281 // Process record types before the type cache lookup.
282 if (const auto *recordType = dyn_cast<RecordType>(type))
283 return convertRecordDeclType(recordType->getDecl()->getDefinitionOrSelf());
284
285 // Has the type already been processed?
286 TypeCacheTy::iterator tci = typeCache.find(ty);
287 if (tci != typeCache.end())
288 return tci->second;
289
290 // For types that haven't been implemented yet or are otherwise unsupported,
291 // report an error and return 'int'.
292
293 mlir::Type resultType = nullptr;
294 switch (ty->getTypeClass()) {
295 case Type::Record:
296 llvm_unreachable("Should have been handled above");
297
298 case Type::Builtin: {
299 switch (cast<BuiltinType>(ty)->getKind()) {
300 // void
301 case BuiltinType::Void:
302 resultType = cgm.voidTy;
303 break;
304
305 // bool
306 case BuiltinType::Bool:
307 resultType = cir::BoolType::get(&getMLIRContext());
308 break;
309
310 // Signed integral types.
311 case BuiltinType::Char_S:
312 case BuiltinType::Int:
313 case BuiltinType::Int128:
314 case BuiltinType::Long:
315 case BuiltinType::LongLong:
316 case BuiltinType::SChar:
317 case BuiltinType::Short:
318 case BuiltinType::WChar_S:
319 resultType =
320 cir::IntType::get(&getMLIRContext(), astContext.getTypeSize(ty),
321 /*isSigned=*/true);
322 break;
323
324 // SVE types
325 case BuiltinType::SveInt8:
326 resultType =
327 cir::VectorType::get(builder.getSInt8Ty(), 16, /*is_scalable=*/true);
328 break;
329 case BuiltinType::SveUint8:
330 resultType =
331 cir::VectorType::get(builder.getUInt8Ty(), 16, /*is_scalable=*/true);
332 break;
333 case BuiltinType::SveInt16:
334 resultType =
335 cir::VectorType::get(builder.getSInt16Ty(), 8, /*is_scalable=*/true);
336 break;
337 case BuiltinType::SveUint16:
338 resultType =
339 cir::VectorType::get(builder.getUInt16Ty(), 8, /*is_scalable=*/true);
340 break;
341 case BuiltinType::SveFloat16:
342 resultType = cir::VectorType::get(builder.getFp16Ty(), 8,
343 /*is_scalable=*/true);
344 break;
345 case BuiltinType::SveBFloat16:
346 resultType = cir::VectorType::get(builder.getFp16Ty(), 8,
347 /*is_scalable=*/true);
348 break;
349 case BuiltinType::SveInt32:
350 resultType =
351 cir::VectorType::get(builder.getSInt32Ty(), 4, /*is_scalable=*/true);
352 break;
353 case BuiltinType::SveUint32:
354 resultType =
355 cir::VectorType::get(builder.getUInt32Ty(), 4, /*is_scalable=*/true);
356 break;
357 case BuiltinType::SveFloat32:
358 resultType = cir::VectorType::get(builder.getSingleTy(), 4,
359 /*is_scalable=*/true);
360 break;
361 case BuiltinType::SveInt64:
362 resultType =
363 cir::VectorType::get(builder.getSInt64Ty(), 2, /*is_scalable=*/true);
364 break;
365 case BuiltinType::SveUint64:
366 resultType =
367 cir::VectorType::get(builder.getUInt64Ty(), 2, /*is_scalable=*/true);
368 break;
369 case BuiltinType::SveFloat64:
370 resultType = cir::VectorType::get(builder.getDoubleTy(), 2,
371 /*is_scalable=*/true);
372 break;
373 case BuiltinType::SveBool:
374 resultType = cir::VectorType::get(builder.getUIntNTy(1), 16,
375 /*is_scalable=*/true);
376 break;
377
378 // Unsigned integral types.
379 case BuiltinType::Char8:
380 case BuiltinType::Char16:
381 case BuiltinType::Char32:
382 case BuiltinType::Char_U:
383 case BuiltinType::UChar:
384 case BuiltinType::UInt:
385 case BuiltinType::UInt128:
386 case BuiltinType::ULong:
387 case BuiltinType::ULongLong:
388 case BuiltinType::UShort:
389 case BuiltinType::WChar_U:
390 resultType =
391 cir::IntType::get(&getMLIRContext(), astContext.getTypeSize(ty),
392 /*isSigned=*/false);
393 break;
394
395 // Floating-point types
396 case BuiltinType::Float16:
397 resultType = cgm.fP16Ty;
398 break;
399 case BuiltinType::Half:
400 if (astContext.getLangOpts().NativeHalfType ||
401 !astContext.getTargetInfo().useFP16ConversionIntrinsics()) {
402 resultType = cgm.fP16Ty;
403 } else {
404 cgm.errorNYI(SourceLocation(), "processing of built-in type", type);
405 resultType = cgm.sInt32Ty;
406 }
407 break;
408 case BuiltinType::BFloat16:
409 resultType = cgm.bFloat16Ty;
410 break;
411 case BuiltinType::Float:
412 assert(&astContext.getFloatTypeSemantics(type) ==
413 &llvm::APFloat::IEEEsingle() &&
414 "ClangIR NYI: 'float' in a format other than IEEE 32-bit");
415 resultType = cgm.floatTy;
416 break;
417 case BuiltinType::Double:
418 assert(&astContext.getFloatTypeSemantics(type) ==
419 &llvm::APFloat::IEEEdouble() &&
420 "ClangIR NYI: 'double' in a format other than IEEE 64-bit");
421 resultType = cgm.doubleTy;
422 break;
423 case BuiltinType::LongDouble:
424 resultType =
425 builder.getLongDoubleTy(astContext.getFloatTypeSemantics(type));
426 break;
427 case BuiltinType::Float128:
428 resultType = cgm.fP128Ty;
429 break;
430 case BuiltinType::Ibm128:
431 cgm.errorNYI(SourceLocation(), "processing of built-in type", type);
432 resultType = cgm.sInt32Ty;
433 break;
434
435 case BuiltinType::NullPtr:
436 // Add proper CIR type for it? this looks mostly useful for sema related
437 // things (like for overloads accepting void), for now, given that
438 // `sizeof(std::nullptr_t)` is equal to `sizeof(void *)`, model
439 // std::nullptr_t as !cir.ptr<!void>
440 resultType = builder.getVoidPtrTy();
441 break;
442
443 default:
444 cgm.errorNYI(SourceLocation(), "processing of built-in type", type);
445 resultType = cgm.sInt32Ty;
446 break;
447 }
448 break;
449 }
450
451 case Type::Complex: {
452 const auto *ct = cast<clang::ComplexType>(ty);
453 mlir::Type elementTy = convertType(ct->getElementType());
454 resultType = cir::ComplexType::get(elementTy);
455 break;
456 }
457
458 case Type::LValueReference:
459 case Type::RValueReference: {
460 const ReferenceType *refTy = cast<ReferenceType>(ty);
461 QualType elemTy = refTy->getPointeeType();
462 auto pointeeType = convertTypeForMem(elemTy);
463 resultType = builder.getPointerTo(pointeeType, elemTy.getAddressSpace());
464 assert(resultType && "Cannot get pointer type?");
465 break;
466 }
467
468 case Type::Pointer: {
469 const PointerType *ptrTy = cast<PointerType>(ty);
470 QualType elemTy = ptrTy->getPointeeType();
471 assert(!elemTy->isConstantMatrixType() && "not implemented");
472
473 mlir::Type pointeeType = convertType(elemTy);
474
475 resultType = builder.getPointerTo(pointeeType, elemTy.getAddressSpace());
476 break;
477 }
478
479 case Type::VariableArray: {
481 if (a->getIndexTypeCVRQualifiers() != 0)
482 cgm.errorNYI(SourceLocation(), "non trivial array types", type);
483 // VLAs resolve to the innermost element type; this matches
484 // the return of alloca, and there isn't any obviously better choice.
485 resultType = convertTypeForMem(a->getElementType());
486 break;
487 }
488
489 case Type::IncompleteArray: {
491 if (arrTy->getIndexTypeCVRQualifiers() != 0)
492 cgm.errorNYI(SourceLocation(), "non trivial array types", type);
493
494 mlir::Type elemTy = convertTypeForMem(arrTy->getElementType());
495 // int X[] -> [0 x int], unless the element type is not sized. If it is
496 // unsized (e.g. an incomplete record) just use [0 x i8].
497 if (!cir::isSized(elemTy)) {
498 elemTy = cgm.sInt8Ty;
499 }
500
501 resultType = cir::ArrayType::get(elemTy, 0);
502 break;
503 }
504
505 case Type::ConstantArray: {
507 mlir::Type elemTy = convertTypeForMem(arrTy->getElementType());
508 // In classic codegen, arrays of unsized types which it assumes are "arrays
509 // of undefined struct type" are lowered to arrays of i8 "just to have a
510 // concrete type", but in CIR, we can get here with abstract types like
511 // !cir.method and !cir.data_member, so we just create an array of the type
512 // and handle it during lowering if we still don't have a sized type.
513 resultType = cir::ArrayType::get(elemTy, arrTy->getSize().getZExtValue());
514 break;
515 }
516
517 case Type::ExtVector:
518 case Type::Vector: {
519 const VectorType *vec = cast<VectorType>(ty);
520 const mlir::Type elemTy = convertType(vec->getElementType());
521 resultType = cir::VectorType::get(elemTy, vec->getNumElements());
522 break;
523 }
524
525 case Type::Enum: {
526 const auto *ed = ty->castAsEnumDecl();
527 if (auto integerType = ed->getIntegerType(); !integerType.isNull())
528 return convertType(integerType);
529 // Return a placeholder 'i32' type. This can be changed later when the
530 // type is defined (see UpdateCompletedType), but is likely to be the
531 // "right" answer.
532 resultType = cgm.uInt32Ty;
533 break;
534 }
535
536 case Type::MemberPointer: {
537 const auto *mpt = cast<MemberPointerType>(ty);
538
539 NestedNameSpecifier mptNNS = mpt->getQualifier();
540 auto clsTy = mlir::cast<cir::RecordType>(
541 convertType(QualType(mptNNS.getAsType(), 0)));
542 if (mpt->isMemberDataPointer()) {
543 mlir::Type memberTy = convertType(mpt->getPointeeType());
544 resultType = cir::DataMemberType::get(memberTy, clsTy);
545 } else {
546 auto memberFuncTy = getFunctionType(cgm.getTypes().arrangeCXXMethodType(
547 mptNNS.getAsRecordDecl(),
548 mpt->getPointeeType()->getAs<clang::FunctionProtoType>(),
549 /*methodDecl=*/nullptr));
550 resultType = cir::MethodType::get(memberFuncTy, clsTy);
551 }
552 break;
553 }
554
555 case Type::FunctionNoProto:
556 case Type::FunctionProto:
557 resultType = convertFunctionTypeInternal(type);
558 break;
559
560 case Type::BitInt: {
561 const auto *bitIntTy = cast<BitIntType>(type);
562 if (bitIntTy->getNumBits() > cir::IntType::maxBitwidth()) {
563 cgm.errorNYI(SourceLocation(), "large _BitInt type", type);
564 resultType = cgm.sInt32Ty;
565 } else {
566 resultType = cir::IntType::get(&getMLIRContext(), bitIntTy->getNumBits(),
567 bitIntTy->isSigned());
568 }
569 break;
570 }
571
572 case Type::Atomic: {
573 QualType valueType = cast<AtomicType>(ty)->getValueType();
574 resultType = convertTypeForMem(valueType);
575
576 // Pad out to the inflated size if necessary.
577 uint64_t valueSize = astContext.getTypeSize(valueType);
578 uint64_t atomicSize = astContext.getTypeSize(ty);
579 if (valueSize != atomicSize) {
580 cgm.errorNYI("convertType: atomic type value size != atomic size");
581 }
582
583 break;
584 }
585
586 default:
587 cgm.errorNYI(SourceLocation(), "processing of type",
588 type->getTypeClassName());
589 resultType = cgm.sInt32Ty;
590 break;
591 }
592
593 assert(resultType && "Type conversion not yet implemented");
594
595 typeCache[ty] = resultType;
596 return resultType;
597}
598
600 bool forBitField) {
601 if (qualType->isConstantMatrixType()) {
602 cgm.errorNYI("Matrix type conversion");
603 return cgm.sInt32Ty;
604 }
605
606 mlir::Type convertedType = convertType(qualType);
607
608 assert(!forBitField && "Bit fields NYI");
609
610 // If this is a bit-precise integer type in a bitfield representation, map
611 // this integer to the target-specified size.
612 if (forBitField && qualType->isBitIntType())
613 assert(!qualType->isBitIntType() && "Bit field with type _BitInt NYI");
614
615 return convertedType;
616}
617
618/// Return record layout info for the given record decl.
619const CIRGenRecordLayout &
621 const auto *key = astContext.getCanonicalTagType(rd).getTypePtr();
622
623 // If we have already computed the layout, return it.
624 auto it = cirGenRecordLayouts.find(key);
625 if (it != cirGenRecordLayouts.end())
626 return *it->second;
627
628 // Compute the type information.
630
631 // Now try again.
632 it = cirGenRecordLayouts.find(key);
633
634 assert(it != cirGenRecordLayouts.end() &&
635 "Unable to find record layout information for type");
636 return *it->second;
637}
638
640 if (t->getAs<PointerType>())
641 return astContext.getTargetNullPointerValue(t) == 0;
642
643 if (const auto *at = astContext.getAsArrayType(t)) {
645 return true;
646
647 if (const auto *cat = dyn_cast<ConstantArrayType>(at))
648 if (astContext.getConstantArrayElementCount(cat) == 0)
649 return true;
650 }
651
652 if (const auto *rd = t->getAsRecordDecl())
653 return isZeroInitializable(rd);
654
655 if (t->getAs<MemberPointerType>()) {
656 cgm.errorNYI(SourceLocation(), "isZeroInitializable for MemberPointerType",
657 t);
658 return false;
659 }
660
661 return true;
662}
663
667
669 CanQualType returnType, bool isInstanceMethod,
671 RequiredArgs required) {
672 assert(llvm::all_of(argTypes,
673 [](CanQualType t) { return t.isCanonicalAsParam(); }));
674 // Lookup or create unique function info.
675 llvm::FoldingSetNodeID id;
676 CIRGenFunctionInfo::Profile(id, isInstanceMethod, info, required, returnType,
677 argTypes);
678
679 void *insertPos = nullptr;
680 CIRGenFunctionInfo *fi = functionInfos.FindNodeOrInsertPos(id, insertPos);
681 if (fi) {
682 // We found a matching function info based on id. These asserts verify that
683 // it really is a match.
684 assert(
685 fi->getReturnType() == returnType &&
686 std::equal(fi->argTypesBegin(), fi->argTypesEnd(), argTypes.begin()) &&
687 "Bad match based on CIRGenFunctionInfo folding set id");
688 return *fi;
689 }
690
692
693 // Construction the function info. We co-allocate the ArgInfos.
694 fi = CIRGenFunctionInfo::create(info, isInstanceMethod, returnType, argTypes,
695 required);
696 functionInfos.InsertNode(fi, insertPos);
697
698 return *fi;
699}
700
702 assert(!dyn_cast<ObjCMethodDecl>(gd.getDecl()) &&
703 "This is reported as a FIXME in LLVM codegen");
704 const auto *fd = cast<FunctionDecl>(gd.getDecl());
705
709
711}
712
713// When we find the full definition for a TagDecl, replace the 'opaque' type we
714// previously made for it if applicable.
716 // If this is an enum being completed, then we flush all non-struct types
717 // from the cache. This allows function types and other things that may be
718 // derived from the enum to be recomputed.
719 if ([[maybe_unused]] const auto *ed = dyn_cast<EnumDecl>(td)) {
720 // Classic codegen clears the type cache if it contains an entry for this
721 // enum type that doesn't use i32 as the underlying type, but I can't find
722 // a test case that meets that condition. C++ doesn't allow forward
723 // declaration of enums, and C doesn't allow an incomplete forward
724 // declaration with a non-default type.
725 assert(
726 !typeCache.count(
727 ed->getASTContext().getCanonicalTagType(ed)->getTypePtr()) ||
728 (convertType(ed->getIntegerType()) ==
729 typeCache[ed->getASTContext().getCanonicalTagType(ed)->getTypePtr()]));
730 // If necessary, provide the full definition of a type only used with a
731 // declaration so far.
733 return;
734 }
735
736 // If we completed a RecordDecl that we previously used and converted to an
737 // anonymous type, then go ahead and complete it now.
738 const auto *rd = cast<RecordDecl>(td);
739 if (rd->isDependentType())
740 return;
741
742 // Only complete if we converted it already. If we haven't converted it yet,
743 // we'll just do it lazily.
744 if (recordDeclTypes.count(astContext.getCanonicalTagType(rd).getTypePtr()))
746
747 // If necessary, provide the full definition of a type only used with a
748 // declaration so far.
750}
751
753 // Return the address space for the type. If the type is a
754 // function type without an address space qualifier, the
755 // program address space is used. Otherwise, the target picks
756 // the best address space based on the type information
757 return ty->isFunctionType() && !ty.hasAddressSpace()
758 ? cgm.getDataLayout().getProgramAddressSpace()
760}
Defines the clang::ASTContext interface.
static bool isSafeToConvert(QualType qt, CIRGenTypes &cgt, llvm::SmallPtrSetImpl< const RecordDecl * > &alreadyChecked)
Return true if it is safe to convert this field type, which requires the record elements contained by-value to all be recursively safe to convert.
static Decl::Kind getKind(const Decl *D)
C Language Family Type Representation.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CanQualType getCanonicalTagType(const TagDecl *TD) const
unsigned getTargetAddressSpace(LangAS AS) const
QualType getElementType() const
Definition TypeBase.h:3742
unsigned getIndexTypeCVRQualifiers() const
Definition TypeBase.h:3752
const_arg_iterator argTypesEnd() const
static CIRGenFunctionInfo * create(FunctionType::ExtInfo info, bool instanceMethod, CanQualType resultType, llvm::ArrayRef< CanQualType > argTypes, RequiredArgs required)
static void Profile(llvm::FoldingSetNodeID &id, bool instanceMethod, FunctionType::ExtInfo info, RequiredArgs required, CanQualType resultType, llvm::ArrayRef< CanQualType > argTypes)
const_arg_iterator argTypesBegin() const
This class organizes the cross-function state that is used while generating CIR code.
This class handles record and union layout info while lowering AST types to CIR types.
bool isZeroInitializable() const
Check whether this struct can be C++ zero-initialized with a zeroinitializer.
This class organizes the cross-module state that is used while lowering AST types to CIR types.
Definition CIRGenTypes.h:48
const CIRGenFunctionInfo & arrangeGlobalDeclaration(GlobalDecl gd)
unsigned getTargetAddressSpace(QualType ty) const
const CIRGenFunctionInfo & arrangeCXXStructorDeclaration(clang::GlobalDecl gd)
const CIRGenFunctionInfo & arrangeCIRFunctionInfo(CanQualType returnType, bool isInstanceMethod, llvm::ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, RequiredArgs required)
const CIRGenFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > fpt)
bool isZeroInitializable(clang::QualType ty)
Return whether a type can be zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
bool isFuncTypeConvertible(const clang::FunctionType *ft)
Utility to check whether a function type can be converted to a CIR type (i.e.
CIRGenTypes(CIRGenModule &cgm)
bool isRecordBeingLaidOut(const clang::Type *ty) const
CIRGenBuilderTy & getBuilder() const
Definition CIRGenTypes.h:81
mlir::MLIRContext & getMLIRContext() const
cir::FuncType getFunctionType(const CIRGenFunctionInfo &info)
Get the CIR function type for.
bool isFuncParamTypeConvertible(clang::QualType type)
Return true if the specified type in a function parameter or result position can be converted to a CIR type at this point.
void updateCompletedType(const clang::TagDecl *td)
UpdateCompletedType - when we find the full definition for a TagDecl, replace the 'opaque' type we previously made for it if applicable.
std::string getRecordTypeName(const clang::RecordDecl *, llvm::StringRef suffix)
bool noRecordsBeingLaidOut() const
const ABIInfo & getABIInfo() const
const CIRGenFunctionInfo & arrangeFunctionDeclaration(const clang::FunctionDecl *fd)
Free functions are functions that are compatible with an ordinary C function pointer type.
clang::ASTContext & getASTContext() const
bool isRecordLayoutComplete(const clang::Type *ty) const
Return true if the specified type is already completely laid out.
mlir::Type convertType(clang::QualType type)
Convert a Clang type into a mlir::Type.
const CIRGenRecordLayout & getCIRGenRecordLayout(const clang::RecordDecl *rd)
Return record layout info for the given record decl.
std::unique_ptr< CIRGenRecordLayout > computeRecordLayout(const clang::RecordDecl *rd, cir::RecordType *ty)
mlir::Type convertRecordDeclType(const clang::RecordDecl *recordDecl)
Lay out a tagged decl type like struct or union.
mlir::Type convertTypeForMem(clang::QualType, bool forBitField=false)
Convert type T into an mlir::Type.
A class for recording the number of arguments that a function signature requires.
Represents a base class of a C++ class.
Definition DeclCXX.h:146
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
static CanQual< T > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
bool isCanonicalAsParam() const
Determines if this canonical type is furthermore canonical as a parameter.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
Represents the canonical version of C arrays with a specified constant size.
Definition TypeBase.h:3768
llvm::APInt getSize() const
Return the constant array size as an APInt.
Definition TypeBase.h:3824
Represents a member of a struct/union/class.
Definition Decl.h:3160
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
Definition TypeBase.h:4893
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5315
A class which abstracts out some details necessary for making a call.
Definition TypeBase.h:4622
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition TypeBase.h:4511
QualType getReturnType() const
Definition TypeBase.h:4851
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
Represents a C array with an unspecified size.
Definition TypeBase.h:3917
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3661
Represents a C++ nested name specifier, such as "\::std::vector<int>::".
CXXRecordDecl * getAsRecordDecl() const
Retrieve the record declaration stored in this nested name specifier, or null.
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3336
QualType getPointeeType() const
Definition TypeBase.h:3346
A (possibly-)qualified type.
Definition TypeBase.h:937
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8388
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8514
void print(raw_ostream &OS, const PrintingPolicy &Policy, const Twine &PlaceHolder=Twine(), unsigned Indentation=0) const
bool hasAddressSpace() const
Check if this type has any address space qualifier.
Definition TypeBase.h:8509
bool isCanonical() const
Definition TypeBase.h:8445
Represents a struct/union/class.
Definition Decl.h:4327
field_range fields() const
Definition Decl.h:4530
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
Definition Decl.h:4511
Base for LValueReferenceType and RValueReferenceType.
Definition TypeBase.h:3581
QualType getPointeeType() const
Definition TypeBase.h:3599
Encodes a location in the source.
Represents the declaration of a struct/union/class/enum.
Definition Decl.h:3717
bool isCompleteDefinition() const
Return true if this decl has its body fully specified.
Definition Decl.h:3818
The base class of the type hierarchy.
Definition TypeBase.h:1839
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isConstantMatrixType() const
Definition TypeBase.h:8792
EnumDecl * castAsEnumDecl() const
Definition Type.h:59
bool isFunctionType() const
Definition TypeBase.h:8621
TypeClass getTypeClass() const
Definition TypeBase.h:2391
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9218
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3974
Represents a GCC generic vector type.
Definition TypeBase.h:4183
unsigned getNumElements() const
Definition TypeBase.h:4198
QualType getElementType() const
Definition TypeBase.h:4197
Defines the clang::TargetInfo interface.
bool isSized(mlir::Type ty)
Returns true if the type is a CIR sized type.
Definition CIRTypes.cpp:33
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
const internal::VariadicDynCastAllOfMatcher< Decl, TypedefNameDecl > typedefNameDecl
Matches typedef name declarations.
const AstTypeMatcher< TagType > tagType
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< RecordType > recordType
const internal::VariadicDynCastAllOfMatcher< Decl, CXXRecordDecl > cxxRecordDecl
Matches C++ class declarations.
const internal::VariadicDynCastAllOfMatcher< Decl, RecordDecl > recordDecl
Matches class, struct, and union declarations.
const internal::VariadicAllOfMatcher< QualType > qualType
Matches QualTypes in the clang AST.
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
bool isInstanceMethod(const Decl *D)
Definition Attr.h:120
U cast(CodeGen::Address addr)
Definition Address.h:327
static bool opCallCallConv()
static bool generateDebugInfo()
Describes how types, statements, expressions, and declarations should be printed.
unsigned SuppressTagKeyword
Whether type printing should skip printing the tag keyword.
unsigned AlwaysIncludeTypeForTemplateArgument
Whether to use type suffixes (eg: 1U) on integral non-type template parameters.
unsigned SuppressInlineNamespace
Suppress printing parts of scope specifiers that correspond to inline namespaces.
unsigned PrintAsCanonical
Whether to print entities as written or canonically.