//===--- CIRGenTypes.cpp - Lower Clang AST types to ClangIR (CIR) types ---===//
// Part of clang 23.0.0git. Recovered from the Doxygen page for this file.
//===----------------------------------------------------------------------===//
#include "CIRGenTypes.h"

#include "CIRGenCXXABI.h"
#include "CIRGenModule.h"

#include "mlir/IR/BuiltinTypes.h"

#include "clang/AST/Type.h"

#include <algorithm>
#include <cassert>
#include <memory>
#include <string>
#include <utility>

using namespace clang;
using namespace clang::CIRGen;
18
20 : cgm(genModule), astContext(genModule.getASTContext()),
21 builder(cgm.getBuilder()), theCXXABI(cgm.getCXXABI()),
22 theABIInfo(cgm.getTargetCIRGenInfo().getABIInfo()) {}
23
25 for (auto i = functionInfos.begin(), e = functionInfos.end(); i != e;)
26 delete &*i++;
27}
28
29mlir::MLIRContext &CIRGenTypes::getMLIRContext() const {
30 return *builder.getContext();
31}
32
33/// Return true if the specified type in a function parameter or result position
34/// can be converted to a CIR type at this point. This boils down to being
35/// whether it is complete, as well as whether we've temporarily deferred
36/// expanding the type because we're in a recursive context.
38 // Some ABIs cannot have their member pointers represented in LLVM IR unless
39 // certain circumstances have been reached, but in CIR we represent member
40 // pointer types abstractly at this point so they are always convertible.
41 if (type->getAs<MemberPointerType>())
42 return true;
43
44 // If this isn't a tag type, we can convert it.
45 const TagType *tagType = type->getAs<TagType>();
46 if (!tagType)
47 return true;
48
49 // Function types involving incomplete class types are problematic in MLIR.
50 return !tagType->isIncompleteType();
51}
52
53/// Code to verify a given function type is complete, i.e. the return type and
54/// all of the parameter types are complete. Also check to see if we are in a
55/// RS_StructPointer context, and if so whether any struct types have been
56/// pended. If so, we don't want to ask the ABI lowering code to handle a type
57/// that cannot be converted to a CIR type.
60 return false;
61
62 if (const auto *fpt = dyn_cast<FunctionProtoType>(ft))
63 for (unsigned i = 0, e = fpt->getNumParams(); i != e; i++)
64 if (!isFuncParamTypeConvertible(fpt->getParamType(i)))
65 return false;
66
67 return true;
68}
69
70mlir::Type CIRGenTypes::convertFunctionTypeInternal(QualType qft) {
71 assert(qft.isCanonical());
73
74 // In classic codegen, if the function type depends on an incomplete type
75 // (e.g. a struct or enum), it cannot lower the function type due to ABI
76 // handling requirements and returns a placeholder. In CIR, ABI handling is
77 // deferred until after codegen, and record types are identified by name, so
78 // incomplete record type references in the function type will automatically
79 // see the complete type once the record is defined. We can always produce a
80 // proper function type here.
81
82 const CIRGenFunctionInfo *fi;
83 if (const auto *fpt = dyn_cast<FunctionProtoType>(ft)) {
86 } else {
90 }
91
92 mlir::Type resultType = getFunctionType(*fi);
93
94 return resultType;
95}
96
97// This is CIR's version of CodeGenTypes::addRecordTypeName. It isn't shareable
98// because CIR has different uniquing requirements.
100 StringRef suffix) {
101 llvm::SmallString<256> typeName;
102 llvm::raw_svector_ostream outStream(typeName);
103
104 PrintingPolicy policy = recordDecl->getASTContext().getPrintingPolicy();
108 policy.PrintAsCanonical = true;
109 policy.SuppressTagKeyword = true;
110
111 if (recordDecl->getIdentifier())
112 QualType(astContext.getCanonicalTagType(recordDecl))
113 .print(outStream, policy);
114 else if (auto *typedefNameDecl = recordDecl->getTypedefNameForAnonDecl())
115 typedefNameDecl->printQualifiedName(outStream, policy);
116 else
117 outStream << builder.getUniqueAnonRecordName();
118
119 if (!suffix.empty())
120 outStream << suffix;
121
122 return builder.getUniqueRecordName(std::string(typeName));
123}
124
125/// Return true if the specified type is already completely laid out.
127 const auto it = recordDeclTypes.find(ty);
128 return it != recordDeclTypes.end() && it->second.isComplete();
129}
130
131// We have multiple forms of this function that call each other, so we need to
132// declare one in advance.
133static bool
135 llvm::SmallPtrSetImpl<const RecordDecl *> &alreadyChecked);
136
137/// Return true if it is safe to convert the specified record decl to CIR and
138/// lay it out, false if doing so would cause us to get into a recursive
139/// compilation mess.
140static bool
142 llvm::SmallPtrSetImpl<const RecordDecl *> &alreadyChecked) {
143 // If we have already checked this type (maybe the same type is used by-value
144 // multiple times in multiple record fields, don't check again.
145 if (!alreadyChecked.insert(rd).second)
146 return true;
147
148 assert(rd->isCompleteDefinition() &&
149 "Expect RecordDecl to be CompleteDefinition");
150 const Type *key = cgt.getASTContext().getCanonicalTagType(rd).getTypePtr();
151
152 // If this type is already laid out, converting it is a noop.
153 if (cgt.isRecordLayoutComplete(key))
154 return true;
155
156 // If this type is currently being laid out, we can't recursively compile it.
157 if (cgt.isRecordBeingLaidOut(key))
158 return false;
159
160 // If this type would require laying out bases that are currently being laid
161 // out, don't do it. This includes virtual base classes which get laid out
162 // when a class is translated, even though they aren't embedded by-value into
163 // the class.
164 if (const CXXRecordDecl *crd = dyn_cast<CXXRecordDecl>(rd)) {
165 for (const clang::CXXBaseSpecifier &i : crd->bases())
166 if (!isSafeToConvert(i.getType()
167 ->castAs<RecordType>()
168 ->getDecl()
169 ->getDefinitionOrSelf(),
170 cgt, alreadyChecked))
171 return false;
172 }
173
174 // If this type would require laying out members that are currently being laid
175 // out, don't do it.
176 for (const FieldDecl *field : rd->fields())
177 if (!isSafeToConvert(field->getType(), cgt, alreadyChecked))
178 return false;
179
180 // If there are no problems, lets do it.
181 return true;
182}
183
184/// Return true if it is safe to convert this field type, which requires the
185/// record elements contained by-value to all be recursively safe to convert.
186static bool
188 llvm::SmallPtrSetImpl<const RecordDecl *> &alreadyChecked) {
189 // Strip off atomic type sugar.
190 if (const auto *at = qt->getAs<AtomicType>())
191 qt = at->getValueType();
192
193 // If this is a record, check it.
194 if (const auto *rd = qt->getAsRecordDecl())
195 return isSafeToConvert(rd, cgt, alreadyChecked);
196
197 // If this is an array, check the elements, which are embedded inline.
198 if (const auto *at = cgt.getASTContext().getAsArrayType(qt))
199 return isSafeToConvert(at->getElementType(), cgt, alreadyChecked);
200
201 // Otherwise, there is no concern about transforming this. We only care about
202 // things that are contained by-value in a record that can have another
203 // record as a member.
204 return true;
205}
206
207// Return true if it is safe to convert the specified record decl to CIR and lay
208// it out, false if doing so would cause us to get into a recursive compilation
209// mess.
210static bool isSafeToConvert(const RecordDecl *rd, CIRGenTypes &cgt) {
211 // If no records are being laid out, we can certainly do this one.
212 if (cgt.noRecordsBeingLaidOut())
213 return true;
214
216 return isSafeToConvert(rd, cgt, alreadyChecked);
217}
218
219/// Lay out a tagged decl type like struct or union.
221 // TagDecl's are not necessarily unique, instead use the (clang) type
222 // connected to the decl.
223 const Type *key = astContext.getCanonicalTagType(rd).getTypePtr();
224 cir::RecordType entry = recordDeclTypes[key];
225
226 // If we don't have an entry for this record yet, create one.
227 // We create an incomplete type initially. If `rd` is complete, we will
228 // add the members below.
229 if (!entry) {
230 auto name = getRecordTypeName(rd, "");
231 entry = builder.getIncompleteRecordTy(name, rd);
232 recordDeclTypes[key] = entry;
233 }
234
235 rd = rd->getDefinition();
236 if (!rd || !rd->isCompleteDefinition() || entry.isComplete())
237 return entry;
238
239 // If converting this type would cause us to infinitely loop, don't do it!
240 if (!isSafeToConvert(rd, *this)) {
241 deferredRecords.push_back(rd);
242 return entry;
243 }
244
245 // Okay, this is a definition of a type. Compile the implementation now.
246 bool insertResult = recordsBeingLaidOut.insert(key).second;
247 (void)insertResult;
248 assert(insertResult && "isSafeToCovert() should have caught this.");
249
250 // Force conversion of non-virtual base classes recursively.
251 if (const auto *cxxRecordDecl = dyn_cast<CXXRecordDecl>(rd)) {
252 for (const auto &base : cxxRecordDecl->bases()) {
253 if (base.isVirtual())
254 continue;
255 convertRecordDeclType(base.getType()->castAsRecordDecl());
256 }
257 }
258
259 // Layout fields.
260 std::unique_ptr<CIRGenRecordLayout> layout = computeRecordLayout(rd, &entry);
261 recordDeclTypes[key] = entry;
262 cirGenRecordLayouts[key] = std::move(layout);
263
264 // We're done laying out this record.
265 bool eraseResult = recordsBeingLaidOut.erase(key);
266 (void)eraseResult;
267 assert(eraseResult && "record not in RecordsBeingLaidOut set?");
268
269 // If we're done converting the outer-most record, then convert any deferred
270 // records as well.
271 if (recordsBeingLaidOut.empty())
272 while (!deferredRecords.empty())
273 convertRecordDeclType(deferredRecords.pop_back_val());
274
275 return entry;
276}
277
279 type = astContext.getCanonicalType(type);
280 const Type *ty = type.getTypePtr();
281
282 // Process record types before the type cache lookup.
283 if (const auto *recordType = dyn_cast<RecordType>(type))
284 return convertRecordDeclType(recordType->getDecl()->getDefinitionOrSelf());
285
286 // Has the type already been processed?
287 TypeCacheTy::iterator tci = typeCache.find(ty);
288 if (tci != typeCache.end())
289 return tci->second;
290
291 // For types that haven't been implemented yet or are otherwise unsupported,
292 // report an error and return 'int'.
293
294 mlir::Type resultType = nullptr;
295 switch (ty->getTypeClass()) {
296 case Type::Record:
297 llvm_unreachable("Should have been handled above");
298
299 case Type::Builtin: {
300 switch (cast<BuiltinType>(ty)->getKind()) {
301 // void
302 case BuiltinType::Void:
303 resultType = cgm.voidTy;
304 break;
305
306 // bool
307 case BuiltinType::Bool:
308 resultType = cir::BoolType::get(&getMLIRContext());
309 break;
310
311 // Signed integral types.
312 case BuiltinType::Char_S:
313 case BuiltinType::Int:
314 case BuiltinType::Int128:
315 case BuiltinType::Long:
316 case BuiltinType::LongLong:
317 case BuiltinType::SChar:
318 case BuiltinType::Short:
319 case BuiltinType::WChar_S:
320 resultType =
321 cir::IntType::get(&getMLIRContext(), astContext.getTypeSize(ty),
322 /*isSigned=*/true);
323 break;
324
325 // SVE types
326 case BuiltinType::SveInt8:
327 resultType =
328 cir::VectorType::get(builder.getSInt8Ty(), 16, /*is_scalable=*/true);
329 break;
330 case BuiltinType::SveUint8:
331 resultType =
332 cir::VectorType::get(builder.getUInt8Ty(), 16, /*is_scalable=*/true);
333 break;
334 case BuiltinType::SveInt16:
335 resultType =
336 cir::VectorType::get(builder.getSInt16Ty(), 8, /*is_scalable=*/true);
337 break;
338 case BuiltinType::SveUint16:
339 resultType =
340 cir::VectorType::get(builder.getUInt16Ty(), 8, /*is_scalable=*/true);
341 break;
342 case BuiltinType::SveFloat16:
343 resultType = cir::VectorType::get(builder.getFp16Ty(), 8,
344 /*is_scalable=*/true);
345 break;
346 case BuiltinType::SveBFloat16:
347 resultType = cir::VectorType::get(builder.getFp16Ty(), 8,
348 /*is_scalable=*/true);
349 break;
350 case BuiltinType::SveInt32:
351 resultType =
352 cir::VectorType::get(builder.getSInt32Ty(), 4, /*is_scalable=*/true);
353 break;
354 case BuiltinType::SveUint32:
355 resultType =
356 cir::VectorType::get(builder.getUInt32Ty(), 4, /*is_scalable=*/true);
357 break;
358 case BuiltinType::SveFloat32:
359 resultType = cir::VectorType::get(builder.getSingleTy(), 4,
360 /*is_scalable=*/true);
361 break;
362 case BuiltinType::SveInt64:
363 resultType =
364 cir::VectorType::get(builder.getSInt64Ty(), 2, /*is_scalable=*/true);
365 break;
366 case BuiltinType::SveUint64:
367 resultType =
368 cir::VectorType::get(builder.getUInt64Ty(), 2, /*is_scalable=*/true);
369 break;
370 case BuiltinType::SveFloat64:
371 resultType = cir::VectorType::get(builder.getDoubleTy(), 2,
372 /*is_scalable=*/true);
373 break;
374 case BuiltinType::SveBool:
375 resultType = cir::VectorType::get(builder.getUIntNTy(1), 16,
376 /*is_scalable=*/true);
377 break;
378
379 // Unsigned integral types.
380 case BuiltinType::Char8:
381 case BuiltinType::Char16:
382 case BuiltinType::Char32:
383 case BuiltinType::Char_U:
384 case BuiltinType::UChar:
385 case BuiltinType::UInt:
386 case BuiltinType::UInt128:
387 case BuiltinType::ULong:
388 case BuiltinType::ULongLong:
389 case BuiltinType::UShort:
390 case BuiltinType::WChar_U:
391 resultType =
392 cir::IntType::get(&getMLIRContext(), astContext.getTypeSize(ty),
393 /*isSigned=*/false);
394 break;
395
396 // Floating-point types
397 case BuiltinType::Float16:
398 resultType = cgm.fP16Ty;
399 break;
400 case BuiltinType::Half:
401 if (astContext.getLangOpts().NativeHalfType ||
402 !astContext.getTargetInfo().useFP16ConversionIntrinsics()) {
403 resultType = cgm.fP16Ty;
404 } else {
405 cgm.errorNYI(SourceLocation(), "processing of built-in type", type);
406 resultType = cgm.sInt32Ty;
407 }
408 break;
409 case BuiltinType::BFloat16:
410 resultType = cgm.bFloat16Ty;
411 break;
412 case BuiltinType::MFloat8:
413 resultType = cgm.uInt8Ty;
414 break;
415 case BuiltinType::Float:
416 assert(&astContext.getFloatTypeSemantics(type) ==
417 &llvm::APFloat::IEEEsingle() &&
418 "ClangIR NYI: 'float' in a format other than IEEE 32-bit");
419 resultType = cgm.floatTy;
420 break;
421 case BuiltinType::Double:
422 assert(&astContext.getFloatTypeSemantics(type) ==
423 &llvm::APFloat::IEEEdouble() &&
424 "ClangIR NYI: 'double' in a format other than IEEE 64-bit");
425 resultType = cgm.doubleTy;
426 break;
427 case BuiltinType::LongDouble:
428 resultType =
429 builder.getLongDoubleTy(astContext.getFloatTypeSemantics(type));
430 break;
431 case BuiltinType::Float128:
432 resultType = cgm.fP128Ty;
433 break;
434 case BuiltinType::Ibm128:
435 cgm.errorNYI(SourceLocation(), "processing of built-in type", type);
436 resultType = cgm.sInt32Ty;
437 break;
438
439 case BuiltinType::NullPtr:
440 // Add proper CIR type for it? this looks mostly useful for sema related
441 // things (like for overloads accepting void), for now, given that
442 // `sizeof(std::nullptr_t)` is equal to `sizeof(void *)`, model
443 // std::nullptr_t as !cir.ptr<!void>
444 resultType = builder.getVoidPtrTy();
445 break;
446
447 default:
448 cgm.errorNYI(SourceLocation(), "processing of built-in type", type);
449 resultType = cgm.sInt32Ty;
450 break;
451 }
452 break;
453 }
454
455 case Type::Complex: {
456 const auto *ct = cast<clang::ComplexType>(ty);
457 mlir::Type elementTy = convertType(ct->getElementType());
458 resultType = cir::ComplexType::get(elementTy);
459 break;
460 }
461
462 case Type::LValueReference:
463 case Type::RValueReference: {
464 const ReferenceType *refTy = cast<ReferenceType>(ty);
465 QualType elemTy = refTy->getPointeeType();
466 auto pointeeType = convertTypeForMem(elemTy);
467 resultType = builder.getPointerTo(pointeeType, elemTy.getAddressSpace());
468 assert(resultType && "Cannot get pointer type?");
469 break;
470 }
471
472 case Type::Pointer: {
473 const PointerType *ptrTy = cast<PointerType>(ty);
474 QualType elemTy = ptrTy->getPointeeType();
475 assert(!elemTy->isConstantMatrixType() && "not implemented");
476
477 mlir::Type pointeeType = convertType(elemTy);
478
479 resultType = builder.getPointerTo(pointeeType, elemTy.getAddressSpace());
480 break;
481 }
482
483 case Type::VariableArray: {
485 if (a->getIndexTypeCVRQualifiers() != 0)
486 cgm.errorNYI(SourceLocation(), "non trivial array types", type);
487 // VLAs resolve to the innermost element type; this matches
488 // the return of alloca, and there isn't any obviously better choice.
489 resultType = convertTypeForMem(a->getElementType());
490 break;
491 }
492
493 case Type::IncompleteArray: {
495 if (arrTy->getIndexTypeCVRQualifiers() != 0)
496 cgm.errorNYI(SourceLocation(), "non trivial array types", type);
497
498 mlir::Type elemTy = convertTypeForMem(arrTy->getElementType());
499 // int X[] -> [0 x int], unless the element type is not sized. If it is
500 // unsized (e.g. an incomplete record) just use [0 x i8].
501 if (!cir::isSized(elemTy)) {
502 elemTy = cgm.sInt8Ty;
503 }
504
505 resultType = cir::ArrayType::get(elemTy, 0);
506 break;
507 }
508
509 case Type::ConstantArray: {
511 mlir::Type elemTy = convertTypeForMem(arrTy->getElementType());
512 // In classic codegen, arrays of unsized types which it assumes are "arrays
513 // of undefined struct type" are lowered to arrays of i8 "just to have a
514 // concrete type", but in CIR, we can get here with abstract types like
515 // !cir.method and !cir.data_member, so we just create an array of the type
516 // and handle it during lowering if we still don't have a sized type.
517 resultType = cir::ArrayType::get(elemTy, arrTy->getSize().getZExtValue());
518 break;
519 }
520
521 case Type::ExtVector:
522 case Type::Vector: {
523 const VectorType *vec = cast<VectorType>(ty);
524 const mlir::Type elemTy = convertType(vec->getElementType());
525 resultType = cir::VectorType::get(elemTy, vec->getNumElements());
526 break;
527 }
528
529 case Type::Enum: {
530 const auto *ed = ty->castAsEnumDecl();
531 if (auto integerType = ed->getIntegerType(); !integerType.isNull())
532 return convertType(integerType);
533 // Return a placeholder 'i32' type. This can be changed later when the
534 // type is defined (see UpdateCompletedType), but is likely to be the
535 // "right" answer.
536 resultType = cgm.uInt32Ty;
537 break;
538 }
539
540 case Type::MemberPointer: {
541 const auto *mpt = cast<MemberPointerType>(ty);
542
543 NestedNameSpecifier mptNNS = mpt->getQualifier();
544 auto clsTy = mlir::cast<cir::RecordType>(
545 convertType(QualType(mptNNS.getAsType(), 0)));
546 if (mpt->isMemberDataPointer()) {
547 mlir::Type memberTy = convertType(mpt->getPointeeType());
548 resultType = cir::DataMemberType::get(memberTy, clsTy);
549 } else {
550 auto memberFuncTy = getFunctionType(cgm.getTypes().arrangeCXXMethodType(
551 mptNNS.getAsRecordDecl(),
552 mpt->getPointeeType()->getAs<clang::FunctionProtoType>(),
553 /*methodDecl=*/nullptr));
554 resultType = cir::MethodType::get(memberFuncTy, clsTy);
555 }
556 break;
557 }
558
559 case Type::FunctionNoProto:
560 case Type::FunctionProto:
561 resultType = convertFunctionTypeInternal(type);
562 break;
563
564 case Type::BitInt: {
565 const auto *bitIntTy = cast<BitIntType>(type);
566 if (bitIntTy->getNumBits() > cir::IntType::maxBitwidth()) {
567 cgm.errorNYI(SourceLocation(), "large _BitInt type", type);
568 resultType = cgm.sInt32Ty;
569 } else {
570 resultType = cir::IntType::get(&getMLIRContext(), bitIntTy->getNumBits(),
571 bitIntTy->isSigned(),
572 /*isBitInt=*/true);
573 }
574 break;
575 }
576
577 case Type::Atomic: {
578 QualType valueType = cast<AtomicType>(ty)->getValueType();
579 resultType = convertTypeForMem(valueType);
580
581 // Pad out to the inflated size if necessary.
582 uint64_t valueSize = astContext.getTypeSize(valueType);
583 uint64_t atomicSize = astContext.getTypeSize(ty);
584 if (valueSize != atomicSize) {
585 cgm.errorNYI("convertType: atomic type value size != atomic size");
586 }
587
588 break;
589 }
590
591 default:
592 cgm.errorNYI(SourceLocation(), "processing of type",
593 type->getTypeClassName());
594 resultType = cgm.sInt32Ty;
595 break;
596 }
597
598 assert(resultType && "Type conversion not yet implemented");
599
600 typeCache[ty] = resultType;
601 return resultType;
602}
603
605 bool forBitField) {
606 if (qualType->isConstantMatrixType()) {
607 cgm.errorNYI("Matrix type conversion");
608 return cgm.sInt32Ty;
609 }
610
611 mlir::Type convertedType = convertType(qualType);
612
613 assert(!forBitField && "Bit fields NYI");
614
615 // If this is a bit-precise integer type in a bitfield representation, map
616 // this integer to the target-specified size.
617 if (forBitField && qualType->isBitIntType())
618 assert(!qualType->isBitIntType() && "Bit field with type _BitInt NYI");
619
620 return convertedType;
621}
622
623/// Return record layout info for the given record decl.
624const CIRGenRecordLayout &
626 const auto *key = astContext.getCanonicalTagType(rd).getTypePtr();
627
628 // If we have already computed the layout, return it.
629 auto it = cirGenRecordLayouts.find(key);
630 if (it != cirGenRecordLayouts.end())
631 return *it->second;
632
633 // Compute the type information.
635
636 // Now try again.
637 it = cirGenRecordLayouts.find(key);
638
639 assert(it != cirGenRecordLayouts.end() &&
640 "Unable to find record layout information for type");
641 return *it->second;
642}
643
645 if (t->getAs<PointerType>())
646 return astContext.getTargetNullPointerValue(t) == 0;
647
648 if (const auto *at = astContext.getAsArrayType(t)) {
650 return true;
651
652 if (const auto *cat = dyn_cast<ConstantArrayType>(at))
653 if (astContext.getConstantArrayElementCount(cat) == 0)
654 return true;
655 }
656
657 if (const auto *rd = t->getAsRecordDecl())
658 return isZeroInitializable(rd);
659
660 if (const auto *mpt = t->getAs<MemberPointerType>())
661 return theCXXABI.isZeroInitializable(mpt);
662
663 if (t->getAs<HLSLInlineSpirvType>())
664 cgm.errorNYI(SourceLocation(),
665 "isZeroInitializable for HLSLInlineSpirvType");
666
667 return true;
668}
669
673
675 CanQualType returnType, bool isInstanceMethod,
677 RequiredArgs required) {
678 assert(llvm::all_of(argTypes,
679 [](CanQualType t) { return t.isCanonicalAsParam(); }));
680 // Lookup or create unique function info.
681 llvm::FoldingSetNodeID id;
682 CIRGenFunctionInfo::Profile(id, isInstanceMethod, info, required, returnType,
683 argTypes);
684
685 void *insertPos = nullptr;
686 CIRGenFunctionInfo *fi = functionInfos.FindNodeOrInsertPos(id, insertPos);
687 if (fi) {
688 // We found a matching function info based on id. These asserts verify that
689 // it really is a match.
690 assert(
691 fi->getReturnType() == returnType &&
692 std::equal(fi->argTypesBegin(), fi->argTypesEnd(), argTypes.begin()) &&
693 "Bad match based on CIRGenFunctionInfo folding set id");
694 return *fi;
695 }
696
698
699 // Construction the function info. We co-allocate the ArgInfos.
700 fi = CIRGenFunctionInfo::create(info, isInstanceMethod, returnType, argTypes,
701 required);
702 functionInfos.InsertNode(fi, insertPos);
703
704 return *fi;
705}
706
708 assert(!dyn_cast<ObjCMethodDecl>(gd.getDecl()) &&
709 "This is reported as a FIXME in LLVM codegen");
710 const auto *fd = cast<FunctionDecl>(gd.getDecl());
711
715
717}
718
719// When we find the full definition for a TagDecl, replace the 'opaque' type we
720// previously made for it if applicable.
722 // If this is an enum being completed, then we flush all non-struct types
723 // from the cache. This allows function types and other things that may be
724 // derived from the enum to be recomputed.
725 if ([[maybe_unused]] const auto *ed = dyn_cast<EnumDecl>(td)) {
726 // Classic codegen clears the type cache if it contains an entry for this
727 // enum type that doesn't use i32 as the underlying type, but I can't find
728 // a test case that meets that condition. C++ doesn't allow forward
729 // declaration of enums, and C doesn't allow an incomplete forward
730 // declaration with a non-default type.
731 assert(
732 !typeCache.count(
733 ed->getASTContext().getCanonicalTagType(ed)->getTypePtr()) ||
734 (convertType(ed->getIntegerType()) ==
735 typeCache[ed->getASTContext().getCanonicalTagType(ed)->getTypePtr()]));
736 // If necessary, provide the full definition of a type only used with a
737 // declaration so far.
739 return;
740 }
741
742 // If we completed a RecordDecl that we previously used and converted to an
743 // anonymous type, then go ahead and complete it now.
744 const auto *rd = cast<RecordDecl>(td);
745 if (rd->isDependentType())
746 return;
747
748 // Only complete if we converted it already. If we haven't converted it yet,
749 // we'll just do it lazily.
750 if (recordDeclTypes.count(astContext.getCanonicalTagType(rd).getTypePtr()))
752
753 // If necessary, provide the full definition of a type only used with a
754 // declaration so far.
756}
757
759 // Return the address space for the type. If the type is a
760 // function type without an address space qualifier, the
761 // program address space is used. Otherwise, the target picks
762 // the best address space based on the type information
763 return ty->isFunctionType() && !ty.hasAddressSpace()
764 ? cgm.getDataLayout().getProgramAddressSpace()
766}
Defines the clang::ASTContext interface.
static bool isSafeToConvert(QualType qt, CIRGenTypes &cgt, llvm::SmallPtrSetImpl< const RecordDecl * > &alreadyChecked)
Return true if it is safe to convert this field type, which requires the record elements contained by-value to all be recursively safe to convert.
static Decl::Kind getKind(const Decl *D)
C Language Family Type Representation.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CanQualType getCanonicalTagType(const TagDecl *TD) const
unsigned getTargetAddressSpace(LangAS AS) const
QualType getElementType() const
Definition TypeBase.h:3784
unsigned getIndexTypeCVRQualifiers() const
Definition TypeBase.h:3794
const_arg_iterator argTypesEnd() const
static CIRGenFunctionInfo * create(FunctionType::ExtInfo info, bool instanceMethod, CanQualType resultType, llvm::ArrayRef< CanQualType > argTypes, RequiredArgs required)
static void Profile(llvm::FoldingSetNodeID &id, bool instanceMethod, FunctionType::ExtInfo info, RequiredArgs required, CanQualType resultType, llvm::ArrayRef< CanQualType > argTypes)
const_arg_iterator argTypesBegin() const
This class organizes the cross-function state that is used while generating CIR code.
This class handles record and union layout info while lowering AST types to CIR types.
bool isZeroInitializable() const
Check whether this struct can be C++ zero-initialized with a zeroinitializer.
This class organizes the cross-module state that is used while lowering AST types to CIR types.
Definition CIRGenTypes.h:49
const CIRGenFunctionInfo & arrangeGlobalDeclaration(GlobalDecl gd)
unsigned getTargetAddressSpace(QualType ty) const
const CIRGenFunctionInfo & arrangeCXXStructorDeclaration(clang::GlobalDecl gd)
const CIRGenFunctionInfo & arrangeCIRFunctionInfo(CanQualType returnType, bool isInstanceMethod, llvm::ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, RequiredArgs required)
const CIRGenFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > fpt)
bool isZeroInitializable(clang::QualType ty)
Return whether a type can be zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
bool isFuncTypeConvertible(const clang::FunctionType *ft)
Utility to check whether a function type can be converted to a CIR type (i.e.
CIRGenTypes(CIRGenModule &cgm)
bool isRecordBeingLaidOut(const clang::Type *ty) const
CIRGenBuilderTy & getBuilder() const
Definition CIRGenTypes.h:82
mlir::MLIRContext & getMLIRContext() const
cir::FuncType getFunctionType(const CIRGenFunctionInfo &info)
Get the CIR function type for.
bool isFuncParamTypeConvertible(clang::QualType type)
Return true if the specified type in a function parameter or result position can be converted to a CIR type at this point.
void updateCompletedType(const clang::TagDecl *td)
UpdateCompletedType - when we find the full definition for a TagDecl, replace the 'opaque' type we pr...
std::string getRecordTypeName(const clang::RecordDecl *, llvm::StringRef suffix)
bool noRecordsBeingLaidOut() const
const ABIInfo & getABIInfo() const
const CIRGenFunctionInfo & arrangeFunctionDeclaration(const clang::FunctionDecl *fd)
Free functions are functions that are compatible with an ordinary C function pointer type.
clang::ASTContext & getASTContext() const
bool isRecordLayoutComplete(const clang::Type *ty) const
Return true if the specified type is already completely laid out.
mlir::Type convertType(clang::QualType type)
Convert a Clang type into a mlir::Type.
const CIRGenRecordLayout & getCIRGenRecordLayout(const clang::RecordDecl *rd)
Return record layout info for the given record decl.
std::unique_ptr< CIRGenRecordLayout > computeRecordLayout(const clang::RecordDecl *rd, cir::RecordType *ty)
mlir::Type convertRecordDeclType(const clang::RecordDecl *recordDecl)
Lay out a tagged decl type like struct or union.
mlir::Type convertTypeForMem(clang::QualType, bool forBitField=false)
Convert type T into an mlir::Type.
A class for recording the number of arguments that a function signature requires.
Represents a base class of a C++ class.
Definition DeclCXX.h:146
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
static CanQual< T > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
bool isCanonicalAsParam() const
Determines if this canonical type is furthermore canonical as a parameter.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
Represents the canonical version of C arrays with a specified constant size.
Definition TypeBase.h:3810
llvm::APInt getSize() const
Return the constant array size as an APInt.
Definition TypeBase.h:3866
Represents a member of a struct/union/class.
Definition Decl.h:3175
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
Definition TypeBase.h:4935
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5357
A class which abstracts out some details necessary for making a call.
Definition TypeBase.h:4664
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition TypeBase.h:4553
QualType getReturnType() const
Definition TypeBase.h:4893
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
Represents a C array with an unspecified size.
Definition TypeBase.h:3959
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3703
Represents a C++ nested name specifier, such as "\::std::vector<int>::".
CXXRecordDecl * getAsRecordDecl() const
Retrieve the record declaration stored in this nested name specifier, or null.
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3378
QualType getPointeeType() const
Definition TypeBase.h:3388
A (possibly-)qualified type.
Definition TypeBase.h:937
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8431
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8557
void print(raw_ostream &OS, const PrintingPolicy &Policy, const Twine &PlaceHolder=Twine(), unsigned Indentation=0) const
bool hasAddressSpace() const
Check if this type has any address space qualifier.
Definition TypeBase.h:8552
bool isCanonical() const
Definition TypeBase.h:8488
Represents a struct/union/class.
Definition Decl.h:4342
field_range fields() const
Definition Decl.h:4545
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
Definition Decl.h:4526
Base for LValueReferenceType and RValueReferenceType.
Definition TypeBase.h:3623
QualType getPointeeType() const
Definition TypeBase.h:3641
Encodes a location in the source.
Represents the declaration of a struct/union/class/enum.
Definition Decl.h:3732
bool isCompleteDefinition() const
Return true if this decl has its body fully specified.
Definition Decl.h:3833
The base class of the type hierarchy.
Definition TypeBase.h:1866
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isConstantMatrixType() const
Definition TypeBase.h:8835
EnumDecl * castAsEnumDecl() const
Definition Type.h:59
bool isFunctionType() const
Definition TypeBase.h:8664
TypeClass getTypeClass() const
Definition TypeBase.h:2433
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9261
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:4016
Represents a GCC generic vector type.
Definition TypeBase.h:4225
unsigned getNumElements() const
Definition TypeBase.h:4240
QualType getElementType() const
Definition TypeBase.h:4239
Defines the clang::TargetInfo interface.
bool isSized(mlir::Type ty)
Returns true if the type is a CIR sized type.
Definition CIRTypes.cpp:34
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
const internal::VariadicDynCastAllOfMatcher< Decl, TypedefNameDecl > typedefNameDecl
Matches typedef name declarations.
const AstTypeMatcher< TagType > tagType
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< RecordType > recordType
const internal::VariadicDynCastAllOfMatcher< Decl, CXXRecordDecl > cxxRecordDecl
Matches C++ class declarations.
const internal::VariadicDynCastAllOfMatcher< Decl, RecordDecl > recordDecl
Matches class, struct, and union declarations.
const internal::VariadicAllOfMatcher< QualType > qualType
Matches QualTypes in the clang AST.
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
bool isInstanceMethod(const Decl *D)
Definition Attr.h:120
U cast(CodeGen::Address addr)
Definition Address.h:327
static bool opCallCallConv()
static bool generateDebugInfo()
Describes how types, statements, expressions, and declarations should be printed.
unsigned SuppressTagKeyword
Whether type printing should skip printing the tag keyword.
unsigned AlwaysIncludeTypeForTemplateArgument
Whether to use type suffixes (eg: 1U) on integral non-type template parameters.
unsigned SuppressInlineNamespace
Suppress printing parts of scope specifiers that correspond to inline namespaces.
unsigned PrintAsCanonical
Whether to print entities as written or canonically.