//===----------------------------------------------------------------------===//
// CIRGenTypes.cpp - Lowering of Clang AST types to CIR (ClangIR) types.
//
// NOTE(review): this text was recovered from a Doxygen listing (clang
// 23.0.0git); lines the renderer hyperlinked were dropped by extraction and
// have been reconstructed where the page's member index grounds them.
//===----------------------------------------------------------------------===//
#include "CIRGenTypes.h"

#include "CIRGenCXXABI.h"
#include "CIRGenModule.h"

#include "mlir/IR/BuiltinTypes.h"

// NOTE(review): the extraction dropped several include lines here; the page's
// index brief "Defines the clang::ASTContext interface." indicates ASTContext.h
// was among them. Verify the full include list against upstream.
#include "clang/AST/ASTContext.h"
#include "clang/AST/Type.h"

#include <cassert>

using namespace clang;
using namespace clang::CIRGen;
18
20 : cgm(genModule), astContext(genModule.getASTContext()),
21 builder(cgm.getBuilder()), theCXXABI(cgm.getCXXABI()),
22 theABIInfo(cgm.getTargetCIRGenInfo().getABIInfo()) {}
23
25 for (auto i = functionInfos.begin(), e = functionInfos.end(); i != e;)
26 delete &*i++;
27}
28
29mlir::MLIRContext &CIRGenTypes::getMLIRContext() const {
30 return *builder.getContext();
31}
32
33/// Return true if the specified type in a function parameter or result position
34/// can be converted to a CIR type at this point. This boils down to being
35/// whether it is complete, as well as whether we've temporarily deferred
36/// expanding the type because we're in a recursive context.
38 // Some ABIs cannot have their member pointers represented in LLVM IR unless
39 // certain circumstances have been reached, but in CIR we represent member
40 // pointer types abstractly at this point so they are always convertible.
41 if (type->getAs<MemberPointerType>())
42 return true;
43
44 // If this isn't a tag type, we can convert it.
45 const TagType *tagType = type->getAs<TagType>();
46 if (!tagType)
47 return true;
48
49 // Function types involving incomplete class types are problematic in MLIR.
50 return !tagType->isIncompleteType();
51}
52
53/// Code to verify a given function type is complete, i.e. the return type and
54/// all of the parameter types are complete. Also check to see if we are in a
55/// RS_StructPointer context, and if so whether any struct types have been
56/// pended. If so, we don't want to ask the ABI lowering code to handle a type
57/// that cannot be converted to a CIR type.
60 return false;
61
62 if (const auto *fpt = dyn_cast<FunctionProtoType>(ft))
63 for (unsigned i = 0, e = fpt->getNumParams(); i != e; i++)
64 if (!isFuncParamTypeConvertible(fpt->getParamType(i)))
65 return false;
66
67 return true;
68}
69
70mlir::Type CIRGenTypes::convertFunctionTypeInternal(QualType qft) {
71 assert(qft.isCanonical());
73
74 // In classic codegen, if the function type depends on an incomplete type
75 // (e.g. a struct or enum), it cannot lower the function type due to ABI
76 // handling requirements and returns a placeholder. In CIR, ABI handling is
77 // deferred until after codegen, and record types are identified by name, so
78 // incomplete record type references in the function type will automatically
79 // see the complete type once the record is defined. We can always produce a
80 // proper function type here.
81
82 const CIRGenFunctionInfo *fi;
83 if (const auto *fpt = dyn_cast<FunctionProtoType>(ft)) {
86 } else {
90 }
91
92 mlir::Type resultType = getFunctionType(*fi);
93
94 return resultType;
95}
96
97// This is CIR's version of CodeGenTypes::addRecordTypeName. It isn't shareable
98// because CIR has different uniquing requirements.
100 StringRef suffix) {
101 llvm::SmallString<256> typeName;
102 llvm::raw_svector_ostream outStream(typeName);
103
104 PrintingPolicy policy = recordDecl->getASTContext().getPrintingPolicy();
108 policy.PrintAsCanonical = true;
109 policy.SuppressTagKeyword = true;
110
111 if (recordDecl->getIdentifier())
112 QualType(astContext.getCanonicalTagType(recordDecl))
113 .print(outStream, policy);
114 else if (auto *typedefNameDecl = recordDecl->getTypedefNameForAnonDecl())
115 typedefNameDecl->printQualifiedName(outStream, policy);
116 else
117 outStream << builder.getUniqueAnonRecordName();
118
119 if (!suffix.empty())
120 outStream << suffix;
121
122 return builder.getUniqueRecordName(std::string(typeName));
123}
124
125/// Return true if the specified type is already completely laid out.
127 const auto it = recordDeclTypes.find(ty);
128 return it != recordDeclTypes.end() && it->second.isComplete();
129}
130
131// We have multiple forms of this function that call each other, so we need to
132// declare one in advance.
133static bool
135 llvm::SmallPtrSetImpl<const RecordDecl *> &alreadyChecked);
136
137/// Return true if it is safe to convert the specified record decl to CIR and
138/// lay it out, false if doing so would cause us to get into a recursive
139/// compilation mess.
140static bool
142 llvm::SmallPtrSetImpl<const RecordDecl *> &alreadyChecked) {
143 // If we have already checked this type (maybe the same type is used by-value
144 // multiple times in multiple record fields, don't check again.
145 if (!alreadyChecked.insert(rd).second)
146 return true;
147
148 assert(rd->isCompleteDefinition() &&
149 "Expect RecordDecl to be CompleteDefinition");
150 const Type *key = cgt.getASTContext().getCanonicalTagType(rd).getTypePtr();
151
152 // If this type is already laid out, converting it is a noop.
153 if (cgt.isRecordLayoutComplete(key))
154 return true;
155
156 // Check the cross-call cache. This avoids redundant recursive field walks
157 // for the same record types across different convertRecordDeclType calls
158 // during a single layout phase.
159 if (cgt.isCachedSafeToConvert(key))
160 return true;
161
162 // If this type is currently being laid out, we can't recursively compile it.
163 if (cgt.isRecordBeingLaidOut(key))
164 return false;
165
166 // If this type would require laying out bases that are currently being laid
167 // out, don't do it. This includes virtual base classes which get laid out
168 // when a class is translated, even though they aren't embedded by-value into
169 // the class.
170 if (const CXXRecordDecl *crd = dyn_cast<CXXRecordDecl>(rd)) {
171 for (const clang::CXXBaseSpecifier &i : crd->bases())
172 if (!isSafeToConvert(i.getType()
173 ->castAs<RecordType>()
174 ->getDecl()
175 ->getDefinitionOrSelf(),
176 cgt, alreadyChecked))
177 return false;
178 }
179
180 // If this type would require laying out members that are currently being laid
181 // out, don't do it.
182 for (const FieldDecl *field : rd->fields())
183 if (!isSafeToConvert(field->getType(), cgt, alreadyChecked))
184 return false;
185
186 // Cache the positive result. This will be cleared when recordsBeingLaidOut
187 // changes.
188 cgt.cacheSafeToConvert(key);
189
190 // If there are no problems, lets do it.
191 return true;
192}
193
194/// Return true if it is safe to convert this field type, which requires the
195/// record elements contained by-value to all be recursively safe to convert.
196static bool
198 llvm::SmallPtrSetImpl<const RecordDecl *> &alreadyChecked) {
199 // Strip off atomic type sugar.
200 if (const auto *at = qt->getAs<AtomicType>())
201 qt = at->getValueType();
202
203 // If this is a record, check it.
204 if (const auto *rd = qt->getAsRecordDecl())
205 return isSafeToConvert(rd, cgt, alreadyChecked);
206
207 // If this is an array, check the elements, which are embedded inline.
208 if (const auto *at = cgt.getASTContext().getAsArrayType(qt))
209 return isSafeToConvert(at->getElementType(), cgt, alreadyChecked);
210
211 // Otherwise, there is no concern about transforming this. We only care about
212 // things that are contained by-value in a record that can have another
213 // record as a member.
214 return true;
215}
216
217// Return true if it is safe to convert the specified record decl to CIR and lay
218// it out, false if doing so would cause us to get into a recursive compilation
219// mess.
220static bool isSafeToConvert(const RecordDecl *rd, CIRGenTypes &cgt) {
221 // If no records are being laid out, we can certainly do this one.
222 if (cgt.noRecordsBeingLaidOut())
223 return true;
224
226 return isSafeToConvert(rd, cgt, alreadyChecked);
227}
228
229/// Lay out a tagged decl type like struct or union.
231 // TagDecl's are not necessarily unique, instead use the (clang) type
232 // connected to the decl.
233 const Type *key = astContext.getCanonicalTagType(rd).getTypePtr();
234 cir::RecordType entry = recordDeclTypes[key];
235
236 // If we don't have an entry for this record yet, create one.
237 // We create an incomplete type initially. If `rd` is complete, we will
238 // add the members below.
239 if (!entry) {
240 auto name = getRecordTypeName(rd, "");
241 entry = builder.getIncompleteRecordTy(name, rd);
242 recordDeclTypes[key] = entry;
243 }
244
245 rd = rd->getDefinition();
246 if (!rd || !rd->isCompleteDefinition() || entry.isComplete())
247 return entry;
248
249 // If converting this type would cause us to infinitely loop, don't do it!
250 if (!isSafeToConvert(rd, *this)) {
251 deferredRecords.push_back(rd);
252 return entry;
253 }
254
255 // Okay, this is a definition of a type. Compile the implementation now.
256 bool insertResult = recordsBeingLaidOut.insert(key).second;
257 (void)insertResult;
258 assert(insertResult && "isSafeToCovert() should have caught this.");
259
260 // Invalidate the safety cache since recordsBeingLaidOut changed.
261 safeToConvertCache.clear();
262
263 // Force conversion of non-virtual base classes recursively.
264 if (const auto *cxxRecordDecl = dyn_cast<CXXRecordDecl>(rd)) {
265 for (const auto &base : cxxRecordDecl->bases()) {
266 if (base.isVirtual())
267 continue;
268 convertRecordDeclType(base.getType()->castAsRecordDecl());
269 }
270 }
271
272 // Layout fields.
273 std::unique_ptr<CIRGenRecordLayout> layout = computeRecordLayout(rd, &entry);
274 recordDeclTypes[key] = entry;
275 cirGenRecordLayouts[key] = std::move(layout);
276
277 // We're done laying out this record.
278 bool eraseResult = recordsBeingLaidOut.erase(key);
279 (void)eraseResult;
280 assert(eraseResult && "record not in RecordsBeingLaidOut set?");
281
282 // Invalidate the safety cache since recordsBeingLaidOut changed.
283 safeToConvertCache.clear();
284
285 // If we're done converting the outer-most record, then convert any deferred
286 // records as well.
287 if (recordsBeingLaidOut.empty())
288 while (!deferredRecords.empty())
289 convertRecordDeclType(deferredRecords.pop_back_val());
290
291 return entry;
292}
293
295 type = astContext.getCanonicalType(type);
296 const Type *ty = type.getTypePtr();
297
298 // Process record types before the type cache lookup.
299 if (const auto *recordType = dyn_cast<RecordType>(type))
300 return convertRecordDeclType(recordType->getDecl()->getDefinitionOrSelf());
301
302 // Has the type already been processed?
303 TypeCacheTy::iterator tci = typeCache.find(ty);
304 if (tci != typeCache.end())
305 return tci->second;
306
307 // For types that haven't been implemented yet or are otherwise unsupported,
308 // report an error and return 'int'.
309
310 mlir::Type resultType = nullptr;
311 switch (ty->getTypeClass()) {
312 case Type::Record:
313 llvm_unreachable("Should have been handled above");
314
315 case Type::Builtin: {
316 switch (cast<BuiltinType>(ty)->getKind()) {
317 // void
318 case BuiltinType::Void:
319 resultType = cgm.voidTy;
320 break;
321
322 // bool
323 case BuiltinType::Bool:
324 resultType = cir::BoolType::get(&getMLIRContext());
325 break;
326
327 // Signed integral types.
328 case BuiltinType::Char_S:
329 case BuiltinType::Int:
330 case BuiltinType::Int128:
331 case BuiltinType::Long:
332 case BuiltinType::LongLong:
333 case BuiltinType::SChar:
334 case BuiltinType::Short:
335 case BuiltinType::WChar_S:
336 case BuiltinType::Accum:
337 case BuiltinType::Fract:
338 case BuiltinType::LongAccum:
339 case BuiltinType::LongFract:
340 case BuiltinType::ShortAccum:
341 case BuiltinType::ShortFract:
342 // Saturated signed types.
343 case BuiltinType::SatAccum:
344 case BuiltinType::SatFract:
345 case BuiltinType::SatLongAccum:
346 case BuiltinType::SatLongFract:
347 case BuiltinType::SatShortAccum:
348 case BuiltinType::SatShortFract:
349 resultType =
350 cir::IntType::get(&getMLIRContext(), astContext.getTypeSize(ty),
351 /*isSigned=*/true);
352 break;
353
354 // SVE types
355 case BuiltinType::SveInt8:
356 resultType =
357 cir::VectorType::get(builder.getSInt8Ty(), 16, /*is_scalable=*/true);
358 break;
359 case BuiltinType::SveUint8:
360 resultType =
361 cir::VectorType::get(builder.getUInt8Ty(), 16, /*is_scalable=*/true);
362 break;
363 case BuiltinType::SveInt16:
364 resultType =
365 cir::VectorType::get(builder.getSInt16Ty(), 8, /*is_scalable=*/true);
366 break;
367 case BuiltinType::SveUint16:
368 resultType =
369 cir::VectorType::get(builder.getUInt16Ty(), 8, /*is_scalable=*/true);
370 break;
371 case BuiltinType::SveFloat16:
372 resultType = cir::VectorType::get(builder.getFp16Ty(), 8,
373 /*is_scalable=*/true);
374 break;
375 case BuiltinType::SveBFloat16:
376 resultType = cir::VectorType::get(builder.getFp16Ty(), 8,
377 /*is_scalable=*/true);
378 break;
379 case BuiltinType::SveInt32:
380 resultType =
381 cir::VectorType::get(builder.getSInt32Ty(), 4, /*is_scalable=*/true);
382 break;
383 case BuiltinType::SveUint32:
384 resultType =
385 cir::VectorType::get(builder.getUInt32Ty(), 4, /*is_scalable=*/true);
386 break;
387 case BuiltinType::SveFloat32:
388 resultType = cir::VectorType::get(builder.getSingleTy(), 4,
389 /*is_scalable=*/true);
390 break;
391 case BuiltinType::SveInt64:
392 resultType =
393 cir::VectorType::get(builder.getSInt64Ty(), 2, /*is_scalable=*/true);
394 break;
395 case BuiltinType::SveUint64:
396 resultType =
397 cir::VectorType::get(builder.getUInt64Ty(), 2, /*is_scalable=*/true);
398 break;
399 case BuiltinType::SveFloat64:
400 resultType = cir::VectorType::get(builder.getDoubleTy(), 2,
401 /*is_scalable=*/true);
402 break;
403 case BuiltinType::SveBool:
404 resultType = cir::VectorType::get(builder.getUIntNTy(1), 16,
405 /*is_scalable=*/true);
406 break;
407
408 // Unsigned integral types.
409 case BuiltinType::Char8:
410 case BuiltinType::Char16:
411 case BuiltinType::Char32:
412 case BuiltinType::Char_U:
413 case BuiltinType::UChar:
414 case BuiltinType::UInt:
415 case BuiltinType::UInt128:
416 case BuiltinType::ULong:
417 case BuiltinType::ULongLong:
418 case BuiltinType::UShort:
419 case BuiltinType::WChar_U:
420 case BuiltinType::UAccum:
421 case BuiltinType::UFract:
422 case BuiltinType::ULongAccum:
423 case BuiltinType::ULongFract:
424 case BuiltinType::UShortAccum:
425 case BuiltinType::UShortFract:
426 // Saturated unsigned types.
427 case BuiltinType::SatUAccum:
428 case BuiltinType::SatUFract:
429 case BuiltinType::SatULongAccum:
430 case BuiltinType::SatULongFract:
431 case BuiltinType::SatUShortAccum:
432 case BuiltinType::SatUShortFract:
433 resultType =
434 cir::IntType::get(&getMLIRContext(), astContext.getTypeSize(ty),
435 /*isSigned=*/false);
436 break;
437
438 // Floating-point types
439 case BuiltinType::Float16:
440 resultType = cgm.fP16Ty;
441 break;
442 case BuiltinType::Half:
443 if (astContext.getLangOpts().NativeHalfType ||
444 !astContext.getTargetInfo().useFP16ConversionIntrinsics()) {
445 resultType = cgm.fP16Ty;
446 } else {
447 cgm.errorNYI(SourceLocation(), "processing of built-in type", type);
448 resultType = cgm.sInt32Ty;
449 }
450 break;
451 case BuiltinType::BFloat16:
452 resultType = cgm.bFloat16Ty;
453 break;
454 case BuiltinType::MFloat8:
455 resultType = cgm.uInt8Ty;
456 break;
457 case BuiltinType::Float:
458 assert(&astContext.getFloatTypeSemantics(type) ==
459 &llvm::APFloat::IEEEsingle() &&
460 "ClangIR NYI: 'float' in a format other than IEEE 32-bit");
461 resultType = cgm.floatTy;
462 break;
463 case BuiltinType::Double:
464 assert(&astContext.getFloatTypeSemantics(type) ==
465 &llvm::APFloat::IEEEdouble() &&
466 "ClangIR NYI: 'double' in a format other than IEEE 64-bit");
467 resultType = cgm.doubleTy;
468 break;
469 case BuiltinType::LongDouble:
470 resultType =
471 builder.getLongDoubleTy(astContext.getFloatTypeSemantics(type));
472 break;
473 case BuiltinType::Float128:
474 resultType = cgm.fP128Ty;
475 break;
476 case BuiltinType::Ibm128:
477 cgm.errorNYI(SourceLocation(), "processing of built-in type", type);
478 resultType = cgm.sInt32Ty;
479 break;
480
481 case BuiltinType::NullPtr:
482 // Add proper CIR type for it? this looks mostly useful for sema related
483 // things (like for overloads accepting void), for now, given that
484 // `sizeof(std::nullptr_t)` is equal to `sizeof(void *)`, model
485 // std::nullptr_t as !cir.ptr<!void>
486 resultType = builder.getVoidPtrTy();
487 break;
488
489 default:
490 cgm.errorNYI(SourceLocation(), "processing of built-in type", type);
491 resultType = cgm.sInt32Ty;
492 break;
493 }
494 break;
495 }
496
497 case Type::Complex: {
498 const auto *ct = cast<clang::ComplexType>(ty);
499 mlir::Type elementTy = convertType(ct->getElementType());
500 resultType = cir::ComplexType::get(elementTy);
501 break;
502 }
503
504 case Type::LValueReference:
505 case Type::RValueReference: {
506 const ReferenceType *refTy = cast<ReferenceType>(ty);
507 QualType elemTy = refTy->getPointeeType();
508 auto pointeeType = convertTypeForMem(elemTy);
509 resultType = builder.getPointerTo(pointeeType, elemTy.getAddressSpace());
510 assert(resultType && "Cannot get pointer type?");
511 break;
512 }
513
514 case Type::Pointer: {
515 const PointerType *ptrTy = cast<PointerType>(ty);
516 QualType elemTy = ptrTy->getPointeeType();
517 assert(!elemTy->isConstantMatrixType() && "not implemented");
518
519 mlir::Type pointeeType = convertType(elemTy);
520
521 resultType = builder.getPointerTo(pointeeType, elemTy.getAddressSpace());
522 break;
523 }
524
525 case Type::VariableArray: {
527 if (a->getIndexTypeCVRQualifiers() != 0)
528 cgm.errorNYI(SourceLocation(), "non trivial array types", type);
529 // VLAs resolve to the innermost element type; this matches
530 // the return of alloca, and there isn't any obviously better choice.
531 resultType = convertTypeForMem(a->getElementType());
532 break;
533 }
534
535 case Type::IncompleteArray: {
537 if (arrTy->getIndexTypeCVRQualifiers() != 0)
538 cgm.errorNYI(SourceLocation(), "non trivial array types", type);
539
540 mlir::Type elemTy = convertTypeForMem(arrTy->getElementType());
541 // int X[] -> [0 x int], unless the element type is not sized. If it is
542 // unsized (e.g. an incomplete record) just use [0 x i8].
543 if (!cir::isSized(elemTy)) {
544 elemTy = cgm.sInt8Ty;
545 }
546
547 resultType = cir::ArrayType::get(elemTy, 0);
548 break;
549 }
550
551 case Type::ConstantArray: {
553 mlir::Type elemTy = convertTypeForMem(arrTy->getElementType());
554 // In classic codegen, arrays of unsized types which it assumes are "arrays
555 // of undefined struct type" are lowered to arrays of i8 "just to have a
556 // concrete type", but in CIR, we can get here with abstract types like
557 // !cir.method and !cir.data_member, so we just create an array of the type
558 // and handle it during lowering if we still don't have a sized type.
559 resultType = cir::ArrayType::get(elemTy, arrTy->getSize().getZExtValue());
560 break;
561 }
562
563 case Type::ExtVector:
564 case Type::Vector: {
565 const VectorType *vec = cast<VectorType>(ty);
566 const mlir::Type elemTy = convertType(vec->getElementType());
567 resultType = cir::VectorType::get(elemTy, vec->getNumElements());
568 break;
569 }
570
571 case Type::Enum: {
572 const auto *ed = ty->castAsEnumDecl();
573 if (auto integerType = ed->getIntegerType(); !integerType.isNull())
574 return convertType(integerType);
575 // Return a placeholder 'i32' type. This can be changed later when the
576 // type is defined (see UpdateCompletedType), but is likely to be the
577 // "right" answer.
578 resultType = cgm.uInt32Ty;
579 break;
580 }
581
582 case Type::MemberPointer: {
583 const auto *mpt = cast<MemberPointerType>(ty);
584
585 NestedNameSpecifier mptNNS = mpt->getQualifier();
586 auto clsTy = mlir::cast<cir::RecordType>(
587 convertType(QualType(mptNNS.getAsType(), 0)));
588 if (mpt->isMemberDataPointer()) {
589 mlir::Type memberTy = convertType(mpt->getPointeeType());
590 resultType = cir::DataMemberType::get(memberTy, clsTy);
591 } else {
592 auto memberFuncTy = getFunctionType(cgm.getTypes().arrangeCXXMethodType(
593 mptNNS.getAsRecordDecl(),
594 mpt->getPointeeType()->getAs<clang::FunctionProtoType>(),
595 /*methodDecl=*/nullptr));
596 resultType = cir::MethodType::get(memberFuncTy, clsTy);
597 }
598 break;
599 }
600
601 case Type::FunctionNoProto:
602 case Type::FunctionProto:
603 resultType = convertFunctionTypeInternal(type);
604 break;
605
606 case Type::BitInt: {
607 const auto *bitIntTy = cast<BitIntType>(type);
608 unsigned numBits = bitIntTy->getNumBits();
609 assert(numBits <= cir::IntType::maxBitwidth() &&
610 "_BitInt width exceeds CIR IntType maximum");
611 resultType =
612 cir::IntType::get(&getMLIRContext(), numBits, bitIntTy->isSigned(),
613 /*isBitInt=*/true);
614 break;
615 }
616
617 case Type::Atomic: {
618 QualType valueType = cast<AtomicType>(ty)->getValueType();
619 resultType = convertTypeForMem(valueType);
620
621 // Pad out to the inflated size if necessary.
622 uint64_t valueSize = astContext.getTypeSize(valueType);
623 uint64_t atomicSize = astContext.getTypeSize(ty);
624 if (valueSize != atomicSize) {
625 cgm.errorNYI("convertType: atomic type value size != atomic size");
626 }
627
628 break;
629 }
630
631 default:
632 cgm.errorNYI(SourceLocation(), "processing of type",
633 type->getTypeClassName());
634 resultType = cgm.sInt32Ty;
635 break;
636 }
637
638 assert(resultType && "Type conversion not yet implemented");
639
640 typeCache[ty] = resultType;
641 return resultType;
642}
643
645 bool forBitField) {
646 if (qualType->isConstantMatrixType()) {
647 cgm.errorNYI("Matrix type conversion");
648 return cgm.sInt32Ty;
649 }
650
651 mlir::Type convertedType = convertType(qualType);
652
653 assert(!forBitField && "Bit fields NYI");
654
655 // If this is a bit-precise integer type in a bitfield representation, map
656 // this integer to the target-specified size.
657 if (forBitField && qualType->isBitIntType())
658 assert(!qualType->isBitIntType() && "Bit field with type _BitInt NYI");
659
660 return convertedType;
661}
662
663/// Return record layout info for the given record decl.
664const CIRGenRecordLayout &
666 const auto *key = astContext.getCanonicalTagType(rd).getTypePtr();
667
668 // If we have already computed the layout, return it.
669 auto it = cirGenRecordLayouts.find(key);
670 if (it != cirGenRecordLayouts.end())
671 return *it->second;
672
673 // Compute the type information.
675
676 // Now try again.
677 it = cirGenRecordLayouts.find(key);
678
679 assert(it != cirGenRecordLayouts.end() &&
680 "Unable to find record layout information for type");
681 return *it->second;
682}
683
685 if (t->getAs<PointerType>())
686 return astContext.getTargetNullPointerValue(t) == 0;
687
688 if (const auto *at = astContext.getAsArrayType(t)) {
690 return true;
691
692 if (const auto *cat = dyn_cast<ConstantArrayType>(at))
693 if (astContext.getConstantArrayElementCount(cat) == 0)
694 return true;
695 }
696
697 if (const auto *rd = t->getAsRecordDecl())
698 return isZeroInitializable(rd);
699
700 if (const auto *mpt = t->getAs<MemberPointerType>())
701 return theCXXABI.isZeroInitializable(mpt);
702
703 if (t->getAs<HLSLInlineSpirvType>())
704 cgm.errorNYI(SourceLocation(),
705 "isZeroInitializable for HLSLInlineSpirvType");
706
707 return true;
708}
709
713
715 CanQualType returnType, bool isInstanceMethod,
717 RequiredArgs required) {
718 assert(llvm::all_of(argTypes,
719 [](CanQualType t) { return t.isCanonicalAsParam(); }));
720 // Lookup or create unique function info.
721 llvm::FoldingSetNodeID id;
722 CIRGenFunctionInfo::Profile(id, isInstanceMethod, info, required, returnType,
723 argTypes);
724
725 void *insertPos = nullptr;
726 CIRGenFunctionInfo *fi = functionInfos.FindNodeOrInsertPos(id, insertPos);
727 if (fi) {
728 // We found a matching function info based on id. These asserts verify that
729 // it really is a match.
730 assert(
731 fi->getReturnType() == returnType &&
732 std::equal(fi->argTypesBegin(), fi->argTypesEnd(), argTypes.begin()) &&
733 "Bad match based on CIRGenFunctionInfo folding set id");
734 return *fi;
735 }
736
738
739 // Construction the function info. We co-allocate the ArgInfos.
740 fi = CIRGenFunctionInfo::create(info, isInstanceMethod, returnType, argTypes,
741 required);
742 functionInfos.InsertNode(fi, insertPos);
743
744 return *fi;
745}
746
748 assert(!dyn_cast<ObjCMethodDecl>(gd.getDecl()) &&
749 "This is reported as a FIXME in LLVM codegen");
750 const auto *fd = cast<FunctionDecl>(gd.getDecl());
751
755
757}
758
759// When we find the full definition for a TagDecl, replace the 'opaque' type we
760// previously made for it if applicable.
762 // If this is an enum being completed, then we flush all non-struct types
763 // from the cache. This allows function types and other things that may be
764 // derived from the enum to be recomputed.
765 if ([[maybe_unused]] const auto *ed = dyn_cast<EnumDecl>(td)) {
766 // Classic codegen clears the type cache if it contains an entry for this
767 // enum type that doesn't use i32 as the underlying type, but I can't find
768 // a test case that meets that condition. C++ doesn't allow forward
769 // declaration of enums, and C doesn't allow an incomplete forward
770 // declaration with a non-default type.
771 assert(
772 !typeCache.count(
773 ed->getASTContext().getCanonicalTagType(ed)->getTypePtr()) ||
774 (convertType(ed->getIntegerType()) ==
775 typeCache[ed->getASTContext().getCanonicalTagType(ed)->getTypePtr()]));
776 // If necessary, provide the full definition of a type only used with a
777 // declaration so far.
779 return;
780 }
781
782 // If we completed a RecordDecl that we previously used and converted to an
783 // anonymous type, then go ahead and complete it now.
784 const auto *rd = cast<RecordDecl>(td);
785 if (rd->isDependentType())
786 return;
787
788 // Only complete if we converted it already. If we haven't converted it yet,
789 // we'll just do it lazily.
790 if (recordDeclTypes.count(astContext.getCanonicalTagType(rd).getTypePtr()))
792
793 // If necessary, provide the full definition of a type only used with a
794 // declaration so far.
796}
797
799 // Return the address space for the type. If the type is a
800 // function type without an address space qualifier, the
801 // program address space is used. Otherwise, the target picks
802 // the best address space based on the type information
803 return ty->isFunctionType() && !ty.hasAddressSpace()
804 ? cgm.getDataLayout().getProgramAddressSpace()
806}
Defines the clang::ASTContext interface.
static bool isSafeToConvert(QualType qt, CIRGenTypes &cgt, llvm::SmallPtrSetImpl< const RecordDecl * > &alreadyChecked)
Return true if it is safe to convert this field type, which requires the record elements contained by...
static Decl::Kind getKind(const Decl *D)
C Language Family Type Representation.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
CanQualType getCanonicalTagType(const TagDecl *TD) const
unsigned getTargetAddressSpace(LangAS AS) const
QualType getElementType() const
Definition TypeBase.h:3789
unsigned getIndexTypeCVRQualifiers() const
Definition TypeBase.h:3799
const_arg_iterator argTypesEnd() const
static CIRGenFunctionInfo * create(FunctionType::ExtInfo info, bool instanceMethod, CanQualType resultType, llvm::ArrayRef< CanQualType > argTypes, RequiredArgs required)
static void Profile(llvm::FoldingSetNodeID &id, bool instanceMethod, FunctionType::ExtInfo info, RequiredArgs required, CanQualType resultType, llvm::ArrayRef< CanQualType > argTypes)
const_arg_iterator argTypesBegin() const
This class organizes the cross-function state that is used while generating CIR code.
This class handles record and union layout info while lowering AST types to CIR types.
bool isZeroInitializable() const
Check whether this struct can be C++ zero-initialized with a zeroinitializer.
This class organizes the cross-module state that is used while lowering AST types to CIR types.
Definition CIRGenTypes.h:50
const CIRGenFunctionInfo & arrangeGlobalDeclaration(GlobalDecl gd)
unsigned getTargetAddressSpace(QualType ty) const
const CIRGenFunctionInfo & arrangeCXXStructorDeclaration(clang::GlobalDecl gd)
const CIRGenFunctionInfo & arrangeCIRFunctionInfo(CanQualType returnType, bool isInstanceMethod, llvm::ArrayRef< CanQualType > argTypes, FunctionType::ExtInfo info, RequiredArgs required)
const CIRGenFunctionInfo & arrangeFreeFunctionType(CanQual< FunctionProtoType > fpt)
bool isZeroInitializable(clang::QualType ty)
Return whether a type can be zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
bool isFuncTypeConvertible(const clang::FunctionType *ft)
Utility to check whether a function type can be converted to a CIR type (i.e.
CIRGenTypes(CIRGenModule &cgm)
bool isRecordBeingLaidOut(const clang::Type *ty) const
CIRGenBuilderTy & getBuilder() const
Definition CIRGenTypes.h:89
mlir::MLIRContext & getMLIRContext() const
bool isCachedSafeToConvert(const clang::Type *key) const
Check if a record type key is in the safe-to-convert cache.
cir::FuncType getFunctionType(const CIRGenFunctionInfo &info)
Get the CIR function type for.
bool isFuncParamTypeConvertible(clang::QualType type)
Return true if the specified type in a function parameter or result position can be converted to a CI...
void updateCompletedType(const clang::TagDecl *td)
UpdateCompletedType - when we find the full definition for a TagDecl, replace the 'opaque' type we pr...
std::string getRecordTypeName(const clang::RecordDecl *, llvm::StringRef suffix)
bool noRecordsBeingLaidOut() const
const ABIInfo & getABIInfo() const
const CIRGenFunctionInfo & arrangeFunctionDeclaration(const clang::FunctionDecl *fd)
Free functions are functions that are compatible with an ordinary C function pointer type.
clang::ASTContext & getASTContext() const
bool isRecordLayoutComplete(const clang::Type *ty) const
Return true if the specified type is already completely laid out.
mlir::Type convertType(clang::QualType type)
Convert a Clang type into a mlir::Type.
const CIRGenRecordLayout & getCIRGenRecordLayout(const clang::RecordDecl *rd)
Return record layout info for the given record decl.
std::unique_ptr< CIRGenRecordLayout > computeRecordLayout(const clang::RecordDecl *rd, cir::RecordType *ty)
mlir::Type convertRecordDeclType(const clang::RecordDecl *recordDecl)
Lay out a tagged decl type like struct or union.
void cacheSafeToConvert(const clang::Type *key)
Add a record type key to the safe-to-convert cache.
mlir::Type convertTypeForMem(clang::QualType, bool forBitField=false)
Convert type T into an mlir::Type.
A class for recording the number of arguments that a function signature requires.
Represents a base class of a C++ class.
Definition DeclCXX.h:146
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
static CanQual< T > CreateUnsafe(QualType Other)
Builds a canonical type from a QualType.
bool isCanonicalAsParam() const
Determines if this canonical type is furthermore canonical as a parameter.
const T * getTypePtr() const
Retrieve the underlying type pointer, which refers to a canonical type.
Represents the canonical version of C arrays with a specified constant size.
Definition TypeBase.h:3815
llvm::APInt getSize() const
Return the constant array size as an APInt.
Definition TypeBase.h:3871
Represents a member of a struct/union/class.
Definition Decl.h:3178
Represents a K&R-style 'int foo()' function, which has no information available about its arguments.
Definition TypeBase.h:4940
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5362
A class which abstracts out some details necessary for making a call.
Definition TypeBase.h:4669
FunctionType - C99 6.7.5.3 - Function Declarators.
Definition TypeBase.h:4558
QualType getReturnType() const
Definition TypeBase.h:4898
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
Represents a C array with an unspecified size.
Definition TypeBase.h:3964
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3708
Represents a C++ nested name specifier, such as "::std::vector<int>::".
CXXRecordDecl * getAsRecordDecl() const
Retrieve the record declaration stored in this nested name specifier, or null.
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3383
QualType getPointeeType() const
Definition TypeBase.h:3393
A (possibly-)qualified type.
Definition TypeBase.h:937
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8436
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8562
void print(raw_ostream &OS, const PrintingPolicy &Policy, const Twine &PlaceHolder=Twine(), unsigned Indentation=0) const
bool hasAddressSpace() const
Check if this type has any address space qualifier.
Definition TypeBase.h:8557
bool isCanonical() const
Definition TypeBase.h:8493
Represents a struct/union/class.
Definition Decl.h:4343
field_range fields() const
Definition Decl.h:4546
RecordDecl * getDefinition() const
Returns the RecordDecl that actually defines this struct/union/class.
Definition Decl.h:4527
Base for LValueReferenceType and RValueReferenceType.
Definition TypeBase.h:3628
QualType getPointeeType() const
Definition TypeBase.h:3646
Encodes a location in the source.
Represents the declaration of a struct/union/class/enum.
Definition Decl.h:3735
bool isCompleteDefinition() const
Return true if this decl has its body fully specified.
Definition Decl.h:3836
The base class of the type hierarchy.
Definition TypeBase.h:1871
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isConstantMatrixType() const
Definition TypeBase.h:8840
EnumDecl * castAsEnumDecl() const
Definition Type.h:59
bool isFunctionType() const
Definition TypeBase.h:8669
TypeClass getTypeClass() const
Definition TypeBase.h:2438
const T * getAs() const
Member template getAs<specific type>: look through sugar to retrieve the type as the specified type, or null if it is not of that type.
Definition TypeBase.h:9266
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:4021
Represents a GCC generic vector type.
Definition TypeBase.h:4230
unsigned getNumElements() const
Definition TypeBase.h:4245
QualType getElementType() const
Definition TypeBase.h:4244
Defines the clang::TargetInfo interface.
bool isSized(mlir::Type ty)
Returns true if the type is a CIR sized type.
Definition CIRTypes.cpp:34
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
const internal::VariadicDynCastAllOfMatcher< Decl, TypedefNameDecl > typedefNameDecl
Matches typedef name declarations.
const AstTypeMatcher< TagType > tagType
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< RecordType > recordType
const internal::VariadicDynCastAllOfMatcher< Decl, CXXRecordDecl > cxxRecordDecl
Matches C++ class declarations.
const internal::VariadicDynCastAllOfMatcher< Decl, RecordDecl > recordDecl
Matches class, struct, and union declarations.
const internal::VariadicAllOfMatcher< QualType > qualType
Matches QualTypes in the clang AST.
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
bool isInstanceMethod(const Decl *D)
Definition Attr.h:120
U cast(CodeGen::Address addr)
Definition Address.h:327
static bool opCallCallConv()
static bool generateDebugInfo()
Describes how types, statements, expressions, and declarations should be printed.
unsigned SuppressTagKeyword
Whether type printing should skip printing the tag keyword.
unsigned AlwaysIncludeTypeForTemplateArgument
Whether to use type suffixes (eg: 1U) on integral non-type template parameters.
unsigned SuppressInlineNamespace
Suppress printing parts of scope specifiers that correspond to inline namespaces.
unsigned PrintAsCanonical
Whether to print entities as written or canonically.