#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace clang;
using namespace CodeGen;
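// CGRecordLowering is the central helper of this file: it lowers an
// ASTRecordLayout into an llvm::StructType plus the side tables a
// CGRecordLayout carries (field numbers, bitfield descriptors, and base
// subobject indices).  Members are gathered as MemberInfo entries, sorted by
// offset, and then clipped, packed, and padded into a final field list.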
namespace {
struct CGRecordLowering {
  // MemberInfo holds one record member (or sentinel) awaiting layout.
  struct MemberInfo {
    CharUnits Offset;
    enum InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } Kind;
    llvm::Type *Data;
    // ... (FieldDecl/CXXRecordDecl payload and constructors elided)
  };
  /// Constructs a MemberInfo instance from an offset and an llvm::Type *.
  MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
    return MemberInfo(Offset, MemberInfo::Field, Data);
  }
  // True for MS-ABI-style bitfield layout (discrete per-type storage units).
  bool isDiscreteBitFieldABI() {
    return Context.getTargetInfo().getCXXABI().isMicrosoft() ||
           D->isMsStruct(Context);
  }
  // The Itanium ABI may place a virtual base inside the tail padding of other
  // bases (dsize < nvsize), which complicates layout.
  bool isOverlappingVBaseABI() {
    return Context.getTargetInfo().getCXXABI().isItanium();
  }
  llvm::Type *getIntNType(uint64_t NumBits) {
    unsigned AlignedBits = llvm::alignTo(NumBits, Context.getCharWidth());
    return llvm::Type::getIntNTy(Types.getLLVMContext(), AlignedBits);
  }
  llvm::Type *getCharType() {
    return llvm::Type::getIntNTy(Types.getLLVMContext(),
                                 Context.getCharWidth());
  }
  llvm::Type *getByteArrayType(CharUnits NumChars) {
    assert(!NumChars.isZero() && "Empty byte arrays aren't allowed.");
    llvm::Type *Type = getCharType();
    return NumChars == CharUnits::One() ? Type :
        (llvm::Type *)llvm::ArrayType::get(Type, NumChars.getQuantity());
  }
  /// Gets the storage type for a field decl and handles storage
  /// for bitfields.
  llvm::Type *getStorageType(const FieldDecl *FD) {
    llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
    if (!FD->isBitField()) return Type;
    if (isDiscreteBitFieldABI()) return Type;
    return getIntNType(std::min(FD->getBitWidthValue(Context),
                                (unsigned)Context.toBits(getSize(Type))));
  }
  /// Gets the llvm base subobject type for a CXXRecordDecl.
  llvm::Type *getStorageType(const CXXRecordDecl *RD) {
    return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
  }
  bool isZeroInitializable(const FieldDecl *FD) {
    return Types.isZeroInitializable(FD->getType());
  }
  bool isZeroInitializable(const RecordDecl *RD) {
    return Types.isZeroInitializable(RD);
  }
  /// Appends Size bytes of padding as a char array.
  void appendPaddingBytes(CharUnits Size) {
    if (!Size.isZero())
      FieldTypes.push_back(getByteArrayType(Size));
  }
  // ...
  void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset,
                       llvm::Type *StorageType);
  /// Lowers an ASTRecordLayout to an llvm type.
  void lower(bool NonVirtualBaseType);
  void lowerUnion();
  void accumulateFields();
  void accumulateBitFields(RecordDecl::field_iterator Field,
                           RecordDecl::field_iterator FieldEnd);
  void computeVolatileBitfields();
  void accumulateBases();
  void accumulateVPtrs();
  void accumulateVBases();
  bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
  void calculateZeroInit();
  void clipTailPadding();
  void determinePacked(bool NVBaseType);
  void insertPadding();
  void fillOutputFields();
  // Inputs memoized from the lowering request.
  CodeGenTypes &Types;
  const ASTContext &Context;
  const RecordDecl *D;
  const CXXRecordDecl *RD;
  const ASTRecordLayout &Layout;
  const llvm::DataLayout &DataLayout;
  // Intermediate and output state.
  std::vector<MemberInfo> Members;
  SmallVector<llvm::Type *, 16> FieldTypes;
  llvm::DenseMap<const FieldDecl *, unsigned> Fields;
  llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
  bool IsZeroInitializable : 1;
  bool IsZeroInitializableAsBase : 1;
  bool Packed : 1;
private:
  CGRecordLowering(const CGRecordLowering &) = delete;
  void operator =(const CGRecordLowering &) = delete;
};
} // namespace
CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D,
                                   bool Packed)
    : Types(Types), Context(Types.getContext()), D(D),
      RD(dyn_cast<CXXRecordDecl>(D)),
      Layout(Types.getContext().getASTRecordLayout(D)),
      DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
      IsZeroInitializableAsBase(true), Packed(Packed) {}
void CGRecordLowering::setBitFieldInfo(
    const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) {
  CGBitFieldInfo &Info = BitFields[FD->getCanonicalDecl()];
  // ... (size, offset, and storage fields computed here)
  // Reverse the bit offsets for big endian machines.
  if (DataLayout.isBigEndian())
    Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);
  // ...
}
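// lower() drives the whole algorithm: accumulate members (fields, vptrs,
// bases, virtual bases), sort them by offset, append a capstone member that
// stands for the record's total size, clip tail padding, decide on
// packedness, insert explicit padding, and fill the output tables.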
void CGRecordLowering::lower(bool NVBaseType) {
  CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
  if (D->isUnion()) {
    lowerUnion();
    computeVolatileBitfields();
    return;
  }
  accumulateFields();
  if (RD) {  // RD is non-null only for C++ records.
    accumulateVPtrs();
    accumulateBases();
    if (Members.empty()) {
      appendPaddingBytes(Size);
      computeVolatileBitfields();
      return;
    }
    accumulateVBases();
  }
  llvm::stable_sort(Members);
  Members.push_back(StorageInfo(Size, getIntNType(8)));
  clipTailPadding();
  determinePacked(NVBaseType);
  insertPadding();
  Members.pop_back();
  calculateZeroInit();
  fillOutputFields();
  computeVolatileBitfields();
}
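// Unions get a single storage member: the "best" field type (preferring
// higher alignment, then larger size, while staying zero-initializable),
// padded out to the union's full size with a char array.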
void CGRecordLowering::lowerUnion() {
  CharUnits LayoutSize = Layout.getSize();
  llvm::Type *StorageType = nullptr;
  bool SeenNamedMember = false;
  // Iterate through the fields setting bitFieldInfo and the Fields array.
  // Also locate the "most appropriate" storage type.
  for (const auto *Field : D->fields()) {
    if (Field->isBitField()) {
      if (Field->isZeroLengthBitField(Context))
        continue;
      llvm::Type *FieldType = getStorageType(Field);
      if (LayoutSize < getSize(FieldType))
        FieldType = getByteArrayType(LayoutSize);
      setBitFieldInfo(Field, CharUnits::Zero(), FieldType);
    }
    Fields[Field->getCanonicalDecl()] = 0;
    llvm::Type *FieldType = getStorageType(Field);
    // A union which might not be zero-initializable should not pick a
    // "better" storage type: computing an initializer for it may be hard.
    if (!SeenNamedMember) {
      SeenNamedMember = Field->getIdentifier();
      if (!SeenNamedMember)
        if (const auto *FieldRD = Field->getType()->getAsRecordDecl())
          SeenNamedMember = FieldRD->findFirstNamedDataMember();
      if (SeenNamedMember && !isZeroInitializable(Field)) {
        IsZeroInitializable = IsZeroInitializableAsBase = false;
        StorageType = FieldType;
      }
    }
    // Because our union isn't zero initializable, we won't be getting a
    // better storage type.
    if (!IsZeroInitializable)
      continue;
    // Conditionally update our storage type if we've got a new "better" one.
    if (!StorageType ||
        getAlignment(FieldType) > getAlignment(StorageType) ||
        (getAlignment(FieldType) == getAlignment(StorageType) &&
         getSize(FieldType) > getSize(StorageType)))
      StorageType = FieldType;
  }
  // If we have no storage type just pad to the appropriate size and return.
  if (!StorageType)
    return appendPaddingBytes(LayoutSize);
  // If our storage size was bigger than our required size (can happen in the
  // case of packed bitfields on Itanium) then just use an I8 array.
  if (LayoutSize < getSize(StorageType))
    StorageType = getByteArrayType(LayoutSize);
  FieldTypes.push_back(StorageType);
  appendPaddingBytes(LayoutSize - getSize(StorageType));
  // Set packed if we need it.
  if (LayoutSize % getAlignment(StorageType))
    Packed = true;
}
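// Walks the fields in declaration order, peeling off contiguous bitfield
// runs for accumulateBitFields() and emitting one MemberInfo per remaining
// non-zero-size field.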
void CGRecordLowering::accumulateFields() {
  for (RecordDecl::field_iterator Field = D->field_begin(),
                                  FieldEnd = D->field_end();
       Field != FieldEnd;) {
    if (Field->isBitField()) {
      RecordDecl::field_iterator Start = Field;
      // Iterate to gather the list of bitfields.
      for (++Field; Field != FieldEnd && Field->isBitField(); ++Field);
      accumulateBitFields(Start, Field);
    } else if (!Field->isZeroSize(Context)) {
      Members.push_back(MemberInfo(
          bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
          getStorageType(*Field), *Field));
      ++Field;
    } else {
      // Zero-size fields get no storage.
      ++Field;
    }
  }
}
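// accumulateBitFields gathers maximal contiguous runs of bitfields and backs
// each run with a single iN storage member.  As a sketch (the exact layout
// depends on the target's data layout and ABI), a record such as
//
//   struct S { int a : 3; int b : 6; unsigned c : 7; };
//
// forms one 16-bit run, so on a typical Itanium target it lowers to
//
//   %struct.S = type { i16, [2 x i8] }
//
// with CGBitFieldInfo entries placing 'a' at bits [0,3), 'b' at [3,9), and
// 'c' at [9,16) of the shared i16 storage (offsets are mirrored on
// big-endian targets).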
void CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
                                           RecordDecl::field_iterator FieldEnd) {
  // Run stores the first element of the current run of bitfields.  FieldEnd
  // is used as a special value to note that we don't have a current run.
  RecordDecl::field_iterator Run = FieldEnd;
  // Tail is the offset of the first bit off the end of the current run.
  uint64_t StartBitOffset, Tail = 0;
  if (isDiscreteBitFieldABI()) {
    for (; Field != FieldEnd; ++Field) {
      uint64_t BitOffset = getFieldBitOffset(*Field);
      // Zero-width bitfields end runs.
      if (Field->isZeroLengthBitField(Context)) {
        Run = FieldEnd;
        continue;
      }
      llvm::Type *Type =
          Types.ConvertTypeForMem(Field->getType(), /*ForBitField=*/true);
      // If we don't have a run yet, or don't live within the previous run's
      // allocated storage then we allocate some storage and start a new run.
      if (Run == FieldEnd || BitOffset >= Tail) {
        Run = Field;
        StartBitOffset = BitOffset;
        Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
        // The storage member precedes the bitfields it contains.
        Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
      }
      // Bitfields get the offset of their storage but come afterward and
      // remain there after a stable sort.
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Field));
    }
    return;
  }
  // Checks whether a run of OffsetInRecord bits is better laid out as its
  // own single storage field.
  auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
                                      uint64_t StartBitOffset) {
    if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
      return false;
    if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
        !DataLayout.fitsInLegalInteger(OffsetInRecord))
      return false;
    // StartBitOffset must be naturally aligned for the IntN type.
    if (StartBitOffset %
            Context.toBits(getAlignment(getIntNType(OffsetInRecord))) != 0)
      return false;
    return true;
  };
  bool StartFieldAsSingleRun = false;
  for (;;) {
    // Check to see if we need to start a new run.
    if (Run == FieldEnd) {
      // If we're out of fields, we're done.
      if (Field == FieldEnd)
        break;
      // Any non-zero-length bitfield can start a new run.
      if (!Field->isZeroLengthBitField(Context)) {
        Run = Field;
        StartBitOffset = getFieldBitOffset(*Field);
        Tail = StartBitOffset + Field->getBitWidthValue(Context);
        StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
                                                         StartBitOffset);
      }
      ++Field;
      continue;
    }
    // Add the current field to the run if it is contiguous with the run's
    // tail and neither side is better off as its own storage unit.
    if (!StartFieldAsSingleRun && Field != FieldEnd &&
        !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
        (!Field->isZeroLengthBitField(Context) ||
         (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
          !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
        Tail == getFieldBitOffset(*Field)) {
      Tail += Field->getBitWidthValue(Context);
      ++Field;
      continue;
    }
    // We've hit a break-point in the run and need to emit a storage field.
    llvm::Type *Type = getIntNType(Tail - StartBitOffset);
    // Add the storage member to the record and set the bitfield info for all
    // of the bitfields in the run.  Bitfields get the offset of their storage
    // but come afterward and remain there after a stable sort.
    Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
    for (; Run != Field; ++Run)
      Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
                                   MemberInfo::Field, nullptr, *Run));
    Run = FieldEnd;
    StartFieldAsSingleRun = false;
  }
}
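// Adds the primary base first when it is virtual (it sits at offset zero),
// then every non-virtual, non-empty base at the offset the AST layout
// assigned it.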
void CGRecordLowering::accumulateBases() {
  // If we've got a primary virtual base, we need to add it with the bases.
  if (Layout.isPrimaryBaseVirtual()) {
    const CXXRecordDecl *BaseDecl = Layout.getPrimaryBase();
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::Base,
                                 getStorageType(BaseDecl), BaseDecl));
  }
  // Accumulate the non-virtual bases.
  for (const auto &Base : RD->bases()) {
    if (Base.isVirtual())
      continue;
    // Bases can be zero-sized even if not technically empty if they
    // contain only a trailing array member.
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (!BaseDecl->isEmpty() &&
        !Context.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
      Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
          MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
  }
}
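// For AAPCS targets, a volatile bitfield access must use a container as wide
// as the bitfield's declared type.  Recompute each CGBitFieldInfo's volatile
// storage parameters, bailing out whenever a type-width access would be
// misaligned, run past the record, or touch a neighboring non-bitfield
// member.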
void CGRecordLowering::computeVolatileBitfields() {
  if (!isAAPCS() || !Types.getCodeGenOpts().AAPCSBitfieldWidth)
    return;
  for (auto &I : BitFields) {
    const FieldDecl *Field = I.first;
    CGBitFieldInfo &Info = I.second;
    llvm::Type *ResLTy = Types.ConvertTypeForMem(Field->getType());
    // If the record alignment is less than the type width, we can't enforce
    // an aligned load; bail out.
    if ((uint64_t)(Context.toBits(Layout.getAlignment())) <
        ResLTy->getPrimitiveSizeInBits())
      continue;
    // Undo setBitFieldInfo()'s big-endian adjustment before recomputing
    // against the type-width container.
    const unsigned OldOffset =
        isBE() ? Info.StorageSize - (Info.Offset + Info.Size) : Info.Offset;
    // Offset to the bitfield from the beginning of the struct.
    const unsigned AbsoluteOffset =
        Context.toBits(Info.StorageOffset) + OldOffset;
    // The storage size is the width of the bitfield's declared type.
    const unsigned StorageSize = ResLTy->getPrimitiveSizeInBits();
    // Nothing to do if the access already uses the desired container width
    // and is naturally aligned.
    if (Info.StorageSize == StorageSize && (OldOffset % StorageSize == 0))
      continue;
    // Offset within the container.
    unsigned Offset = AbsoluteOffset & (StorageSize - 1);
    // ... (in-bounds checks and big-endian re-adjustment of Offset elided)
    const CharUnits StorageOffset =
        Context.toCharUnitsFromBits(AbsoluteOffset & ~(StorageSize - 1));
    const CharUnits End = StorageOffset +
                          Context.toCharUnitsFromBits(StorageSize) -
                          CharUnits::One();
    const CharUnits RecordSize = Layout.getSize();
    if (End >= RecordSize)
      continue;
    // Bail out if a type-width load would touch non-bitfield members.
    bool Conflict = false;
    for (const auto *F : D->fields()) {
      // Allow sized bitfields to overlap.
      if (F->isBitField() && !F->isZeroLengthBitField(Context))
        continue;
      const CharUnits FOffset = Context.toCharUnitsFromBits(
          Layout.getFieldOffset(F->getFieldIndex()));
      // Zero-length bitfields are barriers: conflict only if the container
      // would straddle one.
      if (F->isZeroLengthBitField(Context)) {
        if (End > FOffset && StorageOffset < FOffset) {
          Conflict = true;
          break;
        }
        continue;
      }
      const CharUnits FEnd =
          FOffset +
          Context.toCharUnitsFromBits(
              Types.ConvertTypeForMem(F->getType())->getPrimitiveSizeInBits()) -
          CharUnits::One();
      // No overlap; keep scanning.
      if (End < FOffset || FEnd < StorageOffset)
        continue;
      Conflict = true;
      break;
    }
    if (Conflict)
      continue;
    // ... (Info's volatile storage fields are updated here)
  }
}
void CGRecordLowering::accumulateVPtrs() {
  if (Layout.hasOwnVFPtr())
    Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
        llvm::FunctionType::get(getIntNType(32), /*isVarArg=*/true)->
            getPointerTo()->getPointerTo()));
  if (Layout.hasOwnVBPtr())
    Members.push_back(MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr,
        llvm::Type::getInt32PtrTy(Types.getLLVMContext())));
}
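// Lays out the virtual bases.  The "scissor" marks where the complete-object
// tail (the virtual bases) begins; on Itanium a virtual base may start
// before the non-virtual size, so the scissor is pulled back accordingly.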
void CGRecordLowering::accumulateVBases() {
  CharUnits ScissorOffset = Layout.getNonVirtualSize();
  // In the itanium ABI, it's possible to place a vbase at a dsize that is
  // smaller than the nvsize.  Check for such a base and lower the scissor
  // offset to it.
  if (isOverlappingVBaseABI())
    for (const auto &Base : RD->vbases()) {
      const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
      if (BaseDecl->isEmpty())
        continue;
      // If the vbase is a primary virtual base of some base, then it doesn't
      // get its own storage location but instead lives inside of that base.
      if (Context.isNearlyEmpty(BaseDecl) && !hasOwnStorage(RD, BaseDecl))
        continue;
      ScissorOffset = std::min(ScissorOffset,
                               Layout.getVBaseClassOffset(BaseDecl));
    }
  Members.push_back(MemberInfo(ScissorOffset, MemberInfo::Scissor, nullptr,
                               RD));
  for (const auto &Base : RD->vbases()) {
    const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
    if (BaseDecl->isEmpty())
      continue;
    CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
    // If the vbase is a primary virtual base of some base, then it doesn't
    // get its own storage location but instead lives inside of that base.
    if (isOverlappingVBaseABI() &&
        Context.isNearlyEmpty(BaseDecl) &&
        !hasOwnStorage(RD, BaseDecl)) {
      Members.push_back(MemberInfo(Offset, MemberInfo::VBase, nullptr,
                                   BaseDecl));
      continue;
    }
    // ... (vtordisp storage elided)
    Members.push_back(MemberInfo(Offset, MemberInfo::VBase,
                                 getStorageType(BaseDecl), BaseDecl));
  }
}
bool CGRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl,
                                     const CXXRecordDecl *Query) {
  const ASTRecordLayout &DeclLayout = Context.getASTRecordLayout(Decl);
  if (DeclLayout.isPrimaryBaseVirtual() && DeclLayout.getPrimaryBase() == Query)
    return false;
  for (const auto &Base : Decl->bases())
    if (!hasOwnStorage(Base.getType()->getAsCXXRecordDecl(), Query))
      return false;
  return true;
}
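// calculateZeroInit decides whether the record, and the record viewed as a
// base subobject, can be zero-initialized with a simple memset.  A member
// (e.g. a member pointer whose null representation is not all-zero bits)
// defeats this for the whole record.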
void CGRecordLowering::calculateZeroInit() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       IsZeroInitializableAsBase && Member != MemberEnd; ++Member) {
    if (Member->Kind == MemberInfo::Field) {
      if (!Member->FD || isZeroInitializable(Member->FD))
        continue;
      IsZeroInitializable = IsZeroInitializableAsBase = false;
    } else if (Member->Kind == MemberInfo::Base ||
               Member->Kind == MemberInfo::VBase) {
      if (isZeroInitializable(Member->RD))
        continue;
      IsZeroInitializable = false;
      if (Member->Kind == MemberInfo::Base)
        IsZeroInitializableAsBase = false;
    }
  }
}
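// clipTailPadding shrinks a prior member's storage to a byte array when a
// later member (or the virtual-base scissor) starts inside its tail padding,
// e.g. for bitfield storage units or [[no_unique_address]] fields whose
// padding was reused.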
void CGRecordLowering::clipTailPadding() {
  std::vector<MemberInfo>::iterator Prior = Members.begin();
  CharUnits Tail = getSize(Prior->Data);
  for (std::vector<MemberInfo>::iterator Member = Prior + 1,
                                         MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    // Only members with data and the scissor can cut into tail padding.
    if (!Member->Data && Member->Kind != MemberInfo::Scissor)
      continue;
    if (Member->Offset < Tail) {
      assert(Prior->Kind == MemberInfo::Field &&
             "Only storage fields have tail padding!");
      if (!Prior->FD || Prior->FD->isBitField())
        Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
            cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
      else {
        assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
               "should not have reused this field's tail padding");
        Prior->Data = getByteArrayType(
            Context.getTypeInfoDataSizeInChars(Prior->FD->getType()).Width);
      }
    }
    if (Member->Data)
      Prior = Member;
    Tail = Prior->Offset + getSize(Prior->Data);
  }
}
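// A struct must be emitted packed when any member (or the overall size)
// would otherwise violate natural alignment.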
void CGRecordLowering::determinePacked(bool NVBaseType) {
  if (Packed)
    return;
  CharUnits Alignment = CharUnits::One();
  CharUnits NVAlignment = CharUnits::One();
  CharUnits NVSize =
      !NVBaseType && RD ? Layout.getNonVirtualSize() : CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    // If any member falls at an offset that is not a multiple of its
    // alignment, then the entire record must be packed.
    if (Member->Offset % getAlignment(Member->Data))
      Packed = true;
    if (Member->Offset < NVSize)
      NVAlignment = std::max(NVAlignment, getAlignment(Member->Data));
    Alignment = std::max(Alignment, getAlignment(Member->Data));
  }
  // If the size of the record (the capstone's offset) is not a multiple of
  // the record's alignment, it must be packed; likewise for the non-virtual
  // subobject.
  if (Members.back().Offset % Alignment)
    Packed = true;
  if (NVSize % NVAlignment)
    Packed = true;
  // Update the alignment of the sentinel.
  if (!Packed)
    Members.back().Data = getIntNType(Context.toBits(Alignment));
}
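// Walks the sorted members and inserts explicit byte-array padding wherever
// the next member's offset is beyond the naturally aligned running size.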
void CGRecordLowering::insertPadding() {
  std::vector<std::pair<CharUnits, CharUnits> > Padding;
  CharUnits Size = CharUnits::Zero();
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (!Member->Data)
      continue;
    CharUnits Offset = Member->Offset;
    assert(Offset >= Size);
    // Insert padding if we need to.
    if (Offset !=
        Size.alignTo(Packed ? CharUnits::One() : getAlignment(Member->Data)))
      Padding.push_back(std::make_pair(Size, Offset - Size));
    Size = Offset + getSize(Member->Data);
  }
  if (Padding.empty())
    return;
  // Add the padding to the Members list and sort it.
  for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator
           Pad = Padding.begin(), PadEnd = Padding.end();
       Pad != PadEnd; ++Pad)
    Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
  llvm::stable_sort(Members);
}
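// Copies the final member list into FieldTypes and records, per declaration,
// which LLVM field index (or bitfield descriptor) it maps to.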
void CGRecordLowering::fillOutputFields() {
  for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
                                               MemberEnd = Members.end();
       Member != MemberEnd; ++Member) {
    if (Member->Data)
      FieldTypes.push_back(Member->Data);
    if (Member->Kind == MemberInfo::Field) {
      if (Member->FD)
        Fields[Member->FD->getCanonicalDecl()] = FieldTypes.size() - 1;
      // A field without storage must be a bitfield sharing the prior storage.
      if (!Member->Data)
        setBitFieldInfo(Member->FD, Member->Offset, FieldTypes.back());
    } else if (Member->Kind == MemberInfo::Base)
      NonVirtualBases[Member->RD] = FieldTypes.size() - 1;
    else if (Member->Kind == MemberInfo::VBase)
      VirtualBases[Member->RD] = FieldTypes.size() - 1;
  }
}
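// CGBitFieldInfo::MakeInfo builds a bitfield descriptor directly from the
// declaration: over-wide bitfields are clamped to the declared type's width
// (the extra bits are pure padding) and bit offsets are mirrored for
// big-endian targets.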
CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                        const FieldDecl *FD, uint64_t Offset,
                                        uint64_t Size, uint64_t StorageSize,
                                        CharUnits StorageOffset) {
  llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
  CharUnits TypeSizeInBytes =
      CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
  uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);
  bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
  if (Size > TypeSizeInBits) {
    // A wide bitfield "T t : N" with N larger than T's width only uses the
    // extra bits as padding, so treat it as "T t : sizeof(T) * CHAR_BIT".
    Size = TypeSizeInBits;
  }
  // Reverse the bit offsets for big endian machines.
  if (Types.getDataLayout().isBigEndian()) {
    Offset = StorageSize - (Offset + Size);
  }
  return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset);
}
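// The public entry point: lowers the complete-object type, optionally lowers
// a separate base-subobject type for C++ classes whose non-virtual size
// differs from their complete size, and (in asserts builds) cross-checks the
// resulting LLVM layout against the ASTRecordLayout.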
std::unique_ptr<CGRecordLayout>
CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty) {
  CGRecordLowering Builder(*this, D, /*Packed=*/false);

  Builder.lower(/*NonVirtualBaseType=*/false);

  // If we're in C++, compute the base subobject type.
  llvm::StructType *BaseTy = nullptr;
  if (isa<CXXRecordDecl>(D) && !D->isUnion() && !D->hasAttr<FinalAttr>()) {
    BaseTy = Ty;
    if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
      CGRecordLowering BaseBuilder(*this, D, /*Packed=*/Builder.Packed);
      BaseBuilder.lower(/*NonVirtualBaseType=*/true);
      BaseTy = llvm::StructType::create(
          getLLVMContext(), BaseBuilder.FieldTypes, "", BaseBuilder.Packed);
      // ...
      assert(Builder.Packed == BaseBuilder.Packed &&
             "Non-virtual and complete types must agree on packedness");
    }
  }

  // Fill in the struct *after* computing the base type, since filling in the
  // body signifies that the type is no longer opaque.
  Ty->setBody(Builder.FieldTypes, Builder.Packed);
  auto RL = std::make_unique<CGRecordLayout>(
      Ty, BaseTy, (bool)Builder.IsZeroInitializable,
      (bool)Builder.IsZeroInitializableAsBase);

  RL->NonVirtualBases.swap(Builder.NonVirtualBases);
  RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);

  // Add all the field numbers.
  RL->FieldInfo.swap(Builder.Fields);

  // Add bitfield info.
  RL->BitFields.swap(Builder.BitFields);
  // Dump the layout, if requested.
  if (getContext().getLangOpts().DumpRecordLayouts) {
    llvm::outs() << "\n*** Dumping IRgen Record Layout\n";
    llvm::outs() << "Record: ";
    D->dump(llvm::outs());
    llvm::outs() << "\nLayout: ";
    RL->print(llvm::outs());
  }
#ifndef NDEBUG
  // Verify that the computed LLVM struct size matches the AST layout size.
  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);
  uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
  assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
         "Type size mismatch!");

  if (BaseTy) {
    CharUnits NonVirtualSize = Layout.getNonVirtualSize();
    uint64_t AlignedNonVirtualTypeSizeInBits =
        getContext().toBits(NonVirtualSize);
    assert(AlignedNonVirtualTypeSizeInBits ==
               getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
           "Type size mismatch!");
  }
  // Verify that the LLVM and AST field offsets agree.
  llvm::StructType *ST = RL->getLLVMType();
  const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);

  const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
  RecordDecl::field_iterator it = D->field_begin();
  for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
    const FieldDecl *FD = *it;
    // ... (zero-size fields skipped)
    // For non-bitfields, check that the LLVM struct offset matches the
    // AST offset.
    if (!FD->isBitField()) {
      unsigned FieldNo = RL->getLLVMFieldNo(FD);
      assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
             "Invalid field offset!");
      continue;
    }
    // ... (unnamed bitfields skipped)
    const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
    llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));

    // Unions have overlapping elements dictating their layout, but for
    // non-unions we can verify that this section of the layout is the exact
    // expected size.
    if (D->isUnion()) {
      // On big-endian targets the bitfield "starts" at the back, so offset
      // plus size must equal the storage size.
      if (getDataLayout().isBigEndian())
        assert(static_cast<unsigned>(Info.Offset + Info.Size) ==
                   Info.StorageSize &&
               "Big endian union bitfield does not end at the back");
      else
        assert(Info.Offset == 0 &&
               "Little endian union bitfield with a non-zero offset");
      assert(Info.StorageSize <= SL->getSizeInBits() &&
             "Union not large enough for bitfield storage");
    } else {
      assert((Info.StorageSize ==
                  getDataLayout().getTypeAllocSizeInBits(ElementTy) ||
              Info.VolatileStorageSize ==
                  getDataLayout().getTypeAllocSizeInBits(ElementTy)) &&
             "Storage size does not match the element type size");
    }
    assert(Info.Size > 0 && "Empty bitfield!");
    assert(static_cast<unsigned>(Info.Offset) + Info.Size <=
               Info.StorageSize &&
           "Bitfield outside of its allocated storage");
  }
#endif

  return RL;
}
void CGRecordLayout::print(raw_ostream &OS) const {
  OS << "<CGRecordLayout\n";
  OS << "  LLVMType:" << *CompleteObjectType << "\n";
  if (BaseSubobjectType)
    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";

  // Print bitfield infos in declaration order.
  std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
  for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
         it = BitFields.begin(), ie = BitFields.end();
       it != ie; ++it) {
    const RecordDecl *RD = it->first->getParent();
    unsigned Index = 0;
    for (RecordDecl::field_iterator
           it2 = RD->field_begin(); *it2 != it->first; ++it2)
      ++Index;
    BFIs.push_back(std::make_pair(Index, &it->second));
  }
  llvm::array_pod_sort(BFIs.begin(), BFIs.end());
  for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
    OS.indent(4);
    BFIs[i].second->print(OS);
    OS << "\n";
  }

  OS << "]>\n";
}
LLVM_DUMP_METHOD void CGRecordLayout::dump() const {
  print(llvm::errs());
}
void CGBitFieldInfo::print(raw_ostream &OS) const {
  OS << "<CGBitFieldInfo"
     << " Offset:" << Offset << " Size:" << Size << " IsSigned:" << IsSigned
     << " StorageSize:" << StorageSize
     << " StorageOffset:" << StorageOffset.getQuantity()
     // ... (volatile bitfield fields elided)
     << ">";
}

LLVM_DUMP_METHOD void CGBitFieldInfo::dump() const {
  print(llvm::errs());
}