23#include "llvm/IR/DataLayout.h"
24#include "llvm/IR/DerivedTypes.h"
25#include "llvm/IR/Type.h"
26#include "llvm/Support/Debug.h"
27#include "llvm/Support/MathExtras.h"
28#include "llvm/Support/raw_ostream.h"
30using namespace CodeGen;
73struct CGRecordLowering {
79 enum InfoKind { VFPtr, VBPtr,
Field,
Base, VBase, Scissor }
Kind;
85 MemberInfo(
CharUnits Offset, InfoKind Kind, llvm::Type *Data,
88 MemberInfo(
CharUnits Offset, InfoKind Kind, llvm::Type *Data,
92 bool operator <(
const MemberInfo& a)
const {
return Offset < a.Offset; }
99 return MemberInfo(Offset, MemberInfo::Field,
Data);
107 bool isDiscreteBitFieldABI() {
109 D->isMsStruct(Context);
124 bool isOverlappingVBaseABI() {
129 llvm::Type *getIntNType(uint64_t NumBits) {
130 unsigned AlignedBits = llvm::alignTo(NumBits, Context.
getCharWidth());
131 return llvm::Type::getIntNTy(Types.getLLVMContext(), AlignedBits);
134 llvm::Type *getCharType() {
135 return llvm::Type::getIntNTy(Types.getLLVMContext(),
139 llvm::Type *getByteArrayType(
CharUnits NumChars) {
140 assert(!NumChars.
isZero() &&
"Empty byte arrays aren't allowed.");
141 llvm::Type *
Type = getCharType();
147 llvm::Type *getStorageType(
const FieldDecl *FD) {
148 llvm::Type *
Type = Types.ConvertTypeForMem(FD->
getType());
150 if (isDiscreteBitFieldABI())
return Type;
156 return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
158 CharUnits bitsToCharUnits(uint64_t BitOffset) {
167 bool isZeroInitializable(
const FieldDecl *FD) {
168 return Types.isZeroInitializable(FD->
getType());
170 bool isZeroInitializable(
const RecordDecl *RD) {
171 return Types.isZeroInitializable(RD);
173 void appendPaddingBytes(
CharUnits Size) {
175 FieldTypes.push_back(getByteArrayType(Size));
182 llvm::Type *StorageType);
184 void lower(
bool NonVirtualBaseType);
185 void lowerUnion(
bool isNoUniqueAddress);
186 void accumulateFields();
189 void computeVolatileBitfields();
190 void accumulateBases();
191 void accumulateVPtrs();
192 void accumulateVBases();
196 void calculateZeroInit();
199 void clipTailPadding();
201 void determinePacked(
bool NVBaseType);
203 void insertPadding();
205 void fillOutputFields();
212 const llvm::DataLayout &DataLayout;
214 std::vector<MemberInfo> Members;
217 llvm::DenseMap<const FieldDecl *, unsigned> Fields;
218 llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
219 llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
220 llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
221 bool IsZeroInitializable : 1;
222 bool IsZeroInitializableAsBase : 1;
225 CGRecordLowering(
const CGRecordLowering &) =
delete;
226 void operator =(
const CGRecordLowering &) =
delete;
232 : Types(Types), Context(Types.getContext()), D(D),
234 Layout(Types.getContext().getASTRecordLayout(D)),
235 DataLayout(Types.getDataLayout()), IsZeroInitializable(
true),
236 IsZeroInitializableAsBase(
true), Packed(Packed) {}
238void CGRecordLowering::setBitFieldInfo(
252 if (DataLayout.isBigEndian())
260void CGRecordLowering::lower(
bool NVBaseType) {
281 CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
283 lowerUnion(NVBaseType);
284 computeVolatileBitfields();
292 if (Members.empty()) {
293 appendPaddingBytes(Size);
294 computeVolatileBitfields();
300 llvm::stable_sort(Members);
301 Members.push_back(StorageInfo(Size, getIntNType(8)));
303 determinePacked(NVBaseType);
308 computeVolatileBitfields();
311void CGRecordLowering::lowerUnion(
bool isNoUniqueAddress) {
313 isNoUniqueAddress ? Layout.getDataSize() : Layout.getSize();
314 llvm::Type *StorageType =
nullptr;
315 bool SeenNamedMember =
false;
321 for (
const auto *Field : D->fields()) {
322 if (
Field->isBitField()) {
323 if (
Field->isZeroLengthBitField(Context))
325 llvm::Type *FieldType = getStorageType(Field);
326 if (LayoutSize < getSize(FieldType))
327 FieldType = getByteArrayType(LayoutSize);
330 Fields[
Field->getCanonicalDecl()] = 0;
331 llvm::Type *FieldType = getStorageType(Field);
338 if (!SeenNamedMember) {
339 SeenNamedMember =
Field->getIdentifier();
340 if (!SeenNamedMember)
341 if (
const auto *FieldRD =
Field->getType()->getAsRecordDecl())
342 SeenNamedMember = FieldRD->findFirstNamedDataMember();
343 if (SeenNamedMember && !isZeroInitializable(Field)) {
344 IsZeroInitializable = IsZeroInitializableAsBase =
false;
345 StorageType = FieldType;
350 if (!IsZeroInitializable)
354 getAlignment(FieldType) > getAlignment(StorageType) ||
355 (getAlignment(FieldType) == getAlignment(StorageType) &&
356 getSize(FieldType) > getSize(StorageType)))
357 StorageType = FieldType;
361 return appendPaddingBytes(LayoutSize);
364 if (LayoutSize < getSize(StorageType))
365 StorageType = getByteArrayType(LayoutSize);
366 FieldTypes.push_back(StorageType);
367 appendPaddingBytes(LayoutSize - getSize(StorageType));
369 const auto StorageAlignment = getAlignment(StorageType);
370 assert((Layout.getSize() % StorageAlignment == 0 ||
371 Layout.getDataSize() % StorageAlignment) &&
372 "Union's standard layout and no_unique_address layout must agree on "
374 if (Layout.getDataSize() % StorageAlignment)
378void CGRecordLowering::accumulateFields() {
380 FieldEnd = D->field_end();
381 Field != FieldEnd;) {
382 if (
Field->isBitField()) {
386 accumulateBitFields(Start, Field);
387 }
else if (!
Field->isZeroSize(Context)) {
390 Members.push_back(MemberInfo(
391 bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
392 Field->isPotentiallyOverlapping()
393 ? getStorageType(
Field->getType()->getAsCXXRecordDecl())
394 : getStorageType(*Field),
416 if (isDiscreteBitFieldABI()) {
418 uint64_t BitOffset = getFieldBitOffset(*Field);
420 if (
Field->isZeroLengthBitField(Context)) {
425 Types.ConvertTypeForMem(
Field->getType(),
true);
428 if (Run == FieldEnd || BitOffset >= Tail) {
430 StartBitOffset = BitOffset;
431 Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(
Type);
435 Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset),
Type));
439 Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
440 MemberInfo::Field,
nullptr, *Field));
450 auto IsBetterAsSingleFieldRun = [&](
uint64_t OffsetInRecord,
452 if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
454 if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
455 !DataLayout.fitsInLegalInteger(OffsetInRecord))
460 Context.
toBits(getAlignment(getIntNType(OffsetInRecord))) !=
467 bool StartFieldAsSingleRun =
false;
470 if (Run == FieldEnd) {
472 if (Field == FieldEnd)
475 if (!
Field->isZeroLengthBitField(Context)) {
477 StartBitOffset = getFieldBitOffset(*Field);
478 Tail = StartBitOffset +
Field->getBitWidthValue(Context);
479 StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
495 if (!StartFieldAsSingleRun && Field != FieldEnd &&
496 !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
497 (!
Field->isZeroLengthBitField(Context) ||
500 Tail == getFieldBitOffset(*Field)) {
501 Tail +=
Field->getBitWidthValue(Context);
507 llvm::Type *
Type = getIntNType(Tail - StartBitOffset);
511 Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset),
Type));
512 for (; Run !=
Field; ++Run)
513 Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
514 MemberInfo::Field,
nullptr, *Run));
516 StartFieldAsSingleRun =
false;
520void CGRecordLowering::accumulateBases() {
522 if (Layout.isPrimaryBaseVirtual()) {
525 getStorageType(BaseDecl), BaseDecl));
528 for (
const auto &
Base : RD->bases()) {
529 if (
Base.isVirtual())
537 Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
538 MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
555void CGRecordLowering::computeVolatileBitfields() {
556 if (!
isAAPCS() || !Types.getCodeGenOpts().AAPCSBitfieldWidth)
559 for (
auto &I : BitFields) {
562 llvm::Type *ResLTy = Types.ConvertTypeForMem(
Field->getType());
565 if ((uint64_t)(Context.
toBits(Layout.getAlignment())) <
566 ResLTy->getPrimitiveSizeInBits())
573 const unsigned OldOffset =
576 const unsigned AbsoluteOffset =
580 const unsigned StorageSize = ResLTy->getPrimitiveSizeInBits();
583 if (Info.
StorageSize == StorageSize && (OldOffset % StorageSize == 0))
587 unsigned Offset = AbsoluteOffset & (StorageSize - 1);
592 if (Offset + Info.
Size > StorageSize)
597 Offset = StorageSize - (Offset + Info.
Size);
609 if (End >= RecordSize)
613 bool Conflict =
false;
614 for (
const auto *F : D->fields()) {
616 if (F->isBitField() && !F->isZeroLengthBitField(Context))
626 if (F->isZeroLengthBitField(Context)) {
627 if (End > FOffset && StorageOffset < FOffset) {
636 Types.ConvertTypeForMem(F->getType())->getPrimitiveSizeInBits()) -
639 if (End < FOffset || FEnd < StorageOffset)
659void CGRecordLowering::accumulateVPtrs() {
663 llvm::PointerType::getUnqual(Types.getLLVMContext())));
667 llvm::PointerType::getUnqual(Types.getLLVMContext())));
670void CGRecordLowering::accumulateVBases() {
676 if (isOverlappingVBaseABI())
677 for (
const auto &
Base : RD->vbases()) {
683 if (Context.
isNearlyEmpty(BaseDecl) && !hasOwnStorage(RD, BaseDecl))
685 ScissorOffset = std::min(ScissorOffset,
688 Members.push_back(MemberInfo(ScissorOffset, MemberInfo::Scissor,
nullptr,
690 for (
const auto &
Base : RD->vbases()) {
697 if (isOverlappingVBaseABI() &&
699 !hasOwnStorage(RD, BaseDecl)) {
700 Members.push_back(MemberInfo(Offset, MemberInfo::VBase,
nullptr,
708 Members.push_back(MemberInfo(Offset, MemberInfo::VBase,
709 getStorageType(BaseDecl), BaseDecl));
718 for (
const auto &
Base :
Decl->bases())
719 if (!hasOwnStorage(
Base.getType()->getAsCXXRecordDecl(), Query))
724void CGRecordLowering::calculateZeroInit() {
725 for (std::vector<MemberInfo>::const_iterator
Member = Members.begin(),
726 MemberEnd = Members.end();
727 IsZeroInitializableAsBase &&
Member != MemberEnd; ++
Member) {
728 if (
Member->Kind == MemberInfo::Field) {
731 IsZeroInitializable = IsZeroInitializableAsBase =
false;
732 }
else if (
Member->Kind == MemberInfo::Base ||
733 Member->Kind == MemberInfo::VBase) {
734 if (isZeroInitializable(
Member->RD))
736 IsZeroInitializable =
false;
737 if (
Member->Kind == MemberInfo::Base)
738 IsZeroInitializableAsBase =
false;
743void CGRecordLowering::clipTailPadding() {
744 std::vector<MemberInfo>::iterator Prior = Members.begin();
746 for (std::vector<MemberInfo>::iterator
Member = Prior + 1,
747 MemberEnd = Members.end();
750 if (!
Member->Data &&
Member->Kind != MemberInfo::Scissor)
752 if (
Member->Offset < Tail) {
753 assert(Prior->Kind == MemberInfo::Field &&
754 "Only storage fields have tail padding!");
755 if (!Prior->FD || Prior->FD->isBitField())
756 Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
757 cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
759 assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
760 "should not have reused this field's tail padding");
761 Prior->Data = getByteArrayType(
767 Tail = Prior->Offset + getSize(Prior->Data);
771void CGRecordLowering::determinePacked(
bool NVBaseType) {
778 for (std::vector<MemberInfo>::const_iterator
Member = Members.begin(),
779 MemberEnd = Members.end();
787 if (
Member->Offset < NVSize)
788 NVAlignment = std::max(NVAlignment, getAlignment(
Member->Data));
789 Alignment = std::max(Alignment, getAlignment(
Member->Data));
793 if (Members.back().Offset % Alignment)
798 if (NVSize % NVAlignment)
802 Members.back().Data = getIntNType(Context.
toBits(Alignment));
805void CGRecordLowering::insertPadding() {
806 std::vector<std::pair<CharUnits, CharUnits> > Padding;
808 for (std::vector<MemberInfo>::const_iterator
Member = Members.begin(),
809 MemberEnd = Members.end();
814 assert(Offset >= Size);
818 Padding.push_back(std::make_pair(Size, Offset - Size));
824 for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator
825 Pad = Padding.begin(), PadEnd = Padding.end();
826 Pad != PadEnd; ++Pad)
827 Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
828 llvm::stable_sort(Members);
831void CGRecordLowering::fillOutputFields() {
832 for (std::vector<MemberInfo>::const_iterator
Member = Members.begin(),
833 MemberEnd = Members.end();
836 FieldTypes.push_back(
Member->Data);
837 if (
Member->Kind == MemberInfo::Field) {
839 Fields[
Member->FD->getCanonicalDecl()] = FieldTypes.size() - 1;
842 setBitFieldInfo(
Member->FD,
Member->Offset, FieldTypes.back());
843 }
else if (
Member->Kind == MemberInfo::Base)
844 NonVirtualBases[
Member->RD] = FieldTypes.size() - 1;
845 else if (
Member->Kind == MemberInfo::VBase)
846 VirtualBases[
Member->RD] = FieldTypes.size() - 1;
852 uint64_t Offset, uint64_t Size,
853 uint64_t StorageSize,
858 llvm::Type *Ty = Types.ConvertTypeForMem(FD->
getType());
861 uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);
865 if (
Size > TypeSizeInBits) {
875 Size = TypeSizeInBits;
882 if (Types.getDataLayout().isBigEndian()) {
889std::unique_ptr<CGRecordLayout>
891 CGRecordLowering Builder(*
this, D,
false);
893 Builder.lower(
false);
896 llvm::StructType *BaseTy =
nullptr;
897 if (isa<CXXRecordDecl>(D)) {
899 if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
900 CGRecordLowering BaseBuilder(*
this, D, Builder.Packed);
901 BaseBuilder.lower(
true);
902 BaseTy = llvm::StructType::create(
903 getLLVMContext(), BaseBuilder.FieldTypes,
"", BaseBuilder.Packed);
907 assert(Builder.Packed == BaseBuilder.Packed &&
908 "Non-virtual and complete types must agree on packedness");
915 Ty->setBody(Builder.FieldTypes, Builder.Packed);
917 auto RL = std::make_unique<CGRecordLayout>(
918 Ty, BaseTy, (
bool)Builder.IsZeroInitializable,
919 (
bool)Builder.IsZeroInitializableAsBase);
921 RL->NonVirtualBases.swap(Builder.NonVirtualBases);
922 RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);
925 RL->FieldInfo.swap(Builder.Fields);
928 RL->BitFields.swap(Builder.BitFields);
931 if (
getContext().getLangOpts().DumpRecordLayouts) {
932 llvm::outs() <<
"\n*** Dumping IRgen Record Layout\n";
933 llvm::outs() <<
"Record: ";
934 D->
dump(llvm::outs());
935 llvm::outs() <<
"\nLayout: ";
936 RL->print(llvm::outs());
944 assert(TypeSizeInBits ==
getDataLayout().getTypeAllocSizeInBits(Ty) &&
945 "Type size mismatch!");
950 uint64_t AlignedNonVirtualTypeSizeInBits =
953 assert(AlignedNonVirtualTypeSizeInBits ==
955 "Type size mismatch!");
959 llvm::StructType *ST = RL->getLLVMType();
960 const llvm::StructLayout *SL =
getDataLayout().getStructLayout(ST);
964 for (
unsigned i = 0, e = AST_RL.
getFieldCount(); i != e; ++i, ++it) {
974 unsigned FieldNo = RL->getLLVMFieldNo(FD);
975 assert(AST_RL.
getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
976 "Invalid field offset!");
985 llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));
996 assert(
static_cast<unsigned>(Info.
Offset + Info.
Size) ==
998 "Big endian union bitfield does not end at the back");
1000 assert(Info.
Offset == 0 &&
1001 "Little endian union bitfield with a non-zero offset");
1003 "Union not large enough for bitfield storage");
1009 "Storage size does not match the element type size");
1011 assert(Info.
Size > 0 &&
"Empty bitfield!");
1013 "Bitfield outside of its allocated storage");
1021 OS <<
"<CGRecordLayout\n";
1022 OS <<
" LLVMType:" << *CompleteObjectType <<
"\n";
1023 if (BaseSubobjectType)
1024 OS <<
" NonVirtualBaseLLVMType:" << *BaseSubobjectType <<
"\n";
1025 OS <<
" IsZeroInitializable:" << IsZeroInitializable <<
"\n";
1026 OS <<
" BitFields:[\n";
1029 std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
1030 for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
1031 it = BitFields.begin(), ie = BitFields.end();
1036 it2 = RD->
field_begin(); *it2 != it->first; ++it2)
1038 BFIs.push_back(std::make_pair(Index, &it->second));
1040 llvm::array_pod_sort(BFIs.begin(), BFIs.end());
1041 for (
unsigned i = 0, e = BFIs.size(); i != e; ++i) {
1043 BFIs[i].second->print(OS);
1051 print(llvm::errs());
1055 OS <<
"<CGBitFieldInfo"
1065 print(llvm::errs());
Defines the clang::ASTContext interface.
static bool isAAPCS(const TargetInfo &TargetInfo)
Helper method to check if the underlying ABI is AAPCS.
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate.h) and friends (in DeclFriend.h).
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic analysis of a program.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D, which indicates its size and field position information.
bool isNearlyEmpty(const CXXRecordDecl *RD) const
TypeInfoChars getTypeInfoDataSizeInChars(QualType T) const
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
const TargetInfo & getTargetInfo() const
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/class.
bool hasOwnVFPtr() const
hasOwnVFPtr - Does this class provide its own virtual-function table pointer, rather than inheriting one from a primary base class?
bool hasOwnVBPtr() const
hasOwnVBPtr - Does this class provide its own virtual-base table pointer, rather than inheriting one from a primary base class?
CharUnits getSize() const
getSize - Get the record size in characters.
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getVBPtrOffset() const
getVBPtrOffset - Get the offset for virtual base table pointer.
CharUnits getVBaseClassOffset(const CXXRecordDecl *VBase) const
getVBaseClassOffset - Get the offset, in chars, for the given base class.
const VBaseOffsetsMapTy & getVBaseOffsetsMap() const
const CXXRecordDecl * getPrimaryBase() const
getPrimaryBase - Get the primary base for this record.
bool isPrimaryBaseVirtual() const
isPrimaryBaseVirtual - Get whether the primary base for this record is virtual or not.
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object without virtual bases.
Represents a C++ struct/union/class.
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
CharUnits - This is an opaque type for sizes expressed in character units.
bool isZero() const
isZero - Test whether the quantity equals zero.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits One()
One - Construct a CharUnits quantity of one.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
void print(raw_ostream &OS) const
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
ASTContext & getContext() const
std::unique_ptr< CGRecordLayout > ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty)
Compute a new LLVM record layout object for the given record.
llvm::LLVMContext & getLLVMContext()
const llvm::DataLayout & getDataLayout() const
void addRecordTypeName(const RecordDecl *RD, llvm::StructType *Ty, StringRef suffix)
addRecordTypeName - Compute a name from the given record decl with an optional suffix and name the given LLVM type using it.
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext, providing only those that are of type SpecificDecl (or a class derived from it).
DeclContext * getParent()
getParent - Returns the containing DeclContext.
Decl - This represents one declaration (or definition), e.g.
Represents a member of a struct/union/class.
bool isBitField() const
Determines whether this field is a bitfield.
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::getFieldOffset.
unsigned getBitWidthValue(const ASTContext &Ctx) const
Computes the bit width of this field, if this is a bit field.
bool isZeroSize(const ASTContext &Ctx) const
Determine if this field is a subobject of zero size, that is, either a zero-length bit-field or a field of empty class type with the [[no_unique_address]] attribute.
FieldDecl * getCanonicalDecl() override
Retrieves the canonical declaration of this field.
DeclarationName getDeclName() const
Get the actual, stored name of the declaration, which may be a special name.
Represents a struct/union/class.
field_iterator field_begin() const
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
virtual StringRef getABI() const
Get the ABI currently in use.
bool useZeroLengthBitfieldAlignment() const
Check whether zero length bitfields should force alignment of the next member.
bool useBitFieldTypeAlignment() const
Check whether the alignment of bit-field types is respected when laying out structures.
The base class of the type hierarchy.
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration type whose underlying type is a signed integer type.
bool operator<(DeclarationName LHS, DeclarationName RHS)
Ordering on two declaration names.
Structure with information about how a bitfield should be accessed.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
CharUnits VolatileStorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned VolatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the LLVM struct type, taking into account the AAPCS rules for volatile bitfields.
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the LLVM struct type.
unsigned VolatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
void print(raw_ostream &OS) const
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned IsSigned
Whether the bit-field is signed.
static CGBitFieldInfo MakeInfo(class CodeGenTypes &Types, const FieldDecl *FD, uint64_t Offset, uint64_t Size, uint64_t StorageSize, CharUnits StorageOffset)
Given a bit-field decl, build an appropriate helper object for accessing that field (which is expected to be a bitfield).