23#include "llvm/IR/DataLayout.h"
24#include "llvm/IR/DerivedTypes.h"
25#include "llvm/IR/Type.h"
26#include "llvm/Support/Debug.h"
27#include "llvm/Support/MathExtras.h"
28#include "llvm/Support/raw_ostream.h"
30using namespace CodeGen;
73struct CGRecordLowering {
79 enum InfoKind { VFPtr, VBPtr,
Field,
Base, VBase, Scissor }
Kind;
85 MemberInfo(
CharUnits Offset, InfoKind Kind, llvm::Type *Data,
88 MemberInfo(
CharUnits Offset, InfoKind Kind, llvm::Type *Data,
99 return MemberInfo(
Offset, MemberInfo::Field,
Data);
107 bool isDiscreteBitFieldABI() {
109 D->isMsStruct(Context);
124 bool isOverlappingVBaseABI() {
129 llvm::Type *getIntNType(uint64_t NumBits) {
130 unsigned AlignedBits = llvm::alignTo(NumBits, Context.
getCharWidth());
131 return llvm::Type::getIntNTy(Types.getLLVMContext(), AlignedBits);
134 llvm::Type *getCharType() {
135 return llvm::Type::getIntNTy(Types.getLLVMContext(),
139 llvm::Type *getByteArrayType(
CharUnits NumChars) {
140 assert(!NumChars.
isZero() &&
"Empty byte arrays aren't allowed.");
141 llvm::Type *
Type = getCharType();
147 llvm::Type *getStorageType(
const FieldDecl *FD) {
148 llvm::Type *
Type = Types.ConvertTypeForMem(FD->
getType());
150 if (isDiscreteBitFieldABI())
return Type;
156 return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
158 CharUnits bitsToCharUnits(uint64_t BitOffset) {
167 bool isZeroInitializable(
const FieldDecl *FD) {
168 return Types.isZeroInitializable(FD->
getType());
170 bool isZeroInitializable(
const RecordDecl *RD) {
171 return Types.isZeroInitializable(RD);
173 void appendPaddingBytes(
CharUnits Size) {
175 FieldTypes.push_back(getByteArrayType(Size));
182 llvm::Type *StorageType);
184 void lower(
bool NonVirtualBaseType);
185 void lowerUnion(
bool isNoUniqueAddress);
186 void accumulateFields();
189 void computeVolatileBitfields();
190 void accumulateBases();
191 void accumulateVPtrs();
192 void accumulateVBases();
196 void calculateZeroInit();
199 void clipTailPadding();
201 void determinePacked(
bool NVBaseType);
203 void insertPadding();
205 void fillOutputFields();
212 const llvm::DataLayout &DataLayout;
214 std::vector<MemberInfo> Members;
217 llvm::DenseMap<const FieldDecl *, unsigned> Fields;
218 llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
219 llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
220 llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
221 bool IsZeroInitializable : 1;
222 bool IsZeroInitializableAsBase : 1;
225 CGRecordLowering(
const CGRecordLowering &) =
delete;
226 void operator =(
const CGRecordLowering &) =
delete;
232 : Types(Types), Context(Types.getContext()), D(D),
234 Layout(Types.getContext().getASTRecordLayout(D)),
235 DataLayout(Types.getDataLayout()), IsZeroInitializable(
true),
236 IsZeroInitializableAsBase(
true), Packed(Packed) {}
238void CGRecordLowering::setBitFieldInfo(
252 if (DataLayout.isBigEndian())
260void CGRecordLowering::lower(
bool NVBaseType) {
281 CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
283 lowerUnion(NVBaseType);
284 computeVolatileBitfields();
292 if (Members.empty()) {
293 appendPaddingBytes(Size);
294 computeVolatileBitfields();
300 llvm::stable_sort(Members);
301 Members.push_back(StorageInfo(Size, getIntNType(8)));
303 determinePacked(NVBaseType);
308 computeVolatileBitfields();
311void CGRecordLowering::lowerUnion(
bool isNoUniqueAddress) {
313 isNoUniqueAddress ? Layout.getDataSize() : Layout.getSize();
314 llvm::Type *StorageType =
nullptr;
315 bool SeenNamedMember =
false;
321 for (
const auto *Field : D->fields()) {
322 if (
Field->isBitField()) {
323 if (
Field->isZeroLengthBitField(Context))
325 llvm::Type *FieldType = getStorageType(Field);
326 if (LayoutSize < getSize(FieldType))
327 FieldType = getByteArrayType(LayoutSize);
330 Fields[
Field->getCanonicalDecl()] = 0;
331 llvm::Type *FieldType = getStorageType(Field);
338 if (!SeenNamedMember) {
339 SeenNamedMember =
Field->getIdentifier();
340 if (!SeenNamedMember)
341 if (
const auto *FieldRD =
Field->getType()->getAsRecordDecl())
342 SeenNamedMember = FieldRD->findFirstNamedDataMember();
343 if (SeenNamedMember && !isZeroInitializable(Field)) {
344 IsZeroInitializable = IsZeroInitializableAsBase =
false;
345 StorageType = FieldType;
350 if (!IsZeroInitializable)
354 getAlignment(FieldType) > getAlignment(StorageType) ||
355 (getAlignment(FieldType) == getAlignment(StorageType) &&
356 getSize(FieldType) > getSize(StorageType)))
357 StorageType = FieldType;
361 return appendPaddingBytes(LayoutSize);
364 if (LayoutSize < getSize(StorageType))
365 StorageType = getByteArrayType(LayoutSize);
366 FieldTypes.push_back(StorageType);
367 appendPaddingBytes(LayoutSize - getSize(StorageType));
369 const auto StorageAlignment = getAlignment(StorageType);
370 assert((Layout.getSize() % StorageAlignment == 0 ||
371 Layout.getDataSize() % StorageAlignment) &&
372 "Union's standard layout and no_unique_address layout must agree on "
374 if (Layout.getDataSize() % StorageAlignment)
378void CGRecordLowering::accumulateFields() {
380 FieldEnd = D->field_end();
381 Field != FieldEnd;) {
382 if (
Field->isBitField()) {
386 accumulateBitFields(Start, Field);
387 }
else if (!
Field->isZeroSize(Context)) {
390 Members.push_back(MemberInfo(
391 bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
392 Field->isPotentiallyOverlapping()
393 ? getStorageType(
Field->getType()->getAsCXXRecordDecl())
394 : getStorageType(*Field),
416 if (isDiscreteBitFieldABI()) {
418 uint64_t BitOffset = getFieldBitOffset(*Field);
420 if (
Field->isZeroLengthBitField(Context)) {
425 Types.ConvertTypeForMem(
Field->getType(),
true);
428 if (Run == FieldEnd || BitOffset >= Tail) {
430 StartBitOffset = BitOffset;
431 Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(
Type);
435 Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset),
Type));
439 Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
440 MemberInfo::Field,
nullptr, *Field));
450 auto IsBetterAsSingleFieldRun = [&](
uint64_t OffsetInRecord,
452 if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
454 if (OffsetInRecord < 8 || !llvm::isPowerOf2_64(OffsetInRecord) ||
455 !DataLayout.fitsInLegalInteger(OffsetInRecord))
460 Context.
toBits(getAlignment(getIntNType(OffsetInRecord))) !=
467 bool StartFieldAsSingleRun =
false;
470 if (Run == FieldEnd) {
472 if (Field == FieldEnd)
475 if (!
Field->isZeroLengthBitField(Context)) {
477 StartBitOffset = getFieldBitOffset(*Field);
478 Tail = StartBitOffset +
Field->getBitWidthValue(Context);
479 StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
495 if (!StartFieldAsSingleRun && Field != FieldEnd &&
496 !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
497 (!
Field->isZeroLengthBitField(Context) ||
500 Tail == getFieldBitOffset(*Field)) {
501 Tail +=
Field->getBitWidthValue(Context);
507 llvm::Type *
Type = getIntNType(Tail - StartBitOffset);
511 Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset),
Type));
512 for (; Run !=
Field; ++Run)
513 Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
514 MemberInfo::Field,
nullptr, *Run));
516 StartFieldAsSingleRun =
false;
520void CGRecordLowering::accumulateBases() {
522 if (Layout.isPrimaryBaseVirtual()) {
525 getStorageType(BaseDecl), BaseDecl));
528 for (
const auto &
Base : RD->bases()) {
529 if (
Base.isVirtual())
537 Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
538 MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
555void CGRecordLowering::computeVolatileBitfields() {
556 if (!
isAAPCS() || !Types.getCodeGenOpts().AAPCSBitfieldWidth)
559 for (
auto &I : BitFields) {
562 llvm::Type *ResLTy = Types.ConvertTypeForMem(
Field->getType());
565 if ((uint64_t)(Context.
toBits(Layout.getAlignment())) <
566 ResLTy->getPrimitiveSizeInBits())
573 const unsigned OldOffset =
576 const unsigned AbsoluteOffset =
580 const unsigned StorageSize = ResLTy->getPrimitiveSizeInBits();
583 if (Info.
StorageSize == StorageSize && (OldOffset % StorageSize == 0))
587 unsigned Offset = AbsoluteOffset & (StorageSize - 1);
609 if (End >= RecordSize)
613 bool Conflict =
false;
614 for (
const auto *F : D->fields()) {
616 if (F->isBitField() && !F->isZeroLengthBitField(Context))
626 if (F->isZeroLengthBitField(Context)) {
627 if (End > FOffset && StorageOffset < FOffset) {
636 Types.ConvertTypeForMem(F->getType())->getPrimitiveSizeInBits()) -
639 if (End < FOffset || FEnd < StorageOffset)
659void CGRecordLowering::accumulateVPtrs() {
662 llvm::FunctionType::get(getIntNType(32),
true)->
663 getPointerTo()->getPointerTo()));
665 Members.push_back(MemberInfo(Layout.
getVBPtrOffset(), MemberInfo::VBPtr,
666 llvm::Type::getInt32PtrTy(Types.getLLVMContext())));
669void CGRecordLowering::accumulateVBases() {
675 if (isOverlappingVBaseABI())
676 for (
const auto &
Base : RD->vbases()) {
682 if (Context.
isNearlyEmpty(BaseDecl) && !hasOwnStorage(RD, BaseDecl))
684 ScissorOffset = std::min(ScissorOffset,
687 Members.push_back(MemberInfo(ScissorOffset, MemberInfo::Scissor,
nullptr,
689 for (
const auto &
Base : RD->vbases()) {
696 if (isOverlappingVBaseABI() &&
698 !hasOwnStorage(RD, BaseDecl)) {
699 Members.push_back(MemberInfo(
Offset, MemberInfo::VBase,
nullptr,
707 Members.push_back(MemberInfo(
Offset, MemberInfo::VBase,
708 getStorageType(BaseDecl), BaseDecl));
717 for (
const auto &
Base :
Decl->bases())
718 if (!hasOwnStorage(
Base.getType()->getAsCXXRecordDecl(), Query))
723void CGRecordLowering::calculateZeroInit() {
724 for (std::vector<MemberInfo>::const_iterator
Member = Members.begin(),
725 MemberEnd = Members.end();
726 IsZeroInitializableAsBase &&
Member != MemberEnd; ++
Member) {
727 if (
Member->Kind == MemberInfo::Field) {
730 IsZeroInitializable = IsZeroInitializableAsBase =
false;
731 }
else if (
Member->Kind == MemberInfo::Base ||
732 Member->Kind == MemberInfo::VBase) {
733 if (isZeroInitializable(
Member->RD))
735 IsZeroInitializable =
false;
736 if (
Member->Kind == MemberInfo::Base)
737 IsZeroInitializableAsBase =
false;
742void CGRecordLowering::clipTailPadding() {
743 std::vector<MemberInfo>::iterator Prior = Members.begin();
745 for (std::vector<MemberInfo>::iterator
Member = Prior + 1,
746 MemberEnd = Members.end();
749 if (!
Member->Data &&
Member->Kind != MemberInfo::Scissor)
751 if (
Member->Offset < Tail) {
752 assert(Prior->Kind == MemberInfo::Field &&
753 "Only storage fields have tail padding!");
754 if (!Prior->FD || Prior->FD->isBitField())
755 Prior->Data = getByteArrayType(bitsToCharUnits(llvm::alignTo(
756 cast<llvm::IntegerType>(Prior->Data)->getIntegerBitWidth(), 8)));
758 assert(Prior->FD->hasAttr<NoUniqueAddressAttr>() &&
759 "should not have reused this field's tail padding");
760 Prior->Data = getByteArrayType(
766 Tail = Prior->Offset + getSize(Prior->Data);
770void CGRecordLowering::determinePacked(
bool NVBaseType) {
777 for (std::vector<MemberInfo>::const_iterator
Member = Members.begin(),
778 MemberEnd = Members.end();
786 if (
Member->Offset < NVSize)
787 NVAlignment = std::max(NVAlignment, getAlignment(
Member->Data));
788 Alignment = std::max(Alignment, getAlignment(
Member->Data));
792 if (Members.back().Offset % Alignment)
797 if (NVSize % NVAlignment)
801 Members.back().Data = getIntNType(Context.
toBits(Alignment));
804void CGRecordLowering::insertPadding() {
805 std::vector<std::pair<CharUnits, CharUnits> > Padding;
807 for (std::vector<MemberInfo>::const_iterator
Member = Members.begin(),
808 MemberEnd = Members.end();
817 Padding.push_back(std::make_pair(Size,
Offset - Size));
823 for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator
824 Pad = Padding.begin(), PadEnd = Padding.end();
825 Pad != PadEnd; ++Pad)
826 Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
827 llvm::stable_sort(Members);
830void CGRecordLowering::fillOutputFields() {
831 for (std::vector<MemberInfo>::const_iterator
Member = Members.begin(),
832 MemberEnd = Members.end();
835 FieldTypes.push_back(
Member->Data);
836 if (
Member->Kind == MemberInfo::Field) {
838 Fields[
Member->FD->getCanonicalDecl()] = FieldTypes.size() - 1;
841 setBitFieldInfo(
Member->FD,
Member->Offset, FieldTypes.back());
842 }
else if (
Member->Kind == MemberInfo::Base)
843 NonVirtualBases[
Member->RD] = FieldTypes.size() - 1;
844 else if (
Member->Kind == MemberInfo::VBase)
845 VirtualBases[
Member->RD] = FieldTypes.size() - 1;
851 uint64_t
Offset, uint64_t Size,
852 uint64_t StorageSize,
857 llvm::Type *Ty = Types.ConvertTypeForMem(FD->
getType());
860 uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);
864 if (
Size > TypeSizeInBits) {
874 Size = TypeSizeInBits;
881 if (Types.getDataLayout().isBigEndian()) {
888std::unique_ptr<CGRecordLayout>
890 CGRecordLowering Builder(*
this, D,
false);
892 Builder.lower(
false);
895 llvm::StructType *BaseTy =
nullptr;
896 if (isa<CXXRecordDecl>(D)) {
898 if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
899 CGRecordLowering BaseBuilder(*
this, D, Builder.Packed);
900 BaseBuilder.lower(
true);
901 BaseTy = llvm::StructType::create(
902 getLLVMContext(), BaseBuilder.FieldTypes,
"", BaseBuilder.Packed);
906 assert(Builder.Packed == BaseBuilder.Packed &&
907 "Non-virtual and complete types must agree on packedness");
914 Ty->setBody(Builder.FieldTypes, Builder.Packed);
916 auto RL = std::make_unique<CGRecordLayout>(
917 Ty, BaseTy, (
bool)Builder.IsZeroInitializable,
918 (
bool)Builder.IsZeroInitializableAsBase);
920 RL->NonVirtualBases.swap(Builder.NonVirtualBases);
921 RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);
924 RL->FieldInfo.swap(Builder.Fields);
927 RL->BitFields.swap(Builder.BitFields);
930 if (
getContext().getLangOpts().DumpRecordLayouts) {
931 llvm::outs() <<
"\n*** Dumping IRgen Record Layout\n";
932 llvm::outs() <<
"Record: ";
933 D->
dump(llvm::outs());
934 llvm::outs() <<
"\nLayout: ";
935 RL->print(llvm::outs());
943 assert(TypeSizeInBits ==
getDataLayout().getTypeAllocSizeInBits(Ty) &&
944 "Type size mismatch!");
949 uint64_t AlignedNonVirtualTypeSizeInBits =
952 assert(AlignedNonVirtualTypeSizeInBits ==
954 "Type size mismatch!");
958 llvm::StructType *ST = RL->getLLVMType();
959 const llvm::StructLayout *SL =
getDataLayout().getStructLayout(ST);
963 for (
unsigned i = 0, e = AST_RL.
getFieldCount(); i != e; ++i, ++it) {
973 unsigned FieldNo = RL->getLLVMFieldNo(FD);
974 assert(AST_RL.
getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
975 "Invalid field offset!");
984 llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));
995 assert(
static_cast<unsigned>(Info.
Offset + Info.
Size) ==
997 "Big endian union bitfield does not end at the back");
999 assert(Info.
Offset == 0 &&
1000 "Little endian union bitfield with a non-zero offset");
1002 "Union not large enough for bitfield storage");
1008 "Storage size does not match the element type size");
1010 assert(Info.
Size > 0 &&
"Empty bitfield!");
1012 "Bitfield outside of its allocated storage");
1020 OS <<
"<CGRecordLayout\n";
1021 OS <<
" LLVMType:" << *CompleteObjectType <<
"\n";
1022 if (BaseSubobjectType)
1023 OS <<
" NonVirtualBaseLLVMType:" << *BaseSubobjectType <<
"\n";
1024 OS <<
" IsZeroInitializable:" << IsZeroInitializable <<
"\n";
1025 OS <<
" BitFields:[\n";
1028 std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
1029 for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
1030 it = BitFields.begin(), ie = BitFields.end();
1035 it2 = RD->
field_begin(); *it2 != it->first; ++it2)
1037 BFIs.push_back(std::make_pair(Index, &it->second));
1039 llvm::array_pod_sort(BFIs.begin(), BFIs.end());
1040 for (
unsigned i = 0, e = BFIs.size(); i != e; ++i) {
1042 BFIs[i].second->print(OS);
1050 print(llvm::errs());
1054 OS <<
"<CGBitFieldInfo"
1064 print(llvm::errs());
Defines the clang::ASTContext interface.
static bool isAAPCS(const TargetInfo &TargetInfo)
Helper method to check if the underlying ABI is AAPCS.
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate.h) and friends (in DeclFriend.h).
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic analysis of a program.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D, which indicates its size and field position information.
bool isNearlyEmpty(const CXXRecordDecl *RD) const
TypeInfoChars getTypeInfoDataSizeInChars(QualType T) const
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
const TargetInfo & getTargetInfo() const
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/class.
bool hasOwnVFPtr() const
hasOwnVFPtr - Does this class provide its own virtual-function table pointer, rather than inheriting one from a primary base class?
bool hasOwnVBPtr() const
hasOwnVBPtr - Does this class provide its own virtual-base table pointer, rather than inheriting one from a primary base class?
CharUnits getSize() const
getSize - Get the record size in characters.
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getVBPtrOffset() const
getVBPtrOffset - Get the offset for virtual base table pointer.
CharUnits getVBaseClassOffset(const CXXRecordDecl *VBase) const
getVBaseClassOffset - Get the offset, in chars, for the given base class.
const VBaseOffsetsMapTy & getVBaseOffsetsMap() const
const CXXRecordDecl * getPrimaryBase() const
getPrimaryBase - Get the primary base for this record.
bool isPrimaryBaseVirtual() const
isPrimaryBaseVirtual - Get whether the primary base for this record is virtual or not.
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object without virtual bases.
Represents a C++ struct/union/class.
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
CharUnits - This is an opaque type for sizes expressed in character units.
bool isZero() const
isZero - Test whether the quantity equals zero.
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
static CharUnits One()
One - Construct a CharUnits quantity of one.
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
void print(raw_ostream &OS) const
This class organizes the cross-module state that is used while lowering AST types to LLVM types.
ASTContext & getContext() const
std::unique_ptr< CGRecordLayout > ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty)
Compute a new LLVM record layout object for the given record.
llvm::LLVMContext & getLLVMContext()
const llvm::DataLayout & getDataLayout() const
void addRecordTypeName(const RecordDecl *RD, llvm::StructType *Ty, StringRef suffix)
addRecordTypeName - Compute a name from the given record decl with an optional suffix and name the given LLVM type using it.
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext, providing only those that are of type SpecificDecl (or a class derived from it).
DeclContext * getParent()
getParent - Returns the containing DeclContext.
Decl - This represents one declaration (or definition), e.g. a variable, typedef, function, struct, etc.
Represents a member of a struct/union/class.
bool isBitField() const
Determines whether this field is a bitfield.
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
unsigned getBitWidthValue(const ASTContext &Ctx) const
bool isZeroSize(const ASTContext &Ctx) const
Determine if this field is a subobject of zero size, that is, either a zero-length bit-field or a field of empty class type with the [[no_unique_address]] attribute.
FieldDecl * getCanonicalDecl() override
Retrieves the canonical declaration of this field.
DeclarationName getDeclName() const
Get the actual, stored name of the declaration, which may be a special name.
Represents a struct/union/class.
field_iterator field_begin() const
bool isMicrosoft() const
Is this ABI an MSVC-compatible ABI?
TargetCXXABI getCXXABI() const
Get the C++ ABI currently in use.
virtual StringRef getABI() const
Get the ABI currently in use.
bool useZeroLengthBitfieldAlignment() const
Check whether zero length bitfields should force alignment of the next member.
bool useBitFieldTypeAlignment() const
Check whether the alignment of bit-field types is respected when laying out structures.
The base class of the type hierarchy.
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration type whose underlying type is a signed integer type.
bool operator<(DeclarationName LHS, DeclarationName RHS)
Ordering on two declaration names.
Structure with information about how a bitfield should be accessed.
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
CharUnits VolatileStorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned VolatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned VolatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
void print(raw_ostream &OS) const
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned IsSigned
Whether the bit-field is signed.
static CGBitFieldInfo MakeInfo(class CodeGenTypes &Types, const FieldDecl *FD, uint64_t Offset, uint64_t Size, uint64_t StorageSize, CharUnits StorageOffset)
Given a bit-field decl, build an appropriate helper object for accessing that field (which is expected to have the given offset and size).