clang 22.0.0git
CIRGenRecordLayoutBuilder.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to compute the layout of a record.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenBuilder.h"
14#include "CIRGenModule.h"
15#include "CIRGenTypes.h"
16
18#include "clang/AST/Decl.h"
19#include "clang/AST/DeclCXX.h"
24#include "llvm/Support/Casting.h"
25
26#include <memory>
27
28using namespace llvm;
29using namespace clang;
30using namespace clang::CIRGen;
31
32namespace {
33/// The CIRRecordLowering is responsible for lowering an ASTRecordLayout to an
34/// mlir::Type. Some of the lowering is straightforward, some is not.
35// TODO: Detail some of the complexities and weirdnesses?
36// (See CGRecordLayoutBuilder.cpp)
37struct CIRRecordLowering final {
38
39 // MemberInfo is a helper structure that contains information about a record
40 // member. In addition to the standard member types, there exists a sentinel
41 // member type that ensures correct rounding.
42 struct MemberInfo final {
43 CharUnits offset;
44 enum class InfoKind { VFPtr, Field, Base, VBase } kind;
45 mlir::Type data;
// Discriminated by 'kind': Field entries read fieldDecl, Base/VBase entries
// read cxxRecordDecl. A Field entry with a null fieldDecl is raw storage
// (padding or bitfield storage) -- see makeStorageInfo below.
46 union {
47 const FieldDecl *fieldDecl;
48 const CXXRecordDecl *cxxRecordDecl;
49 };
50 MemberInfo(CharUnits offset, InfoKind kind, mlir::Type data,
51 const FieldDecl *fieldDecl = nullptr)
52 : offset{offset}, kind{kind}, data{data}, fieldDecl{fieldDecl} {}
53 MemberInfo(CharUnits offset, InfoKind kind, mlir::Type data,
54 const CXXRecordDecl *rd)
55 : offset{offset}, kind{kind}, data{data}, cxxRecordDecl{rd} {}
56 // MemberInfos are sorted so we define a < operator.
57 bool operator<(const MemberInfo &other) const {
58 return offset < other.offset;
59 }
60 };
61 // The constructor.
62 CIRRecordLowering(CIRGenTypes &cirGenTypes, const RecordDecl *recordDecl,
63 bool packed);
64
65 /// Constructs a MemberInfo instance from an offset and mlir::Type.
66 MemberInfo makeStorageInfo(CharUnits offset, mlir::Type data) {
67 return MemberInfo(offset, MemberInfo::InfoKind::Field, data);
68 }
69
70 // Layout routines.
71 void setBitFieldInfo(const FieldDecl *fd, CharUnits startOffset,
72 mlir::Type storageType);
73
74 void lower(bool NonVirtualBaseType);
75 void lowerUnion();
76
77 /// Determines if we need a packed llvm struct.
78 void determinePacked(bool nvBaseType);
79 /// Inserts padding everywhere it's needed.
80 void insertPadding();
81
82 void computeVolatileBitfields();
83 void accumulateBases();
84 void accumulateVPtrs();
85 void accumulateVBases();
86 void accumulateFields();
// NOTE(review): the next declaration is truncated in this capture (the
// original lines 87 and 89 are missing). Presumably it reads
//   RecordDecl::field_iterator
//   accumulateBitFields(RecordDecl::field_iterator field,
//                       RecordDecl::field_iterator fieldEnd);
// -- confirm against the original file.
88 accumulateBitFields(RecordDecl::field_iterator field,
90
91 mlir::Type getVFPtrType();
92
93 bool isAAPCS() const {
94 return astContext.getTargetInfo().getABI().starts_with("aapcs");
95 }
96
97 /// Helper function to check if the target machine is BigEndian.
98 bool isBigEndian() const { return astContext.getTargetInfo().isBigEndian(); }
99
100 // The Itanium base layout rule allows virtual bases to overlap
101 // other bases, which complicates layout in specific ways.
102 //
103 // Note specifically that the ms_struct attribute doesn't change this.
104 bool isOverlappingVBaseABI() {
105 return !astContext.getTargetInfo().getCXXABI().isMicrosoft();
106 }
107 // Recursively searches all of the bases to find out if a vbase is
108 // not the primary vbase of some base class.
109 bool hasOwnStorage(const CXXRecordDecl *decl, const CXXRecordDecl *query);
110
111 /// The Microsoft bitfield layout rule allocates discrete storage
112 /// units of the field's formal type and only combines adjacent
113 /// fields of the same formal type. We want to emit a layout with
114 /// these discrete storage units instead of combining them into a
115 /// continuous run.
116 bool isDiscreteBitFieldABI() {
117 return astContext.getTargetInfo().getCXXABI().isMicrosoft() ||
118 recordDecl->isMsStruct(astContext);
119 }
120
121 CharUnits bitsToCharUnits(uint64_t bitOffset) {
122 return astContext.toCharUnitsFromBits(bitOffset);
123 }
124
125 void calculateZeroInit();
126
127 CharUnits getSize(mlir::Type Ty) {
128 return CharUnits::fromQuantity(dataLayout.layout.getTypeSize(Ty));
129 }
// NOTE(review): despite the CharUnits return type, this wraps a *bit* count
// (getTypeSizeInBits); callers such as setBitFieldInfo read it back with
// .getQuantity() as a number of bits.
130 CharUnits getSizeInBits(mlir::Type ty) {
131 return CharUnits::fromQuantity(dataLayout.layout.getTypeSizeInBits(ty));
132 }
133 CharUnits getAlignment(mlir::Type Ty) {
134 return CharUnits::fromQuantity(dataLayout.layout.getTypeABIAlignment(Ty));
135 }
136
137 bool isZeroInitializable(const FieldDecl *fd) {
138 return cirGenTypes.isZeroInitializable(fd->getType());
139 }
140 bool isZeroInitializable(const RecordDecl *rd) {
141 return cirGenTypes.isZeroInitializable(rd);
142 }
143
144 /// Wraps cir::IntType with some implicit arguments.
// Rounds numBits up to the next power of two, with a minimum of 8 bits.
145 mlir::Type getUIntNType(uint64_t numBits) {
146 unsigned alignedBits = llvm::PowerOf2Ceil(numBits);
147 alignedBits = std::max(8u, alignedBits);
148 return cir::IntType::get(&cirGenTypes.getMLIRContext(), alignedBits,
149 /*isSigned=*/false);
150 }
151
152 mlir::Type getCharType() {
153 return cir::IntType::get(&cirGenTypes.getMLIRContext(),
154 astContext.getCharWidth(),
155 /*isSigned=*/false);
156 }
157
// Returns a single char for one byte, otherwise an array-of-char type.
158 mlir::Type getByteArrayType(CharUnits numberOfChars) {
159 assert(!numberOfChars.isZero() && "Empty byte arrays aren't allowed.");
160 mlir::Type type = getCharType();
161 return numberOfChars == CharUnits::One()
162 ? type
163 : cir::ArrayType::get(type, numberOfChars.getQuantity());
164 }
165
166 // Gets the CIR BaseSubobject type from a CXXRecordDecl.
167 mlir::Type getStorageType(const CXXRecordDecl *RD) {
168 return cirGenTypes.getCIRGenRecordLayout(RD).getBaseSubobjectCIRType();
169 }
170 // This is different from LLVM traditional codegen because CIRGen uses arrays
171 // of bytes instead of arbitrary-sized integers. This is important for packed
172 // structures support.
173 mlir::Type getBitfieldStorageType(unsigned numBits) {
174 unsigned alignedBits = llvm::alignTo(numBits, astContext.getCharWidth());
175 if (cir::isValidFundamentalIntWidth(alignedBits))
176 return builder.getUIntNTy(alignedBits);
177
178 mlir::Type type = getCharType();
179 return cir::ArrayType::get(type, alignedBits / astContext.getCharWidth());
180 }
181
182 mlir::Type getStorageType(const FieldDecl *fieldDecl) {
183 mlir::Type type = cirGenTypes.convertTypeForMem(fieldDecl->getType());
184 if (fieldDecl->isBitField()) {
185 cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
186 "getStorageType for bitfields");
187 }
188 return type;
189 }
190
191 uint64_t getFieldBitOffset(const FieldDecl *fieldDecl) {
192 return astRecordLayout.getFieldOffset(fieldDecl->getFieldIndex());
193 }
194
195 /// Fills out the structures that are ultimately consumed.
196 void fillOutputFields();
197
// Appends a byte-array member of 'size' chars to fieldTypes (no-op for zero).
198 void appendPaddingBytes(CharUnits size) {
199 if (!size.isZero()) {
200 fieldTypes.push_back(getByteArrayType(size));
201 padded = true;
202 }
203 }
204
205 CIRGenTypes &cirGenTypes;
206 CIRGenBuilderTy &builder;
207 const ASTContext &astContext;
208 const RecordDecl *recordDecl;
209 const CXXRecordDecl *cxxRecordDecl;
210 const ASTRecordLayout &astRecordLayout;
211 // Helpful intermediate data-structures
212 std::vector<MemberInfo> members;
213 // Output fields, consumed by CIRGenTypes::computeRecordLayout
214 llvm::SmallVector<mlir::Type, 16> fieldTypes;
215 llvm::DenseMap<const FieldDecl *, CIRGenBitFieldInfo> bitFields;
216 llvm::DenseMap<const FieldDecl *, unsigned> fieldIdxMap;
217 llvm::DenseMap<const CXXRecordDecl *, unsigned> nonVirtualBases;
218 llvm::DenseMap<const CXXRecordDecl *, unsigned> virtualBases;
219 cir::CIRDataLayout dataLayout;
220
// Result flags consumed by CIRGenTypes::computeRecordLayout:
//   zeroInitializable       - complete object can be zero-initialized
//   zeroInitializableAsBase - still zero-initializable as a base subobject
//   packed                  - emitted CIR record needs packed layout
//   padded                  - explicit padding byte arrays were inserted
221 LLVM_PREFERRED_TYPE(bool)
222 unsigned zeroInitializable : 1;
223 LLVM_PREFERRED_TYPE(bool)
224 unsigned zeroInitializableAsBase : 1;
225 LLVM_PREFERRED_TYPE(bool)
226 unsigned packed : 1;
227 LLVM_PREFERRED_TYPE(bool)
228 unsigned padded : 1;
229
230private:
231 CIRRecordLowering(const CIRRecordLowering &) = delete;
232 void operator=(const CIRRecordLowering &) = delete;
233}; // CIRRecordLowering
234} // namespace
235
// Captures the AST context, builder and ASTRecordLayout needed to lower
// 'recordDecl'. Output flags start optimistic: zero-initializable unless a
// member proves otherwise, packed only if requested, not yet padded.
// NOTE(review): the cxxRecordDecl member initializer (the original line 240)
// appears to be missing from this capture -- confirm against the original
// file before relying on this text.
236CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes,
237 const RecordDecl *recordDecl, bool packed)
238 : cirGenTypes{cirGenTypes}, builder{cirGenTypes.getBuilder()},
239 astContext{cirGenTypes.getASTContext()}, recordDecl{recordDecl},
241 astRecordLayout{
242 cirGenTypes.getASTContext().getASTRecordLayout(recordDecl)},
243 dataLayout{cirGenTypes.getCGModule().getModule()},
244 zeroInitializable{true}, zeroInitializableAsBase{true}, packed{packed},
245 padded{false} {}
246
// Records access information for bit-field 'fd' whose storage unit begins at
// 'startOffset' with type 'storageType': the bit offset within the unit, the
// bit width, and the unit's size/offset/type. Offsets are mirrored for
// big-endian targets so extraction code can count from the MSB.
// NOTE(review): the original lines 251 and 271 are missing from this capture
// (presumably info.isSigned and info.volatileStorageOffset initialization) --
// confirm against the original file.
247void CIRRecordLowering::setBitFieldInfo(const FieldDecl *fd,
248 CharUnits startOffset,
249 mlir::Type storageType) {
250 CIRGenBitFieldInfo &info = bitFields[fd->getCanonicalDecl()];
252 info.offset =
253 (unsigned)(getFieldBitOffset(fd) - astContext.toBits(startOffset));
254 info.size = fd->getBitWidthValue();
255 info.storageSize = getSizeInBits(storageType).getQuantity();
256 info.storageOffset = startOffset;
257 info.storageType = storageType;
258 info.name = fd->getName();
259
// Clamp: a declared width larger than the storage unit cannot be represented.
260 if (info.size > info.storageSize)
261 info.size = info.storageSize;
262 // Reverse the bit offsets for big endian machines. Since bitfields are laid
263 // out as packed bits within an integer-sized unit, we can imagine the bits
264 // counting from the most-significant-bit instead of the
265 // least-significant-bit.
266 if (dataLayout.isBigEndian())
267 info.offset = info.storageSize - (info.offset + info.size);
268
269 info.volatileStorageSize = 0;
270 info.volatileOffset = 0;
272}
273
// Main driver: lowers the record (or, when nonVirtualBaseType is true, only
// its non-virtual base subobject) into the 'members' list and then into the
// output fields/maps consumed by computeRecordLayout.
// NOTE(review): the original lines 300-301 are missing from this capture
// (likely a missing-features assertion) -- confirm against the original file.
274void CIRRecordLowering::lower(bool nonVirtualBaseType) {
275 if (recordDecl->isUnion()) {
276 lowerUnion();
277 computeVolatileBitfields();
278 return;
279 }
280
281 CharUnits size = nonVirtualBaseType ? astRecordLayout.getNonVirtualSize()
282 : astRecordLayout.getSize();
283
284 accumulateFields();
285
286 if (cxxRecordDecl) {
287 accumulateVPtrs();
288 accumulateBases();
289 if (members.empty()) {
290 appendPaddingBytes(size);
291 computeVolatileBitfields();
292 return;
293 }
294 if (!nonVirtualBaseType)
295 accumulateVBases();
296 }
297
298 llvm::stable_sort(members);
299 // TODO: implement clipTailPadding once bitfields are implemented
302
// A sentinel ("capstone") member at the record's total size lets
// determinePacked/insertPadding treat the end of the record like an ordinary
// member boundary; it is removed again immediately afterwards.
303 members.push_back(makeStorageInfo(size, getUIntNType(8)));
304 determinePacked(nonVirtualBaseType);
305 insertPadding();
306 members.pop_back();
307
308 calculateZeroInit();
309 fillOutputFields();
310 computeVolatileBitfields();
311}
312
// Copies the sorted 'members' into the output structures: fieldTypes (the CIR
// record body) and the field/base index maps. Bit-fields without their own
// storage are attached to the most recently pushed storage member.
// NOTE(review): the original line 322 is missing from this capture -- confirm
// against the original file.
313void CIRRecordLowering::fillOutputFields() {
314 for (const MemberInfo &member : members) {
315 if (member.data)
316 fieldTypes.push_back(member.data);
317 if (member.kind == MemberInfo::InfoKind::Field) {
318 if (member.fieldDecl)
319 fieldIdxMap[member.fieldDecl->getCanonicalDecl()] =
320 fieldTypes.size() - 1;
321 // A field without storage must be a bitfield.
323 if (!member.data)
324 setBitFieldInfo(member.fieldDecl, member.offset, fieldTypes.back());
325 } else if (member.kind == MemberInfo::InfoKind::Base) {
326 nonVirtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1;
327 } else if (member.kind == MemberInfo::InfoKind::VBase) {
328 virtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1;
329 }
330 }
331}
332
/// Lowers a contiguous run of bit-fields starting at 'field' into storage
/// "access units" plus per-bitfield MemberInfo entries (with null data),
/// returning the iterator to the first non-bitfield member. Two strategies:
/// the Microsoft "discrete storage" ABI, and Itanium-style span accumulation.
// NOTE(review): this definition is truncated in this capture: the original
// line 333 (return type, presumably RecordDecl::field_iterator) and line 335
// (the fieldEnd parameter and opening brace) are missing -- confirm against
// the original file.
334CIRRecordLowering::accumulateBitFields(RecordDecl::field_iterator field,
336 if (isDiscreteBitFieldABI()) {
337 // run stores the first element of the current run of bitfields. fieldEnd is
338 // used as a special value to note that we don't have a current run. A
339 // bitfield run is a contiguous collection of bitfields that can be stored
340 // in the same storage block. Zero-sized bitfields and bitfields that would
341 // cross an alignment boundary break a run and start a new one.
// NOTE(review): the original line 342, which declares 'run' (presumably
// 'RecordDecl::field_iterator run = fieldEnd;'), is missing here.
343 // tail is the offset of the first bit off the end of the current run. It's
344 // used to determine if the ASTRecordLayout is treating these two bitfields
345 // as contiguous. StartBitOffset is offset of the beginning of the Run.
346 uint64_t startBitOffset, tail = 0;
347 for (; field != fieldEnd && field->isBitField(); ++field) {
348 // Zero-width bitfields end runs.
349 if (field->isZeroLengthBitField()) {
350 run = fieldEnd;
351 continue;
352 }
353 uint64_t bitOffset = getFieldBitOffset(*field);
354 mlir::Type type = cirGenTypes.convertTypeForMem(field->getType());
355 // If we don't have a run yet, or don't live within the previous run's
356 // allocated storage then we allocate some storage and start a new run.
357 if (run == fieldEnd || bitOffset >= tail) {
358 run = field;
359 startBitOffset = bitOffset;
360 tail = startBitOffset + dataLayout.getTypeAllocSizeInBits(type);
361 // Add the storage member to the record. This must be added to the
362 // record before the bitfield members so that it gets laid out before
363 // the bitfields it contains get laid out.
364 members.push_back(
365 makeStorageInfo(bitsToCharUnits(startBitOffset), type));
366 }
367 // Bitfields get the offset of their storage but come afterward and remain
368 // there after a stable sort.
369 members.push_back(MemberInfo(bitsToCharUnits(startBitOffset),
370 MemberInfo::InfoKind::Field, nullptr,
371 *field));
372 }
373 return field;
374 }
375
// Itanium-style accumulation: greedily grow a span of adjacent bit-fields and
// pick the best single-register access unit that covers it.
376 CharUnits regSize =
377 bitsToCharUnits(astContext.getTargetInfo().getRegisterWidth());
378 unsigned charBits = astContext.getCharWidth();
379
380 // Data about the start of the span we're accumulating to create an access
381 // unit from. 'Begin' is the first bitfield of the span. If 'begin' is
382 // 'fieldEnd', we've not got a current span. The span starts at the
383 // 'beginOffset' character boundary. 'bitSizeSinceBegin' is the size (in bits)
384 // of the span -- this might include padding when we've advanced to a
385 // subsequent bitfield run.
386 RecordDecl::field_iterator begin = fieldEnd;
387 CharUnits beginOffset;
388 uint64_t bitSizeSinceBegin;
389
390 // The (non-inclusive) end of the largest acceptable access unit we've found
391 // since 'begin'. If this is 'begin', we're gathering the initial set of
392 // bitfields of a new span. 'bestEndOffset' is the end of that acceptable
393 // access unit -- it might extend beyond the last character of the bitfield
394 // run, using available padding characters.
395 RecordDecl::field_iterator bestEnd = begin;
396 CharUnits bestEndOffset;
397 bool bestClipped; // Whether the representation must be in a byte array.
398
399 for (;;) {
400 // atAlignedBoundary is true if 'field' is the (potential) start of a new
401 // span (or the end of the bitfields). When true, limitOffset is the
402 // character offset of that span and barrier indicates whether the new
403 // span cannot be merged into the current one.
404 bool atAlignedBoundary = false;
405 bool barrier = false; // a barrier can be a zero Bit Width or non bit member
406 if (field != fieldEnd && field->isBitField()) {
407 uint64_t bitOffset = getFieldBitOffset(*field);
408 if (begin == fieldEnd) {
409 // Beginning a new span.
410 begin = field;
411 bestEnd = begin;
412
413 assert((bitOffset % charBits) == 0 && "Not at start of char");
414 beginOffset = bitsToCharUnits(bitOffset);
415 bitSizeSinceBegin = 0;
416 } else if ((bitOffset % charBits) != 0) {
417 // Bitfield occupies the same character as previous bitfield, it must be
418 // part of the same span. This can include zero-length bitfields, should
419 // the target not align them to character boundaries. Such non-alignment
420 // is at variance with the standards, which require zero-length
421 // bitfields be a barrier between access units. But of course we can't
422 // achieve that in the middle of a character.
423 assert(bitOffset ==
424 astContext.toBits(beginOffset) + bitSizeSinceBegin &&
425 "Concatenating non-contiguous bitfields");
426 } else {
427 // Bitfield potentially begins a new span. This includes zero-length
428 // bitfields on non-aligning targets that lie at character boundaries
429 // (those are barriers to merging).
430 if (field->isZeroLengthBitField())
431 barrier = true;
432 atAlignedBoundary = true;
433 }
434 } else {
435 // We've reached the end of the bitfield run. Either we're done, or this
436 // is a barrier for the current span.
437 if (begin == fieldEnd)
438 break;
439
440 barrier = true;
441 atAlignedBoundary = true;
442 }
443
444 // 'installBest' indicates whether we should create an access unit for the
445 // current best span: fields ['begin', 'bestEnd') occupying characters
446 // ['beginOffset', 'bestEndOffset').
447 bool installBest = false;
448 if (atAlignedBoundary) {
449 // 'field' is the start of a new span or the end of the bitfields. The
450 // just-seen span now extends to 'bitSizeSinceBegin'.
451
452 // Determine if we can accumulate that just-seen span into the current
453 // accumulation.
454 CharUnits accessSize = bitsToCharUnits(bitSizeSinceBegin + charBits - 1);
455 if (bestEnd == begin) {
456 // This is the initial run at the start of a new span. By definition,
457 // this is the best seen so far.
458 bestEnd = field;
459 bestEndOffset = beginOffset + accessSize;
460 // Assume clipped until proven not below.
461 bestClipped = true;
462 if (!bitSizeSinceBegin)
463 // A zero-sized initial span -- this will install nothing and reset
464 // for another.
465 installBest = true;
466 } else if (accessSize > regSize) {
467 // Accumulating the just-seen span would create a multi-register access
468 // unit, which would increase register pressure.
469 installBest = true;
470 }
471
472 if (!installBest) {
473 // Determine if accumulating the just-seen span will create an expensive
474 // access unit or not.
475 mlir::Type type = getUIntNType(astContext.toBits(accessSize));
// NOTE(review): the original line 476 is missing from this capture
// (presumably a guard/assertion ahead of the errorNYI below) -- confirm.
477 cirGenTypes.getCGModule().errorNYI(
478 field->getSourceRange(), "NYI CheapUnalignedBitFieldAccess");
479
480 if (!installBest) {
481 // Find the next used storage offset to determine what the limit of
482 // the current span is. That's either the offset of the next field
483 // with storage (which might be field itself) or the end of the
484 // non-reusable tail padding.
485 CharUnits limitOffset;
486 for (auto probe = field; probe != fieldEnd; ++probe)
487 if (!isEmptyFieldForLayout(astContext, *probe)) {
488 // A member with storage sets the limit.
489 assert((getFieldBitOffset(*probe) % charBits) == 0 &&
490 "Next storage is not byte-aligned");
491 limitOffset = bitsToCharUnits(getFieldBitOffset(*probe));
492 goto FoundLimit;
493 }
494 limitOffset = cxxRecordDecl ? astRecordLayout.getNonVirtualSize()
495 : astRecordLayout.getDataSize();
496
497 FoundLimit:
498 CharUnits typeSize = getSize(type);
499 if (beginOffset + typeSize <= limitOffset) {
500 // There is space before limitOffset to create a naturally-sized
501 // access unit.
502 bestEndOffset = beginOffset + typeSize;
503 bestEnd = field;
504 bestClipped = false;
505 }
506 if (barrier) {
507 // The next field is a barrier that we cannot merge across.
508 installBest = true;
509 } else if (cirGenTypes.getCGModule()
// NOTE(review): the original line 510 (presumably '.getCodeGenOpts()') is
// missing here; as captured this member chain is incomplete -- confirm.
511 .FineGrainedBitfieldAccesses) {
512 installBest = true;
513 } else {
514 // Otherwise, we're not installing. Update the bit size
515 // of the current span to go all the way to limitOffset, which is
516 // the (aligned) offset of next bitfield to consider.
517 bitSizeSinceBegin = astContext.toBits(limitOffset - beginOffset);
518 }
519 }
520 }
521 }
522
523 if (installBest) {
524 assert((field == fieldEnd || !field->isBitField() ||
525 (getFieldBitOffset(*field) % charBits) == 0) &&
526 "Installing but not at an aligned bitfield or limit");
527 CharUnits accessSize = bestEndOffset - beginOffset;
528 if (!accessSize.isZero()) {
529 // Add the storage member for the access unit to the record. The
530 // bitfields get the offset of their storage but come afterward and
531 // remain there after a stable sort.
532 mlir::Type type;
533 if (bestClipped) {
534 assert(getSize(getUIntNType(astContext.toBits(accessSize))) >
535 accessSize &&
536 "Clipped access need not be clipped");
537 type = getByteArrayType(accessSize);
538 } else {
539 type = getUIntNType(astContext.toBits(accessSize));
540 assert(getSize(type) == accessSize &&
541 "Unclipped access must be clipped");
542 }
543 members.push_back(makeStorageInfo(beginOffset, type));
544 for (; begin != bestEnd; ++begin)
545 if (!begin->isZeroLengthBitField())
546 members.push_back(MemberInfo(
547 beginOffset, MemberInfo::InfoKind::Field, nullptr, *begin));
548 }
549 // Reset to start a new span.
550 field = bestEnd;
551 begin = fieldEnd;
552 } else {
553 assert(field != fieldEnd && field->isBitField() &&
554 "Accumulating past end of bitfields");
555 assert(!barrier && "Accumulating across barrier");
556 // Accumulate this bitfield into the current (potential) span.
557 bitSizeSinceBegin += field->getBitWidthValue();
558 ++field;
559 }
560 }
561
562 return field;
563}
564
// Walks every field of the record, dispatching runs of bit-fields to
// accumulateBitFields and turning each non-zero-size ordinary field into a
// MemberInfo with its own storage type.
// NOTE(review): the original line 580 is missing from this capture (likely a
// missing-features assertion in the zero-size branch) -- confirm against the
// original file.
565void CIRRecordLowering::accumulateFields() {
566 for (RecordDecl::field_iterator field = recordDecl->field_begin(),
567 fieldEnd = recordDecl->field_end();
568 field != fieldEnd;) {
569 if (field->isBitField()) {
570 field = accumulateBitFields(field, fieldEnd);
571 assert((field == fieldEnd || !field->isBitField()) &&
572 "Failed to accumulate all the bitfields");
573 } else if (!field->isZeroSize(astContext)) {
574 members.push_back(MemberInfo(bitsToCharUnits(getFieldBitOffset(*field)),
575 MemberInfo::InfoKind::Field,
576 getStorageType(*field), *field));
577 ++field;
578 } else {
579 // TODO(cir): do we want to do anything special about zero size members?
581 ++field;
582 }
583 }
584}
585
586void CIRRecordLowering::calculateZeroInit() {
587 for (const MemberInfo &member : members) {
588 if (member.kind == MemberInfo::InfoKind::Field) {
589 if (!member.fieldDecl || isZeroInitializable(member.fieldDecl))
590 continue;
591 zeroInitializable = zeroInitializableAsBase = false;
592 return;
593 } else if (member.kind == MemberInfo::InfoKind::Base ||
594 member.kind == MemberInfo::InfoKind::VBase) {
595 if (isZeroInitializable(member.cxxRecordDecl))
596 continue;
597 zeroInitializable = false;
598 if (member.kind == MemberInfo::InfoKind::Base)
599 zeroInitializableAsBase = false;
600 }
601 }
602}
603
604void CIRRecordLowering::determinePacked(bool nvBaseType) {
605 if (packed)
606 return;
607 CharUnits alignment = CharUnits::One();
608 CharUnits nvAlignment = CharUnits::One();
609 CharUnits nvSize = !nvBaseType && cxxRecordDecl
610 ? astRecordLayout.getNonVirtualSize()
611 : CharUnits::Zero();
612
613 for (const MemberInfo &member : members) {
614 if (!member.data)
615 continue;
616 // If any member falls at an offset that it not a multiple of its alignment,
617 // then the entire record must be packed.
618 if (member.offset % getAlignment(member.data))
619 packed = true;
620 if (member.offset < nvSize)
621 nvAlignment = std::max(nvAlignment, getAlignment(member.data));
622 alignment = std::max(alignment, getAlignment(member.data));
623 }
624 // If the size of the record (the capstone's offset) is not a multiple of the
625 // record's alignment, it must be packed.
626 if (members.back().offset % alignment)
627 packed = true;
628 // If the non-virtual sub-object is not a multiple of the non-virtual
629 // sub-object's alignment, it must be packed. We cannot have a packed
630 // non-virtual sub-object and an unpacked complete object or vise versa.
631 if (nvSize % nvAlignment)
632 packed = true;
633 // Update the alignment of the sentinel.
634 if (!packed)
635 members.back().data = getUIntNType(astContext.toBits(alignment));
636}
637
638void CIRRecordLowering::insertPadding() {
639 std::vector<std::pair<CharUnits, CharUnits>> padding;
640 CharUnits size = CharUnits::Zero();
641 for (const MemberInfo &member : members) {
642 if (!member.data)
643 continue;
644 CharUnits offset = member.offset;
645 assert(offset >= size);
646 // Insert padding if we need to.
647 if (offset !=
648 size.alignTo(packed ? CharUnits::One() : getAlignment(member.data)))
649 padding.push_back(std::make_pair(size, offset - size));
650 size = offset + getSize(member.data);
651 }
652 if (padding.empty())
653 return;
654 padded = true;
655 // Add the padding to the Members list and sort it.
656 for (const std::pair<CharUnits, CharUnits> &paddingPair : padding)
657 members.push_back(makeStorageInfo(paddingPair.first,
658 getByteArrayType(paddingPair.second)));
659 llvm::stable_sort(members);
660}
661
// Entry point from CIRGenTypes: lowers 'rd' into the (incomplete) CIR record
// type 'ty', additionally computing a separate ".base" subobject type for
// non-final C++ classes whose non-virtual size differs from their full size,
// and packages the results into a CIRGenRecordLayout.
// NOTE(review): the original lines 693, 700 and 705 are missing from this
// capture (likely missing-features assertions) -- confirm against the
// original file.
662std::unique_ptr<CIRGenRecordLayout>
663CIRGenTypes::computeRecordLayout(const RecordDecl *rd, cir::RecordType *ty) {
664 CIRRecordLowering lowering(*this, rd, /*packed=*/false);
665 assert(ty->isIncomplete() && "recomputing record layout?");
666 lowering.lower(/*nonVirtualBaseType=*/false);
667
668 // If we're in C++, compute the base subobject type.
669 cir::RecordType baseTy;
670 if (llvm::isa<CXXRecordDecl>(rd) && !rd->isUnion() &&
671 !rd->hasAttr<FinalAttr>()) {
672 baseTy = *ty;
673 if (lowering.astRecordLayout.getNonVirtualSize() !=
674 lowering.astRecordLayout.getSize()) {
// Re-run the lowering restricted to the non-virtual subobject, inheriting
// the complete object's packedness so field indices line up.
675 CIRRecordLowering baseLowering(*this, rd, /*Packed=*/lowering.packed);
676 baseLowering.lower(/*NonVirtualBaseType=*/true);
677 std::string baseIdentifier = getRecordTypeName(rd, ".base");
678 baseTy = builder.getCompleteNamedRecordType(
679 baseLowering.fieldTypes, baseLowering.packed, baseLowering.padded,
680 baseIdentifier);
681 // TODO(cir): add something like addRecordTypeName
682
683 // BaseTy and Ty must agree on their packedness for getCIRFieldNo to work
684 // on both of them with the same index.
685 assert(lowering.packed == baseLowering.packed &&
686 "Non-virtual and complete types must agree on packedness");
687 }
688 }
689
690 // Fill in the record *after* computing the base type. Filling in the body
691 // signifies that the type is no longer opaque and record layout is complete,
692 // but we may need to recursively layout rd while laying D out as a base type.
694 ty->complete(lowering.fieldTypes, lowering.packed, lowering.padded);
695
696 auto rl = std::make_unique<CIRGenRecordLayout>(
697 ty ? *ty : cir::RecordType{}, baseTy ? baseTy : cir::RecordType{},
698 (bool)lowering.zeroInitializable, (bool)lowering.zeroInitializableAsBase);
699
701
// Transfer ownership of the lowering's maps into the returned layout.
702 rl->nonVirtualBases.swap(lowering.nonVirtualBases);
703 rl->completeObjectVirtualBases.swap(lowering.virtualBases);
704
706
707 // Add all the field numbers.
708 rl->fieldIdxMap.swap(lowering.fieldIdxMap);
709
710 rl->bitFields.swap(lowering.bitFields);
711
712 // Dump the layout, if requested.
713 if (getASTContext().getLangOpts().DumpRecordLayouts) {
714 llvm::outs() << "\n*** Dumping CIRgen Record Layout\n";
715 llvm::outs() << "Record: ";
716 rd->dump(llvm::outs());
717 llvm::outs() << "\nLayout: ";
718 rl->print(llvm::outs());
719 }
720
721 // TODO: implement verification
722 return rl;
723}
724
725void CIRGenRecordLayout::print(raw_ostream &os) const {
726 os << "<CIRecordLayout\n";
727 os << " CIR Type:" << completeObjectType << "\n";
728 if (baseSubobjectType)
729 os << " NonVirtualBaseCIRType:" << baseSubobjectType << "\n";
730 os << " IsZeroInitializable:" << zeroInitializable << "\n";
731 os << " BitFields:[\n";
732 std::vector<std::pair<unsigned, const CIRGenBitFieldInfo *>> bitInfo;
733 for (auto &[decl, info] : bitFields) {
734 const RecordDecl *rd = decl->getParent();
735 unsigned index = 0;
736 for (RecordDecl::field_iterator it = rd->field_begin(); *it != decl; ++it)
737 ++index;
738 bitInfo.push_back(std::make_pair(index, &info));
739 }
740 llvm::array_pod_sort(bitInfo.begin(), bitInfo.end());
741 for (std::pair<unsigned, const CIRGenBitFieldInfo *> &info : bitInfo) {
742 os.indent(4);
743 info.second->print(os);
744 os << "\n";
745 }
746 os << " ]>\n";
747}
748
749void CIRGenBitFieldInfo::print(raw_ostream &os) const {
750 os << "<CIRBitFieldInfo" << " name:" << name << " offset:" << offset
751 << " size:" << size << " isSigned:" << isSigned
752 << " storageSize:" << storageSize
753 << " storageOffset:" << storageOffset.getQuantity()
754 << " volatileOffset:" << volatileOffset
755 << " volatileStorageSize:" << volatileStorageSize
756 << " volatileStorageOffset:" << volatileStorageOffset.getQuantity() << ">";
757}
758
759void CIRGenRecordLayout::dump() const { print(llvm::errs()); }
760
761void CIRGenBitFieldInfo::dump() const { print(llvm::errs()); }
762
763void CIRRecordLowering::lowerUnion() {
764 CharUnits layoutSize = astRecordLayout.getSize();
765 mlir::Type storageType = nullptr;
766 bool seenNamedMember = false;
767
768 // Iterate through the fields setting bitFieldInfo and the Fields array. Also
769 // locate the "most appropriate" storage type.
770 for (const FieldDecl *field : recordDecl->fields()) {
771 mlir::Type fieldType;
772 if (field->isBitField()) {
773 if (field->isZeroLengthBitField())
774 continue;
775 fieldType = getBitfieldStorageType(field->getBitWidthValue());
776 setBitFieldInfo(field, CharUnits::Zero(), fieldType);
777 } else {
778 fieldType = getStorageType(field);
779 }
780
781 // This maps a field to its index. For unions, the index is always 0.
782 fieldIdxMap[field->getCanonicalDecl()] = 0;
783
784 // Compute zero-initializable status.
785 // This union might not be zero initialized: it may contain a pointer to
786 // data member which might have some exotic initialization sequence.
787 // If this is the case, then we ought not to try and come up with a "better"
788 // type, it might not be very easy to come up with a Constant which
789 // correctly initializes it.
790 if (!seenNamedMember) {
791 seenNamedMember = field->getIdentifier();
792 if (!seenNamedMember)
793 if (const RecordDecl *fieldRD = field->getType()->getAsRecordDecl())
794 seenNamedMember = fieldRD->findFirstNamedDataMember();
795 if (seenNamedMember && !isZeroInitializable(field)) {
796 zeroInitializable = zeroInitializableAsBase = false;
797 storageType = fieldType;
798 }
799 }
800
801 // Because our union isn't zero initializable, we won't be getting a better
802 // storage type.
803 if (!zeroInitializable)
804 continue;
805
806 // Conditionally update our storage type if we've got a new "better" one.
807 if (!storageType || getAlignment(fieldType) > getAlignment(storageType) ||
808 (getAlignment(fieldType) == getAlignment(storageType) &&
809 getSize(fieldType) > getSize(storageType)))
810 storageType = fieldType;
811
812 // NOTE(cir): Track all union member's types, not just the largest one. It
813 // allows for proper type-checking and retain more info for analisys.
814 fieldTypes.push_back(fieldType);
815 }
816
817 if (!storageType)
818 cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
819 "No-storage Union NYI");
820
821 if (layoutSize < getSize(storageType))
822 storageType = getByteArrayType(layoutSize);
823 else
824 appendPaddingBytes(layoutSize - getSize(storageType));
825
826 // Set packed if we need it.
827 if (layoutSize % getAlignment(storageType))
828 packed = true;
829}
830
831bool CIRRecordLowering::hasOwnStorage(const CXXRecordDecl *decl,
832 const CXXRecordDecl *query) {
833 const ASTRecordLayout &declLayout = astContext.getASTRecordLayout(decl);
834 if (declLayout.isPrimaryBaseVirtual() && declLayout.getPrimaryBase() == query)
835 return false;
836 for (const auto &base : decl->bases())
837 if (!hasOwnStorage(base.getType()->getAsCXXRecordDecl(), query))
838 return false;
839 return true;
840}
841
842/// The AAPCS that defines that, when possible, bit-fields should
843/// be accessed using containers of the declared type width:
844/// When a volatile bit-field is read, and its container does not overlap with
845/// any non-bit-field member or any zero length bit-field member, its container
846/// must be read exactly once using the access width appropriate to the type of
847/// the container. When a volatile bit-field is written, and its container does
848/// not overlap with any non-bit-field member or any zero-length bit-field
849/// member, its container must be read exactly once and written exactly once
850/// using the access width appropriate to the type of the container. The two
851/// accesses are not atomic.
852///
853/// Enforcing the width restriction can be disabled using
854/// -fno-aapcs-bitfield-width.
855void CIRRecordLowering::computeVolatileBitfields() {
856 if (!isAAPCS() ||
857 !cirGenTypes.getCGModule().getCodeGenOpts().AAPCSBitfieldWidth)
858 return;
859
860 for (auto &[field, info] : bitFields) {
861 mlir::Type resLTy = cirGenTypes.convertTypeForMem(field->getType());
862
863 if (astContext.toBits(astRecordLayout.getAlignment()) <
864 getSizeInBits(resLTy).getQuantity())
865 continue;
866
867 // CIRRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
868 // for big-endian targets, but it assumes a container of width
869 // info.storageSize. Since AAPCS uses a different container size (width
870 // of the type), we first undo that calculation here and redo it once
871 // the bit-field offset within the new container is calculated.
872 const unsigned oldOffset =
873 isBigEndian() ? info.storageSize - (info.offset + info.size)
874 : info.offset;
875 // Offset to the bit-field from the beginning of the struct.
876 const unsigned absoluteOffset =
877 astContext.toBits(info.storageOffset) + oldOffset;
878
879 // Container size is the width of the bit-field type.
880 const unsigned storageSize = getSizeInBits(resLTy).getQuantity();
881 // Nothing to do if the access uses the desired
882 // container width and is naturally aligned.
883 if (info.storageSize == storageSize && (oldOffset % storageSize == 0))
884 continue;
885
886 // Offset within the container.
887 unsigned offset = absoluteOffset & (storageSize - 1);
888 // Bail out if an aligned load of the container cannot cover the entire
889 // bit-field. This can happen for example, if the bit-field is part of a
890 // packed struct. AAPCS does not define access rules for such cases, we let
891 // clang to follow its own rules.
892 if (offset + info.size > storageSize)
893 continue;
894
895 // Re-adjust offsets for big-endian targets.
896 if (isBigEndian())
897 offset = storageSize - (offset + info.size);
898
899 const CharUnits storageOffset =
900 astContext.toCharUnitsFromBits(absoluteOffset & ~(storageSize - 1));
901 const CharUnits end = storageOffset +
902 astContext.toCharUnitsFromBits(storageSize) -
904
905 const ASTRecordLayout &layout =
906 astContext.getASTRecordLayout(field->getParent());
907 // If we access outside memory outside the record, than bail out.
908 const CharUnits recordSize = layout.getSize();
909 if (end >= recordSize)
910 continue;
911
912 // Bail out if performing this load would access non-bit-fields members.
913 bool conflict = false;
914 for (const auto *f : recordDecl->fields()) {
915 // Allow sized bit-fields overlaps.
916 if (f->isBitField() && !f->isZeroLengthBitField())
917 continue;
918
919 const CharUnits fOffset = astContext.toCharUnitsFromBits(
920 layout.getFieldOffset(f->getFieldIndex()));
921
922 // As C11 defines, a zero sized bit-field defines a barrier, so
923 // fields after and before it should be race condition free.
924 // The AAPCS acknowledges it and imposes no restritions when the
925 // natural container overlaps a zero-length bit-field.
926 if (f->isZeroLengthBitField()) {
927 if (end > fOffset && storageOffset < fOffset) {
928 conflict = true;
929 break;
930 }
931 }
932
933 const CharUnits fEnd =
934 fOffset +
935 astContext.toCharUnitsFromBits(
936 getSizeInBits(cirGenTypes.convertTypeForMem(f->getType()))
937 .getQuantity()) -
939 // If no overlap, continue.
940 if (end < fOffset || fEnd < storageOffset)
941 continue;
942
943 // The desired load overlaps a non-bit-field member, bail out.
944 conflict = true;
945 break;
946 }
947
948 if (conflict)
949 continue;
950 // Write the new bit-field access parameters.
951 // As the storage offset now is defined as the number of elements from the
952 // start of the structure, we should divide the Offset by the element size.
954 storageOffset /
955 astContext.toCharUnitsFromBits(storageSize).getQuantity();
956 info.volatileStorageSize = storageSize;
957 info.volatileOffset = offset;
958 }
959}
960
961void CIRRecordLowering::accumulateBases() {
962 // If we've got a primary virtual base, we need to add it with the bases.
963 if (astRecordLayout.isPrimaryBaseVirtual()) {
964 const CXXRecordDecl *baseDecl = astRecordLayout.getPrimaryBase();
965 members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::InfoKind::Base,
966 getStorageType(baseDecl), baseDecl));
967 }
968
969 // Accumulate the non-virtual bases.
970 for (const auto &base : cxxRecordDecl->bases()) {
971 if (base.isVirtual())
972 continue;
973 // Bases can be zero-sized even if not technically empty if they
974 // contain only a trailing array member.
975 const CXXRecordDecl *baseDecl = base.getType()->getAsCXXRecordDecl();
976 if (!baseDecl->isEmpty() &&
977 !astContext.getASTRecordLayout(baseDecl).getNonVirtualSize().isZero()) {
978 members.push_back(MemberInfo(astRecordLayout.getBaseClassOffset(baseDecl),
979 MemberInfo::InfoKind::Base,
980 getStorageType(baseDecl), baseDecl));
981 }
982 }
983}
984
985void CIRRecordLowering::accumulateVBases() {
986 for (const auto &base : cxxRecordDecl->vbases()) {
987 const CXXRecordDecl *baseDecl = base.getType()->getAsCXXRecordDecl();
988 if (isEmptyRecordForLayout(astContext, base.getType()))
989 continue;
990 CharUnits offset = astRecordLayout.getVBaseClassOffset(baseDecl);
991 // If the vbase is a primary virtual base of some base, then it doesn't
992 // get its own storage location but instead lives inside of that base.
993 if (isOverlappingVBaseABI() && astContext.isNearlyEmpty(baseDecl) &&
994 !hasOwnStorage(cxxRecordDecl, baseDecl)) {
995 members.push_back(
996 MemberInfo(offset, MemberInfo::InfoKind::VBase, nullptr, baseDecl));
997 continue;
998 }
999 // If we've got a vtordisp, add it as a storage type.
1000 if (astRecordLayout.getVBaseOffsetsMap()
1001 .find(baseDecl)
1002 ->second.hasVtorDisp())
1003 members.push_back(makeStorageInfo(offset - CharUnits::fromQuantity(4),
1004 getUIntNType(32)));
1005 members.push_back(MemberInfo(offset, MemberInfo::InfoKind::VBase,
1006 getStorageType(baseDecl), baseDecl));
1007 }
1008}
1009
1010void CIRRecordLowering::accumulateVPtrs() {
1011 if (astRecordLayout.hasOwnVFPtr())
1012 members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::InfoKind::VFPtr,
1013 getVFPtrType()));
1014
1015 if (astRecordLayout.hasOwnVBPtr())
1016 cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
1017 "accumulateVPtrs: hasOwnVBPtr");
1018}
1019
1020mlir::Type CIRRecordLowering::getVFPtrType() {
1021 return cir::VPtrType::get(builder.getContext());
1022}
Defines the clang::ASTContext interface.
static bool isAAPCS(const TargetInfo &targetInfo)
Helper method to check if the underlying ABI is AAPCS.
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
static void print(llvm::raw_ostream &OS, const T &V, ASTContext &ASTCtx, QualType Ty)
bool isBigEndian() const
llvm::TypeSize getTypeAllocSizeInBits(mlir::Type ty) const
Returns the offset in bits between successive objects of the specified type, including alignment padd...
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
bool isNearlyEmpty(const CXXRecordDecl *RD) const
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:856
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
bool hasOwnVFPtr() const
hasOwnVFPtr - Does this class provide its own virtual-function table pointer, rather than inheriting ...
CharUnits getAlignment() const
getAlignment - Get the record alignment in characters.
bool hasOwnVBPtr() const
hasOwnVBPtr - Does this class provide its own virtual-base table pointer, rather than inheriting one ...
CharUnits getSize() const
getSize - Get the record size in characters.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getDataSize() const
getDataSize() - Get the record data size, which is the record size without tail padding,...
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getVBaseClassOffset(const CXXRecordDecl *VBase) const
getVBaseClassOffset - Get the offset, in chars, for the given base class.
const VBaseOffsetsMapTy & getVBaseOffsetsMap() const
const CXXRecordDecl * getPrimaryBase() const
getPrimaryBase - Get the primary base for this record.
bool isPrimaryBaseVirtual() const
isPrimaryBaseVirtual - Get whether the primary base for this record is virtual or not.
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object...
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
const clang::CodeGenOptions & getCodeGenOpts() const
This class organizes the cross-module state that is used while lowering AST types to CIR types.
Definition CIRGenTypes.h:48
CIRGenModule & getCGModule() const
Definition CIRGenTypes.h:82
std::string getRecordTypeName(const clang::RecordDecl *, llvm::StringRef suffix)
clang::ASTContext & getASTContext() const
std::unique_ptr< CIRGenRecordLayout > computeRecordLayout(const clang::RecordDecl *rd, cir::RecordType *ty)
mlir::Type convertTypeForMem(clang::QualType, bool forBitField=false)
Convert type T into an mlir::Type.
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition DeclCXX.h:1186
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Definition CharUnits.h:201
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition CharUnits.h:53
void dump() const
bool hasAttr() const
Definition DeclBase.h:577
Represents a member of a struct/union/class.
Definition Decl.h:3157
unsigned getBitWidthValue() const
Computes the bit width of this field, if this is a bit field.
Definition Decl.cpp:4693
FieldDecl * getCanonicalDecl() override
Retrieves the canonical declaration of this field.
Definition Decl.h:3404
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:300
Represents a struct/union/class.
Definition Decl.h:4309
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4509
field_iterator field_begin() const
Definition Decl.cpp:5154
bool isUnion() const
Definition Decl.h:3919
virtual unsigned getRegisterWidth() const
Return the "preferred" register width on this target.
Definition TargetInfo.h:898
bool hasCheapUnalignedBitFieldAccess() const
Return true iff unaligned accesses are cheap.
Definition TargetInfo.h:912
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2225
QualType getType() const
Definition Decl.h:722
bool isValidFundamentalIntWidth(unsigned width)
Definition CIRTypes.cpp:475
bool isEmptyFieldForLayout(const ASTContext &context, const FieldDecl *fd)
isEmptyFieldForLayout - Return true if the field is "empty", that is, either a zero-width bit-field o...
bool isEmptyRecordForLayout(const ASTContext &context, QualType t)
isEmptyRecordForLayout - Return true if a structure contains only empty base classes (per isEmptyReco...
Definition TargetInfo.cpp:7
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::VariadicDynCastAllOfMatcher< Decl, FieldDecl > fieldDecl
Matches field declarations.
const internal::VariadicDynCastAllOfMatcher< Decl, CXXRecordDecl > cxxRecordDecl
Matches C++ class declarations.
const internal::VariadicAllOfMatcher< Decl > decl
Matches declarations.
const internal::VariadicDynCastAllOfMatcher< Decl, RecordDecl > recordDecl
Matches class, struct, and union declarations.
RangeSelector member(std::string ID)
Given a MemberExpr, selects the member token. ID is the node's binding in the match result.
Stencil run(MatchConsumer< std::string > C)
Wraps a MatchConsumer in a Stencil, so that it can be used in a Stencil.
Definition Stencil.cpp:489
The JSON file list parser is used to communicate input to InstallAPI.
bool operator<(DeclarationName LHS, DeclarationName RHS)
Ordering on two declaration names.
unsigned long uint64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
void __ovld __conv barrier(cl_mem_fence_flags)
All work-items in a work-group executing the kernel on a processor must execute this function before ...
#define false
Definition stdbool.h:26
#define true
Definition stdbool.h:25
static bool zeroSizeRecordMembers()
static bool astRecordDeclAttr()
static bool recordZeroInit()
unsigned offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the c...
void print(llvm::raw_ostream &os) const
unsigned storageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned volatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
clang::CharUnits storageOffset
The offset of the bitfield storage from the start of the record.
unsigned size
The total size of the bit-field, in bits.
unsigned isSigned
Whether the bit-field is signed.
clang::CharUnits volatileStorageOffset
The offset of the bitfield storage from the start of the record.
unsigned volatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the c...
llvm::StringRef name
The name of a bitfield.