CIRGenRecordLayoutBuilder.cpp
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to compute the layout of a record.
//
//===----------------------------------------------------------------------===//

#include "CIRGenBuilder.h"
#include "CIRGenModule.h"
#include "CIRGenTypes.h"

#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CIR/MissingFeatures.h"
#include "llvm/Support/Casting.h"

#include <memory>

using namespace llvm;
using namespace clang;
using namespace clang::CIRGen;
namespace {
/// The CIRRecordLowering is responsible for lowering an ASTRecordLayout to an
/// mlir::Type. Some of the lowering is straightforward, some is not.
// TODO: Detail some of the complexities and weirdnesses?
// (See CGRecordLayoutBuilder.cpp)
struct CIRRecordLowering final {

  // MemberInfo is a helper structure that contains information about a record
  // member. In addition to the standard member types, there exists a sentinel
  // member type that ensures correct rounding.
  struct MemberInfo final {
    CharUnits offset;
    enum class InfoKind { VFPtr, Field, Base, VBase } kind;
    mlir::Type data;
    union {
      const FieldDecl *fieldDecl;
      const CXXRecordDecl *cxxRecordDecl;
    };
    MemberInfo(CharUnits offset, InfoKind kind, mlir::Type data,
               const FieldDecl *fieldDecl = nullptr)
        : offset{offset}, kind{kind}, data{data}, fieldDecl{fieldDecl} {}
    MemberInfo(CharUnits offset, InfoKind kind, mlir::Type data,
               const CXXRecordDecl *rd)
        : offset{offset}, kind{kind}, data{data}, cxxRecordDecl{rd} {}
    // MemberInfos are sorted so we define a < operator.
    bool operator<(const MemberInfo &other) const {
      return offset < other.offset;
    }
  };
  // The constructor.
  CIRRecordLowering(CIRGenTypes &cirGenTypes, const RecordDecl *recordDecl,
                    bool packed);

  /// Constructs a MemberInfo instance from an offset and mlir::Type.
  MemberInfo makeStorageInfo(CharUnits offset, mlir::Type data) {
    return MemberInfo(offset, MemberInfo::InfoKind::Field, data);
  }

  // Layout routines.
  void setBitFieldInfo(const FieldDecl *fd, CharUnits startOffset,
                       mlir::Type storageType);

  void lower(bool nonVirtualBaseType);
  void lowerUnion();

  /// Determines if we need a packed llvm struct.
  void determinePacked(bool nvBaseType);
  /// Inserts padding everywhere it's needed.
  void insertPadding();

  void computeVolatileBitfields();
  void accumulateBases();
  void accumulateVPtrs();
  void accumulateVBases();
  void accumulateFields();
  RecordDecl::field_iterator
  accumulateBitFields(RecordDecl::field_iterator field,
                      RecordDecl::field_iterator fieldEnd);

  mlir::Type getVFPtrType();

  bool isAAPCS() const {
    return astContext.getTargetInfo().getABI().starts_with("aapcs");
  }

  /// Helper function to check if the target machine is BigEndian.
  bool isBigEndian() const { return astContext.getTargetInfo().isBigEndian(); }

  // The Itanium base layout rule allows virtual bases to overlap
  // other bases, which complicates layout in specific ways.
  //
  // Note specifically that the ms_struct attribute doesn't change this.
  bool isOverlappingVBaseABI() {
    return !astContext.getTargetInfo().getCXXABI().isMicrosoft();
  }
  // Recursively searches all of the bases to find out if a vbase is
  // not the primary vbase of some base class.
  bool hasOwnStorage(const CXXRecordDecl *decl, const CXXRecordDecl *query);

  /// The Microsoft bitfield layout rule allocates discrete storage
  /// units of the field's formal type and only combines adjacent
  /// fields of the same formal type. We want to emit a layout with
  /// these discrete storage units instead of combining them into a
  /// continuous run.
  bool isDiscreteBitFieldABI() {
    return astContext.getTargetInfo().getCXXABI().isMicrosoft() ||
           recordDecl->isMsStruct(astContext);
  }
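  // For example (illustrative): under this rule,
  //   struct S { short a : 4; int b : 4; };
  // keeps 'a' and 'b' in separate storage units because their declared types
  // differ, whereas the Itanium rule would merge them into a single run.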

  CharUnits bitsToCharUnits(uint64_t bitOffset) {
    return astContext.toCharUnitsFromBits(bitOffset);
  }

  void calculateZeroInit();

  CharUnits getSize(mlir::Type Ty) {
    return CharUnits::fromQuantity(dataLayout.layout.getTypeSize(Ty));
  }
  CharUnits getSizeInBits(mlir::Type ty) {
    return CharUnits::fromQuantity(dataLayout.layout.getTypeSizeInBits(ty));
  }
  CharUnits getAlignment(mlir::Type Ty) {
    return CharUnits::fromQuantity(dataLayout.layout.getTypeABIAlignment(Ty));
  }

  bool isZeroInitializable(const FieldDecl *fd) {
    return cirGenTypes.isZeroInitializable(fd->getType());
  }
  bool isZeroInitializable(const RecordDecl *rd) {
    return cirGenTypes.isZeroInitializable(rd);
  }

  /// Wraps cir::IntType with some implicit arguments.
  mlir::Type getUIntNType(uint64_t numBits) {
    unsigned alignedBits = llvm::PowerOf2Ceil(numBits);
    alignedBits = std::max(8u, alignedBits);
    return cir::IntType::get(&cirGenTypes.getMLIRContext(), alignedBits,
                             /*isSigned=*/false);
  }
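  // For example (illustrative): getUIntNType(17) rounds up to the next power
  // of two and yields an unsigned 32-bit type, while getUIntNType(3) is
  // clamped to the 8-bit minimum and yields an unsigned 8-bit type.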

  mlir::Type getCharType() {
    return cir::IntType::get(&cirGenTypes.getMLIRContext(),
                             astContext.getCharWidth(),
                             /*isSigned=*/false);
  }

  mlir::Type getByteArrayType(CharUnits numberOfChars) {
    assert(!numberOfChars.isZero() && "Empty byte arrays aren't allowed.");
    mlir::Type type = getCharType();
    return numberOfChars == CharUnits::One()
               ? type
               : cir::ArrayType::get(type, numberOfChars.getQuantity());
  }
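  // For example (illustrative): a request for two chars produces an array of
  // two char-typed elements, while a request for exactly one char collapses
  // to the plain char type itself.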

  // Gets the CIR BaseSubobject type from a CXXRecordDecl.
  mlir::Type getStorageType(const CXXRecordDecl *RD) {
    return cirGenTypes.getCIRGenRecordLayout(RD).getBaseSubobjectCIRType();
  }
  // This is different from LLVM traditional codegen because CIRGen uses arrays
  // of bytes instead of arbitrary-sized integers. This is important for packed
  // structures support.
  mlir::Type getBitfieldStorageType(unsigned numBits) {
    unsigned alignedBits = llvm::alignTo(numBits, astContext.getCharWidth());
    if (cir::isValidFundamentalIntWidth(alignedBits))
      return builder.getUIntNTy(alignedBits);

    mlir::Type type = getCharType();
    return cir::ArrayType::get(type, alignedBits / astContext.getCharWidth());
  }
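  // For example (illustrative, assuming 8-bit chars): 17 bits round up to 24,
  // which is not a fundamental integer width, so the storage becomes a 3-byte
  // array; 16 bits would map directly to an unsigned 16-bit integer.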

  mlir::Type getStorageType(const FieldDecl *fieldDecl) {
    mlir::Type type = cirGenTypes.convertTypeForMem(fieldDecl->getType());
    if (fieldDecl->isBitField()) {
      cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
                                         "getStorageType for bitfields");
    }
    return type;
  }

  uint64_t getFieldBitOffset(const FieldDecl *fieldDecl) {
    return astRecordLayout.getFieldOffset(fieldDecl->getFieldIndex());
  }

  /// Fills out the structures that are ultimately consumed.
  void fillOutputFields();

  void appendPaddingBytes(CharUnits size) {
    if (!size.isZero()) {
      fieldTypes.push_back(getByteArrayType(size));
      padded = true;
    }
  }

  CIRGenTypes &cirGenTypes;
  CIRGenBuilderTy &builder;
  const ASTContext &astContext;
  const RecordDecl *recordDecl;
  const CXXRecordDecl *cxxRecordDecl;
  const ASTRecordLayout &astRecordLayout;
  // Helpful intermediate data-structures
  std::vector<MemberInfo> members;
  // Output fields, consumed by CIRGenTypes::computeRecordLayout
  llvm::SmallVector<mlir::Type, 16> fieldTypes;
  llvm::DenseMap<const FieldDecl *, CIRGenBitFieldInfo> bitFields;
  llvm::DenseMap<const FieldDecl *, unsigned> fieldIdxMap;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> nonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> virtualBases;
  cir::CIRDataLayout dataLayout;

  LLVM_PREFERRED_TYPE(bool)
  unsigned zeroInitializable : 1;
  LLVM_PREFERRED_TYPE(bool)
  unsigned zeroInitializableAsBase : 1;
  LLVM_PREFERRED_TYPE(bool)
  unsigned packed : 1;
  LLVM_PREFERRED_TYPE(bool)
  unsigned padded : 1;

private:
  CIRRecordLowering(const CIRRecordLowering &) = delete;
  void operator=(const CIRRecordLowering &) = delete;
}; // CIRRecordLowering
} // namespace

CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes,
                                     const RecordDecl *recordDecl, bool packed)
    : cirGenTypes{cirGenTypes}, builder{cirGenTypes.getBuilder()},
      astContext{cirGenTypes.getASTContext()}, recordDecl{recordDecl},
      cxxRecordDecl{llvm::dyn_cast<CXXRecordDecl>(recordDecl)},
      astRecordLayout{
          cirGenTypes.getASTContext().getASTRecordLayout(recordDecl)},
      dataLayout{cirGenTypes.getCGModule().getModule()},
      zeroInitializable{true}, zeroInitializableAsBase{true}, packed{packed},
      padded{false} {}

void CIRRecordLowering::setBitFieldInfo(const FieldDecl *fd,
                                        CharUnits startOffset,
                                        mlir::Type storageType) {
  CIRGenBitFieldInfo &info = bitFields[fd->getCanonicalDecl()];
  info.isSigned = fd->getType()->isSignedIntegerOrEnumerationType();
  info.offset =
      (unsigned)(getFieldBitOffset(fd) - astContext.toBits(startOffset));
  info.size = fd->getBitWidthValue();
  info.storageSize = getSizeInBits(storageType).getQuantity();
  info.storageOffset = startOffset;
  info.storageType = storageType;
  info.name = fd->getName();

  if (info.size > info.storageSize)
    info.size = info.storageSize;
  // Reverse the bit offsets for big endian machines. Since bitfields are laid
  // out as packed bits within an integer-sized unit, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
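  // For example (illustrative): a 3-bit field at bit offset 2 within an 8-bit
  // storage unit keeps offset 2 on a little-endian target, but becomes
  // 8 - (2 + 3) = 3 on a big-endian one.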
  if (dataLayout.isBigEndian())
    info.offset = info.storageSize - (info.offset + info.size);

  info.volatileStorageSize = 0;
  info.volatileOffset = 0;
  info.volatileStorageOffset = CharUnits::Zero();
}

void CIRRecordLowering::lower(bool nonVirtualBaseType) {
  if (recordDecl->isUnion()) {
    lowerUnion();
    computeVolatileBitfields();
    return;
  }

  CharUnits size = nonVirtualBaseType ? astRecordLayout.getNonVirtualSize()
                                      : astRecordLayout.getSize();

  accumulateFields();

  if (cxxRecordDecl) {
    accumulateVPtrs();
    accumulateBases();
    if (members.empty()) {
      appendPaddingBytes(size);
      computeVolatileBitfields();
      return;
    }
    if (!nonVirtualBaseType)
      accumulateVBases();
  }

  llvm::stable_sort(members);
  // TODO: Verify bitfield clipping
  assert(!cir::MissingFeatures::checkBitfieldClipping());

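  // Add a sentinel ("capstone") member at the record's total size; it gives
  // determinePacked and insertPadding a well-defined end marker and is
  // removed again once padding has been computed.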
  members.push_back(makeStorageInfo(size, getUIntNType(8)));
  determinePacked(nonVirtualBaseType);
  insertPadding();
  members.pop_back();

  calculateZeroInit();
  fillOutputFields();
  computeVolatileBitfields();
}

void CIRRecordLowering::fillOutputFields() {
  for (const MemberInfo &member : members) {
    if (member.data)
      fieldTypes.push_back(member.data);
    if (member.kind == MemberInfo::InfoKind::Field) {
      if (member.fieldDecl)
        fieldIdxMap[member.fieldDecl->getCanonicalDecl()] =
            fieldTypes.size() - 1;
      // A field without storage must be a bitfield.
      if (!member.data) {
        assert(member.fieldDecl &&
               "member.data is a nullptr so member.fieldDecl should not be");
        setBitFieldInfo(member.fieldDecl, member.offset, fieldTypes.back());
      }
    } else if (member.kind == MemberInfo::InfoKind::Base) {
      nonVirtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1;
    } else if (member.kind == MemberInfo::InfoKind::VBase) {
      virtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1;
    }
  }
}

RecordDecl::field_iterator
CIRRecordLowering::accumulateBitFields(RecordDecl::field_iterator field,
                                       RecordDecl::field_iterator fieldEnd) {
  if (isDiscreteBitFieldABI()) {
    // run stores the first element of the current run of bitfields. fieldEnd
    // is used as a special value to note that we don't have a current run. A
    // bitfield run is a contiguous collection of bitfields that can be stored
    // in the same storage block. Zero-sized bitfields and bitfields that would
    // cross an alignment boundary break a run and start a new one.
    RecordDecl::field_iterator run = fieldEnd;
    // tail is the offset of the first bit off the end of the current run. It's
    // used to determine if the ASTRecordLayout is treating these two bitfields
    // as contiguous. startBitOffset is the offset of the beginning of the run.
    uint64_t startBitOffset, tail = 0;
    for (; field != fieldEnd && field->isBitField(); ++field) {
      // Zero-width bitfields end runs.
      if (field->isZeroLengthBitField()) {
        run = fieldEnd;
        continue;
      }
      uint64_t bitOffset = getFieldBitOffset(*field);
      mlir::Type type = cirGenTypes.convertTypeForMem(field->getType());
      // If we don't have a run yet, or don't live within the previous run's
      // allocated storage then we allocate some storage and start a new run.
      if (run == fieldEnd || bitOffset >= tail) {
        run = field;
        startBitOffset = bitOffset;
        tail = startBitOffset + dataLayout.getTypeAllocSizeInBits(type);
        // Add the storage member to the record. This must be added to the
        // record before the bitfield members so that it gets laid out before
        // the bitfields it contains get laid out.
        members.push_back(
            makeStorageInfo(bitsToCharUnits(startBitOffset), type));
      }
      // Bitfields get the offset of their storage but come afterward and
      // remain there after a stable sort.
      members.push_back(MemberInfo(bitsToCharUnits(startBitOffset),
                                   MemberInfo::InfoKind::Field, nullptr,
                                   *field));
    }
    return field;
  }

  CharUnits regSize =
      bitsToCharUnits(astContext.getTargetInfo().getRegisterWidth());
  unsigned charBits = astContext.getCharWidth();

  // Data about the start of the span we're accumulating to create an access
  // unit from. 'begin' is the first bitfield of the span. If 'begin' is
  // 'fieldEnd', we've not got a current span. The span starts at the
  // 'beginOffset' character boundary. 'bitSizeSinceBegin' is the size (in
  // bits) of the span -- this might include padding when we've advanced to a
  // subsequent bitfield run.
  RecordDecl::field_iterator begin = fieldEnd;
  CharUnits beginOffset;
  uint64_t bitSizeSinceBegin;

  // The (non-inclusive) end of the largest acceptable access unit we've found
  // since 'begin'. If this is 'begin', we're gathering the initial set of
  // bitfields of a new span. 'bestEndOffset' is the end of that acceptable
  // access unit -- it might extend beyond the last character of the bitfield
  // run, using available padding characters.
  RecordDecl::field_iterator bestEnd = begin;
  CharUnits bestEndOffset;
  bool bestClipped; // Whether the representation must be in a byte array.

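  // For example (illustrative): with a 32-bit register width and
  //   struct S { unsigned a : 8; unsigned b : 8; unsigned c : 16; };
  // the three contiguous bitfields fit in one 32-bit access unit, so a single
  // 32-bit storage member is created and a, b, and c are placed over it at
  // bit offsets 0, 8, and 16.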
  for (;;) {
    // atAlignedBoundary is true if 'field' is the (potential) start of a new
    // span (or the end of the bitfields). When true, limitOffset is the
    // character offset of that span and barrier indicates whether the new
    // span cannot be merged into the current one.
    bool atAlignedBoundary = false;
    bool barrier = false; // A barrier can be a zero-width bitfield or a
                          // non-bitfield member.
    if (field != fieldEnd && field->isBitField()) {
      uint64_t bitOffset = getFieldBitOffset(*field);
      if (begin == fieldEnd) {
        // Beginning a new span.
        begin = field;
        bestEnd = begin;

        assert((bitOffset % charBits) == 0 && "Not at start of char");
        beginOffset = bitsToCharUnits(bitOffset);
        bitSizeSinceBegin = 0;
      } else if ((bitOffset % charBits) != 0) {
        // Bitfield occupies the same character as the previous bitfield; it
        // must be part of the same span. This can include zero-length
        // bitfields, should the target not align them to character
        // boundaries. Such non-alignment is at variance with the standards,
        // which require zero-length bitfields be a barrier between access
        // units. But of course we can't achieve that in the middle of a
        // character.
        assert(bitOffset ==
                   astContext.toBits(beginOffset) + bitSizeSinceBegin &&
               "Concatenating non-contiguous bitfields");
      } else {
        // Bitfield potentially begins a new span. This includes zero-length
        // bitfields on non-aligning targets that lie at character boundaries
        // (those are barriers to merging).
        if (field->isZeroLengthBitField())
          barrier = true;
        atAlignedBoundary = true;
      }
    } else {
      // We've reached the end of the bitfield run. Either we're done, or this
      // is a barrier for the current span.
      if (begin == fieldEnd)
        break;

      barrier = true;
      atAlignedBoundary = true;
    }

    // 'installBest' indicates whether we should create an access unit for the
    // current best span: fields ['begin', 'bestEnd') occupying characters
    // ['beginOffset', 'bestEndOffset').
    bool installBest = false;
    if (atAlignedBoundary) {
      // 'field' is the start of a new span or the end of the bitfields. The
      // just-seen span now extends to 'bitSizeSinceBegin'.

      // Determine if we can accumulate that just-seen span into the current
      // accumulation.
      CharUnits accessSize = bitsToCharUnits(bitSizeSinceBegin + charBits - 1);
      if (bestEnd == begin) {
        // This is the initial run at the start of a new span. By definition,
        // this is the best seen so far.
        bestEnd = field;
        bestEndOffset = beginOffset + accessSize;
        // Assume clipped until proven not below.
        bestClipped = true;
        if (!bitSizeSinceBegin)
          // A zero-sized initial span -- this will install nothing and reset
          // for another.
          installBest = true;
      } else if (accessSize > regSize) {
        // Accumulating the just-seen span would create a multi-register
        // access unit, which would increase register pressure.
        installBest = true;
      }

      if (!installBest) {
        // Determine if accumulating the just-seen span will create an
        // expensive access unit or not.
        mlir::Type type = getUIntNType(astContext.toBits(accessSize));
        if (!astContext.getTargetInfo().hasCheapUnalignedBitFieldAccess())
          cirGenTypes.getCGModule().errorNYI(
              field->getSourceRange(), "NYI CheapUnalignedBitFieldAccess");

        if (!installBest) {
          // Find the next used storage offset to determine what the limit of
          // the current span is. That's either the offset of the next field
          // with storage (which might be field itself) or the end of the
          // non-reusable tail padding.
          CharUnits limitOffset;
          for (auto probe = field; probe != fieldEnd; ++probe)
            if (!isEmptyFieldForLayout(astContext, *probe)) {
              // A member with storage sets the limit.
              assert((getFieldBitOffset(*probe) % charBits) == 0 &&
                     "Next storage is not byte-aligned");
              limitOffset = bitsToCharUnits(getFieldBitOffset(*probe));
              goto FoundLimit;
            }
          limitOffset = cxxRecordDecl ? astRecordLayout.getNonVirtualSize()
                                      : astRecordLayout.getDataSize();

        FoundLimit:
          CharUnits typeSize = getSize(type);
          if (beginOffset + typeSize <= limitOffset) {
            // There is space before limitOffset to create a naturally-sized
            // access unit.
            bestEndOffset = beginOffset + typeSize;
            bestEnd = field;
            bestClipped = false;
          }
          if (barrier) {
            // The next field is a barrier that we cannot merge across.
            installBest = true;
          } else if (cirGenTypes.getCGModule()
                         .getCodeGenOpts()
                         .FineGrainedBitfieldAccesses) {
            installBest = true;
          } else {
            // Otherwise, we're not installing. Update the bit size of the
            // current span to go all the way to limitOffset, which is the
            // (aligned) offset of the next bitfield to consider.
            bitSizeSinceBegin = astContext.toBits(limitOffset - beginOffset);
          }
        }
      }
    }

    if (installBest) {
      assert((field == fieldEnd || !field->isBitField() ||
              (getFieldBitOffset(*field) % charBits) == 0) &&
             "Installing but not at an aligned bitfield or limit");
      CharUnits accessSize = bestEndOffset - beginOffset;
      if (!accessSize.isZero()) {
        // Add the storage member for the access unit to the record. The
        // bitfields get the offset of their storage but come afterward and
        // remain there after a stable sort.
        mlir::Type type;
        if (bestClipped) {
          assert(getSize(getUIntNType(astContext.toBits(accessSize))) >
                     accessSize &&
                 "Clipped access need not be clipped");
          type = getByteArrayType(accessSize);
        } else {
          type = getUIntNType(astContext.toBits(accessSize));
          assert(getSize(type) == accessSize &&
                 "Unclipped access must be clipped");
        }
        members.push_back(makeStorageInfo(beginOffset, type));
        for (; begin != bestEnd; ++begin)
          if (!begin->isZeroLengthBitField())
            members.push_back(MemberInfo(
                beginOffset, MemberInfo::InfoKind::Field, nullptr, *begin));
      }
      // Reset to start a new span.
      field = bestEnd;
      begin = fieldEnd;
    } else {
      assert(field != fieldEnd && field->isBitField() &&
             "Accumulating past end of bitfields");
      assert(!barrier && "Accumulating across barrier");
      // Accumulate this bitfield into the current (potential) span.
      bitSizeSinceBegin += field->getBitWidthValue();
      ++field;
    }
  }

  return field;
}

void CIRRecordLowering::accumulateFields() {
  for (RecordDecl::field_iterator field = recordDecl->field_begin(),
                                  fieldEnd = recordDecl->field_end();
       field != fieldEnd;) {
    if (field->isBitField()) {
      field = accumulateBitFields(field, fieldEnd);
      assert((field == fieldEnd || !field->isBitField()) &&
             "Failed to accumulate all the bitfields");
    } else if (!field->isZeroSize(astContext)) {
      members.push_back(MemberInfo(bitsToCharUnits(getFieldBitOffset(*field)),
                                   MemberInfo::InfoKind::Field,
                                   getStorageType(*field), *field));
      ++field;
    } else {
      // TODO(cir): do we want to do anything special about zero size members?
      assert(!cir::MissingFeatures::zeroSizeRecordMembers());
      ++field;
    }
  }
}

void CIRRecordLowering::calculateZeroInit() {
  for (const MemberInfo &member : members) {
    if (member.kind == MemberInfo::InfoKind::Field) {
      if (!member.fieldDecl || isZeroInitializable(member.fieldDecl))
        continue;
      zeroInitializable = zeroInitializableAsBase = false;
      return;
    } else if (member.kind == MemberInfo::InfoKind::Base ||
               member.kind == MemberInfo::InfoKind::VBase) {
      if (isZeroInitializable(member.cxxRecordDecl))
        continue;
      zeroInitializable = false;
      if (member.kind == MemberInfo::InfoKind::Base)
        zeroInitializableAsBase = false;
    }
  }
}

void CIRRecordLowering::determinePacked(bool nvBaseType) {
  if (packed)
    return;
  CharUnits alignment = CharUnits::One();
  CharUnits nvAlignment = CharUnits::One();
  CharUnits nvSize = !nvBaseType && cxxRecordDecl
                         ? astRecordLayout.getNonVirtualSize()
                         : CharUnits::Zero();

  for (const MemberInfo &member : members) {
    if (!member.data)
      continue;
    // If any member falls at an offset that is not a multiple of its
    // alignment, then the entire record must be packed.
    if (!member.offset.isMultipleOf(getAlignment(member.data)))
      packed = true;
    if (member.offset < nvSize)
      nvAlignment = std::max(nvAlignment, getAlignment(member.data));
    alignment = std::max(alignment, getAlignment(member.data));
  }
  // If the size of the record (the capstone's offset) is not a multiple of
  // the record's alignment, it must be packed.
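  // (The capstone is the sentinel member that lower() pushed at the record's
  // total size, so members.back() here marks the end of the record.)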
  if (!members.back().offset.isMultipleOf(alignment))
    packed = true;
  // If the non-virtual sub-object is not a multiple of the non-virtual
  // sub-object's alignment, it must be packed. We cannot have a packed
  // non-virtual sub-object and an unpacked complete object or vice versa.
  if (!nvSize.isMultipleOf(nvAlignment))
    packed = true;
  // Update the alignment of the sentinel.
  if (!packed)
    members.back().data = getUIntNType(astContext.toBits(alignment));
}

void CIRRecordLowering::insertPadding() {
  std::vector<std::pair<CharUnits, CharUnits>> padding;
  CharUnits size = CharUnits::Zero();
  for (const MemberInfo &member : members) {
    if (!member.data)
      continue;
    CharUnits offset = member.offset;
    assert(offset >= size);
    // Insert padding if we need to.
    if (offset !=
        size.alignTo(packed ? CharUnits::One() : getAlignment(member.data)))
      padding.push_back(std::make_pair(size, offset - size));
    size = offset + getSize(member.data);
  }
  if (padding.empty())
    return;
  padded = true;
  // Add the padding to the members list and sort it.
  for (const std::pair<CharUnits, CharUnits> &paddingPair : padding)
    members.push_back(makeStorageInfo(paddingPair.first,
                                      getByteArrayType(paddingPair.second)));
  llvm::stable_sort(members);
}

std::unique_ptr<CIRGenRecordLayout>
CIRGenTypes::computeRecordLayout(const RecordDecl *rd, cir::RecordType *ty) {
  CIRRecordLowering lowering(*this, rd, /*packed=*/false);
  assert(ty->isIncomplete() && "recomputing record layout?");
  lowering.lower(/*nonVirtualBaseType=*/false);

  // If we're in C++, compute the base subobject type.
  cir::RecordType baseTy;
  if (llvm::isa<CXXRecordDecl>(rd) && !rd->isUnion() &&
      !rd->hasAttr<FinalAttr>()) {
    baseTy = *ty;
    if (lowering.astRecordLayout.getNonVirtualSize() !=
        lowering.astRecordLayout.getSize()) {
      CIRRecordLowering baseLowering(*this, rd, /*packed=*/lowering.packed);
      baseLowering.lower(/*nonVirtualBaseType=*/true);
      std::string baseIdentifier = getRecordTypeName(rd, ".base");
      baseTy = builder.getCompleteNamedRecordType(
          baseLowering.fieldTypes, baseLowering.packed, baseLowering.padded,
          baseIdentifier);
      // TODO(cir): add something like addRecordTypeName

      // baseTy and ty must agree on their packedness for getCIRFieldNo to
      // work on both of them with the same index.
      assert(lowering.packed == baseLowering.packed &&
             "Non-virtual and complete types must agree on packedness");
    }
  }

  // Fill in the record *after* computing the base type. Filling in the body
  // signifies that the type is no longer opaque and record layout is
  // complete, but we may need to recursively lay out rd while laying rd out
  // as a base type.
  assert(!cir::MissingFeatures::astRecordDeclAttr());
  ty->complete(lowering.fieldTypes, lowering.packed, lowering.padded);

  auto rl = std::make_unique<CIRGenRecordLayout>(
      ty ? *ty : cir::RecordType{}, baseTy ? baseTy : cir::RecordType{},
      (bool)lowering.zeroInitializable,
      (bool)lowering.zeroInitializableAsBase);

  rl->nonVirtualBases.swap(lowering.nonVirtualBases);
  rl->completeObjectVirtualBases.swap(lowering.virtualBases);

  // Add all the field numbers.
  rl->fieldIdxMap.swap(lowering.fieldIdxMap);

  rl->bitFields.swap(lowering.bitFields);

  // Dump the layout, if requested.
  if (getASTContext().getLangOpts().DumpRecordLayouts) {
    llvm::outs() << "\n*** Dumping CIRgen Record Layout\n";
    llvm::outs() << "Record: ";
    rd->dump(llvm::outs());
    llvm::outs() << "\nLayout: ";
    rl->print(llvm::outs());
  }

  // TODO: implement verification
  return rl;
}

void CIRGenRecordLayout::print(raw_ostream &os) const {
  os << "<CIRGenRecordLayout\n";
  os << " CIR Type:" << completeObjectType << "\n";
  if (baseSubobjectType)
    os << " NonVirtualBaseCIRType:" << baseSubobjectType << "\n";
  os << " IsZeroInitializable:" << zeroInitializable << "\n";
  os << " BitFields:[\n";
  std::vector<std::pair<unsigned, const CIRGenBitFieldInfo *>> bitInfo;
  for (auto &[decl, info] : bitFields) {
    const RecordDecl *rd = decl->getParent();
    unsigned index = 0;
    for (RecordDecl::field_iterator it = rd->field_begin(); *it != decl; ++it)
      ++index;
    bitInfo.push_back(std::make_pair(index, &info));
  }
  llvm::array_pod_sort(bitInfo.begin(), bitInfo.end());
  for (std::pair<unsigned, const CIRGenBitFieldInfo *> &info : bitInfo) {
    os.indent(4);
    info.second->print(os);
    os << "\n";
  }
  os << " ]>\n";
}

void CIRGenBitFieldInfo::print(raw_ostream &os) const {
  os << "<CIRBitFieldInfo" << " name:" << name << " offset:" << offset
     << " size:" << size << " isSigned:" << isSigned
     << " storageSize:" << storageSize
     << " storageOffset:" << storageOffset.getQuantity()
     << " volatileOffset:" << volatileOffset
     << " volatileStorageSize:" << volatileStorageSize
     << " volatileStorageOffset:" << volatileStorageOffset.getQuantity() << ">";
}

void CIRGenRecordLayout::dump() const { print(llvm::errs()); }

void CIRGenBitFieldInfo::dump() const { print(llvm::errs()); }

void CIRRecordLowering::lowerUnion() {
  CharUnits layoutSize = astRecordLayout.getSize();
  mlir::Type storageType = nullptr;
  bool seenNamedMember = false;

  // Iterate through the fields setting bitFieldInfo and the Fields array. Also
  // locate the "most appropriate" storage type.
  for (const FieldDecl *field : recordDecl->fields()) {
    mlir::Type fieldType;
    if (field->isBitField()) {
      if (field->isZeroLengthBitField())
        continue;
      fieldType = getBitfieldStorageType(field->getBitWidthValue());
      setBitFieldInfo(field, CharUnits::Zero(), fieldType);
    } else {
      fieldType = getStorageType(field);
    }

    // This maps a field to its index. For unions, the index is always 0.
    fieldIdxMap[field->getCanonicalDecl()] = 0;

    // Compute zero-initializable status.
    // This union might not be zero initialized: it may contain a pointer to
    // a data member, which might have some exotic initialization sequence.
    // If this is the case, then we ought not to try and come up with a
    // "better" type; it might not be very easy to come up with a Constant
    // which correctly initializes it.
    if (!seenNamedMember) {
      seenNamedMember = field->getIdentifier();
      if (!seenNamedMember)
        if (const RecordDecl *fieldRD = field->getType()->getAsRecordDecl())
          seenNamedMember = fieldRD->findFirstNamedDataMember();
      if (seenNamedMember && !isZeroInitializable(field)) {
        zeroInitializable = zeroInitializableAsBase = false;
        storageType = fieldType;
      }
    }

    // Because our union isn't zero initializable, we won't be getting a better
    // storage type.
    if (!zeroInitializable)
      continue;

    // Conditionally update our storage type if we've got a new "better" one.
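    // "Better" means strictly higher alignment, or equal alignment with a
    // larger size; e.g. (illustrative) in union { int i; double d; } the
    // double member wins on typical 64-bit targets.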
    if (!storageType || getAlignment(fieldType) > getAlignment(storageType) ||
        (getAlignment(fieldType) == getAlignment(storageType) &&
         getSize(fieldType) > getSize(storageType)))
      storageType = fieldType;

    // NOTE(cir): Track all union members' types, not just the largest one.
    // This allows for proper type-checking and retains more info for
    // analysis.
    fieldTypes.push_back(fieldType);
  }

  if (!storageType)
    cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
                                       "No-storage Union NYI");

  if (layoutSize < getSize(storageType))
    storageType = getByteArrayType(layoutSize);
  else
    appendPaddingBytes(layoutSize - getSize(storageType));

  // Set packed if we need it.
  if (!layoutSize.isMultipleOf(getAlignment(storageType)))
    packed = true;
}

bool CIRRecordLowering::hasOwnStorage(const CXXRecordDecl *decl,
                                      const CXXRecordDecl *query) {
  const ASTRecordLayout &declLayout = astContext.getASTRecordLayout(decl);
  if (declLayout.isPrimaryBaseVirtual() && declLayout.getPrimaryBase() == query)
    return false;
  for (const auto &base : decl->bases())
    if (!hasOwnStorage(base.getType()->getAsCXXRecordDecl(), query))
      return false;
  return true;
}

/// The AAPCS requires that, when possible, bit-fields should
/// be accessed using containers of the declared type width:
/// When a volatile bit-field is read, and its container does not overlap with
/// any non-bit-field member or any zero-length bit-field member, its container
/// must be read exactly once using the access width appropriate to the type of
/// the container. When a volatile bit-field is written, and its container does
/// not overlap with any non-bit-field member or any zero-length bit-field
/// member, its container must be read exactly once and written exactly once
/// using the access width appropriate to the type of the container. The two
/// accesses are not atomic.
///
/// Enforcing the width restriction can be disabled using
/// -fno-aapcs-bitfield-width.
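/// For example (illustrative): given
///   struct S { volatile int f : 8; };
/// an AAPCS target accesses 'f' through a full 32-bit container (the width of
/// int) rather than the narrower storage unit record layout might otherwise
/// pick.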
void CIRRecordLowering::computeVolatileBitfields() {
  if (!isAAPCS() ||
      !cirGenTypes.getCGModule().getCodeGenOpts().AAPCSBitfieldWidth)
    return;

  for (auto &[field, info] : bitFields) {
    mlir::Type resLTy = cirGenTypes.convertTypeForMem(field->getType());

    if (astContext.toBits(astRecordLayout.getAlignment()) <
        getSizeInBits(resLTy).getQuantity())
      continue;

    // CIRRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
    // for big-endian targets, but it assumes a container of width
    // info.storageSize. Since AAPCS uses a different container size (width
    // of the type), we first undo that calculation here and redo it once
    // the bit-field offset within the new container is calculated.
    const unsigned oldOffset =
        isBigEndian() ? info.storageSize - (info.offset + info.size)
                      : info.offset;
    // Offset to the bit-field from the beginning of the struct.
    const unsigned absoluteOffset =
        astContext.toBits(info.storageOffset) + oldOffset;

    // Container size is the width of the bit-field type.
    const unsigned storageSize = getSizeInBits(resLTy).getQuantity();
    // Nothing to do if the access uses the desired
    // container width and is naturally aligned.
    if (info.storageSize == storageSize && (oldOffset % storageSize == 0))
      continue;

    // Offset within the container.
    unsigned offset = absoluteOffset & (storageSize - 1);
    // Bail out if an aligned load of the container cannot cover the entire
    // bit-field. This can happen, for example, if the bit-field is part of a
    // packed struct. AAPCS does not define access rules for such cases, so we
    // let clang follow its own rules.
    if (offset + info.size > storageSize)
      continue;

    // Re-adjust offsets for big-endian targets.
    if (isBigEndian())
      offset = storageSize - (offset + info.size);

    const CharUnits storageOffset =
        astContext.toCharUnitsFromBits(absoluteOffset & ~(storageSize - 1));
    const CharUnits end = storageOffset +
                          astContext.toCharUnitsFromBits(storageSize) -
                          CharUnits::One();

    const ASTRecordLayout &layout =
        astContext.getASTRecordLayout(field->getParent());
    // If the access reaches outside the memory of the record, bail out.
    const CharUnits recordSize = layout.getSize();
    if (end >= recordSize)
      continue;

    // Bail out if performing this load would access non-bit-field members.
    bool conflict = false;
    for (const auto *f : recordDecl->fields()) {
      // Allow sized bit-field overlaps.
      if (f->isBitField() && !f->isZeroLengthBitField())
        continue;

      const CharUnits fOffset = astContext.toCharUnitsFromBits(
          layout.getFieldOffset(f->getFieldIndex()));

      // As C11 defines, a zero-sized bit-field defines a barrier, so
      // fields after and before it should be race-condition free.
      // The AAPCS acknowledges this and imposes no restrictions when the
      // natural container overlaps a zero-length bit-field.
      if (f->isZeroLengthBitField()) {
        if (end > fOffset && storageOffset < fOffset) {
          conflict = true;
          break;
        }
      }

      const CharUnits fEnd =
          fOffset +
          astContext.toCharUnitsFromBits(
              getSizeInBits(cirGenTypes.convertTypeForMem(f->getType()))
                  .getQuantity()) -
          CharUnits::One();
      // If there is no overlap, continue.
      if (end < fOffset || fEnd < storageOffset)
        continue;

      // The desired load overlaps a non-bit-field member; bail out.
      conflict = true;
      break;
    }

    if (conflict)
      continue;
    // Write the new bit-field access parameters.
    // As the storage offset now is defined as the number of elements from the
    // start of the structure, we should divide the offset by the element size.
    info.volatileStorageOffset =
        storageOffset /
        astContext.toCharUnitsFromBits(storageSize).getQuantity();
    info.volatileStorageSize = storageSize;
    info.volatileOffset = offset;
  }
}

void CIRRecordLowering::accumulateBases() {
  // If we've got a primary virtual base, we need to add it with the bases.
  if (astRecordLayout.isPrimaryBaseVirtual()) {
    const CXXRecordDecl *baseDecl = astRecordLayout.getPrimaryBase();
    members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::InfoKind::Base,
                                 getStorageType(baseDecl), baseDecl));
  }

  // Accumulate the non-virtual bases.
  for (const auto &base : cxxRecordDecl->bases()) {
    if (base.isVirtual())
      continue;
    // Bases can be zero-sized even if not technically empty if they
    // contain only a trailing array member.
    const CXXRecordDecl *baseDecl = base.getType()->getAsCXXRecordDecl();
    if (!baseDecl->isEmpty() &&
        !astContext.getASTRecordLayout(baseDecl).getNonVirtualSize().isZero()) {
      members.push_back(MemberInfo(astRecordLayout.getBaseClassOffset(baseDecl),
                                   MemberInfo::InfoKind::Base,
                                   getStorageType(baseDecl), baseDecl));
    }
  }
}

void CIRRecordLowering::accumulateVBases() {
  for (const auto &base : cxxRecordDecl->vbases()) {
    const CXXRecordDecl *baseDecl = base.getType()->getAsCXXRecordDecl();
    if (isEmptyRecordForLayout(astContext, base.getType()))
      continue;
    CharUnits offset = astRecordLayout.getVBaseClassOffset(baseDecl);
    // If the vbase is a primary virtual base of some base, then it doesn't
    // get its own storage location but instead lives inside of that base.
    if (isOverlappingVBaseABI() && astContext.isNearlyEmpty(baseDecl) &&
        !hasOwnStorage(cxxRecordDecl, baseDecl)) {
      members.push_back(
          MemberInfo(offset, MemberInfo::InfoKind::VBase, nullptr, baseDecl));
      continue;
    }
    // If we've got a vtordisp, add it as a storage type.
    if (astRecordLayout.getVBaseOffsetsMap()
            .find(baseDecl)
            ->second.hasVtorDisp())
      members.push_back(makeStorageInfo(offset - CharUnits::fromQuantity(4),
                                        getUIntNType(32)));
    members.push_back(MemberInfo(offset, MemberInfo::InfoKind::VBase,
                                 getStorageType(baseDecl), baseDecl));
  }
}

void CIRRecordLowering::accumulateVPtrs() {
  if (astRecordLayout.hasOwnVFPtr())
    members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::InfoKind::VFPtr,
                                 getVFPtrType()));

  if (astRecordLayout.hasOwnVBPtr())
    cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
                                       "accumulateVPtrs: hasOwnVBPtr");
}

mlir::Type CIRRecordLowering::getVFPtrType() {
  return cir::VPtrType::get(builder.getContext());
}