clang 19.0.0git
CGRecordLayoutBuilder.cpp
1//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Builder implementation for CGRecordLayout objects.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGRecordLayout.h"
14#include "CGCXXABI.h"
15#include "CodeGenTypes.h"
17#include "clang/AST/Attr.h"
19#include "clang/AST/DeclCXX.h"
20#include "clang/AST/Expr.h"
23#include "llvm/IR/DataLayout.h"
24#include "llvm/IR/DerivedTypes.h"
25#include "llvm/IR/Type.h"
26#include "llvm/Support/Debug.h"
27#include "llvm/Support/MathExtras.h"
28#include "llvm/Support/raw_ostream.h"
29using namespace clang;
30using namespace CodeGen;
31
32namespace {
33/// The CGRecordLowering is responsible for lowering an ASTRecordLayout to an
34/// llvm::Type. Some of the lowering is straightforward, some is not. Here we
35/// detail some of the complexities and weirdnesses.
36/// * LLVM does not have unions - Unions can, in theory be represented by any
37/// llvm::Type with correct size. We choose a field via a specific heuristic
38/// and add padding if necessary.
39/// * LLVM does not have bitfields - Bitfields are collected into contiguous
40/// runs and allocated as a single storage type for the run. ASTRecordLayout
41/// contains enough information to determine where the runs break. Microsoft
42/// and Itanium follow different rules and use different codepaths.
43/// * It is desired that, when possible, bitfields use the appropriate iN type
44/// when lowered to llvm types. For example unsigned x : 24 gets lowered to
45/// i24. This isn't always possible because i24 has a storage size of 32 bits,
46/// and if that extra byte of padding can be reused by another field or base we
47/// must use [3 x i8] instead of i24. This is computed when accumulating
48/// bitfields in accumulateBitFields.
49/// C++ examples that require clipping:
50/// struct { int a : 24; char b; }; // a must be clipped, b goes at offset 3
51/// struct A { int a : 24; ~A(); }; // a must be clipped because:
52/// struct B : A { char b; }; // b goes at offset 3
53/// * The allocation of bitfield access units is described in more detail in
54/// CGRecordLowering::accumulateBitFields.
55/// * Clang ignores 0 sized bitfields and 0 sized bases but *not* zero sized
56/// fields. The existing asserts suggest that LLVM assumes that *every* field
57/// has an underlying storage type. Therefore empty structures containing
58/// zero sized subobjects such as empty records or zero sized arrays still get
59/// a zero sized (empty struct) storage type.
60/// * Clang reads the complete type rather than the base type when generating
61/// code to access fields. Bitfields in tail position with tail padding may
62/// be clipped in the base class but not the complete class (we may discover
63/// that the tail padding is not used in the complete class.) However,
64/// because LLVM reads from the complete type it can generate incorrect code
65/// if we do not clip the tail padding off of the bitfield in the complete
66/// layout.
67/// * Itanium allows nearly empty primary virtual bases. These bases don't get
68/// their own storage because they're laid out as part of another base
69/// or at the beginning of the structure. Determining if a VBase actually
70/// gets storage awkwardly involves a walk of all bases.
71/// * VFPtrs and VBPtrs do *not* make a record NotZeroInitializable.
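///
/// An illustrative sketch of the clipping rule above (assuming an Itanium
/// target; the exact types are target-dependent):
///   struct { int a : 24; char b; };
/// lowers to roughly { [3 x i8], i8 }: the 24-bit access unit for 'a' must be
/// a 3-byte array because byte 3 is occupied by 'b' and cannot serve as the
/// padding byte that an i24 (storage size 32 bits) would otherwise use.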
72struct CGRecordLowering {
73 // MemberInfo is a helper structure that contains information about a record
74 // member. In addition to the standard member types, there exists a
75 // sentinel member type that ensures correct rounding.
76 struct MemberInfo {
77 CharUnits Offset;
78 enum InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } Kind;
79 llvm::Type *Data;
80 union {
81 const FieldDecl *FD;
82 const CXXRecordDecl *RD;
83 };
84 MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
85 const FieldDecl *FD = nullptr)
86 : Offset(Offset), Kind(Kind), Data(Data), FD(FD) {}
87 MemberInfo(CharUnits Offset, InfoKind Kind, llvm::Type *Data,
88 const CXXRecordDecl *RD)
89 : Offset(Offset), Kind(Kind), Data(Data), RD(RD) {}
90 // MemberInfos are sorted so we define a < operator.
91 bool operator <(const MemberInfo& a) const { return Offset < a.Offset; }
92 };
93 // The constructor.
94 CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
95 // Short helper routines.
96 /// Constructs a MemberInfo instance from an offset and llvm::Type *.
97 static MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
98 return MemberInfo(Offset, MemberInfo::Field, Data);
99 }
100
101 /// The Microsoft bitfield layout rule allocates discrete storage
102 /// units of the field's formal type and only combines adjacent
103 /// fields of the same formal type. We want to emit a layout with
104 /// these discrete storage units instead of combining them into a
105 /// continuous run.
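///
/// Illustration (not exhaustive; the exact layout is dictated by the MS ABI):
/// under these rules, in
///   struct { int a : 3; int b : 4; short c : 5; };
/// 'a' and 'b' share one int-sized storage unit while 'c' starts a new
/// short-sized unit, rather than all three being packed into a single
/// access unit.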
106 bool isDiscreteBitFieldABI() const {
107 return Context.getTargetInfo().getCXXABI().isMicrosoft() ||
108 D->isMsStruct(Context);
109 }
110
111 /// Helper function to check if we are targeting AAPCS.
112 bool isAAPCS() const {
113 return Context.getTargetInfo().getABI().starts_with("aapcs");
114 }
115
116 /// Helper function to check if the target machine is BigEndian.
117 bool isBE() const { return Context.getTargetInfo().isBigEndian(); }
118
119 /// The Itanium base layout rule allows virtual bases to overlap
120 /// other bases, which complicates layout in specific ways.
121 ///
122 /// Note specifically that the ms_struct attribute doesn't change this.
123 bool isOverlappingVBaseABI() const {
124 return !Context.getTargetInfo().getCXXABI().isMicrosoft();
125 }
126
127 /// Wraps llvm::Type::getIntNTy with some implicit arguments.
128 llvm::Type *getIntNType(uint64_t NumBits) const {
129 unsigned AlignedBits = llvm::alignTo(NumBits, Context.getCharWidth());
130 return llvm::Type::getIntNTy(Types.getLLVMContext(), AlignedBits);
131 }
132 /// Get the LLVM type sized as one character unit.
133 llvm::Type *getCharType() const {
134 return llvm::Type::getIntNTy(Types.getLLVMContext(),
135 Context.getCharWidth());
136 }
137 /// Gets an llvm type of size NumChars and alignment 1.
138 llvm::Type *getByteArrayType(CharUnits NumChars) const {
139 assert(!NumChars.isZero() && "Empty byte arrays aren't allowed.");
140 llvm::Type *Type = getCharType();
141 return NumChars == CharUnits::One() ? Type :
142 (llvm::Type *)llvm::ArrayType::get(Type, NumChars.getQuantity());
143 }
144 /// Gets the storage type for a field decl and handles storage
145 /// for itanium bitfields that are smaller than their declared type.
146 llvm::Type *getStorageType(const FieldDecl *FD) const {
147 llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
148 if (!FD->isBitField()) return Type;
149 if (isDiscreteBitFieldABI()) return Type;
150 return getIntNType(std::min(FD->getBitWidthValue(Context),
151 (unsigned)Context.toBits(getSize(Type))));
152 }
153 /// Gets the llvm base subobject type from a CXXRecordDecl.
154 llvm::Type *getStorageType(const CXXRecordDecl *RD) const {
155 return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
156 }
157 CharUnits bitsToCharUnits(uint64_t BitOffset) const {
158 return Context.toCharUnitsFromBits(BitOffset);
159 }
160 CharUnits getSize(llvm::Type *Type) const {
161 return CharUnits::fromQuantity(DataLayout.getTypeAllocSize(Type));
162 }
163 CharUnits getAlignment(llvm::Type *Type) const {
164 return CharUnits::fromQuantity(DataLayout.getABITypeAlign(Type));
165 }
166 bool isZeroInitializable(const FieldDecl *FD) const {
167 return Types.isZeroInitializable(FD->getType());
168 }
169 bool isZeroInitializable(const RecordDecl *RD) const {
170 return Types.isZeroInitializable(RD);
171 }
172 void appendPaddingBytes(CharUnits Size) {
173 if (!Size.isZero())
174 FieldTypes.push_back(getByteArrayType(Size));
175 }
176 uint64_t getFieldBitOffset(const FieldDecl *FD) const {
177 return Layout.getFieldOffset(FD->getFieldIndex());
178 }
179 // Layout routines.
180 void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset,
181 llvm::Type *StorageType);
182 /// Lowers an ASTRecordLayout to a llvm type.
183 void lower(bool NonVirtualBaseType);
184 void lowerUnion(bool isNoUniqueAddress);
185 void accumulateFields(bool isNonVirtualBaseType);
187 accumulateBitFields(bool isNonVirtualBaseType,
190 void computeVolatileBitfields();
191 void accumulateBases();
192 void accumulateVPtrs();
193 void accumulateVBases();
194 /// Recursively searches all of the bases to find out if a vbase is
195 /// not the primary vbase of some base class.
196 bool hasOwnStorage(const CXXRecordDecl *Decl,
197 const CXXRecordDecl *Query) const;
198 void calculateZeroInit();
199 CharUnits calculateTailClippingOffset(bool isNonVirtualBaseType) const;
200 void checkBitfieldClipping() const;
201 /// Determines if we need a packed llvm struct.
202 void determinePacked(bool NVBaseType);
203 /// Inserts padding everywhere it's needed.
204 void insertPadding();
205 /// Fills out the structures that are ultimately consumed.
206 void fillOutputFields();
207 // Input memoization fields.
208 CodeGenTypes &Types;
209 const ASTContext &Context;
210 const RecordDecl *D;
211 const CXXRecordDecl *RD;
212 const ASTRecordLayout &Layout;
213 const llvm::DataLayout &DataLayout;
214 // Helpful intermediate data-structures.
215 std::vector<MemberInfo> Members;
216 // Output fields, consumed by CodeGenTypes::ComputeRecordLayout.
217 SmallVector<llvm::Type *, 16> FieldTypes;
218 llvm::DenseMap<const FieldDecl *, unsigned> Fields;
219 llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
220 llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
221 llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
222 bool IsZeroInitializable : 1;
223 bool IsZeroInitializableAsBase : 1;
224 bool Packed : 1;
225private:
226 CGRecordLowering(const CGRecordLowering &) = delete;
227 void operator =(const CGRecordLowering &) = delete;
228};
229} // namespace {
230
231CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D,
232 bool Packed)
233 : Types(Types), Context(Types.getContext()), D(D),
234 RD(dyn_cast<CXXRecordDecl>(D)),
235 Layout(Types.getContext().getASTRecordLayout(D)),
236 DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
237 IsZeroInitializableAsBase(true), Packed(Packed) {}
238
239void CGRecordLowering::setBitFieldInfo(
240 const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) {
241 CGBitFieldInfo &Info = BitFields[FD->getCanonicalDecl()];
242 Info.IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
243 Info.Offset = (unsigned)(getFieldBitOffset(FD) - Context.toBits(StartOffset));
244 Info.Size = FD->getBitWidthValue(Context);
245 Info.StorageSize = (unsigned)DataLayout.getTypeAllocSizeInBits(StorageType);
246 Info.StorageOffset = StartOffset;
247 if (Info.Size > Info.StorageSize)
248 Info.Size = Info.StorageSize;
249 // Reverse the bit offsets for big endian machines. Because we represent
250 // a bitfield as a single large integer load, we can imagine the bits
251 // counting from the most-significant-bit instead of the
252 // least-significant-bit.
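 // For example (illustrative): with a 32-bit storage unit, a field at
 // little-endian bit offset 0 with size 8 ends up at offset 32 - (0 + 8) = 24
 // on a big-endian target.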
253 if (DataLayout.isBigEndian())
254 Info.Offset = Info.StorageSize - (Info.Offset + Info.Size);
255
256 Info.VolatileStorageSize = 0;
257 Info.VolatileOffset = 0;
258 Info.VolatileStorageOffset = CharUnits::Zero();
259}
260
261void CGRecordLowering::lower(bool NVBaseType) {
262 // The lowering process implemented in this function takes a variety of
263 // carefully ordered phases.
264 // 1) Store all members (fields and bases) in a list and sort them by offset.
265 // 2) Add a 1-byte capstone member at the Size of the structure.
266 // 3) Clip bitfield storage members if their tail padding is or might be
267 // used by another field or base. The clipping process uses the capstone
268 // by treating it as another object that occurs after the record.
269 // 4) Determine if the llvm-struct requires packing. It's important that this
270 // phase occur after clipping, because clipping changes the llvm type.
271 // This phase reads the offset of the capstone when determining packedness
272 // and updates the alignment of the capstone to be equal to the alignment
273 // of the record after doing so.
274 // 5) Insert padding everywhere it is needed. This phase requires 'Packed' to
275 // have been computed and needs to know the alignment of the record in
276 // order to understand if explicit tail padding is needed.
277 // 6) Remove the capstone, we don't need it anymore.
278 // 7) Determine if this record can be zero-initialized. This phase could have
279 // been placed anywhere after phase 1.
280 // 8) Format the complete list of members in a way that can be consumed by
281 // CodeGenTypes::ComputeRecordLayout.
282 CharUnits Size = NVBaseType ? Layout.getNonVirtualSize() : Layout.getSize();
283 if (D->isUnion()) {
284 lowerUnion(NVBaseType);
285 computeVolatileBitfields();
286 return;
287 }
288 accumulateFields(NVBaseType);
289 // RD implies C++.
290 if (RD) {
291 accumulateVPtrs();
292 accumulateBases();
293 if (Members.empty()) {
294 appendPaddingBytes(Size);
295 computeVolatileBitfields();
296 return;
297 }
298 if (!NVBaseType)
299 accumulateVBases();
300 }
301 llvm::stable_sort(Members);
302 Members.push_back(StorageInfo(Size, getIntNType(8)));
303 checkBitfieldClipping();
304 determinePacked(NVBaseType);
305 insertPadding();
306 Members.pop_back();
307 calculateZeroInit();
308 fillOutputFields();
309 computeVolatileBitfields();
310}
311
312void CGRecordLowering::lowerUnion(bool isNoUniqueAddress) {
313 CharUnits LayoutSize =
314 isNoUniqueAddress ? Layout.getDataSize() : Layout.getSize();
315 llvm::Type *StorageType = nullptr;
316 bool SeenNamedMember = false;
317 // Iterate through the fields setting bitFieldInfo and the Fields array. Also
318 // locate the "most appropriate" storage type. The heuristic for finding the
319 // storage type isn't necessary; the first (non-zero-length-bitfield) field's
320 // type would work fine and be simpler, but it would be different from what
321 // we've been doing and would cause lit tests to change.
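 // A small illustration of the heuristic (assuming typical alignments): for
 // 'union { char c; int i; }' the int member wins (higher alignment), so the
 // union lowers to roughly { i32 }; for 'union { char buf[6]; int i; }' the
 // int still wins and the remainder becomes padding, giving roughly
 // { i32, [4 x i8] }.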
322 for (const auto *Field : D->fields()) {
323 if (Field->isBitField()) {
324 if (Field->isZeroLengthBitField(Context))
325 continue;
326 llvm::Type *FieldType = getStorageType(Field);
327 if (LayoutSize < getSize(FieldType))
328 FieldType = getByteArrayType(LayoutSize);
329 setBitFieldInfo(Field, CharUnits::Zero(), FieldType);
330 }
331 Fields[Field->getCanonicalDecl()] = 0;
332 llvm::Type *FieldType = getStorageType(Field);
333 // Compute zero-initializable status.
334 // This union might not be zero initialized: it may contain a pointer to
335 // a data member, which might have some exotic initialization sequence.
336 // If this is the case, then we ought not try to come up with a "better"
337 // type; it might not be very easy to come up with a Constant which
338 // correctly initializes it.
339 if (!SeenNamedMember) {
340 SeenNamedMember = Field->getIdentifier();
341 if (!SeenNamedMember)
342 if (const auto *FieldRD = Field->getType()->getAsRecordDecl())
343 SeenNamedMember = FieldRD->findFirstNamedDataMember();
344 if (SeenNamedMember && !isZeroInitializable(Field)) {
345 IsZeroInitializable = IsZeroInitializableAsBase = false;
346 StorageType = FieldType;
347 }
348 }
349 // Because our union isn't zero initializable, we won't be getting a better
350 // storage type.
351 if (!IsZeroInitializable)
352 continue;
353 // Conditionally update our storage type if we've got a new "better" one.
354 if (!StorageType ||
355 getAlignment(FieldType) > getAlignment(StorageType) ||
356 (getAlignment(FieldType) == getAlignment(StorageType) &&
357 getSize(FieldType) > getSize(StorageType)))
358 StorageType = FieldType;
359 }
360 // If we have no storage type just pad to the appropriate size and return.
361 if (!StorageType)
362 return appendPaddingBytes(LayoutSize);
363 // If our storage size was bigger than our required size (can happen in the
364 // case of packed bitfields on Itanium) then just use an I8 array.
365 if (LayoutSize < getSize(StorageType))
366 StorageType = getByteArrayType(LayoutSize);
367 FieldTypes.push_back(StorageType);
368 appendPaddingBytes(LayoutSize - getSize(StorageType));
369 // Set packed if we need it.
370 const auto StorageAlignment = getAlignment(StorageType);
371 assert((Layout.getSize() % StorageAlignment == 0 ||
372 Layout.getDataSize() % StorageAlignment) &&
373 "Union's standard layout and no_unique_address layout must agree on "
374 "packedness");
375 if (Layout.getDataSize() % StorageAlignment)
376 Packed = true;
377}
378
379void CGRecordLowering::accumulateFields(bool isNonVirtualBaseType) {
380 for (RecordDecl::field_iterator Field = D->field_begin(),
381 FieldEnd = D->field_end();
382 Field != FieldEnd;) {
383 if (Field->isBitField()) {
384 Field = accumulateBitFields(isNonVirtualBaseType, Field, FieldEnd);
385 assert((Field == FieldEnd || !Field->isBitField()) &&
386 "Failed to accumulate all the bitfields");
387 } else if (Field->isZeroSize(Context)) {
388 // Empty fields have no storage.
389 ++Field;
390 } else {
391 // Use base subobject layout for the potentially-overlapping field,
392 // as it is done in RecordLayoutBuilder
393 Members.push_back(MemberInfo(
394 bitsToCharUnits(getFieldBitOffset(*Field)), MemberInfo::Field,
395 Field->isPotentiallyOverlapping()
396 ? getStorageType(Field->getType()->getAsCXXRecordDecl())
397 : getStorageType(*Field),
398 *Field));
399 ++Field;
400 }
401 }
402}
403
404// Create members for bitfields. Field is a bitfield, and FieldEnd is the end
405// iterator of the record. Return the first non-bitfield encountered. We need
406// to know whether this is the base or complete layout, as virtual bases could
407// affect the upper bound of bitfield access unit allocation.
408RecordDecl::field_iterator
409CGRecordLowering::accumulateBitFields(bool isNonVirtualBaseType,
410 RecordDecl::field_iterator Field,
411 RecordDecl::field_iterator FieldEnd) {
412 if (isDiscreteBitFieldABI()) {
413 // Run stores the first element of the current run of bitfields. FieldEnd is
414 // used as a special value to note that we don't have a current run. A
415 // bitfield run is a contiguous collection of bitfields that can be stored
416 // in the same storage block. Zero-sized bitfields and bitfields that would
417 // cross an alignment boundary break a run and start a new one.
418 RecordDecl::field_iterator Run = FieldEnd;
419 // Tail is the offset of the first bit off the end of the current run. It's
420 // used to determine if the ASTRecordLayout is treating these two bitfields
421 // as contiguous. StartBitOffset is offset of the beginning of the Run.
422 uint64_t StartBitOffset, Tail = 0;
423 for (; Field != FieldEnd && Field->isBitField(); ++Field) {
424 // Zero-width bitfields end runs.
425 if (Field->isZeroLengthBitField(Context)) {
426 Run = FieldEnd;
427 continue;
428 }
429 uint64_t BitOffset = getFieldBitOffset(*Field);
430 llvm::Type *Type =
431 Types.ConvertTypeForMem(Field->getType(), /*ForBitField=*/true);
432 // If we don't have a run yet, or don't live within the previous run's
433 // allocated storage then we allocate some storage and start a new run.
434 if (Run == FieldEnd || BitOffset >= Tail) {
435 Run = Field;
436 StartBitOffset = BitOffset;
437 Tail = StartBitOffset + DataLayout.getTypeAllocSizeInBits(Type);
438 // Add the storage member to the record. This must be added to the
439 // record before the bitfield members so that it gets laid out before
440 // the bitfields it contains get laid out.
441 Members.push_back(StorageInfo(bitsToCharUnits(StartBitOffset), Type));
442 }
443 // Bitfields get the offset of their storage but come afterward and remain
444 // there after a stable sort.
445 Members.push_back(MemberInfo(bitsToCharUnits(StartBitOffset),
446 MemberInfo::Field, nullptr, *Field));
447 }
448 return Field;
449 }
450
451 // The SysV ABI can overlap bitfield storage units with both other bitfield
452 // storage units /and/ other non-bitfield data members. Accessing a sequence
453 // of bitfields mustn't interfere with adjacent non-bitfields -- they're
454 // permitted to be accessed in separate threads for instance.
455
456 // We split runs of bit-fields into a sequence of "access units". When we emit
457 // a load or store of a bit-field, we'll load/store the entire containing
458 // access unit. As mentioned, the standard requires that these loads and
459 // stores must not interfere with accesses to other memory locations, and it
460 // defines the bit-field's memory location as the current run of
461 // non-zero-width bit-fields. So an access unit must never overlap with
462 // non-bit-field storage or cross a zero-width bit-field. Otherwise, we're
463 // free to draw the lines as we see fit.
464
465 // Drawing these lines well can be complicated. LLVM generally can't modify a
466 // program to access memory that it didn't before, so using very narrow access
467 // units can prevent the compiler from using optimal access patterns. For
468 // example, suppose a run of bit-fields occupies four bytes in a struct. If we
469 // split that into four 1-byte access units, then a sequence of assignments
470 // that doesn't touch all four bytes may have to be emitted with multiple
471 // 8-bit stores instead of a single 32-bit store. On the other hand, if we use
472 // very wide access units, we may find ourselves emitting accesses to
473 // bit-fields we didn't really need to touch, just because LLVM was unable to
474 // clean up after us.
475
476 // It is desirable to have access units be aligned powers of 2 no larger than
477 // a register. (On non-strict alignment ISAs, the alignment requirement can be
478 // dropped.) A three byte access unit will be accessed using 2-byte and 1-byte
479 // accesses and bit manipulation. If no bitfield straddles across the two
480 // separate accesses, it is better to have separate 2-byte and 1-byte access
481 // units, as then LLVM will not generate unnecessary memory accesses, or bit
482 // manipulation. Similarly, on a strict-alignment architecture, it is better
483 // to keep access-units naturally aligned, to avoid similar bit
484 // manipulation synthesizing larger unaligned accesses.
485
486 // Bitfields that share parts of a single byte are, of necessity, placed in
487 // the same access unit. That unit will encompass a consecutive run where
488 // adjacent bitfields share parts of a byte. (The first bitfield of such an
489 // access unit will start at the beginning of a byte.)
490
491 // We then try and accumulate adjacent access units when the combined unit is
492 // naturally sized, no larger than a register, and (on a strict alignment
493 // ISA), naturally aligned. Note that this requires lookahead to one or more
494 // subsequent access units. For instance, consider a 2-byte access-unit
495 // followed by 2 1-byte units. We can merge that into a 4-byte access-unit,
496 // but we would not want to merge a 2-byte followed by a single 1-byte (and no
497 // available tail padding). We keep track of the best access unit seen so far,
498 // and use that when we determine we cannot accumulate any more. Then we start
499 // again at the bitfield following that best one.
500
501 // The accumulation is also prevented when:
502 // *) it would cross a character-aligned zero-width bitfield, or
503 // *) fine-grained bitfield access option is in effect.
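 // Two illustrative examples of the above (assuming 32-bit or wider registers
 // and typical alignments; the exact units are target-dependent):
 //   struct { unsigned a : 16; unsigned b : 8; unsigned c : 8; };
 // accumulates into a single naturally aligned 4-byte access unit (roughly
 // i32), whereas
 //   struct { unsigned short a : 16; unsigned char b : 8; char d; };
 // keeps separate 2-byte and 1-byte units (roughly i16 + i8), because a
 // combined unit would either be an unnatural 3-byte access or would overlap
 // the non-bit-field member 'd'.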
504
505 CharUnits RegSize =
506 bitsToCharUnits(Context.getTargetInfo().getRegisterWidth());
507 unsigned CharBits = Context.getCharWidth();
508
509 // Limit of useable tail padding at end of the record. Computed lazily and
510 // cached here.
511 CharUnits ScissorOffset = CharUnits::Zero();
512
513 // Data about the start of the span we're accumulating to create an access
514 // unit from. Begin is the first bitfield of the span. If Begin is FieldEnd,
515 // we've not got a current span. The span starts at the BeginOffset character
516 // boundary. BitSizeSinceBegin is the size (in bits) of the span -- this might
517 // include padding when we've advanced to a subsequent bitfield run.
518 RecordDecl::field_iterator Begin = FieldEnd;
519 CharUnits BeginOffset;
520 uint64_t BitSizeSinceBegin;
521
522 // The (non-inclusive) end of the largest acceptable access unit we've found
523 // since Begin. If this is Begin, we're gathering the initial set of bitfields
524 // of a new span. BestEndOffset is the end of that acceptable access unit --
525 // it might extend beyond the last character of the bitfield run, using
526 // available padding characters.
527 RecordDecl::field_iterator BestEnd = Begin;
528 CharUnits BestEndOffset;
529 bool BestClipped; // Whether the representation must be in a byte array.
530
531 for (;;) {
532 // AtAlignedBoundary is true iff Field is the (potential) start of a new
533 // span (or the end of the bitfields). When true, LimitOffset is the
534 // character offset of that span and Barrier indicates whether the new
535 // span cannot be merged into the current one.
536 bool AtAlignedBoundary = false;
537 bool Barrier = false;
538
539 if (Field != FieldEnd && Field->isBitField()) {
540 uint64_t BitOffset = getFieldBitOffset(*Field);
541 if (Begin == FieldEnd) {
542 // Beginning a new span.
543 Begin = Field;
544 BestEnd = Begin;
545
546 assert((BitOffset % CharBits) == 0 && "Not at start of char");
547 BeginOffset = bitsToCharUnits(BitOffset);
548 BitSizeSinceBegin = 0;
549 } else if ((BitOffset % CharBits) != 0) {
550 // Bitfield occupies the same character as the previous bitfield, so it must
551 // be part of the same span. This can include zero-length bitfields, should
552 // the target not align them to character boundaries. Such non-alignment
553 // is at variance with the standards, which require zero-length
554 // bitfields be a barrier between access units. But of course we can't
555 // achieve that in the middle of a character.
556 assert(BitOffset == Context.toBits(BeginOffset) + BitSizeSinceBegin &&
557 "Concatenating non-contiguous bitfields");
558 } else {
559 // Bitfield potentially begins a new span. This includes zero-length
560 // bitfields on non-aligning targets that lie at character boundaries
561 // (those are barriers to merging).
562 if (Field->isZeroLengthBitField(Context))
563 Barrier = true;
564 AtAlignedBoundary = true;
565 }
566 } else {
567 // We've reached the end of the bitfield run. Either we're done, or this
568 // is a barrier for the current span.
569 if (Begin == FieldEnd)
570 break;
571
572 Barrier = true;
573 AtAlignedBoundary = true;
574 }
575
576 // InstallBest indicates whether we should create an access unit for the
577 // current best span: fields [Begin, BestEnd) occupying characters
578 // [BeginOffset, BestEndOffset).
579 bool InstallBest = false;
580 if (AtAlignedBoundary) {
581 // Field is the start of a new span or the end of the bitfields. The
582 // just-seen span now extends to BitSizeSinceBegin.
583
584 // Determine if we can accumulate that just-seen span into the current
585 // accumulation.
586 CharUnits AccessSize = bitsToCharUnits(BitSizeSinceBegin + CharBits - 1);
587 if (BestEnd == Begin) {
588 // This is the initial run at the start of a new span. By definition,
589 // this is the best seen so far.
590 BestEnd = Field;
591 BestEndOffset = BeginOffset + AccessSize;
592 // Assume clipped until proven otherwise below.
593 BestClipped = true;
594 if (!BitSizeSinceBegin)
595 // A zero-sized initial span -- this will install nothing and reset
596 // for another.
597 InstallBest = true;
598 } else if (AccessSize > RegSize)
599 // Accumulating the just-seen span would create a multi-register access
600 // unit, which would increase register pressure.
601 InstallBest = true;
602
603 if (!InstallBest) {
604 // Determine if accumulating the just-seen span will create an expensive
605 // access unit or not.
606 llvm::Type *Type = getIntNType(Context.toBits(AccessSize));
607 if (!Context.getTargetInfo().hasCheapUnalignedBitFieldAccess()) {
608 // Unaligned accesses are expensive. Only accumulate if the new unit
609 // is naturally aligned. Otherwise install the best we have, which is
610 // either the initial access unit (can't do better), or a naturally
611 // aligned accumulation (since we would have already installed it if
612 // it wasn't naturally aligned).
613 CharUnits Align = getAlignment(Type);
614 if (Align > Layout.getAlignment())
615 // The alignment required is greater than the containing structure
616 // itself.
617 InstallBest = true;
618 else if (!BeginOffset.isMultipleOf(Align))
619 // The access unit is not at a naturally aligned offset within the
620 // structure.
621 InstallBest = true;
622
623 if (InstallBest && BestEnd == Field)
624 // We're installing the first span, whose clipping was presumed
625 // above. Compute it correctly.
626 if (getSize(Type) == AccessSize)
627 BestClipped = false;
628 }
629
630 if (!InstallBest) {
631 // Find the next used storage offset to determine what the limit of
632 // the current span is. That's either the offset of the next field
633 // with storage (which might be Field itself) or the end of the
634 // non-reusable tail padding.
635 CharUnits LimitOffset;
636 for (auto Probe = Field; Probe != FieldEnd; ++Probe)
637 if (!Probe->isZeroSize(Context)) {
638 // A member with storage sets the limit.
639 assert((getFieldBitOffset(*Probe) % CharBits) == 0 &&
640 "Next storage is not byte-aligned");
641 LimitOffset = bitsToCharUnits(getFieldBitOffset(*Probe));
642 goto FoundLimit;
643 }
644 // We reached the end of the fields, determine the bounds of useable
645 // tail padding. As this can be complex for C++, we cache the result.
646 if (ScissorOffset.isZero()) {
647 ScissorOffset = calculateTailClippingOffset(isNonVirtualBaseType);
648 assert(!ScissorOffset.isZero() && "Tail clipping at zero");
649 }
650
651 LimitOffset = ScissorOffset;
652 FoundLimit:;
653
654 CharUnits TypeSize = getSize(Type);
655 if (BeginOffset + TypeSize <= LimitOffset) {
656 // There is space before LimitOffset to create a naturally-sized
657 // access unit.
658 BestEndOffset = BeginOffset + TypeSize;
659 BestEnd = Field;
660 BestClipped = false;
661 }
662
663 if (Barrier)
664 // The next field is a barrier that we cannot merge across.
665 InstallBest = true;
666 else if (Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
667 // Fine-grained access, so no merging of spans.
668 InstallBest = true;
669 else
670 // Otherwise, we're not installing. Update the bit size
671 // of the current span to go all the way to LimitOffset, which is
672 // the (aligned) offset of next bitfield to consider.
673 BitSizeSinceBegin = Context.toBits(LimitOffset - BeginOffset);
674 }
675 }
676 }
677
678 if (InstallBest) {
679 assert((Field == FieldEnd || !Field->isBitField() ||
680 (getFieldBitOffset(*Field) % CharBits) == 0) &&
681 "Installing but not at an aligned bitfield or limit");
682 CharUnits AccessSize = BestEndOffset - BeginOffset;
683 if (!AccessSize.isZero()) {
684 // Add the storage member for the access unit to the record. The
685 // bitfields get the offset of their storage but come afterward and
686 // remain there after a stable sort.
687 llvm::Type *Type;
688 if (BestClipped) {
689 assert(getSize(getIntNType(Context.toBits(AccessSize))) >
690 AccessSize &&
691 "Clipped access need not be clipped");
692 Type = getByteArrayType(AccessSize);
693 } else {
694 Type = getIntNType(Context.toBits(AccessSize));
695 assert(getSize(Type) == AccessSize &&
696 "Unclipped access must be clipped");
697 }
698 Members.push_back(StorageInfo(BeginOffset, Type));
699 for (; Begin != BestEnd; ++Begin)
700 if (!Begin->isZeroLengthBitField(Context))
701 Members.push_back(
702 MemberInfo(BeginOffset, MemberInfo::Field, nullptr, *Begin));
703 }
704 // Reset to start a new span.
705 Field = BestEnd;
706 Begin = FieldEnd;
707 } else {
708 assert(Field != FieldEnd && Field->isBitField() &&
709 "Accumulating past end of bitfields");
710 assert(!Barrier && "Accumulating across barrier");
711 // Accumulate this bitfield into the current (potential) span.
712 BitSizeSinceBegin += Field->getBitWidthValue(Context);
713 ++Field;
714 }
715 }
716
717 return Field;
718}
719
720void CGRecordLowering::accumulateBases() {
721 // If we've got a primary virtual base, we need to add it with the bases.
722 if (Layout.isPrimaryBaseVirtual()) {
723 const CXXRecordDecl *BaseDecl = Layout.getPrimaryBase();
724 Members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::Base,
725 getStorageType(BaseDecl), BaseDecl));
726 }
727 // Accumulate the non-virtual bases.
728 for (const auto &Base : RD->bases()) {
729 if (Base.isVirtual())
730 continue;
731
732 // Bases can be zero-sized even if not technically empty if they
733 // contain only a trailing array member.
734 const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
735 if (!BaseDecl->isEmpty() &&
736 !Context.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
737 Members.push_back(MemberInfo(Layout.getBaseClassOffset(BaseDecl),
738 MemberInfo::Base, getStorageType(BaseDecl), BaseDecl));
739 }
740}
741
742/// The AAPCS defines that, when possible, bit-fields should
743/// be accessed using containers of the declared type width:
744/// When a volatile bit-field is read, and its container does not overlap with
745/// any non-bit-field member or any zero length bit-field member, its container
746/// must be read exactly once using the access width appropriate to the type of
747/// the container. When a volatile bit-field is written, and its container does
748/// not overlap with any non-bit-field member or any zero-length bit-field
749/// member, its container must be read exactly once and written exactly once
750/// using the access width appropriate to the type of the container. The two
751/// accesses are not atomic.
752///
753/// Enforcing the width restriction can be disabled using
754/// -fno-aapcs-bitfield-width.
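///
/// A hedged example of the effect (assuming an AAPCS target with the width
/// restriction enabled): given
///   struct S { volatile int x : 8; int y : 8; };
/// the two bit-fields share a 16-bit access unit, but a volatile access to
/// 'x' is widened to a 32-bit container (the width of its declared type),
/// because that container stays within the record and overlaps no
/// non-bit-field member.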
755void CGRecordLowering::computeVolatileBitfields() {
756 if (!isAAPCS() || !Types.getCodeGenOpts().AAPCSBitfieldWidth)
757 return;
758
759 for (auto &I : BitFields) {
760 const FieldDecl *Field = I.first;
761 CGBitFieldInfo &Info = I.second;
762 llvm::Type *ResLTy = Types.ConvertTypeForMem(Field->getType());
763 // If the record alignment is less than the type width, we can't enforce an
764 // aligned load; bail out.
765 if ((uint64_t)(Context.toBits(Layout.getAlignment())) <
766 ResLTy->getPrimitiveSizeInBits())
767 continue;
768 // CGRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
769 // for big-endian targets, but it assumes a container of width
770 // Info.StorageSize. Since AAPCS uses a different container size (width
771 // of the type), we first undo that calculation here and redo it once
772 // the bit-field offset within the new container is calculated.
773 const unsigned OldOffset =
774 isBE() ? Info.StorageSize - (Info.Offset + Info.Size) : Info.Offset;
775 // Offset to the bit-field from the beginning of the struct.
776 const unsigned AbsoluteOffset =
777 Context.toBits(Info.StorageOffset) + OldOffset;
778
779 // Container size is the width of the bit-field type.
780 const unsigned StorageSize = ResLTy->getPrimitiveSizeInBits();
781 // Nothing to do if the access uses the desired
782 // container width and is naturally aligned.
783 if (Info.StorageSize == StorageSize && (OldOffset % StorageSize == 0))
784 continue;
785
786 // Offset within the container.
787 unsigned Offset = AbsoluteOffset & (StorageSize - 1);
788 // Bail out if an aligned load of the container cannot cover the entire
789 // bit-field. This can happen, for example, if the bit-field is part of a
790 // packed struct. AAPCS does not define access rules for such cases, so we
791 // let clang follow its own rules.
792 if (Offset + Info.Size > StorageSize)
793 continue;
794
795 // Re-adjust offsets for big-endian targets.
796 if (isBE())
797 Offset = StorageSize - (Offset + Info.Size);
798
799 const CharUnits StorageOffset =
800 Context.toCharUnitsFromBits(AbsoluteOffset & ~(StorageSize - 1));
801 const CharUnits End = StorageOffset +
802 Context.toCharUnitsFromBits(StorageSize) -
803 CharUnits::One();
804
805 const ASTRecordLayout &Layout =
806 Context.getASTRecordLayout(Field->getParent());
807 // If we would access memory outside the record, then bail out.
808 const CharUnits RecordSize = Layout.getSize();
809 if (End >= RecordSize)
810 continue;
811
812 // Bail out if performing this load would access non-bit-field members.
813 bool Conflict = false;
814 for (const auto *F : D->fields()) {
815 // Allow overlaps with sized (non-zero-length) bit-fields.
816 if (F->isBitField() && !F->isZeroLengthBitField(Context))
817 continue;
818
819 const CharUnits FOffset = Context.toCharUnitsFromBits(
820 Layout.getFieldOffset(F->getFieldIndex()));
821
822 // As C11 defines it, a zero-sized bit-field acts as a barrier, so
823 // fields before and after it should be free of race conditions.
824 // The AAPCS acknowledges this and imposes no restrictions when the
825 // natural container overlaps a zero-length bit-field.
826 if (F->isZeroLengthBitField(Context)) {
827 if (End > FOffset && StorageOffset < FOffset) {
828 Conflict = true;
829 break;
830 }
831 }
832
833 const CharUnits FEnd =
834 FOffset +
835 Context.toCharUnitsFromBits(
836 Types.ConvertTypeForMem(F->getType())->getPrimitiveSizeInBits()) -
837 CharUnits::One();
838 // If no overlap, continue.
839 if (End < FOffset || FEnd < StorageOffset)
840 continue;
841
842 // The desired load overlaps a non-bit-field member, bail out.
843 Conflict = true;
844 break;
845 }
846
847 if (Conflict)
848 continue;
849 // Write the new bit-field access parameters.
850 // As the storage offset now is defined as the number of elements from the
851 // start of the structure, we should divide the Offset by the element size.
852 Info.VolatileStorageOffset =
853 StorageOffset / Context.toCharUnitsFromBits(StorageSize).getQuantity();
854 Info.VolatileStorageSize = StorageSize;
855 Info.VolatileOffset = Offset;
856 }
857}
858
859void CGRecordLowering::accumulateVPtrs() {
860 if (Layout.hasOwnVFPtr())
861 Members.push_back(
862 MemberInfo(CharUnits::Zero(), MemberInfo::VFPtr,
863 llvm::PointerType::getUnqual(Types.getLLVMContext())));
864 if (Layout.hasOwnVBPtr())
865 Members.push_back(
866 MemberInfo(Layout.getVBPtrOffset(), MemberInfo::VBPtr,
867 llvm::PointerType::getUnqual(Types.getLLVMContext())));
868}
869
870CharUnits
871CGRecordLowering::calculateTailClippingOffset(bool isNonVirtualBaseType) const {
872 if (!RD)
873 return Layout.getDataSize();
874
875 CharUnits ScissorOffset = Layout.getNonVirtualSize();
876 // In the itanium ABI, it's possible to place a vbase at a dsize that is
877 // smaller than the nvsize. Here we check to see if such a base is placed
878 // before the nvsize and set the scissor offset to that, instead of the
879 // nvsize.
880 if (!isNonVirtualBaseType && isOverlappingVBaseABI())
881 for (const auto &Base : RD->vbases()) {
882 const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
883 if (BaseDecl->isEmpty())
884 continue;
885 // If the vbase is a primary virtual base of some base, then it doesn't
886 // get its own storage location but instead lives inside of that base.
887 if (Context.isNearlyEmpty(BaseDecl) && !hasOwnStorage(RD, BaseDecl))
888 continue;
889 ScissorOffset = std::min(ScissorOffset,
890 Layout.getVBaseClassOffset(BaseDecl));
891 }
892
893 return ScissorOffset;
894}
895
896void CGRecordLowering::accumulateVBases() {
897 Members.push_back(MemberInfo(calculateTailClippingOffset(false),
898 MemberInfo::Scissor, nullptr, RD));
899 for (const auto &Base : RD->vbases()) {
900 const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl();
901 if (BaseDecl->isEmpty())
902 continue;
903 CharUnits Offset = Layout.getVBaseClassOffset(BaseDecl);
904 // If the vbase is a primary virtual base of some base, then it doesn't
905 // get its own storage location but instead lives inside of that base.
906 if (isOverlappingVBaseABI() &&
907 Context.isNearlyEmpty(BaseDecl) &&
908 !hasOwnStorage(RD, BaseDecl)) {
909 Members.push_back(MemberInfo(Offset, MemberInfo::VBase, nullptr,
910 BaseDecl));
911 continue;
912 }
913 // If we've got a vtordisp, add it as a storage type.
914 if (Layout.getVBaseOffsetsMap().find(BaseDecl)->second.hasVtorDisp())
915 Members.push_back(StorageInfo(Offset - CharUnits::fromQuantity(4),
916 getIntNType(32)));
917 Members.push_back(MemberInfo(Offset, MemberInfo::VBase,
918 getStorageType(BaseDecl), BaseDecl));
919 }
920}
921
922bool CGRecordLowering::hasOwnStorage(const CXXRecordDecl *Decl,
923 const CXXRecordDecl *Query) const {
924 const ASTRecordLayout &DeclLayout = Context.getASTRecordLayout(Decl);
925 if (DeclLayout.isPrimaryBaseVirtual() && DeclLayout.getPrimaryBase() == Query)
926 return false;
927 for (const auto &Base : Decl->bases())
928 if (!hasOwnStorage(Base.getType()->getAsCXXRecordDecl(), Query))
929 return false;
930 return true;
931}
932
933void CGRecordLowering::calculateZeroInit() {
934 for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
935 MemberEnd = Members.end();
936 IsZeroInitializableAsBase && Member != MemberEnd; ++Member) {
937 if (Member->Kind == MemberInfo::Field) {
938 if (!Member->FD || isZeroInitializable(Member->FD))
939 continue;
940 IsZeroInitializable = IsZeroInitializableAsBase = false;
941 } else if (Member->Kind == MemberInfo::Base ||
942 Member->Kind == MemberInfo::VBase) {
943 if (isZeroInitializable(Member->RD))
944 continue;
945 IsZeroInitializable = false;
946 if (Member->Kind == MemberInfo::Base)
947 IsZeroInitializableAsBase = false;
948 }
949 }
950}
951
952// Verify accumulateBitfields computed the correct storage representations.
953void CGRecordLowering::checkBitfieldClipping() const {
954#ifndef NDEBUG
955 auto Tail = CharUnits::Zero();
956 for (const auto &M : Members) {
957 // Only members with data and the scissor can cut into tail padding.
958 if (!M.Data && M.Kind != MemberInfo::Scissor)
959 continue;
960
961 assert(M.Offset >= Tail && "Bitfield access unit is not clipped");
962 Tail = M.Offset;
963 if (M.Data)
964 Tail += getSize(M.Data);
965 }
966#endif
967}
968
969void CGRecordLowering::determinePacked(bool NVBaseType) {
970 if (Packed)
971 return;
972 CharUnits Alignment = CharUnits::One();
973 CharUnits NVAlignment = CharUnits::One();
974 CharUnits NVSize =
975 !NVBaseType && RD ? Layout.getNonVirtualSize() : CharUnits::Zero();
976 for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
977 MemberEnd = Members.end();
978 Member != MemberEnd; ++Member) {
979 if (!Member->Data)
980 continue;
981 // If any member falls at an offset that is not a multiple of its alignment,
982 // then the entire record must be packed.
983 if (Member->Offset % getAlignment(Member->Data))
984 Packed = true;
985 if (Member->Offset < NVSize)
986 NVAlignment = std::max(NVAlignment, getAlignment(Member->Data));
987 Alignment = std::max(Alignment, getAlignment(Member->Data));
988 }
989 // If the size of the record (the capstone's offset) is not a multiple of the
990 // record's alignment, it must be packed.
991 if (Members.back().Offset % Alignment)
992 Packed = true;
993 // If the size of the non-virtual sub-object is not a multiple of the
994 // non-virtual sub-object's alignment, it must be packed. We cannot have a
995 // packed non-virtual sub-object and an unpacked complete object or vice versa.
996 if (NVSize % NVAlignment)
997 Packed = true;
998 // Update the alignment of the sentinel.
999 if (!Packed)
1000 Members.back().Data = getIntNType(Context.toBits(Alignment));
1001}
1002
1003void CGRecordLowering::insertPadding() {
1004 std::vector<std::pair<CharUnits, CharUnits> > Padding;
1005 CharUnits Size = CharUnits::Zero();
1006 for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
1007 MemberEnd = Members.end();
1008 Member != MemberEnd; ++Member) {
1009 if (!Member->Data)
1010 continue;
1011 CharUnits Offset = Member->Offset;
1012 assert(Offset >= Size);
1013 // Insert padding if we need to.
1014 if (Offset !=
1015 Size.alignTo(Packed ? CharUnits::One() : getAlignment(Member->Data)))
1016 Padding.push_back(std::make_pair(Size, Offset - Size));
1017 Size = Offset + getSize(Member->Data);
1018 }
1019 if (Padding.empty())
1020 return;
1021 // Add the padding to the Members list and sort it.
1022 for (std::vector<std::pair<CharUnits, CharUnits> >::const_iterator
1023 Pad = Padding.begin(), PadEnd = Padding.end();
1024 Pad != PadEnd; ++Pad)
1025 Members.push_back(StorageInfo(Pad->first, getByteArrayType(Pad->second)));
1026 llvm::stable_sort(Members);
1027}
1028
1029void CGRecordLowering::fillOutputFields() {
1030 for (std::vector<MemberInfo>::const_iterator Member = Members.begin(),
1031 MemberEnd = Members.end();
1032 Member != MemberEnd; ++Member) {
1033 if (Member->Data)
1034 FieldTypes.push_back(Member->Data);
1035 if (Member->Kind == MemberInfo::Field) {
1036 if (Member->FD)
1037 Fields[Member->FD->getCanonicalDecl()] = FieldTypes.size() - 1;
1038 // A field without storage must be a bitfield.
1039 if (!Member->Data)
1040 setBitFieldInfo(Member->FD, Member->Offset, FieldTypes.back());
1041 } else if (Member->Kind == MemberInfo::Base)
1042 NonVirtualBases[Member->RD] = FieldTypes.size() - 1;
1043 else if (Member->Kind == MemberInfo::VBase)
1044 VirtualBases[Member->RD] = FieldTypes.size() - 1;
1045 }
1046}
1047
1048CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
1049 const FieldDecl *FD,
1050 uint64_t Offset, uint64_t Size,
1051 uint64_t StorageSize,
1052 CharUnits StorageOffset) {
1053 // This function is vestigial from CGRecordLayoutBuilder days but is still
1054 // used in GCObjCRuntime.cpp. That usage has a "fixme" attached to it that,
1055 // when addressed, will allow for the removal of this function.
1056 llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
1057 CharUnits TypeSizeInBytes =
1058 CharUnits::fromQuantity(Types.getDataLayout().getTypeAllocSize(Ty));
1059 uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);
1060
1062
1063 if (Size > TypeSizeInBits) {
1064 // We have a wide bit-field. The extra bits are only used for padding, so
1065 // if we have a bitfield of type T, with size N:
1066 //
1067 // T t : N;
1068 //
1069 // We can just assume that it's:
1070 //
1071 // T t : sizeof(T);
1072 //
1073 Size = TypeSizeInBits;
1074 }
1075
1076 // Reverse the bit offsets for big endian machines. Because we represent
1077 // a bitfield as a single large integer load, we can imagine the bits
1078 // counting from the most-significant-bit instead of the
1079 // least-significant-bit.
1080 if (Types.getDataLayout().isBigEndian()) {
1081 Offset = StorageSize - (Offset + Size);
1082 }
1083
1084 return CGBitFieldInfo(Offset, Size, IsSigned, StorageSize, StorageOffset);
1085}
1086
1087std::unique_ptr<CGRecordLayout>
1088CodeGenTypes::ComputeRecordLayout(const RecordDecl *D, llvm::StructType *Ty) {
1089 CGRecordLowering Builder(*this, D, /*Packed=*/false);
1090
1091 Builder.lower(/*NonVirtualBaseType=*/false);
1092
1093 // If we're in C++, compute the base subobject type.
1094 llvm::StructType *BaseTy = nullptr;
1095 if (isa<CXXRecordDecl>(D)) {
1096 BaseTy = Ty;
1097 if (Builder.Layout.getNonVirtualSize() != Builder.Layout.getSize()) {
1098 CGRecordLowering BaseBuilder(*this, D, /*Packed=*/Builder.Packed);
1099 BaseBuilder.lower(/*NonVirtualBaseType=*/true);
1100 BaseTy = llvm::StructType::create(
1101 getLLVMContext(), BaseBuilder.FieldTypes, "", BaseBuilder.Packed);
1102 addRecordTypeName(D, BaseTy, ".base");
1103 // BaseTy and Ty must agree on their packedness for getLLVMFieldNo to work
1104 // on both of them with the same index.
1105 assert(Builder.Packed == BaseBuilder.Packed &&
1106 "Non-virtual and complete types must agree on packedness");
1107 }
1108 }
1109
1110 // Fill in the struct *after* computing the base type. Filling in the body
1111 // signifies that the type is no longer opaque and record layout is complete,
1112 // but we may need to recursively layout D while laying D out as a base type.
1113 Ty->setBody(Builder.FieldTypes, Builder.Packed);
1114
1115 auto RL = std::make_unique<CGRecordLayout>(
1116 Ty, BaseTy, (bool)Builder.IsZeroInitializable,
1117 (bool)Builder.IsZeroInitializableAsBase);
1118
1119 RL->NonVirtualBases.swap(Builder.NonVirtualBases);
1120 RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);
1121
1122 // Add all the field numbers.
1123 RL->FieldInfo.swap(Builder.Fields);
1124
1125 // Add bitfield info.
1126 RL->BitFields.swap(Builder.BitFields);
1127
1128 // Dump the layout, if requested.
1129 if (getContext().getLangOpts().DumpRecordLayouts) {
1130 llvm::outs() << "\n*** Dumping IRgen Record Layout\n";
1131 llvm::outs() << "Record: ";
1132 D->dump(llvm::outs());
1133 llvm::outs() << "\nLayout: ";
1134 RL->print(llvm::outs());
1135 }
1136
1137#ifndef NDEBUG
1138 // Verify that the computed LLVM struct size matches the AST layout size.
1139 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);
1140
1141 uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
1142 assert(TypeSizeInBits == getDataLayout().getTypeAllocSizeInBits(Ty) &&
1143 "Type size mismatch!");
1144
1145 if (BaseTy) {
1146 CharUnits NonVirtualSize = Layout.getNonVirtualSize();
1147
1148 uint64_t AlignedNonVirtualTypeSizeInBits =
1149 getContext().toBits(NonVirtualSize);
1150
1151 assert(AlignedNonVirtualTypeSizeInBits ==
1152 getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
1153 "Type size mismatch!");
1154 }
1155
1156 // Verify that the LLVM and AST field offsets agree.
1157 llvm::StructType *ST = RL->getLLVMType();
1158 const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);
1159
1160 const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
1161 RecordDecl::field_iterator it = D->field_begin();
1162 for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
1163 const FieldDecl *FD = *it;
1164
1165 // Ignore zero-sized fields.
1166 if (FD->isZeroSize(getContext()))
1167 continue;
1168
1169 // For non-bit-fields, just check that the LLVM struct offset matches the
1170 // AST offset.
1171 if (!FD->isBitField()) {
1172 unsigned FieldNo = RL->getLLVMFieldNo(FD);
1173 assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
1174 "Invalid field offset!");
1175 continue;
1176 }
1177
1178 // Ignore unnamed bit-fields.
1179 if (!FD->getDeclName())
1180 continue;
1181
1182 const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
1183 llvm::Type *ElementTy = ST->getTypeAtIndex(RL->getLLVMFieldNo(FD));
1184
1185 // Unions have overlapping elements dictating their layout, but for
1186 // non-unions we can verify that this section of the layout is the exact
1187 // expected size.
1188 if (D->isUnion()) {
1189 // For unions we verify that the start is zero and the size
1190 // is in-bounds. However, on BE systems, the offset may be non-zero, but
1191 // the size + offset should match the storage size in that case as it
1192 // "starts" at the back.
1193 if (getDataLayout().isBigEndian())
1194 assert(static_cast<unsigned>(Info.Offset + Info.Size) ==
1195 Info.StorageSize &&
1196 "Big endian union bitfield does not end at the back");
1197 else
1198 assert(Info.Offset == 0 &&
1199 "Little endian union bitfield with a non-zero offset");
1200 assert(Info.StorageSize <= SL->getSizeInBits() &&
1201 "Union not large enough for bitfield storage");
1202 } else {
1203 assert((Info.StorageSize ==
1204 getDataLayout().getTypeAllocSizeInBits(ElementTy) ||
1205 Info.VolatileStorageSize ==
1206 getDataLayout().getTypeAllocSizeInBits(ElementTy)) &&
1207 "Storage size does not match the element type size");
1208 }
1209 assert(Info.Size > 0 && "Empty bitfield!");
1210 assert(static_cast<unsigned>(Info.Offset) + Info.Size <= Info.StorageSize &&
1211 "Bitfield outside of its allocated storage");
1212 }
1213#endif
1214
1215 return RL;
1216}
1217
1218void CGRecordLayout::print(raw_ostream &OS) const {
1219 OS << "<CGRecordLayout\n";
1220 OS << " LLVMType:" << *CompleteObjectType << "\n";
1221 if (BaseSubobjectType)
1222 OS << " NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
1223 OS << " IsZeroInitializable:" << IsZeroInitializable << "\n";
1224 OS << " BitFields:[\n";
1225
1226 // Print bit-field infos in declaration order.
1227 std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
1228 for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
1229 it = BitFields.begin(), ie = BitFields.end();
1230 it != ie; ++it) {
1231 const RecordDecl *RD = it->first->getParent();
1232 unsigned Index = 0;
1233 for (RecordDecl::field_iterator
1234 it2 = RD->field_begin(); *it2 != it->first; ++it2)
1235 ++Index;
1236 BFIs.push_back(std::make_pair(Index, &it->second));
1237 }
1238 llvm::array_pod_sort(BFIs.begin(), BFIs.end());
1239 for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
1240 OS.indent(4);
1241 BFIs[i].second->print(OS);
1242 OS << "\n";
1243 }
1244
1245 OS << "]>\n";
1246}
1247
1248LLVM_DUMP_METHOD void CGRecordLayout::dump() const {
1249 print(llvm::errs());
1250}
1251
1252void CGBitFieldInfo::print(raw_ostream &OS) const {
1253 OS << "<CGBitFieldInfo"
1254 << " Offset:" << Offset << " Size:" << Size << " IsSigned:" << IsSigned
1255 << " StorageSize:" << StorageSize
1256 << " StorageOffset:" << StorageOffset.getQuantity()
1257 << " VolatileOffset:" << VolatileOffset
1258 << " VolatileStorageSize:" << VolatileStorageSize
1259 << " VolatileStorageOffset:" << VolatileStorageOffset.getQuantity() << ">";
1260}
1261
1262LLVM_DUMP_METHOD void CGBitFieldInfo::dump() const {
1263 print(llvm::errs());
1264}