//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to compute the layout of a record.
//
//===----------------------------------------------------------------------===//

#include "CIRGenBuilder.h"
#include "CIRGenModule.h"
#include "CIRGenTypes.h"

#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CIR/Dialect/IR/CIRDataLayout.h"
#include "clang/CIR/Dialect/IR/CIRTypes.h"
#include "clang/CIR/MissingFeatures.h"
#include "llvm/Support/Casting.h"

#include <memory>

using namespace llvm;
using namespace clang;
using namespace clang::CIRGen;

namespace {
/// The CIRRecordLowering is responsible for lowering an ASTRecordLayout to an
/// mlir::Type. Some of the lowering is straightforward, some is not.
// TODO: Detail some of the complexities and weirdnesses?
// (See CGRecordLayoutBuilder.cpp)
struct CIRRecordLowering final {

  // MemberInfo is a helper structure that contains information about a record
  // member. In addition to the standard member types, there exists a sentinel
  // member type that ensures correct rounding.
  struct MemberInfo final {
    CharUnits offset;
    enum class InfoKind { VFPtr, Field, Base, VBase } kind;
    mlir::Type data;
    union {
      const FieldDecl *fieldDecl;
      const CXXRecordDecl *cxxRecordDecl;
    };
    MemberInfo(CharUnits offset, InfoKind kind, mlir::Type data,
               const FieldDecl *fieldDecl = nullptr)
        : offset{offset}, kind{kind}, data{data}, fieldDecl{fieldDecl} {}
    MemberInfo(CharUnits offset, InfoKind kind, mlir::Type data,
               const CXXRecordDecl *rd)
        : offset{offset}, kind{kind}, data{data}, cxxRecordDecl{rd} {}
    // MemberInfos are sorted, so we define a < operator.
    bool operator<(const MemberInfo &other) const {
      return offset < other.offset;
    }
  };
  // The constructor.
  CIRRecordLowering(CIRGenTypes &cirGenTypes, const RecordDecl *recordDecl,
                    bool packed);

  /// Constructs a MemberInfo instance from an offset and mlir::Type.
  MemberInfo makeStorageInfo(CharUnits offset, mlir::Type data) {
    return MemberInfo(offset, MemberInfo::InfoKind::Field, data);
  }

  // Layout routines.
  void setBitFieldInfo(const FieldDecl *fd, CharUnits startOffset,
                       mlir::Type storageType);

  void lower(bool nonVirtualBaseType);
  void lowerUnion();

  /// Determines if we need a packed LLVM struct.
  void determinePacked(bool nvBaseType);
  /// Inserts padding everywhere it's needed.
  void insertPadding();

  void computeVolatileBitfields();
  void accumulateBases();
  void accumulateVPtrs();
  void accumulateVBases();
  void accumulateFields();
  RecordDecl::field_iterator
  accumulateBitFields(RecordDecl::field_iterator field,
                      RecordDecl::field_iterator fieldEnd);

  mlir::Type getVFPtrType();

  bool isAAPCS() const {
    return astContext.getTargetInfo().getABI().starts_with("aapcs");
  }

  /// Helper function to check if the target machine is BigEndian.
  bool isBigEndian() const { return astContext.getTargetInfo().isBigEndian(); }

  // The Itanium base layout rule allows virtual bases to overlap
  // other bases, which complicates layout in specific ways.
  //
  // Note specifically that the ms_struct attribute doesn't change this.
  bool isOverlappingVBaseABI() {
    return !astContext.getTargetInfo().getCXXABI().isMicrosoft();
  }
  // Recursively searches all of the bases to find out if a vbase is
  // not the primary vbase of some base class.
  bool hasOwnStorage(const CXXRecordDecl *decl, const CXXRecordDecl *query);

  CharUnits bitsToCharUnits(uint64_t bitOffset) {
    return astContext.toCharUnitsFromBits(bitOffset);
  }

  void calculateZeroInit();

  CharUnits getSize(mlir::Type ty) {
    return CharUnits::fromQuantity(dataLayout.layout.getTypeSize(ty));
  }
  CharUnits getSizeInBits(mlir::Type ty) {
    return CharUnits::fromQuantity(dataLayout.layout.getTypeSizeInBits(ty));
  }
  CharUnits getAlignment(mlir::Type ty) {
    return CharUnits::fromQuantity(dataLayout.layout.getTypeABIAlignment(ty));
  }

  bool isZeroInitializable(const FieldDecl *fd) {
    return cirGenTypes.isZeroInitializable(fd->getType());
  }
  bool isZeroInitializable(const RecordDecl *rd) {
    return cirGenTypes.isZeroInitializable(rd);
  }

  /// Wraps cir::IntType with some implicit arguments.
  mlir::Type getUIntNType(uint64_t numBits) {
    unsigned alignedBits = llvm::PowerOf2Ceil(numBits);
    alignedBits = std::max(8u, alignedBits);
    return cir::IntType::get(&cirGenTypes.getMLIRContext(), alignedBits,
                             /*isSigned=*/false);
  }
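
  // For illustration: getUIntNType(3) yields !u8i, getUIntNType(17) yields
  // !u32i, and getUIntNType(64) yields !u64i -- the requested width is
  // rounded up to the next power of two, with a floor of 8 bits.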

  mlir::Type getCharType() {
    return cir::IntType::get(&cirGenTypes.getMLIRContext(),
                             astContext.getCharWidth(),
                             /*isSigned=*/false);
  }

  mlir::Type getByteArrayType(CharUnits numberOfChars) {
    assert(!numberOfChars.isZero() && "Empty byte arrays aren't allowed.");
    mlir::Type type = getCharType();
    return numberOfChars == CharUnits::One()
               ? type
               : cir::ArrayType::get(type, numberOfChars.getQuantity());
  }
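
  // For illustration: on a typical 8-bit-char target, a one-char request
  // yields plain !u8i, while a three-char request yields
  // !cir.array<!u8i x 3>.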

  // Gets the CIR BaseSubobject type from a CXXRecordDecl.
  mlir::Type getStorageType(const CXXRecordDecl *rd) {
    return cirGenTypes.getCIRGenRecordLayout(rd).getBaseSubobjectCIRType();
  }
  // This is different from LLVM traditional codegen because CIRGen uses arrays
  // of bytes instead of arbitrary-sized integers. This is important for packed
  // structures support.
  mlir::Type getBitfieldStorageType(unsigned numBits) {
    unsigned alignedBits = llvm::alignTo(numBits, astContext.getCharWidth());
    if (cir::isValidFundamentalIntWidth(alignedBits))
      return builder.getUIntNTy(alignedBits);

    mlir::Type type = getCharType();
    return cir::ArrayType::get(type, alignedBits / astContext.getCharWidth());
  }
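
  // For illustration: a 13-bit bitfield rounds up to 16 bits, a valid
  // fundamental width, so it gets !u16i storage; a 24-bit bitfield rounds
  // up to 24 bits, which is not fundamental, so it gets
  // !cir.array<!u8i x 3> rather than a padded 32-bit integer.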

  mlir::Type getStorageType(const FieldDecl *fieldDecl) {
    mlir::Type type = cirGenTypes.convertTypeForMem(fieldDecl->getType());
    if (fieldDecl->isBitField()) {
      cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
                                         "getStorageType for bitfields");
    }
    return type;
  }

  uint64_t getFieldBitOffset(const FieldDecl *fieldDecl) {
    return astRecordLayout.getFieldOffset(fieldDecl->getFieldIndex());
  }

  /// Fills out the structures that are ultimately consumed.
  void fillOutputFields();

  void appendPaddingBytes(CharUnits size) {
    if (!size.isZero()) {
      fieldTypes.push_back(getByteArrayType(size));
      padded = true;
    }
  }

  CIRGenTypes &cirGenTypes;
  CIRGenBuilderTy &builder;
  const ASTContext &astContext;
  const RecordDecl *recordDecl;
  const CXXRecordDecl *cxxRecordDecl;
  const ASTRecordLayout &astRecordLayout;
  // Helpful intermediate data-structures
  std::vector<MemberInfo> members;
  // Output fields, consumed by CIRGenTypes::computeRecordLayout
  llvm::SmallVector<mlir::Type, 16> fieldTypes;
  llvm::DenseMap<const FieldDecl *, CIRGenBitFieldInfo> bitFields;
  llvm::DenseMap<const FieldDecl *, unsigned> fieldIdxMap;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> nonVirtualBases;
  llvm::DenseMap<const CXXRecordDecl *, unsigned> virtualBases;
  cir::CIRDataLayout dataLayout;

  LLVM_PREFERRED_TYPE(bool)
  unsigned zeroInitializable : 1;
  LLVM_PREFERRED_TYPE(bool)
  unsigned zeroInitializableAsBase : 1;
  LLVM_PREFERRED_TYPE(bool)
  unsigned packed : 1;
  LLVM_PREFERRED_TYPE(bool)
  unsigned padded : 1;

private:
  CIRRecordLowering(const CIRRecordLowering &) = delete;
  void operator=(const CIRRecordLowering &) = delete;
}; // CIRRecordLowering
} // namespace

CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes,
                                     const RecordDecl *recordDecl, bool packed)
    : cirGenTypes{cirGenTypes}, builder{cirGenTypes.getBuilder()},
      astContext{cirGenTypes.getASTContext()}, recordDecl{recordDecl},
      cxxRecordDecl{llvm::dyn_cast<CXXRecordDecl>(recordDecl)},
      astRecordLayout{
          cirGenTypes.getASTContext().getASTRecordLayout(recordDecl)},
      dataLayout{cirGenTypes.getCGModule().getModule()},
      zeroInitializable{true}, zeroInitializableAsBase{true}, packed{packed},
      padded{false} {}

void CIRRecordLowering::setBitFieldInfo(const FieldDecl *fd,
                                        CharUnits startOffset,
                                        mlir::Type storageType) {
  CIRGenBitFieldInfo &info = bitFields[fd->getCanonicalDecl()];
  info.isSigned = fd->getType()->isSignedIntegerOrEnumerationType();
  info.offset =
      (unsigned)(getFieldBitOffset(fd) - astContext.toBits(startOffset));
  info.size = fd->getBitWidthValue();
  info.storageSize = getSizeInBits(storageType).getQuantity();
  info.storageOffset = startOffset;
  info.storageType = storageType;
  info.name = fd->getName();

  if (info.size > info.storageSize)
    info.size = info.storageSize;
  // Reverse the bit offsets for big endian machines. Since bitfields are laid
  // out as packed bits within an integer-sized unit, we can imagine the bits
  // counting from the most-significant-bit instead of the
  // least-significant-bit.
  if (dataLayout.isBigEndian())
    info.offset = info.storageSize - (info.offset + info.size);

  info.volatileStorageSize = 0;
  info.volatileOffset = 0;
  info.volatileStorageOffset = CharUnits::Zero();
}
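
// For illustration: given `struct S { unsigned a : 3; unsigned b : 5; }`,
// both fields share a single !u8i storage unit at offset 0. On a
// little-endian target `a` gets offset 0 and `b` gets offset 3; on a
// big-endian target the offsets are mirrored (storageSize - (offset + size))
// to 5 and 0 respectively.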

void CIRRecordLowering::lower(bool nonVirtualBaseType) {
  if (recordDecl->isUnion()) {
    lowerUnion();
    computeVolatileBitfields();
    return;
  }

  CharUnits size = nonVirtualBaseType ? astRecordLayout.getNonVirtualSize()
                                      : astRecordLayout.getSize();

  accumulateFields();

  if (cxxRecordDecl) {
    accumulateVPtrs();
    accumulateBases();
    if (members.empty()) {
      appendPaddingBytes(size);
      computeVolatileBitfields();
      return;
    }
    if (!nonVirtualBaseType)
      accumulateVBases();
  }

  llvm::stable_sort(members);
  // TODO: implement clipTailPadding once bitfields are implemented

  members.push_back(makeStorageInfo(size, getUIntNType(8)));
  determinePacked(nonVirtualBaseType);
  insertPadding();
  members.pop_back();

  calculateZeroInit();
  fillOutputFields();
  computeVolatileBitfields();
}
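
// A rough sketch of the pipeline above for a simple case: lowering
// `struct S { char c; int i; };` accumulates two Field members, appends a
// capstone member at offset sizeof(S) so determinePacked and insertPadding
// can treat the record size like one more member offset, verifies that
// every member is naturally aligned (it is, so the record stays unpacked),
// and finally pops the capstone before the output fields are filled in.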

void CIRRecordLowering::fillOutputFields() {
  for (const MemberInfo &member : members) {
    if (member.data)
      fieldTypes.push_back(member.data);
    if (member.kind == MemberInfo::InfoKind::Field) {
      if (member.fieldDecl)
        fieldIdxMap[member.fieldDecl->getCanonicalDecl()] =
            fieldTypes.size() - 1;
      // A field without storage must be a bitfield.
      if (!member.data)
        setBitFieldInfo(member.fieldDecl, member.offset, fieldTypes.back());
    } else if (member.kind == MemberInfo::InfoKind::Base) {
      nonVirtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1;
    } else if (member.kind == MemberInfo::InfoKind::VBase) {
      virtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1;
    }
  }
}

RecordDecl::field_iterator
CIRRecordLowering::accumulateBitFields(RecordDecl::field_iterator field,
                                       RecordDecl::field_iterator fieldEnd) {
  assert(!cir::MissingFeatures::isDiscreteBitFieldABI());

  CharUnits regSize =
      bitsToCharUnits(astContext.getTargetInfo().getRegisterWidth());
  unsigned charBits = astContext.getCharWidth();

  // Data about the start of the span we're accumulating to create an access
  // unit from. 'Begin' is the first bitfield of the span. If 'begin' is
  // 'fieldEnd', we've not got a current span. The span starts at the
  // 'beginOffset' character boundary. 'bitSizeSinceBegin' is the size (in
  // bits) of the span -- this might include padding when we've advanced to a
  // subsequent bitfield run.
  RecordDecl::field_iterator begin = fieldEnd;
  CharUnits beginOffset;
  uint64_t bitSizeSinceBegin;

  // The (non-inclusive) end of the largest acceptable access unit we've found
  // since 'begin'. If this is 'begin', we're gathering the initial set of
  // bitfields of a new span. 'bestEndOffset' is the end of that acceptable
  // access unit -- it might extend beyond the last character of the bitfield
  // run, using available padding characters.
  RecordDecl::field_iterator bestEnd = begin;
  CharUnits bestEndOffset;
  bool bestClipped; // Whether the representation must be in a byte array.

  for (;;) {
    // atAlignedBoundary is true if 'field' is the (potential) start of a new
    // span (or the end of the bitfields). When true, limitOffset is the
    // character offset of that span and barrier indicates whether the new
    // span cannot be merged into the current one.
    bool atAlignedBoundary = false;
    // A barrier can be a zero-width bitfield or a non-bitfield member.
    bool barrier = false;
    if (field != fieldEnd && field->isBitField()) {
      uint64_t bitOffset = getFieldBitOffset(*field);
      if (begin == fieldEnd) {
        // Beginning a new span.
        begin = field;
        bestEnd = begin;

        assert((bitOffset % charBits) == 0 && "Not at start of char");
        beginOffset = bitsToCharUnits(bitOffset);
        bitSizeSinceBegin = 0;
      } else if ((bitOffset % charBits) != 0) {
        // This bitfield occupies the same character as the previous bitfield,
        // so it must be part of the same span. This can include zero-length
        // bitfields, should the target not align them to character
        // boundaries. Such non-alignment is at variance with the standards,
        // which require zero-length bitfields be a barrier between access
        // units. But of course we can't achieve that in the middle of a
        // character.
        assert(bitOffset ==
                   astContext.toBits(beginOffset) + bitSizeSinceBegin &&
               "Concatenating non-contiguous bitfields");
      } else {
        // The bitfield potentially begins a new span. This includes
        // zero-length bitfields on non-aligning targets that lie at character
        // boundaries (those are barriers to merging).
        if (field->isZeroLengthBitField())
          barrier = true;
        atAlignedBoundary = true;
      }
    } else {
      // We've reached the end of the bitfield run. Either we're done, or this
      // is a barrier for the current span.
      if (begin == fieldEnd)
        break;

      barrier = true;
      atAlignedBoundary = true;
    }

    // 'installBest' indicates whether we should create an access unit for the
    // current best span: fields ['begin', 'bestEnd') occupying characters
    // ['beginOffset', 'bestEndOffset').
    bool installBest = false;
    if (atAlignedBoundary) {
      // 'field' is the start of a new span or the end of the bitfields. The
      // just-seen span now extends to 'bitSizeSinceBegin'.

      // Determine if we can accumulate that just-seen span into the current
      // accumulation.
      CharUnits accessSize = bitsToCharUnits(bitSizeSinceBegin + charBits - 1);
      if (bestEnd == begin) {
        // This is the initial run at the start of a new span. By definition,
        // this is the best seen so far.
        bestEnd = field;
        bestEndOffset = beginOffset + accessSize;
        // Assume clipped until proven not below.
        bestClipped = true;
        if (!bitSizeSinceBegin)
          // A zero-sized initial span -- this will install nothing and reset
          // for another.
          installBest = true;
      } else if (accessSize > regSize) {
        // Accumulating the just-seen span would create a multi-register
        // access unit, which would increase register pressure.
        installBest = true;
      }

      if (!installBest) {
        // Determine if accumulating the just-seen span will create an
        // expensive access unit or not.
        mlir::Type type = getUIntNType(astContext.toBits(accessSize));
        if (!astContext.getTargetInfo().hasCheapUnalignedBitFieldAccess())
          cirGenTypes.getCGModule().errorNYI(
              field->getSourceRange(), "NYI CheapUnalignedBitFieldAccess");

        if (!installBest) {
          // Find the next used storage offset to determine what the limit of
          // the current span is. That's either the offset of the next field
          // with storage (which might be field itself) or the end of the
          // non-reusable tail padding.
          CharUnits limitOffset;
          for (auto probe = field; probe != fieldEnd; ++probe)
            if (!isEmptyFieldForLayout(astContext, *probe)) {
              // A member with storage sets the limit.
              assert((getFieldBitOffset(*probe) % charBits) == 0 &&
                     "Next storage is not byte-aligned");
              limitOffset = bitsToCharUnits(getFieldBitOffset(*probe));
              goto FoundLimit;
            }
          limitOffset = cxxRecordDecl ? astRecordLayout.getNonVirtualSize()
                                      : astRecordLayout.getDataSize();

        FoundLimit:
          CharUnits typeSize = getSize(type);
          if (beginOffset + typeSize <= limitOffset) {
            // There is space before limitOffset to create a naturally-sized
            // access unit.
            bestEndOffset = beginOffset + typeSize;
            bestEnd = field;
            bestClipped = false;
          }
          if (barrier) {
            // The next field is a barrier that we cannot merge across.
            installBest = true;
          } else if (cirGenTypes.getCGModule()
                         .getCodeGenOpts()
                         .FineGrainedBitfieldAccesses) {
            installBest = true;
          } else {
            // Otherwise, we're not installing. Update the bit size of the
            // current span to go all the way to limitOffset, which is the
            // (aligned) offset of the next bitfield to consider.
            bitSizeSinceBegin = astContext.toBits(limitOffset - beginOffset);
          }
        }
      }
    }

    if (installBest) {
      assert((field == fieldEnd || !field->isBitField() ||
              (getFieldBitOffset(*field) % charBits) == 0) &&
             "Installing but not at an aligned bitfield or limit");
      CharUnits accessSize = bestEndOffset - beginOffset;
      if (!accessSize.isZero()) {
        // Add the storage member for the access unit to the record. The
        // bitfields get the offset of their storage but come afterward and
        // remain there after a stable sort.
        mlir::Type type;
        if (bestClipped) {
          assert(getSize(getUIntNType(astContext.toBits(accessSize))) >
                     accessSize &&
                 "Clipped access need not be clipped");
          type = getByteArrayType(accessSize);
        } else {
          type = getUIntNType(astContext.toBits(accessSize));
          assert(getSize(type) == accessSize &&
                 "Unclipped access must be clipped");
        }
        members.push_back(makeStorageInfo(beginOffset, type));
        for (; begin != bestEnd; ++begin)
          if (!begin->isZeroLengthBitField())
            members.push_back(MemberInfo(
                beginOffset, MemberInfo::InfoKind::Field, nullptr, *begin));
      }
      // Reset to start a new span.
      field = bestEnd;
      begin = fieldEnd;
    } else {
      assert(field != fieldEnd && field->isBitField() &&
             "Accumulating past end of bitfields");
      assert(!barrier && "Accumulating across barrier");
      // Accumulate this bitfield into the current (potential) span.
      bitSizeSinceBegin += field->getBitWidthValue();
      ++field;
    }
  }

  return field;
}
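
// A worked example of the accumulation above (assuming a 32-bit register
// width and cheap unaligned access): for
//   struct S { int a : 7; int b : 7; int c : 7; char x; };
// the three bitfields occupy bits [0, 21), which rounds up to a 3-byte
// span. The next field with storage (`x`) begins at character offset 3, so
// a naturally-sized 4-byte access unit will not fit before it, and `x`
// itself is a barrier. The span is therefore installed as a clipped 3-byte
// access unit (!cir.array<!u8i x 3>) holding `a`, `b`, and `c`.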

void CIRRecordLowering::accumulateFields() {
  for (RecordDecl::field_iterator field = recordDecl->field_begin(),
                                  fieldEnd = recordDecl->field_end();
       field != fieldEnd;) {
    if (field->isBitField()) {
      field = accumulateBitFields(field, fieldEnd);
      assert((field == fieldEnd || !field->isBitField()) &&
             "Failed to accumulate all the bitfields");
    } else if (!field->isZeroSize(astContext)) {
      members.push_back(MemberInfo(bitsToCharUnits(getFieldBitOffset(*field)),
                                   MemberInfo::InfoKind::Field,
                                   getStorageType(*field), *field));
      ++field;
    } else {
      // TODO(cir): do we want to do anything special about zero size members?
      assert(!cir::MissingFeatures::zeroSizeRecordMembers());
      ++field;
    }
  }
}

void CIRRecordLowering::calculateZeroInit() {
  for (const MemberInfo &member : members) {
    if (member.kind == MemberInfo::InfoKind::Field) {
      if (!member.fieldDecl || isZeroInitializable(member.fieldDecl))
        continue;
      zeroInitializable = zeroInitializableAsBase = false;
      return;
    } else if (member.kind == MemberInfo::InfoKind::Base ||
               member.kind == MemberInfo::InfoKind::VBase) {
      if (isZeroInitializable(member.cxxRecordDecl))
        continue;
      zeroInitializable = false;
      if (member.kind == MemberInfo::InfoKind::Base)
        zeroInitializableAsBase = false;
    }
  }
}
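
// For illustration: under the Itanium ABI a pointer to a data member is the
// classic field type that defeats zero-initialization, since its null value
// is -1 rather than all-zero bits; a record with an `int S::*` field (or a
// base containing one) therefore ends up with zeroInitializable == false.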

void CIRRecordLowering::determinePacked(bool nvBaseType) {
  if (packed)
    return;
  CharUnits alignment = CharUnits::One();
  CharUnits nvAlignment = CharUnits::One();
  CharUnits nvSize = !nvBaseType && cxxRecordDecl
                         ? astRecordLayout.getNonVirtualSize()
                         : CharUnits::Zero();

  for (const MemberInfo &member : members) {
    if (!member.data)
      continue;
    // If any member falls at an offset that is not a multiple of its
    // alignment, then the entire record must be packed.
    if (member.offset % getAlignment(member.data))
      packed = true;
    if (member.offset < nvSize)
      nvAlignment = std::max(nvAlignment, getAlignment(member.data));
    alignment = std::max(alignment, getAlignment(member.data));
  }
  // If the size of the record (the capstone's offset) is not a multiple of
  // the record's alignment, it must be packed.
  if (members.back().offset % alignment)
    packed = true;
  // If the non-virtual sub-object is not a multiple of the non-virtual
  // sub-object's alignment, it must be packed. We cannot have a packed
  // non-virtual sub-object and an unpacked complete object or vice versa.
  if (nvSize % nvAlignment)
    packed = true;
  // Update the alignment of the sentinel.
  if (!packed)
    members.back().data = getUIntNType(astContext.toBits(alignment));
}
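
// For illustration: given `struct __attribute__((packed)) S { char c;
// int i; };`, the AST layout places `i` at offset 1, which is not a
// multiple of the 4-byte alignment of its storage type, so the lowered
// record is marked packed and the capstone keeps its 1-byte alignment.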

void CIRRecordLowering::insertPadding() {
  std::vector<std::pair<CharUnits, CharUnits>> padding;
  CharUnits size = CharUnits::Zero();
  for (const MemberInfo &member : members) {
    if (!member.data)
      continue;
    CharUnits offset = member.offset;
    assert(offset >= size);
    // Insert padding if we need to.
    if (offset !=
        size.alignTo(packed ? CharUnits::One() : getAlignment(member.data)))
      padding.push_back(std::make_pair(size, offset - size));
    size = offset + getSize(member.data);
  }
  if (padding.empty())
    return;
  padded = true;
  // Add the padding to the Members list and sort it.
  for (const std::pair<CharUnits, CharUnits> &paddingPair : padding)
    members.push_back(makeStorageInfo(paddingPair.first,
                                      getByteArrayType(paddingPair.second)));
  llvm::stable_sort(members);
}
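
// For illustration: `struct S { char c; int i; };` needs no explicit pad,
// because aligning the running size (1) up to the 4-byte alignment of the
// storage type of `i` already lands on its offset (4). A pad is only
// recorded when a member sits beyond its naturally-aligned position; for
// example, `struct S { char c; int i __attribute__((aligned(8))); };` gets
// a 7-byte !cir.array<!u8i x 7> inserted between the two fields.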

std::unique_ptr<CIRGenRecordLayout>
CIRGenTypes::computeRecordLayout(const RecordDecl *rd, cir::RecordType *ty) {
  CIRRecordLowering lowering(*this, rd, /*packed=*/false);
  assert(ty->isIncomplete() && "recomputing record layout?");
  lowering.lower(/*nonVirtualBaseType=*/false);

  // If we're in C++, compute the base subobject type.
  cir::RecordType baseTy;
  if (llvm::isa<CXXRecordDecl>(rd) && !rd->isUnion() &&
      !rd->hasAttr<FinalAttr>()) {
    baseTy = *ty;
    if (lowering.astRecordLayout.getNonVirtualSize() !=
        lowering.astRecordLayout.getSize()) {
      CIRRecordLowering baseLowering(*this, rd, /*packed=*/lowering.packed);
      baseLowering.lower(/*nonVirtualBaseType=*/true);
      std::string baseIdentifier = getRecordTypeName(rd, ".base");
      baseTy =
          builder.getCompleteRecordTy(baseLowering.fieldTypes, baseIdentifier,
                                      baseLowering.packed, baseLowering.padded);
      // TODO(cir): add something like addRecordTypeName

      // BaseTy and Ty must agree on their packedness for getCIRFieldNo to
      // work on both of them with the same index.
      assert(lowering.packed == baseLowering.packed &&
             "Non-virtual and complete types must agree on packedness");
    }
  }

  // Fill in the record *after* computing the base type. Filling in the body
  // signifies that the type is no longer opaque and record layout is
  // complete, but we may need to recursively lay out rd while laying it out
  // as a base type.
  assert(!cir::MissingFeatures::astRecordDeclAttr());
  ty->complete(lowering.fieldTypes, lowering.packed, lowering.padded);

  auto rl = std::make_unique<CIRGenRecordLayout>(
      ty ? *ty : cir::RecordType{}, baseTy ? baseTy : cir::RecordType{},
      (bool)lowering.zeroInitializable, (bool)lowering.zeroInitializableAsBase);

  assert(!cir::MissingFeatures::recordZeroInit());

  rl->nonVirtualBases.swap(lowering.nonVirtualBases);
  rl->completeObjectVirtualBases.swap(lowering.virtualBases);

  // Add all the field numbers.
  rl->fieldIdxMap.swap(lowering.fieldIdxMap);

  rl->bitFields.swap(lowering.bitFields);

  // Dump the layout, if requested.
  if (getASTContext().getLangOpts().DumpRecordLayouts) {
    llvm::outs() << "\n*** Dumping CIRgen Record Layout\n";
    llvm::outs() << "Record: ";
    rd->dump(llvm::outs());
    llvm::outs() << "\nLayout: ";
    rl->print(llvm::outs());
  }

  // TODO: implement verification
  return rl;
}
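
// A sketch of the two types this produces: for
//   struct B { virtual ~B(); int i; };
//   struct D : virtual B { char c; };
// the complete-object record type of D contains the virtual base B, while
// the ".base" record type (used when D is laid out as a base subobject of
// another class) omits it, because getNonVirtualSize() differs from
// getSize().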

void CIRGenRecordLayout::print(raw_ostream &os) const {
  os << "<CIRGenRecordLayout\n";
  os << "  CIR Type:" << completeObjectType << "\n";
  if (baseSubobjectType)
    os << "  NonVirtualBaseCIRType:" << baseSubobjectType << "\n";
  os << "  IsZeroInitializable:" << zeroInitializable << "\n";
  os << "  BitFields:[\n";
  std::vector<std::pair<unsigned, const CIRGenBitFieldInfo *>> bitInfo;
  for (auto &[decl, info] : bitFields) {
    const RecordDecl *rd = decl->getParent();
    unsigned index = 0;
    for (RecordDecl::field_iterator it = rd->field_begin(); *it != decl; ++it)
      ++index;
    bitInfo.push_back(std::make_pair(index, &info));
  }
  llvm::array_pod_sort(bitInfo.begin(), bitInfo.end());
  for (std::pair<unsigned, const CIRGenBitFieldInfo *> &info : bitInfo) {
    os.indent(4);
    info.second->print(os);
    os << "\n";
  }
  os << "  ]>\n";
}

void CIRGenBitFieldInfo::print(raw_ostream &os) const {
  os << "<CIRBitFieldInfo" << " name:" << name << " offset:" << offset
     << " size:" << size << " isSigned:" << isSigned
     << " storageSize:" << storageSize
     << " storageOffset:" << storageOffset.getQuantity()
     << " volatileOffset:" << volatileOffset
     << " volatileStorageSize:" << volatileStorageSize
     << " volatileStorageOffset:" << volatileStorageOffset.getQuantity() << ">";
}

void CIRGenRecordLayout::dump() const { print(llvm::errs()); }

void CIRGenBitFieldInfo::dump() const { print(llvm::errs()); }

void CIRRecordLowering::lowerUnion() {
  CharUnits layoutSize = astRecordLayout.getSize();
  mlir::Type storageType = nullptr;
  bool seenNamedMember = false;

  // Iterate through the fields setting bitFieldInfo and the Fields array.
  // Also locate the "most appropriate" storage type.
  for (const FieldDecl *field : recordDecl->fields()) {
    mlir::Type fieldType;
    if (field->isBitField()) {
      if (field->isZeroLengthBitField())
        continue;
      fieldType = getBitfieldStorageType(field->getBitWidthValue());
      setBitFieldInfo(field, CharUnits::Zero(), fieldType);
    } else {
      fieldType = getStorageType(field);
    }

    // This maps a field to its index. For unions, the index is always 0.
    fieldIdxMap[field->getCanonicalDecl()] = 0;

    // Compute zero-initializable status.
    // This union might not be zero initialized: it may contain a pointer to
    // a data member which might have some exotic initialization sequence.
    // If this is the case, then we ought not to try and come up with a
    // "better" type; it might not be very easy to come up with a Constant
    // which correctly initializes it.
    if (!seenNamedMember) {
      seenNamedMember = field->getIdentifier();
      if (!seenNamedMember)
        if (const RecordDecl *fieldRD = field->getType()->getAsRecordDecl())
          seenNamedMember = fieldRD->findFirstNamedDataMember();
      if (seenNamedMember && !isZeroInitializable(field)) {
        zeroInitializable = zeroInitializableAsBase = false;
        storageType = fieldType;
      }
    }

    // Because our union isn't zero initializable, we won't be getting a
    // better storage type.
    if (!zeroInitializable)
      continue;

    // Conditionally update our storage type if we've got a new "better" one.
    if (!storageType || getAlignment(fieldType) > getAlignment(storageType) ||
        (getAlignment(fieldType) == getAlignment(storageType) &&
         getSize(fieldType) > getSize(storageType)))
      storageType = fieldType;

    // NOTE(cir): Track all union members' types, not just the largest one.
    // This allows for proper type-checking and retains more info for
    // analysis.
    fieldTypes.push_back(fieldType);
  }

  if (!storageType)
    cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
                                       "No-storage Union NYI");

  if (layoutSize < getSize(storageType))
    storageType = getByteArrayType(layoutSize);
  else
    appendPaddingBytes(layoutSize - getSize(storageType));

  // Set packed if we need it.
  if (layoutSize % getAlignment(storageType))
    packed = true;
}
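
// For illustration: for `union U { char c; int i; short s; };` all three
// member types are pushed into fieldTypes (and each field maps to index 0),
// but `i` wins the "most appropriate" storage contest with the greatest
// alignment, so the union's size already equals the size of its storage
// type and no trailing padding is appended.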

bool CIRRecordLowering::hasOwnStorage(const CXXRecordDecl *decl,
                                      const CXXRecordDecl *query) {
  const ASTRecordLayout &declLayout = astContext.getASTRecordLayout(decl);
  if (declLayout.isPrimaryBaseVirtual() && declLayout.getPrimaryBase() == query)
    return false;
  for (const auto &base : decl->bases())
    if (!hasOwnStorage(base.getType()->getAsCXXRecordDecl(), query))
      return false;
  return true;
}

/// The AAPCS defines that, when possible, bit-fields should
/// be accessed using containers of the declared type width:
/// When a volatile bit-field is read, and its container does not overlap with
/// any non-bit-field member or any zero length bit-field member, its container
/// must be read exactly once using the access width appropriate to the type of
/// the container. When a volatile bit-field is written, and its container does
/// not overlap with any non-bit-field member or any zero-length bit-field
/// member, its container must be read exactly once and written exactly once
/// using the access width appropriate to the type of the container. The two
/// accesses are not atomic.
///
/// Enforcing the width restriction can be disabled using
/// -fno-aapcs-bitfield-width.
void CIRRecordLowering::computeVolatileBitfields() {
  if (!isAAPCS() ||
      !cirGenTypes.getCGModule().getCodeGenOpts().AAPCSBitfieldWidth)
    return;

  for (auto &[field, info] : bitFields) {
    mlir::Type resLTy = cirGenTypes.convertTypeForMem(field->getType());

    if (astContext.toBits(astRecordLayout.getAlignment()) <
        getSizeInBits(resLTy).getQuantity())
      continue;

    // CIRRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
    // for big-endian targets, but it assumes a container of width
    // info.storageSize. Since AAPCS uses a different container size (width
    // of the type), we first undo that calculation here and redo it once
    // the bit-field offset within the new container is calculated.
    const unsigned oldOffset =
        isBigEndian() ? info.storageSize - (info.offset + info.size)
                      : info.offset;
    // Offset to the bit-field from the beginning of the struct.
    const unsigned absoluteOffset =
        astContext.toBits(info.storageOffset) + oldOffset;

    // Container size is the width of the bit-field type.
    const unsigned storageSize = getSizeInBits(resLTy).getQuantity();
    // Nothing to do if the access uses the desired
    // container width and is naturally aligned.
    if (info.storageSize == storageSize && (oldOffset % storageSize == 0))
      continue;

    // Offset within the container.
    unsigned offset = absoluteOffset & (storageSize - 1);
    // Bail out if an aligned load of the container cannot cover the entire
    // bit-field. This can happen, for example, if the bit-field is part of a
    // packed struct. AAPCS does not define access rules for such cases, so
    // we let clang follow its own rules.
    if (offset + info.size > storageSize)
      continue;

    // Re-adjust offsets for big-endian targets.
    if (isBigEndian())
      offset = storageSize - (offset + info.size);

    const CharUnits storageOffset =
        astContext.toCharUnitsFromBits(absoluteOffset & ~(storageSize - 1));
    const CharUnits end = storageOffset +
                          astContext.toCharUnitsFromBits(storageSize) -
                          CharUnits::One();

    const ASTRecordLayout &layout =
        astContext.getASTRecordLayout(field->getParent());
    // If the access is outside the memory of the record, then bail out.
    const CharUnits recordSize = layout.getSize();
    if (end >= recordSize)
      continue;

    // Bail out if performing this load would access non-bit-field members.
    bool conflict = false;
    for (const auto *f : recordDecl->fields()) {
      // Allow sized bit-fields to overlap.
      if (f->isBitField() && !f->isZeroLengthBitField())
        continue;

      const CharUnits fOffset = astContext.toCharUnitsFromBits(
          layout.getFieldOffset(f->getFieldIndex()));

      // As C11 defines, a zero sized bit-field defines a barrier, so
      // fields after and before it should be race condition free.
      // The AAPCS acknowledges it and imposes no restrictions when the
      // natural container overlaps a zero-length bit-field.
      if (f->isZeroLengthBitField()) {
        if (end > fOffset && storageOffset < fOffset) {
          conflict = true;
          break;
        }
      }

      const CharUnits fEnd =
          fOffset +
          astContext.toCharUnitsFromBits(
              getSizeInBits(cirGenTypes.convertTypeForMem(f->getType()))
                  .getQuantity()) -
          CharUnits::One();
      // If no overlap, continue.
      if (end < fOffset || fEnd < storageOffset)
        continue;

      // The desired load overlaps a non-bit-field member, bail out.
      conflict = true;
      break;
    }

    if (conflict)
      continue;
    // Write the new bit-field access parameters.
    // As the storage offset is now defined as the number of elements from
    // the start of the structure, we should divide the offset by the element
    // size.
    info.volatileStorageOffset =
        storageOffset /
        astContext.toCharUnitsFromBits(storageSize).getQuantity();
    info.volatileStorageSize = storageSize;
    info.volatileOffset = offset;
  }
}
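
// A sketch of the AAPCS adjustment above: for
//   struct S { volatile int a : 8; int b : 8; };
// accumulateBitFields gives both fields one 16-bit access unit, but AAPCS
// wants volatile accesses to go through a container of the declared type's
// width. Both fields get volatileStorageSize = 32 with the container at
// byte 0; `a` gets volatileOffset 0 and `b` gets 8 (mirrored to 24 and 16
// on big-endian targets).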

void CIRRecordLowering::accumulateBases() {
  // If we've got a primary virtual base, we need to add it with the bases.
  if (astRecordLayout.isPrimaryBaseVirtual()) {
    cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
                                       "accumulateBases: primary virtual base");
  }

  // Accumulate the non-virtual bases.
  for (const auto &base : cxxRecordDecl->bases()) {
    if (base.isVirtual())
      continue;
    // Bases can be zero-sized even if not technically empty if they
    // contain only a trailing array member.
    const CXXRecordDecl *baseDecl = base.getType()->getAsCXXRecordDecl();
    if (!baseDecl->isEmpty() &&
        !astContext.getASTRecordLayout(baseDecl).getNonVirtualSize().isZero()) {
      members.push_back(MemberInfo(astRecordLayout.getBaseClassOffset(baseDecl),
                                   MemberInfo::InfoKind::Base,
                                   getStorageType(baseDecl), baseDecl));
    }
  }
}

void CIRRecordLowering::accumulateVBases() {
  for (const auto &base : cxxRecordDecl->vbases()) {
    const CXXRecordDecl *baseDecl = base.getType()->getAsCXXRecordDecl();
    if (isEmptyRecordForLayout(astContext, base.getType()))
      continue;
    CharUnits offset = astRecordLayout.getVBaseClassOffset(baseDecl);
    // If the vbase is a primary virtual base of some base, then it doesn't
    // get its own storage location but instead lives inside of that base.
    if (isOverlappingVBaseABI() && astContext.isNearlyEmpty(baseDecl) &&
        !hasOwnStorage(cxxRecordDecl, baseDecl)) {
      members.push_back(
          MemberInfo(offset, MemberInfo::InfoKind::VBase, nullptr, baseDecl));
      continue;
    }
    // If we've got a vtordisp, add it as a storage type.
    if (astRecordLayout.getVBaseOffsetsMap()
            .find(baseDecl)
            ->second.hasVtorDisp())
      members.push_back(makeStorageInfo(offset - CharUnits::fromQuantity(4),
                                        getUIntNType(32)));
    members.push_back(MemberInfo(offset, MemberInfo::InfoKind::VBase,
                                 getStorageType(baseDecl), baseDecl));
  }
}

void CIRRecordLowering::accumulateVPtrs() {
  if (astRecordLayout.hasOwnVFPtr())
    members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::InfoKind::VFPtr,
                                 getVFPtrType()));

  if (astRecordLayout.hasOwnVBPtr())
    cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
                                       "accumulateVPtrs: hasOwnVBPtr");
}

mlir::Type CIRRecordLowering::getVFPtrType() {
  return cir::VPtrType::get(builder.getContext());
}