clang 23.0.0git
CIRGenRecordLayoutBuilder.cpp
Go to the documentation of this file.
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to compute the layout of a record.
//
//===----------------------------------------------------------------------===//

#include "CIRGenBuilder.h"
#include "CIRGenModule.h"
#include "CIRGenTypes.h"

#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"

#include "llvm/Support/Casting.h"

#include <algorithm>
#include <memory>

using namespace llvm;
using namespace clang;
using namespace clang::CIRGen;
31
32namespace {
33/// The CIRRecordLowering is responsible for lowering an ASTRecordLayout to an
34/// mlir::Type. Some of the lowering is straightforward, some is not.
35// TODO: Detail some of the complexities and weirdnesses?
36// (See CGRecordLayoutBuilder.cpp)
37struct CIRRecordLowering final {
38
39 // MemberInfo is a helper structure that contains information about a record
40 // member. In addition to the standard member types, there exists a sentinel
41 // member type that ensures correct rounding.
42 struct MemberInfo final {
43 CharUnits offset;
44 enum class InfoKind { VFPtr, Field, Base, VBase } kind;
45 mlir::Type data;
46 union {
47 const FieldDecl *fieldDecl;
48 const CXXRecordDecl *cxxRecordDecl;
49 };
50 MemberInfo(CharUnits offset, InfoKind kind, mlir::Type data,
51 const FieldDecl *fieldDecl = nullptr)
52 : offset{offset}, kind{kind}, data{data}, fieldDecl{fieldDecl} {}
53 MemberInfo(CharUnits offset, InfoKind kind, mlir::Type data,
54 const CXXRecordDecl *rd)
55 : offset{offset}, kind{kind}, data{data}, cxxRecordDecl{rd} {}
56 // MemberInfos are sorted so we define a < operator.
57 bool operator<(const MemberInfo &other) const {
58 return offset < other.offset;
59 }
60 };
61 // The constructor.
62 CIRRecordLowering(CIRGenTypes &cirGenTypes, const RecordDecl *recordDecl,
63 bool packed);
64
65 /// Constructs a MemberInfo instance from an offset and mlir::Type.
66 MemberInfo makeStorageInfo(CharUnits offset, mlir::Type data) {
67 return MemberInfo(offset, MemberInfo::InfoKind::Field, data);
68 }
69
70 // Layout routines.
71 void setBitFieldInfo(const FieldDecl *fd, CharUnits startOffset,
72 mlir::Type storageType);
73
74 void lower(bool NonVirtualBaseType);
75 void lowerUnion();
76
77 /// Determines if we need a packed llvm struct.
78 void determinePacked(bool nvBaseType);
79 /// Inserts padding everywhere it's needed.
80 void insertPadding();
81
82 void computeVolatileBitfields();
83 void accumulateBases();
84 void accumulateVPtrs();
85 void accumulateVBases();
86 void accumulateFields();
88 accumulateBitFields(RecordDecl::field_iterator field,
90
91 mlir::Type getVFPtrType();
92
93 bool isAAPCS() const {
94 return astContext.getTargetInfo().getABI().starts_with("aapcs");
95 }
96
97 /// Helper function to check if the target machine is BigEndian.
98 bool isBigEndian() const { return astContext.getTargetInfo().isBigEndian(); }
99
100 // The Itanium base layout rule allows virtual bases to overlap
101 // other bases, which complicates layout in specific ways.
102 //
103 // Note specifically that the ms_struct attribute doesn't change this.
104 bool isOverlappingVBaseABI() {
105 return !astContext.getTargetInfo().getCXXABI().isMicrosoft();
106 }
107 // Recursively searches all of the bases to find out if a vbase is
108 // not the primary vbase of some base class.
109 bool hasOwnStorage(const CXXRecordDecl *decl, const CXXRecordDecl *query);
110
111 /// The Microsoft bitfield layout rule allocates discrete storage
112 /// units of the field's formal type and only combines adjacent
113 /// fields of the same formal type. We want to emit a layout with
114 /// these discrete storage units instead of combining them into a
115 /// continuous run.
116 bool isDiscreteBitFieldABI() {
117 return astContext.getTargetInfo().getCXXABI().isMicrosoft() ||
118 recordDecl->isMsStruct(astContext);
119 }
120
121 CharUnits bitsToCharUnits(uint64_t bitOffset) {
122 return astContext.toCharUnitsFromBits(bitOffset);
123 }
124
125 void calculateZeroInit();
126
127 CharUnits getSize(mlir::Type Ty) {
128 return CharUnits::fromQuantity(dataLayout.layout.getTypeSize(Ty));
129 }
130 CharUnits getSizeInBits(mlir::Type ty) {
131 return CharUnits::fromQuantity(dataLayout.layout.getTypeSizeInBits(ty));
132 }
133 CharUnits getAlignment(mlir::Type Ty) {
134 return CharUnits::fromQuantity(dataLayout.layout.getTypeABIAlignment(Ty));
135 }
136
137 bool isZeroInitializable(const FieldDecl *fd) {
138 return cirGenTypes.isZeroInitializable(fd->getType());
139 }
140 bool isZeroInitializable(const RecordDecl *rd) {
141 return cirGenTypes.isZeroInitializable(rd);
142 }
143
144 /// Wraps cir::IntType with some implicit arguments.
145 mlir::Type getUIntNType(uint64_t numBits) {
146 unsigned alignedBits = llvm::PowerOf2Ceil(numBits);
147 alignedBits = std::max(8u, alignedBits);
148 return cir::IntType::get(&cirGenTypes.getMLIRContext(), alignedBits,
149 /*isSigned=*/false);
150 }
151
152 mlir::Type getCharType() {
153 return cir::IntType::get(&cirGenTypes.getMLIRContext(),
154 astContext.getCharWidth(),
155 /*isSigned=*/false);
156 }
157
158 mlir::Type getByteArrayType(CharUnits numberOfChars) {
159 assert(!numberOfChars.isZero() && "Empty byte arrays aren't allowed.");
160 mlir::Type type = getCharType();
161 return numberOfChars == CharUnits::One()
162 ? type
163 : cir::ArrayType::get(type, numberOfChars.getQuantity());
164 }
165
166 // Gets the CIR BaseSubobject type from a CXXRecordDecl.
167 mlir::Type getStorageType(const CXXRecordDecl *RD) {
168 return cirGenTypes.getCIRGenRecordLayout(RD).getBaseSubobjectCIRType();
169 }
170 // This is different from LLVM traditional codegen because CIRGen uses arrays
171 // of bytes instead of arbitrary-sized integers. This is important for packed
172 // structures support.
173 mlir::Type getBitfieldStorageType(unsigned numBits) {
174 unsigned alignedBits = llvm::alignTo(numBits, astContext.getCharWidth());
175 if (cir::isValidFundamentalIntWidth(alignedBits))
176 return builder.getUIntNTy(alignedBits);
177
178 mlir::Type type = getCharType();
179 return cir::ArrayType::get(type, alignedBits / astContext.getCharWidth());
180 }
181
182 mlir::Type getStorageType(const FieldDecl *fieldDecl) {
183 mlir::Type type = cirGenTypes.convertTypeForMem(fieldDecl->getType());
184 if (fieldDecl->isBitField()) {
185 cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
186 "getStorageType for bitfields");
187 }
188 return type;
189 }
190
191 uint64_t getFieldBitOffset(const FieldDecl *fieldDecl) {
192 return astRecordLayout.getFieldOffset(fieldDecl->getFieldIndex());
193 }
194
195 /// Fills out the structures that are ultimately consumed.
196 void fillOutputFields();
197
198 void appendPaddingBytes(CharUnits size) {
199 if (!size.isZero()) {
200 fieldTypes.push_back(getByteArrayType(size));
201 padded = true;
202 }
203 }
204
205 CIRGenTypes &cirGenTypes;
206 CIRGenBuilderTy &builder;
207 const ASTContext &astContext;
208 const RecordDecl *recordDecl;
209 const CXXRecordDecl *cxxRecordDecl;
210 const ASTRecordLayout &astRecordLayout;
211 // Helpful intermediate data-structures
212 std::vector<MemberInfo> members;
213 // Output fields, consumed by CIRGenTypes::computeRecordLayout
214 llvm::SmallVector<mlir::Type, 16> fieldTypes;
215 llvm::DenseMap<const FieldDecl *, CIRGenBitFieldInfo> bitFields;
216 llvm::DenseMap<const FieldDecl *, unsigned> fieldIdxMap;
217 llvm::DenseMap<const CXXRecordDecl *, unsigned> nonVirtualBases;
218 llvm::DenseMap<const CXXRecordDecl *, unsigned> virtualBases;
219 cir::CIRDataLayout dataLayout;
220
221 LLVM_PREFERRED_TYPE(bool)
222 unsigned zeroInitializable : 1;
223 LLVM_PREFERRED_TYPE(bool)
224 unsigned zeroInitializableAsBase : 1;
225 LLVM_PREFERRED_TYPE(bool)
226 unsigned packed : 1;
227 LLVM_PREFERRED_TYPE(bool)
228 unsigned padded : 1;
229
230private:
231 CIRRecordLowering(const CIRRecordLowering &) = delete;
232 void operator=(const CIRRecordLowering &) = delete;
233}; // CIRRecordLowering
234} // namespace
235
236CIRRecordLowering::CIRRecordLowering(CIRGenTypes &cirGenTypes,
237 const RecordDecl *recordDecl, bool packed)
238 : cirGenTypes{cirGenTypes}, builder{cirGenTypes.getBuilder()},
239 astContext{cirGenTypes.getASTContext()}, recordDecl{recordDecl},
241 astRecordLayout{
242 cirGenTypes.getASTContext().getASTRecordLayout(recordDecl)},
243 dataLayout{cirGenTypes.getCGModule().getModule()},
244 zeroInitializable{true}, zeroInitializableAsBase{true}, packed{packed},
245 padded{false} {}
246
247void CIRRecordLowering::setBitFieldInfo(const FieldDecl *fd,
248 CharUnits startOffset,
249 mlir::Type storageType) {
250 CIRGenBitFieldInfo &info = bitFields[fd->getCanonicalDecl()];
252 info.offset =
253 (unsigned)(getFieldBitOffset(fd) - astContext.toBits(startOffset));
254 info.size = fd->getBitWidthValue();
255 info.storageSize = getSizeInBits(storageType).getQuantity();
256 info.storageOffset = startOffset;
257 info.storageType = storageType;
258 info.name = fd->getName();
259
260 if (info.size > info.storageSize)
261 info.size = info.storageSize;
262 // Reverse the bit offsets for big endian machines. Since bitfields are laid
263 // out as packed bits within an integer-sized unit, we can imagine the bits
264 // counting from the most-significant-bit instead of the
265 // least-significant-bit.
266 if (dataLayout.isBigEndian())
267 info.offset = info.storageSize - (info.offset + info.size);
268
269 info.volatileStorageSize = 0;
270 info.volatileOffset = 0;
272}
273
274void CIRRecordLowering::lower(bool nonVirtualBaseType) {
275 if (recordDecl->isUnion()) {
276 lowerUnion();
277 computeVolatileBitfields();
278 return;
279 }
280
281 CharUnits size = nonVirtualBaseType ? astRecordLayout.getNonVirtualSize()
282 : astRecordLayout.getSize();
283
284 accumulateFields();
285
286 if (cxxRecordDecl) {
287 accumulateVPtrs();
288 accumulateBases();
289 if (members.empty()) {
290 appendPaddingBytes(size);
291 computeVolatileBitfields();
292 return;
293 }
294 if (!nonVirtualBaseType)
295 accumulateVBases();
296 }
297
298 llvm::stable_sort(members);
299 // TODO: Verify bitfield clipping
301
302 members.push_back(makeStorageInfo(size, getUIntNType(8)));
303 determinePacked(nonVirtualBaseType);
304 insertPadding();
305 members.pop_back();
306
307 calculateZeroInit();
308 fillOutputFields();
309 computeVolatileBitfields();
310}
311
312void CIRRecordLowering::fillOutputFields() {
313 for (const MemberInfo &member : members) {
314 if (member.data)
315 fieldTypes.push_back(member.data);
316 if (member.kind == MemberInfo::InfoKind::Field) {
317 if (member.fieldDecl)
318 fieldIdxMap[member.fieldDecl->getCanonicalDecl()] =
319 fieldTypes.size() - 1;
320 // A field without storage must be a bitfield.
321 if (!member.data) {
322 assert(member.fieldDecl &&
323 "member.data is a nullptr so member.fieldDecl should not be");
324 setBitFieldInfo(member.fieldDecl, member.offset, fieldTypes.back());
325 }
326 } else if (member.kind == MemberInfo::InfoKind::Base) {
327 nonVirtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1;
328 } else if (member.kind == MemberInfo::InfoKind::VBase) {
329 virtualBases[member.cxxRecordDecl] = fieldTypes.size() - 1;
330 }
331 }
332}
333
335CIRRecordLowering::accumulateBitFields(RecordDecl::field_iterator field,
337 if (isDiscreteBitFieldABI()) {
338 // run stores the first element of the current run of bitfields. fieldEnd is
339 // used as a special value to note that we don't have a current run. A
340 // bitfield run is a contiguous collection of bitfields that can be stored
341 // in the same storage block. Zero-sized bitfields and bitfields that would
342 // cross an alignment boundary break a run and start a new one.
344 // tail is the offset of the first bit off the end of the current run. It's
345 // used to determine if the ASTRecordLayout is treating these two bitfields
346 // as contiguous. StartBitOffset is offset of the beginning of the Run.
347 uint64_t startBitOffset, tail = 0;
348 for (; field != fieldEnd && field->isBitField(); ++field) {
349 // Zero-width bitfields end runs.
350 if (field->isZeroLengthBitField()) {
351 run = fieldEnd;
352 continue;
353 }
354 uint64_t bitOffset = getFieldBitOffset(*field);
355 mlir::Type type = cirGenTypes.convertTypeForMem(field->getType());
356 // If we don't have a run yet, or don't live within the previous run's
357 // allocated storage then we allocate some storage and start a new run.
358 if (run == fieldEnd || bitOffset >= tail) {
359 run = field;
360 startBitOffset = bitOffset;
361 tail = startBitOffset + dataLayout.getTypeAllocSizeInBits(type);
362 // Add the storage member to the record. This must be added to the
363 // record before the bitfield members so that it gets laid out before
364 // the bitfields it contains get laid out.
365 members.push_back(
366 makeStorageInfo(bitsToCharUnits(startBitOffset), type));
367 }
368 // Bitfields get the offset of their storage but come afterward and remain
369 // there after a stable sort.
370 members.push_back(MemberInfo(bitsToCharUnits(startBitOffset),
371 MemberInfo::InfoKind::Field, nullptr,
372 *field));
373 }
374 return field;
375 }
376
377 CharUnits regSize =
378 bitsToCharUnits(astContext.getTargetInfo().getRegisterWidth());
379 unsigned charBits = astContext.getCharWidth();
380
381 // Data about the start of the span we're accumulating to create an access
382 // unit from. 'Begin' is the first bitfield of the span. If 'begin' is
383 // 'fieldEnd', we've not got a current span. The span starts at the
384 // 'beginOffset' character boundary. 'bitSizeSinceBegin' is the size (in bits)
385 // of the span -- this might include padding when we've advanced to a
386 // subsequent bitfield run.
387 RecordDecl::field_iterator begin = fieldEnd;
388 CharUnits beginOffset;
389 uint64_t bitSizeSinceBegin;
390
391 // The (non-inclusive) end of the largest acceptable access unit we've found
392 // since 'begin'. If this is 'begin', we're gathering the initial set of
393 // bitfields of a new span. 'bestEndOffset' is the end of that acceptable
394 // access unit -- it might extend beyond the last character of the bitfield
395 // run, using available padding characters.
396 RecordDecl::field_iterator bestEnd = begin;
397 CharUnits bestEndOffset;
398 bool bestClipped; // Whether the representation must be in a byte array.
399
400 for (;;) {
401 // atAlignedBoundary is true if 'field' is the (potential) start of a new
402 // span (or the end of the bitfields). When true, limitOffset is the
403 // character offset of that span and barrier indicates whether the new
404 // span cannot be merged into the current one.
405 bool atAlignedBoundary = false;
406 bool barrier = false; // a barrier can be a zero Bit Width or non bit member
407 if (field != fieldEnd && field->isBitField()) {
408 uint64_t bitOffset = getFieldBitOffset(*field);
409 if (begin == fieldEnd) {
410 // Beginning a new span.
411 begin = field;
412 bestEnd = begin;
413
414 assert((bitOffset % charBits) == 0 && "Not at start of char");
415 beginOffset = bitsToCharUnits(bitOffset);
416 bitSizeSinceBegin = 0;
417 } else if ((bitOffset % charBits) != 0) {
418 // Bitfield occupies the same character as previous bitfield, it must be
419 // part of the same span. This can include zero-length bitfields, should
420 // the target not align them to character boundaries. Such non-alignment
421 // is at variance with the standards, which require zero-length
422 // bitfields be a barrier between access units. But of course we can't
423 // achieve that in the middle of a character.
424 assert(bitOffset ==
425 astContext.toBits(beginOffset) + bitSizeSinceBegin &&
426 "Concatenating non-contiguous bitfields");
427 } else {
428 // Bitfield potentially begins a new span. This includes zero-length
429 // bitfields on non-aligning targets that lie at character boundaries
430 // (those are barriers to merging).
431 if (field->isZeroLengthBitField())
432 barrier = true;
433 atAlignedBoundary = true;
434 }
435 } else {
436 // We've reached the end of the bitfield run. Either we're done, or this
437 // is a barrier for the current span.
438 if (begin == fieldEnd)
439 break;
440
441 barrier = true;
442 atAlignedBoundary = true;
443 }
444
445 // 'installBest' indicates whether we should create an access unit for the
446 // current best span: fields ['begin', 'bestEnd') occupying characters
447 // ['beginOffset', 'bestEndOffset').
448 bool installBest = false;
449 if (atAlignedBoundary) {
450 // 'field' is the start of a new span or the end of the bitfields. The
451 // just-seen span now extends to 'bitSizeSinceBegin'.
452
453 // Determine if we can accumulate that just-seen span into the current
454 // accumulation.
455 CharUnits accessSize = bitsToCharUnits(bitSizeSinceBegin + charBits - 1);
456 if (bestEnd == begin) {
457 // This is the initial run at the start of a new span. By definition,
458 // this is the best seen so far.
459 bestEnd = field;
460 bestEndOffset = beginOffset + accessSize;
461 // Assume clipped until proven not below.
462 bestClipped = true;
463 if (!bitSizeSinceBegin)
464 // A zero-sized initial span -- this will install nothing and reset
465 // for another.
466 installBest = true;
467 } else if (accessSize > regSize) {
468 // Accumulating the just-seen span would create a multi-register access
469 // unit, which would increase register pressure.
470 installBest = true;
471 }
472
473 if (!installBest) {
474 // Determine if accumulating the just-seen span will create an expensive
475 // access unit or not.
476 mlir::Type type = getUIntNType(astContext.toBits(accessSize));
478 cirGenTypes.getCGModule().errorNYI(
479 field->getSourceRange(), "NYI CheapUnalignedBitFieldAccess");
480
481 if (!installBest) {
482 // Find the next used storage offset to determine what the limit of
483 // the current span is. That's either the offset of the next field
484 // with storage (which might be field itself) or the end of the
485 // non-reusable tail padding.
486 CharUnits limitOffset;
487 for (auto probe = field; probe != fieldEnd; ++probe)
488 if (!isEmptyFieldForLayout(astContext, *probe)) {
489 // A member with storage sets the limit.
490 assert((getFieldBitOffset(*probe) % charBits) == 0 &&
491 "Next storage is not byte-aligned");
492 limitOffset = bitsToCharUnits(getFieldBitOffset(*probe));
493 goto FoundLimit;
494 }
495 limitOffset = cxxRecordDecl ? astRecordLayout.getNonVirtualSize()
496 : astRecordLayout.getDataSize();
497
498 FoundLimit:
499 CharUnits typeSize = getSize(type);
500 if (beginOffset + typeSize <= limitOffset) {
501 // There is space before limitOffset to create a naturally-sized
502 // access unit.
503 bestEndOffset = beginOffset + typeSize;
504 bestEnd = field;
505 bestClipped = false;
506 }
507 if (barrier) {
508 // The next field is a barrier that we cannot merge across.
509 installBest = true;
510 } else if (cirGenTypes.getCGModule()
512 .FineGrainedBitfieldAccesses) {
513 installBest = true;
514 } else {
515 // Otherwise, we're not installing. Update the bit size
516 // of the current span to go all the way to limitOffset, which is
517 // the (aligned) offset of next bitfield to consider.
518 bitSizeSinceBegin = astContext.toBits(limitOffset - beginOffset);
519 }
520 }
521 }
522 }
523
524 if (installBest) {
525 assert((field == fieldEnd || !field->isBitField() ||
526 (getFieldBitOffset(*field) % charBits) == 0) &&
527 "Installing but not at an aligned bitfield or limit");
528 CharUnits accessSize = bestEndOffset - beginOffset;
529 if (!accessSize.isZero()) {
530 // Add the storage member for the access unit to the record. The
531 // bitfields get the offset of their storage but come afterward and
532 // remain there after a stable sort.
533 mlir::Type type;
534 if (bestClipped) {
535 assert(getSize(getUIntNType(astContext.toBits(accessSize))) >
536 accessSize &&
537 "Clipped access need not be clipped");
538 type = getByteArrayType(accessSize);
539 } else {
540 type = getUIntNType(astContext.toBits(accessSize));
541 assert(getSize(type) == accessSize &&
542 "Unclipped access must be clipped");
543 }
544 members.push_back(makeStorageInfo(beginOffset, type));
545 for (; begin != bestEnd; ++begin)
546 if (!begin->isZeroLengthBitField())
547 members.push_back(MemberInfo(
548 beginOffset, MemberInfo::InfoKind::Field, nullptr, *begin));
549 }
550 // Reset to start a new span.
551 field = bestEnd;
552 begin = fieldEnd;
553 } else {
554 assert(field != fieldEnd && field->isBitField() &&
555 "Accumulating past end of bitfields");
556 assert(!barrier && "Accumulating across barrier");
557 // Accumulate this bitfield into the current (potential) span.
558 bitSizeSinceBegin += field->getBitWidthValue();
559 ++field;
560 }
561 }
562
563 return field;
564}
565
566void CIRRecordLowering::accumulateFields() {
567 for (RecordDecl::field_iterator field = recordDecl->field_begin(),
568 fieldEnd = recordDecl->field_end();
569 field != fieldEnd;) {
570 if (field->isBitField()) {
571 field = accumulateBitFields(field, fieldEnd);
572 assert((field == fieldEnd || !field->isBitField()) &&
573 "Failed to accumulate all the bitfields");
574 } else if (isEmptyFieldForLayout(astContext, *field)) {
575 // TODO(cir): do we want to do anything special about zero size members?
577 ++field;
578 } else {
579 // Use base subobject layout for potentially-overlapping fields,
580 // as it is done in RecordLayoutBuilder.
581 members.push_back(MemberInfo(
582 bitsToCharUnits(getFieldBitOffset(*field)),
583 MemberInfo::InfoKind::Field,
584 field->isPotentiallyOverlapping()
585 ? getStorageType(field->getType()->getAsCXXRecordDecl())
586 : getStorageType(*field),
587 *field));
588 ++field;
589 }
590 }
591}
592
593void CIRRecordLowering::calculateZeroInit() {
594 for (const MemberInfo &member : members) {
595 if (member.kind == MemberInfo::InfoKind::Field) {
596 if (!member.fieldDecl || isZeroInitializable(member.fieldDecl))
597 continue;
598 zeroInitializable = zeroInitializableAsBase = false;
599 return;
600 } else if (member.kind == MemberInfo::InfoKind::Base ||
601 member.kind == MemberInfo::InfoKind::VBase) {
602 if (isZeroInitializable(member.cxxRecordDecl))
603 continue;
604 zeroInitializable = false;
605 if (member.kind == MemberInfo::InfoKind::Base)
606 zeroInitializableAsBase = false;
607 }
608 }
609}
610
611void CIRRecordLowering::determinePacked(bool nvBaseType) {
612 if (packed)
613 return;
614 CharUnits alignment = CharUnits::One();
615 CharUnits nvAlignment = CharUnits::One();
616 CharUnits nvSize = !nvBaseType && cxxRecordDecl
617 ? astRecordLayout.getNonVirtualSize()
618 : CharUnits::Zero();
619
620 for (const MemberInfo &member : members) {
621 if (!member.data)
622 continue;
623 // If any member falls at an offset that it not a multiple of its alignment,
624 // then the entire record must be packed.
625 if (!member.offset.isMultipleOf(getAlignment(member.data)))
626 packed = true;
627 if (member.offset < nvSize)
628 nvAlignment = std::max(nvAlignment, getAlignment(member.data));
629 alignment = std::max(alignment, getAlignment(member.data));
630 }
631 // If the size of the record (the capstone's offset) is not a multiple of the
632 // record's alignment, it must be packed.
633 if (!members.back().offset.isMultipleOf(alignment))
634 packed = true;
635 // If the non-virtual sub-object is not a multiple of the non-virtual
636 // sub-object's alignment, it must be packed. We cannot have a packed
637 // non-virtual sub-object and an unpacked complete object or vise versa.
638 if (!nvSize.isMultipleOf(nvAlignment))
639 packed = true;
640 // Update the alignment of the sentinel.
641 if (!packed)
642 members.back().data = getUIntNType(astContext.toBits(alignment));
643}
644
645void CIRRecordLowering::insertPadding() {
646 std::vector<std::pair<CharUnits, CharUnits>> padding;
647 CharUnits size = CharUnits::Zero();
648 for (const MemberInfo &member : members) {
649 if (!member.data)
650 continue;
651 CharUnits offset = member.offset;
652 assert(offset >= size);
653 // Insert padding if we need to.
654 if (offset !=
655 size.alignTo(packed ? CharUnits::One() : getAlignment(member.data)))
656 padding.push_back(std::make_pair(size, offset - size));
657 size = offset + getSize(member.data);
658 }
659 if (padding.empty())
660 return;
661 padded = true;
662 // Add the padding to the Members list and sort it.
663 for (const std::pair<CharUnits, CharUnits> &paddingPair : padding)
664 members.push_back(makeStorageInfo(paddingPair.first,
665 getByteArrayType(paddingPair.second)));
666 llvm::stable_sort(members);
667}
668
669static cir::ArgPassingKind
671 switch (kind) {
673 return cir::ArgPassingKind::CanPassInRegs;
675 return cir::ArgPassingKind::CannotPassInRegs;
677 return cir::ArgPassingKind::CanNeverPassInRegs;
678 }
679 llvm_unreachable("unknown RecordArgPassingKind");
680}
681
682std::unique_ptr<CIRGenRecordLayout>
683CIRGenTypes::computeRecordLayout(const RecordDecl *rd, cir::RecordType *ty) {
684 CIRRecordLowering lowering(*this, rd, /*packed=*/false);
685 assert(ty->isIncomplete() && "recomputing record layout?");
686 lowering.lower(/*nonVirtualBaseType=*/false);
687
688 // If we're in C++, compute the base subobject type.
689 cir::RecordType baseTy;
690 if (llvm::isa<CXXRecordDecl>(rd) && !rd->isUnion() &&
691 !rd->hasAttr<FinalAttr>()) {
692 baseTy = *ty;
693 if (lowering.astRecordLayout.getNonVirtualSize() !=
694 lowering.astRecordLayout.getSize()) {
695 CIRRecordLowering baseLowering(*this, rd, /*Packed=*/lowering.packed);
696 baseLowering.lower(/*NonVirtualBaseType=*/true);
697 std::string baseIdentifier = getRecordTypeName(rd, ".base");
698 baseTy = builder.getCompleteNamedRecordType(
699 baseLowering.fieldTypes, baseLowering.packed, baseLowering.padded,
700 baseIdentifier);
701 // TODO(cir): add something like addRecordTypeName
702
703 // BaseTy and Ty must agree on their packedness for getCIRFieldNo to work
704 // on both of them with the same index.
705 assert(lowering.packed == baseLowering.packed &&
706 "Non-virtual and complete types must agree on packedness");
707 }
708 }
709
710 // Fill in the record *after* computing the base type. Filling in the body
711 // signifies that the type is no longer opaque and record layout is complete,
712 // but we may need to recursively layout rd while laying D out as a base type.
714 ty->complete(lowering.fieldTypes, lowering.packed, lowering.padded);
715
716 // Queue ABI metadata for the module-level cir.record_layouts attribute.
717 if (ty->getName()) {
718 mlir::MLIRContext *mlirCtx = ty->getContext();
719 cir::ArgPassingKind apk =
721
722 bool hasTrivialDestructor = true;
723 if (auto *cxxRD = dyn_cast<CXXRecordDecl>(rd))
724 hasTrivialDestructor = cxxRD->hasTrivialDestructor();
725 const auto &astLayout = astContext.getASTRecordLayout(rd);
726 uint64_t recordAlignInBytes = astLayout.getAlignment().getQuantity();
727
728 cgm.addRecordLayout(ty->getName(), cir::RecordLayoutAttr::get(
729 mlirCtx, apk, hasTrivialDestructor,
730 recordAlignInBytes));
731 }
732
733 auto rl = std::make_unique<CIRGenRecordLayout>(
734 ty ? *ty : cir::RecordType{}, baseTy ? baseTy : cir::RecordType{},
735 (bool)lowering.zeroInitializable, (bool)lowering.zeroInitializableAsBase);
736
737 rl->nonVirtualBases.swap(lowering.nonVirtualBases);
738 rl->completeObjectVirtualBases.swap(lowering.virtualBases);
739
740 // Add all the field numbers.
741 rl->fieldIdxMap.swap(lowering.fieldIdxMap);
742
743 rl->bitFields.swap(lowering.bitFields);
744
745 // Dump the layout, if requested.
746 if (getASTContext().getLangOpts().DumpRecordLayouts) {
747 llvm::outs() << "\n*** Dumping CIRgen Record Layout\n";
748 llvm::outs() << "Record: ";
749 rd->dump(llvm::outs());
750 llvm::outs() << "\nLayout: ";
751 rl->print(llvm::outs());
752 }
753
754 // TODO: implement verification
755 return rl;
756}
757
758void CIRGenRecordLayout::print(raw_ostream &os) const {
759 os << "<CIRecordLayout\n";
760 os << " CIR Type:" << completeObjectType << "\n";
761 if (baseSubobjectType)
762 os << " NonVirtualBaseCIRType:" << baseSubobjectType << "\n";
763 os << " IsZeroInitializable:" << zeroInitializable << "\n";
764 os << " BitFields:[\n";
765 std::vector<std::pair<unsigned, const CIRGenBitFieldInfo *>> bitInfo;
766 for (auto &[decl, info] : bitFields) {
767 const RecordDecl *rd = decl->getParent();
768 unsigned index = 0;
769 for (RecordDecl::field_iterator it = rd->field_begin(); *it != decl; ++it)
770 ++index;
771 bitInfo.push_back(std::make_pair(index, &info));
772 }
773 llvm::array_pod_sort(bitInfo.begin(), bitInfo.end());
774 for (std::pair<unsigned, const CIRGenBitFieldInfo *> &info : bitInfo) {
775 os.indent(4);
776 info.second->print(os);
777 os << "\n";
778 }
779 os << " ]>\n";
780}
781
782void CIRGenBitFieldInfo::print(raw_ostream &os) const {
783 os << "<CIRBitFieldInfo" << " name:" << name << " offset:" << offset
784 << " size:" << size << " isSigned:" << isSigned
785 << " storageSize:" << storageSize
786 << " storageOffset:" << storageOffset.getQuantity()
787 << " volatileOffset:" << volatileOffset
788 << " volatileStorageSize:" << volatileStorageSize
789 << " volatileStorageOffset:" << volatileStorageOffset.getQuantity() << ">";
790}
791
792void CIRGenRecordLayout::dump() const { print(llvm::errs()); }
793
794void CIRGenBitFieldInfo::dump() const { print(llvm::errs()); }
795
796void CIRRecordLowering::lowerUnion() {
797 CharUnits layoutSize = astRecordLayout.getSize();
798 mlir::Type storageType = nullptr;
799 bool seenNamedMember = false;
800
801 // Iterate through the fields setting bitFieldInfo and the Fields array. Also
802 // locate the "most appropriate" storage type.
803 for (const FieldDecl *field : recordDecl->fields()) {
804 mlir::Type fieldType;
805 if (field->isBitField()) {
806 if (field->isZeroLengthBitField())
807 continue;
808 fieldType = getBitfieldStorageType(field->getBitWidthValue());
809 setBitFieldInfo(field, CharUnits::Zero(), fieldType);
810 } else {
811 fieldType = getStorageType(field);
812 }
813
814 // This maps a field to its index. For unions, the index is always 0.
815 fieldIdxMap[field->getCanonicalDecl()] = 0;
816
817 // Compute zero-initializable status.
818 // This union might not be zero initialized: it may contain a pointer to
819 // data member which might have some exotic initialization sequence.
820 // If this is the case, then we ought not to try and come up with a "better"
821 // type, it might not be very easy to come up with a Constant which
822 // correctly initializes it.
823 if (!seenNamedMember) {
824 seenNamedMember = field->getIdentifier();
825 if (!seenNamedMember)
826 if (const RecordDecl *fieldRD = field->getType()->getAsRecordDecl())
827 seenNamedMember = fieldRD->findFirstNamedDataMember();
828 if (seenNamedMember && !isZeroInitializable(field)) {
829 zeroInitializable = zeroInitializableAsBase = false;
830 storageType = fieldType;
831 }
832 }
833
834 // Because our union isn't zero initializable, we won't be getting a better
835 // storage type.
836 if (!zeroInitializable)
837 continue;
838
839 // Conditionally update our storage type if we've got a new "better" one.
840 if (!storageType || getAlignment(fieldType) > getAlignment(storageType) ||
841 (getAlignment(fieldType) == getAlignment(storageType) &&
842 getSize(fieldType) > getSize(storageType)))
843 storageType = fieldType;
844
845 // NOTE(cir): Track all union member's types, not just the largest one. It
846 // allows for proper type-checking and retain more info for analisys.
847 fieldTypes.push_back(fieldType);
848 }
849
850 if (!storageType) {
851 appendPaddingBytes(layoutSize);
852 return;
853 }
854
855 if (layoutSize < getSize(storageType))
856 storageType = getByteArrayType(layoutSize);
857 else
858 appendPaddingBytes(layoutSize - getSize(storageType));
859
860 // Set packed if we need it.
861 if (!layoutSize.isMultipleOf(getAlignment(storageType)))
862 packed = true;
863}
864
865bool CIRRecordLowering::hasOwnStorage(const CXXRecordDecl *decl,
866 const CXXRecordDecl *query) {
867 const ASTRecordLayout &declLayout = astContext.getASTRecordLayout(decl);
868 if (declLayout.isPrimaryBaseVirtual() && declLayout.getPrimaryBase() == query)
869 return false;
870 for (const auto &base : decl->bases())
871 if (!hasOwnStorage(base.getType()->getAsCXXRecordDecl(), query))
872 return false;
873 return true;
874}
875
876/// The AAPCS requires that, when possible, bit-fields should
877/// be accessed using containers of the declared type width:
878/// When a volatile bit-field is read, and its container does not overlap with
879/// any non-bit-field member or any zero length bit-field member, its container
880/// must be read exactly once using the access width appropriate to the type of
881/// the container. When a volatile bit-field is written, and its container does
882/// not overlap with any non-bit-field member or any zero-length bit-field
883/// member, its container must be read exactly once and written exactly once
884/// using the access width appropriate to the type of the container. The two
885/// accesses are not atomic.
886///
887/// Enforcing the width restriction can be disabled using
888/// -fno-aapcs-bitfield-width.
889void CIRRecordLowering::computeVolatileBitfields() {
890 if (!isAAPCS() ||
891 !cirGenTypes.getCGModule().getCodeGenOpts().AAPCSBitfieldWidth)
892 return;
893
894 for (auto &[field, info] : bitFields) {
895 mlir::Type resLTy = cirGenTypes.convertTypeForMem(field->getType());
896
897 if (astContext.toBits(astRecordLayout.getAlignment()) <
898 getSizeInBits(resLTy).getQuantity())
899 continue;
900
901 // CIRRecordLowering::setBitFieldInfo() pre-adjusts the bit-field offsets
902 // for big-endian targets, but it assumes a container of width
903 // info.storageSize. Since AAPCS uses a different container size (width
904 // of the type), we first undo that calculation here and redo it once
905 // the bit-field offset within the new container is calculated.
906 const unsigned oldOffset =
907 isBigEndian() ? info.storageSize - (info.offset + info.size)
908 : info.offset;
909 // Offset to the bit-field from the beginning of the struct.
910 const unsigned absoluteOffset =
911 astContext.toBits(info.storageOffset) + oldOffset;
912
913 // Container size is the width of the bit-field type.
914 const unsigned storageSize = getSizeInBits(resLTy).getQuantity();
915 // Nothing to do if the access uses the desired
916 // container width and is naturally aligned.
917 if (info.storageSize == storageSize && (oldOffset % storageSize == 0))
918 continue;
919
920 // Offset within the container.
921 unsigned offset = absoluteOffset & (storageSize - 1);
922 // Bail out if an aligned load of the container cannot cover the entire
923 // bit-field. This can happen for example, if the bit-field is part of a
924 // packed struct. AAPCS does not define access rules for such cases, we let
925 // clang to follow its own rules.
926 if (offset + info.size > storageSize)
927 continue;
928
929 // Re-adjust offsets for big-endian targets.
930 if (isBigEndian())
931 offset = storageSize - (offset + info.size);
932
933 const CharUnits storageOffset =
934 astContext.toCharUnitsFromBits(absoluteOffset & ~(storageSize - 1));
935 const CharUnits end = storageOffset +
936 astContext.toCharUnitsFromBits(storageSize) -
938
939 const ASTRecordLayout &layout =
940 astContext.getASTRecordLayout(field->getParent());
941 // If we access outside memory outside the record, than bail out.
942 const CharUnits recordSize = layout.getSize();
943 if (end >= recordSize)
944 continue;
945
946 // Bail out if performing this load would access non-bit-fields members.
947 bool conflict = false;
948 for (const auto *f : recordDecl->fields()) {
949 // Allow sized bit-fields overlaps.
950 if (f->isBitField() && !f->isZeroLengthBitField())
951 continue;
952
953 const CharUnits fOffset = astContext.toCharUnitsFromBits(
954 layout.getFieldOffset(f->getFieldIndex()));
955
956 // As C11 defines, a zero sized bit-field defines a barrier, so
957 // fields after and before it should be race condition free.
958 // The AAPCS acknowledges it and imposes no restritions when the
959 // natural container overlaps a zero-length bit-field.
960 if (f->isZeroLengthBitField()) {
961 if (end > fOffset && storageOffset < fOffset) {
962 conflict = true;
963 break;
964 }
965 }
966
967 const CharUnits fEnd =
968 fOffset +
969 astContext.toCharUnitsFromBits(
970 getSizeInBits(cirGenTypes.convertTypeForMem(f->getType()))
971 .getQuantity()) -
973 // If no overlap, continue.
974 if (end < fOffset || fEnd < storageOffset)
975 continue;
976
977 // The desired load overlaps a non-bit-field member, bail out.
978 conflict = true;
979 break;
980 }
981
982 if (conflict)
983 continue;
984 // Write the new bit-field access parameters.
985 // As the storage offset now is defined as the number of elements from the
986 // start of the structure, we should divide the Offset by the element size.
988 storageOffset /
989 astContext.toCharUnitsFromBits(storageSize).getQuantity();
990 info.volatileStorageSize = storageSize;
991 info.volatileOffset = offset;
992 }
993}
994
995void CIRRecordLowering::accumulateBases() {
996 // If we've got a primary virtual base, we need to add it with the bases.
997 if (astRecordLayout.isPrimaryBaseVirtual()) {
998 const CXXRecordDecl *baseDecl = astRecordLayout.getPrimaryBase();
999 members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::InfoKind::Base,
1000 getStorageType(baseDecl), baseDecl));
1001 }
1002
1003 // Accumulate the non-virtual bases.
1004 for (const auto &base : cxxRecordDecl->bases()) {
1005 if (base.isVirtual())
1006 continue;
1007 // Bases can be zero-sized even if not technically empty if they
1008 // contain only a trailing array member.
1009 const CXXRecordDecl *baseDecl = base.getType()->getAsCXXRecordDecl();
1010 if (!baseDecl->isEmpty() &&
1011 !astContext.getASTRecordLayout(baseDecl).getNonVirtualSize().isZero()) {
1012 members.push_back(MemberInfo(astRecordLayout.getBaseClassOffset(baseDecl),
1013 MemberInfo::InfoKind::Base,
1014 getStorageType(baseDecl), baseDecl));
1015 }
1016 }
1017}
1018
/// Append virtual-base members (and any vtordisp storage slots they
/// require) to the member list.
void CIRRecordLowering::accumulateVBases() {
  for (const auto &base : cxxRecordDecl->vbases()) {
    const CXXRecordDecl *baseDecl = base.getType()->getAsCXXRecordDecl();
    // Empty records contribute no storage.
    if (isEmptyRecordForLayout(astContext, base.getType()))
      continue;
    CharUnits offset = astRecordLayout.getVBaseClassOffset(baseDecl);
    // If the vbase is a primary virtual base of some base, then it doesn't
    // get its own storage location but instead lives inside of that base.
    // Record it with null storage so the offset is still tracked.
    if (isOverlappingVBaseABI() && astContext.isNearlyEmpty(baseDecl) &&
        !hasOwnStorage(cxxRecordDecl, baseDecl)) {
      members.push_back(
          MemberInfo(offset, MemberInfo::InfoKind::VBase, nullptr, baseDecl));
      continue;
    }
    // If we've got a vtordisp, add it as a storage type: a 32-bit slot
    // placed 4 bytes before the vbase itself.
    if (astRecordLayout.getVBaseOffsetsMap()
            .find(baseDecl)
            ->second.hasVtorDisp())
      members.push_back(makeStorageInfo(offset - CharUnits::fromQuantity(4),
                                        getUIntNType(32)));
    members.push_back(MemberInfo(offset, MemberInfo::InfoKind::VBase,
                                 getStorageType(baseDecl), baseDecl));
  }
}
1043
1044void CIRRecordLowering::accumulateVPtrs() {
1045 if (astRecordLayout.hasOwnVFPtr())
1046 members.push_back(MemberInfo(CharUnits::Zero(), MemberInfo::InfoKind::VFPtr,
1047 getVFPtrType()));
1048
1049 if (astRecordLayout.hasOwnVBPtr())
1050 cirGenTypes.getCGModule().errorNYI(recordDecl->getSourceRange(),
1051 "accumulateVPtrs: hasOwnVBPtr");
1052}
1053
1054mlir::Type CIRRecordLowering::getVFPtrType() {
1055 return cir::VPtrType::get(builder.getContext());
1056}
Defines the clang::ASTContext interface.
static bool isAAPCS(const TargetInfo &targetInfo)
Helper method to check if the underlying ABI is AAPCS.
static cir::ArgPassingKind convertRecordArgPassingKind(RecordArgPassingKind kind)
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
static void print(llvm::raw_ostream &OS, const T &V, ASTContext &ASTCtx, QualType Ty)
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
bool isBigEndian() const
llvm::TypeSize getTypeAllocSizeInBits(mlir::Type ty) const
Returns the offset in bits between successive objects of the specified type, including alignment padd...
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
bool isNearlyEmpty(const CXXRecordDecl *RD) const
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:917
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
bool hasOwnVFPtr() const
hasOwnVFPtr - Does this class provide its own virtual-function table pointer, rather than inheriting ...
CharUnits getAlignment() const
getAlignment - Get the record alignment in characters.
bool hasOwnVBPtr() const
hasOwnVBPtr - Does this class provide its own virtual-base table pointer, rather than inheriting one ...
CharUnits getSize() const
getSize - Get the record size in characters.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getDataSize() const
getDataSize() - Get the record data size, which is the record size without tail padding,...
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getVBaseClassOffset(const CXXRecordDecl *VBase) const
getVBaseClassOffset - Get the offset, in chars, for the given base class.
const VBaseOffsetsMapTy & getVBaseOffsetsMap() const
const CXXRecordDecl * getPrimaryBase() const
getPrimaryBase - Get the primary base for this record.
bool isPrimaryBaseVirtual() const
isPrimaryBaseVirtual - Get whether the primary base for this record is virtual or not.
CharUnits getNonVirtualSize() const
getNonVirtualSize - Get the non-virtual size (in chars) of an object, which is the size of the object...
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
const clang::CodeGenOptions & getCodeGenOpts() const
This class organizes the cross-module state that is used while lowering AST types to CIR types.
Definition CIRGenTypes.h:49
CIRGenModule & getCGModule() const
Definition CIRGenTypes.h:83
std::string getRecordTypeName(const clang::RecordDecl *, llvm::StringRef suffix)
clang::ASTContext & getASTContext() const
std::unique_ptr< CIRGenRecordLayout > computeRecordLayout(const clang::RecordDecl *rd, cir::RecordType *ty)
mlir::Type convertTypeForMem(clang::QualType, bool forBitField=false)
Convert type T into an mlir::Type.
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool isEmpty() const
Determine whether this is an empty class in the sense of (C++11 [meta.unary.prop]).
Definition DeclCXX.h:1186
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
Definition CharUnits.h:143
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Definition CharUnits.h:201
static CharUnits Zero()
Zero - Construct a CharUnits quantity of zero.
Definition CharUnits.h:53
void dump() const
bool hasAttr() const
Definition DeclBase.h:585
Represents a member of a struct/union/class.
Definition Decl.h:3175
unsigned getBitWidthValue() const
Computes the bit width of this field, if this is a bit field.
Definition Decl.cpp:4754
FieldDecl * getCanonicalDecl() override
Retrieves the canonical declaration of this field.
Definition Decl.h:3422
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
Represents a struct/union/class.
Definition Decl.h:4342
RecordArgPassingKind getArgPassingRestrictions() const
Definition Decl.h:4483
specific_decl_iterator< FieldDecl > field_iterator
Definition Decl.h:4542
field_iterator field_begin() const
Definition Decl.cpp:5277
bool isUnion() const
Definition Decl.h:3943
virtual unsigned getRegisterWidth() const
Return the "preferred" register width on this target.
Definition TargetInfo.h:907
bool hasCheapUnalignedBitFieldAccess() const
Return true iff unaligned accesses are cheap.
Definition TargetInfo.h:921
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2254
QualType getType() const
Definition Decl.h:723
bool isValidFundamentalIntWidth(unsigned width)
Definition CIRTypes.cpp:601
bool isEmptyFieldForLayout(const ASTContext &context, const FieldDecl *fd)
isEmptyFieldForLayout - Return true if the field is "empty", that is, either a zero-width bit-field o...
bool isEmptyRecordForLayout(const ASTContext &context, QualType t)
isEmptyRecordForLayout - Return true if a structure contains only empty base classes (per isEmptyReco...
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::VariadicDynCastAllOfMatcher< Decl, FieldDecl > fieldDecl
Matches field declarations.
const internal::VariadicDynCastAllOfMatcher< Decl, CXXRecordDecl > cxxRecordDecl
Matches C++ class declarations.
const internal::VariadicAllOfMatcher< Decl > decl
Matches declarations.
const internal::VariadicDynCastAllOfMatcher< Decl, RecordDecl > recordDecl
Matches class, struct, and union declarations.
RangeSelector member(std::string ID)
Given a MemberExpr, selects the member token. ID is the node's binding in the match result.
Stencil run(MatchConsumer< std::string > C)
Wraps a MatchConsumer in a Stencil, so that it can be used in a Stencil.
Definition Stencil.cpp:489
The JSON file list parser is used to communicate input to InstallAPI.
bool operator<(DeclarationName LHS, DeclarationName RHS)
Ordering on two declaration names.
RecordArgPassingKind
Enum that represents the different ways arguments are passed to and returned from function calls.
Definition Decl.h:4319
@ CanPassInRegs
The argument of this type can be passed directly in registers.
Definition Decl.h:4321
@ CanNeverPassInRegs
The argument of this type cannot be passed directly in registers.
Definition Decl.h:4335
@ CannotPassInRegs
The argument of this type cannot be passed directly in registers.
Definition Decl.h:4330
unsigned long uint64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
void __ovld __conv barrier(cl_mem_fence_flags)
All work-items in a work-group executing the kernel on a processor must execute this function before ...
#define false
Definition stdbool.h:26
#define true
Definition stdbool.h:25
static bool zeroSizeRecordMembers()
static bool checkBitfieldClipping()
static bool astRecordDeclAttr()
unsigned offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the c...
void print(llvm::raw_ostream &os) const
unsigned storageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned volatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
clang::CharUnits storageOffset
The offset of the bitfield storage from the start of the record.
unsigned size
The total size of the bit-field, in bits.
unsigned isSigned
Whether the bit-field is signed.
clang::CharUnits volatileStorageOffset
The offset of the bitfield storage from the start of the record.
unsigned volatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the c...
llvm::StringRef name
The name of a bitfield.