using namespace CodeGen;
using namespace swiftcall;
static llvm::Type *getCommonType(llvm::Type *first, llvm::Type *second) {
  assert(first != second);

  // Allow pointers to merge with integers, but prefer the integer type.
  if (first->isIntegerTy()) {
    if (second->isPointerTy()) return first;
  } else if (first->isPointerTy()) {
    if (second->isIntegerTy()) return second;
    if (second->isPointerTy()) return first;

  // Allow two vectors with the same size to be merged.
  } else if (auto firstVecTy = dyn_cast<llvm::VectorType>(first)) {
    if (auto secondVecTy = dyn_cast<llvm::VectorType>(second)) {
      if (auto commonTy = getCommonType(firstVecTy->getElementType(),
                                        secondVecTy->getElementType())) {
        return (commonTy == firstVecTy->getElementType() ? first : second);
      }
    }
  }

  return nullptr;
}
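// Illustrative sketch (not part of this file): what the merging preference
// above means for a couple of concrete LLVM types. An integer merged with a
// pointer keeps the integer type, while an integer and a float have no
// common type, so the caller falls back to treating the bytes as opaque.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

static void getCommonTypeExamples(llvm::LLVMContext &ctx) {
  llvm::Type *i64 = llvm::Type::getInt64Ty(ctx);
  llvm::Type *ptr = llvm::PointerType::get(ctx, /*AddressSpace=*/0);
  llvm::Type *f32 = llvm::Type::getFloatTy(ctx);
  // getCommonType(i64, ptr) == i64      (integer preferred over pointer)
  // getCommonType(i64, f32) == nullptr  (no ABI-agnostic common type)
  (void)i64; (void)ptr; (void)f32;
}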
// Excerpts from SwiftAggLowering::addTypedData(QualType type, CharUnits begin):

  // Array types: lay out each element in turn.
  } else if (type->isArrayType()) {
    // ...
    for (uint64_t i = 0, e = arrayType->getZExtSize(); i != e; ++i) {
      // ...
    }

    // ...

    // Complex types: the imaginary component follows the real one.
    addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize);

    // ...

    // Atomic types can be padded beyond the size of the underlying value.
    auto atomicPadding = atomicSize - valueSize;
// Excerpt from the union case of the record-handling path: every member
// starts at the beginning of the record, so bit-fields use bit offset 0.
  for (auto *field : record->fields()) {
    if (field->isBitField()) {
      addBitFieldData(field, begin, 0);
// Excerpts from the layout-driven record path: walk non-virtual bases, then
// fields, then virtual bases, adding each at its offset within the record.
  auto cxxRecord = dyn_cast<CXXRecordDecl>(record);
  // ...

  // Non-virtual bases.
  for (auto &baseSpecifier : cxxRecord->bases()) {
    if (baseSpecifier.isVirtual()) continue;
    auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl();
    // ...
  }

  // Fields, at their offsets from the record layout.
  for (auto *field : record->fields()) {
    auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
    if (field->isBitField()) {
      addBitFieldData(field, begin, fieldOffsetInBits);
    // ...
  }

  // Virtual bases.
  for (auto &vbaseSpecifier : cxxRecord->vbases()) {
    auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl();
    // ...
  }
void SwiftAggLowering::addBitFieldData(const FieldDecl *bitfield,
                                       CharUnits recordBegin,
                                       uint64_t bitfieldBitBegin) {
  auto &ctx = CGM.getContext();
  auto width = bitfield->getBitWidthValue(ctx);

  // Zero-width bit-fields contribute no data.
  if (width == 0) return;

  // Compute the byte range covered by the bit-field and add it as opaque data.
  CharUnits bitfieldByteBegin = ctx.toCharUnitsFromBits(bitfieldBitBegin);
  uint64_t bitfieldBitLast = bitfieldBitBegin + width - 1;
  // ... (bitfieldByteEnd is derived from bitfieldBitLast)
  addOpaqueData(recordBegin + bitfieldByteBegin,
                recordBegin + bitfieldByteEnd);
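// Worked example of the arithmetic above (hypothetical values, not from the
// file): for a bit-field starting at bit 3 with width 10,
//   bitfieldByteBegin = toCharUnitsFromBits(3)  -> byte 0
//   bitfieldBitLast   = 3 + 10 - 1              -> bit 12, which lies in byte 1
// so, assuming bitfieldByteEnd is one byte past the byte containing
// bitfieldBitLast, the opaque range added is [recordBegin + 0, recordBegin + 2).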
// addTypedData(llvm::Type *type, CharUnits begin):
  assert(type && "didn't provide type for typed data");
  // ...

// addTypedData(llvm::Type *type, CharUnits begin, CharUnits end):
  assert(type && "didn't provide type for typed data");
  // ...

  // Legalize vector types by splitting them into legal component types.
  if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
    // componentTys is filled by legalizeVectorType() (call elided).
    assert(componentTys.size() >= 1);

    // Walk all but the last component; the last one takes whatever is left.
    for (size_t i = 0, e = componentTys.size(); i != e - 1; ++i) {
      llvm::Type *componentTy = componentTys[i];
      auto componentSize = getTypeStoreSize(CGM, componentTy);
      assert(componentSize < end - begin);
      addLegalTypedData(componentTy, begin, begin + componentSize);
      begin += componentSize;
    }

    return addLegalTypedData(componentTys.back(), begin, end);
  }

  // Legalize integer types.
  if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
    // ...
  }

  return addLegalTypedData(type, begin, end);
void SwiftAggLowering::addLegalTypedData(llvm::Type *type,
                                         CharUnits begin, CharUnits end) {
  // (An alignment check is elided here: data that isn't naturally aligned
  // for its type is either split up, for vectors, or added as opaque bytes.)
  if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
    // `split` comes from splitLegalVectorType() (call elided).
    auto eltTy = split.first;
    auto numElts = split.second;

    auto eltSize = (end - begin) / numElts;
    for (size_t i = 0, e = numElts; i != e; ++i) {
      addLegalTypedData(eltTy, begin, begin + eltSize);
      begin += eltSize;
    }
    assert(begin == end);
    return;
  }
  // ...

  addEntry(type, begin, end);
}
void SwiftAggLowering::addEntry(llvm::Type *type,
                                CharUnits begin, CharUnits end) {
  assert((!type ||
          (!isa<llvm::StructType>(type) && !isa<llvm::ArrayType>(type))) &&
         "cannot add aggregate-typed data");

  // Fast path: we can just add entries to the end.
  if (Entries.empty() || Entries.back().End <= begin) {
    Entries.push_back({begin, end, type});
    return;
  }

  // Find the first existing entry that ends after the start of the new data.
  size_t index = Entries.size() - 1;
  while (index != 0) {
    if (Entries[index - 1].End <= begin) break;
    --index;
  }

  // If that entry starts after the end of the new data, there's no conflict:
  // insert the new entry before it.
  if (Entries[index].Begin >= end) {
    Entries.insert(Entries.begin() + index, {begin, end, type});
    return;
  }

  // Otherwise, the ranges overlap.
restartAfterSplit:

  // Simplest case: an exact overlap.
  if (Entries[index].Begin == begin && Entries[index].End == end) {
    // If the types match exactly, we're done.
    if (Entries[index].Type == type) return;

    // If either type is opaque, the entry stays (or becomes) opaque.
    if (Entries[index].Type == nullptr) {
      return;
    } else if (type == nullptr) {
      Entries[index].Type = nullptr;
      return;
    }

    // If the types disagree in an ABI-agnostic way, resolve the conflict
    // with getCommonType(); otherwise make the entry opaque.
    if (auto entryType = getCommonType(Entries[index].Type, type)) {
      Entries[index].Type = entryType;
      return;
    }
    Entries[index].Type = nullptr;
    return;
  }

  // We have a partial overlap.  If the new data is a vector type, split it
  // into its elements and add those individually.
  if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
    auto eltTy = vecTy->getElementType();
    CharUnits eltSize =
        (end - begin) / cast<llvm::FixedVectorType>(vecTy)->getNumElements();
    for (unsigned i = 0,
                  e = cast<llvm::FixedVectorType>(vecTy)->getNumElements();
         i != e; ++i) {
      addEntry(eltTy, begin, begin + eltSize);
      begin += eltSize;
    }
    assert(begin == end);
    return;
  }

  // If the existing entry is a vector type, split it and try again.
  if (Entries[index].Type && Entries[index].Type->isVectorTy()) {
    splitVectorEntry(index);
    goto restartAfterSplit;
  }

  // Otherwise, the existing entry has to become opaque.
  Entries[index].Type = nullptr;

  // Stretch the start of the entry to the beginning of the new range.
  if (begin < Entries[index].Begin) {
    Entries[index].Begin = begin;
    assert(index == 0 || begin >= Entries[index - 1].End);
  }

  // Stretch the end of the entry towards the end of the new range, merging
  // with any later entries we run into along the way.
  while (end > Entries[index].End) {
    assert(Entries[index].Type == nullptr);

    // If the range doesn't overlap the next entry, we're done.
    if (index == Entries.size() - 1 || end <= Entries[index + 1].Begin) {
      Entries[index].End = end;
      break;
    }

    // Otherwise, stretch to the start of the next entry and continue there.
    Entries[index].End = Entries[index + 1].Begin;
    index++;

    if (Entries[index].Type == nullptr)
      continue;

    // Split vector entries unless the new range completely covers them.
    if (Entries[index].Type->isVectorTy() &&
        end < Entries[index].End) {
      splitVectorEntry(index);
    }

    // Make the entry opaque.
    Entries[index].Type = nullptr;
  }
}
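// Standalone sketch (hypothetical simplified types, not the CodeGen ones) of
// the exact-overlap resolution above: identical types are kept, an opaque
// side forces the entry to stay or become opaque, and a remaining conflict
// would be handed to getCommonType() (omitted here), with opaque as the
// final fallback.
#include <cassert>

namespace {
const char *kInt32 = "i32";
const char *kFloat = "float";

// Returns the type the entry ends up with; nullptr means "opaque bytes".
const char *resolveExactOverlap(const char *entryType, const char *newType) {
  if (entryType == newType) return entryType; // same type: nothing to do
  if (entryType == nullptr) return nullptr;   // entry already opaque: stays opaque
  if (newType == nullptr) return nullptr;     // new data is opaque: entry goes opaque
  return nullptr;                             // conflicting types (getCommonType elided)
}
} // namespace

static void exactOverlapExamples() {
  assert(resolveExactOverlap(kInt32, kInt32) == kInt32);
  assert(resolveExactOverlap(nullptr, kInt32) == nullptr);
  assert(resolveExactOverlap(kInt32, kFloat) == nullptr);
}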
void SwiftAggLowering::splitVectorEntry(unsigned index) {
  auto vecTy = cast<llvm::VectorType>(Entries[index].Type);
  // `split` is the result of splitLegalVectorType() for this entry (elided).
  auto eltTy = split.first;
  CharUnits eltSize = getTypeStoreSize(CGM, eltTy);
  auto numElts = split.second;
  Entries.insert(Entries.begin() + index + 1, numElts - 1, StorageEntry());

  // Rewrite the original entry and the newly inserted ones as consecutive
  // element-typed entries.
  CharUnits begin = Entries[index].Begin;
  for (unsigned i = 0; i != numElts; ++i) {
    unsigned idx = index + i;
    Entries[idx].Type = eltTy;
    Entries[idx].Begin = begin;
    Entries[idx].End = begin + eltSize;
    begin += eltSize;
  }
}
static bool isMergeableEntryType(llvm::Type *type) {
  // Opaquely-typed memory is always mergeable.
  if (type == nullptr) return true;

  // ...

  // Floating-point and vector types should never be merged into opaque
  // chunks; integers and pointers are fine.
  return (!type->isFloatingPointTy() && !type->isVectorTy());
}
bool SwiftAggLowering::shouldMergeEntries(const StorageEntry &first,
                                          const StorageEntry &second,
                                          CharUnits chunkSize) {
  // ... (merges only adjacent entries that fall in the same chunk and whose
  //      types are mergeable per isMergeableEntryType)
}
void SwiftAggLowering::finish() {
  if (Entries.empty()) {
    Finished = true;
    return;
  }

  // We logically split the layout into chunks of this size, which is
  // generally the size of a pointer.
  const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM);

  // First pass: if two entries should be merged, make them both opaque
  // and stretch one to meet the next.
  bool hasOpaqueEntries = (Entries[0].Type == nullptr);
  for (size_t i = 1, e = Entries.size(); i != e; ++i) {
    if (shouldMergeEntries(Entries[i - 1], Entries[i], chunkSize)) {
      Entries[i - 1].Type = nullptr;
      Entries[i].Type = nullptr;
      Entries[i - 1].End = Entries[i].Begin;
      hasOpaqueEntries = true;
    } else if (Entries[i].Type == nullptr) {
      hasOpaqueEntries = true;
    }
  }

  // The rest of the algorithm leaves non-opaque entries alone, so if there
  // are no opaque entries, we're done.
  if (!hasOpaqueEntries) {
    Finished = true;
    return;
  }

  // Move the entries aside and rebuild Entries from them.
  auto orig = std::move(Entries);
  assert(Entries.empty());

  for (size_t i = 0, e = orig.size(); i != e; ++i) {
    // Just copy over non-opaque entries.
    if (orig[i].Type != nullptr) {
      Entries.push_back(orig[i]);
      continue;
    }

    // Scan forward to find the full extent of the next opaque range.
    auto begin = orig[i].Begin;
    auto end = orig[i].End;
    while (i + 1 != e &&
           orig[i + 1].Type == nullptr &&
           end == orig[i + 1].Begin) {
      end = orig[i + 1].End;
      i++;
    }

    // Add integer entries for the merged opaque range, in units no larger
    // than the chunk size.
    do {
      auto chunkBegin = getOffsetAtStartOfUnit(begin, chunkSize);
      CharUnits chunkEnd = chunkBegin + chunkSize;
      CharUnits localEnd = std::min(end, chunkEnd);

      // Find the largest power-of-two unit that still ends within the chunk.
      CharUnits unitSize = CharUnits::One();
      CharUnits unitBegin, unitEnd;
      for (; ; unitSize *= 2) {
        assert(unitSize <= chunkSize);
        unitBegin = getOffsetAtStartOfUnit(begin, unitSize);
        unitEnd = unitBegin + unitSize;
        if (unitEnd >= localEnd) break;
      }

      // Add an integer entry covering [unitBegin, unitEnd).
      auto entryTy = llvm::IntegerType::get(CGM.getLLVMContext(),
                                            CGM.getContext().toBits(unitSize));
      Entries.push_back({unitBegin, unitEnd, entryTy});

      // Continue from the end of this unit.
      begin = unitEnd;
    } while (begin != end);
  }

  Finished = true;
}
void SwiftAggLowering::enumerateComponents(EnumerationCallback callback) const {
  assert(Finished && "haven't yet finished lowering");

  for (auto &entry : Entries) {
    callback(entry.Begin, entry.End, entry.Type);
  }
}
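// Hedged usage sketch (not from this file): once lowering has finished, the
// expanded components can be visited with any callable matching
// EnumerationCallback, i.e. void(CharUnits offset, CharUnits end, llvm::Type *type).
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/Support/raw_ostream.h"

static void dumpComponents(const clang::CodeGen::swiftcall::SwiftAggLowering &lowering) {
  lowering.enumerateComponents(
      [](clang::CharUnits offset, clang::CharUnits end, llvm::Type *type) {
        llvm::errs() << "[" << offset.getQuantity() << ", "
                     << end.getQuantity() << ") ";
        type->print(llvm::errs());
        llvm::errs() << "\n";
      });
}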
std::pair<llvm::StructType *, llvm::Type *>
SwiftAggLowering::getCoerceAndExpandTypes() const {
  assert(Finished && "haven't yet finished lowering");

  auto &ctx = CGM.getLLVMContext();

  if (Entries.empty()) {
    auto type = llvm::StructType::get(ctx);
    return {type, type};
  }

  // Build the coercion struct, inserting explicit i8-array padding wherever
  // there is a gap between consecutive entries.
  llvm::SmallVector<llvm::Type *, 8> elts;
  CharUnits lastEnd = CharUnits::Zero();
  bool hasPadding = false;
  bool packed = false; // (set when an entry is not ABI-aligned; logic elided)
  for (auto &entry : Entries) {
    if (entry.Begin != lastEnd) {
      auto paddingSize = entry.Begin - lastEnd;
      assert(!paddingSize.isNegative());
      auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx),
                                          paddingSize.getQuantity());
      elts.push_back(padding);
      hasPadding = true;
    }

    elts.push_back(entry.Type);

    lastEnd = entry.Begin + getTypeAllocSize(CGM, entry.Type);
    assert(entry.End <= lastEnd);
  }

  auto coercionType = llvm::StructType::get(ctx, elts, packed);

  // The unpadded type is the same sequence of component types without the
  // padding arrays (or the sole component type if there is only one).
  llvm::Type *unpaddedType = coercionType;
  if (hasPadding) {
    elts.clear();
    for (auto &entry : Entries) {
      elts.push_back(entry.Type);
    }
    if (elts.size() == 1) {
      unpaddedType = elts[0];
    } else {
      unpaddedType = llvm::StructType::get(ctx, elts, /*isPacked=*/false);
    }
  } else if (Entries.size() == 1) {
    unpaddedType = Entries[0].Type;
  }

  return { coercionType, unpaddedType };
}
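// Illustrative sketch (hypothetical layout, not from this file): for entries
// i32 at [0,4) and i64 at [8,16), the coercion type carries explicit padding
// while the unpadded type does not.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

static void coerceAndExpandExample(llvm::LLVMContext &ctx) {
  llvm::Type *i32 = llvm::Type::getInt32Ty(ctx);
  llvm::Type *i64 = llvm::Type::getInt64Ty(ctx);
  llvm::Type *pad = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx), 4);

  // Expected shape of the two results for this layout:
  llvm::StructType *coercionType =
      llvm::StructType::get(ctx, {i32, pad, i64}, /*isPacked=*/false);
  llvm::StructType *unpaddedType =
      llvm::StructType::get(ctx, {i32, i64}, /*isPacked=*/false);
  (void)coercionType;
  (void)unpaddedType;
}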
bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
  assert(Finished && "haven't yet finished lowering");

  // Empty types don't need to be passed indirectly.
  if (Entries.empty()) return false;

  // Avoid copying the array of types when there is only one entry.
  if (Entries.size() == 1) {
    // ...
  }

  llvm::SmallVector<llvm::Type *, 8> componentTys;
  componentTys.reserve(Entries.size());
  for (auto &entry : Entries) {
    componentTys.push_back(entry.Type);
  }
  return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue);
}
bool swiftcall::shouldPassIndirectly(CodeGenModule &CGM,
                                     ArrayRef<llvm::Type *> types,
                                     bool asReturnValue) {
  return getSwiftABIInfo(CGM).shouldPassIndirectly(types, asReturnValue);
}
  // From swiftcall::getNaturalAlignment(): the natural alignment used by the
  // Swift CC is the type's store size rounded up to a power of two.
  size = llvm::bit_ceil(size);
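// Hedged illustration of the rounding above (not from this file):
// llvm::bit_ceil returns the smallest power of two that is not smaller than
// its argument, e.g. a 12-byte store size yields a 16-byte natural alignment.
#include "llvm/ADT/bit.h"

static unsigned long long roundUpToPowerOfTwo(unsigned long long storeSizeInBytes) {
  return llvm::bit_ceil(storeSizeInBytes); // 12 -> 16, 8 -> 8, 3 -> 4
}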
bool swiftcall::isLegalIntegerType(CodeGenModule &CGM,
                                   llvm::IntegerType *intTy) {
  auto size = intTy->getBitWidth();
  // ... (a switch over the common widths; 128-bit legality depends on
  //      whether the target supports __int128)
}
bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::VectorType *vectorTy) {
  return isLegalVectorType(
      CGM, vectorSize, vectorTy->getElementType(),
      cast<llvm::FixedVectorType>(vectorTy)->getNumElements());
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::Type *eltTy, unsigned numElts) {
  assert(numElts > 1 && "illegal vector length");
  return getSwiftABIInfo(CGM).isLegalVectorType(vectorSize, eltTy, numElts);
}
std::pair<llvm::Type *, unsigned>
swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                llvm::VectorType *vectorTy) {
  auto numElts = cast<llvm::FixedVectorType>(vectorTy)->getNumElements();
  auto eltTy = vectorTy->getElementType();
  // Split in half if the half-size vector is itself legal; otherwise fall
  // back to one component per scalar element.
  if (numElts >= 4 && isPowerOf2(numElts) &&
      isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2))
    return {llvm::FixedVectorType::get(eltTy, numElts / 2), 2};
  return {eltTy, numElts};
}
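// Illustrative sketch (hypothetical legality, not from this file): for an
// 8-element float vector the split is either two <4 x float> halves, if the
// target considers the half-size vector legal, or eight scalar floats.
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <utility>

static std::pair<llvm::Type *, unsigned>
exampleSplitOfV8F32(llvm::LLVMContext &ctx, bool halfIsLegal) {
  llvm::Type *floatTy = llvm::Type::getFloatTy(ctx);
  if (halfIsLegal)
    return {llvm::FixedVectorType::get(floatTy, 4), 2}; // two <4 x float> pieces
  return {floatTy, 8};                                  // eight scalar components
}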
void swiftcall::legalizeVectorType(CodeGenModule &CGM,
                                   CharUnits origVectorSize,
                                   llvm::VectorType *origVectorTy,
                                   llvm::SmallVectorImpl<llvm::Type *> &components) {
  // If it's already a legal vector type, use it as-is.
  if (isLegalVectorType(CGM, origVectorSize, origVectorTy)) {
    components.push_back(origVectorTy);
    return;
  }
  // Try to split the vector into legal subvectors.
  auto numElts = cast<llvm::FixedVectorType>(origVectorTy)->getNumElements();
  auto eltTy = origVectorTy->getElementType();
  assert(numElts != 1);
  // The largest candidate subvector size is the largest power of two that
  // does not exceed the element count.
  unsigned logCandidateNumElts = llvm::Log2_32(numElts);
  unsigned candidateNumElts = 1U << logCandidateNumElts;
  assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts);
  // Minor optimization: don't check the legality of this exact size twice.
  if (candidateNumElts == numElts) {
    logCandidateNumElts--;
    candidateNumElts >>= 1;
  }
  CharUnits eltSize = (origVectorSize / numElts);
  CharUnits candidateSize = eltSize * candidateNumElts;
  while (logCandidateNumElts > 0) {
    assert(candidateNumElts == 1U << logCandidateNumElts);
    assert(candidateNumElts <= numElts);
    assert(candidateSize == eltSize * candidateNumElts);
    // Skip illegal candidate sizes.
    if (!isLegalVectorType(CGM, candidateSize, eltTy, candidateNumElts)) {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
      continue;
    }
    // Add the right number of subvectors of this size.
    auto numVecs = numElts >> logCandidateNumElts;
    components.append(numVecs,
                      llvm::FixedVectorType::get(eltTy, candidateNumElts));
    numElts -= (numVecs << logCandidateNumElts);
    if (numElts == 0) return;
    // A remaining non-power-of-two count may itself be legal as a vector.
    if (numElts > 2 && !isPowerOf2(numElts) &&
        isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) {
      components.push_back(llvm::FixedVectorType::get(eltTy, numElts));
      return;
    }
    // Shrink the candidate below the remaining element count and continue.
    do {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
    } while (candidateNumElts > numElts);
  }
  // Otherwise, just append the remaining individual elements.
  components.append(numElts, eltTy);
}
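// Simplified, hedged sketch (not from this file) of the greedy decomposition
// above, assuming every power-of-two subvector size is legal: a 7-element
// vector decomposes into a 4-element piece, a 2-element piece, and a scalar.
#include <vector>

static std::vector<unsigned> decomposeIntoPowersOfTwo(unsigned numElts) {
  std::vector<unsigned> pieces; // element count of each emitted component
  unsigned candidate = 1;
  while (candidate * 2 <= numElts) candidate *= 2; // largest power of two <= numElts
  while (numElts > 0) {
    while (candidate > numElts) candidate /= 2;
    pieces.push_back(candidate); // e.g. 7 -> {4, 2, 1}
    numElts -= candidate;
  }
  return pieces;
}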
// Excerpt from classifyExpandedType(): an empty lowering is simply ignored.
  if (lowering.empty()) {
    // ...

// Excerpts from classifyType(): complex, vector, and void types get
// dedicated handling before the general aggregate path.
  if (isa<ComplexType>(type)) {
    // ...
  if (isa<VectorType>(type)) {
    // ...
  if (type->isVoidType()) {
    // ...

void swiftcall::computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  FI.getReturnInfo() = classifyReturnType(CGM, FI.getReturnType());
  for (unsigned i = 0, e = FI.arg_size(); i != e; ++i) {
    auto &argInfo = FI.arg_begin()[i];
    argInfo.info = classifyArgumentType(CGM, argInfo.type);
  }
}