using namespace clang;
using namespace CodeGen;
using namespace swiftcall;

static const SwiftABIInfo &getSwiftABIInfo(CodeGenModule &CGM) {
  return cast<SwiftABIInfo>(CGM.getTargetCodeGenInfo().getABIInfo());
}

static bool isPowerOf2(unsigned n) {
  return n == (n & -n);
}

static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeStoreSize(type));
}

static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(type));
}
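/// Given two types with the same size, try to find a common type that
/// overlapping storage entries can share; returns null if the types
/// cannot be merged.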
static llvm::Type *getCommonType(llvm::Type *first, llvm::Type *second) {
  assert(first != second);

  // Allow pointers to merge with integers, but prefer the integer type.
  if (first->isIntegerTy()) {
    if (second->isPointerTy()) return first;

  // Allow two pointers to be merged.
  } else if (first->isPointerTy()) {
    if (second->isIntegerTy()) return second;
    if (second->isPointerTy()) return first;

  // Allow two vectors to be merged (given that they have the same size).
  // This assumes that we never have two different vector register sets.
  } else if (auto firstVecTy = dyn_cast<llvm::VectorType>(first)) {
    if (auto secondVecTy = dyn_cast<llvm::VectorType>(second)) {
      if (auto commonTy = getCommonType(firstVecTy->getElementType(),
                                        secondVecTy->getElementType())) {
        return (commonTy == firstVecTy->getElementType() ? first : second);
      }
    }
  }

  return nullptr;
}
void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
  // Deal with various aggregate types as special cases:

  // Record types.
  if (auto recType = type->getAs<RecordType>()) {
    addTypedData(recType->getDecl(), begin);

  // Array types.
  } else if (type->isArrayType()) {
    // Incomplete array types (flexible array members?) don't provide
    // data to lay out, and the other cases shouldn't be possible.
    auto arrayType = CGM.getContext().getAsConstantArrayType(type);
    if (!arrayType) return;

    QualType eltType = arrayType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    for (uint64_t i = 0, e = arrayType->getSize().getZExtValue(); i != e; ++i) {
      addTypedData(eltType, begin + i * eltSize);
    }

  // Complex types.  We lower these to their component types.
  } else if (auto complexType = type->getAs<ComplexType>()) {
    auto eltType = complexType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    auto eltLLVMType = CGM.getTypes().ConvertType(eltType);
    addTypedData(eltLLVMType, begin, begin + eltSize);
    addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize);
  // Atomic types.
  } else if (auto atomicType = type->getAs<AtomicType>()) {
    auto valueType = atomicType->getValueType();
    auto atomicSize = CGM.getContext().getTypeSizeInChars(atomicType);
    auto valueSize = CGM.getContext().getTypeSizeInChars(valueType);

    addTypedData(valueType, begin);

    // Add atomic padding.
    auto atomicPadding = atomicSize - valueSize;
    if (atomicPadding > CharUnits::Zero())
      addOpaqueData(begin + valueSize, begin + atomicSize);

  // Everything else is scalar and should not convert as an LLVM aggregate.
  } else {
    auto llvmType = CGM.getTypes().ConvertType(type);
    addTypedData(llvmType, begin);
  }
}
void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin) {
  addTypedData(record, begin, CGM.getContext().getASTRecordLayout(record));
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
                                    const ASTRecordLayout &layout) {
  // Unions are a special case.
  if (record->isUnion()) {
    for (auto field : record->fields()) {
      if (field->isBitField()) {
        addBitFieldData(field, begin, 0);
      } else {
        addTypedData(field->getType(), begin);
      }
    }
    return;
  }
  // Note that correctness does not rely on us adding things in
  // their actual order of layout; it's just somewhat more efficient
  // for the builder.

  // With that in mind, add "early" C++ data.
  auto cxxRecord = dyn_cast<CXXRecordDecl>(record);
  if (cxxRecord) {
    //   - a v-table pointer, if the class adds its own
    if (layout.hasOwnVFPtr()) {
      addTypedData(CGM.Int8PtrTy, begin);
    }

    //   - non-virtual bases
    for (auto &baseSpecifier : cxxRecord->bases()) {
      if (baseSpecifier.isVirtual()) continue;

      auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getBaseClassOffset(baseRecord));
    }
  }
  // Add fields.
  for (auto field : record->fields()) {
    auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
    if (field->isBitField()) {
      addBitFieldData(field, begin, fieldOffsetInBits);
    } else {
      addTypedData(field->getType(),
              begin + CGM.getContext().toCharUnitsFromBits(fieldOffsetInBits));
    }
  }
  // Add "late" C++ data: virtual bases.
  if (cxxRecord) {
    for (auto &vbaseSpecifier : cxxRecord->vbases()) {
      auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getVBaseClassOffset(baseRecord));
    }
  }
}
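/// Add the storage occupied by a bit-field, treated as opaque bytes.
/// bitfieldBitBegin is the offset of the bit-field within the record,
/// in bits.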
void SwiftAggLowering::addBitFieldData(const FieldDecl *bitfield,
                                       CharUnits recordBegin,
                                       uint64_t bitfieldBitBegin) {
  assert(bitfield->isBitField());
  auto &ctx = CGM.getContext();
  auto width = bitfield->getBitWidthValue(ctx);

  // We can ignore zero-width bit-fields.
  if (width == 0) return;

  // toCharUnitsFromBits rounds down.
  CharUnits bitfieldByteBegin = ctx.toCharUnitsFromBits(bitfieldBitBegin);

  // Find the offset of the last byte that is partially occupied by the
  // bit-field; since we otherwise expect exclusive ends, the end is the
  // next byte.
  uint64_t bitfieldBitLast = bitfieldBitBegin + width - 1;
  CharUnits bitfieldByteEnd =
      ctx.toCharUnitsFromBits(bitfieldBitLast) + CharUnits::One();
  addOpaqueData(recordBegin + bitfieldByteBegin,
                recordBegin + bitfieldByteEnd);
}
void SwiftAggLowering::addTypedData(llvm::Type *type, CharUnits begin) {
  assert(type && "didn't provide type for typed data");
  addTypedData(type, begin, begin + getTypeStoreSize(CGM, type));
}

void SwiftAggLowering::addTypedData(llvm::Type *type,
                                    CharUnits begin, CharUnits end) {
  assert(type && "didn't provide type for typed data");
  assert(getTypeStoreSize(CGM, type) == end - begin);

  // Legalize vector types.
  if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
    SmallVector<llvm::Type*, 4> componentTys;
    legalizeVectorType(CGM, end - begin, vecTy, componentTys);
    assert(componentTys.size() >= 1);

    // Walk the initial components.
    for (size_t i = 0, e = componentTys.size(); i != e - 1; ++i) {
      llvm::Type *componentTy = componentTys[i];
      auto componentSize = getTypeStoreSize(CGM, componentTy);
      assert(componentSize < end - begin);
      addLegalTypedData(componentTy, begin, begin + componentSize);
      begin += componentSize;
    }

    return addLegalTypedData(componentTys.back(), begin, end);
  }

  // Legalize integer types.
  if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
    if (!isLegalIntegerType(CGM, intTy))
      return addOpaqueData(begin, end);
  }

  return addLegalTypedData(type, begin, end);
}
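/// Add a type that has already been legalized.  If it isn't naturally
/// aligned at this offset, either split it (for vectors) or fall back to
/// treating the bytes as opaque data.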
void SwiftAggLowering::addLegalTypedData(llvm::Type *type,
                                         CharUnits begin, CharUnits end) {
  // Require the type to be naturally aligned.
  if (!begin.isZero() && !begin.isMultipleOf(getNaturalAlignment(CGM, type))) {

    // Try splitting vector types.
    if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
      auto split = splitLegalVectorType(CGM, end - begin, vecTy);
      auto eltTy = split.first;
      auto numElts = split.second;

      auto eltSize = (end - begin) / numElts;
      assert(eltSize == getTypeStoreSize(CGM, eltTy));
      for (size_t i = 0, e = numElts; i != e; ++i) {
        addLegalTypedData(eltTy, begin, begin + eltSize);
        begin += eltSize;
      }
      assert(begin == end);
      return;
    }

    return addOpaqueData(begin, end);
  }

  addEntry(type, begin, end);
}
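/// Add an entry to the sorted, non-overlapping list of storage entries.
/// A null Type marks opaque data.  Conflicting overlaps are resolved by
/// splitting vector entries and, failing that, by merging the overlapping
/// ranges into a single opaque entry.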
void SwiftAggLowering::addEntry(llvm::Type *type,
                                CharUnits begin, CharUnits end) {
  assert((!type ||
          (!isa<llvm::StructType>(type) && !isa<llvm::ArrayType>(type))) &&
         "cannot add aggregate-typed data");

  // Fast path: we can just add entries to the end.
  if (Entries.empty() || Entries.back().End <= begin) {
    Entries.push_back({begin, end, type});
    return;
  }

  // Find the first existing entry that ends after the start of the new data.
  // TODO: do a binary search if Entries is big enough for it to matter.
  size_t index = Entries.size() - 1;
  while (index != 0) {
    if (Entries[index - 1].End <= begin) break;
    --index;
  }

  // The entry ends after the start of the new data.
  // If the entry starts after the end of the new data, there's no conflict.
  if (Entries[index].Begin >= end) {
    // This insertion is potentially O(n), but the way we generally build
    // these layouts makes that unlikely to matter: we'd need a union of
    // several very different types.
    Entries.insert(Entries.begin() + index, {begin, end, type});
    return;
  }

  // Otherwise, the ranges overlap.
restartAfterSplit:

  // Simplest case: an exact overlap.
  if (Entries[index].Begin == begin && Entries[index].End == end) {
    // If the types match exactly, great.
    if (Entries[index].Type == type) return;

    // If either type is opaque, make the entry opaque and return.
    if (Entries[index].Type == nullptr) {
      return;
    } else if (type == nullptr) {
      Entries[index].Type = nullptr;
      return;
    }

    // If they disagree in an ABI-agnostic way, just resolve the conflict
    // arbitrarily.
    if (auto entryType = getCommonType(Entries[index].Type, type)) {
      Entries[index].Type = entryType;
      return;
    }

    // Otherwise, make the entry opaque.
    Entries[index].Type = nullptr;
    return;
  }
  // Okay, we have an overlapping conflict of some sort.

  // If we have a vector type, split it.
  if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
    auto eltTy = vecTy->getElementType();
    CharUnits eltSize =
        (end - begin) / cast<llvm::FixedVectorType>(vecTy)->getNumElements();
    assert(eltSize == getTypeStoreSize(CGM, eltTy));
    for (unsigned i = 0,
                  e = cast<llvm::FixedVectorType>(vecTy)->getNumElements();
         i != e; ++i) {
      addEntry(eltTy, begin, begin + eltSize);
      begin += eltSize;
    }
    assert(begin == end);
    return;
  }

  // If the entry is a vector type, split it and try again.
  if (Entries[index].Type && Entries[index].Type->isVectorTy()) {
    splitVectorEntry(index);
    goto restartAfterSplit;
  }
  // Okay, we have no choice but to make the existing entry opaque.
  Entries[index].Type = nullptr;

  // Stretch the start of the entry to the beginning of the range.
  if (begin < Entries[index].Begin) {
    Entries[index].Begin = begin;
    assert(index == 0 || begin >= Entries[index - 1].End);
  }

  // Stretch the end of the entry to the end of the range; but if we run
  // into the start of the next entry, just leave the range there and repeat.
  while (end > Entries[index].End) {
    assert(Entries[index].Type == nullptr);

    // If the range is complete, we're done.
    if (index == Entries.size() - 1 || end <= Entries[index + 1].Begin) {
      Entries[index].End = end;
      break;
    }

    // Stretch this entry to the start of the next one, then continue
    // with that entry.
    Entries[index].End = Entries[index + 1].Begin;
    index++;

    // If the next entry is already opaque, we're done with it.
    if (Entries[index].Type == nullptr)
      continue;

    // If the range ends in the middle of a vector entry, split the vector
    // so that only the overlapped part needs to become opaque.
    if (Entries[index].Type->isVectorTy() &&
        end < Entries[index].End) {
      splitVectorEntry(index);
    }

    // Make the entry opaque.
    Entries[index].Type = nullptr;
  }
}
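/// Replace the entry of vector type at the given index with a sequence
/// of entries for its component (sub)vectors, as chosen by
/// splitLegalVectorType.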
void SwiftAggLowering::splitVectorEntry(unsigned index) {
  auto vecTy = cast<llvm::VectorType>(Entries[index].Type);
  auto split = splitLegalVectorType(CGM, Entries[index].getWidth(), vecTy);

  auto eltTy = split.first;
  CharUnits eltSize = getTypeStoreSize(CGM, eltTy);
  auto numElts = split.second;
  Entries.insert(Entries.begin() + index + 1, numElts - 1, StorageEntry());

  CharUnits begin = Entries[index].Begin;
  for (unsigned i = 0; i != numElts; ++i) {
    Entries[index].Type = eltTy;
    Entries[index].Begin = begin;
    Entries[index].End = begin + eltSize;
    begin += eltSize;
    index++;
  }
}
/// Given a power-of-two unit size, return the offset of the aligned unit
/// of that size which contains the given offset.
static CharUnits getOffsetAtStartOfUnit(CharUnits offset, CharUnits unitSize) {
  assert(isPowerOf2(unitSize.getQuantity()));
  auto unitMask = ~(unitSize.getQuantity() - 1);
  return CharUnits::fromQuantity(offset.getQuantity() & unitMask);
}

static bool areBytesInSameUnit(CharUnits first, CharUnits second,
                               CharUnits unitSize) {
  return getOffsetAtStartOfUnit(first, unitSize)
      == getOffsetAtStartOfUnit(second, unitSize);
}

static bool isMergeableEntryType(llvm::Type *type) {
  // Opaquely-typed memory is always mergeable.
  if (type == nullptr) return true;

  // Pointers and integers are always mergeable.

  // Floating-point and vector types should never be merged; such types
  // are too easily mis-interpreted if accessed as opaque integer chunks.
  return (!type->isFloatingPointTy() && !type->isVectorTy());
}
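/// Decide whether two adjacent storage entries should be coalesced into a
/// single opaque chunk: they must land in the same chunk-sized unit, and
/// both must have mergeable (integer, pointer, or opaque) types.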
bool SwiftAggLowering::shouldMergeEntries(const StorageEntry &first,
                                          const StorageEntry &second,
                                          CharUnits chunkSize) {
  // Only merge entries that overlap the same chunk.  We test this first
  // despite it being a bit more expensive because this is the condition
  // that tends to prevent merging.
  if (!areBytesInSameUnit(first.End - CharUnits::One(), second.Begin,
                          chunkSize))
    return false;

  return (isMergeableEntryType(first.Type) &&
          isMergeableEntryType(second.Type));
}
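/// Perform the final adjustments on the layout: merge mergeable adjacent
/// entries into opaque ranges, then replace each opaque range with integer
/// entries of the largest naturally-aligned units that cover it, never
/// crossing a chunk (generally pointer-size) boundary.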
void SwiftAggLowering::finish() {
  if (Entries.empty()) {
    Finished = true;
    return;
  }

  // We logically split the layout down into a series of chunks of this size,
  // which is generally the size of a pointer.
  const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM);

  // First pass: if two entries should be merged, mark them both opaque
  // and stretch one to meet the next.
  bool hasOpaqueEntries = (Entries[0].Type == nullptr);
  for (size_t i = 1, e = Entries.size(); i != e; ++i) {
    if (shouldMergeEntries(Entries[i - 1], Entries[i], chunkSize)) {
      Entries[i - 1].Type = nullptr;
      Entries[i].Type = nullptr;
      Entries[i - 1].End = Entries[i].Begin;
      hasOpaqueEntries = true;
    } else if (Entries[i].Type == nullptr) {
      hasOpaqueEntries = true;
    }
  }

  // The rest of the algorithm leaves non-opaque entries alone, so if we
  // have no opaque entries, we're done.
  if (!hasOpaqueEntries) {
    Finished = true;
    return;
  }

  // Okay, move the entries to a temporary and rebuild Entries.
  auto orig = std::move(Entries);
  assert(Entries.empty());

  for (size_t i = 0, e = orig.size(); i != e; ++i) {
    // Just copy over non-opaque entries.
    if (orig[i].Type != nullptr) {
      Entries.push_back(orig[i]);
      continue;
    }

    // Scan forward to determine the full extent of the next opaque range.
    // We know from the first pass that only contiguous ranges will overlap
    // the same aligned chunk.
    auto begin = orig[i].Begin;
    auto end = orig[i].End;
    while (i + 1 != e &&
           orig[i + 1].Type == nullptr &&
           end == orig[i + 1].Begin) {
      end = orig[i + 1].End;
      i++;
    }

    // Add an entry per intersected chunk.
    do {
      // Find the smallest aligned storage unit in the maximal aligned
      // storage unit containing 'begin' that contains all the bytes in
      // the intersection between the range and this chunk.
      CharUnits localBegin = begin;
      CharUnits chunkBegin = getOffsetAtStartOfUnit(localBegin, chunkSize);
      CharUnits chunkEnd = chunkBegin + chunkSize;
      CharUnits localEnd = std::min(end, chunkEnd);

      // Just do a simple loop over ever-increasing unit sizes.
      CharUnits unitSize = CharUnits::One();
      CharUnits unitBegin, unitEnd;
      for (; ; unitSize *= 2) {
        assert(unitSize <= chunkSize);
        unitBegin = getOffsetAtStartOfUnit(localBegin, unitSize);
        unitEnd = unitBegin + unitSize;
        if (unitEnd >= localEnd) break;
      }

      // Add an entry for this unit.
      auto entryTy =
          llvm::IntegerType::get(CGM.getLLVMContext(),
                                 CGM.getContext().toBits(unitSize));
      Entries.push_back({unitBegin, unitEnd, entryTy});

      // The next chunk starts where this one left off.
      begin = localEnd;
    } while (begin != end);
  }

  // Okay, finally finished.
  Finished = true;
}
void SwiftAggLowering::enumerateComponents(EnumerationCallback callback) const {
  assert(Finished && "haven't yet finished lowering");

  for (auto &entry : Entries) {
    callback(entry.Begin, entry.End, entry.Type);
  }
}
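/// Compute the types for a coerce-and-expand ABI treatment.  The first
/// type is a struct that includes explicit i8-array padding so that its
/// element offsets match the computed layout; the second omits the padding
/// and is what the expanded function arguments are based on.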
std::pair<llvm::StructType*, llvm::Type*>
SwiftAggLowering::getCoerceAndExpandTypes() const {
  assert(Finished && "haven't yet finished lowering");

  auto &ctx = CGM.getLLVMContext();

  if (Entries.empty()) {
    auto type = llvm::StructType::get(ctx);
    return { type, type };
  }

  SmallVector<llvm::Type*, 8> elts;
  CharUnits lastEnd = CharUnits::Zero();
  bool hasPadding = false;
  bool packed = false;
  for (auto &entry : Entries) {
    if (entry.Begin != lastEnd) {
      auto paddingSize = entry.Begin - lastEnd;
      assert(!paddingSize.isNegative());

      auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx),
                                          paddingSize.getQuantity());
      elts.push_back(padding);
      hasPadding = true;
    }

    if (!packed && !entry.Begin.isMultipleOf(CharUnits::fromQuantity(
            CGM.getDataLayout().getABITypeAlignment(entry.Type))))
      packed = true;

    elts.push_back(entry.Type);

    lastEnd = entry.Begin + getTypeAllocSize(CGM, entry.Type);
    assert(entry.End <= lastEnd);
  }

  // We don't need to adjust 'packed' to deal with possible tail padding
  // because we never do that kind of access through the coercion type.
  auto coercionType = llvm::StructType::get(ctx, elts, packed);

  llvm::Type *unpaddedType = coercionType;
  if (hasPadding) {
    elts.clear();
    for (auto &entry : Entries) {
      elts.push_back(entry.Type);
    }
    if (elts.size() == 1) {
      unpaddedType = elts[0];
    } else {
      unpaddedType = llvm::StructType::get(ctx, elts, /*packed*/ false);
    }
  } else if (Entries.size() == 1) {
    unpaddedType = Entries[0].Type;
  }

  return { coercionType, unpaddedType };
}
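/// Should the lowered aggregate be passed indirectly, i.e. by address?
/// This defers to the target's SwiftABIInfo, handing it the full list of
/// lowered component types.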
bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
  assert(Finished && "haven't yet finished lowering");

  // Empty types don't need to be passed indirectly.
  if (Entries.empty()) return false;

  // Avoid copying the array of types when there's just a single element.
  if (Entries.size() == 1) {
    return getSwiftABIInfo(CGM).shouldPassIndirectly(Entries.back().Type,
                                                     asReturnValue);
  }

  SmallVector<llvm::Type*, 8> componentTys;
  componentTys.reserve(Entries.size());
  for (auto &entry : Entries) {
    componentTys.push_back(entry.Type);
  }
  return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue);
}
bool swiftcall::shouldPassIndirectly(CodeGenModule &CGM,
                                     ArrayRef<llvm::Type*> componentTys,
                                     bool asReturnValue) {
  return getSwiftABIInfo(CGM).shouldPassIndirectly(componentTys, asReturnValue);
}

CharUnits swiftcall::getMaximumVoluntaryIntegerSize(CodeGenModule &CGM) {
  // Currently always the size of an ordinary pointer.
  auto &ctx = CGM.getContext();
  return ctx.toCharUnitsFromBits(
      ctx.getTargetInfo().getPointerWidth(LangAS::Default));
}

CharUnits swiftcall::getNaturalAlignment(CodeGenModule &CGM, llvm::Type *type) {
  // For Swift's purposes, this is always just the store size of the type
  // rounded up to a power of 2.
  auto size = (unsigned long long) getTypeStoreSize(CGM, type).getQuantity();
  if (!isPowerOf2(size)) {
    size = 1ULL << (llvm::findLastSet(size, llvm::ZB_Undefined) + 1);
  }
  assert(size >= CGM.getDataLayout().getABITypeAlignment(type));
  return CharUnits::fromQuantity(size);
}
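/// Is the given integer type "legal" from Swift's perspective on the
/// current platform, i.e. small enough and of a width the target can pass
/// directly?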
bool swiftcall::isLegalIntegerType(CodeGenModule &CGM,
                                   llvm::IntegerType *intTy) {
  auto size = intTy->getBitWidth();
  switch (size) {
  case 1:
  case 8:
  case 16:
  case 32:
  case 64:
    // Just assume that the above are always legal.
    return true;

  case 128:
    return CGM.getContext().getTargetInfo().hasInt128Type();

  default:
    return false;
  }
}
bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::VectorType *vectorTy) {
  return isLegalVectorType(
      CGM, vectorSize, vectorTy->getElementType(),
      cast<llvm::FixedVectorType>(vectorTy)->getNumElements());
}
bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::Type *eltTy, unsigned numElts) {
  assert(numElts > 1 && "illegal vector length");
  return getSwiftABIInfo(CGM).isLegalVectorType(vectorSize, eltTy, numElts);
}
std::pair<llvm::Type*, unsigned>
swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                llvm::VectorType *vectorTy) {
  auto numElts = cast<llvm::FixedVectorType>(vectorTy)->getNumElements();
  auto eltTy = vectorTy->getElementType();

  // Try to split the vector type in half.
  if (numElts >= 4 && isPowerOf2(numElts)) {
    if (isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2))
      return {llvm::FixedVectorType::get(eltTy, numElts / 2), 2};
  }

  return {eltTy, numElts};
}
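/// Turn a vector type into a sequence of legal component vector types.
/// For example, when <3 x i32> is illegal but <4 x i32> and <2 x i32> are
/// legal, <7 x i32> is legalized into { <4 x i32>, <2 x i32>, i32 }.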
void swiftcall::legalizeVectorType(CodeGenModule &CGM,
                                   CharUnits origVectorSize,
                                   llvm::VectorType *origVectorTy,
                                   llvm::SmallVectorImpl<llvm::Type*> &components) {
  // If it's already a legal vector type, use it.
  if (isLegalVectorType(CGM, origVectorSize, origVectorTy)) {
    components.push_back(origVectorTy);
    return;
  }

  // Try to split the vector into legal subvectors.
  auto numElts = cast<llvm::FixedVectorType>(origVectorTy)->getNumElements();
  auto eltTy = origVectorTy->getElementType();
  assert(numElts != 1);

  // The largest size that we're still considering making subvectors of.
  // Always a power of 2.
  unsigned logCandidateNumElts = llvm::findLastSet(numElts, llvm::ZB_Undefined);
  unsigned candidateNumElts = 1U << logCandidateNumElts;
  assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts);

  // Minor optimization: don't check the legality of this exact size twice.
  if (candidateNumElts == numElts) {
    logCandidateNumElts--;
    candidateNumElts >>= 1;
  }

  CharUnits eltSize = (origVectorSize / numElts);
  CharUnits candidateSize = eltSize * candidateNumElts;

  // The sensibility of this algorithm relies on the fact that we never
  // have a legal non-power-of-2 vector size without having the power of 2
  // also be legal.
  while (logCandidateNumElts > 0) {
    assert(candidateNumElts == 1U << logCandidateNumElts);
    assert(candidateNumElts <= numElts);
    assert(candidateSize == eltSize * candidateNumElts);

    // Skip illegal vector sizes.
    if (!isLegalVectorType(CGM, candidateSize, eltTy, candidateNumElts)) {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
      continue;
    }

    // Add the right number of vectors of this size.
    auto numVecs = numElts >> logCandidateNumElts;
    components.append(numVecs,
                      llvm::FixedVectorType::get(eltTy, candidateNumElts));
    numElts -= (numVecs << logCandidateNumElts);

    // No vectors left.
    if (numElts == 0) return;

    // It's possible that the number of elements remaining will be legal.
    // This can happen with e.g. <7 x float> when <3 x float> is legal.
    // This only needs to be separately checked if it's not a power of 2.
    if (numElts > 2 && !isPowerOf2(numElts) &&
        isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) {
      components.push_back(llvm::FixedVectorType::get(eltTy, numElts));
      return;
    }

    // Bring candidateNumElts down to something no larger than numElts.
    do {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
    } while (candidateNumElts > numElts);
  }

  // Otherwise, just append a bunch of individual elements.
  components.append(numElts, eltTy);
}
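/// Given a lowered aggregate, pick an ABIArgInfo treatment for it: ignore
/// empty types, pass indirectly when the target requires it, and otherwise
/// use coerce-and-expand over the computed component types.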
static ABIArgInfo classifyExpandedType(SwiftAggLowering &lowering,
                                       bool forReturn,
                                       CharUnits alignmentForIndirect) {
  if (lowering.empty()) {
    return ABIArgInfo::getIgnore();
  } else if (lowering.shouldPassIndirectly(forReturn)) {
    return ABIArgInfo::getIndirect(alignmentForIndirect, /*byval*/ false);
  } else {
    auto types = lowering.getCoerceAndExpandTypes();
    return ABIArgInfo::getCoerceAndExpand(types.first, types.second);
  }
}

static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
                               bool forReturn) {
  if (auto recordType = dyn_cast<RecordType>(type)) {
    auto record = recordType->getDecl();
    auto &layout = CGM.getContext().getASTRecordLayout(record);

    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(record, CharUnits::Zero(), layout);
    lowering.finish();

    return classifyExpandedType(lowering, forReturn, layout.getAlignment());
  }

  // Just assume that all of our target ABIs can support returning at least
  // two integer or floating-point values.
  if (isa<ComplexType>(type)) {
    return (forReturn ? ABIArgInfo::getDirect() : ABIArgInfo::getExpand());
  }

  // Vector types may need to be legalized.
  if (isa<VectorType>(type)) {
    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(type, CharUnits::Zero());
    lowering.finish();

    CharUnits alignment = CGM.getContext().getTypeAlignInChars(type);
    return classifyExpandedType(lowering, forReturn, alignment);
  }

  // 'void' needs to be classified specially.
  if (type->isVoidType()) {
    return ABIArgInfo::getIgnore();
  }

  // Everything else can be passed directly.
  return ABIArgInfo::getDirect();
}

ABIArgInfo swiftcall::classifyReturnType(CodeGenModule &CGM, CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ true);
}

ABIArgInfo swiftcall::classifyArgumentType(CodeGenModule &CGM,
                                           CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ false);
}

void swiftcall::computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  // Classify the return type.
  FI.getReturnInfo() = classifyReturnType(CGM, FI.getReturnType());

  // Classify each of the argument types.
  for (unsigned i = 0, e = FI.arg_size(); i != e; ++i) {
    auto &argInfo = FI.arg_begin()[i];
    argInfo.info = classifyArgumentType(CGM, argInfo.type);
  }
}