SwiftCallingConv.cpp (clang 10.0.0svn)
//===--- SwiftCallingConv.cpp - Lowering for the Swift calling convention -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of the abstract lowering for the Swift calling convention.
//
//===----------------------------------------------------------------------===//

#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Basic/TargetInfo.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"

using namespace clang;
using namespace CodeGen;
using namespace swiftcall;

static const SwiftABIInfo &getSwiftABIInfo(CodeGenModule &CGM) {
  return cast<SwiftABIInfo>(CGM.getTargetCodeGenInfo().getABIInfo());
}

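// Note: n & -n isolates the lowest set bit of n, so the comparison below
// succeeds exactly when n has at most one bit set (zero is also accepted).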
static bool isPowerOf2(unsigned n) {
  return n == (n & -n);
}

/// Given two types with the same size, try to find a common type.
static llvm::Type *getCommonType(llvm::Type *first, llvm::Type *second) {
  assert(first != second);

  // Allow pointers to merge with integers, but prefer the integer type.
  if (first->isIntegerTy()) {
    if (second->isPointerTy()) return first;
  } else if (first->isPointerTy()) {
    if (second->isIntegerTy()) return second;
    if (second->isPointerTy()) return first;

  // Allow two vectors to be merged (given that they have the same size).
  // This assumes that we never have two different vector register sets.
  } else if (auto firstVecTy = dyn_cast<llvm::VectorType>(first)) {
    if (auto secondVecTy = dyn_cast<llvm::VectorType>(second)) {
      if (auto commonTy = getCommonType(firstVecTy->getElementType(),
                                        secondVecTy->getElementType())) {
        return (commonTy == firstVecTy->getElementType() ? first : second);
      }
    }
  }

  return nullptr;
}

static CharUnits getTypeStoreSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeStoreSize(type));
}

static CharUnits getTypeAllocSize(CodeGenModule &CGM, llvm::Type *type) {
  return CharUnits::fromQuantity(CGM.getDataLayout().getTypeAllocSize(type));
}

void SwiftAggLowering::addTypedData(QualType type, CharUnits begin) {
  // Deal with various aggregate types as special cases:

  // Record types.
  if (auto recType = type->getAs<RecordType>()) {
    addTypedData(recType->getDecl(), begin);

  // Array types.
  } else if (type->isArrayType()) {
    // Incomplete array types (flexible array members?) don't provide
    // data to lay out, and the other cases shouldn't be possible.
    auto arrayType = CGM.getContext().getAsConstantArrayType(type);
    if (!arrayType) return;

    QualType eltType = arrayType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    for (uint64_t i = 0, e = arrayType->getSize().getZExtValue(); i != e; ++i) {
      addTypedData(eltType, begin + i * eltSize);
    }

  // Complex types.
  } else if (auto complexType = type->getAs<ComplexType>()) {
    auto eltType = complexType->getElementType();
    auto eltSize = CGM.getContext().getTypeSizeInChars(eltType);
    auto eltLLVMType = CGM.getTypes().ConvertType(eltType);
    addTypedData(eltLLVMType, begin, begin + eltSize);
    addTypedData(eltLLVMType, begin + eltSize, begin + 2 * eltSize);

  // Member pointer types.
  } else if (type->getAs<MemberPointerType>()) {
    // Just add it all as opaque.
    addOpaqueData(begin, begin + CGM.getContext().getTypeSizeInChars(type));

  // Everything else is scalar and should not convert as an LLVM aggregate.
  } else {
    // We intentionally convert as !ForMem because we want to preserve
    // that a type was an i1.
    auto llvmType = CGM.getTypes().ConvertType(type);
    addTypedData(llvmType, begin);
  }
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin) {
  addTypedData(record, begin, CGM.getContext().getASTRecordLayout(record));
}

void SwiftAggLowering::addTypedData(const RecordDecl *record, CharUnits begin,
                                    const ASTRecordLayout &layout) {
  // Unions are a special case.
  if (record->isUnion()) {
    for (auto field : record->fields()) {
      if (field->isBitField()) {
        addBitFieldData(field, begin, 0);
      } else {
        addTypedData(field->getType(), begin);
      }
    }
    return;
  }

  // Note that correctness does not rely on us adding things in
  // their actual order of layout; it's just somewhat more efficient
  // for the builder.

  // With that in mind, add "early" C++ data.
  auto cxxRecord = dyn_cast<CXXRecordDecl>(record);
  if (cxxRecord) {
    // - a v-table pointer, if the class adds its own
    if (layout.hasOwnVFPtr()) {
      addTypedData(CGM.Int8PtrTy, begin);
    }

    // - non-virtual bases
    for (auto &baseSpecifier : cxxRecord->bases()) {
      if (baseSpecifier.isVirtual()) continue;

      auto baseRecord = baseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getBaseClassOffset(baseRecord));
    }

    // - a vbptr if the class adds its own
    if (layout.hasOwnVBPtr()) {
      addTypedData(CGM.Int8PtrTy, begin + layout.getVBPtrOffset());
    }
  }

  // Add fields.
  for (auto field : record->fields()) {
    auto fieldOffsetInBits = layout.getFieldOffset(field->getFieldIndex());
    if (field->isBitField()) {
      addBitFieldData(field, begin, fieldOffsetInBits);
    } else {
      addTypedData(field->getType(),
                   begin + CGM.getContext().toCharUnitsFromBits(fieldOffsetInBits));
    }
  }

  // Add "late" C++ data:
  if (cxxRecord) {
    // - virtual bases
    for (auto &vbaseSpecifier : cxxRecord->vbases()) {
      auto baseRecord = vbaseSpecifier.getType()->getAsCXXRecordDecl();
      addTypedData(baseRecord, begin + layout.getVBaseClassOffset(baseRecord));
    }
  }
}

void SwiftAggLowering::addBitFieldData(const FieldDecl *bitfield,
                                       CharUnits recordBegin,
                                       uint64_t bitfieldBitBegin) {
  assert(bitfield->isBitField());
  auto &ctx = CGM.getContext();
  auto width = bitfield->getBitWidthValue(ctx);

  // We can ignore zero-width bit-fields.
  if (width == 0) return;

  // toCharUnitsFromBits rounds down.
  CharUnits bitfieldByteBegin = ctx.toCharUnitsFromBits(bitfieldBitBegin);

  // Find the offset of the last byte that is partially occupied by the
  // bit-field; since we otherwise expect exclusive ends, the end is the
  // next byte.
  uint64_t bitfieldBitLast = bitfieldBitBegin + width - 1;
  CharUnits bitfieldByteEnd =
      ctx.toCharUnitsFromBits(bitfieldBitLast) + CharUnits::One();
  addOpaqueData(recordBegin + bitfieldByteBegin,
                recordBegin + bitfieldByteEnd);
}

void SwiftAggLowering::addTypedData(llvm::Type *type, CharUnits begin) {
  assert(type && "didn't provide type for typed data");
  addTypedData(type, begin, begin + getTypeStoreSize(CGM, type));
}

void SwiftAggLowering::addTypedData(llvm::Type *type,
                                    CharUnits begin, CharUnits end) {
  assert(type && "didn't provide type for typed data");
  assert(getTypeStoreSize(CGM, type) == end - begin);

  // Legalize vector types.
  if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
    SmallVector<llvm::Type*, 4> componentTys;
    legalizeVectorType(CGM, end - begin, vecTy, componentTys);
    assert(componentTys.size() >= 1);

    // Walk the initial components.
    for (size_t i = 0, e = componentTys.size(); i != e - 1; ++i) {
      llvm::Type *componentTy = componentTys[i];
      auto componentSize = getTypeStoreSize(CGM, componentTy);
      assert(componentSize < end - begin);
      addLegalTypedData(componentTy, begin, begin + componentSize);
      begin += componentSize;
    }

    return addLegalTypedData(componentTys.back(), begin, end);
  }

  // Legalize integer types.
  if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
    if (!isLegalIntegerType(CGM, intTy))
      return addOpaqueData(begin, end);
  }

  // All other types should be legal.
  return addLegalTypedData(type, begin, end);
}

void SwiftAggLowering::addLegalTypedData(llvm::Type *type,
                                         CharUnits begin, CharUnits end) {
  // Require the type to be naturally aligned.
  if (!begin.isZero() && !begin.isMultipleOf(getNaturalAlignment(CGM, type))) {

    // Try splitting vector types.
    if (auto vecTy = dyn_cast<llvm::VectorType>(type)) {
      auto split = splitLegalVectorType(CGM, end - begin, vecTy);
      auto eltTy = split.first;
      auto numElts = split.second;

      auto eltSize = (end - begin) / numElts;
      assert(eltSize == getTypeStoreSize(CGM, eltTy));
      for (size_t i = 0, e = numElts; i != e; ++i) {
        addLegalTypedData(eltTy, begin, begin + eltSize);
        begin += eltSize;
      }
      assert(begin == end);
      return;
    }

    return addOpaqueData(begin, end);
  }

  addEntry(type, begin, end);
}

void SwiftAggLowering::addEntry(llvm::Type *type,
                                CharUnits begin, CharUnits end) {
  assert((!type ||
          (!isa<llvm::StructType>(type) && !isa<llvm::ArrayType>(type))) &&
         "cannot add aggregate-typed data");
  assert(!type || begin.isMultipleOf(getNaturalAlignment(CGM, type)));

  // Fast path: we can just add entries to the end.
  if (Entries.empty() || Entries.back().End <= begin) {
    Entries.push_back({begin, end, type});
    return;
  }

  // Find the first existing entry that ends after the start of the new data.
  // TODO: do a binary search if Entries is big enough for it to matter.
  size_t index = Entries.size() - 1;
  while (index != 0) {
    if (Entries[index - 1].End <= begin) break;
    --index;
  }

  // The entry ends after the start of the new data.
  // If the entry starts after the end of the new data, there's no conflict.
  if (Entries[index].Begin >= end) {
    // This insertion is potentially O(n), but the way we generally build
    // these layouts makes that unlikely to matter: we'd need a union of
    // several very large types.
    Entries.insert(Entries.begin() + index, {begin, end, type});
    return;
  }

  // Otherwise, the ranges overlap. The new range might also overlap
  // with later ranges.
restartAfterSplit:

  // Simplest case: an exact overlap.
  if (Entries[index].Begin == begin && Entries[index].End == end) {
    // If the types match exactly, great.
    if (Entries[index].Type == type) return;

    // If either type is opaque, make the entry opaque and return.
    if (Entries[index].Type == nullptr) {
      return;
    } else if (type == nullptr) {
      Entries[index].Type = nullptr;
      return;
    }

    // If they disagree in an ABI-agnostic way, just resolve the conflict
    // arbitrarily.
    if (auto entryType = getCommonType(Entries[index].Type, type)) {
      Entries[index].Type = entryType;
      return;
    }

    // Otherwise, make the entry opaque.
    Entries[index].Type = nullptr;
    return;
  }

  // Okay, we have an overlapping conflict of some sort.

  // If we have a vector type, split it.
  if (auto vecTy = dyn_cast_or_null<llvm::VectorType>(type)) {
    auto eltTy = vecTy->getElementType();
    CharUnits eltSize = (end - begin) / vecTy->getNumElements();
    assert(eltSize == getTypeStoreSize(CGM, eltTy));
    for (unsigned i = 0, e = vecTy->getNumElements(); i != e; ++i) {
      addEntry(eltTy, begin, begin + eltSize);
      begin += eltSize;
    }
    assert(begin == end);
    return;
  }

  // If the entry is a vector type, split it and try again.
  if (Entries[index].Type && Entries[index].Type->isVectorTy()) {
    splitVectorEntry(index);
    goto restartAfterSplit;
  }

  // Okay, we have no choice but to make the existing entry opaque.

  Entries[index].Type = nullptr;

  // Stretch the start of the entry to the beginning of the range.
  if (begin < Entries[index].Begin) {
    Entries[index].Begin = begin;
    assert(index == 0 || begin >= Entries[index - 1].End);
  }

  // Stretch the end of the entry to the end of the range; but if we run
  // into the start of the next entry, just leave the range there and repeat.
  while (end > Entries[index].End) {
    assert(Entries[index].Type == nullptr);

    // If the range doesn't overlap the next entry, we're done.
    if (index == Entries.size() - 1 || end <= Entries[index + 1].Begin) {
      Entries[index].End = end;
      break;
    }

    // Otherwise, stretch to the start of the next entry.
    Entries[index].End = Entries[index + 1].Begin;

    // Continue with the next entry.
    index++;

    // This entry needs to be made opaque if it is not already.
    if (Entries[index].Type == nullptr)
      continue;

    // Split vector entries unless we completely subsume them.
    if (Entries[index].Type->isVectorTy() &&
        end < Entries[index].End) {
      splitVectorEntry(index);
    }

    // Make the entry opaque.
    Entries[index].Type = nullptr;
  }
}

/// Replace the entry of vector type at offset 'index' with a sequence
/// of its component vectors.
void SwiftAggLowering::splitVectorEntry(unsigned index) {
  auto vecTy = cast<llvm::VectorType>(Entries[index].Type);
  auto split = splitLegalVectorType(CGM, Entries[index].getWidth(), vecTy);

  auto eltTy = split.first;
  CharUnits eltSize = getTypeStoreSize(CGM, eltTy);
  auto numElts = split.second;
  Entries.insert(Entries.begin() + index + 1, numElts - 1, StorageEntry());

  CharUnits begin = Entries[index].Begin;
  for (unsigned i = 0; i != numElts; ++i) {
    Entries[index + i].Type = eltTy;
    Entries[index + i].Begin = begin;
    Entries[index + i].End = begin + eltSize;
    begin += eltSize;
  }
}

/// Given a power-of-two unit size, return the offset of the aligned unit
/// of that size which contains the given offset.
///
/// In other words, round down to the nearest multiple of the unit size.
static CharUnits getOffsetAtStartOfUnit(CharUnits offset, CharUnits unitSize) {
  assert(isPowerOf2(unitSize.getQuantity()));
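  // Since unitSize is a power of 2, clearing its low bits with this mask
  // rounds the offset down to a multiple of unitSize.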
  auto unitMask = ~(unitSize.getQuantity() - 1);
  return CharUnits::fromQuantity(offset.getQuantity() & unitMask);
}

static bool areBytesInSameUnit(CharUnits first, CharUnits second,
                               CharUnits chunkSize) {
  return getOffsetAtStartOfUnit(first, chunkSize)
      == getOffsetAtStartOfUnit(second, chunkSize);
}

static bool isMergeableEntryType(llvm::Type *type) {
  // Opaquely-typed memory is always mergeable.
  if (type == nullptr) return true;

  // Pointers and integers are always mergeable. In theory we should not
  // merge pointers, but (1) it doesn't currently matter in practice because
  // the chunk size is never greater than the size of a pointer and (2)
  // Swift IRGen uses integer types for a lot of things that are "really"
  // just storing pointers (like Optional<SomePointer>). If we ever have a
  // target that would otherwise combine pointers, we should put some effort
  // into fixing those cases in Swift IRGen and then call out pointer types
  // here.

  // Floating-point and vector types should never be merged.
  // Most such types are too large and highly-aligned to ever trigger merging
  // in practice, but it's important for the rule to cover at least 'half'
  // and 'float', as well as things like small vectors of 'i1' or 'i8'.
  return (!type->isFloatingPointTy() && !type->isVectorTy());
}

bool SwiftAggLowering::shouldMergeEntries(const StorageEntry &first,
                                          const StorageEntry &second,
                                          CharUnits chunkSize) {
  // Only merge entries that overlap the same chunk. We test this first
  // despite being a bit more expensive because this is the condition that
  // tends to prevent merging.
  if (!areBytesInSameUnit(first.End - CharUnits::One(), second.Begin,
                          chunkSize))
    return false;

  return (isMergeableEntryType(first.Type) &&
          isMergeableEntryType(second.Type));
}

void SwiftAggLowering::finish() {
  if (Entries.empty()) {
    Finished = true;
    return;
  }

  // We logically split the layout down into a series of chunks of this size,
  // which is generally the size of a pointer.
  const CharUnits chunkSize = getMaximumVoluntaryIntegerSize(CGM);

  // First pass: if two entries should be merged, make them both opaque
  // and stretch one to meet the next.
  // Also, remember if there are any opaque entries.
  bool hasOpaqueEntries = (Entries[0].Type == nullptr);
  for (size_t i = 1, e = Entries.size(); i != e; ++i) {
    if (shouldMergeEntries(Entries[i - 1], Entries[i], chunkSize)) {
      Entries[i - 1].Type = nullptr;
      Entries[i].Type = nullptr;
      Entries[i - 1].End = Entries[i].Begin;
      hasOpaqueEntries = true;

    } else if (Entries[i].Type == nullptr) {
      hasOpaqueEntries = true;
    }
  }

  // The rest of the algorithm leaves non-opaque entries alone, so if we
  // have no opaque entries, we're done.
  if (!hasOpaqueEntries) {
    Finished = true;
    return;
  }

  // Okay, move the entries to a temporary and rebuild Entries.
  auto orig = std::move(Entries);
  assert(Entries.empty());

  for (size_t i = 0, e = orig.size(); i != e; ++i) {
    // Just copy over non-opaque entries.
    if (orig[i].Type != nullptr) {
      Entries.push_back(orig[i]);
      continue;
    }

    // Scan forward to determine the full extent of the next opaque range.
    // We know from the first pass that only contiguous ranges will overlap
    // the same aligned chunk.
    auto begin = orig[i].Begin;
    auto end = orig[i].End;
    while (i + 1 != e &&
           orig[i + 1].Type == nullptr &&
           end == orig[i + 1].Begin) {
      end = orig[i + 1].End;
      i++;
    }

    // Add an entry per intersected chunk.
    do {
      // Find the smallest aligned storage unit in the maximal aligned
      // storage unit containing 'begin' that contains all the bytes in
      // the intersection between the range and this chunk.
      CharUnits localBegin = begin;
      CharUnits chunkBegin = getOffsetAtStartOfUnit(localBegin, chunkSize);
      CharUnits chunkEnd = chunkBegin + chunkSize;
      CharUnits localEnd = std::min(end, chunkEnd);

      // Just do a simple loop over ever-increasing unit sizes.
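      // That is, find the smallest naturally-aligned power-of-2 unit that
      // contains localBegin and reaches at least localEnd within this chunk.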
      CharUnits unitSize = CharUnits::One();
      CharUnits unitBegin, unitEnd;
      for (; ; unitSize *= 2) {
        assert(unitSize <= chunkSize);
        unitBegin = getOffsetAtStartOfUnit(localBegin, unitSize);
        unitEnd = unitBegin + unitSize;
        if (unitEnd >= localEnd) break;
      }

      // Add an entry for this unit.
      auto entryTy =
        llvm::IntegerType::get(CGM.getLLVMContext(),
                               CGM.getContext().toBits(unitSize));
      Entries.push_back({unitBegin, unitEnd, entryTy});

      // The next chunk starts where this chunk left off.
      begin = localEnd;
    } while (begin != end);
  }

  // Okay, finally finished.
  Finished = true;
}

void SwiftAggLowering::enumerateComponents(EnumerationCallback callback) const {
  assert(Finished && "haven't yet finished lowering");

  for (auto &entry : Entries) {
    callback(entry.Begin, entry.End, entry.Type);
  }
}

std::pair<llvm::StructType*, llvm::Type*>
SwiftAggLowering::getCoerceAndExpandTypes() const {
  assert(Finished && "haven't yet finished lowering");

  auto &ctx = CGM.getLLVMContext();

  if (Entries.empty()) {
    auto type = llvm::StructType::get(ctx);
    return { type, type };
  }

  SmallVector<llvm::Type*, 8> elts;
  CharUnits lastEnd = CharUnits::Zero();
  bool hasPadding = false;
  bool packed = false;
  for (auto &entry : Entries) {
    if (entry.Begin != lastEnd) {
      auto paddingSize = entry.Begin - lastEnd;
      assert(!paddingSize.isNegative());

      auto padding = llvm::ArrayType::get(llvm::Type::getInt8Ty(ctx),
                                          paddingSize.getQuantity());
      elts.push_back(padding);
      hasPadding = true;
    }

    if (!packed && !entry.Begin.isMultipleOf(
          CharUnits::fromQuantity(
            CGM.getDataLayout().getABITypeAlignment(entry.Type))))
      packed = true;

    elts.push_back(entry.Type);

    lastEnd = entry.Begin + getTypeAllocSize(CGM, entry.Type);
    assert(entry.End <= lastEnd);
  }

  // We don't need to adjust 'packed' to deal with possible tail padding
  // because we never do that kind of access through the coercion type.
  auto coercionType = llvm::StructType::get(ctx, elts, packed);

  llvm::Type *unpaddedType = coercionType;
  if (hasPadding) {
    elts.clear();
    for (auto &entry : Entries) {
      elts.push_back(entry.Type);
    }
    if (elts.size() == 1) {
      unpaddedType = elts[0];
    } else {
      unpaddedType = llvm::StructType::get(ctx, elts, /*packed*/ false);
    }
  } else if (Entries.size() == 1) {
    unpaddedType = Entries[0].Type;
  }

  return { coercionType, unpaddedType };
}

bool SwiftAggLowering::shouldPassIndirectly(bool asReturnValue) const {
  assert(Finished && "haven't yet finished lowering");

  // Empty types don't need to be passed indirectly.
  if (Entries.empty()) return false;

  // Avoid copying the array of types when there's just a single element.
  if (Entries.size() == 1) {
    return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(
                                                         Entries.back().Type,
                                                         asReturnValue);
  }

  SmallVector<llvm::Type*, 8> componentTys;
  componentTys.reserve(Entries.size());
  for (auto &entry : Entries) {
    componentTys.push_back(entry.Type);
  }
  return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(componentTys,
                                                           asReturnValue);
}

bool swiftcall::shouldPassIndirectly(CodeGenModule &CGM,
                                     ArrayRef<llvm::Type*> componentTys,
                                     bool asReturnValue) {
  return getSwiftABIInfo(CGM).shouldPassIndirectlyForSwift(componentTys,
                                                           asReturnValue);
}

CharUnits swiftcall::getMaximumVoluntaryIntegerSize(CodeGenModule &CGM) {
  // Currently always the size of an ordinary pointer.
  return CGM.getContext().toCharUnitsFromBits(
           CGM.getContext().getTargetInfo().getPointerWidth(0));
}

CharUnits swiftcall::getNaturalAlignment(CodeGenModule &CGM, llvm::Type *type) {
  // For Swift's purposes, this is always just the store size of the type
  // rounded up to a power of 2.
  auto size = (unsigned long long) getTypeStoreSize(CGM, type).getQuantity();
  if (!isPowerOf2(size)) {
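    // Round up: shift a single bit one position past the highest set bit.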
    size = 1ULL << (llvm::findLastSet(size, llvm::ZB_Undefined) + 1);
  }
  assert(size >= CGM.getDataLayout().getABITypeAlignment(type));
  return CharUnits::fromQuantity(size);
}

bool swiftcall::isLegalIntegerType(CodeGenModule &CGM,
                                   llvm::IntegerType *intTy) {
  auto size = intTy->getBitWidth();
  switch (size) {
  case 1:
  case 8:
  case 16:
  case 32:
  case 64:
    // Just assume that the above are always legal.
    return true;

  case 128:
    return CGM.getContext().getTargetInfo().hasInt128Type();

  default:
    return false;
  }
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::VectorType *vectorTy) {
  return isLegalVectorType(CGM, vectorSize, vectorTy->getElementType(),
                           vectorTy->getNumElements());
}

bool swiftcall::isLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                  llvm::Type *eltTy, unsigned numElts) {
  assert(numElts > 1 && "illegal vector length");
  return getSwiftABIInfo(CGM)
           .isLegalVectorTypeForSwift(vectorSize, eltTy, numElts);
}

std::pair<llvm::Type*, unsigned>
swiftcall::splitLegalVectorType(CodeGenModule &CGM, CharUnits vectorSize,
                                llvm::VectorType *vectorTy) {
  auto numElts = vectorTy->getNumElements();
  auto eltTy = vectorTy->getElementType();

  // Try to split the vector type in half.
  if (numElts >= 4 && isPowerOf2(numElts)) {
    if (isLegalVectorType(CGM, vectorSize / 2, eltTy, numElts / 2))
      return {llvm::VectorType::get(eltTy, numElts / 2), 2};
  }

  return {eltTy, numElts};
}

void swiftcall::legalizeVectorType(CodeGenModule &CGM, CharUnits origVectorSize,
                                   llvm::VectorType *origVectorTy,
                                   llvm::SmallVectorImpl<llvm::Type*> &components) {
  // If it's already a legal vector type, use it.
  if (isLegalVectorType(CGM, origVectorSize, origVectorTy)) {
    components.push_back(origVectorTy);
    return;
  }

  // Try to split the vector into legal subvectors.
  auto numElts = origVectorTy->getNumElements();
  auto eltTy = origVectorTy->getElementType();
  assert(numElts != 1);

  // The largest size that we're still considering making subvectors of.
  // Always a power of 2.
  unsigned logCandidateNumElts = llvm::findLastSet(numElts, llvm::ZB_Undefined);
  unsigned candidateNumElts = 1U << logCandidateNumElts;
  assert(candidateNumElts <= numElts && candidateNumElts * 2 > numElts);

  // Minor optimization: don't check the legality of this exact size twice.
  if (candidateNumElts == numElts) {
    logCandidateNumElts--;
    candidateNumElts >>= 1;
  }

  CharUnits eltSize = (origVectorSize / numElts);
  CharUnits candidateSize = eltSize * candidateNumElts;

  // The sensibility of this algorithm relies on the fact that we never
  // have a legal non-power-of-2 vector size without having the power of 2
  // also be legal.
  while (logCandidateNumElts > 0) {
    assert(candidateNumElts == 1U << logCandidateNumElts);
    assert(candidateNumElts <= numElts);
    assert(candidateSize == eltSize * candidateNumElts);

    // Skip illegal vector sizes.
    if (!isLegalVectorType(CGM, candidateSize, eltTy, candidateNumElts)) {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
      continue;
    }

    // Add the right number of vectors of this size.
    auto numVecs = numElts >> logCandidateNumElts;
    components.append(numVecs, llvm::VectorType::get(eltTy, candidateNumElts));
    numElts -= (numVecs << logCandidateNumElts);

    if (numElts == 0) return;

    // It's possible that the number of elements remaining will be legal.
    // This can happen with e.g. <7 x float> when <3 x float> is legal.
    // This only needs to be separately checked if it's not a power of 2.
    if (numElts > 2 && !isPowerOf2(numElts) &&
        isLegalVectorType(CGM, eltSize * numElts, eltTy, numElts)) {
      components.push_back(llvm::VectorType::get(eltTy, numElts));
      return;
    }

    // Bring vecSize down to something no larger than numElts.
    do {
      logCandidateNumElts--;
      candidateNumElts /= 2;
      candidateSize /= 2;
    } while (candidateNumElts > numElts);
  }

  // Otherwise, just append a bunch of individual elements.
  components.append(numElts, eltTy);
}

bool swiftcall::mustPassRecordIndirectly(CodeGenModule &CGM,
                                         const RecordDecl *record) {
  // FIXME: should we not rely on the standard computation in Sema, just in
  // case we want to diverge from the platform ABI (e.g. on targets where
  // that uses the MSVC rule)?
  return !record->canPassInRegisters();
}

static ABIArgInfo classifyExpandedType(SwiftAggLowering &lowering,
                                       bool forReturn,
                                       CharUnits alignmentForIndirect) {
  if (lowering.empty()) {
    return ABIArgInfo::getIgnore();
  } else if (lowering.shouldPassIndirectly(forReturn)) {
    return ABIArgInfo::getIndirect(alignmentForIndirect, /*byval*/ false);
  } else {
    auto types = lowering.getCoerceAndExpandTypes();
    return ABIArgInfo::getCoerceAndExpand(types.first, types.second);
  }
}

static ABIArgInfo classifyType(CodeGenModule &CGM, CanQualType type,
                               bool forReturn) {
  if (auto recordType = dyn_cast<RecordType>(type)) {
    auto record = recordType->getDecl();
    auto &layout = CGM.getContext().getASTRecordLayout(record);

    if (mustPassRecordIndirectly(CGM, record))
      return ABIArgInfo::getIndirect(layout.getAlignment(), /*byval*/ false);

    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(recordType->getDecl(), CharUnits::Zero(), layout);
    lowering.finish();

    return classifyExpandedType(lowering, forReturn, layout.getAlignment());
  }

  // Just assume that all of our target ABIs can support returning at least
  // two integer or floating-point values.
  if (isa<ComplexType>(type)) {
    return (forReturn ? ABIArgInfo::getDirect() : ABIArgInfo::getExpand());
  }

  // Vector types may need to be legalized.
  if (isa<VectorType>(type)) {
    SwiftAggLowering lowering(CGM);
    lowering.addTypedData(type, CharUnits::Zero());
    lowering.finish();

    CharUnits alignment = CGM.getContext().getTypeAlignInChars(type);
    return classifyExpandedType(lowering, forReturn, alignment);
  }

  // Member pointer types need to be expanded, but it's a simple form of
  // expansion that 'Direct' can handle. Note that CanBeFlattened should be
  // true for this to work.

  // 'void' needs to be ignored.
  if (type->isVoidType()) {
    return ABIArgInfo::getIgnore();
  }

  // Everything else can be passed directly.
  return ABIArgInfo::getDirect();
}

ABIArgInfo swiftcall::classifyReturnType(CodeGenModule &CGM, CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ true);
}

ABIArgInfo swiftcall::classifyArgumentType(CodeGenModule &CGM,
                                           CanQualType type) {
  return classifyType(CGM, type, /*forReturn*/ false);
}

void swiftcall::computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  auto &retInfo = FI.getReturnInfo();
  retInfo = classifyReturnType(CGM, FI.getReturnType());

  for (unsigned i = 0, e = FI.arg_size(); i != e; ++i) {
    auto &argInfo = FI.arg_begin()[i];
    argInfo.info = classifyArgumentType(CGM, argInfo.type);
  }
}

// Is swifterror lowered to a register by the target ABI.
bool swiftcall::isSwiftErrorLoweredInRegister(CodeGenModule &CGM) {
  return getSwiftABIInfo(CGM).isSwiftErrorInRegister();
}