TargetInfo.cpp
1 //===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // These classes wrap the information about a call or function
10 // definition used to handle ABI compliancy.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "TargetInfo.h"
15 #include "ABIInfo.h"
16 #include "CGBlocks.h"
17 #include "CGCXXABI.h"
18 #include "CGValue.h"
19 #include "CodeGenFunction.h"
20 #include "clang/AST/Attr.h"
21 #include "clang/AST/RecordLayout.h"
22 #include "clang/Basic/Builtins.h"
23 #include "clang/Basic/CodeGenOptions.h"
24 #include "clang/Basic/DiagnosticFrontend.h"
25 #include "clang/CodeGen/CGFunctionInfo.h"
26 #include "clang/CodeGen/SwiftCallingConv.h"
27 #include "llvm/ADT/SmallBitVector.h"
28 #include "llvm/ADT/StringExtras.h"
29 #include "llvm/ADT/StringSwitch.h"
30 #include "llvm/ADT/Triple.h"
31 #include "llvm/ADT/Twine.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/IntrinsicsNVPTX.h"
34 #include "llvm/IR/IntrinsicsS390.h"
35 #include "llvm/IR/Type.h"
36 #include "llvm/Support/MathExtras.h"
37 #include "llvm/Support/raw_ostream.h"
38 #include <algorithm> // std::sort
39 
40 using namespace clang;
41 using namespace CodeGen;
42 
43 // Helper for coercing an aggregate argument or return value into an integer
44 // array of the same size (including padding) and alignment. This alternate
45 // coercion happens only for the RenderScript ABI and can be removed after
46 // runtimes that rely on it are no longer supported.
47 //
48 // RenderScript assumes that the size of the argument / return value in the IR
49 // is the same as the size of the corresponding qualified type. This helper
50 // coerces the aggregate type into an array of the same size (including
51 // padding). This coercion is used in lieu of expansion of struct members or
52 // other canonical coercions that return a coerced-type of larger size.
53 //
54 // Ty - The argument / return value type
55 // Context - The associated ASTContext
56 // LLVMContext - The associated LLVMContext
57 static ABIArgInfo coerceToIntArray(QualType Ty,
58  ASTContext &Context,
59  llvm::LLVMContext &LLVMContext) {
60  // Alignment and Size are measured in bits.
61  const uint64_t Size = Context.getTypeSize(Ty);
62  const uint64_t Alignment = Context.getTypeAlign(Ty);
63  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
64  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
65  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
66 }
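// Illustrative example (editorial note, not part of the original source):
// for a hypothetical aggregate such as
//
//   struct S { int a; int b; short c; };   // 96 bits wide, 32-bit aligned
//
// getTypeSize() is 96 and getTypeAlign() is 32, so NumElements =
// (96 + 32 - 1) / 32 = 3 and the helper above returns
// ABIArgInfo::getDirect([3 x i32]).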
67 
68 static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
69  llvm::Value *Array,
70  llvm::Value *Value,
71  unsigned FirstIndex,
72  unsigned LastIndex) {
73  // Alternatively, we could emit this as a loop in the source.
74  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
75  llvm::Value *Cell =
76  Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
77  Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
78  }
79 }
80 
81 static bool isAggregateTypeForABI(QualType T) {
82  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
83  T->isMemberFunctionPointerType();
84 }
85 
86 ABIArgInfo ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByVal,
87  bool Realign,
88  llvm::Type *Padding) const {
89  return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty), ByVal,
90  Realign, Padding);
91 }
92 
93 ABIArgInfo ABIInfo::getNaturalAlignIndirectInReg(QualType Ty,
94  bool Realign) const {
95  return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
96  /*ByVal*/ false, Realign);
97 }
98 
99 Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
100  QualType Ty) const {
101  return Address::invalid();
102 }
103 
104 static llvm::Type *getVAListElementType(CodeGenFunction &CGF) {
105  return CGF.ConvertTypeForMem(
106  CGF.getContext().getBuiltinVaListType()->getPointeeType());
107 }
108 
109 bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
110  if (Ty->isPromotableIntegerType())
111  return true;
112 
113  if (const auto *EIT = Ty->getAs<BitIntType>())
114  if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
115  return true;
116 
117  return false;
118 }
119 
120 ABIInfo::~ABIInfo() {}
121 
122 /// Does the given lowering require more than the given number of
123 /// registers when expanded?
124 ///
125 /// This is intended to be the basis of a reasonable basic implementation
126 /// of should{Pass,Return}IndirectlyForSwift.
127 ///
128 /// For most targets, a limit of four total registers is reasonable; this
129 /// limits the amount of code required in order to move around the value
130 /// in case it wasn't produced immediately prior to the call by the caller
131 /// (or wasn't produced in exactly the right registers) or isn't used
132 /// immediately within the callee. But some targets may need to further
133 /// limit the register count due to an inability to support that many
134 /// return registers.
135 static bool occupiesMoreThan(CodeGenTypes &cgt,
136  ArrayRef<llvm::Type*> scalarTypes,
137  unsigned maxAllRegisters) {
138  unsigned intCount = 0, fpCount = 0;
139  for (llvm::Type *type : scalarTypes) {
140  if (type->isPointerTy()) {
141  intCount++;
142  } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
143  auto ptrWidth = cgt.getTarget().getPointerWidth(0);
144  intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
145  } else {
146  assert(type->isVectorTy() || type->isFloatingPointTy());
147  fpCount++;
148  }
149  }
150 
151  return (intCount + fpCount > maxAllRegisters);
152 }
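// Illustrative example (editorial note, not part of the original source):
// on a target with 32-bit pointers, a lowering to the scalar types
// {i64, i64, float} counts as (2 + 2) integer registers plus 1 FP register,
// so occupiesMoreThan(CGT, scalars, /*maxAllRegisters=*/4) returns true and
// a Swift value with that lowering would be passed indirectly.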
153 
154 bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
155  llvm::Type *eltTy,
156  unsigned numElts) const {
157  // The default implementation of this assumes that the target guarantees
158  // 128-bit SIMD support but nothing more.
159  return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
160 }
161 
162 static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
163  CGCXXABI &CXXABI) {
164  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
165  if (!RD) {
166  if (!RT->getDecl()->canPassInRegisters())
167  return CGCXXABI::RAA_Indirect;
168  return CGCXXABI::RAA_Default;
169  }
170  return CXXABI.getRecordArgABI(RD);
171 }
172 
173 static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
174  CGCXXABI &CXXABI) {
175  const RecordType *RT = T->getAs<RecordType>();
176  if (!RT)
177  return CGCXXABI::RAA_Default;
178  return getRecordArgABI(RT, CXXABI);
179 }
180 
181 static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
182  const ABIInfo &Info) {
183  QualType Ty = FI.getReturnType();
184 
185  if (const auto *RT = Ty->getAs<RecordType>())
186  if (!isa<CXXRecordDecl>(RT->getDecl()) &&
187  !RT->getDecl()->canPassInRegisters()) {
188  FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
189  return true;
190  }
191 
192  return CXXABI.classifyReturnType(FI);
193 }
194 
195 /// Pass transparent unions as if they were the type of the first element. Sema
196 /// should ensure that all elements of the union have the same "machine type".
197 static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
198  if (const RecordType *UT = Ty->getAsUnionType()) {
199  const RecordDecl *UD = UT->getDecl();
200  if (UD->hasAttr<TransparentUnionAttr>()) {
201  assert(!UD->field_empty() && "sema created an empty transparent union");
202  return UD->field_begin()->getType();
203  }
204  }
205  return Ty;
206 }
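// Illustrative example (editorial note, not part of the original source):
// given a GNU C declaration such as
//
//   typedef union {
//     int *ip;
//     float *fp;
//   } any_ptr __attribute__((transparent_union));
//
// useFirstFieldIfTransparentUnion() returns the type of the first field
// (int *), so an 'any_ptr' parameter is passed exactly like an 'int *'.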
207 
208 CGCXXABI &ABIInfo::getCXXABI() const {
209  return CGT.getCXXABI();
210 }
211 
212 ASTContext &ABIInfo::getContext() const {
213  return CGT.getContext();
214 }
215 
216 llvm::LLVMContext &ABIInfo::getVMContext() const {
217  return CGT.getLLVMContext();
218 }
219 
220 const llvm::DataLayout &ABIInfo::getDataLayout() const {
221  return CGT.getDataLayout();
222 }
223 
224 const TargetInfo &ABIInfo::getTarget() const {
225  return CGT.getTarget();
226 }
227 
228 const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
229  return CGT.getCodeGenOpts();
230 }
231 
232 bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }
233 
234 bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
235  return false;
236 }
237 
238 bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
239  uint64_t Members) const {
240  return false;
241 }
242 
243 LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
244  raw_ostream &OS = llvm::errs();
245  OS << "(ABIArgInfo Kind=";
246  switch (TheKind) {
247  case Direct:
248  OS << "Direct Type=";
249  if (llvm::Type *Ty = getCoerceToType())
250  Ty->print(OS);
251  else
252  OS << "null";
253  break;
254  case Extend:
255  OS << "Extend";
256  break;
257  case Ignore:
258  OS << "Ignore";
259  break;
260  case InAlloca:
261  OS << "InAlloca Offset=" << getInAllocaFieldIndex();
262  break;
263  case Indirect:
264  OS << "Indirect Align=" << getIndirectAlign().getQuantity()
265  << " ByVal=" << getIndirectByVal()
266  << " Realign=" << getIndirectRealign();
267  break;
268  case IndirectAliased:
269  OS << "Indirect Align=" << getIndirectAlign().getQuantity()
270  << " AddrSpace=" << getIndirectAddrSpace()
271  << " Realign=" << getIndirectRealign();
272  break;
273  case Expand:
274  OS << "Expand";
275  break;
276  case CoerceAndExpand:
277  OS << "CoerceAndExpand Type=";
278  getCoerceAndExpandType()->print(OS);
279  break;
280  }
281  OS << ")\n";
282 }
283 
284 // Dynamically round a pointer up to a multiple of the given alignment.
285 static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
286  llvm::Value *Ptr,
287  CharUnits Align) {
288  llvm::Value *PtrAsInt = Ptr;
289  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
290  PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
291  PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
292  llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
293  PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
294  llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
295  PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
296  Ptr->getType(),
297  Ptr->getName() + ".aligned");
298  return PtrAsInt;
299 }
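// Illustrative example (editorial note, not part of the original source):
// with Align == 8, a pointer value of 0x1003 becomes
// (0x1003 + 7) & -8 == 0x1008, while an already 8-byte-aligned pointer such
// as 0x1008 is left unchanged.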
300 
301 /// Emit va_arg for a platform using the common void* representation,
302 /// where arguments are simply emitted in an array of slots on the stack.
303 ///
304 /// This version implements the core direct-value passing rules.
305 ///
306 /// \param SlotSize - The size and alignment of a stack slot.
307 /// Each argument will be allocated to a multiple of this number of
308 /// slots, and all the slots will be aligned to this value.
309 /// \param AllowHigherAlign - The slot alignment is not a cap;
310 /// an argument type with an alignment greater than the slot size
311 /// will be emitted on a higher-alignment address, potentially
312 /// leaving one or more empty slots behind as padding. If this
313 /// is false, the returned address might be less-aligned than
314 /// DirectAlign.
315 static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
316  Address VAListAddr,
317  llvm::Type *DirectTy,
318  CharUnits DirectSize,
319  CharUnits DirectAlign,
320  CharUnits SlotSize,
321  bool AllowHigherAlign) {
322  // Cast the element type to i8* if necessary. Some platforms define
323  // va_list as a struct containing an i8* instead of just an i8*.
324  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
325  VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);
326 
327  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");
328 
329  // If the CC aligns values higher than the slot size, do so if needed.
330  Address Addr = Address::invalid();
331  if (AllowHigherAlign && DirectAlign > SlotSize) {
332  Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
333  CGF.Int8Ty, DirectAlign);
334  } else {
335  Addr = Address(Ptr, CGF.Int8Ty, SlotSize);
336  }
337 
338  // Advance the pointer past the argument, then store that back.
339  CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
340  Address NextPtr =
341  CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
342  CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
343 
344  // If the argument is smaller than a slot, and this is a big-endian
345  // target, the argument will be right-adjusted in its slot.
346  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
347  !DirectTy->isStructTy()) {
348  Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
349  }
350 
351  Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
352  return Addr;
353 }
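// Illustrative example (editorial note, not part of the original source):
// with 4-byte stack slots, fetching a 'double' (size 8, align 8) with
// AllowHigherAlign == true first rounds argp.cur up to a multiple of 8 and
// then stores argp.next = argp.cur + 8 back into the va_list; an 'int'
// consumes a single 4-byte slot. On a big-endian target a 2-byte value is
// additionally read from the last two bytes of its slot.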
354 
355 /// Emit va_arg for a platform using the common void* representation,
356 /// where arguments are simply emitted in an array of slots on the stack.
357 ///
358 /// \param IsIndirect - Values of this type are passed indirectly.
359 /// \param ValueInfo - The size and alignment of this type, generally
360 /// computed with getContext().getTypeInfoInChars(ValueTy).
361 /// \param SlotSizeAndAlign - The size and alignment of a stack slot.
362 /// Each argument will be allocated to a multiple of this number of
363 /// slots, and all the slots will be aligned to this value.
364 /// \param AllowHigherAlign - The slot alignment is not a cap;
365 /// an argument type with an alignment greater than the slot size
366 /// will be emitted on a higher-alignment address, potentially
367 /// leaving one or more empty slots behind as padding.
368 static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
369  QualType ValueTy, bool IsIndirect,
370  TypeInfoChars ValueInfo,
371  CharUnits SlotSizeAndAlign,
372  bool AllowHigherAlign) {
373  // The size and alignment of the value that was passed directly.
374  CharUnits DirectSize, DirectAlign;
375  if (IsIndirect) {
376  DirectSize = CGF.getPointerSize();
377  DirectAlign = CGF.getPointerAlign();
378  } else {
379  DirectSize = ValueInfo.Width;
380  DirectAlign = ValueInfo.Align;
381  }
382 
383  // Cast the address we've calculated to the right type.
384  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy), *ElementTy = DirectTy;
385  if (IsIndirect)
386  DirectTy = DirectTy->getPointerTo(0);
387 
388  Address Addr =
389  emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy, DirectSize, DirectAlign,
390  SlotSizeAndAlign, AllowHigherAlign);
391 
392  if (IsIndirect) {
393  Addr = Address(CGF.Builder.CreateLoad(Addr), ElementTy, ValueInfo.Align);
394  }
395 
396  return Addr;
397 }
398 
399 static Address complexTempStructure(CodeGenFunction &CGF, Address VAListAddr,
400  QualType Ty, CharUnits SlotSize,
401  CharUnits EltSize, const ComplexType *CTy) {
402  Address Addr =
403  emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty, SlotSize * 2,
404  SlotSize, SlotSize, /*AllowHigher*/ true);
405 
406  Address RealAddr = Addr;
407  Address ImagAddr = RealAddr;
408  if (CGF.CGM.getDataLayout().isBigEndian()) {
409  RealAddr =
410  CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize - EltSize);
411  ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
412  2 * SlotSize - EltSize);
413  } else {
414  ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
415  }
416 
417  llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
418  RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
419  ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
420  llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
421  llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
422 
423  Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
424  CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
425  /*init*/ true);
426  return Temp;
427 }
428 
429 static Address emitMergePHI(CodeGenFunction &CGF,
430  Address Addr1, llvm::BasicBlock *Block1,
431  Address Addr2, llvm::BasicBlock *Block2,
432  const llvm::Twine &Name = "") {
433  assert(Addr1.getType() == Addr2.getType());
434  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
435  PHI->addIncoming(Addr1.getPointer(), Block1);
436  PHI->addIncoming(Addr2.getPointer(), Block2);
437  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
438  return Address(PHI, Addr1.getElementType(), Align);
439 }
440 
441 TargetCodeGenInfo::~TargetCodeGenInfo() = default;
442 
443 // If someone can figure out a general rule for this, that would be great.
444 // It's probably just doomed to be platform-dependent, though.
445 unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
446  // Verified for:
447  // x86-64 FreeBSD, Linux, Darwin
448  // x86-32 FreeBSD, Linux, Darwin
449  // PowerPC Linux, Darwin
450  // ARM Darwin (*not* EABI)
451  // AArch64 Linux
452  return 32;
453 }
454 
455 bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
456  const FunctionNoProtoType *fnType) const {
457  // The following conventions are known to require this to be false:
458  // x86_stdcall
459  // MIPS
460  // For everything else, we just prefer false unless we opt out.
461  return false;
462 }
463 
464 void
465 TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
466  llvm::SmallString<24> &Opt) const {
467  // This assumes the user is passing a library name like "rt" instead of a
468  // filename like "librt.a/so", and that they don't care whether it's static or
469  // dynamic.
470  Opt = "-l";
471  Opt += Lib;
472 }
473 
474 unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
475  // OpenCL kernels are called via an explicit runtime API with arguments
476  // set with clSetKernelArg(), not as normal sub-functions.
477  // Return SPIR_KERNEL by default as the kernel calling convention to
478  // ensure the fingerprint is fixed such way that each OpenCL argument
479  // gets one matching argument in the produced kernel function argument
480  // list to enable feasible implementation of clSetKernelArg() with
481  // aggregates etc. In case we would use the default C calling conv here,
482  // clSetKernelArg() might break depending on the target-specific
483  // conventions; different targets might split structs passed as values
484  // to multiple function arguments etc.
485  return llvm::CallingConv::SPIR_KERNEL;
486 }
487 
488 llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
489  llvm::PointerType *T, QualType QT) const {
490  return llvm::ConstantPointerNull::get(T);
491 }
492 
493 LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
494  const VarDecl *D) const {
495  assert(!CGM.getLangOpts().OpenCL &&
496  !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
497  "Address space agnostic languages only");
498  return D ? D->getType().getAddressSpace() : LangAS::Default;
499 }
500 
501 llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
502  CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr,
503  LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const {
504  // Since target may map different address spaces in AST to the same address
505  // space, an address space conversion may end up as a bitcast.
506  if (auto *C = dyn_cast<llvm::Constant>(Src))
507  return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
508  // Try to preserve the source's name to make IR more readable.
509  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
510  Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : "");
511 }
512 
513 llvm::Constant *
514 TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
515  LangAS SrcAddr, LangAS DestAddr,
516  llvm::Type *DestTy) const {
517  // Since target may map different address spaces in AST to the same address
518  // space, an address space conversion may end up as a bitcast.
519  return llvm::ConstantExpr::getPointerCast(Src, DestTy);
520 }
521 
522 llvm::SyncScope::ID
523 TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
524  SyncScope Scope,
525  llvm::AtomicOrdering Ordering,
526  llvm::LLVMContext &Ctx) const {
527  return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */
528 }
529 
530 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
531 
532 /// isEmptyField - Return true iff the field is "empty", that is, it is an
533 /// unnamed bit-field or an (array of) empty record(s).
534 static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
535  bool AllowArrays) {
536  if (FD->isUnnamedBitfield())
537  return true;
538 
539  QualType FT = FD->getType();
540 
541  // Constant arrays of empty records count as empty, strip them off.
542  // Constant arrays of zero length always count as empty.
543  bool WasArray = false;
544  if (AllowArrays)
545  while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
546  if (AT->getSize() == 0)
547  return true;
548  FT = AT->getElementType();
549  // The [[no_unique_address]] special case below does not apply to
550  // arrays of C++ empty records, so we need to remember this fact.
551  WasArray = true;
552  }
553 
554  const RecordType *RT = FT->getAs<RecordType>();
555  if (!RT)
556  return false;
557 
558  // C++ record fields are never empty, at least in the Itanium ABI.
559  //
560  // FIXME: We should use a predicate for whether this behavior is true in the
561  // current ABI.
562  //
563  // The exception to the above rule are fields marked with the
564  // [[no_unique_address]] attribute (since C++20). Those do count as empty
565  // according to the Itanium ABI. The exception applies only to records,
566  // not arrays of records, so we must also check whether we stripped off an
567  // array type above.
568  if (isa<CXXRecordDecl>(RT->getDecl()) &&
569  (WasArray || !FD->hasAttr<NoUniqueAddressAttr>()))
570  return false;
571 
572  return isEmptyRecord(Context, FT, AllowArrays);
573 }
574 
575 /// isEmptyRecord - Return true iff a structure contains only empty
576 /// fields. Note that a structure with a flexible array member is not
577 /// considered empty.
578 static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
579  const RecordType *RT = T->getAs<RecordType>();
580  if (!RT)
581  return false;
582  const RecordDecl *RD = RT->getDecl();
583  if (RD->hasFlexibleArrayMember())
584  return false;
585 
586  // If this is a C++ record, check the bases first.
587  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
588  for (const auto &I : CXXRD->bases())
589  if (!isEmptyRecord(Context, I.getType(), true))
590  return false;
591 
592  for (const auto *I : RD->fields())
593  if (!isEmptyField(Context, I, AllowArrays))
594  return false;
595  return true;
596 }
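// Illustrative example (editorial note, not part of the original source):
//
//   struct E { int : 0; };            // empty: only an unnamed bit-field
//   struct W { struct E e; int x; };  // not empty: it has the field 'x'
//
// In C, 'struct { struct E e; }' also counts as empty, while in C++ the
// member 'e' keeps the enclosing record non-empty unless it is declared with
// [[no_unique_address]] (see isEmptyField above).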
597 
598 /// isSingleElementStruct - Determine if a structure is a "single
599 /// element struct", i.e. it has exactly one non-empty field or
600 /// exactly one field which is itself a single element
601 /// struct. Structures with flexible array members are never
602 /// considered single element structs.
603 ///
604 /// \return The field declaration for the single non-empty field, if
605 /// it exists.
606 static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
607  const RecordType *RT = T->getAs<RecordType>();
608  if (!RT)
609  return nullptr;
610 
611  const RecordDecl *RD = RT->getDecl();
612  if (RD->hasFlexibleArrayMember())
613  return nullptr;
614 
615  const Type *Found = nullptr;
616 
617  // If this is a C++ record, check the bases first.
618  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
619  for (const auto &I : CXXRD->bases()) {
620  // Ignore empty records.
621  if (isEmptyRecord(Context, I.getType(), true))
622  continue;
623 
624  // If we already found an element then this isn't a single-element struct.
625  if (Found)
626  return nullptr;
627 
628  // If this is non-empty and not a single element struct, the composite
629  // cannot be a single element struct.
630  Found = isSingleElementStruct(I.getType(), Context);
631  if (!Found)
632  return nullptr;
633  }
634  }
635 
636  // Check for single element.
637  for (const auto *FD : RD->fields()) {
638  QualType FT = FD->getType();
639 
640  // Ignore empty fields.
641  if (isEmptyField(Context, FD, true))
642  continue;
643 
644  // If we already found an element then this isn't a single-element
645  // struct.
646  if (Found)
647  return nullptr;
648 
649  // Treat single element arrays as the element.
650  while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
651  if (AT->getSize().getZExtValue() != 1)
652  break;
653  FT = AT->getElementType();
654  }
655 
656  if (!isAggregateTypeForABI(FT)) {
657  Found = FT.getTypePtr();
658  } else {
659  Found = isSingleElementStruct(FT, Context);
660  if (!Found)
661  return nullptr;
662  }
663  }
664 
665  // We don't consider a struct a single-element struct if it has
666  // padding beyond the element type.
667  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
668  return nullptr;
669 
670  return Found;
671 }
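// Illustrative example (editorial note, not part of the original source):
//
//   struct A { double d; };        // single-element struct (element: double)
//   struct B { struct A a; };      // single-element struct (element: double)
//   struct C { float f[1]; };      // single-element struct (element: float)
//   struct D { float f; int i; };  // not single-element: two non-empty fields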
672 
673 namespace {
674 Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
675  const ABIArgInfo &AI) {
676  // This default implementation defers to the llvm backend's va_arg
677  // instruction. It can handle only passing arguments directly
678  // (typically only handled in the backend for primitive types), or
679  // aggregates passed indirectly by pointer (NOTE: if the "byval"
680  // flag has ABI impact in the callee, this implementation cannot
681  // work.)
682 
683  // Only a few cases are covered here at the moment -- those needed
684  // by the default abi.
685  llvm::Value *Val;
686 
687  if (AI.isIndirect()) {
688  assert(!AI.getPaddingType() &&
689  "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
690  assert(
691  !AI.getIndirectRealign() &&
692  "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");
693 
694  auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
695  CharUnits TyAlignForABI = TyInfo.Align;
696 
697  llvm::Type *ElementTy = CGF.ConvertTypeForMem(Ty);
698  llvm::Type *BaseTy = llvm::PointerType::getUnqual(ElementTy);
699  llvm::Value *Addr =
700  CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
701  return Address(Addr, ElementTy, TyAlignForABI);
702  } else {
703  assert((AI.isDirect() || AI.isExtend()) &&
704  "Unexpected ArgInfo Kind in generic VAArg emitter!");
705 
706  assert(!AI.getInReg() &&
707  "Unexpected InReg seen in arginfo in generic VAArg emitter!");
708  assert(!AI.getPaddingType() &&
709  "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
710  assert(!AI.getDirectOffset() &&
711  "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
712  assert(!AI.getCoerceToType() &&
713  "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
714 
715  Address Temp = CGF.CreateMemTemp(Ty, "varet");
716  Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(),
717  CGF.ConvertTypeForMem(Ty));
718  CGF.Builder.CreateStore(Val, Temp);
719  return Temp;
720  }
721 }
722 
723 /// DefaultABIInfo - The default implementation for ABI specific
724 /// details. This implementation provides information which results in
725 /// self-consistent and sensible LLVM IR generation, but does not
726 /// conform to any particular ABI.
727 class DefaultABIInfo : public ABIInfo {
728 public:
729  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
730 
731  ABIArgInfo classifyReturnType(QualType RetTy) const;
732  ABIArgInfo classifyArgumentType(QualType RetTy) const;
733 
734  void computeInfo(CGFunctionInfo &FI) const override {
735  if (!getCXXABI().classifyReturnType(FI))
736  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
737  for (auto &I : FI.arguments())
738  I.info = classifyArgumentType(I.type);
739  }
740 
741  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
742  QualType Ty) const override {
743  return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
744  }
745 };
746 
747 class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
748 public:
749  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
750  : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
751 };
752 
753 ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
754  Ty = useFirstFieldIfTransparentUnion(Ty);
755 
756  if (isAggregateTypeForABI(Ty)) {
757  // Records with non-trivial destructors/copy-constructors should not be
758  // passed by value.
759  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
760  return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
761 
762  return getNaturalAlignIndirect(Ty);
763  }
764 
765  // Treat an enum type as its underlying type.
766  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
767  Ty = EnumTy->getDecl()->getIntegerType();
768 
769  ASTContext &Context = getContext();
770  if (const auto *EIT = Ty->getAs<BitIntType>())
771  if (EIT->getNumBits() >
772  Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
773  ? Context.Int128Ty
774  : Context.LongLongTy))
775  return getNaturalAlignIndirect(Ty);
776 
777  return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
778  : ABIArgInfo::getDirect());
779 }
780 
781 ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
782  if (RetTy->isVoidType())
783  return ABIArgInfo::getIgnore();
784 
785  if (isAggregateTypeForABI(RetTy))
786  return getNaturalAlignIndirect(RetTy);
787 
788  // Treat an enum type as its underlying type.
789  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
790  RetTy = EnumTy->getDecl()->getIntegerType();
791 
792  if (const auto *EIT = RetTy->getAs<BitIntType>())
793  if (EIT->getNumBits() >
794  getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
795  ? getContext().Int128Ty
796  : getContext().LongLongTy))
797  return getNaturalAlignIndirect(RetTy);
798 
799  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
800  : ABIArgInfo::getDirect());
801 }
802 
803 //===----------------------------------------------------------------------===//
804 // WebAssembly ABI Implementation
805 //
806 // This is a very simple ABI that relies a lot on DefaultABIInfo.
807 //===----------------------------------------------------------------------===//
808 
809 class WebAssemblyABIInfo final : public SwiftABIInfo {
810 public:
811  enum ABIKind {
812  MVP = 0,
813  ExperimentalMV = 1,
814  };
815 
816 private:
817  DefaultABIInfo defaultInfo;
818  ABIKind Kind;
819 
820 public:
821  explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
822  : SwiftABIInfo(CGT), defaultInfo(CGT), Kind(Kind) {}
823 
824 private:
825  ABIArgInfo classifyReturnType(QualType RetTy) const;
826  ABIArgInfo classifyArgumentType(QualType Ty) const;
827 
828  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
829  // non-virtual, but computeInfo and EmitVAArg are virtual, so we
830  // overload them.
831  void computeInfo(CGFunctionInfo &FI) const override {
832  if (!getCXXABI().classifyReturnType(FI))
833  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
834  for (auto &Arg : FI.arguments())
835  Arg.info = classifyArgumentType(Arg.type);
836  }
837 
838  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
839  QualType Ty) const override;
840 
841  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
842  bool asReturnValue) const override {
843  return occupiesMoreThan(CGT, scalars, /*total*/ 4);
844  }
845 
846  bool isSwiftErrorInRegister() const override {
847  return false;
848  }
849 };
850 
851 class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
852 public:
853  explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
854  WebAssemblyABIInfo::ABIKind K)
855  : TargetCodeGenInfo(std::make_unique<WebAssemblyABIInfo>(CGT, K)) {}
856 
857  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
858  CodeGen::CodeGenModule &CGM) const override {
859  TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
860  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
861  if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
862  llvm::Function *Fn = cast<llvm::Function>(GV);
863  llvm::AttrBuilder B(GV->getContext());
864  B.addAttribute("wasm-import-module", Attr->getImportModule());
865  Fn->addFnAttrs(B);
866  }
867  if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
868  llvm::Function *Fn = cast<llvm::Function>(GV);
869  llvm::AttrBuilder B(GV->getContext());
870  B.addAttribute("wasm-import-name", Attr->getImportName());
871  Fn->addFnAttrs(B);
872  }
873  if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
874  llvm::Function *Fn = cast<llvm::Function>(GV);
875  llvm::AttrBuilder B(GV->getContext());
876  B.addAttribute("wasm-export-name", Attr->getExportName());
877  Fn->addFnAttrs(B);
878  }
879  }
880 
881  if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
882  llvm::Function *Fn = cast<llvm::Function>(GV);
883  if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
884  Fn->addFnAttr("no-prototype");
885  }
886  }
887 };
888 
889 /// Classify argument of given type \p Ty.
890 ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
891  Ty = useFirstFieldIfTransparentUnion(Ty);
892 
893  if (isAggregateTypeForABI(Ty)) {
894  // Records with non-trivial destructors/copy-constructors should not be
895  // passed by value.
896  if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
897  return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
898  // Ignore empty structs/unions.
899  if (isEmptyRecord(getContext(), Ty, true))
900  return ABIArgInfo::getIgnore();
901  // Lower single-element structs to just pass a regular value. TODO: We
902  // could do reasonable-size multiple-element structs too, using getExpand(),
903  // though watch out for things like bitfields.
904  if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
905  return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
906  // For the experimental multivalue ABI, fully expand all other aggregates
907  if (Kind == ABIKind::ExperimentalMV) {
908  const RecordType *RT = Ty->getAs<RecordType>();
909  assert(RT);
910  bool HasBitField = false;
911  for (auto *Field : RT->getDecl()->fields()) {
912  if (Field->isBitField()) {
913  HasBitField = true;
914  break;
915  }
916  }
917  if (!HasBitField)
918  return ABIArgInfo::getExpand();
919  }
920  }
921 
922  // Otherwise just do the default thing.
923  return defaultInfo.classifyArgumentType(Ty);
924 }
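// Illustrative example (editorial note, not part of the original source):
// under this classification 'struct S { double d; }' is passed as a plain
// 'double', an empty struct is ignored, and a two-field struct without
// bit-fields is passed indirectly under the MVP ABI but expanded into one IR
// argument per field under the experimental multivalue ABI.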
925 
926 ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
927  if (isAggregateTypeForABI(RetTy)) {
928  // Records with non-trivial destructors/copy-constructors should not be
929  // returned by value.
930  if (!getRecordArgABI(RetTy, getCXXABI())) {
931  // Ignore empty structs/unions.
932  if (isEmptyRecord(getContext(), RetTy, true))
933  return ABIArgInfo::getIgnore();
934  // Lower single-element structs to just return a regular value. TODO: We
935  // could do reasonable-size multiple-element structs too, using
936  // ABIArgInfo::getDirect().
937  if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
938  return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
939  // For the experimental multivalue ABI, return all other aggregates
940  if (Kind == ABIKind::ExperimentalMV)
941  return ABIArgInfo::getDirect();
942  }
943  }
944 
945  // Otherwise just do the default thing.
946  return defaultInfo.classifyReturnType(RetTy);
947 }
948 
949 Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
950  QualType Ty) const {
951  bool IsIndirect = isAggregateTypeForABI(Ty) &&
952  !isEmptyRecord(getContext(), Ty, true) &&
953  !isSingleElementStruct(Ty, getContext());
954  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
955  getContext().getTypeInfoInChars(Ty),
956  CharUnits::fromQuantity(4),
957  /*AllowHigherAlign=*/true);
958 }
959 
960 //===----------------------------------------------------------------------===//
961 // le32/PNaCl bitcode ABI Implementation
962 //
963 // This is a simplified version of the x86_32 ABI. Arguments and return values
964 // are always passed on the stack.
965 //===----------------------------------------------------------------------===//
966 
967 class PNaClABIInfo : public ABIInfo {
968  public:
969  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
970 
971  ABIArgInfo classifyReturnType(QualType RetTy) const;
972  ABIArgInfo classifyArgumentType(QualType RetTy) const;
973 
974  void computeInfo(CGFunctionInfo &FI) const override;
975  Address EmitVAArg(CodeGenFunction &CGF,
976  Address VAListAddr, QualType Ty) const override;
977 };
978 
979 class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
980  public:
981  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
982  : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {}
983 };
984 
985 void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
986  if (!getCXXABI().classifyReturnType(FI))
987  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
988 
989  for (auto &I : FI.arguments())
990  I.info = classifyArgumentType(I.type);
991 }
992 
993 Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
994  QualType Ty) const {
995  // The PNaCL ABI is a bit odd, in that varargs don't use normal
996  // function classification. Structs get passed directly for varargs
997  // functions, through a rewriting transform in
998  // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
999  // this target to actually support va_arg instructions with an
1000  // aggregate type, unlike other targets.
1001  return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
1002 }
1003 
1004 /// Classify argument of given type \p Ty.
1005 ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
1006  if (isAggregateTypeForABI(Ty)) {
1007  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
1008  return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
1009  return getNaturalAlignIndirect(Ty);
1010  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
1011  // Treat an enum type as its underlying type.
1012  Ty = EnumTy->getDecl()->getIntegerType();
1013  } else if (Ty->isFloatingType()) {
1014  // Floating-point types don't go inreg.
1015  return ABIArgInfo::getDirect();
1016  } else if (const auto *EIT = Ty->getAs<BitIntType>()) {
1017  // Treat bit-precise integers as integers if <= 64, otherwise pass
1018  // indirectly.
1019  if (EIT->getNumBits() > 64)
1020  return getNaturalAlignIndirect(Ty);
1021  return ABIArgInfo::getDirect();
1022  }
1023 
1024  return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
1025  : ABIArgInfo::getDirect());
1026 }
1027 
1028 ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
1029  if (RetTy->isVoidType())
1030  return ABIArgInfo::getIgnore();
1031 
1032  // In the PNaCl ABI we always return records/structures on the stack.
1033  if (isAggregateTypeForABI(RetTy))
1034  return getNaturalAlignIndirect(RetTy);
1035 
1036  // Treat bit-precise integers as integers if <= 64, otherwise pass indirectly.
1037  if (const auto *EIT = RetTy->getAs<BitIntType>()) {
1038  if (EIT->getNumBits() > 64)
1039  return getNaturalAlignIndirect(RetTy);
1040  return ABIArgInfo::getDirect();
1041  }
1042 
1043  // Treat an enum type as its underlying type.
1044  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
1045  RetTy = EnumTy->getDecl()->getIntegerType();
1046 
1047  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
1048  : ABIArgInfo::getDirect());
1049 }
1050 
1051 /// IsX86_MMXType - Return true if this is an MMX type.
1052 bool IsX86_MMXType(llvm::Type *IRType) {
1053  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
1054  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
1055  cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
1056  IRType->getScalarSizeInBits() != 64;
1057 }
1058 
1059 static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
1060  StringRef Constraint,
1061  llvm::Type* Ty) {
1062  bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
1063  .Cases("y", "&y", "^Ym", true)
1064  .Default(false);
1065  if (IsMMXCons && Ty->isVectorTy()) {
1066  if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedSize() !=
1067  64) {
1068  // Invalid MMX constraint
1069  return nullptr;
1070  }
1071 
1072  return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
1073  }
1074 
1075  // No operation needed
1076  return Ty;
1077 }
1078 
1079 /// Returns true if this type can be passed in SSE registers with the
1080 /// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
1081 static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
1082  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
1083  if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
1084  if (BT->getKind() == BuiltinType::LongDouble) {
1085  if (&Context.getTargetInfo().getLongDoubleFormat() ==
1086  &llvm::APFloat::x87DoubleExtended())
1087  return false;
1088  }
1089  return true;
1090  }
1091  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
1092  // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
1093  // registers specially.
1094  unsigned VecSize = Context.getTypeSize(VT);
1095  if (VecSize == 128 || VecSize == 256 || VecSize == 512)
1096  return true;
1097  }
1098  return false;
1099 }
1100 
1101 /// Returns true if this aggregate is small enough to be passed in SSE registers
1102 /// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
1103 static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
1104  return NumMembers <= 4;
1105 }
1106 
1107 /// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
1108 static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
1109  auto AI = ABIArgInfo::getDirect(T);
1110  AI.setInReg(true);
1111  AI.setCanBeFlattened(false);
1112  return AI;
1113 }
1114 
1115 //===----------------------------------------------------------------------===//
1116 // X86-32 ABI Implementation
1117 //===----------------------------------------------------------------------===//
1118 
1119 /// Similar to llvm::CCState, but for Clang.
1120 struct CCState {
1121  CCState(CGFunctionInfo &FI)
1122  : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {}
1123 
1124  llvm::SmallBitVector IsPreassigned;
1125  unsigned CC = CallingConv::CC_C;
1126  unsigned FreeRegs = 0;
1127  unsigned FreeSSERegs = 0;
1128 };
1129 
1130 /// X86_32ABIInfo - The X86-32 ABI information.
1131 class X86_32ABIInfo : public SwiftABIInfo {
1132  enum Class {
1133  Integer,
1134  Float
1135  };
1136 
1137  static const unsigned MinABIStackAlignInBytes = 4;
1138 
1139  bool IsDarwinVectorABI;
1140  bool IsRetSmallStructInRegABI;
1141  bool IsWin32StructABI;
1142  bool IsSoftFloatABI;
1143  bool IsMCUABI;
1144  bool IsLinuxABI;
1145  unsigned DefaultNumRegisterParameters;
1146 
1147  static bool isRegisterSize(unsigned Size) {
1148  return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
1149  }
1150 
1151  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
1152  // FIXME: Assumes vectorcall is in use.
1153  return isX86VectorTypeForVectorCall(getContext(), Ty);
1154  }
1155 
1156  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
1157  uint64_t NumMembers) const override {
1158  // FIXME: Assumes vectorcall is in use.
1159  return isX86VectorCallAggregateSmallEnough(NumMembers);
1160  }
1161 
1162  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;
1163 
1164  /// getIndirectResult - Give a source type \arg Ty, return a suitable result
1165  /// such that the argument will be passed in memory.
1166  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
1167 
1168  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;
1169 
1170  /// Return the alignment to use for the given type on the stack.
1171  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
1172 
1173  Class classify(QualType Ty) const;
1174  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
1175  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
1176 
1177  /// Updates the number of available free registers, returns
1178  /// true if any registers were allocated.
1179  bool updateFreeRegs(QualType Ty, CCState &State) const;
1180 
1181  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
1182  bool &NeedsPadding) const;
1183  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;
1184 
1185  bool canExpandIndirectArgument(QualType Ty) const;
1186 
1187  /// Rewrite the function info so that all memory arguments use
1188  /// inalloca.
1189  void rewriteWithInAlloca(CGFunctionInfo &FI) const;
1190 
1191  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1192  CharUnits &StackOffset, ABIArgInfo &Info,
1193  QualType Type) const;
1194  void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;
1195 
1196 public:
1197 
1198  void computeInfo(CGFunctionInfo &FI) const override;
1199  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
1200  QualType Ty) const override;
1201 
1202  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
1203  bool RetSmallStructInRegABI, bool Win32StructABI,
1204  unsigned NumRegisterParameters, bool SoftFloatABI)
1205  : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
1206  IsRetSmallStructInRegABI(RetSmallStructInRegABI),
1207  IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
1208  IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
1209  IsLinuxABI(CGT.getTarget().getTriple().isOSLinux() ||
1210  CGT.getTarget().getTriple().isOSCygMing()),
1211  DefaultNumRegisterParameters(NumRegisterParameters) {}
1212 
1213  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
1214  bool asReturnValue) const override {
1215  // LLVM's x86-32 lowering currently only assigns up to three
1216  // integer registers and three fp registers. Oddly, it'll use up to
1217  // four vector registers for vectors, but those can overlap with the
1218  // scalar registers.
1219  return occupiesMoreThan(CGT, scalars, /*total*/ 3);
1220  }
1221 
1222  bool isSwiftErrorInRegister() const override {
1223  // x86-32 lowering does not support passing swifterror in a register.
1224  return false;
1225  }
1226 };
1227 
1228 class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
1229 public:
1230  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
1231  bool RetSmallStructInRegABI, bool Win32StructABI,
1232  unsigned NumRegisterParameters, bool SoftFloatABI)
1233  : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
1234  CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
1235  NumRegisterParameters, SoftFloatABI)) {}
1236 
1237  static bool isStructReturnInRegABI(
1238  const llvm::Triple &Triple, const CodeGenOptions &Opts);
1239 
1240  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
1241  CodeGen::CodeGenModule &CGM) const override;
1242 
1243  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
1244  // Darwin uses different dwarf register numbers for EH.
1245  if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
1246  return 4;
1247  }
1248 
1249  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1250  llvm::Value *Address) const override;
1251 
1252  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
1253  StringRef Constraint,
1254  llvm::Type* Ty) const override {
1255  return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1256  }
1257 
1258  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
1259  std::string &Constraints,
1260  std::vector<llvm::Type *> &ResultRegTypes,
1261  std::vector<llvm::Type *> &ResultTruncRegTypes,
1262  std::vector<LValue> &ResultRegDests,
1263  std::string &AsmString,
1264  unsigned NumOutputs) const override;
1265 
1266  llvm::Constant *
1267  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
1268  unsigned Sig = (0xeb << 0) | // jmp rel8
1269  (0x06 << 8) | // .+0x08
1270  ('v' << 16) |
1271  ('2' << 24);
1272  return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
1273  }
1274 
1275  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
1276  return "movl\t%ebp, %ebp"
1277  "\t\t// marker for objc_retainAutoreleaseReturnValue";
1278  }
1279 };
1280 
1281 }
1282 
1283 /// Rewrite input constraint references after adding some output constraints.
1284 /// In the case where there is one output and one input and we add one output,
1285 /// we need to replace all operand references greater than or equal to 1:
1286 /// mov $0, $1
1287 /// mov eax, $1
1288 /// The result will be:
1289 /// mov $0, $2
1290 /// mov eax, $2
1291 static void rewriteInputConstraintReferences(unsigned FirstIn,
1292  unsigned NumNewOuts,
1293  std::string &AsmString) {
1294  std::string Buf;
1295  llvm::raw_string_ostream OS(Buf);
1296  size_t Pos = 0;
1297  while (Pos < AsmString.size()) {
1298  size_t DollarStart = AsmString.find('$', Pos);
1299  if (DollarStart == std::string::npos)
1300  DollarStart = AsmString.size();
1301  size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
1302  if (DollarEnd == std::string::npos)
1303  DollarEnd = AsmString.size();
1304  OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
1305  Pos = DollarEnd;
1306  size_t NumDollars = DollarEnd - DollarStart;
1307  if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
1308  // We have an operand reference.
1309  size_t DigitStart = Pos;
1310  if (AsmString[DigitStart] == '{') {
1311  OS << '{';
1312  ++DigitStart;
1313  }
1314  size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
1315  if (DigitEnd == std::string::npos)
1316  DigitEnd = AsmString.size();
1317  StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
1318  unsigned OperandIndex;
1319  if (!OperandStr.getAsInteger(10, OperandIndex)) {
1320  if (OperandIndex >= FirstIn)
1321  OperandIndex += NumNewOuts;
1322  OS << OperandIndex;
1323  } else {
1324  OS << OperandStr;
1325  }
1326  Pos = DigitEnd;
1327  }
1328  }
1329  AsmString = std::move(OS.str());
1330 }
1331 
1332 /// Add output constraints for EAX:EDX because they are return registers.
1333 void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
1334  CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
1335  std::vector<llvm::Type *> &ResultRegTypes,
1336  std::vector<llvm::Type *> &ResultTruncRegTypes,
1337  std::vector<LValue> &ResultRegDests, std::string &AsmString,
1338  unsigned NumOutputs) const {
1339  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());
1340 
1341  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
1342  // larger.
1343  if (!Constraints.empty())
1344  Constraints += ',';
1345  if (RetWidth <= 32) {
1346  Constraints += "={eax}";
1347  ResultRegTypes.push_back(CGF.Int32Ty);
1348  } else {
1349  // Use the 'A' constraint for EAX:EDX.
1350  Constraints += "=A";
1351  ResultRegTypes.push_back(CGF.Int64Ty);
1352  }
1353 
1354  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
1355  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
1356  ResultTruncRegTypes.push_back(CoerceTy);
1357 
1358  // Coerce the integer by bitcasting the return slot pointer.
1359  ReturnSlot.setAddress(
1360  CGF.Builder.CreateElementBitCast(ReturnSlot.getAddress(CGF), CoerceTy));
1361  ResultRegDests.push_back(ReturnSlot);
1362 
1363  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
1364 }
1365 
1366 /// shouldReturnTypeInRegister - Determine if the given type should be
1367 /// returned in a register (for the Darwin and MCU ABI).
1368 bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
1369  ASTContext &Context) const {
1370  uint64_t Size = Context.getTypeSize(Ty);
1371 
1372  // For i386, type must be register sized.
1373  // For the MCU ABI, it only needs to be <= 8 bytes.
1374  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
1375  return false;
1376 
1377  if (Ty->isVectorType()) {
1378  // 64- and 128- bit vectors inside structures are not returned in
1379  // registers.
1380  if (Size == 64 || Size == 128)
1381  return false;
1382 
1383  return true;
1384  }
1385 
1386  // If this is a builtin, pointer, enum, complex type, member pointer, or
1387  // member function pointer it is ok.
1388  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
1389  Ty->isAnyComplexType() || Ty->isEnumeralType() ||
1390  Ty->isBlockPointerType() || Ty->isMemberPointerType())
1391  return true;
1392 
1393  // Arrays are treated like records.
1394  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
1395  return shouldReturnTypeInRegister(AT->getElementType(), Context);
1396 
1397  // Otherwise, it must be a record type.
1398  const RecordType *RT = Ty->getAs<RecordType>();
1399  if (!RT) return false;
1400 
1401  // FIXME: Traverse bases here too.
1402 
1403  // Structure types are passed in register if all fields would be
1404  // passed in a register.
1405  for (const auto *FD : RT->getDecl()->fields()) {
1406  // Empty fields are ignored.
1407  if (isEmptyField(Context, FD, true))
1408  continue;
1409 
1410  // Check fields recursively.
1411  if (!shouldReturnTypeInRegister(FD->getType(), Context))
1412  return false;
1413  }
1414  return true;
1415 }
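// Illustrative example (editorial note, not part of the original source):
// with this check, 'struct { short a, b; }' (32 bits) and
// 'struct { int a, b; }' (64 bits) are register sized and are returned in
// registers, while on Darwin 'struct { char c[3]; }' (24 bits) is not
// register sized and falls back to an indirect (sret) return.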
1416 
1417 static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
1418  // Treat complex types as the element type.
1419  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
1420  Ty = CTy->getElementType();
1421 
1422  // Check for a type which we know has a simple scalar argument-passing
1423  // convention without any padding. (We're specifically looking for 32
1424  // and 64-bit integer and integer-equivalents, float, and double.)
1425  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
1426  !Ty->isEnumeralType() && !Ty->isBlockPointerType())
1427  return false;
1428 
1429  uint64_t Size = Context.getTypeSize(Ty);
1430  return Size == 32 || Size == 64;
1431 }
1432 
1433 static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
1434  uint64_t &Size) {
1435  for (const auto *FD : RD->fields()) {
1436  // Scalar arguments on the stack get 4 byte alignment on x86. If the
1437  // argument is smaller than 32-bits, expanding the struct will create
1438  // alignment padding.
1439  if (!is32Or64BitBasicType(FD->getType(), Context))
1440  return false;
1441 
1442  // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
1443  // how to expand them yet, and the predicate for telling if a bitfield still
1444  // counts as "basic" is more complicated than what we were doing previously.
1445  if (FD->isBitField())
1446  return false;
1447 
1448  Size += Context.getTypeSize(FD->getType());
1449  }
1450  return true;
1451 }
1452 
1453 static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
1454  uint64_t &Size) {
1455  // Don't do this if there are any non-empty bases.
1456  for (const CXXBaseSpecifier &Base : RD->bases()) {
1457  if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
1458  Size))
1459  return false;
1460  }
1461  if (!addFieldSizes(Context, RD, Size))
1462  return false;
1463  return true;
1464 }
1465 
1466 /// Test whether an argument type which is to be passed indirectly (on the
1467 /// stack) would have the equivalent layout if it was expanded into separate
1468 /// arguments. If so, we prefer to do the latter to avoid inhibiting
1469 /// optimizations.
1470 bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
1471  // We can only expand structure types.
1472  const RecordType *RT = Ty->getAs<RecordType>();
1473  if (!RT)
1474  return false;
1475  const RecordDecl *RD = RT->getDecl();
1476  uint64_t Size = 0;
1477  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1478  if (!IsWin32StructABI) {
1479  // On non-Windows, we have to conservatively match our old bitcode
1480  // prototypes in order to be ABI-compatible at the bitcode level.
1481  if (!CXXRD->isCLike())
1482  return false;
1483  } else {
1484  // Don't do this for dynamic classes.
1485  if (CXXRD->isDynamicClass())
1486  return false;
1487  }
1488  if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
1489  return false;
1490  } else {
1491  if (!addFieldSizes(getContext(), RD, Size))
1492  return false;
1493  }
1494 
1495  // We can do this if there was no alignment padding.
1496  return Size == getContext().getTypeSize(Ty);
1497 }
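// Illustrative example (editorial note, not part of the original source):
// 'struct { int a; float b; }' consists of 32/64-bit basic types and adds up
// to exactly 64 bits, so it can be expanded into two separate arguments;
// 'struct { char c; int i; }' is rejected because 'char' is not a 32- or
// 64-bit basic type (expanding it would introduce alignment padding on the
// stack).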
1498 
1499 ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy, CCState &State) const {
1500  // If the return value is indirect, then the hidden argument is consuming one
1501  // integer register.
1502  if (State.FreeRegs) {
1503  --State.FreeRegs;
1504  if (!IsMCUABI)
1505  return getNaturalAlignIndirectInReg(RetTy);
1506  }
1507  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
1508 }
1509 
1510 ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
1511  CCState &State) const {
1512  if (RetTy->isVoidType())
1513  return ABIArgInfo::getIgnore();
1514 
1515  const Type *Base = nullptr;
1516  uint64_t NumElts = 0;
1517  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
1518  State.CC == llvm::CallingConv::X86_RegCall) &&
1519  isHomogeneousAggregate(RetTy, Base, NumElts)) {
1520  // The LLVM struct type for such an aggregate should lower properly.
1521  return ABIArgInfo::getDirect();
1522  }
1523 
1524  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
1525  // On Darwin, some vectors are returned in registers.
1526  if (IsDarwinVectorABI) {
1527  uint64_t Size = getContext().getTypeSize(RetTy);
1528 
1529  // 128-bit vectors are a special case; they are returned in
1530  // registers and we need to make sure to pick a type the LLVM
1531  // backend will like.
1532  if (Size == 128)
1533  return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
1534  llvm::Type::getInt64Ty(getVMContext()), 2));
1535 
1536  // Always return in register if it fits in a general purpose
1537  // register, or if it is 64 bits and has a single element.
1538  if ((Size == 8 || Size == 16 || Size == 32) ||
1539  (Size == 64 && VT->getNumElements() == 1))
1540  return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
1541  Size));
1542 
1543  return getIndirectReturnResult(RetTy, State);
1544  }
1545 
1546  return ABIArgInfo::getDirect();
1547  }
1548 
1549  if (isAggregateTypeForABI(RetTy)) {
1550  if (const RecordType *RT = RetTy->getAs<RecordType>()) {
1551  // Structures with flexible arrays are always indirect.
1552  if (RT->getDecl()->hasFlexibleArrayMember())
1553  return getIndirectReturnResult(RetTy, State);
1554  }
1555 
1556  // If specified, structs and unions are always indirect.
1557  if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
1558  return getIndirectReturnResult(RetTy, State);
1559 
1560  // Ignore empty structs/unions.
1561  if (isEmptyRecord(getContext(), RetTy, true))
1562  return ABIArgInfo::getIgnore();
1563 
1564  // Return complex of _Float16 as <2 x half> so the backend will use xmm0.
1565  if (const ComplexType *CT = RetTy->getAs<ComplexType>()) {
1566  QualType ET = getContext().getCanonicalType(CT->getElementType());
1567  if (ET->isFloat16Type())
1568  return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
1569  llvm::Type::getHalfTy(getVMContext()), 2));
1570  }
1571 
1572  // Small structures which are register sized are generally returned
1573  // in a register.
1574  if (shouldReturnTypeInRegister(RetTy, getContext())) {
1575  uint64_t Size = getContext().getTypeSize(RetTy);
1576 
1577  // As a special-case, if the struct is a "single-element" struct, and
1578  // the field is of type "float" or "double", return it in a
1579  // floating-point register. (MSVC does not apply this special case.)
1580  // We apply a similar transformation for pointer types to improve the
1581  // quality of the generated IR.
1582  if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
1583  if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
1584  || SeltTy->hasPointerRepresentation())
1585  return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
1586 
1587  // FIXME: We should be able to narrow this integer in cases with dead
1588  // padding.
1589  return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
1590  }
1591 
1592  return getIndirectReturnResult(RetTy, State);
1593  }
1594 
1595  // Treat an enum type as its underlying type.
1596  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
1597  RetTy = EnumTy->getDecl()->getIntegerType();
1598 
1599  if (const auto *EIT = RetTy->getAs<BitIntType>())
1600  if (EIT->getNumBits() > 64)
1601  return getIndirectReturnResult(RetTy, State);
1602 
1603  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
1604  : ABIArgInfo::getDirect());
1605 }
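// [Editor's illustrative note - not part of the original source.] Assuming
// IsRetSmallStructInRegABI is set (e.g. Darwin i386 or -freg-struct-return),
// the classification above behaves roughly as follows for these hypothetical
// types:
//
//   struct P { int x; int y; };   // 64 bits, register sized -> getDirect(i64)
//   struct F { float f; };        // single-element float struct (non-Win32)
//                                 //   -> getDirect(float)
//   struct B { char buf[12]; };   // not register sized -> indirect (sret)
//   _Complex _Float16 c;          // -> getDirect(<2 x half>)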
1606 
1607 static bool isSIMDVectorType(ASTContext &Context, QualType Ty) {
1608  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
1609 }
1610 
1611 static bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
1612  const RecordType *RT = Ty->getAs<RecordType>();
1613  if (!RT)
1614  return false;
1615  const RecordDecl *RD = RT->getDecl();
1616 
1617  // If this is a C++ record, check the bases first.
1618  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
1619  for (const auto &I : CXXRD->bases())
1620  if (!isRecordWithSIMDVectorType(Context, I.getType()))
1621  return false;
1622 
1623  for (const auto *i : RD->fields()) {
1624  QualType FT = i->getType();
1625 
1626  if (isSIMDVectorType(Context, FT))
1627  return true;
1628 
1629  if (isRecordWithSIMDVectorType(Context, FT))
1630  return true;
1631  }
1632 
1633  return false;
1634 }
1635 
1636 unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
1637  unsigned Align) const {
1638  // Otherwise, if the alignment is less than or equal to the minimum ABI
1639  // alignment, just use the default; the backend will handle this.
1640  if (Align <= MinABIStackAlignInBytes)
1641  return 0; // Use default alignment.
1642 
1643  if (IsLinuxABI) {
1644  // Exclude other System V OSes (e.g. Darwin, PS4 and FreeBSD) since we don't
1645  // want to spend any effort dealing with the ramifications of ABI breaks.
1646  //
1647  // If the vector type is __m128/__m256/__m512, return the default alignment.
1648  if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64))
1649  return Align;
1650  }
1651  // On non-Darwin, the stack type alignment is always 4.
1652  if (!IsDarwinVectorABI) {
1653  // Set explicit alignment, since we may need to realign the top.
1654  return MinABIStackAlignInBytes;
1655  }
1656 
1657  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
1658  if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) ||
1659  isRecordWithSIMDVectorType(getContext(), Ty)))
1660  return 16;
1661 
1662  return MinABIStackAlignInBytes;
1663 }
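// [Editor's illustrative note - not part of the original source.] For example,
// on i386 Linux a __m128 argument (Align == 16) keeps its 16-byte stack
// alignment, while a non-vector struct declared alignas(16) falls through to
// MinABIStackAlignInBytes (typically 4). On Darwin the same struct gets a
// 16-byte slot only if it actually contains an SSE vector.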
1664 
1665 ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
1666  CCState &State) const {
1667  if (!ByVal) {
1668  if (State.FreeRegs) {
1669  --State.FreeRegs; // Non-byval indirects just use one pointer.
1670  if (!IsMCUABI)
1671  return getNaturalAlignIndirectInReg(Ty);
1672  }
1673  return getNaturalAlignIndirect(Ty, false);
1674  }
1675 
1676  // Compute the byval alignment.
1677  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
1678  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
1679  if (StackAlign == 0)
1680  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);
1681 
1682  // If the stack alignment is less than the type alignment, realign the
1683  // argument.
1684  bool Realign = TypeAlign > StackAlign;
1685  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
1686  /*ByVal=*/true, Realign);
1687 }
1688 
1689 X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
1690  const Type *T = isSingleElementStruct(Ty, getContext());
1691  if (!T)
1692  T = Ty.getTypePtr();
1693 
1694  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
1695  BuiltinType::Kind K = BT->getKind();
1696  if (K == BuiltinType::Float || K == BuiltinType::Double)
1697  return Float;
1698  }
1699  return Integer;
1700 }
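// [Editor's illustrative note - not part of the original source.]
//   classify(float)                  -> Float
//   classify(struct { double d; })   -> Float  (single-element struct)
//   classify(int), classify(T *)     -> Integer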
1701 
1702 bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
1703  if (!IsSoftFloatABI) {
1704  Class C = classify(Ty);
1705  if (C == Float)
1706  return false;
1707  }
1708 
1709  unsigned Size = getContext().getTypeSize(Ty);
1710  unsigned SizeInRegs = (Size + 31) / 32;
1711 
1712  if (SizeInRegs == 0)
1713  return false;
1714 
1715  if (!IsMCUABI) {
1716  if (SizeInRegs > State.FreeRegs) {
1717  State.FreeRegs = 0;
1718  return false;
1719  }
1720  } else {
1721  // The MCU psABI allows passing parameters in-reg even if there are
1722  // earlier parameters that are passed on the stack. Also,
1723  // it does not allow passing >8-byte structs in-register,
1724  // even if there are 3 free registers available.
1725  if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
1726  return false;
1727  }
1728 
1729  State.FreeRegs -= SizeInRegs;
1730  return true;
1731 }
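// [Editor's illustrative note - not part of the original source.] Under
// fastcall (State.FreeRegs == 2), a hypothetical `struct S { int a; int b; }`
// needs SizeInRegs == 2 and leaves FreeRegs == 0, whereas a `double` is
// classified as Float above and never consumes integer registers (unless the
// soft-float ABI is in effect).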
1732 
1733 bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
1734  bool &InReg,
1735  bool &NeedsPadding) const {
1736  // On Windows, aggregates other than HFAs are never passed in registers, and
1737  // they do not consume register slots. Homogeneous floating-point aggregates
1738  // (HFAs) have already been dealt with at this point.
1739  if (IsWin32StructABI && isAggregateTypeForABI(Ty))
1740  return false;
1741 
1742  NeedsPadding = false;
1743  InReg = !IsMCUABI;
1744 
1745  if (!updateFreeRegs(Ty, State))
1746  return false;
1747 
1748  if (IsMCUABI)
1749  return true;
1750 
1751  if (State.CC == llvm::CallingConv::X86_FastCall ||
1752  State.CC == llvm::CallingConv::X86_VectorCall ||
1753  State.CC == llvm::CallingConv::X86_RegCall) {
1754  if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
1755  NeedsPadding = true;
1756 
1757  return false;
1758  }
1759 
1760  return true;
1761 }
1762 
1763 bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
1764  if (!updateFreeRegs(Ty, State))
1765  return false;
1766 
1767  if (IsMCUABI)
1768  return false;
1769 
1770  if (State.CC == llvm::CallingConv::X86_FastCall ||
1771  State.CC == llvm::CallingConv::X86_VectorCall ||
1772  State.CC == llvm::CallingConv::X86_RegCall) {
1773  if (getContext().getTypeSize(Ty) > 32)
1774  return false;
1775 
1776  return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
1777  Ty->isReferenceType());
1778  }
1779 
1780  return true;
1781 }
1782 
1783 void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const {
1784  // Vectorcall in x86 works subtly differently than in x64, so the format is
1785  // a bit different from the x64 version. First, all vector types (not HVAs)
1786  // are assigned, with the first 6 ending up in the [XYZ]MM0-5 registers.
1787  // This differs from the x64 implementation, where the first 6 arguments by
1788  // index get registers.
1789  // In the second pass over the arguments, HVAs are passed in the remaining
1790  // vector registers if possible, or indirectly by address. The address will be
1791  // passed in ECX/EDX if available. Any other arguments are passed according to
1792  // the usual fastcall rules.
1793  MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
1794  for (int I = 0, E = Args.size(); I < E; ++I) {
1795  const Type *Base = nullptr;
1796  uint64_t NumElts = 0;
1797  const QualType &Ty = Args[I].type;
1798  if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
1799  isHomogeneousAggregate(Ty, Base, NumElts)) {
1800  if (State.FreeSSERegs >= NumElts) {
1801  State.FreeSSERegs -= NumElts;
1802  Args[I].info = ABIArgInfo::getDirectInReg();
1803  State.IsPreassigned.set(I);
1804  }
1805  }
1806  }
1807 }
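// [Editor's illustrative note - not part of the original source.] For a
// hypothetical `void __vectorcall f(__m128 a, int b, __m128 c)`, this first
// pass pre-assigns `a` and `c` to XMM registers (FreeSSERegs drops from 6 to
// 4) and records them in IsPreassigned; `b` is left for the regular
// classification pass in computeInfo.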
1808 
1809 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
1810  CCState &State) const {
1811  // FIXME: Set alignment on indirect arguments.
1812  bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
1813  bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
1814  bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
1815 
1816  Ty = useFirstFieldIfTransparentUnion(Ty);
1817  TypeInfo TI = getContext().getTypeInfo(Ty);
1818 
1819  // Check with the C++ ABI first.
1820  const RecordType *RT = Ty->getAs<RecordType>();
1821  if (RT) {
1822  CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
1823  if (RAA == CGCXXABI::RAA_Indirect) {
1824  return getIndirectResult(Ty, false, State);
1825  } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
1826  // The field index doesn't matter, we'll fix it up later.
1827  return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
1828  }
1829  }
1830 
1831  // Regcall uses the concept of a homogeneous vector aggregate, similar
1832  // to other targets.
1833  const Type *Base = nullptr;
1834  uint64_t NumElts = 0;
1835  if ((IsRegCall || IsVectorCall) &&
1836  isHomogeneousAggregate(Ty, Base, NumElts)) {
1837  if (State.FreeSSERegs >= NumElts) {
1838  State.FreeSSERegs -= NumElts;
1839 
1840  // Vectorcall passes HVAs directly and does not flatten them, but regcall
1841  // does.
1842  if (IsVectorCall)
1843  return getDirectX86Hva();
1844 
1845  if (Ty->isBuiltinType() || Ty->isVectorType())
1846  return ABIArgInfo::getDirect();
1847  return ABIArgInfo::getExpand();
1848  }
1849  return getIndirectResult(Ty, /*ByVal=*/false, State);
1850  }
1851 
1852  if (isAggregateTypeForABI(Ty)) {
1853  // Structures with flexible arrays are always indirect.
1854  // FIXME: This should not be byval!
1855  if (RT && RT->getDecl()->hasFlexibleArrayMember())
1856  return getIndirectResult(Ty, true, State);
1857 
1858  // Ignore empty structs/unions on non-Windows.
1859  if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
1860  return ABIArgInfo::getIgnore();
1861 
1862  llvm::LLVMContext &LLVMContext = getVMContext();
1863  llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
1864  bool NeedsPadding = false;
1865  bool InReg;
1866  if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
1867  unsigned SizeInRegs = (TI.Width + 31) / 32;
1868  SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
1869  llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
1870  if (InReg)
1871  return ABIArgInfo::getDirectInReg(Result);
1872  else
1873  return ABIArgInfo::getDirect(Result);
1874  }
1875  llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
1876 
1877  // Pass over-aligned aggregates on Windows indirectly. This behavior was
1878  // added in MSVC 2015.
1879  if (IsWin32StructABI && TI.isAlignRequired() && TI.Align > 32)
1880  return getIndirectResult(Ty, /*ByVal=*/false, State);
1881 
1882  // Expand small (<= 128-bit) record types when we know that the stack layout
1883  // of those arguments will match the struct. This is important because the
1884  // LLVM backend isn't smart enough to remove byval, which inhibits many
1885  // optimizations.
1886  // Don't do this for the MCU if there are still free integer registers
1887  // (see X86_64 ABI for full explanation).
1888  if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
1889  canExpandIndirectArgument(Ty))
1890  return ABIArgInfo::getExpandWithPadding(
1891  IsFastCall || IsVectorCall || IsRegCall, PaddingType);
1892 
1893  return getIndirectResult(Ty, true, State);
1894  }
1895 
1896  if (const VectorType *VT = Ty->getAs<VectorType>()) {
1897  // On Windows, vectors are passed directly if registers are available, or
1898  // indirectly if not. This avoids the need to align argument memory. Pass
1899  // user-defined vector types larger than 512 bits indirectly for simplicity.
1900  if (IsWin32StructABI) {
1901  if (TI.Width <= 512 && State.FreeSSERegs > 0) {
1902  --State.FreeSSERegs;
1903  return ABIArgInfo::getDirectInReg();
1904  }
1905  return getIndirectResult(Ty, /*ByVal=*/false, State);
1906  }
1907 
1908  // On Darwin, some vectors are passed in memory; we handle this by passing
1909  // them as an i8/i16/i32/i64.
1910  if (IsDarwinVectorABI) {
1911  if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||
1912  (TI.Width == 64 && VT->getNumElements() == 1))
1913  return ABIArgInfo::getDirect(
1914  llvm::IntegerType::get(getVMContext(), TI.Width));
1915  }
1916 
1917  if (IsX86_MMXType(CGT.ConvertType(Ty)))
1918  return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
1919 
1920  return ABIArgInfo::getDirect();
1921  }
1922 
1923 
1924  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1925  Ty = EnumTy->getDecl()->getIntegerType();
1926 
1927  bool InReg = shouldPrimitiveUseInReg(Ty, State);
1928 
1929  if (isPromotableIntegerTypeForABI(Ty)) {
1930  if (InReg)
1931  return ABIArgInfo::getExtendInReg(Ty);
1932  return ABIArgInfo::getExtend(Ty);
1933  }
1934 
1935  if (const auto *EIT = Ty->getAs<BitIntType>()) {
1936  if (EIT->getNumBits() <= 64) {
1937  if (InReg)
1938  return ABIArgInfo::getDirectInReg();
1939  return ABIArgInfo::getDirect();
1940  }
1941  return getIndirectResult(Ty, /*ByVal=*/false, State);
1942  }
1943 
1944  if (InReg)
1945  return ABIArgInfo::getDirectInReg();
1946  return ABIArgInfo::getDirect();
1947 }
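// [Editor's illustrative note - not part of the original source.] Assuming the
// default cdecl convention on i386 Linux (State.FreeRegs == 0):
//
//   struct Small { int a; int b; };  // 64 bits, expandable
//                                    //   -> expanded (getExpandWithPadding)
//   struct Big { char buf[64]; };    // > 128 bits -> indirect byval
//   short s;                         // promotable integer -> getExtend
//   __m128 v;                        // non-Darwin, non-Win32 -> getDirect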
1948 
1949 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
1950  CCState State(FI);
1951  if (IsMCUABI)
1952  State.FreeRegs = 3;
1953  else if (State.CC == llvm::CallingConv::X86_FastCall) {
1954  State.FreeRegs = 2;
1955  State.FreeSSERegs = 3;
1956  } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1957  State.FreeRegs = 2;
1958  State.FreeSSERegs = 6;
1959  } else if (FI.getHasRegParm())
1960  State.FreeRegs = FI.getRegParm();
1961  else if (State.CC == llvm::CallingConv::X86_RegCall) {
1962  State.FreeRegs = 5;
1963  State.FreeSSERegs = 8;
1964  } else if (IsWin32StructABI) {
1965  // Since MSVC 2015, the first three SSE vectors have been passed in
1966  // registers. The rest are passed indirectly.
1967  State.FreeRegs = DefaultNumRegisterParameters;
1968  State.FreeSSERegs = 3;
1969  } else
1970  State.FreeRegs = DefaultNumRegisterParameters;
1971 
1972  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
1973  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
1974  } else if (FI.getReturnInfo().isIndirect()) {
1975  // The C++ ABI is not aware of register usage, so we have to check if the
1976  // return value was sret and put it in a register ourselves if appropriate.
1977  if (State.FreeRegs) {
1978  --State.FreeRegs; // The sret parameter consumes a register.
1979  if (!IsMCUABI)
1980  FI.getReturnInfo().setInReg(true);
1981  }
1982  }
1983 
1984  // The chain argument effectively gives us another free register.
1985  if (FI.isChainCall())
1986  ++State.FreeRegs;
1987 
1988  // For vectorcall, do a first pass over the arguments, assigning FP and vector
1989  // arguments to XMM registers as available.
1990  if (State.CC == llvm::CallingConv::X86_VectorCall)
1991  runVectorCallFirstPass(FI, State);
1992 
1993  bool UsedInAlloca = false;
1994  MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
1995  for (int I = 0, E = Args.size(); I < E; ++I) {
1996  // Skip arguments that have already been assigned.
1997  if (State.IsPreassigned.test(I))
1998  continue;
1999 
2000  Args[I].info = classifyArgumentType(Args[I].type, State);
2001  UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca);
2002  }
2003 
2004  // If we needed to use inalloca for any argument, do a second pass and rewrite
2005  // all the memory arguments to use inalloca.
2006  if (UsedInAlloca)
2007  rewriteWithInAlloca(FI);
2008 }
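// [Editor's illustrative note - not part of the original source.] The integer
// budgets set above correspond to ECX/EDX for fastcall and vectorcall, three
// GPRs for the MCU psABI, and five GPRs plus eight XMM registers for regcall;
// a plain cdecl function starts from DefaultNumRegisterParameters, which is
// normally zero unless -mregparm=N (or a regparm attribute, via getRegParm)
// is in use.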
2009 
2010 void
2011 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
2012  CharUnits &StackOffset, ABIArgInfo &Info,
2013  QualType Type) const {
2014  // Arguments are always 4-byte-aligned.
2015  CharUnits WordSize = CharUnits::fromQuantity(4);
2016  assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");
2017 
2018  // sret pointers and indirect things will require an extra pointer
2019  // indirection, unless they are byval. Most things are byval, and will not
2020  // require this indirection.
2021  bool IsIndirect = false;
2022  if (Info.isIndirect() && !Info.getIndirectByVal())
2023  IsIndirect = true;
2024  Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect);
2025  llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
2026  if (IsIndirect)
2027  LLTy = LLTy->getPointerTo(0);
2028  FrameFields.push_back(LLTy);
2029  StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);
2030 
2031  // Insert padding bytes to respect alignment.
2032  CharUnits FieldEnd = StackOffset;
2033  StackOffset = FieldEnd.alignTo(WordSize);
2034  if (StackOffset != FieldEnd) {
2035  CharUnits NumBytes = StackOffset - FieldEnd;
2036  llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
2037  Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
2038  FrameFields.push_back(Ty);
2039  }
2040 }
2041 
2042 static bool isArgInAlloca(const ABIArgInfo &Info) {
2043  // Leave ignored and inreg arguments alone.
2044  switch (Info.getKind()) {
2045  case ABIArgInfo::InAlloca:
2046  return true;
2047  case ABIArgInfo::Ignore:
2048  case ABIArgInfo::IndirectAliased:
2049  return false;
2050  case ABIArgInfo::Indirect:
2051  case ABIArgInfo::Direct:
2052  case ABIArgInfo::Extend:
2053  return !Info.getInReg();
2054  case ABIArgInfo::Expand:
2055  case ABIArgInfo::CoerceAndExpand:
2056  // These are aggregate types which are never passed in registers when
2057  // inalloca is involved.
2058  return true;
2059  }
2060  llvm_unreachable("invalid enum");
2061 }
2062 
2063 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
2064  assert(IsWin32StructABI && "inalloca only supported on win32");
2065 
2066  // Build a packed struct type for all of the arguments in memory.
2067  SmallVector<llvm::Type *, 6> FrameFields;
2068 
2069  // The stack alignment is always 4.
2070  CharUnits StackAlign = CharUnits::fromQuantity(4);
2071 
2072  CharUnits StackOffset;
2073  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
2074 
2075  // Put 'this' into the struct before 'sret', if necessary.
2076  bool IsThisCall =
2077  FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
2078  ABIArgInfo &Ret = FI.getReturnInfo();
2079  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
2080  isArgInAlloca(I->info)) {
2081  addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
2082  ++I;
2083  }
2084 
2085  // Put the sret parameter into the inalloca struct if it's in memory.
2086  if (Ret.isIndirect() && !Ret.getInReg()) {
2087  addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
2088  // On Windows, the hidden sret parameter is always returned in eax.
2089  Ret.setInAllocaSRet(IsWin32StructABI);
2090  }
2091 
2092  // Skip the 'this' parameter in ecx.
2093  if (IsThisCall)
2094  ++I;
2095 
2096  // Put arguments passed in memory into the struct.
2097  for (; I != E; ++I) {
2098  if (isArgInAlloca(I->info))
2099  addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
2100  }
2101 
2102  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
2103  /*isPacked=*/true),
2104  StackAlign);
2105 }
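// [Editor's illustrative sketch - not part of the original source.] For a
// hypothetical `struct S { char big[32]; }; S __thiscall C::f(S a);`, the
// frame built above is roughly the packed struct <{ S* /*sret*/, S /*a*/ }>:
// `this` stays in ECX, the hidden sret pointer takes the first 4-byte slot,
// and the by-value copy of `a` follows it in the inalloca allocation.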
2106 
2107 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
2108  Address VAListAddr, QualType Ty) const {
2109 
2110  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
2111 
2112  // x86-32 changes the alignment of certain arguments on the stack.
2113  //
2114  // Just messing with TypeInfo like this works because we never pass
2115  // anything indirectly.
2116  TypeInfo.Align = CharUnits::fromQuantity(
2117  getTypeStackAlignInBytes(Ty, TypeInfo.Align.getQuantity()));
2118 
2119  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
2120  TypeInfo, CharUnits::fromQuantity(4),
2121  /*AllowHigherAlign*/ true);
2122 }
2123 
2124 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
2125  const llvm::Triple &Triple, const CodeGenOptions &Opts) {
2126  assert(Triple.getArch() == llvm::Triple::x86);
2127 
2128  switch (Opts.getStructReturnConvention()) {
2129  case CodeGenOptions::SRCK_Default:
2130  break;
2131  case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
2132  return false;
2133  case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
2134  return true;
2135  }
2136 
2137  if (Triple.isOSDarwin() || Triple.isOSIAMCU())
2138  return true;
2139 
2140  switch (Triple.getOS()) {
2141  case llvm::Triple::DragonFly:
2142  case llvm::Triple::FreeBSD:
2143  case llvm::Triple::OpenBSD:
2144  case llvm::Triple::Win32:
2145  return true;
2146  default:
2147  return false;
2148  }
2149 }
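// [Editor's illustrative note - not part of the original source.] For example,
// `clang --target=i386-linux-gnu -freg-struct-return` forces small-struct
// returns into registers, `-fpcc-struct-return` forces them onto the stack,
// and with neither flag the per-OS defaults above apply (true on Darwin,
// IAMCU, Win32 and the listed BSDs; false elsewhere, e.g. Linux).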
2150 
2151 static void addX86InterruptAttrs(const FunctionDecl *FD, llvm::GlobalValue *GV,
2152  CodeGen::CodeGenModule &CGM) {
2153  if (!FD->hasAttr<AnyX86InterruptAttr>())
2154  return;
2155 
2156  llvm::Function *Fn = cast<llvm::Function>(GV);
2157  Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2158  if (FD->getNumParams() == 0)
2159  return;
2160 
2161  auto PtrTy = cast<PointerType>(FD->getParamDecl(0)->getType());
2162  llvm::Type *ByValTy = CGM.getTypes().ConvertType(PtrTy->getPointeeType());
2163  llvm::Attribute NewAttr = llvm::Attribute::getWithByValType(
2164  Fn->getContext(), ByValTy);
2165  Fn->addParamAttr(0, NewAttr);
2166 }
2167 
2168 void X86_32TargetCodeGenInfo::setTargetAttributes(
2169  const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2170  if (GV->isDeclaration())
2171  return;
2172  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2173  if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2174  llvm::Function *Fn = cast<llvm::Function>(GV);
2175  Fn->addFnAttr("stackrealign");
2176  }
2177 
2178  addX86InterruptAttrs(FD, GV, CGM);
2179  }
2180 }
2181 
2182 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
2183  CodeGen::CodeGenFunction &CGF,
2184  llvm::Value *Address) const {
2185  CodeGen::CGBuilderTy &Builder = CGF.Builder;
2186 
2187  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
2188 
2189  // 0-7 are the eight integer registers; the order is different
2190  // on Darwin (for EH), but the range is the same.
2191  // 8 is %eip.
2192  AssignToArrayRange(Builder, Address, Four8, 0, 8);
2193 
2194  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
2195  // 12-16 are st(0..4). Not sure why we stop at 4.
2196  // These have size 16, which is sizeof(long double) on
2197  // platforms with 8-byte alignment for that type.
2198  llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
2199  AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
2200 
2201  } else {
2202  // 9 is %eflags, which doesn't get a size on Darwin for some
2203  // reason.
2204  Builder.CreateAlignedStore(
2205  Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
2206  CharUnits::One());
2207 
2208  // 11-16 are st(0..5). Not sure why we stop at 5.
2209  // These have size 12, which is sizeof(long double) on
2210  // platforms with 4-byte alignment for that type.
2211  llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
2212  AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
2213  }
2214 
2215  return false;
2216 }
2217 
2218 //===----------------------------------------------------------------------===//
2219 // X86-64 ABI Implementation
2220 //===----------------------------------------------------------------------===//
2221 
2222 
2223 namespace {
2224 /// The AVX ABI level for X86 targets.
2225 enum class X86AVXABILevel {
2226  None,
2227  AVX,
2228  AVX512
2229 };
2230 
2231  /// Returns the size in bits of the largest (native) vector for \p AVXLevel.
2232 static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
2233  switch (AVXLevel) {
2234  case X86AVXABILevel::AVX512:
2235  return 512;
2236  case X86AVXABILevel::AVX:
2237  return 256;
2238  case X86AVXABILevel::None:
2239  return 128;
2240  }
2241  llvm_unreachable("Unknown AVXLevel");
2242 }
2243 
2244 /// X86_64ABIInfo - The X86_64 ABI information.
2245 class X86_64ABIInfo : public SwiftABIInfo {
2246  enum Class {
2247  Integer = 0,
2248  SSE,
2249  SSEUp,
2250  X87,
2251  X87Up,
2252  ComplexX87,
2253  NoClass,
2254  Memory
2255  };
2256 
2257  /// merge - Implement the X86_64 ABI merging algorithm.
2258  ///
2259  /// Merge an accumulating classification \arg Accum with a field
2260  /// classification \arg Field.
2261  ///
2262  /// \param Accum - The accumulating classification. This should
2263  /// always be either NoClass or the result of a previous merge
2264  /// call. In addition, this should never be Memory (the caller
2265  /// should just return Memory for the aggregate).
2266  static Class merge(Class Accum, Class Field);
2267 
2268  /// postMerge - Implement the X86_64 ABI post merging algorithm.
2269  ///
2270  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
2271  /// final MEMORY or SSE classes when necessary.
2272  ///
2273  /// \param AggregateSize - The size of the current aggregate in
2274  /// the classification process.
2275  ///
2276  /// \param Lo - The classification for the parts of the type
2277  /// residing in the low word of the containing object.
2278  ///
2279  /// \param Hi - The classification for the parts of the type
2280  /// residing in the higher words of the containing object.
2281  ///
2282  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
2283 
2284  /// classify - Determine the x86_64 register classes in which the
2285  /// given type T should be passed.
2286  ///
2287  /// \param Lo - The classification for the parts of the type
2288  /// residing in the low word of the containing object.
2289  ///
2290  /// \param Hi - The classification for the parts of the type
2291  /// residing in the high word of the containing object.
2292  ///
2293  /// \param OffsetBase - The bit offset of this type in the
2294  /// containing object. Some parameters are classified different
2295  /// depending on whether they straddle an eightbyte boundary.
2296  ///
2297  /// \param isNamedArg - Whether the argument in question is a "named"
2298  /// argument, as used in AMD64-ABI 3.5.7.
2299  ///
2300  /// \param IsRegCall - Whether the calling convention is regcall.
2301  ///
2302  /// If a word is unused its result will be NoClass; if a type should
2303  /// be passed in Memory then at least the classification of \arg Lo
2304  /// will be Memory.
2305  ///
2306  /// The \arg Lo class will be NoClass iff the argument is ignored.
2307  ///
2308  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
2309  /// also be ComplexX87.
2310  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2311  bool isNamedArg, bool IsRegCall = false) const;
2312 
2313  llvm::Type *GetByteVectorType(QualType Ty) const;
2314  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
2315  unsigned IROffset, QualType SourceTy,
2316  unsigned SourceOffset) const;
2317  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
2318  unsigned IROffset, QualType SourceTy,
2319  unsigned SourceOffset) const;
2320 
2321  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
2322  /// result such that the argument will be returned in memory.
2323  ABIArgInfo getIndirectReturnResult(QualType Ty) const;
2324 
2325  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
2326  /// such that the argument will be passed in memory.
2327  ///
2328  /// \param freeIntRegs - The number of free integer registers remaining
2329  /// available.
2330  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
2331 
2332  ABIArgInfo classifyReturnType(QualType RetTy) const;
2333 
2334  ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
2335  unsigned &neededInt, unsigned &neededSSE,
2336  bool isNamedArg,
2337  bool IsRegCall = false) const;
2338 
2339  ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
2340  unsigned &NeededSSE,
2341  unsigned &MaxVectorWidth) const;
2342 
2343  ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2344  unsigned &NeededSSE,
2345  unsigned &MaxVectorWidth) const;
2346 
2347  bool IsIllegalVectorType(QualType Ty) const;
2348 
2349  /// The 0.98 ABI revision clarified a lot of ambiguities,
2350  /// unfortunately in ways that were not always consistent with
2351  /// certain previous compilers. In particular, platforms which
2352  /// required strict binary compatibility with older versions of GCC
2353  /// may need to exempt themselves.
2354  bool honorsRevision0_98() const {
2355  return !getTarget().getTriple().isOSDarwin();
2356  }
2357 
2358  /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
2359  /// classify it as INTEGER (for compatibility with older clang compilers).
2360  bool classifyIntegerMMXAsSSE() const {
2361  // Clang <= 3.8 did not do this.
2362  if (getContext().getLangOpts().getClangABICompat() <=
2363  LangOptions::ClangABI::Ver3_8)
2364  return false;
2365 
2366  const llvm::Triple &Triple = getTarget().getTriple();
2367  if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2368  return false;
2369  if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2370  return false;
2371  return true;
2372  }
2373 
2374  // GCC classifies vectors of __int128 as memory.
2375  bool passInt128VectorsInMem() const {
2376  // Clang <= 9.0 did not do this.
2377  if (getContext().getLangOpts().getClangABICompat() <=
2378  LangOptions::ClangABI::Ver9)
2379  return false;
2380 
2381  const llvm::Triple &T = getTarget().getTriple();
2382  return T.isOSLinux() || T.isOSNetBSD();
2383  }
2384 
2385  X86AVXABILevel AVXLevel;
2386  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32-bit pointers on
2387  // 64-bit hardware.
2388  bool Has64BitPointers;
2389 
2390 public:
2391  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
2392  SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2393  Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
2394  }
2395 
2396  bool isPassedUsingAVXType(QualType type) const {
2397  unsigned neededInt, neededSSE;
2398  // The freeIntRegs argument doesn't matter here.
2399  ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
2400  /*isNamedArg*/true);
2401  if (info.isDirect()) {
2402  llvm::Type *ty = info.getCoerceToType();
2403  if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2404  return vectorTy->getPrimitiveSizeInBits().getFixedSize() > 128;
2405  }
2406  return false;
2407  }
2408 
2409  void computeInfo(CGFunctionInfo &FI) const override;
2410 
2411  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2412  QualType Ty) const override;
2413  Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
2414  QualType Ty) const override;
2415 
2416  bool has64BitPointers() const {
2417  return Has64BitPointers;
2418  }
2419 
2420  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
2421  bool asReturnValue) const override {
2422  return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2423  }
2424  bool isSwiftErrorInRegister() const override {
2425  return true;
2426  }
2427 };
2428 
2429 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
2430 class WinX86_64ABIInfo : public SwiftABIInfo {
2431 public:
2432  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2433  : SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2434  IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
2435 
2436  void computeInfo(CGFunctionInfo &FI) const override;
2437 
2438  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2439  QualType Ty) const override;
2440 
2441  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
2442  // FIXME: Assumes vectorcall is in use.
2443  return isX86VectorTypeForVectorCall(getContext(), Ty);
2444  }
2445 
2446  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
2447  uint64_t NumMembers) const override {
2448  // FIXME: Assumes vectorcall is in use.
2449  return isX86VectorCallAggregateSmallEnough(NumMembers);
2450  }
2451 
2452  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
2453  bool asReturnValue) const override {
2454  return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2455  }
2456 
2457  bool isSwiftErrorInRegister() const override {
2458  return true;
2459  }
2460 
2461 private:
2462  ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
2463  bool IsVectorCall, bool IsRegCall) const;
2464  ABIArgInfo reclassifyHvaArgForVectorCall(QualType Ty, unsigned &FreeSSERegs,
2465  const ABIArgInfo &current) const;
2466 
2467  X86AVXABILevel AVXLevel;
2468 
2469  bool IsMingw64;
2470 };
2471 
2472 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2473 public:
2474  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2475  : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {}
2476 
2477  const X86_64ABIInfo &getABIInfo() const {
2478  return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
2479  }
2480 
2481  /// Disable tail call on x86-64. The epilogue code before the tail jump blocks
2482  /// autoreleaseRV/retainRV and autoreleaseRV/unsafeClaimRV optimizations.
2483  bool markARCOptimizedReturnCallsAsNoTail() const override { return true; }
2484 
2485  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2486  return 7;
2487  }
2488 
2489  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2490  llvm::Value *Address) const override {
2491  llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2492 
2493  // 0-15 are the 16 integer registers.
2494  // 16 is %rip.
2495  AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2496  return false;
2497  }
2498 
2499  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2500  StringRef Constraint,
2501  llvm::Type* Ty) const override {
2502  return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2503  }
2504 
2505  bool isNoProtoCallVariadic(const CallArgList &args,
2506  const FunctionNoProtoType *fnType) const override {
2507  // The default CC on x86-64 sets %al to the number of SSE
2508  // registers used, and GCC sets this when calling an unprototyped
2509  // function, so we override the default behavior. However, don't do
2510  // that when AVX types are involved: the ABI explicitly states it is
2511  // undefined, and it doesn't work in practice because of how the ABI
2512  // defines varargs anyway.
2513  if (fnType->getCallConv() == CC_C) {
2514  bool HasAVXType = false;
2515  for (CallArgList::const_iterator
2516  it = args.begin(), ie = args.end(); it != ie; ++it) {
2517  if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2518  HasAVXType = true;
2519  break;
2520  }
2521  }
2522 
2523  if (!HasAVXType)
2524  return true;
2525  }
2526 
2527  return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
2528  }
2529 
2530  llvm::Constant *
2531  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
2532  unsigned Sig = (0xeb << 0) | // jmp rel8
2533  (0x06 << 8) | // .+0x08
2534  ('v' << 16) |
2535  ('2' << 24);
2536  return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
2537  }
2538 
2539  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2540  CodeGen::CodeGenModule &CGM) const override {
2541  if (GV->isDeclaration())
2542  return;
2543  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2544  if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2545  llvm::Function *Fn = cast<llvm::Function>(GV);
2546  Fn->addFnAttr("stackrealign");
2547  }
2548 
2549  addX86InterruptAttrs(FD, GV, CGM);
2550  }
2551  }
2552 
2553  void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
2554  const FunctionDecl *Caller,
2555  const FunctionDecl *Callee,
2556  const CallArgList &Args) const override;
2557 };
2558 
2559 static void initFeatureMaps(const ASTContext &Ctx,
2560  llvm::StringMap<bool> &CallerMap,
2561  const FunctionDecl *Caller,
2562  llvm::StringMap<bool> &CalleeMap,
2563  const FunctionDecl *Callee) {
2564  if (CalleeMap.empty() && CallerMap.empty()) {
2565  // The caller is potentially nullptr in the case where the call isn't in a
2566  // function. In this case, the getFunctionFeatureMap ensures we just get
2567  // the TU level setting (since it cannot be modified by 'target').
2568  Ctx.getFunctionFeatureMap(CallerMap, Caller);
2569  Ctx.getFunctionFeatureMap(CalleeMap, Callee);
2570  }
2571 }
2572 
2573 static bool checkAVXParamFeature(DiagnosticsEngine &Diag,
2574  SourceLocation CallLoc,
2575  const llvm::StringMap<bool> &CallerMap,
2576  const llvm::StringMap<bool> &CalleeMap,
2577  QualType Ty, StringRef Feature,
2578  bool IsArgument) {
2579  bool CallerHasFeat = CallerMap.lookup(Feature);
2580  bool CalleeHasFeat = CalleeMap.lookup(Feature);
2581  if (!CallerHasFeat && !CalleeHasFeat)
2582  return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
2583  << IsArgument << Ty << Feature;
2584 
2585  // Mixing calling conventions here is very clearly an error.
2586  if (!CallerHasFeat || !CalleeHasFeat)
2587  return Diag.Report(CallLoc, diag::err_avx_calling_convention)
2588  << IsArgument << Ty << Feature;
2589 
2590  // Else, both caller and callee have the required feature, so there is no need
2591  // to diagnose.
2592  return false;
2593 }
2594 
2595 static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
2596  SourceLocation CallLoc,
2597  const llvm::StringMap<bool> &CallerMap,
2598  const llvm::StringMap<bool> &CalleeMap, QualType Ty,
2599  bool IsArgument) {
2600  uint64_t Size = Ctx.getTypeSize(Ty);
2601  if (Size > 256)
2602  return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
2603  "avx512f", IsArgument);
2604 
2605  if (Size > 128)
2606  return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx",
2607  IsArgument);
2608 
2609  return false;
2610 }
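// [Editor's illustrative note - not part of the original source.] For example,
// passing a __m256 argument (256 bits) checks the "avx" feature on both sides
// of the call, while a __m512 argument (512 bits) is checked against
// "avx512f"; a mismatch produces the warning or error emitted above.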
2611 
2612 void X86_64TargetCodeGenInfo::checkFunctionCallABI(
2613  CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
2614  const FunctionDecl *Callee, const CallArgList &Args) const {
2615  llvm::StringMap<bool> CallerMap;
2616  llvm::StringMap<bool> CalleeMap;
2617  unsigned ArgIndex = 0;
2618 
2619  // We need to loop through the actual call arguments rather than the
2620  // function's parameters, in case this call is variadic.
2621  for (const CallArg &Arg : Args) {
2622  // The "avx" feature changes how vectors >128 in size are passed. "avx512f"
2623  // additionally changes how vectors >256 in size are passed. Like GCC, we
2624  // warn when a function is called with an argument where this will change.
2625  // Unlike GCC, we also error when it is an obvious ABI mismatch, that is,
2626  // the caller and callee features are mismatched.
2627  // Unfortunately, we cannot do this diagnostic in SEMA, since the callee can
2628  // change its ABI with attribute-target after this call.
2629  if (Arg.getType()->isVectorType() &&
2630  CGM.getContext().getTypeSize(Arg.getType()) > 128) {
2631  initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
2632  QualType Ty = Arg.getType();
2633  // The CallArg seems to have desugared the type already, so for clearer
2634  // diagnostics, replace it with the type in the FunctionDecl if possible.
2635  if (ArgIndex < Callee->getNumParams())
2636  Ty = Callee->getParamDecl(ArgIndex)->getType();
2637 
2638  if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
2639  CalleeMap, Ty, /*IsArgument*/ true))
2640  return;
2641  }
2642  ++ArgIndex;
2643  }
2644 
2645  // Check return always, as we don't have a good way of knowing in codegen
2646  // whether this value is used, tail-called, etc.
2647  if (Callee->getReturnType()->isVectorType() &&
2648  CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) {
2649  initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
2650  checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
2651  CalleeMap, Callee->getReturnType(),
2652  /*IsArgument*/ false);
2653  }
2654 }
2655 
2656 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2657  // If the argument does not end in .lib, automatically add the suffix.
2658  // If the argument contains a space, enclose it in quotes.
2659  // This matches the behavior of MSVC.
2660  bool Quote = Lib.contains(' ');
2661  std::string ArgStr = Quote ? "\"" : "";
2662  ArgStr += Lib;
2663  if (!Lib.endswith_insensitive(".lib") && !Lib.endswith_insensitive(".a"))
2664  ArgStr += ".lib";
2665  ArgStr += Quote ? "\"" : "";
2666  return ArgStr;
2667 }
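// [Editor's illustrative note - not part of the original source.]
//   qualifyWindowsLibrary("msvcrt")   -> "msvcrt.lib"
//   qualifyWindowsLibrary("my lib")   -> "\"my lib.lib\""
//   qualifyWindowsLibrary("libfoo.a") -> "libfoo.a"  (suffix already present)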
2668 
2669 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
2670 public:
2671  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2672  bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
2673  unsigned NumRegisterParameters)
2674  : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2675  Win32StructABI, NumRegisterParameters, false) {}
2676 
2677  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2678  CodeGen::CodeGenModule &CGM) const override;
2679 
2680  void getDependentLibraryOption(llvm::StringRef Lib,
2681  llvm::SmallString<24> &Opt) const override {
2682  Opt = "/DEFAULTLIB:";
2683  Opt += qualifyWindowsLibrary(Lib);
2684  }
2685 
2686  void getDetectMismatchOption(llvm::StringRef Name,
2687  llvm::StringRef Value,
2688  llvm::SmallString<32> &Opt) const override {
2689  Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2690  }
2691 };
2692 
2693 static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2694  CodeGen::CodeGenModule &CGM) {
2695  if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
2696 
2697  if (CGM.getCodeGenOpts().StackProbeSize != 4096)
2698  Fn->addFnAttr("stack-probe-size",
2699  llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
2700  if (CGM.getCodeGenOpts().NoStackArgProbe)
2701  Fn->addFnAttr("no-stack-arg-probe");
2702  }
2703 }
2704 
2705 void WinX86_32TargetCodeGenInfo::setTargetAttributes(
2706  const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2707  X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2708  if (GV->isDeclaration())
2709  return;
2710  addStackProbeTargetAttributes(D, GV, CGM);
2711 }
2712 
2713 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2714 public:
2715  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2716  X86AVXABILevel AVXLevel)
2717  : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {}
2718 
2719  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2720  CodeGen::CodeGenModule &CGM) const override;
2721 
2722  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2723  return 7;
2724  }
2725 
2726  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2727  llvm::Value *Address) const override {
2728  llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2729 
2730  // 0-15 are the 16 integer registers.
2731  // 16 is %rip.
2732  AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2733  return false;
2734  }
2735 
2736  void getDependentLibraryOption(llvm::StringRef Lib,
2737  llvm::SmallString<24> &Opt) const override {
2738  Opt = "/DEFAULTLIB:";
2739  Opt += qualifyWindowsLibrary(Lib);
2740  }
2741 
2742  void getDetectMismatchOption(llvm::StringRef Name,
2743  llvm::StringRef Value,
2744  llvm::SmallString<32> &Opt) const override {
2745  Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2746  }
2747 };
2748 
2749 void WinX86_64TargetCodeGenInfo::setTargetAttributes(
2750  const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2751  TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2752  if (GV->isDeclaration())
2753  return;
2754  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2755  if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2756  llvm::Function *Fn = cast<llvm::Function>(GV);
2757  Fn->addFnAttr("stackrealign");
2758  }
2759 
2760  addX86InterruptAttrs(FD, GV, CGM);
2761  }
2762 
2763  addStackProbeTargetAttributes(D, GV, CGM);
2764 }
2765 }
2766 
2767 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
2768  Class &Hi) const {
2769  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
2770  //
2771  // (a) If one of the classes is Memory, the whole argument is passed in
2772  // memory.
2773  //
2774  // (b) If X87UP is not preceded by X87, the whole argument is passed in
2775  // memory.
2776  //
2777  // (c) If the size of the aggregate exceeds two eightbytes and the first
2778  // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
2779  // argument is passed in memory. NOTE: This is necessary to keep the
2780  // ABI working for processors that don't support the __m256 type.
2781  //
2782  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
2783  //
2784  // Some of these are enforced by the merging logic. Others can arise
2785  // only with unions; for example:
2786  // union { _Complex double; unsigned; }
2787  //
2788  // Note that clauses (b) and (c) were added in 0.98.
2789  //
2790  if (Hi == Memory)
2791  Lo = Memory;
2792  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2793  Lo = Memory;
2794  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2795  Lo = Memory;
2796  if (Hi == SSEUp && Lo != SSE)
2797  Hi = SSE;
2798 }
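// [Editor's illustrative note - not part of the original source.] For example,
// a hypothetical `union { long double ld; unsigned u; }` merges to
// Lo = Integer, Hi = X87Up, and rule (b) above then demotes the whole
// argument to MEMORY on targets that honor revision 0.98.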
2799 
2800 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2801  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
2802  // classified recursively so that always two fields are
2803  // considered. The resulting class is calculated according to
2804  // the classes of the fields in the eightbyte:
2805  //
2806  // (a) If both classes are equal, this is the resulting class.
2807  //
2808  // (b) If one of the classes is NO_CLASS, the resulting class is
2809  // the other class.
2810  //
2811  // (c) If one of the classes is MEMORY, the result is the MEMORY
2812  // class.
2813  //
2814  // (d) If one of the classes is INTEGER, the result is the
2815  // INTEGER.
2816  //
2817  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
2818  // MEMORY is used as class.
2819  //
2820  // (f) Otherwise class SSE is used.
2821 
2822  // Accum should never be memory (we should have returned) or
2823  // ComplexX87 (because this cannot be passed in a structure).
2824  assert((Accum != Memory && Accum != ComplexX87) &&
2825  "Invalid accumulated classification during merge.");
2826  if (Accum == Field || Field == NoClass)
2827  return Accum;
2828  if (Field == Memory)
2829  return Memory;
2830  if (Accum == NoClass)
2831  return Field;
2832  if (Accum == Integer || Field == Integer)
2833  return Integer;
2834  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2835  Accum == X87 || Accum == X87Up)
2836  return Memory;
2837  return SSE;
2838 }
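// [Editor's illustrative note - not part of the original source.]
//   merge(NoClass, Integer) == Integer   (rule b)
//   merge(Integer, SSE)     == Integer   (rule d)
//   merge(SSE, X87)         == Memory    (rule e)
//   merge(SSE, SSE)         == SSE       (rule a)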
2839 
2840 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
2841  Class &Hi, bool isNamedArg, bool IsRegCall) const {
2842  // FIXME: This code can be simplified by introducing a simple value class for
2843  // Class pairs with appropriate constructor methods for the various
2844  // situations.
2845 
2846  // FIXME: Some of the split computations are wrong; unaligned vectors
2847  // shouldn't be passed in registers for example, so there is no chance they
2848  // can straddle an eightbyte. Verify & simplify.
2849 
2850  Lo = Hi = NoClass;
2851 
2852  Class &Current = OffsetBase < 64 ? Lo : Hi;
2853  Current = Memory;
2854 
2855  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
2856  BuiltinType::Kind k = BT->getKind();
2857 
2858  if (k == BuiltinType::Void) {
2859  Current = NoClass;
2860  } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2861  Lo = Integer;
2862  Hi = Integer;
2863  } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2864  Current = Integer;
2865  } else if (k == BuiltinType::Float || k == BuiltinType::Double ||
2866  k == BuiltinType::Float16) {
2867  Current = SSE;
2868  } else if (k == BuiltinType::LongDouble) {
2869  const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2870  if (LDF == &llvm::APFloat::IEEEquad()) {
2871  Lo = SSE;
2872  Hi = SSEUp;
2873  } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
2874  Lo = X87;
2875  Hi = X87Up;
2876  } else if (LDF == &llvm::APFloat::IEEEdouble()) {
2877  Current = SSE;
2878  } else
2879  llvm_unreachable("unexpected long double representation!");
2880  }
2881  // FIXME: _Decimal32 and _Decimal64 are SSE.
2882  // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
2883  return;
2884  }
2885 
2886  if (const EnumType *ET = Ty->getAs<EnumType>()) {
2887  // Classify the underlying integer type.
2888  classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2889  return;
2890  }
2891 
2892  if (Ty->hasPointerRepresentation()) {
2893  Current = Integer;
2894  return;
2895  }
2896 
2897  if (Ty->isMemberPointerType()) {
2898  if (Ty->isMemberFunctionPointerType()) {
2899  if (Has64BitPointers) {
2900  // If Has64BitPointers, this is an {i64, i64}, so classify both
2901  // Lo and Hi now.
2902  Lo = Hi = Integer;
2903  } else {
2904  // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
2905  // straddles an eightbyte boundary, Hi should be classified as well.
2906  uint64_t EB_FuncPtr = (OffsetBase) / 64;
2907  uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2908  if (EB_FuncPtr != EB_ThisAdj) {
2909  Lo = Hi = Integer;
2910  } else {
2911  Current = Integer;
2912  }
2913  }
2914  } else {
2915  Current = Integer;
2916  }
2917  return;
2918  }
2919 
2920  if (const VectorType *VT = Ty->getAs<VectorType>()) {
2921  uint64_t Size = getContext().getTypeSize(VT);
2922  if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2923  // gcc passes the following as integer:
2924  // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
2925  // 2 bytes - <2 x char>, <1 x short>
2926  // 1 byte - <1 x char>
2927  Current = Integer;
2928 
2929  // If this type crosses an eightbyte boundary, it should be
2930  // split.
2931  uint64_t EB_Lo = (OffsetBase) / 64;
2932  uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2933  if (EB_Lo != EB_Hi)
2934  Hi = Lo;
2935  } else if (Size == 64) {
2936  QualType ElementType = VT->getElementType();
2937 
2938  // gcc passes <1 x double> in memory. :(
2939  if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
2940  return;
2941 
2942  // gcc passes <1 x long long> as SSE but clang used to unconditionally
2943  // pass them as integer. For platforms where clang is the de facto
2944  // platform compiler, we must continue to use integer.
2945  if (!classifyIntegerMMXAsSSE() &&
2946  (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
2947  ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2948  ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
2949  ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
2950  Current = Integer;
2951  else
2952  Current = SSE;
2953 
2954  // If this type crosses an eightbyte boundary, it should be
2955  // split.
2956  if (OffsetBase && OffsetBase != 64)
2957  Hi = Lo;
2958  } else if (Size == 128 ||
2959  (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2960  QualType ElementType = VT->getElementType();
2961 
2962  // gcc passes 256 and 512 bit <X x __int128> vectors in memory. :(
2963  if (passInt128VectorsInMem() && Size != 128 &&
2964  (ElementType->isSpecificBuiltinType(BuiltinType::Int128) ||
2965  ElementType->isSpecificBuiltinType(BuiltinType::UInt128)))
2966  return;
2967 
2968  // Arguments of 256-bits are split into four eightbyte chunks. The
2969  // least significant one belongs to class SSE and all the others to class
2970  // SSEUP. The original Lo and Hi design considers that types can't be
2971  // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
2972  // This design isn't correct for 256 bits, but since there are no cases
2973  // where the upper parts would need to be inspected, avoid adding
2974  // complexity and just consider Hi to match the 64-256 part.
2975  //
2976  // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
2977  // registers if they are "named", i.e. not part of the "..." of a
2978  // variadic function.
2979  //
2980  // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
2981  // split into eight eightbyte chunks, one SSE and seven SSEUP.
2982  Lo = SSE;
2983  Hi = SSEUp;
2984  }
2985  return;
2986  }
2987 
2988  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
2989  QualType ET = getContext().getCanonicalType(CT->getElementType());
2990 
2991  uint64_t Size = getContext().getTypeSize(Ty);
2992  if (ET->isIntegralOrEnumerationType()) {
2993  if (Size <= 64)
2994  Current = Integer;
2995  else if (Size <= 128)
2996  Lo = Hi = Integer;
2997  } else if (ET->isFloat16Type() || ET == getContext().FloatTy) {
2998  Current = SSE;
2999  } else if (ET == getContext().DoubleTy) {
3000  Lo = Hi = SSE;
3001  } else if (ET == getContext().LongDoubleTy) {
3002  const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3003  if (LDF == &llvm::APFloat::IEEEquad())
3004  Current = Memory;
3005  else if (LDF == &llvm::APFloat::x87DoubleExtended())
3006  Current = ComplexX87;
3007  else if (LDF == &llvm::APFloat::IEEEdouble())
3008  Lo = Hi = SSE;
3009  else
3010  llvm_unreachable("unexpected long double representation!");
3011  }
3012 
3013  // If this complex type crosses an eightbyte boundary then it
3014  // should be split.
3015  uint64_t EB_Real = (OffsetBase) / 64;
3016  uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
3017  if (Hi == NoClass && EB_Real != EB_Imag)
3018  Hi = Lo;
3019 
3020  return;
3021  }
3022 
3023  if (const auto *EITy = Ty->getAs<BitIntType>()) {
3024  if (EITy->getNumBits() <= 64)
3025  Current = Integer;
3026  else if (EITy->getNumBits() <= 128)
3027  Lo = Hi = Integer;
3028  // Larger values need to get passed in memory.
3029  return;
3030  }
3031 
3032  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
3033  // Arrays are treated like structures.
3034 
3035  uint64_t Size = getContext().getTypeSize(Ty);
3036 
3037  // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
3038  // than eight eightbytes, ..., it has class MEMORY.
3039  // regcall ABI doesn't have limitation to an object. The only limitation
3040  // is the free registers, which will be checked in computeInfo.
3041  if (!IsRegCall && Size > 512)
3042  return;
3043 
3044  // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
3045  // fields, it has class MEMORY.
3046  //
3047  // Only need to check alignment of array base.
3048  if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
3049  return;
3050 
3051  // Otherwise implement simplified merge. We could be smarter about
3052  // this, but it isn't worth it and would be harder to verify.
3053  Current = NoClass;
3054  uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
3055  uint64_t ArraySize = AT->getSize().getZExtValue();
3056 
3057  // The only case a 256-bit wide vector could be used is when the array
3058  // contains a single 256-bit element. Since Lo and Hi logic isn't extended
3059  // to work for sizes wider than 128, early check and fallback to memory.
3060  //
3061  if (Size > 128 &&
3062  (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
3063  return;
3064 
3065  for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
3066  Class FieldLo, FieldHi;
3067  classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
3068  Lo = merge(Lo, FieldLo);
3069  Hi = merge(Hi, FieldHi);
3070  if (Lo == Memory || Hi == Memory)
3071  break;
3072  }
3073 
3074  postMerge(Size, Lo, Hi);
3075  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
3076  return;
3077  }
3078 
3079  if (const RecordType *RT = Ty->getAs<RecordType>()) {
3080  uint64_t Size = getContext().getTypeSize(Ty);
3081 
3082  // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
3083  // than eight eightbytes, ..., it has class MEMORY.
3084  if (Size > 512)
3085  return;
3086 
3087  // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
3088  // copy constructor or a non-trivial destructor, it is passed by invisible
3089  // reference.
3090  if (getRecordArgABI(RT, getCXXABI()))
3091  return;
3092 
3093  const RecordDecl *RD = RT->getDecl();
3094 
3095  // Assume variable sized types are passed in memory.
3096  if (RD->hasFlexibleArrayMember())
3097  return;
3098 
3099  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
3100 
3101  // Reset Lo class, this will be recomputed.
3102  Current = NoClass;
3103 
3104  // If this is a C++ record, classify the bases first.
3105  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3106  for (const auto &I : CXXRD->bases()) {
3107  assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3108  "Unexpected base class!");
3109  const auto *Base =
3110  cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
3111 
3112  // Classify this field.
3113  //
3114  // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
3115  // single eightbyte, each is classified separately. Each eightbyte gets
3116  // initialized to class NO_CLASS.
3117  Class FieldLo, FieldHi;
3118  uint64_t Offset =
3119  OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
3120  classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
3121  Lo = merge(Lo, FieldLo);
3122  Hi = merge(Hi, FieldHi);
3123  if (Lo == Memory || Hi == Memory) {
3124  postMerge(Size, Lo, Hi);
3125  return;
3126  }
3127  }
3128  }
3129 
3130  // Classify the fields one at a time, merging the results.
3131  unsigned idx = 0;
3132  bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
3133  LangOptions::ClangABI::Ver11 ||
3134  getContext().getTargetInfo().getTriple().isPS4();
3135  bool IsUnion = RT->isUnionType() && !UseClang11Compat;
3136 
3137  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3138  i != e; ++i, ++idx) {
3139  uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
3140  bool BitField = i->isBitField();
3141 
3142  // Ignore padding bit-fields.
3143  if (BitField && i->isUnnamedBitfield())
3144  continue;
3145 
3146  // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
3147  // eight eightbytes, or it contains unaligned fields, it has class MEMORY.
3148  //
3149  // The only case a 256-bit or a 512-bit wide vector could be used is when
3150  // the struct contains a single 256-bit or 512-bit element. Early check
3151  // and fallback to memory.
3152  //
3153  // FIXME: Extend the Lo and Hi logic properly to work for sizes wider
3154  // than 128.
3155  if (Size > 128 &&
3156  ((!IsUnion && Size != getContext().getTypeSize(i->getType())) ||
3157  Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
3158  Lo = Memory;
3159  postMerge(Size, Lo, Hi);
3160  return;
3161  }
3162  // Note, skip this test for bit-fields, see below.
3163  if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
3164  Lo = Memory;
3165  postMerge(Size, Lo, Hi);
3166  return;
3167  }
3168 
3169  // Classify this field.
3170  //
3171  // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
3172  // exceeds a single eightbyte, each is classified
3173  // separately. Each eightbyte gets initialized to class
3174  // NO_CLASS.
3175  Class FieldLo, FieldHi;
3176 
3177  // Bit-fields require special handling, they do not force the
3178  // structure to be passed in memory even if unaligned, and
3179  // therefore they can straddle an eightbyte.
3180  if (BitField) {
3181  assert(!i->isUnnamedBitfield());
3182  uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
3183  uint64_t Size = i->getBitWidthValue(getContext());
3184 
3185  uint64_t EB_Lo = Offset / 64;
3186  uint64_t EB_Hi = (Offset + Size - 1) / 64;
3187 
3188  if (EB_Lo) {
3189  assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
3190  FieldLo = NoClass;
3191  FieldHi = Integer;
3192  } else {
3193  FieldLo = Integer;
3194  FieldHi = EB_Hi ? Integer : NoClass;
3195  }
3196  } else
3197  classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
3198  Lo = merge(Lo, FieldLo);
3199  Hi = merge(Hi, FieldHi);
3200  if (Lo == Memory || Hi == Memory)
3201  break;
3202  }
3203 
3204  postMerge(Size, Lo, Hi);
3205  }
3206 }
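// A few classifications that follow from the rules above, for illustration
// (hypothetical C types; results assume the SysV x86-64 path implemented here):
//
//   struct S1 { double d; int i; };   // 16 bytes: Lo = SSE, Hi = Integer
//   struct S2 { long a, b, c; };      // 24 bytes: postMerge() demotes to Memory
//   _Complex double cd;               // crosses an eightbyte: Lo = Hi = SSE
//   _BitInt(128) b;                   // Lo = Hi = Integer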
3207 
3208 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
3209  // If this is a scalar LLVM value then assume LLVM will pass it in the right
3210  // place naturally.
3211  if (!isAggregateTypeForABI(Ty)) {
3212  // Treat an enum type as its underlying type.
3213  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3214  Ty = EnumTy->getDecl()->getIntegerType();
3215 
3216  if (Ty->isBitIntType())
3217  return getNaturalAlignIndirect(Ty);
3218 
3219  return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
3220  : ABIArgInfo::getDirect());
3221  }
3222 
3223  return getNaturalAlignIndirect(Ty);
3224 }
3225 
3226 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
3227  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
3228  uint64_t Size = getContext().getTypeSize(VecTy);
3229  unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
3230  if (Size <= 64 || Size > LargestVector)
3231  return true;
3232  QualType EltTy = VecTy->getElementType();
3233  if (passInt128VectorsInMem() &&
3234  (EltTy->isSpecificBuiltinType(BuiltinType::Int128) ||
3235  EltTy->isSpecificBuiltinType(BuiltinType::UInt128)))
3236  return true;
3237  }
3238 
3239  return false;
3240 }
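// For illustration: with no AVX (X86AVXABILevel::None) the native vector size
// above is 128 bits, so a hypothetical 256-bit vector such as
//
//   typedef double v4df __attribute__((vector_size(32)));
//
// satisfies Size > LargestVector and is reported as illegal; it then ends up
// being passed in memory through getIndirectResult() rather than in a YMM
// register.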
3241 
3242 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
3243  unsigned freeIntRegs) const {
3244  // If this is a scalar LLVM value then assume LLVM will pass it in the right
3245  // place naturally.
3246  //
3247  // This assumption is optimistic, as there could be free registers available
3248  // when we need to pass this argument in memory, and LLVM could try to pass
3249  // the argument in the free register. This does not seem to happen currently,
3250  // but this code would be much safer if we could mark the argument with
3251  // 'onstack'. See PR12193.
3252  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
3253  !Ty->isBitIntType()) {
3254  // Treat an enum type as its underlying type.
3255  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3256  Ty = EnumTy->getDecl()->getIntegerType();
3257 
3258  return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
3259  : ABIArgInfo::getDirect());
3260  }
3261 
3262  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
3263  return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3264 
3265  // Compute the byval alignment. We specify the alignment of the byval in all
3266  // cases so that the mid-level optimizer knows the alignment of the byval.
3267  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
3268 
3269  // Attempt to avoid passing indirect results using byval when possible. This
3270  // is important for good codegen.
3271  //
3272  // We do this by coercing the value into a scalar type which the backend can
3273  // handle naturally (i.e., without using byval).
3274  //
3275  // For simplicity, we currently only do this when we have exhausted all of the
3276  // free integer registers. Doing this when there are free integer registers
3277  // would require more care, as we would have to ensure that the coerced value
3278  // did not claim the unused register. That would require either reordering the
3279  // arguments to the function (so that any subsequent inreg values came first),
3280  // or only doing this optimization when there were no following arguments that
3281  // might be inreg.
3282  //
3283  // We currently expect it to be rare (particularly in well written code) for
3284  // arguments to be passed on the stack when there are still free integer
3285  // registers available (this would typically imply large structs being passed
3286  // by value), so this seems like a fair tradeoff for now.
3287  //
3288  // We can revisit this if the backend grows support for 'onstack' parameter
3289  // attributes. See PR12193.
3290  if (freeIntRegs == 0) {
3291  uint64_t Size = getContext().getTypeSize(Ty);
3292 
3293  // If this type fits in an eightbyte, coerce it into the matching integral
3294  // type, which will end up on the stack (with alignment 8).
3295  if (Align == 8 && Size <= 64)
3296  return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
3297  Size));
3298  }
3299 
3300  return getNaturalAlignIndirect(Ty);
3301 }
3302 
3303 /// The ABI specifies that a value should be passed in a full vector XMM/YMM
3304 /// register. Pick an LLVM IR type that will be passed as a vector register.
3305 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
3306  // Wrapper structs/arrays that only contain vectors are passed just like
3307  // vectors; strip them off if present.
3308  if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
3309  Ty = QualType(InnerTy, 0);
3310 
3311  llvm::Type *IRType = CGT.ConvertType(Ty);
3312  if (isa<llvm::VectorType>(IRType)) {
3313  // Don't pass vXi128 vectors in their native type; the backend can't
3314  // legalize them.
3315  if (passInt128VectorsInMem() &&
3316  cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
3317  // Use a vXi64 vector.
3318  uint64_t Size = getContext().getTypeSize(Ty);
3319  return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
3320  Size / 64);
3321  }
3322 
3323  return IRType;
3324  }
3325 
3326  if (IRType->getTypeID() == llvm::Type::FP128TyID)
3327  return IRType;
3328 
3329  // We couldn't find the preferred IR vector type for 'Ty'.
3330  uint64_t Size = getContext().getTypeSize(Ty);
3331  assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
3332 
3333 
3334  // Return an LLVM IR vector type based on the size of 'Ty'.
3335  return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
3336  Size / 64);
3337 }
3338 
3339 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
3340 /// is known to either be off the end of the specified type or being in
3341 /// alignment padding. The user type specified is known to be at most 128 bits
3342 /// in size, and have passed through X86_64ABIInfo::classify with a successful
3343 /// classification that put one of the two halves in the INTEGER class.
3344 ///
3345 /// It is conservatively correct to return false.
3346 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
3347  unsigned EndBit, ASTContext &Context) {
3348  // If the bytes being queried are off the end of the type, there is no user
3349  // data hiding here. This handles analysis of builtins, vectors and other
3350  // types that don't contain interesting padding.
3351  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
3352  if (TySize <= StartBit)
3353  return true;
3354 
3355  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
3356  unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
3357  unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
3358 
3359  // Check each element to see if the element overlaps with the queried range.
3360  for (unsigned i = 0; i != NumElts; ++i) {
3361  // If the element is after the span we care about, then we're done.
3362  unsigned EltOffset = i*EltSize;
3363  if (EltOffset >= EndBit) break;
3364 
3365  unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
3366  if (!BitsContainNoUserData(AT->getElementType(), EltStart,
3367  EndBit-EltOffset, Context))
3368  return false;
3369  }
3370  // If it overlaps no elements, then it is safe to process as padding.
3371  return true;
3372  }
3373 
3374  if (const RecordType *RT = Ty->getAs<RecordType>()) {
3375  const RecordDecl *RD = RT->getDecl();
3376  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
3377 
3378  // If this is a C++ record, check the bases first.
3379  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3380  for (const auto &I : CXXRD->bases()) {
3381  assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3382  "Unexpected base class!");
3383  const auto *Base =
3384  cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
3385 
3386  // If the base is after the span we care about, ignore it.
3387  unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
3388  if (BaseOffset >= EndBit) continue;
3389 
3390  unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
3391  if (!BitsContainNoUserData(I.getType(), BaseStart,
3392  EndBit-BaseOffset, Context))
3393  return false;
3394  }
3395  }
3396 
3397  // Verify that no field has data that overlaps the region of interest. Yes
3398  // this could be sped up a lot by being smarter about queried fields,
3399  // however we're only looking at structs up to 16 bytes, so we don't care
3400  // much.
3401  unsigned idx = 0;
3402  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3403  i != e; ++i, ++idx) {
3404  unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
3405 
3406  // If we found a field after the region we care about, then we're done.
3407  if (FieldOffset >= EndBit) break;
3408 
3409  unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
3410  if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
3411  Context))
3412  return false;
3413  }
3414 
3415  // If nothing in this record overlapped the area of interest, then we're
3416  // clean.
3417  return true;
3418  }
3419 
3420  return false;
3421 }
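// Worked example for the query above (hypothetical type): for
//
//   struct S { double d; int i; };   // 16 bytes, bits [96, 128) are padding
//
// BitsContainNoUserData(S, 96, 128, Ctx) returns true: 'd' lies entirely below
// bit 96 and 'i' ends at bit 96, so the queried range is tail padding. This is
// what later lets the high eightbyte shrink from i64 to i32.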
3422 
3423 /// getFPTypeAtOffset - Return a floating point type at the specified offset.
3424 static llvm::Type *getFPTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3425  const llvm::DataLayout &TD) {
3426  if (IROffset == 0 && IRType->isFloatingPointTy())
3427  return IRType;
3428 
3429  // If this is a struct, recurse into the field at the specified offset.
3430  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3431  if (!STy->getNumContainedTypes())
3432  return nullptr;
3433 
3434  const llvm::StructLayout *SL = TD.getStructLayout(STy);
3435  unsigned Elt = SL->getElementContainingOffset(IROffset);
3436  IROffset -= SL->getElementOffset(Elt);
3437  return getFPTypeAtOffset(STy->getElementType(Elt), IROffset, TD);
3438  }
3439 
3440  // If this is an array, recurse into the field at the specified offset.
3441  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3442  llvm::Type *EltTy = ATy->getElementType();
3443  unsigned EltSize = TD.getTypeAllocSize(EltTy);
3444  IROffset -= IROffset / EltSize * EltSize;
3445  return getFPTypeAtOffset(EltTy, IROffset, TD);
3446  }
3447 
3448  return nullptr;
3449 }
3450 
3451 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
3452 /// low 8 bytes of an XMM register, corresponding to the SSE class.
3453 llvm::Type *X86_64ABIInfo::
3454 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3455  QualType SourceTy, unsigned SourceOffset) const {
3456  const llvm::DataLayout &TD = getDataLayout();
3457  unsigned SourceSize =
3458  (unsigned)getContext().getTypeSize(SourceTy) / 8 - SourceOffset;
3459  llvm::Type *T0 = getFPTypeAtOffset(IRType, IROffset, TD);
3460  if (!T0 || T0->isDoubleTy())
3461  return llvm::Type::getDoubleTy(getVMContext());
3462 
3463  // Get the adjacent FP type.
3464  llvm::Type *T1 = nullptr;
3465  unsigned T0Size = TD.getTypeAllocSize(T0);
3466  if (SourceSize > T0Size)
3467  T1 = getFPTypeAtOffset(IRType, IROffset + T0Size, TD);
3468  if (T1 == nullptr) {
3469  // Check if IRType is a half + float. float type will be in IROffset+4 due
3470  // to its alignment.
3471  if (T0->isHalfTy() && SourceSize > 4)
3472  T1 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
3473  // If we can't get a second FP type, return a simple half or float.
3474  // avx512fp16-abi.c:pr51813_2 shows it works to return float for
3475  // {float, i8} too.
3476  if (T1 == nullptr)
3477  return T0;
3478  }
3479 
3480  if (T0->isFloatTy() && T1->isFloatTy())
3481  return llvm::FixedVectorType::get(T0, 2);
3482 
3483  if (T0->isHalfTy() && T1->isHalfTy()) {
3484  llvm::Type *T2 = nullptr;
3485  if (SourceSize > 4)
3486  T2 = getFPTypeAtOffset(IRType, IROffset + 4, TD);
3487  if (T2 == nullptr)
3488  return llvm::FixedVectorType::get(T0, 2);
3489  return llvm::FixedVectorType::get(T0, 4);
3490  }
3491 
3492  if (T0->isHalfTy() || T1->isHalfTy())
3493  return llvm::FixedVectorType::get(llvm::Type::getHalfTy(getVMContext()), 4);
3494 
3495  return llvm::Type::getDoubleTy(getVMContext());
3496 }
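// Some results this tends to produce (hypothetical inputs, under the
// half/float/double handling above):
//
//   struct { double d; }            -> double
//   struct { float a, b; }          -> <2 x float>
//   struct { _Float16 a, b, c, d; } -> <4 x half>
//   struct { _Float16 a; float b; } -> <4 x half>   (the half + float case)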
3497 
3498 
3499 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
3500 /// an 8-byte GPR. This means that we either have a scalar or we are talking
3501 /// about the high or low part of an up-to-16-byte struct. This routine picks
3502 /// the best LLVM IR type to represent this, which may be i64 or may be anything
3503 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
3504 /// etc).
3505 ///
3506 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
3507 /// the source type. IROffset is an offset in bytes into the LLVM IR type that
3508 /// the 8-byte value references. PrefType may be null.
3509 ///
3510 /// SourceTy is the source-level type for the entire argument. SourceOffset is
3511 /// an offset into this that we're processing (which is always either 0 or 8).
3512 ///
3513 llvm::Type *X86_64ABIInfo::
3514 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3515  QualType SourceTy, unsigned SourceOffset) const {
3516  // If we're dealing with an un-offset LLVM IR type, then it means that we're
3517  // returning an 8-byte unit starting with it. See if we can safely use it.
3518  if (IROffset == 0) {
3519  // Pointers and int64's always fill the 8-byte unit.
3520  if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
3521  IRType->isIntegerTy(64))
3522  return IRType;
3523 
3524  // If we have a 1/2/4-byte integer, we can use it only if the rest of the
3525  // goodness in the source type is just tail padding. This is allowed to
3526  // kick in for struct {double,int} on the int, but not on
3527  // struct{double,int,int} because we wouldn't return the second int. We
3528  // have to do this analysis on the source type because we can't depend on
3529  // unions being lowered a specific way etc.
3530  if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3531  IRType->isIntegerTy(32) ||
3532  (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3533  unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3534  cast<llvm::IntegerType>(IRType)->getBitWidth();
3535 
3536  if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
3537  SourceOffset*8+64, getContext()))
3538  return IRType;
3539  }
3540  }
3541 
3542  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3543  // If this is a struct, recurse into the field at the specified offset.
3544  const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
3545  if (IROffset < SL->getSizeInBytes()) {
3546  unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3547  IROffset -= SL->getElementOffset(FieldIdx);
3548 
3549  return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3550  SourceTy, SourceOffset);
3551  }
3552  }
3553 
3554  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3555  llvm::Type *EltTy = ATy->getElementType();
3556  unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
3557  unsigned EltOffset = IROffset/EltSize*EltSize;
3558  return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
3559  SourceOffset);
3560  }
3561 
3562  // Okay, we don't have any better idea of what to pass, so we pass this in an
3563  // integer register that isn't too big to fit the rest of the struct.
3564  unsigned TySizeInBytes =
3565  (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
3566 
3567  assert(TySizeInBytes != SourceOffset && "Empty field?");
3568 
3569  // It is always safe to classify this as an integer type up to i64 that
3570  // isn't larger than the structure.
3571  return llvm::IntegerType::get(getVMContext(),
3572  std::min(TySizeInBytes-SourceOffset, 8U)*8);
3573 }
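// Examples of the GPR types this picks (hypothetical structs, LP64, following
// the tail-padding analysis above):
//
//   struct { char c; short s; }   -> i32   (4-byte struct, the rest is padding)
//   struct { int a; int b; }      -> i64   (both ints are user data)
//   struct { void *p; }           -> the pointer type (fills the eightbyte)
//   struct { double d; int i; }   -> i32 for the second eightbyte (offset 8)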
3574 
3575 
3576 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
3577 /// be used as elements of a two register pair to pass or return, return a
3578 /// first class aggregate to represent them. For example, if the low part of
3579 /// a by-value argument should be passed as i32* and the high part as float,
3580 /// return {i32*, float}.
3581 static llvm::Type *
3582 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
3583  const llvm::DataLayout &TD) {
3584  // In order to correctly satisfy the ABI, we need the high part to start
3585  // at offset 8. If the high and low parts we inferred are both 4-byte types
3586  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
3587  // the second element at offset 8. Check for this:
3588  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3589  unsigned HiAlign = TD.getABITypeAlignment(Hi);
3590  unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3591  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
3592 
3593  // To handle this, we have to increase the size of the low part so that the
3594  // second element will start at an 8 byte offset. We can't increase the size
3595  // of the second element because it might make us access off the end of the
3596  // struct.
3597  if (HiStart != 8) {
3598  // There are usually two sorts of types the ABI generation code can produce
3599  // for the low part of a pair that aren't 8 bytes in size: half, float or
3600  // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
3601  // NaCl).
3602  // Promote these to a larger type.
3603  if (Lo->isHalfTy() || Lo->isFloatTy())
3604  Lo = llvm::Type::getDoubleTy(Lo->getContext());
3605  else {
3606  assert((Lo->isIntegerTy() || Lo->isPointerTy())
3607  && "Invalid/unknown lo type");
3608  Lo = llvm::Type::getInt64Ty(Lo->getContext());
3609  }
3610  }
3611 
3612  llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
3613 
3614  // Verify that the second element is at an 8-byte offset.
3615  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3616  "Invalid x86-64 argument pair!");
3617  return Result;
3618 }
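// For instance (hypothetical lo/hi picks): if the low part was inferred as
// float and the high part as i32, the naive pair {float, i32} would place the
// i32 at offset 4, so the float is widened to double and the function returns
// {double, i32}, whose second element starts at offset 8 as required.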
3619 
3620 ABIArgInfo X86_64ABIInfo::
3621 classifyReturnType(QualType RetTy) const {
3622  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
3623  // classification algorithm.
3624  X86_64ABIInfo::Class Lo, Hi;
3625  classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
3626 
3627  // Check some invariants.
3628  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3629  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3630 
3631  llvm::Type *ResType = nullptr;
3632  switch (Lo) {
3633  case NoClass:
3634  if (Hi == NoClass)
3635  return ABIArgInfo::getIgnore();
3636  // If the low part is just padding, it takes no register, leave ResType
3637  // null.
3638  assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3639  "Unknown missing lo part");
3640  break;
3641 
3642  case SSEUp:
3643  case X87Up:
3644  llvm_unreachable("Invalid classification for lo word.");
3645 
3646  // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
3647  // hidden argument.
3648  case Memory:
3649  return getIndirectReturnResult(RetTy);
3650 
3651  // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
3652  // available register of the sequence %rax, %rdx is used.
3653  case Integer:
3654  ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3655 
3656  // If we have a sign or zero extended integer, make sure to return Extend
3657  // so that the parameter gets the right LLVM IR attributes.
3658  if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3659  // Treat an enum type as its underlying type.
3660  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3661  RetTy = EnumTy->getDecl()->getIntegerType();
3662 
3663  if (RetTy->isIntegralOrEnumerationType() &&
3664  isPromotableIntegerTypeForABI(RetTy))
3665  return ABIArgInfo::getExtend(RetTy);
3666  }
3667  break;
3668 
3669  // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
3670  // available SSE register of the sequence %xmm0, %xmm1 is used.
3671  case SSE:
3672  ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3673  break;
3674 
3675  // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
3676  // returned on the X87 stack in %st0 as 80-bit x87 number.
3677  case X87:
3678  ResType = llvm::Type::getX86_FP80Ty(getVMContext());
3679  break;
3680 
3681  // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
3682  // part of the value is returned in %st0 and the imaginary part in
3683  // %st1.
3684  case ComplexX87:
3685  assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
3686  ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
3687  llvm::Type::getX86_FP80Ty(getVMContext()));
3688  break;
3689  }
3690 
3691  llvm::Type *HighPart = nullptr;
3692  switch (Hi) {
3693  // Memory was handled previously and X87 should
3694  // never occur as a hi class.
3695  case Memory:
3696  case X87:
3697  llvm_unreachable("Invalid classification for hi word.");
3698 
3699  case ComplexX87: // Previously handled.
3700  case NoClass:
3701  break;
3702 
3703  case Integer:
3704  HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3705  if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3706  return ABIArgInfo::getDirect(HighPart, 8);
3707  break;
3708  case SSE:
3709  HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3710  if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3711  return ABIArgInfo::getDirect(HighPart, 8);
3712  break;
3713 
3714  // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
3715  // is returned in the next available eightbyte chunk of the last used
3716  // vector register.
3717  //
3718  // SSEUP should always be preceded by SSE, just widen.
3719  case SSEUp:
3720  assert(Lo == SSE && "Unexpected SSEUp classification.");
3721  ResType = GetByteVectorType(RetTy);
3722  break;
3723 
3724  // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
3725  // returned together with the previous X87 value in %st0.
3726  case X87Up:
3727  // If X87Up is preceded by X87, we don't need to do
3728  // anything. However, in some cases with unions it may not be
3729  // preceded by X87. In such situations we follow gcc and pass the
3730  // extra bits in an SSE reg.
3731  if (Lo != X87) {
3732  HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3733  if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3734  return ABIArgInfo::getDirect(HighPart, 8);
3735  }
3736  break;
3737  }
3738 
3739  // If a high part was specified, merge it together with the low part. It is
3740  // known to pass in the high eightbyte of the result. We do this by forming a
3741  // first class struct aggregate with the high and low part: {low, high}
3742  if (HighPart)
3743  ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3744 
3745  return ABIArgInfo::getDirect(ResType);
3746 }
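// Putting the return path together for a hypothetical type:
//
//   struct S { double d; int i; };
//   struct S f(void);
//
// classifies as Lo = SSE, Hi = Integer, so ResType becomes double, HighPart
// becomes i32, and the final coercion is {double, i32}: 'd' is returned in
// %xmm0 and 'i' in %eax.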
3747 
3748 ABIArgInfo
3749 X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
3750  unsigned &neededInt, unsigned &neededSSE,
3751  bool isNamedArg, bool IsRegCall) const {
3752  Ty = useFirstFieldIfTransparentUnion(Ty);
3753 
3754  X86_64ABIInfo::Class Lo, Hi;
3755  classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);
3756 
3757  // Check some invariants.
3758  // FIXME: Enforce these by construction.
3759  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3760  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3761 
3762  neededInt = 0;
3763  neededSSE = 0;
3764  llvm::Type *ResType = nullptr;
3765  switch (Lo) {
3766  case NoClass:
3767  if (Hi == NoClass)
3768  return ABIArgInfo::getIgnore();
3769  // If the low part is just padding, it takes no register, leave ResType
3770  // null.
3771  assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3772  "Unknown missing lo part");
3773  break;
3774 
3775  // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
3776  // on the stack.
3777  case Memory:
3778 
3779  // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
3780  // COMPLEX_X87, it is passed in memory.
3781  case X87:
3782  case ComplexX87:
3783  if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
3784  ++neededInt;
3785  return getIndirectResult(Ty, freeIntRegs);
3786 
3787  case SSEUp:
3788  case X87Up:
3789  llvm_unreachable("Invalid classification for lo word.");
3790 
3791  // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
3792  // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
3793  // and %r9 is used.
3794  case Integer:
3795  ++neededInt;
3796 
3797  // Pick an 8-byte type based on the preferred type.
3798  ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
3799 
3800  // If we have a sign or zero extended integer, make sure to return Extend
3801  // so that the parameter gets the right LLVM IR attributes.
3802  if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3803  // Treat an enum type as its underlying type.
3804  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3805  Ty = EnumTy->getDecl()->getIntegerType();
3806 
3807  if (Ty->isIntegralOrEnumerationType() &&
3808  isPromotableIntegerTypeForABI(Ty))
3809  return ABIArgInfo::getExtend(Ty);
3810  }
3811 
3812  break;
3813 
3814  // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
3815  // available SSE register is used, the registers are taken in the
3816  // order from %xmm0 to %xmm7.
3817  case SSE: {
3818  llvm::Type *IRType = CGT.ConvertType(Ty);
3819  ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3820  ++neededSSE;
3821  break;
3822  }
3823  }
3824 
3825  llvm::Type *HighPart = nullptr;
3826  switch (Hi) {
3827  // Memory was handled previously, ComplexX87 and X87 should
3828  // never occur as hi classes, and X87Up must be preceded by X87,
3829  // which is passed in memory.
3830  case Memory:
3831  case X87:
3832  case ComplexX87:
3833  llvm_unreachable("Invalid classification for hi word.");
3834 
3835  case NoClass: break;
3836 
3837  case Integer:
3838  ++neededInt;
3839  // Pick an 8-byte type based on the preferred type.
3840  HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3841 
3842  if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3843  return ABIArgInfo::getDirect(HighPart, 8);
3844  break;
3845 
3846  // X87Up generally doesn't occur here (long double is passed in
3847  // memory), except in situations involving unions.
3848  case X87Up:
3849  case SSE:
3850  HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3851 
3852  if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3853  return ABIArgInfo::getDirect(HighPart, 8);
3854 
3855  ++neededSSE;
3856  break;
3857 
3858  // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
3859  // eightbyte is passed in the upper half of the last used SSE
3860  // register. This only happens when 128-bit vectors are passed.
3861  case SSEUp:
3862  assert(Lo == SSE && "Unexpected SSEUp classification");
3863  ResType = GetByteVectorType(Ty);
3864  break;
3865  }
3866 
3867  // If a high part was specified, merge it together with the low part. It is
3868  // known to pass in the high eightbyte of the result. We do this by forming a
3869  // first class struct aggregate with the high and low part: {low, high}
3870  if (HighPart)
3871  ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3872 
3873  return ABIArgInfo::getDirect(ResType);
3874 }
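// Example of the register accounting above (hypothetical signature):
//
//   struct S { double d; long l; };
//   void f(struct S s, int x);
//
// 's' classifies as {SSE, Integer}, is coerced to {double, i64}, and reports
// neededSSE = 1, neededInt = 1; 'x' is INTEGER and reports neededInt = 1.
// computeInfo() below checks those counts against the remaining registers.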
3875 
3876 ABIArgInfo
3877 X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
3878  unsigned &NeededSSE,
3879  unsigned &MaxVectorWidth) const {
3880  auto RT = Ty->getAs<RecordType>();
3881  assert(RT && "classifyRegCallStructType only valid with struct types");
3882 
3883  if (RT->getDecl()->hasFlexibleArrayMember())
3884  return getIndirectReturnResult(Ty);
3885 
3886  // Sum up bases
3887  if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
3888  if (CXXRD->isDynamicClass()) {
3889  NeededInt = NeededSSE = 0;
3890  return getIndirectReturnResult(Ty);
3891  }
3892 
3893  for (const auto &I : CXXRD->bases())
3894  if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,
3895  MaxVectorWidth)
3896  .isIndirect()) {
3897  NeededInt = NeededSSE = 0;
3898  return getIndirectReturnResult(Ty);
3899  }
3900  }
3901 
3902  // Sum up members
3903  for (const auto *FD : RT->getDecl()->fields()) {
3904  QualType MTy = FD->getType();
3905  if (MTy->isRecordType() && !MTy->isUnionType()) {
3906  if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
3907  MaxVectorWidth)
3908  .isIndirect()) {
3909  NeededInt = NeededSSE = 0;
3910  return getIndirectReturnResult(Ty);
3911  }
3912  } else {
3913  unsigned LocalNeededInt, LocalNeededSSE;
3914  if (classifyArgumentType(MTy, UINT_MAX, LocalNeededInt, LocalNeededSSE,
3915  true, true)
3916  .isIndirect()) {
3917  NeededInt = NeededSSE = 0;
3918  return getIndirectReturnResult(Ty);
3919  }
3920  if (const auto *AT = getContext().getAsConstantArrayType(MTy))
3921  MTy = AT->getElementType();
3922  if (const auto *VT = MTy->getAs<VectorType>())
3923  if (getContext().getTypeSize(VT) > MaxVectorWidth)
3924  MaxVectorWidth = getContext().getTypeSize(VT);
3925  NeededInt += LocalNeededInt;
3926  NeededSSE += LocalNeededSSE;
3927  }
3928  }
3929 
3930  return ABIArgInfo::getDirect();
3931 }
3932 
3933 ABIArgInfo
3934 X86_64ABIInfo::classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
3935  unsigned &NeededSSE,
3936  unsigned &MaxVectorWidth) const {
3937 
3938  NeededInt = 0;
3939  NeededSSE = 0;
3940  MaxVectorWidth = 0;
3941 
3942  return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,
3943  MaxVectorWidth);
3944 }
3945 
3946 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3947 
3948  const unsigned CallingConv = FI.getCallingConvention();
3949  // It is possible to force the Win64 calling convention on any x86_64 target
3950  // by using __attribute__((ms_abi)). In that case, delegate this call to
3951  // WinX86_64ABIInfo::computeInfo to emit Win64-compatible code correctly.
3952  if (CallingConv == llvm::CallingConv::Win64) {
3953  WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
3954  Win64ABIInfo.computeInfo(FI);
3955  return;
3956  }
3957 
3958  bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
3959 
3960  // Keep track of the number of assigned registers.
3961  unsigned FreeIntRegs = IsRegCall ? 11 : 6;
3962  unsigned FreeSSERegs = IsRegCall ? 16 : 8;
3963  unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;
3964 
3965  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
3966  if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
3967  !FI.getReturnType()->getTypePtr()->isUnionType()) {
3968  FI.getReturnInfo() = classifyRegCallStructType(
3969  FI.getReturnType(), NeededInt, NeededSSE, MaxVectorWidth);
3970  if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3971  FreeIntRegs -= NeededInt;
3972  FreeSSERegs -= NeededSSE;
3973  } else {
3974  FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
3975  }
3976  } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() &&
3977  getContext().getCanonicalType(FI.getReturnType()
3978  ->getAs<ComplexType>()
3979  ->getElementType()) ==
3980  getContext().LongDoubleTy)
3981  // Complex Long Double Type is passed in Memory when Regcall
3982  // calling convention is used.
3983  FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
3984  else
3985  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3986  }
3987 
3988  // If the return value is indirect, then the hidden argument is consuming one
3989  // integer register.
3990  if (FI.getReturnInfo().isIndirect())
3991  --FreeIntRegs;
3992  else if (NeededSSE && MaxVectorWidth > 0)
3993  FI.setMaxVectorWidth(MaxVectorWidth);
3994 
3995  // The chain argument effectively gives us another free register.
3996  if (FI.isChainCall())
3997  ++FreeIntRegs;
3998 
3999  unsigned NumRequiredArgs = FI.getNumRequiredArgs();
4000  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
4001  // get assigned (in left-to-right order) for passing as follows...
4002  unsigned ArgNo = 0;
4003  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
4004  it != ie; ++it, ++ArgNo) {
4005  bool IsNamedArg = ArgNo < NumRequiredArgs;
4006 
4007  if (IsRegCall && it->type->isStructureOrClassType())
4008  it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,
4009  MaxVectorWidth);
4010  else
4011  it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
4012  NeededSSE, IsNamedArg);
4013 
4014  // AMD64-ABI 3.2.3p3: If there are no registers available for any
4015  // eightbyte of an argument, the whole argument is passed on the
4016  // stack. If registers have already been assigned for some
4017  // eightbytes of such an argument, the assignments get reverted.
4018  if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
4019  FreeIntRegs -= NeededInt;
4020  FreeSSERegs -= NeededSSE;
4021  if (MaxVectorWidth > FI.getMaxVectorWidth())
4022  FI.setMaxVectorWidth(MaxVectorWidth);
4023  } else {
4024  it->info = getIndirectResult(it->type, FreeIntRegs);
4025  }
4026  }
4027 }
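// Register budget used above, for reference: the SysV convention starts with
// 6 integer registers (%rdi, %rsi, %rdx, %rcx, %r8, %r9) and 8 SSE registers
// (%xmm0-%xmm7); __regcall starts with 11 and 16. An indirect return consumes
// one integer register for the hidden sret pointer, and an argument whose
// eightbytes no longer fit falls back to the stack via getIndirectResult().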
4028 
4029 static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
4030  Address VAListAddr, QualType Ty) {
4031  Address overflow_arg_area_p =
4032  CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
4033  llvm::Value *overflow_arg_area =
4034  CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
4035 
4036  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a
4037  // 16-byte boundary if the alignment needed by the type exceeds 8 bytes.
4038  // It isn't stated explicitly in the standard, but in practice we use
4039  // alignment greater than 16 where necessary.
4040  CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
4041  if (Align > CharUnits::fromQuantity(8)) {
4042  overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
4043  Align);
4044  }
4045 
4046  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
4047  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
4048  llvm::Value *Res =
4049  CGF.Builder.CreateBitCast(overflow_arg_area,
4050  llvm::PointerType::getUnqual(LTy));
4051 
4052  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
4053  // l->overflow_arg_area + sizeof(type).
4054  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
4055  // an 8 byte boundary.
4056 
4057  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
4058  llvm::Value *Offset =
4059  llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
4060  overflow_arg_area = CGF.Builder.CreateGEP(CGF.Int8Ty, overflow_arg_area,
4061  Offset, "overflow_arg_area.next");
4062  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
4063 
4064  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
4065  return Address(Res, LTy, Align);
4066 }
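// Illustration of the Step 10 rounding above (hypothetical type): a 12-byte
// struct has SizeInBytes == 12, so overflow_arg_area advances by
// (12 + 7) & ~7 == 16 bytes, keeping the next va_arg slot 8-byte aligned.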
4067 
4068 Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4069  QualType Ty) const {
4070  // Assume that va_list type is correct; should be pointer to LLVM type:
4071  // struct {
4072  // i32 gp_offset;
4073  // i32 fp_offset;
4074  // i8* overflow_arg_area;
4075  // i8* reg_save_area;
4076  // };
4077  unsigned neededInt, neededSSE;
4078 
4079  Ty = getContext().getCanonicalType(Ty);
4080  ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
4081  /*isNamedArg*/false);
4082 
4083  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
4084  // in the registers. If not go to step 7.
4085  if (!neededInt && !neededSSE)
4086  return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
4087 
4088  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
4089  // general purpose registers needed to pass type and num_fp to hold
4090  // the number of floating point registers needed.
4091 
4092  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
4093  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
4094  // l->fp_offset > 304 - num_fp * 16 go to step 7.
4095  //
4096  // NOTE: 304 is a typo; there are (6 * 8 + 8 * 16) = 176 bytes of
4097  // register save space.
4098 
4099  llvm::Value *InRegs = nullptr;
4100  Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
4101  llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
4102  if (neededInt) {
4103  gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
4104  gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
4105  InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
4106  InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
4107  }
4108 
4109  if (neededSSE) {
4110  fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
4111  fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
4112  llvm::Value *FitsInFP =
4113  llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
4114  FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
4115  InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
4116  }
4117 
4118  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
4119  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
4120  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
4121  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
4122 
4123  // Emit code to load the value if it was passed in registers.
4124 
4125  CGF.EmitBlock(InRegBlock);
4126 
4127  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
4128  // an offset of l->gp_offset and/or l->fp_offset. This may require
4129  // copying to a temporary location in case the parameter is passed
4130  // in different register classes or requires an alignment greater
4131  // than 8 for general purpose registers and 16 for XMM registers.
4132  //
4133  // FIXME: This really results in shameful code when we end up needing to
4134  // collect arguments from different places; often what should result in a
4135  // simple assembling of a structure from scattered addresses has many more
4136  // loads than necessary. Can we clean this up?
4137  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
4138  llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
4139  CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");
4140 
4141  Address RegAddr = Address::invalid();
4142  if (neededInt && neededSSE) {
4143  // FIXME: Cleanup.
4144  assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
4145  llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
4146  Address Tmp = CGF.CreateMemTemp(Ty);
4147  Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
4148  assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
4149  llvm::Type *TyLo = ST->getElementType(0);
4150  llvm::Type *TyHi = ST->getElementType(1);
4151  assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
4152  "Unexpected ABI info for mixed regs");
4153  llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
4154  llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
4155  llvm::Value *GPAddr =
4156  CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset);
4157  llvm::Value *FPAddr =
4158  CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset);
4159  llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
4160  llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
4161 
4162  // Copy the first element.
4163  // FIXME: Our choice of alignment here and below is probably pessimistic.
4164  llvm::Value *V = CGF.Builder.CreateAlignedLoad(
4165  TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
4166  CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo)));
4167  CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
4168 
4169  // Copy the second element.
4170  V = CGF.Builder.CreateAlignedLoad(
4171  TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
4172  CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi)));
4173  CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
4174 
4175  RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
4176  } else if (neededInt) {
4177  RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset),
4178  CGF.Int8Ty, CharUnits::fromQuantity(8));
4179  RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
4180 
4181  // Copy to a temporary if necessary to ensure the appropriate alignment.
4182  auto TInfo = getContext().getTypeInfoInChars(Ty);
4183  uint64_t TySize = TInfo.Width.getQuantity();
4184  CharUnits TyAlign = TInfo.Align;
4185 
4186  // Copy into a temporary if the type is more aligned than the
4187  // register save area.
4188  if (TyAlign.getQuantity() > 8) {
4189  Address Tmp = CGF.CreateMemTemp(Ty);
4190  CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
4191  RegAddr = Tmp;
4192  }
4193 
4194  } else if (neededSSE == 1) {
4195  RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset),
4196  CGF.Int8Ty, CharUnits::fromQuantity(16));
4197  RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
4198  } else {
4199  assert(neededSSE == 2 && "Invalid number of needed registers!");
4200  // SSE registers are spaced 16 bytes apart in the register save
4201  // area, so we need to collect the two eightbytes together.
4202  // The ABI isn't explicit about this, but it seems reasonable
4203  // to assume that the slots are 16-byte aligned, since the stack is
4204  // naturally 16-byte aligned and the prologue is expected to store
4205  // all the SSE registers to the RSA.
4206  Address RegAddrLo = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea,
4207  fp_offset),
4208  CGF.Int8Ty, CharUnits::fromQuantity(16));
4209  Address RegAddrHi =
4210  CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
4211  CharUnits::fromQuantity(16));
4212  llvm::Type *ST = AI.canHaveCoerceToType()
4213  ? AI.getCoerceToType()
4214  : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
4215  llvm::Value *V;
4216  Address Tmp = CGF.CreateMemTemp(Ty);
4217  Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
4218  V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
4219  RegAddrLo, ST->getStructElementType(0)));
4220  CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
4221  V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
4222  RegAddrHi, ST->getStructElementType(1)));
4223  CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
4224 
4225  RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
4226  }
4227 
4228  // AMD64-ABI 3.5.7p5: Step 5. Set:
4229  // l->gp_offset = l->gp_offset + num_gp * 8
4230  // l->fp_offset = l->fp_offset + num_fp * 16.
4231  if (neededInt) {
4232  llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
4233  CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
4234  gp_offset_p);
4235  }
4236  if (neededSSE) {
4237  llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
4238  CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
4239  fp_offset_p);
4240  }
4241  CGF.EmitBranch(ContBlock);
4242 
4243  // Emit code to load the value if it was passed in memory.
4244 
4245  CGF.EmitBlock(InMemBlock);
4246  Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
4247 
4248  // Return the appropriate result.
4249 
4250  CGF.EmitBlock(ContBlock);
4251  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
4252  "vaarg.addr");
4253  return ResAddr;
4254 }
4255 
4256 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
4257  QualType Ty) const {
4258  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4259  // not 1, 2, 4, or 8 bytes, must be passed by reference."
4260  uint64_t Width = getContext().getTypeSize(Ty);
4261  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
4262 
4263  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
4264  CGF.getContext().getTypeInfoInChars(Ty),
4265  CharUnits::fromQuantity(8),
4266  /*allowHigherAlign*/ false);
4267 }
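// Under the MS rule quoted above, for example (hypothetical uses):
//
//   va_arg(ap, int);            // Width == 32, a power of two -> direct copy
//   struct S { char buf[16]; };
//   va_arg(ap, struct S);       // Width == 128 > 64 -> IsIndirect; the
//                               // va_list slot holds a pointer to the struct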
4268 
4269 ABIArgInfo WinX86_64ABIInfo::reclassifyHvaArgForVectorCall(
4270  QualType Ty, unsigned &FreeSSERegs, const ABIArgInfo &current) const {
4271  const Type *Base = nullptr;
4272  uint64_t NumElts = 0;
4273 
4274  if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
4275  isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
4276  FreeSSERegs -= NumElts;
4277  return getDirectX86Hva();
4278  }
4279  return current;
4280 }
4281 
4282 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
4283  bool IsReturnType, bool IsVectorCall,
4284  bool IsRegCall) const {
4285 
4286  if (Ty->isVoidType())
4287  return ABIArgInfo::getIgnore();
4288 
4289  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4290  Ty = EnumTy->getDecl()->getIntegerType();
4291 
4292  TypeInfo Info = getContext().getTypeInfo(Ty);
4293  uint64_t Width = Info.Width;
4294  CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
4295 
4296  const RecordType *RT = Ty->getAs<RecordType>();
4297  if (RT) {
4298  if (!IsReturnType) {
4299  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
4300  return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4301  }
4302 
4303  if (RT->getDecl()->hasFlexibleArrayMember())
4304  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4305 
4306  }
4307 
4308  const Type *Base = nullptr;
4309  uint64_t NumElts = 0;
4310  // vectorcall adds the concept of a homogeneous vector aggregate, similar to
4311  // other targets.
4312  if ((IsVectorCall || IsRegCall) &&
4313  isHomogeneousAggregate(Ty, Base, NumElts)) {
4314  if (IsRegCall) {
4315  if (FreeSSERegs >= NumElts) {
4316  FreeSSERegs -= NumElts;
4317  if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
4318  return ABIArgInfo::getDirect();
4319  return ABIArgInfo::getExpand();
4320  }
4321  return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4322  } else if (IsVectorCall) {
4323  if (FreeSSERegs >= NumElts &&
4324  (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
4325  FreeSSERegs -= NumElts;
4326  return ABIArgInfo::getDirect();
4327  } else if (IsReturnType) {
4328  return ABIArgInfo::getExpand();
4329  } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
4330  // HVAs are delayed and reclassified in the 2nd step.
4331  return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4332  }
4333  }
4334  }
4335 
4336  if (Ty->isMemberPointerType()) {
4337  // If the member pointer is represented by an LLVM int or ptr, pass it
4338  // directly.
4339  llvm::Type *LLTy = CGT.ConvertType(Ty);
4340  if (LLTy->isPointerTy() || LLTy->isIntegerTy())
4341  return ABIArgInfo::getDirect();
4342  }
4343 
4344  if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
4345  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4346  // not 1, 2, 4, or 8 bytes, must be passed by reference."
4347  if (Width > 64 || !llvm::isPowerOf2_64(Width))
4348  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4349 
4350  // Otherwise, coerce it to a small integer.
4351  return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
4352  }
4353 
4354  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4355  switch (BT->getKind()) {
4356  case BuiltinType::Bool:
4357  // Bool is always extended per the ABI; other builtin types are not
4358  // extended.
4359  return ABIArgInfo::getExtend(Ty);
4360 
4361  case BuiltinType::LongDouble:
4362  // Mingw64 GCC uses the old 80 bit extended precision floating point
4363  // unit. It passes them indirectly through memory.
4364  if (IsMingw64) {
4365  const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
4366  if (LDF == &llvm::APFloat::x87DoubleExtended())
4367  return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4368  }
4369  break;
4370 
4371  case BuiltinType::Int128:
4372  case BuiltinType::UInt128:
4373  // If it's a parameter type, the normal ABI rule is that arguments larger
4374  // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
4375  // even though it isn't particularly efficient.
4376  if (!IsReturnType)
4377  return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4378 
4379  // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
4380  // Clang matches them for compatibility.
4381  return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
4382  llvm::Type::getInt64Ty(getVMContext()), 2));
4383 
4384  default:
4385  break;
4386  }
4387  }
4388 
4389  if (Ty->isBitIntType()) {
4390  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4391  // not 1, 2, 4, or 8 bytes, must be passed by reference."
4392  // However, non-power-of-two bit-precise integers will be passed as 1, 2, 4,
4393  // or 8 bytes anyway as long as it fits in them, so we don't have to check
4394  // the power of 2.
4395  if (Width <= 64)
4396  return ABIArgInfo::getDirect();
4397  return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
4398  }
4399 
4400  return ABIArgInfo::getDirect();
4401 }
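// A few outcomes of the Win64 classification above (hypothetical types):
//
//   struct A { int x, y; };     // 8 bytes, power of two -> coerced to i64
//   struct B { int x[3]; };     // 12 bytes -> indirect (not 1, 2, 4, or 8)
//   long double ld;             // MinGW x87 format -> indirect; with MSVC's
//                               // 64-bit long double it stays direct
//   __int128 i128;              // as a parameter -> indirect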
4402 
4403 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
4404  const unsigned CC = FI.getCallingConvention();
4405  bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
4406  bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;
4407 
4408  // If __attribute__((sysv_abi)) is in use, use the SysV argument
4409  // classification rules.
4410  if (CC == llvm::CallingConv::X86_64_SysV) {
4411  X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
4412  SysVABIInfo.computeInfo(FI);
4413  return;
4414  }
4415 
4416  unsigned FreeSSERegs = 0;
4417  if (IsVectorCall) {
4418  // We can use up to 4 SSE return registers with vectorcall.
4419  FreeSSERegs = 4;
4420  } else if (IsRegCall) {
4421  // RegCall gives us 16 SSE registers.
4422  FreeSSERegs = 16;
4423  }
4424 
4425  if (!getCXXABI().classifyReturnType(FI))
4426  FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
4427  IsVectorCall, IsRegCall);
4428 
4429  if (IsVectorCall) {
4430  // We can use up to 6 SSE register parameters with vectorcall.
4431  FreeSSERegs = 6;
4432  } else if (IsRegCall) {
4433  // RegCall gives us 16 SSE registers, we can reuse the return registers.
4434  FreeSSERegs = 16;
4435  }
4436 
4437  unsigned ArgNum = 0;
4438  unsigned ZeroSSERegs = 0;
4439  for (auto &I : FI.arguments()) {
4440  // Vectorcall in x64 only permits the first 6 arguments to be passed as
4441  // XMM/YMM registers. After the sixth argument, pretend no vector
4442  // registers are left.
4443  unsigned *MaybeFreeSSERegs =
4444  (IsVectorCall && ArgNum >= 6) ? &ZeroSSERegs : &FreeSSERegs;
4445  I.info =
4446  classify(I.type, *MaybeFreeSSERegs, false, IsVectorCall, IsRegCall);
4447  ++ArgNum;
4448  }
4449 
4450  if (IsVectorCall) {
4451  // For vectorcall, assign aggregate HVAs to any free vector registers in a
4452  // second pass.
4453  for (auto &I : FI.arguments())
4454  I.info = reclassifyHvaArgForVectorCall(I.type, FreeSSERegs, I.info);
4455  }
4456 }
4457 
4458 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4459  QualType Ty) const {
4460  // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4461  // not 1, 2, 4, or 8 bytes, must be passed by reference."
4462  uint64_t Width = getContext().getTypeSize(Ty);
4463  bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
4464 
4465  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
4466  CGF.getContext().getTypeInfoInChars(Ty),
4467  CharUnits::fromQuantity(8),
4468  /*allowHigherAlign*/ false);
4469 }
4470 
4471 static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4472  llvm::Value *Address, bool Is64Bit,
4473  bool IsAIX) {
4474  // This is calculated from the LLVM and GCC tables and verified
4475  // against gcc output. AFAIK all PPC ABIs use the same encoding.
4476 
4477  CodeGen::CGBuilderTy &Builder = CGF.Builder;
4478 
4479  llvm::IntegerType *i8 = CGF.Int8Ty;
4480  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4481  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4482  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4483 
4484  // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers
4485  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31);
4486 
4487  // 32-63: fp0-31, the 8-byte floating-point registers
4488  AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4489 
4490  // 64-67 are various 4-byte or 8-byte special-purpose registers:
4491  // 64: mq
4492  // 65: lr
4493  // 66: ctr
4494  // 67: ap
4495  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67);
4496 
4497  // 68-76 are various 4-byte special-purpose registers:
4498  // 68-75 cr0-7
4499  // 76: xer
4500  AssignToArrayRange(Builder, Address, Four8, 68, 76);
4501 
4502  // 77-108: v0-31, the 16-byte vector registers
4503  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4504 
4505  // 109: vrsave
4506  // 110: vscr
4507  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110);
4508 
4509  // AIX does not utilize the rest of the registers.
4510  if (IsAIX)
4511  return false;
4512 
4513  // 111: spe_acc
4514  // 112: spefscr
4515  // 113: sfp
4516  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113);
4517 
4518  if (!Is64Bit)
4519  return false;
4520 
4521  // TODO: Verify whether these registers are used on 64-bit AIX with Power8
4522  // or later CPUs.
4523  // 64-bit only registers:
4524  // 114: tfhar
4525  // 115: tfiar
4526  // 116: texasr
4527  AssignToArrayRange(Builder, Address, Eight8, 114, 116);
4528 
4529  return false;
4530 }
4531 
4532 // AIX
4533 namespace {
4534 /// AIXABIInfo - The AIX XCOFF ABI information.
4535 class AIXABIInfo : public ABIInfo {
4536  const bool Is64Bit;
4537  const unsigned PtrByteSize;
4538  CharUnits getParamTypeAlignment(QualType Ty) const;
4539 
4540 public:
4541  AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
4542  : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}
4543 
4544  bool isPromotableTypeForABI(QualType Ty) const;
4545 
4546  ABIArgInfo classifyReturnType(QualType RetTy) const;
4547  ABIArgInfo classifyArgumentType(QualType Ty) const;
4548 
4549  void computeInfo(CGFunctionInfo &FI) const override {
4550  if (!getCXXABI().classifyReturnType(FI))
4551  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4552 
4553  for (auto &I : FI.arguments())
4554  I.info = classifyArgumentType(I.type);
4555  }
4556 
4557  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4558  QualType Ty) const override;
4559 };
4560 
4561 class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
4562  const bool Is64Bit;
4563 
4564 public:
4565  AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
4566  : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)),
4567  Is64Bit(Is64Bit) {}
4568  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4569  return 1; // r1 is the dedicated stack pointer
4570  }
4571 
4572  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4573  llvm::Value *Address) const override;
4574 };
4575 } // namespace
4576 
4577 // Return true if the ABI requires Ty to be passed sign- or zero-
4578 // extended to 32/64 bits.
4579 bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
4580  // Treat an enum type as its underlying type.
4581  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4582  Ty = EnumTy->getDecl()->getIntegerType();
4583 
4584  // Promotable integer types are required to be promoted by the ABI.
4585  if (Ty->isPromotableIntegerType())
4586  return true;
4587 
4588  if (!Is64Bit)
4589  return false;
4590 
4591  // In 64-bit mode, in addition to the usual promotable integer types, we
4592  // also need to extend all 32-bit types, since the ABI requires promotion
4593  // to 64 bits.
4594  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4595  switch (BT->getKind()) {
4596  case BuiltinType::Int:
4597  case BuiltinType::UInt:
4598  return true;
4599  default:
4600  break;
4601  }
4602 
4603  return false;
4604 }
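// Illustrative aside, not part of the original file: on 64-bit AIX the rule
// above widens 32-bit integers in addition to the usual promotable types.
// For a hypothetical callee
//
//   long add(short a, int b, long c);
//
// 'a' (promotable) and 'b' (32-bit int) are sign-extended to 64 bits in their
// parameter registers, while 'c' is already register-sized and passed as-is.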
4605 
4606 ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const {
4607  if (RetTy->isAnyComplexType())
4608  return ABIArgInfo::getDirect();
4609 
4610  if (RetTy->isVectorType())
4611  return ABIArgInfo::getDirect();
4612 
4613  if (RetTy->isVoidType())
4614  return ABIArgInfo::getIgnore();
4615 
4616  if (isAggregateTypeForABI(RetTy))
4617  return getNaturalAlignIndirect(RetTy);
4618 
4619  return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
4620  : ABIArgInfo::getDirect());
4621 }
4622 
4623 ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
4624  Ty = useFirstFieldIfTransparentUnion(Ty);
4625 
4626  if (Ty->isAnyComplexType())
4627  return ABIArgInfo::getDirect();
4628 
4629  if (Ty->isVectorType())
4630  return ABIArgInfo::getDirect();
4631 
4632  if (isAggregateTypeForABI(Ty)) {
4633  // Records with non-trivial destructors/copy-constructors should not be
4634  // passed by value.
4635  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
4636  return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4637 
4638  CharUnits CCAlign = getParamTypeAlignment(Ty);
4639  CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);
4640 
4641  return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true,
4642  /*Realign*/ TyAlign > CCAlign);
4643  }
4644 
4645  return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
4646  : ABIArgInfo::getDirect());
4647 }
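// Illustrative aside, not part of the original file: an aggregate whose
// natural alignment exceeds the parameter-area alignment computed by
// getParamTypeAlignment is passed byval with Realign set. For example, a
// hypothetical
//
//   struct alignas(32) Wide { double d[4]; };
//
// has TyAlign == 32 but, containing no vector member, a CCAlign of only
// PtrByteSize, so it is passed indirectly, byval, and realigned.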
4648 
4649 CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
4650  // Complex types are passed just like their elements.
4651  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4652  Ty = CTy->getElementType();
4653 
4654  if (Ty->isVectorType())
4655  return CharUnits::fromQuantity(16);
4656 
4657  // If the structure contains a vector type, the alignment is 16.
4658  if (isRecordWithSIMDVectorType(getContext(), Ty))
4659  return CharUnits::fromQuantity(16);
4660 
4661  return CharUnits::fromQuantity(PtrByteSize);
4662 }
4663 
4664 Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4665  QualType Ty) const {
4666 
4667  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
4668  TypeInfo.Align = getParamTypeAlignment(Ty);
4669 
4670  CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize);
4671 
4672  // If we have a complex type and the base type is smaller than the register
4673  // size, the ABI calls for the real and imaginary parts to be right-adjusted
4674  // in separate words in 32-bit mode or doublewords in 64-bit mode. However,
4675  // Clang expects us to produce a pointer to a structure with the two parts
4676  // packed tightly. So generate loads of the real and imaginary parts relative
4677  // to the va_list pointer, and store them to a temporary structure. We do the
4678  // same as the PPC64ABI here.
4679  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4680  CharUnits EltSize = TypeInfo.Width / 2;
4681  if (EltSize < SlotSize)
4682  return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
4683  }
4684 
4685  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
4686  SlotSize, /*AllowHigher*/ true);
4687 }
4688 
4689 bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
4690  CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
4691  return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true);
4692 }
4693 
4694 // PowerPC-32
4695 namespace {
4696 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
4697 class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
4698  bool IsSoftFloatABI;
4699  bool IsRetSmallStructInRegABI;
4700 
4701  CharUnits getParamTypeAlignment(QualType Ty) const;
4702 
4703 public:
4704  PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI,
4705  bool RetSmallStructInRegABI)
4706  : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI),
4707  IsRetSmallStructInRegABI(RetSmallStructInRegABI) {}
4708 
4709  ABIArgInfo classifyReturnType(QualType RetTy) const;
4710 
4711  void computeInfo(CGFunctionInfo &FI) const override {
4712  if (!getCXXABI().classifyReturnType(FI))
4713  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4714  for (auto &I : FI.arguments())
4715  I.info = classifyArgumentType(I.type);
4716  }
4717 
4718  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4719  QualType Ty) const override;
4720 };
4721 
4722 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
4723 public:
4724  PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
4725  bool RetSmallStructInRegABI)
4726  : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>(
4727  CGT, SoftFloatABI, RetSmallStructInRegABI)) {}
4728 
4729  static bool isStructReturnInRegABI(const llvm::Triple &Triple,
4730  const CodeGenOptions &Opts);
4731 
4732  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4733  // This is recovered from gcc output.
4734  return 1; // r1 is the dedicated stack pointer
4735  }
4736 
4737  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4738  llvm::Value *Address) const override;
4739 };
4740 }
4741 
4742 CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4743  // Complex types are passed just like their elements.
4744  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4745  Ty = CTy->getElementType();
4746 
4747  if (Ty->isVectorType())
4748  return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
4749  : 4);
4750 
4751  // For single-element float/vector structs, we consider the whole type
4752  // to have the same alignment requirements as its single element.
4753  const Type *AlignTy = nullptr;
4754  if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
4755  const BuiltinType *BT = EltType->getAs<BuiltinType>();
4756  if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
4757  (BT && BT->isFloatingPoint()))
4758  AlignTy = EltType;
4759  }
4760 
4761  if (AlignTy)
4762  return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
4763  return CharUnits::fromQuantity(4);
4764 }
4765 
4766 ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
4767  uint64_t Size;
4768 
4769  // -msvr4-struct-return puts small aggregates in GPR3 and GPR4.
4770  if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI &&
4771  (Size = getContext().getTypeSize(RetTy)) <= 64) {
4772  // System V ABI (1995), page 3-22, specified:
4773  // > A structure or union whose size is less than or equal to 8 bytes
4774  // > shall be returned in r3 and r4, as if it were first stored in the
4775  // > 8-byte aligned memory area and then the low addressed word were
4776  // > loaded into r3 and the high-addressed word into r4. Bits beyond
4777  // > the last member of the structure or union are not defined.
4778  //
4779  // GCC for big-endian PPC32 inserts the pad before the first member,
4780  // not "beyond the last member" of the struct. To stay compatible
4781  // with GCC, we coerce the struct to an integer of the same size.
4782  // LLVM will extend it and return i32 in r3, or i64 in r3:r4.
4783  if (Size == 0)
4784  return ABIArgInfo::getIgnore();
4785  else {
4786  llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size);
4787  return ABIArgInfo::getDirect(CoerceTy);
4788  }
4789  }
4790 
4791  return DefaultABIInfo::classifyReturnType(RetTy);
4792 }
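// Illustrative aside, not part of the original file: with the in-register
// convention enabled, a hypothetical
//
//   struct Pair { short lo; short hi; };   // 4 bytes
//
// is coerced to i32 and returned in r3, an 8-byte struct is coerced to i64
// and returned in r3:r4, and anything larger falls through to the default
// (indirect) return handling.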
4793 
4794 // TODO: this implementation is now likely redundant with
4795 // DefaultABIInfo::EmitVAArg.
4796 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
4797  QualType Ty) const {
4798  if (getTarget().getTriple().isOSDarwin()) {
4799  auto TI = getContext().getTypeInfoInChars(Ty);
4800  TI.Align = getParamTypeAlignment(Ty);
4801 
4802  CharUnits SlotSize = CharUnits::fromQuantity(4);
4803  return emitVoidPtrVAArg(CGF, VAList, Ty,
4804  classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
4805  /*AllowHigherAlign=*/true);
4806  }
4807 
4808  const unsigned OverflowLimit = 8;
4809  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4810  // TODO: Implement this. For now ignore.
4811  (void)CTy;
4812  return Address::invalid(); // FIXME?
4813  }
4814 
4815  // struct __va_list_tag {
4816  // unsigned char gpr;
4817  // unsigned char fpr;
4818  // unsigned short reserved;
4819  // void *overflow_arg_area;
4820  // void *reg_save_area;
4821  // };
4822 
4823  bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
4824  bool isInt = !Ty->isFloatingType();
4825  bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
4826 
4827  // All aggregates are passed indirectly? That doesn't seem consistent
4828  // with the argument-lowering code.
4829  bool isIndirect = isAggregateTypeForABI(Ty);
4830 
4831  CGBuilderTy &Builder = CGF.Builder;
4832 
4833  // The calling convention either uses 1-2 GPRs or 1 FPR.
4834  Address NumRegsAddr = Address::invalid();
4835  if (isInt || IsSoftFloatABI) {
4836  NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
4837  } else {
4838  NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
4839  }
4840 
4841  llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
4842 
4843  // "Align" the register count when TY is i64.
4844  if (isI64 || (isF64 && IsSoftFloatABI)) {
4845  NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4846  NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
4847  }
4848 
4849  llvm::Value *CC =
4850  Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
4851 
4852  llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
4853  llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
4854  llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4855 
4856  Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4857 
4858  llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
4859  if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4860 
4861  // Case 1: consume registers.
4862  Address RegAddr = Address::invalid();
4863  {
4864  CGF.EmitBlock(UsingRegs);
4865 
4866  Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
4867  RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), CGF.Int8Ty,
4868  CharUnits::fromQuantity(8));
4869  assert(RegAddr.getElementType() == CGF.Int8Ty);
4870 
4871  // Floating-point registers start after the general-purpose registers.
4872  if (!(isInt || IsSoftFloatABI)) {
4873  RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
4874  CharUnits::fromQuantity(32));
4875  }
4876 
4877  // Get the address of the saved value by scaling the number of
4878  // registers we've used by the size of each register slot.
4879  CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
4880  llvm::Value *RegOffset =
4881  Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
4882  RegAddr = Address(
4883  Builder.CreateInBoundsGEP(CGF.Int8Ty, RegAddr.getPointer(), RegOffset),
4884  CGF.Int8Ty, RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
4885  RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
4886 
4887  // Increase the used-register count.
4888  NumRegs =
4889  Builder.CreateAdd(NumRegs,
4890  Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4891  Builder.CreateStore(NumRegs, NumRegsAddr);
4892 
4893  CGF.EmitBranch(Cont);
4894  }
4895 
4896  // Case 2: consume space in the overflow area.
4897  Address MemAddr = Address::invalid();
4898  {
4899  CGF.EmitBlock(UsingOverflow);
4900 
4901  Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4902 
4903  // Everything in the overflow area is rounded up to a size of at least 4.
4904  CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
4905 
4906  CharUnits Size;
4907  if (!isIndirect) {
4908  auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
4909  Size = TypeInfo.Width.alignTo(OverflowAreaAlign);
4910  } else {
4911  Size = CGF.getPointerSize();
4912  }
4913 
4914  Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
4915  Address OverflowArea =
4916  Address(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"), CGF.Int8Ty,
4917  OverflowAreaAlign);
4918  // Round up address of argument to alignment
4919  CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
4920  if (Align > OverflowAreaAlign) {
4921  llvm::Value *Ptr = OverflowArea.getPointer();
4922  OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
4923  OverflowArea.getElementType(), Align);
4924  }
4925 
4926  MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
4927 
4928  // Increase the overflow area.
4929  OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
4930  Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4931  CGF.EmitBranch(Cont);
4932  }
4933 
4934  CGF.EmitBlock(Cont);
4935 
4936  // Merge the cases with a phi.
4937  Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
4938  "vaarg.addr");
4939 
4940  // Load the pointer if the argument was passed indirectly.
4941  if (isIndirect) {
4942  Result = Address(Builder.CreateLoad(Result, "aggr"), ElementTy,
4943  getContext().getTypeAlignInChars(Ty));
4944  }
4945 
4946  return Result;
4947 }
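// Illustrative aside, not part of the original file: reading a hypothetical
// 'long long' argument through the code above first rounds the 'gpr' count
// up to an even value, loads the value from two consecutive 4-byte slots of
// reg_save_area, and bumps 'gpr' by 2; once 'gpr' reaches the OverflowLimit
// of 8, later arguments are taken from overflow_arg_area instead.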
4948 
4949 bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
4950  const llvm::Triple &Triple, const CodeGenOptions &Opts) {
4951  assert(Triple.isPPC32());
4952 
4953  switch (Opts.getStructReturnConvention()) {
4954  case CodeGenOptions::SRCK_Default:
4955  break;
4956  case CodeGenOptions::SRCK_OnStack: // -maix-struct-return
4957  return false;
4958  case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return
4959  return true;
4960  }
4961 
4962  if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())
4963  return true;
4964 
4965  return false;
4966 }
4967 
4968 bool
4969 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4970  llvm::Value *Address) const {
4971  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
4972  /*IsAIX*/ false);
4973 }
4974 
4975 // PowerPC-64
4976 
4977 namespace {
4978 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
4979 class PPC64_SVR4_ABIInfo : public SwiftABIInfo {
4980 public:
4981  enum ABIKind {
4982  ELFv1 = 0,
4983  ELFv2
4984  };
4985 
4986 private:
4987  static const unsigned GPRBits = 64;
4988  ABIKind Kind;
4989  bool IsSoftFloatABI;
4990 
4991 public:
4992  PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind,
4993  bool SoftFloatABI)
4994  : SwiftABIInfo(CGT), Kind(Kind), IsSoftFloatABI(SoftFloatABI) {}
4995 
4996  bool isPromotableTypeForABI(QualType Ty) const;
4997  CharUnits getParamTypeAlignment(QualType Ty) const;
4998 
4999  ABIArgInfo classifyReturnType(QualType RetTy) const;
5000  ABIArgInfo classifyArgumentType(QualType Ty) const;
5001 
5002  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
5003  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
5004  uint64_t Members) const override;
5005 
5006  // TODO: We can add more logic to computeInfo to improve performance.
5007  // Example: For aggregate arguments that fit in a register, we could
5008  // use getDirectInReg (as is done below for structs containing a single
5009  // floating-point value) to avoid pushing them to memory on function
5010  // entry. This would require changing the logic in PPCISelLowering
5011  // when lowering the parameters in the caller and args in the callee.
5012  void computeInfo(CGFunctionInfo &FI) const override {
5013  if (!getCXXABI().classifyReturnType(FI))
5014  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5015  for (auto &I : FI.arguments()) {
5016  // We rely on the default argument classification for the most part.
5017  // One exception: An aggregate containing a single floating-point
5018  // or vector item must be passed in a register if one is available.
5019  const Type *T = isSingleElementStruct(I.type, getContext());
5020  if (T) {
5021  const BuiltinType *BT = T->getAs<BuiltinType>();
5022  if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
5023  (BT && BT->isFloatingPoint())) {
5024  QualType QT(T, 0);
5025  I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
5026  continue;
5027  }
5028  }
5029  I.info = classifyArgumentType(I.type);
5030  }
5031  }
5032 
5033  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5034  QualType Ty) const override;
5035 
5036  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
5037  bool asReturnValue) const override {
5038  return occupiesMoreThan(CGT, scalars, /*total*/ 4);
5039  }
5040 
5041  bool isSwiftErrorInRegister() const override {
5042  return false;
5043  }
5044 };
5045 
5046 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
5047 
5048 public:
5049  PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
5050  PPC64_SVR4_ABIInfo::ABIKind Kind,
5051  bool SoftFloatABI)
5052  : TargetCodeGenInfo(
5053  std::make_unique<PPC64_SVR4_ABIInfo>(CGT, Kind, SoftFloatABI)) {}
5054 
5055  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5056  // This is recovered from gcc output.
5057  return 1; // r1 is the dedicated stack pointer
5058  }
5059 
5060  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5061  llvm::Value *Address) const override;
5062 };
5063 
5064 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
5065 public:
5066  PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
5067 
5068  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5069  // This is recovered from gcc output.
5070  return 1; // r1 is the dedicated stack pointer
5071  }
5072 
5073  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5074  llvm::Value *Address) const override;
5075 };
5076 
5077 }
5078 
5079 // Return true if the ABI requires Ty to be passed sign- or zero-
5080 // extended to 64 bits.
5081 bool
5082 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
5083  // Treat an enum type as its underlying type.
5084  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5085  Ty = EnumTy->getDecl()->getIntegerType();
5086 
5087  // Promotable integer types are required to be promoted by the ABI.
5088  if (isPromotableIntegerTypeForABI(Ty))
5089  return true;
5090 
5091  // In addition to the usual promotable integer types, we also need to
5092  // extend all 32-bit types, since the ABI requires promotion to 64 bits.
5093  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
5094  switch (BT->getKind()) {
5095  case BuiltinType::Int:
5096  case BuiltinType::UInt:
5097  return true;
5098  default:
5099  break;
5100  }
5101 
5102  if (const auto *EIT = Ty->getAs<BitIntType>())
5103  if (EIT->getNumBits() < 64)
5104  return true;
5105 
5106  return false;
5107 }
5108 
5109 /// isAlignedParamType - Determine whether a type requires 16-byte or
5110 /// higher alignment in the parameter area. Always returns at least 8.
5111 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
5112  // Complex types are passed just like their elements.
5113  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
5114  Ty = CTy->getElementType();
5115 
5116  auto FloatUsesVector = [this](QualType Ty){
5117  return Ty->isRealFloatingType() && &getContext().getFloatTypeSemantics(
5118  Ty) == &llvm::APFloat::IEEEquad();
5119  };
5120 
5121  // Only vector types of size 16 bytes need alignment (larger types are
5122  // passed via reference, smaller types are not aligned).
5123  if (Ty->isVectorType()) {
5124  return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
5125  } else if (FloatUsesVector(Ty)) {
5126  // According to ABI document section 'Optional Save Areas': If extended
5127  // precision floating-point values in IEEE BINARY 128 QUADRUPLE PRECISION
5128  // format are supported, map them to a single quadword, quadword aligned.
5129  return CharUnits::fromQuantity(16);
5130  }
5131 
5132  // For single-element float/vector structs, we consider the whole type
5133  // to have the same alignment requirements as its single element.
5134  const Type *AlignAsType = nullptr;
5135  const Type *EltType = isSingleElementStruct(Ty, getContext());
5136  if (EltType) {
5137  const BuiltinType *BT = EltType->getAs<BuiltinType>();
5138  if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
5139  (BT && BT->isFloatingPoint()))
5140  AlignAsType = EltType;
5141  }
5142 
5143  // Likewise for ELFv2 homogeneous aggregates.
5144  const Type *Base = nullptr;
5145  uint64_t Members = 0;
5146  if (!AlignAsType && Kind == ELFv2 &&
5147  isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
5148  AlignAsType = Base;
5149 
5150  // With special case aggregates, only vector base types need alignment.
5151  if (AlignAsType) {
5152  bool UsesVector = AlignAsType->isVectorType() ||
5153  FloatUsesVector(QualType(AlignAsType, 0));
5154  return CharUnits::fromQuantity(UsesVector ? 16 : 8);
5155  }
5156 
5157  // Otherwise, we only need alignment for any aggregate type that
5158  // has an alignment requirement of >= 16 bytes.
5159  if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
5160  return CharUnits::fromQuantity(16);
5161  }
5162 
5163  return CharUnits::fromQuantity(8);
5164 }
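// Illustrative aside, not part of the original file: a hypothetical
// 'vector double' parameter, an IEEE binary128 long double, or an ELFv2
// homogeneous aggregate of such elements all report a 16-byte parameter-area
// alignment here, while an ordinary struct of ints stays at the 8-byte
// doubleword default.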
5165 
5166 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
5167 /// aggregate. Base is set to the base element type, and Members is set
5168 /// to the number of base elements.
5169 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
5170  uint64_t &Members) const {
5171  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
5172  uint64_t NElements = AT->getSize().getZExtValue();
5173  if (NElements == 0)
5174  return false;
5175  if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
5176  return false;
5177  Members *= NElements;
5178  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
5179  const RecordDecl *RD = RT->getDecl();
5180  if (RD->hasFlexibleArrayMember())
5181  return false;
5182 
5183  Members = 0;
5184 
5185  // If this is a C++ record, check the properties of the record such as
5186  // bases and ABI specific restrictions
5187  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
5188  if (!getCXXABI().isPermittedToBeHomogeneousAggregate(CXXRD))
5189  return false;
5190 
5191  for (const auto &I : CXXRD->bases()) {
5192  // Ignore empty records.
5193  if (isEmptyRecord(getContext(), I.getType(), true))
5194  continue;
5195 
5196  uint64_t FldMembers;
5197  if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
5198  return false;
5199 
5200  Members += FldMembers;
5201  }
5202  }
5203 
5204  for (const auto *FD : RD->fields()) {
5205  // Ignore (non-zero arrays of) empty records.
5206  QualType FT = FD->getType();
5207  while (const ConstantArrayType *AT =
5208  getContext().getAsConstantArrayType(FT)) {
5209  if (AT->getSize().getZExtValue() == 0)
5210  return false;
5211  FT = AT->getElementType();
5212  }
5213  if (isEmptyRecord(getContext(), FT, true))
5214  continue;
5215 
5216  // For compatibility with GCC, ignore empty bitfields in C++ mode.
5217  if (getContext().getLangOpts().CPlusPlus &&
5218  FD->isZeroLengthBitField(getContext()))
5219  continue;
5220 
5221  uint64_t FldMembers;
5222  if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
5223  return false;
5224 
5225  Members = (RD->isUnion() ?
5226  std::max(Members, FldMembers) : Members + FldMembers);
5227  }
5228 
5229  if (!Base)
5230  return false;
5231 
5232  // Ensure there is no padding.
5233  if (getContext().getTypeSize(Base) * Members !=
5234  getContext().getTypeSize(Ty))
5235  return false;
5236  } else {
5237  Members = 1;
5238  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
5239  Members = 2;
5240  Ty = CT->getElementType();
5241  }
5242 
5243  // Most ABIs only support float, double, and some vector type widths.
5244  if (!isHomogeneousAggregateBaseType(Ty))
5245  return false;
5246 
5247  // The base type must be the same for all members. Types that
5248  // agree in both total size and mode (float vs. vector) are
5249  // treated as being equivalent here.
5250  const Type *TyPtr = Ty.getTypePtr();
5251  if (!Base) {
5252  Base = TyPtr;
5253  // If it's a vector with a non-power-of-2 element count, its total size is
5254  // already rounded up to a power of 2, so widen the base type to match.
5255  if (const VectorType *VT = Base->getAs<VectorType>()) {
5256  QualType EltTy = VT->getElementType();
5257  unsigned NumElements =
5258  getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
5259  Base = getContext()
5260  .getVectorType(EltTy, NumElements, VT->getVectorKind())
5261  .getTypePtr();
5262  }
5263  }
5264 
5265  if (Base->isVectorType() != TyPtr->isVectorType() ||
5266  getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
5267  return false;
5268  }
5269  return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
5270 }
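// Illustrative aside, not part of the original file: under the rules above, a
// hypothetical
//
//   struct RGBA { float r, g, b, a; };
//
// is a homogeneous aggregate with Base == float and Members == 4 (every field
// shares the base type and there is no padding), whereas adding an 'int'
// member or a flexible array member disqualifies the type.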
5271 
5272 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5273  // Homogeneous aggregates for ELFv2 must have base types of float,
5274  // double, long double, or 128-bit vectors.
5275  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
5276  if (BT->getKind() == BuiltinType::Float ||
5277  BT->getKind() == BuiltinType::Double ||
5278  BT->getKind() == BuiltinType::LongDouble ||
5279  BT->getKind() == BuiltinType::Ibm128 ||
5280  (getContext().getTargetInfo().hasFloat128Type() &&
5281  (BT->getKind() == BuiltinType::Float128))) {
5282  if (IsSoftFloatABI)
5283  return false;
5284  return true;
5285  }
5286  }
5287  if (const VectorType *VT = Ty->getAs<VectorType>()) {
5288  if (getContext().getTypeSize(VT) == 128)
5289  return true;
5290  }
5291  return false;
5292 }
5293 
5294 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
5295  const Type *Base, uint64_t Members) const {
5296  // Vector and fp128 types require one register, other floating point types
5297  // require one or two registers depending on their size.
5298  uint32_t NumRegs =
5299  ((getContext().getTargetInfo().hasFloat128Type() &&
5300  Base->isFloat128Type()) ||
5301  Base->isVectorType()) ? 1
5302  : (getContext().getTypeSize(Base) + 63) / 64;
5303 
5304  // Homogeneous Aggregates may occupy at most 8 registers.
5305  return Members * NumRegs <= 8;
5306 }
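// Illustrative aside, not part of the original file: with the cap above, a
// hypothetical struct of eight floats or eight 128-bit vectors (8 x 1
// register) still qualifies, while nine doubles (9 x 1) or five IBM
// double-double long doubles (5 x 2) exceed the eight-register budget.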
5307 
5308 ABIArgInfo
5309 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
5310  Ty = useFirstFieldIfTransparentUnion(Ty);
5311 
5312  if (Ty->isAnyComplexType())
5313  return ABIArgInfo::getDirect();
5314 
5315  // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
5316  // or via reference (larger than 16 bytes).
5317  if (Ty->isVectorType()) {
5318  uint64_t Size = getContext().getTypeSize(Ty);
5319  if (Size > 128)
5320  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5321  else if (Size < 128) {
5322  llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
5323  return ABIArgInfo::getDirect(CoerceTy);
5324  }
5325  }
5326 
5327  if (const auto *EIT = Ty->getAs<BitIntType>())
5328  if (EIT->getNumBits() > 128)
5329  return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
5330 
5331  if (isAggregateTypeForABI(Ty)) {
5332  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
5333  return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
5334 
5335  uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
5336  uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
5337 
5338  // ELFv2 homogeneous aggregates are passed as array types.
5339  const Type *Base = nullptr;
5340  uint64_t Members = 0;
5341  if (Kind == ELFv2 &&
5342  isHomogeneousAggregate(Ty, Base, Members)) {
5343  llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
5344  llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
5345  return ABIArgInfo::getDirect(CoerceTy);
5346  }
5347 
5348  // If an aggregate may end up fully in registers, we do not
5349  // use the ByVal method, but pass the aggregate as array.
5350  // This is usually beneficial since we avoid forcing the
5351  // back-end to store the argument to memory.
5352  uint64_t Bits = getContext().getTypeSize(Ty);
5353  if (Bits > 0 && Bits <= 8 * GPRBits) {
5354  llvm::Type *CoerceTy;
5355 
5356  // Types up to 8 bytes are passed as integer type (which will be
5357  // properly aligned in the argument save area doubleword).
5358  if (Bits <= GPRBits)
5359  CoerceTy =
5360  llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
5361  // Larger types are passed as arrays, with the base type selected
5362  // according to the required alignment in the save area.
5363  else {
5364  uint64_t RegBits = ABIAlign * 8;
5365  uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
5366  llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
5367  CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
5368  }
5369 
5370  return ABIArgInfo::getDirect(CoerceTy);
5371  }
5372 
5373  // All other aggregates are passed ByVal.
5374  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
5375  /*ByVal=*/true,
5376  /*Realign=*/TyAlign > ABIAlign);
5377  }
5378 
5379  return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
5380  : ABIArgInfo::getDirect());
5381 }
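// Illustrative aside, not part of the original file: under the coercion above
// a hypothetical 6-byte struct is passed as a single i48, a 12-byte struct of
// three ints as [2 x i64], and, under ELFv2, a homogeneous
// 'struct { double x, y; }' as [2 x double].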
5382 
5383 ABIArgInfo
5384 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
5385  if (RetTy->isVoidType())
5386  return ABIArgInfo::getIgnore();
5387 
5388  if (RetTy->isAnyComplexType())
5389  return ABIArgInfo::getDirect();
5390 
5391  // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
5392  // or via reference (larger than 16 bytes).
5393  if (RetTy->isVectorType()) {
5394  uint64_t Size = getContext().getTypeSize(RetTy);
5395  if (Size > 128)
5396  return getNaturalAlignIndirect(RetTy);
5397  else if (Size < 128) {
5398  llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
5399  return ABIArgInfo::getDirect(CoerceTy);
5400  }
5401  }
5402 
5403  if (const auto *EIT = RetTy->getAs<BitIntType>())
5404  if (EIT->getNumBits() > 128)
5405  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
5406 
5407  if (isAggregateTypeForABI(RetTy)) {
5408  // ELFv2 homogeneous aggregates are returned as array types.
5409  const Type *Base = nullptr;
5410  uint64_t Members = 0;
5411  if (Kind == ELFv2 &&
5412  isHomogeneousAggregate(RetTy, Base, Members)) {
5413  llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
5414  llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
5415  return ABIArgInfo::getDirect(CoerceTy);
5416  }
5417 
5418  // ELFv2 small aggregates are returned in up to two registers.
5419  uint64_t Bits = getContext().getTypeSize(RetTy);
5420  if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
5421  if (Bits == 0)
5422  return ABIArgInfo::getIgnore();
5423 
5424  llvm::Type *CoerceTy;
5425  if (Bits > GPRBits) {
5426  CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
5427  CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
5428  } else
5429  CoerceTy =
5430  llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
5431  return ABIArgInfo::getDirect(CoerceTy);
5432  }
5433 
5434  // All other aggregates are returned indirectly.
5435  return getNaturalAlignIndirect(RetTy);
5436  }
5437 
5438  return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
5439  : ABIArgInfo::getDirect());
5440 }
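// Illustrative aside, not part of the original file: under ELFv2 the return
// path above keeps small aggregates in registers, so a hypothetical 12-byte
// struct is returned as { i64, i64 }, an 8-byte struct as i64, and any
// non-homogeneous aggregate larger than two doublewords is returned
// indirectly through an sret pointer.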
5441 
5442 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
5443 Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5444  QualType Ty) const {
5445  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
5446  TypeInfo.Align = getParamTypeAlignment(Ty);
5447 
5448  CharUnits SlotSize = CharUnits::fromQuantity(8);
5449 
5450  // If we have a complex type and the base type is smaller than 8 bytes,
5451  // the ABI calls for the real and imaginary parts to be right-adjusted
5452  // in separate doublewords. However, Clang expects us to produce a
5453  // pointer to a structure with the two parts packed tightly. So generate
5454  // loads of the real and imaginary parts relative to the va_list pointer,
5455  // and store them to a temporary structure.
5456  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
5457  CharUnits EltSize = TypeInfo.Width / 2;
5458  if (EltSize < SlotSize)
5459  return complexTempStructure(CGF, VAListAddr, Ty, SlotSize, EltSize, CTy);
5460  }
5461 
5462  // Otherwise, just use the general rule.
5463  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
5464  TypeInfo, SlotSize, /*AllowHigher*/ true);
5465 }
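// Illustrative aside, not part of the original file: a hypothetical va_arg of
// '_Complex float' has 4-byte parts stored in 8-byte slots, so it takes the
// complexTempStructure path above: the two right-adjusted parts are loaded
// from their doublewords and repacked tightly into a temporary whose address
// is returned. '_Complex double', whose parts already fill a slot, simply
// uses the general rule.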
5466 
5467 bool
5468 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
5469  CodeGen::CodeGenFunction &CGF,
5470  llvm::Value *Address) const {
5471  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
5472  /*IsAIX*/ false);
5473 }
5474 
5475 bool
5476 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5477  llvm::Value *Address) const {
5478  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
5479  /*IsAIX*/ false);
5480 }
5481 
5482 //===----------------------------------------------------------------------===//
5483 // AArch64 ABI Implementation
5484 //===----------------------------------------------------------------------===//
5485 
5486 namespace {
5487 
5488 class AArch64ABIInfo : public SwiftABIInfo {
5489 public:
5490  enum ABIKind {
5491  AAPCS = 0,
5492  DarwinPCS,
5493  Win64
5494  };
5495 
5496 private:
5497  ABIKind Kind;
5498 
5499 public:
5500  AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
5501  : SwiftABIInfo(CGT), Kind(Kind) {}
5502 
5503 private:
5504  ABIKind getABIKind() const { return Kind; }
5505  bool isDarwinPCS() const { return Kind == DarwinPCS; }
5506 
5507  ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
5508  ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic,
5509  unsigned CallingConvention) const;
5510  ABIArgInfo coerceIllegalVector(QualType Ty) const;
5511  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
5512  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
5513  uint64_t Members) const override;
5514 
5515  bool isIllegalVectorType(QualType Ty) const;
5516 
5517  void computeInfo(CGFunctionInfo &FI) const override {
5518  if (!::classifyReturnType(getCXXABI(), FI, *this))
5519  FI.getReturnInfo() =
5520  classifyReturnType(FI.getReturnType(), FI.isVariadic());
5521 
5522  for (auto &it : FI.arguments())
5523  it.info = classifyArgumentType(it.type, FI.isVariadic(),
5524  FI.getCallingConvention());
5525  }
5526 
5527  Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
5528  CodeGenFunction &CGF) const;
5529 
5530  Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
5531  CodeGenFunction &CGF) const;
5532 
5533  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5534  QualType Ty) const override {
5535  llvm::Type *BaseTy = CGF.ConvertType(Ty);
5536  if (isa<llvm::ScalableVectorType>(BaseTy))
5537  llvm::report_fatal_error("Passing SVE types to variadic functions is "
5538  "currently not supported");
5539 
5540  return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
5541  : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
5542  : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
5543  }
5544 
5545  Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
5546  QualType Ty) const override;
5547 
5548  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
5549  bool asReturnValue) const override {
5550  return occupiesMoreThan(CGT, scalars, /*total*/ 4);
5551  }
5552  bool isSwiftErrorInRegister() const override {
5553  return true;
5554  }
5555 
5556  bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
5557  unsigned elts) const override;
5558 
5559  bool allowBFloatArgsAndRet() const override {
5560  return getTarget().hasBFloat16Type();
5561  }
5562 };
5563 
5564 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
5565 public:
5566  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
5567  : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {}
5568 
5569  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
5570  return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
5571  }
5572 
5573  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5574  return 31;
5575  }
5576 
5577  bool doesReturnSlotInterfereWithArgs() const override { return false; }
5578 
5579  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5580  CodeGen::CodeGenModule &CGM) const override {
5581  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5582  if (!FD)
5583  return;
5584 
5585  const auto *TA = FD->getAttr<TargetAttr>();
5586  if (TA == nullptr)
5587  return;
5588 
5589  ParsedTargetAttr Attr = TA->parse();
5590  if (Attr.BranchProtection.empty())
5591  return;
5592 
5593  TargetInfo::BranchProtectionInfo BPI;
5594  StringRef Error;
5595  (void)CGM.getTarget().validateBranchProtection(
5596  Attr.BranchProtection, Attr.Architecture, BPI, Error);
5597  assert(Error.empty());
5598 
5599  auto *Fn = cast<llvm::Function>(GV);
5600  static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
5601  Fn->addFnAttr("sign-return-address", SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
5602 
5603  if (BPI.SignReturnAddr != LangOptions::SignReturnAddressScopeKind::None) {
5604  Fn->addFnAttr("sign-return-address-key",
5605  BPI.SignKey == LangOptions::SignReturnAddressKeyKind::AKey
5606  ? "a_key"
5607  : "b_key");
5608  }
5609 
5610  Fn->addFnAttr("branch-target-enforcement",
5611  BPI.BranchTargetEnforcement ? "true" : "false");
5612  }
5613 
5614  bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,
5615  llvm::Type *Ty) const override {
5616  if (CGF.getTarget().hasFeature("ls64")) {
5617  auto *ST = dyn_cast<llvm::StructType>(Ty);
5618  if (ST && ST->getNumElements() == 1) {
5619  auto *AT = dyn_cast<llvm::ArrayType>(ST->getElementType(0));
5620  if (AT && AT->getNumElements() == 8 &&
5621  AT->getElementType()->isIntegerTy(64))
5622  return true;
5623  }
5624  }
5625  return TargetCodeGenInfo::isScalarizableAsmOperand(CGF, Ty);
5626  }
5627 };
5628 
5629 class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
5630 public:
5631  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K)
5632  : AArch64TargetCodeGenInfo(CGT, K) {}
5633 
5634  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5635  CodeGen::CodeGenModule &CGM) const override;
5636 
5637  void getDependentLibraryOption(llvm::StringRef Lib,
5638  llvm::SmallString<24> &Opt) const override {
5639  Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5640  }
5641 
5642  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
5643  llvm::SmallString<32> &Opt) const override {
5644  Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
5645  }
5646 };
5647 
5648 void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
5649  const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
5650  AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5651  if (GV->isDeclaration())
5652  return;
5653  addStackProbeTargetAttributes(D, GV, CGM);
5654 }
5655 }
5656 
5657 ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
5658  assert(Ty->isVectorType() && "expected vector type!");
5659 
5660  const auto *VT = Ty->castAs<VectorType>();
5661  if (VT->getVectorKind() == VectorType::SveFixedLengthPredicateVector) {
5662  assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
5663  assert(VT->getElementType()->castAs<BuiltinType>()->getKind() ==
5664  BuiltinType::UChar &&
5665  "unexpected builtin type for SVE predicate!");
5666  return ABIArgInfo::getDirect(llvm::ScalableVectorType::get(
5667  llvm::Type::getInt1Ty(getVMContext()), 16));
5668  }
5669 
5670  if (VT->getVectorKind() == VectorType::SveFixedLengthDataVector) {
5671  assert(VT->getElementType()->isBuiltinType() && "expected builtin type!");
5672 
5673  const auto *BT = VT->getElementType()->castAs<BuiltinType>();
5674  llvm::ScalableVectorType *ResType = nullptr;
5675  switch (BT->getKind()) {
5676  default:
5677  llvm_unreachable("unexpected builtin type for SVE vector!");
5678  case BuiltinType::SChar:
5679  case BuiltinType::UChar:
5680  ResType = llvm::ScalableVectorType::get(
5681  llvm::Type::getInt8Ty(getVMContext()), 16);
5682  break;
5683  case BuiltinType::Short:
5684  case BuiltinType::UShort:
5685  ResType = llvm::ScalableVectorType::get(
5686  llvm::Type::getInt16Ty(getVMContext()), 8);
5687  break;
5688  case BuiltinType::Int:
5689  case BuiltinType::UInt:
5690  ResType = llvm::ScalableVectorType::get(
5691  llvm::Type::getInt32Ty(getVMContext()), 4);
5692  break;
5693  case BuiltinType::Long:
5694  case BuiltinType::ULong:
5695  ResType = llvm::ScalableVectorType::get(
5696  llvm::Type::getInt64Ty(getVMContext()), 2);
5697  break;
5698  case BuiltinType::Half:
5699  ResType = llvm::ScalableVectorType::get(
5700  llvm::Type::getHalfTy(getVMContext()), 8);
5701  break;
5702  case BuiltinType::Float:
5703  ResType = llvm::ScalableVectorType::get(
5704  llvm::Type::getFloatTy(getVMContext()), 4);
5705  break;
5706  case BuiltinType::Double:
5707  ResType = llvm::ScalableVectorType::get(
5708  llvm::Type::getDoubleTy(getVMContext()), 2);
5709  break;
5710  case BuiltinType::BFloat16:
5711  ResType = llvm::ScalableVectorType::get(
5712  llvm::Type::getBFloatTy(getVMContext()), 8);
5713  break;
5714  }
5715  return ABIArgInfo::getDirect(ResType);
5716  }
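  // Illustrative aside, not part of the original file: with the mapping
  // above, a hypothetical fixed-length SVE type declared as
  //
  //   typedef svint32_t fixed_i32 __attribute__((arm_sve_vector_bits(512)));
  //
  // is coerced to the scalable <vscale x 4 x i32>, and a predicate bound the
  // same way is coerced to <vscale x 16 x i1>, regardless of the chosen
  // vector width.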
5717 
5718  uint64_t Size = getContext().getTypeSize(Ty);
5719  // Android promotes <2 x i8> to i16,