AArch64.cpp
1//===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements AArch64 TargetInfo objects.
10//
11//===----------------------------------------------------------------------===//
12
13#include "AArch64.h"
18#include "llvm/ADT/APSInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/StringExtras.h"
21#include "llvm/ADT/StringSwitch.h"
22#include "llvm/TargetParser/AArch64TargetParser.h"
23#include "llvm/TargetParser/ARMTargetParserCommon.h"
24#include <optional>
25
26using namespace clang;
27using namespace clang::targets;
28
29static constexpr Builtin::Info BuiltinInfo[] = {
30#define BUILTIN(ID, TYPE, ATTRS) \
31 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
32#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
33 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
34#include "clang/Basic/BuiltinsNEON.def"
35
36#define BUILTIN(ID, TYPE, ATTRS) \
37 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
38#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
39 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
40#include "clang/Basic/BuiltinsSVE.def"
41
42#define BUILTIN(ID, TYPE, ATTRS) \
43 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
44#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
45 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
46#include "clang/Basic/BuiltinsSME.def"
47
48#define BUILTIN(ID, TYPE, ATTRS) \
49 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
50#define LANGBUILTIN(ID, TYPE, ATTRS, LANG) \
51 {#ID, TYPE, ATTRS, nullptr, HeaderDesc::NO_HEADER, LANG},
52#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
53 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::NO_HEADER, ALL_LANGUAGES},
54#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
55 {#ID, TYPE, ATTRS, FEATURE, HeaderDesc::HEADER, LANGS},
56#include "clang/Basic/BuiltinsAArch64.def"
57};
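// Each BUILTIN/TARGET_BUILTIN record in the .def files above expands to one
// Builtin::Info initializer. Illustrative example (editorial, not part of the
// upstream file): a record such as
//   TARGET_BUILTIN(__builtin_arm_crc32b, "UiUiUc", "nc", "crc")
// would expand, via the TARGET_BUILTIN definition above, to
//   {"__builtin_arm_crc32b", "UiUiUc", "nc", "crc", HeaderDesc::NO_HEADER, ALL_LANGUAGES},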
58
59void AArch64TargetInfo::setArchFeatures() {
60 if (*ArchInfo == llvm::AArch64::ARMV8R) {
61 HasDotProd = true;
62 HasDIT = true;
63 HasFlagM = true;
64 HasRCPC = true;
65 FPU |= NeonMode;
66 HasCCPP = true;
67 HasCRC = true;
68 HasLSE = true;
69 HasRDM = true;
70 } else if (ArchInfo->Version.getMajor() == 8) {
71 if (ArchInfo->Version.getMinor() >= 7u) {
72 HasWFxT = true;
73 }
74 if (ArchInfo->Version.getMinor() >= 6u) {
75 HasBFloat16 = true;
76 HasMatMul = true;
77 }
78 if (ArchInfo->Version.getMinor() >= 5u) {
79 HasAlternativeNZCV = true;
80 HasFRInt3264 = true;
81 HasSSBS = true;
82 HasSB = true;
83 HasPredRes = true;
84 HasBTI = true;
85 }
86 if (ArchInfo->Version.getMinor() >= 4u) {
87 HasDotProd = true;
88 HasDIT = true;
89 HasFlagM = true;
90 }
91 if (ArchInfo->Version.getMinor() >= 3u) {
92 HasRCPC = true;
93 FPU |= NeonMode;
94 }
95 if (ArchInfo->Version.getMinor() >= 2u) {
96 HasCCPP = true;
97 }
98 if (ArchInfo->Version.getMinor() >= 1u) {
99 HasCRC = true;
100 HasLSE = true;
101 HasRDM = true;
102 }
103 } else if (ArchInfo->Version.getMajor() == 9) {
104 if (ArchInfo->Version.getMinor() >= 2u) {
105 HasWFxT = true;
106 }
107 if (ArchInfo->Version.getMinor() >= 1u) {
108 HasBFloat16 = true;
109 HasMatMul = true;
110 }
111 FPU |= SveMode;
112 HasSVE2 = true;
113 HasFullFP16 = true;
114 HasAlternativeNZCV = true;
115 HasFRInt3264 = true;
116 HasSSBS = true;
117 HasSB = true;
118 HasPredRes = true;
119 HasBTI = true;
120 HasDotProd = true;
121 HasDIT = true;
122 HasFlagM = true;
123 HasRCPC = true;
124 FPU |= NeonMode;
125 HasCCPP = true;
126 HasCRC = true;
127 HasLSE = true;
128 HasRDM = true;
129 }
130}
131
132AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
133 const TargetOptions &Opts)
134 : TargetInfo(Triple), ABI("aapcs") {
135 if (getTriple().isOSOpenBSD()) {
136 Int64Type = SignedLongLong;
137 IntMaxType = SignedLongLong;
138 } else {
139 if (!getTriple().isOSDarwin() && !getTriple().isOSNetBSD())
140 WCharType = UnsignedInt;
141
142 Int64Type = SignedLong;
143 IntMaxType = SignedLong;
144 }
145
146 AddrSpaceMap = &ARM64AddrSpaceMap;
147
148 // All AArch64 implementations support ARMv8 FP, which makes half a legal type.
149 HasLegalHalfType = true;
150 HalfArgsAndReturns = true;
151 HasFloat16 = true;
152 HasStrictFP = true;
153
154 if (Triple.isArch64Bit())
155 LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
156 else
157 LongWidth = LongAlign = PointerWidth = PointerAlign = 32;
158
159 BitIntMaxAlign = 128;
160 MaxVectorAlign = 128;
161 MaxAtomicInlineWidth = 128;
162 MaxAtomicPromoteWidth = 128;
163
164 LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
165 LongDoubleFormat = &llvm::APFloat::IEEEquad();
166
167 BFloat16Width = BFloat16Align = 16;
168 BFloat16Format = &llvm::APFloat::BFloat();
169
170 // Make __builtin_ms_va_list available.
171 HasBuiltinMSVaList = true;
172
173 // Make the SVE types available. Note that this deliberately doesn't
174 // depend on SveMode, since in principle it should be possible to turn
175 // SVE on and off within a translation unit. It should also be possible
176 // to compile the global declaration:
177 //
178 // __SVInt8_t *ptr;
179 //
180 // even without SVE.
181 HasAArch64SVETypes = true;
182
183 // {} in inline assembly are neon specifiers, not assembly variant
184 // specifiers.
185 NoAsmVariants = true;
186
187 // AAPCS gives rules for bitfields. 7.1.7 says: "The container type
188 // contributes to the alignment of the containing aggregate in the same way
189 // a plain (non bit-field) member of that type would, without exception for
190 // zero-sized or anonymous bit-fields."
191 assert(UseBitFieldTypeAlignment && "bitfields affect type alignment");
192 UseZeroLengthBitfieldAlignment = true;
193
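// Illustrative example (editorial, not in the upstream file): because the
// container type of a zero-sized bit-field still contributes its alignment,
// a struct such as
//   struct S { char c; long long : 0; };
// is 8-byte aligned under AAPCS64, exactly as if it held a named long long.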
194 HasUnalignedAccess = true;
195
196 // AArch64 targets default to using the ARM C++ ABI.
197 TheCXXABI.set(TargetCXXABI::GenericAArch64);
198
199 if (Triple.getOS() == llvm::Triple::Linux)
200 this->MCountName = "\01_mcount";
201 else if (Triple.getOS() == llvm::Triple::UnknownOS)
202 this->MCountName =
203 Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
204}
205
206StringRef AArch64TargetInfo::getABI() const { return ABI; }
207
208bool AArch64TargetInfo::setABI(const std::string &Name) {
209 if (Name != "aapcs" && Name != "aapcs-soft" && Name != "darwinpcs" &&
210 Name != "pauthtest")
211 return false;
212
213 ABI = Name;
214 return true;
215}
216
217bool AArch64TargetInfo::validateTarget(DiagnosticsEngine &Diags) const {
218 if (hasFeature("fp") && ABI == "aapcs-soft") {
219 // aapcs-soft is not allowed for targets with an FPU, to avoid there being
220 // two incompatible ABIs.
221 Diags.Report(diag::err_target_unsupported_abi_with_fpu) << ABI;
222 return false;
223 }
224 if (getTriple().getEnvironment() == llvm::Triple::PAuthTest &&
225 getTriple().getOS() != llvm::Triple::Linux) {
226 Diags.Report(diag::err_target_unsupported_abi_for_triple)
227 << getTriple().getEnvironmentName() << getTriple().getTriple();
228 return false;
229 }
230 return true;
231}
232
234 StringRef RegName, unsigned RegSize, bool &HasSizeMismatch) const {
235 if (RegName == "sp") {
236 HasSizeMismatch = RegSize != 64;
237 return true;
238 }
239 if (RegName.starts_with("w"))
240 HasSizeMismatch = RegSize != 32;
241 else if (RegName.starts_with("x"))
242 HasSizeMismatch = RegSize != 64;
243 else
244 return false;
245 StringRef RegNum = RegName.drop_front();
246 // Check if the register is reserved. See also
247 // AArch64TargetLowering::getRegisterByName().
248 return RegNum == "0" ||
249 (RegNum == "18" &&
250 llvm::AArch64::isX18ReservedByDefault(getTriple())) ||
251 getTargetOpts().FeatureMap.lookup(("reserve-x" + RegNum).str());
252}
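// Illustrative usage (editorial, not in the upstream file): a global register
// variable such as
//   register unsigned long g asm("x28");
// is only accepted by the check above when x28 is reserved, e.g. via
// -ffixed-x28, which adds "reserve-x28" to the target feature map.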
253
254bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
255 BranchProtectionInfo &BPI,
256 StringRef &Err) const {
257 llvm::ARM::ParsedBranchProtection PBP;
258 if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err, HasPAuthLR))
259 return false;
260
261 BPI.SignReturnAddr =
262 llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
263 .Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
264 .Case("all", LangOptions::SignReturnAddressScopeKind::All)
265 .Default(LangOptions::SignReturnAddressScopeKind::None);
266
267 if (PBP.Key == "a_key")
268 BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
269 else
270 BPI.SignKey = LangOptions::SignReturnAddressKeyKind::BKey;
271
272 BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
273 BPI.BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
274 BPI.GuardedControlStack = PBP.GuardedControlStack;
275 return true;
276}
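// Illustrative example (editorial, not in the upstream file): a specification
// such as "pac-ret+leaf+b-key+bti" parses to Scope "all" and Key "b_key", so
// BPI ends up with SignReturnAddr == All, SignKey == BKey and
// BranchTargetEnforcement == true.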
277
278bool AArch64TargetInfo::isValidCPUName(StringRef Name) const {
279 return llvm::AArch64::parseCpu(Name).has_value();
280}
281
282bool AArch64TargetInfo::setCPU(const std::string &Name) {
283 return isValidCPUName(Name);
284}
285
286void AArch64TargetInfo::fillValidCPUList(
287 SmallVectorImpl<StringRef> &Values) const {
288 llvm::AArch64::fillValidCPUArchList(Values);
289}
290
291void AArch64TargetInfo::getTargetDefinesARMV81A(const LangOptions &Opts,
292 MacroBuilder &Builder) const {
293 Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
294}
295
296void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
297 MacroBuilder &Builder) const {
298 // Also include the ARMv8.1 defines
299 getTargetDefinesARMV81A(Opts, Builder);
300}
301
302void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
303 MacroBuilder &Builder) const {
304 Builder.defineMacro("__ARM_FEATURE_COMPLEX", "1");
305 Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
306 // Also include the Armv8.2 defines
307 getTargetDefinesARMV82A(Opts, Builder);
308}
309
310void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
311 MacroBuilder &Builder) const {
312 // Also include the Armv8.3 defines
313 getTargetDefinesARMV83A(Opts, Builder);
314}
315
316void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
317 MacroBuilder &Builder) const {
318 Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
319 // Also include the Armv8.4 defines
320 getTargetDefinesARMV84A(Opts, Builder);
321}
322
323void AArch64TargetInfo::getTargetDefinesARMV86A(const LangOptions &Opts,
324 MacroBuilder &Builder) const {
325 // Also include the Armv8.5 defines
326 // FIXME: Armv8.6 makes the following extensions mandatory:
327 // - __ARM_FEATURE_BF16
328 // - __ARM_FEATURE_MATMUL_INT8
329 // Handle them here.
330 getTargetDefinesARMV85A(Opts, Builder);
331}
332
333void AArch64TargetInfo::getTargetDefinesARMV87A(const LangOptions &Opts,
334 MacroBuilder &Builder) const {
335 // Also include the Armv8.6 defines
336 getTargetDefinesARMV86A(Opts, Builder);
337}
338
339void AArch64TargetInfo::getTargetDefinesARMV88A(const LangOptions &Opts,
340 MacroBuilder &Builder) const {
341 // Also include the Armv8.7 defines
342 getTargetDefinesARMV87A(Opts, Builder);
343}
344
345void AArch64TargetInfo::getTargetDefinesARMV89A(const LangOptions &Opts,
346 MacroBuilder &Builder) const {
347 // Also include the Armv8.8 defines
348 getTargetDefinesARMV88A(Opts, Builder);
349}
350
351void AArch64TargetInfo::getTargetDefinesARMV9A(const LangOptions &Opts,
352 MacroBuilder &Builder) const {
353 // Armv9-A maps to Armv8.5-A
354 getTargetDefinesARMV85A(Opts, Builder);
355}
356
357void AArch64TargetInfo::getTargetDefinesARMV91A(const LangOptions &Opts,
358 MacroBuilder &Builder) const {
359 // Armv9.1-A maps to Armv8.6-A
360 getTargetDefinesARMV86A(Opts, Builder);
361}
362
363void AArch64TargetInfo::getTargetDefinesARMV92A(const LangOptions &Opts,
364 MacroBuilder &Builder) const {
365 // Armv9.2-A maps to Armv8.7-A
366 getTargetDefinesARMV87A(Opts, Builder);
367}
368
369void AArch64TargetInfo::getTargetDefinesARMV93A(const LangOptions &Opts,
370 MacroBuilder &Builder) const {
371 // Armv9.3-A maps to Armv8.8-A
372 getTargetDefinesARMV88A(Opts, Builder);
373}
374
375void AArch64TargetInfo::getTargetDefinesARMV94A(const LangOptions &Opts,
376 MacroBuilder &Builder) const {
377 // Armv9.4-A maps to Armv8.9-A
378 getTargetDefinesARMV89A(Opts, Builder);
379}
380
381void AArch64TargetInfo::getTargetDefinesARMV95A(const LangOptions &Opts,
382 MacroBuilder &Builder) const {
383 // Armv9.5-A does not have a v8.* equivalent, but is a superset of v9.4-A.
384 getTargetDefinesARMV94A(Opts, Builder);
385}
386
387void AArch64TargetInfo::getTargetDefinesARMV96A(const LangOptions &Opts,
388 MacroBuilder &Builder) const {
389 // Armv9.6-A does not have a v8.* equivalent, but is a superset of v9.5-A.
390 getTargetDefinesARMV95A(Opts, Builder);
391}
392
393void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
394 MacroBuilder &Builder) const {
395 // Target identification.
396 if (getTriple().isWindowsArm64EC()) {
397 // Define the same set of macros as would be defined on x86_64 to ensure that
398 // ARM64EC datatype layouts match those of x86_64 compiled code
399 Builder.defineMacro("__amd64__");
400 Builder.defineMacro("__amd64");
401 Builder.defineMacro("__x86_64");
402 Builder.defineMacro("__x86_64__");
403 Builder.defineMacro("__arm64ec__");
404 } else {
405 Builder.defineMacro("__aarch64__");
406 }
407
408 // Inline assembly supports AArch64 flag outputs.
409 Builder.defineMacro("__GCC_ASM_FLAG_OUTPUTS__");
410
411 std::string CodeModel = getTargetOpts().CodeModel;
412 if (CodeModel == "default")
413 CodeModel = "small";
414 for (char &c : CodeModel)
415 c = toupper(c);
416 Builder.defineMacro("__AARCH64_CMODEL_" + CodeModel + "__");
417
418 // ACLE predefines. Many can only have one possible value on v8 AArch64.
419 Builder.defineMacro("__ARM_ACLE_VERSION(year, quarter, patch)",
420 "(100 * (year) + 10 * (quarter) + (patch))");
421#define ARM_ACLE_VERSION(Y, Q, P) (100 * (Y) + 10 * (Q) + (P))
422 Builder.defineMacro("__ARM_ACLE", Twine(ARM_ACLE_VERSION(2024, 2, 0)));
423 Builder.defineMacro("__FUNCTION_MULTI_VERSIONING_SUPPORT_LEVEL",
424 Twine(ARM_ACLE_VERSION(2024, 2, 0)));
425#undef ARM_ACLE_VERSION
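// Worked example (editorial, not in the upstream file): __ARM_ACLE_VERSION(2024, 2, 0)
// evaluates to 100 * 2024 + 10 * 2 + 0 == 202420, so user code can write
//   #if __ARM_ACLE >= __ARM_ACLE_VERSION(2024, 2, 0)
// to require ACLE 2024Q2 or later.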
426 Builder.defineMacro("__ARM_ARCH",
427 std::to_string(ArchInfo->Version.getMajor()));
428 Builder.defineMacro("__ARM_ARCH_PROFILE",
429 std::string("'") + (char)ArchInfo->Profile + "'");
430
431 Builder.defineMacro("__ARM_64BIT_STATE", "1");
432 Builder.defineMacro("__ARM_PCS_AAPCS64", "1");
433 Builder.defineMacro("__ARM_ARCH_ISA_A64", "1");
434
435 Builder.defineMacro("__ARM_FEATURE_CLZ", "1");
436 Builder.defineMacro("__ARM_FEATURE_FMA", "1");
437 Builder.defineMacro("__ARM_FEATURE_LDREX", "0xF");
438 Builder.defineMacro("__ARM_FEATURE_IDIV", "1"); // As specified in ACLE
439 Builder.defineMacro("__ARM_FEATURE_DIV"); // For backwards compatibility
440 Builder.defineMacro("__ARM_FEATURE_NUMERIC_MAXMIN", "1");
441 Builder.defineMacro("__ARM_FEATURE_DIRECTED_ROUNDING", "1");
442
443 Builder.defineMacro("__ARM_ALIGN_MAX_STACK_PWR", "4");
444
445 // These macros are set when Clang can parse declarations with these
446 // attributes.
447 Builder.defineMacro("__ARM_STATE_ZA", "1");
448 Builder.defineMacro("__ARM_STATE_ZT0", "1");
449
450 // 0xe implies support for half, single and double precision operations.
451 if (FPU & FPUMode)
452 Builder.defineMacro("__ARM_FP", "0xE");
453
454 // PCS specifies this for SysV variants, which is all we support. Other ABIs
455 // may choose __ARM_FP16_FORMAT_ALTERNATIVE.
456 Builder.defineMacro("__ARM_FP16_FORMAT_IEEE", "1");
457 Builder.defineMacro("__ARM_FP16_ARGS", "1");
458
459 // Clang supports arm_neon_sve_bridge.h
460 Builder.defineMacro("__ARM_NEON_SVE_BRIDGE", "1");
461
462 if (Opts.UnsafeFPMath)
463 Builder.defineMacro("__ARM_FP_FAST", "1");
464
465 Builder.defineMacro("__ARM_SIZEOF_WCHAR_T",
466 Twine(Opts.WCharSize ? Opts.WCharSize : 4));
467
468 Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM", Opts.ShortEnums ? "1" : "4");
469
470 if (FPU & NeonMode) {
471 Builder.defineMacro("__ARM_NEON", "1");
472 // 64-bit NEON supports half, single and double precision operations.
473 Builder.defineMacro("__ARM_NEON_FP", "0xE");
474 }
475
476 if (FPU & SveMode)
477 Builder.defineMacro("__ARM_FEATURE_SVE", "1");
478
479 if (HasSVE2)
480 Builder.defineMacro("__ARM_FEATURE_SVE2", "1");
481
482 if (HasSVE2p1)
483 Builder.defineMacro("__ARM_FEATURE_SVE2p1", "1");
484
485 if (HasSVE2 && HasSVEAES)
486 Builder.defineMacro("__ARM_FEATURE_SVE2_AES", "1");
487
488 if (HasSVE2 && HasSVEBitPerm)
489 Builder.defineMacro("__ARM_FEATURE_SVE2_BITPERM", "1");
490
491 if (HasSVE2 && HasSVE2SHA3)
492 Builder.defineMacro("__ARM_FEATURE_SVE2_SHA3", "1");
493
494 if (HasSVE2 && HasSVE2SM4)
495 Builder.defineMacro("__ARM_FEATURE_SVE2_SM4", "1");
496
497 if (HasSVEB16B16)
498 Builder.defineMacro("__ARM_FEATURE_SVE_B16B16", "1");
499
500 if (HasSME) {
501 Builder.defineMacro("__ARM_FEATURE_SME");
502 Builder.defineMacro("__ARM_FEATURE_LOCALLY_STREAMING", "1");
503 }
504
505 if (HasSME2)
506 Builder.defineMacro("__ARM_FEATURE_SME2", "1");
507
508 if (HasSME2p1)
509 Builder.defineMacro("__ARM_FEATURE_SME2p1", "1");
510
511 if (HasSMEF16F16)
512 Builder.defineMacro("__ARM_FEATURE_SME_F16F16", "1");
513
514 if (HasSMEB16B16)
515 Builder.defineMacro("__ARM_FEATURE_SME_B16B16", "1");
516
517 if (HasCRC)
518 Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
519
520 if (HasRCPC3)
521 Builder.defineMacro("__ARM_FEATURE_RCPC", "3");
522 else if (HasRCPC)
523 Builder.defineMacro("__ARM_FEATURE_RCPC", "1");
524
525 if (HasFMV)
526 Builder.defineMacro("__HAVE_FUNCTION_MULTI_VERSIONING", "1");
527
528 // __ARM_FEATURE_CRYPTO is deprecated in favor of the finer-grained feature
529 // macros for AES, SHA2, SHA3 and SM4.
530 if (HasAES && HasSHA2)
531 Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
532
533 if (HasAES)
534 Builder.defineMacro("__ARM_FEATURE_AES", "1");
535
536 if (HasSHA2)
537 Builder.defineMacro("__ARM_FEATURE_SHA2", "1");
538
539 if (HasSHA3) {
540 Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
541 Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
542 }
543
544 if (HasSM4) {
545 Builder.defineMacro("__ARM_FEATURE_SM3", "1");
546 Builder.defineMacro("__ARM_FEATURE_SM4", "1");
547 }
548
549 if (HasPAuth)
550 Builder.defineMacro("__ARM_FEATURE_PAUTH", "1");
551
552 if (HasPAuthLR)
553 Builder.defineMacro("__ARM_FEATURE_PAUTH_LR", "1");
554
555 if (HasBTI)
556 Builder.defineMacro("__ARM_FEATURE_BTI", "1");
557
559 Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
560
561 if ((FPU & NeonMode) && HasFullFP16)
562 Builder.defineMacro("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC", "1");
563 if (HasFullFP16)
564 Builder.defineMacro("__ARM_FEATURE_FP16_SCALAR_ARITHMETIC", "1");
565
566 if (HasDotProd)
567 Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
568
569 if (HasMTE)
570 Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");
571
572 if (HasTME)
573 Builder.defineMacro("__ARM_FEATURE_TME", "1");
574
575 if (HasMatMul)
576 Builder.defineMacro("__ARM_FEATURE_MATMUL_INT8", "1");
577
578 if (HasLSE)
579 Builder.defineMacro("__ARM_FEATURE_ATOMICS", "1");
580
581 if (HasBFloat16) {
582 Builder.defineMacro("__ARM_FEATURE_BF16", "1");
583 Builder.defineMacro("__ARM_FEATURE_BF16_VECTOR_ARITHMETIC", "1");
584 Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
585 Builder.defineMacro("__ARM_FEATURE_BF16_SCALAR_ARITHMETIC", "1");
586 }
587
588 if ((FPU & SveMode) && HasBFloat16) {
589 Builder.defineMacro("__ARM_FEATURE_SVE_BF16", "1");
590 }
591
592 if ((FPU & SveMode) && HasMatmulFP64)
593 Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP64", "1");
594
595 if ((FPU & SveMode) && HasMatmulFP32)
596 Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_FP32", "1");
597
598 if ((FPU & SveMode) && HasMatMul)
599 Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");
600
601 if ((FPU & NeonMode) && HasFP16FML)
602 Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");
603
604 if (Opts.hasSignReturnAddress()) {
605 // Bitmask:
606 // 0: Protection using the A key
607 // 1: Protection using the B key
608 // 2: Protection including leaf functions
609 // 3: Protection using PC as a diversifier
610 unsigned Value = 0;
611
612 if (Opts.isSignReturnAddressWithAKey())
613 Value |= (1 << 0);
614 else
615 Value |= (1 << 1);
616
617 if (Opts.isSignReturnAddressScopeAll())
618 Value |= (1 << 2);
619
620 if (Opts.BranchProtectionPAuthLR)
621 Value |= (1 << 3);
622
623 Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", std::to_string(Value));
624 }
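// Worked example (editorial, not in the upstream file): -mbranch-protection=pac-ret+leaf
// with the default A key sets bits 0 and 2, so __ARM_FEATURE_PAC_DEFAULT is 5;
// selecting the B key instead would give bits 1 and 2, i.e. 6.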
625
626 if (Opts.BranchTargetEnforcement)
627 Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
628
629 if (Opts.GuardedControlStack)
630 Builder.defineMacro("__ARM_FEATURE_GCS_DEFAULT", "1");
631
632 if (HasLS64)
633 Builder.defineMacro("__ARM_FEATURE_LS64", "1");
634
635 if (HasRandGen)
636 Builder.defineMacro("__ARM_FEATURE_RNG", "1");
637
638 if (HasMOPS)
639 Builder.defineMacro("__ARM_FEATURE_MOPS", "1");
640
641 if (HasD128)
642 Builder.defineMacro("__ARM_FEATURE_SYSREG128", "1");
643
644 if (HasGCS)
645 Builder.defineMacro("__ARM_FEATURE_GCS", "1");
646
647 if (*ArchInfo == llvm::AArch64::ARMV8_1A)
648 getTargetDefinesARMV81A(Opts, Builder);
649 else if (*ArchInfo == llvm::AArch64::ARMV8_2A)
650 getTargetDefinesARMV82A(Opts, Builder);
651 else if (*ArchInfo == llvm::AArch64::ARMV8_3A)
652 getTargetDefinesARMV83A(Opts, Builder);
653 else if (*ArchInfo == llvm::AArch64::ARMV8_4A)
654 getTargetDefinesARMV84A(Opts, Builder);
655 else if (*ArchInfo == llvm::AArch64::ARMV8_5A)
656 getTargetDefinesARMV85A(Opts, Builder);
657 else if (*ArchInfo == llvm::AArch64::ARMV8_6A)
658 getTargetDefinesARMV86A(Opts, Builder);
659 else if (*ArchInfo == llvm::AArch64::ARMV8_7A)
660 getTargetDefinesARMV87A(Opts, Builder);
661 else if (*ArchInfo == llvm::AArch64::ARMV8_8A)
662 getTargetDefinesARMV88A(Opts, Builder);
663 else if (*ArchInfo == llvm::AArch64::ARMV8_9A)
664 getTargetDefinesARMV89A(Opts, Builder);
665 else if (*ArchInfo == llvm::AArch64::ARMV9A)
666 getTargetDefinesARMV9A(Opts, Builder);
667 else if (*ArchInfo == llvm::AArch64::ARMV9_1A)
668 getTargetDefinesARMV91A(Opts, Builder);
669 else if (*ArchInfo == llvm::AArch64::ARMV9_2A)
670 getTargetDefinesARMV92A(Opts, Builder);
671 else if (*ArchInfo == llvm::AArch64::ARMV9_3A)
672 getTargetDefinesARMV93A(Opts, Builder);
673 else if (*ArchInfo == llvm::AArch64::ARMV9_4A)
674 getTargetDefinesARMV94A(Opts, Builder);
675 else if (*ArchInfo == llvm::AArch64::ARMV9_5A)
676 getTargetDefinesARMV95A(Opts, Builder);
677 else if (*ArchInfo == llvm::AArch64::ARMV9_6A)
678 getTargetDefinesARMV96A(Opts, Builder);
679
680 // All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8|16) builtins work.
681 Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
682 Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
683 Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
684 Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
685 Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
686
687 // Allow detection of fast FMA support.
688 Builder.defineMacro("__FP_FAST_FMA", "1");
689 Builder.defineMacro("__FP_FAST_FMAF", "1");
690
691 // C/C++ operators work on both VLS and VLA SVE types
692 if (FPU & SveMode)
693 Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS", "2");
694
695 if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
696 Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
697 }
698}
699
700ArrayRef<Builtin::Info> AArch64TargetInfo::getTargetBuiltins() const {
701 return llvm::ArrayRef(BuiltinInfo, clang::AArch64::LastTSBuiltin -
702 Builtin::FirstTSBuiltin);
703}
704
705std::optional<std::pair<unsigned, unsigned>>
706AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
707 if (LangOpts.VScaleMin || LangOpts.VScaleMax)
708 return std::pair<unsigned, unsigned>(
709 LangOpts.VScaleMin ? LangOpts.VScaleMin : 1, LangOpts.VScaleMax);
710
711 if (hasFeature("sve"))
712 return std::pair<unsigned, unsigned>(1, 16);
713
714 return std::nullopt;
715}
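// Illustrative example (editorial, not in the upstream file): -msve-vector-bits=256
// sets VScaleMin == VScaleMax == 2 (256 / 128), so the range above is (2, 2);
// plain +sve with no fixed vector length falls back to the architectural
// range (1, 16).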
716
717uint64_t AArch64TargetInfo::getFMVPriority(ArrayRef<StringRef> Features) const {
718 return llvm::AArch64::getFMVPriority(Features);
719}
720
721bool AArch64TargetInfo::doesFeatureAffectCodeGen(StringRef Name) const {
722 // FMV extensions which imply no backend features do not affect codegen.
723 if (auto Ext = llvm::AArch64::parseFMVExtension(Name))
724 return Ext->ID.has_value();
725 return false;
726}
727
728bool AArch64TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
729 // CPU features might be separated by '+', extract them and check
730 llvm::SmallVector<StringRef, 8> Features;
731 FeatureStr.split(Features, "+");
732 for (auto &Feature : Features)
733 if (!llvm::AArch64::parseFMVExtension(Feature.trim()).has_value())
734 return false;
735 return true;
736}
737
738bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
739 return llvm::StringSwitch<bool>(Feature)
740 .Cases("aarch64", "arm64", "arm", true)
741 .Case("fmv", HasFMV)
742 .Case("fp", FPU & FPUMode)
743 .Cases("neon", "simd", FPU & NeonMode)
744 .Case("jscvt", HasJSCVT)
745 .Case("fcma", HasFCMA)
746 .Case("rng", HasRandGen)
747 .Case("flagm", HasFlagM)
748 .Case("flagm2", HasAlternativeNZCV)
749 .Case("fp16fml", HasFP16FML)
750 .Case("dotprod", HasDotProd)
751 .Case("sm4", HasSM4)
752 .Case("rdm", HasRDM)
753 .Case("lse", HasLSE)
754 .Case("crc", HasCRC)
755 .Case("sha2", HasSHA2)
756 .Case("sha3", HasSHA3)
757 .Cases("aes", "pmull", HasAES)
758 .Cases("fp16", "fullfp16", HasFullFP16)
759 .Case("dit", HasDIT)
760 .Case("dpb", HasCCPP)
761 .Case("dpb2", HasCCDP)
762 .Case("rcpc", HasRCPC)
763 .Case("frintts", HasFRInt3264)
764 .Case("i8mm", HasMatMul)
765 .Case("bf16", HasBFloat16)
766 .Case("sve", FPU & SveMode)
767 .Case("sve-b16b16", HasSVEB16B16)
768 .Case("f32mm", FPU & SveMode && HasMatmulFP32)
769 .Case("f64mm", FPU & SveMode && HasMatmulFP64)
770 .Case("sve2", FPU & SveMode && HasSVE2)
771 .Case("sve-aes", HasSVEAES)
772 .Case("sve-bitperm", FPU & HasSVEBitPerm)
773 .Case("sve2-sha3", FPU & SveMode && HasSVE2SHA3)
774 .Case("sve2-sm4", FPU & SveMode && HasSVE2SM4)
775 .Case("sve2p1", FPU & SveMode && HasSVE2p1)
776 .Case("sme", HasSME)
777 .Case("sme2", HasSME2)
778 .Case("sme2p1", HasSME2p1)
779 .Case("sme-f64f64", HasSMEF64F64)
780 .Case("sme-i16i64", HasSMEI16I64)
781 .Case("sme-fa64", HasSMEFA64)
782 .Case("sme-f16f16", HasSMEF16F16)
783 .Case("sme-b16b16", HasSMEB16B16)
784 .Case("memtag", HasMTE)
785 .Case("sb", HasSB)
786 .Case("predres", HasPredRes)
787 .Cases("ssbs", "ssbs2", HasSSBS)
788 .Case("bti", HasBTI)
789 .Cases("ls64", "ls64_v", "ls64_accdata", HasLS64)
790 .Case("wfxt", HasWFxT)
791 .Case("rcpc3", HasRCPC3)
792 .Default(false);
793}
794
795void AArch64TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
796 StringRef Name, bool Enabled) const {
797 Features[Name] = Enabled;
798 // If the feature is an architecture feature (like v8.2a), add all previous
799 // architecture versions and any dependent target features.
800 const std::optional<llvm::AArch64::ArchInfo> ArchInfo =
801 llvm::AArch64::ArchInfo::findBySubArch(Name);
802
803 if (!ArchInfo)
804 return; // Not an architecture, nothing more to do.
805
806 // Disabling an architecture feature does not affect dependent features
807 if (!Enabled)
808 return;
809
810 for (const auto *OtherArch : llvm::AArch64::ArchInfos)
811 if (ArchInfo->implies(*OtherArch))
812 Features[OtherArch->getSubArch()] = true;
813
814 // Set any features implied by the architecture
815 std::vector<StringRef> CPUFeats;
816 if (llvm::AArch64::getExtensionFeatures(ArchInfo->DefaultExts, CPUFeats)) {
817 for (auto F : CPUFeats) {
818 assert(F[0] == '+' && "Expected + in target feature!");
819 Features[F.drop_front(1)] = true;
820 }
821 }
822}
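// Illustrative example (editorial, not in the upstream file): enabling "v8.2a"
// here also marks the earlier architecture features ("v8.1a", "v8a") as
// enabled and adds that architecture's default extensions (for example "+crc"
// and "+lse") to the feature map.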
823
824bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
825 DiagnosticsEngine &Diags) {
826 for (const auto &Feature : Features) {
827 if (Feature == "-fp-armv8")
828 HasNoFP = true;
829 if (Feature == "-neon")
830 HasNoNeon = true;
831 if (Feature == "-sve")
832 HasNoSVE = true;
833
834 if (Feature == "+neon" || Feature == "+fp-armv8")
835 FPU |= NeonMode;
836 if (Feature == "+jscvt") {
837 HasJSCVT = true;
838 FPU |= NeonMode;
839 }
840 if (Feature == "+fcma") {
841 HasFCMA = true;
842 FPU |= NeonMode;
843 }
844
845 if (Feature == "+sve") {
846 FPU |= NeonMode;
847 FPU |= SveMode;
848 HasFullFP16 = true;
849 }
850 if (Feature == "+sve2") {
851 FPU |= NeonMode;
852 FPU |= SveMode;
853 HasFullFP16 = true;
854 HasSVE2 = true;
855 }
856 if (Feature == "+sve2p1") {
857 FPU |= NeonMode;
858 FPU |= SveMode;
859 HasFullFP16 = true;
860 HasSVE2 = true;
861 HasSVE2p1 = true;
862 }
863 if (Feature == "+sve-aes") {
864 FPU |= NeonMode;
865 HasFullFP16 = true;
866 HasSVEAES = true;
867 }
868 if (Feature == "+sve2-sha3") {
869 FPU |= NeonMode;
870 FPU |= SveMode;
871 HasFullFP16 = true;
872 HasSVE2 = true;
873 HasSVE2SHA3 = true;
874 }
875 if (Feature == "+sve2-sm4") {
876 FPU |= NeonMode;
877 FPU |= SveMode;
878 HasFullFP16 = true;
879 HasSVE2 = true;
880 HasSVE2SM4 = true;
881 }
882 if (Feature == "+sve-b16b16")
883 HasSVEB16B16 = true;
884 if (Feature == "+sve-bitperm") {
885 FPU |= NeonMode;
886 HasFullFP16 = true;
887 HasSVEBitPerm = true;
888 }
889 if (Feature == "+f32mm") {
890 FPU |= NeonMode;
891 FPU |= SveMode;
892 HasFullFP16 = true;
893 HasMatmulFP32 = true;
894 }
895 if (Feature == "+f64mm") {
896 FPU |= NeonMode;
897 FPU |= SveMode;
898 HasFullFP16 = true;
899 HasMatmulFP64 = true;
900 }
901 if (Feature == "+sme") {
902 HasSME = true;
903 HasBFloat16 = true;
904 HasFullFP16 = true;
905 }
906 if (Feature == "+sme2") {
907 HasSME = true;
908 HasSME2 = true;
909 HasBFloat16 = true;
910 HasFullFP16 = true;
911 }
912 if (Feature == "+sme2p1") {
913 HasSME = true;
914 HasSME2 = true;
915 HasSME2p1 = true;
916 HasBFloat16 = true;
917 HasFullFP16 = true;
918 }
919 if (Feature == "+sme-f64f64") {
920 HasSME = true;
921 HasSMEF64F64 = true;
922 HasBFloat16 = true;
923 HasFullFP16 = true;
924 }
925 if (Feature == "+sme-i16i64") {
926 HasSME = true;
927 HasSMEI16I64 = true;
928 HasBFloat16 = true;
929 HasFullFP16 = true;
930 }
931 if (Feature == "+sme-fa64") {
932 FPU |= NeonMode;
933 FPU |= SveMode;
934 HasSME = true;
935 HasSVE2 = true;
936 HasSMEFA64 = true;
937 }
938 if (Feature == "+sme-f16f16") {
939 HasSME = true;
940 HasSME2 = true;
941 HasBFloat16 = true;
942 HasFullFP16 = true;
943 HasSMEF16F16 = true;
944 }
945 if (Feature == "+sme-b16b16") {
946 HasSME = true;
947 HasSME2 = true;
948 HasBFloat16 = true;
949 HasFullFP16 = true;
950 HasSVEB16B16 = true;
951 HasSMEB16B16 = true;
952 }
953 if (Feature == "+sb")
954 HasSB = true;
955 if (Feature == "+predres")
956 HasPredRes = true;
957 if (Feature == "+ssbs")
958 HasSSBS = true;
959 if (Feature == "+bti")
960 HasBTI = true;
961 if (Feature == "+wfxt")
962 HasWFxT = true;
963 if (Feature == "-fmv")
964 HasFMV = false;
965 if (Feature == "+crc")
966 HasCRC = true;
967 if (Feature == "+rcpc")
968 HasRCPC = true;
969 if (Feature == "+aes") {
970 FPU |= NeonMode;
971 HasAES = true;
972 }
973 if (Feature == "+sha2") {
974 FPU |= NeonMode;
975 HasSHA2 = true;
976 }
977 if (Feature == "+sha3") {
978 FPU |= NeonMode;
979 HasSHA2 = true;
980 HasSHA3 = true;
981 }
982 if (Feature == "+rdm") {
983 FPU |= NeonMode;
984 HasRDM = true;
985 }
986 if (Feature == "+dit")
987 HasDIT = true;
988 if (Feature == "+cccp")
989 HasCCPP = true;
990 if (Feature == "+ccdp") {
991 HasCCPP = true;
992 HasCCDP = true;
993 }
994 if (Feature == "+fptoint")
995 HasFRInt3264 = true;
996 if (Feature == "+sm4") {
997 FPU |= NeonMode;
998 HasSM4 = true;
999 }
1000 if (Feature == "+strict-align")
1001 HasUnalignedAccess = false;
1002
1003 // All predecessor archs are added but select the latest one for ArchKind.
1004 if (Feature == "+v8a" && ArchInfo->Version < llvm::AArch64::ARMV8A.Version)
1005 ArchInfo = &llvm::AArch64::ARMV8A;
1006 if (Feature == "+v8.1a" &&
1007 ArchInfo->Version < llvm::AArch64::ARMV8_1A.Version)
1008 ArchInfo = &llvm::AArch64::ARMV8_1A;
1009 if (Feature == "+v8.2a" &&
1010 ArchInfo->Version < llvm::AArch64::ARMV8_2A.Version)
1011 ArchInfo = &llvm::AArch64::ARMV8_2A;
1012 if (Feature == "+v8.3a" &&
1013 ArchInfo->Version < llvm::AArch64::ARMV8_3A.Version)
1014 ArchInfo = &llvm::AArch64::ARMV8_3A;
1015 if (Feature == "+v8.4a" &&
1016 ArchInfo->Version < llvm::AArch64::ARMV8_4A.Version)
1017 ArchInfo = &llvm::AArch64::ARMV8_4A;
1018 if (Feature == "+v8.5a" &&
1019 ArchInfo->Version < llvm::AArch64::ARMV8_5A.Version)
1020 ArchInfo = &llvm::AArch64::ARMV8_5A;
1021 if (Feature == "+v8.6a" &&
1022 ArchInfo->Version < llvm::AArch64::ARMV8_6A.Version)
1023 ArchInfo = &llvm::AArch64::ARMV8_6A;
1024 if (Feature == "+v8.7a" &&
1025 ArchInfo->Version < llvm::AArch64::ARMV8_7A.Version)
1026 ArchInfo = &llvm::AArch64::ARMV8_7A;
1027 if (Feature == "+v8.8a" &&
1028 ArchInfo->Version < llvm::AArch64::ARMV8_8A.Version)
1029 ArchInfo = &llvm::AArch64::ARMV8_8A;
1030 if (Feature == "+v8.9a" &&
1031 ArchInfo->Version < llvm::AArch64::ARMV8_9A.Version)
1032 ArchInfo = &llvm::AArch64::ARMV8_9A;
1033 if (Feature == "+v9a" && ArchInfo->Version < llvm::AArch64::ARMV9A.Version)
1034 ArchInfo = &llvm::AArch64::ARMV9A;
1035 if (Feature == "+v9.1a" &&
1036 ArchInfo->Version < llvm::AArch64::ARMV9_1A.Version)
1037 ArchInfo = &llvm::AArch64::ARMV9_1A;
1038 if (Feature == "+v9.2a" &&
1039 ArchInfo->Version < llvm::AArch64::ARMV9_2A.Version)
1040 ArchInfo = &llvm::AArch64::ARMV9_2A;
1041 if (Feature == "+v9.3a" &&
1042 ArchInfo->Version < llvm::AArch64::ARMV9_3A.Version)
1043 ArchInfo = &llvm::AArch64::ARMV9_3A;
1044 if (Feature == "+v9.4a" &&
1045 ArchInfo->Version < llvm::AArch64::ARMV9_4A.Version)
1046 ArchInfo = &llvm::AArch64::ARMV9_4A;
1047 if (Feature == "+v9.5a" &&
1048 ArchInfo->Version < llvm::AArch64::ARMV9_5A.Version)
1049 ArchInfo = &llvm::AArch64::ARMV9_5A;
1050 if (Feature == "+v9.6a" &&
1051 ArchInfo->Version < llvm::AArch64::ARMV9_6A.Version)
1052 ArchInfo = &llvm::AArch64::ARMV9_6A;
1053 if (Feature == "+v8r")
1054 ArchInfo = &llvm::AArch64::ARMV8R;
1055 if (Feature == "+fullfp16") {
1056 FPU |= NeonMode;
1057 HasFullFP16 = true;
1058 }
1059 if (Feature == "+dotprod") {
1060 FPU |= NeonMode;
1061 HasDotProd = true;
1062 }
1063 if (Feature == "+fp16fml") {
1064 FPU |= NeonMode;
1065 HasFullFP16 = true;
1066 HasFP16FML = true;
1067 }
1068 if (Feature == "+mte")
1069 HasMTE = true;
1070 if (Feature == "+tme")
1071 HasTME = true;
1072 if (Feature == "+pauth")
1073 HasPAuth = true;
1074 if (Feature == "+i8mm")
1075 HasMatMul = true;
1076 if (Feature == "+bf16")
1077 HasBFloat16 = true;
1078 if (Feature == "+lse")
1079 HasLSE = true;
1080 if (Feature == "+ls64")
1081 HasLS64 = true;
1082 if (Feature == "+rand")
1083 HasRandGen = true;
1084 if (Feature == "+flagm")
1085 HasFlagM = true;
1086 if (Feature == "+altnzcv") {
1087 HasFlagM = true;
1088 HasAlternativeNZCV = true;
1089 }
1090 if (Feature == "+mops")
1091 HasMOPS = true;
1092 if (Feature == "+d128")
1093 HasD128 = true;
1094 if (Feature == "+gcs")
1095 HasGCS = true;
1096 if (Feature == "+rcpc3")
1097 HasRCPC3 = true;
1098 if (Feature == "+pauth-lr") {
1099 HasPAuthLR = true;
1100 HasPAuth = true;
1101 }
1102 }
1103
1104 // Check features that are manually disabled by command line options.
1105 // This needs to be checked after architecture-related features are handled,
1106 // making sure they are properly disabled when required.
1107 for (const auto &Feature : Features) {
1108 if (Feature == "-d128")
1109 HasD128 = false;
1110 }
1111
1112 setDataLayout();
1113 setArchFeatures();
1114
1115 if (HasNoFP) {
1116 FPU &= ~FPUMode;
1117 FPU &= ~NeonMode;
1118 FPU &= ~SveMode;
1119 }
1120 if (HasNoNeon) {
1121 FPU &= ~NeonMode;
1122 FPU &= ~SveMode;
1123 }
1124 if (HasNoSVE)
1125 FPU &= ~SveMode;
1126
1127 return true;
1128}
1129
1130// Parse AArch64 Target attributes, which are a comma separated list of:
1131// "arch=<arch>" - parsed to features as per -march=..
1132// "cpu=<cpu>" - parsed to features as per -mcpu=.., with CPU set to <cpu>
1133// "tune=<cpu>" - TuneCPU set to <cpu>
1134// "feature", "no-feature" - Add (or remove) feature.
1135// "+feature", "+nofeature" - Add (or remove) feature.
1136//
1137// A feature may correspond to an Extension (anything with a corresponding
1138// AEK_), in which case an ExtensionSet is used to parse it and expand its
1139// dependencies. If the feature does not yield a successful parse then it
1140// is passed through.
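// Illustrative example (editorial, not in the upstream file): an attribute such as
//   __attribute__((target("arch=armv8.2-a+dotprod,tune=cortex-a75")))
// is split on ',' below; "arch=armv8.2-a" pulls in that architecture's default
// features, "+dotprod" is parsed as an extension modifier, and
// "tune=cortex-a75" only sets Ret.Tune.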
1141ParsedTargetAttr AArch64TargetInfo::parseTargetAttr(StringRef Features) const {
1142 ParsedTargetAttr Ret;
1143 if (Features == "default")
1144 return Ret;
1145 SmallVector<StringRef, 1> AttrFeatures;
1146 Features.split(AttrFeatures, ",");
1147 bool FoundArch = false;
1148
1149 auto SplitAndAddFeatures = [](StringRef FeatString,
1150 std::vector<std::string> &Features,
1151 llvm::AArch64::ExtensionSet &FeatureBits) {
1152 SmallVector<StringRef, 8> SplitFeatures;
1153 FeatString.split(SplitFeatures, StringRef("+"), -1, false);
1154 for (StringRef Feature : SplitFeatures) {
1155 if (FeatureBits.parseModifier(Feature))
1156 continue;
1157 // Pass through anything that failed to parse so that we can emit
1158 // diagnostics, as well as valid internal feature names.
1159 //
1160 // FIXME: We should consider rejecting internal feature names like
1161 // neon, v8a, etc.
1162 // FIXME: We should consider emitting diagnostics here.
1163 if (Feature.starts_with("no"))
1164 Features.push_back("-" + Feature.drop_front(2).str());
1165 else
1166 Features.push_back("+" + Feature.str());
1167 }
1168 };
1169
1170 llvm::AArch64::ExtensionSet FeatureBits;
1171 // Reconstruct the bitset from the command line option features.
1172 FeatureBits.reconstructFromParsedFeatures(getTargetOpts().FeaturesAsWritten,
1173 Ret.Features);
1174
1175 for (auto &Feature : AttrFeatures) {
1176 Feature = Feature.trim();
1177 if (Feature.starts_with("fpmath="))
1178 continue;
1179
1180 if (Feature.starts_with("branch-protection=")) {
1181 Ret.BranchProtection = Feature.split('=').second.trim();
1182 continue;
1183 }
1184
1185 if (Feature.starts_with("arch=")) {
1186 if (FoundArch)
1187 Ret.Duplicate = "arch=";
1188 FoundArch = true;
1189 std::pair<StringRef, StringRef> Split =
1190 Feature.split("=").second.trim().split("+");
1191 const llvm::AArch64::ArchInfo *AI = llvm::AArch64::parseArch(Split.first);
1192
1193 // Parse the architecture version, adding the required features to
1194 // Ret.Features.
1195 if (!AI)
1196 continue;
1197 FeatureBits.addArchDefaults(*AI);
1198 // Add any extra features, after the +
1199 SplitAndAddFeatures(Split.second, Ret.Features, FeatureBits);
1200 } else if (Feature.starts_with("cpu=")) {
1201 if (!Ret.CPU.empty())
1202 Ret.Duplicate = "cpu=";
1203 else {
1204 // Split the cpu string into "cpu=", "cortex-a710" and any remaining
1205 // "+feat" features.
1206 std::pair<StringRef, StringRef> Split =
1207 Feature.split("=").second.trim().split("+");
1208 Ret.CPU = Split.first;
1209 if (auto CpuInfo = llvm::AArch64::parseCpu(Ret.CPU)) {
1210 FeatureBits.addCPUDefaults(*CpuInfo);
1211 SplitAndAddFeatures(Split.second, Ret.Features, FeatureBits);
1212 }
1213 }
1214 } else if (Feature.starts_with("tune=")) {
1215 if (!Ret.Tune.empty())
1216 Ret.Duplicate = "tune=";
1217 else
1218 Ret.Tune = Feature.split("=").second.trim();
1219 } else if (Feature.starts_with("+")) {
1220 SplitAndAddFeatures(Feature, Ret.Features, FeatureBits);
1221 } else {
1222 if (FeatureBits.parseModifier(Feature, /* AllowNoDashForm = */ true))
1223 continue;
1224 // Pass through anything that failed to parse so that we can emit
1225 // diagnostics, as well as valid internal feature names.
1226 //
1227 // FIXME: We should consider rejecting internal feature names like
1228 // neon, v8a, etc.
1229 // FIXME: We should consider emitting diagnostics here.
1230 if (Feature.starts_with("no-"))
1231 Ret.Features.push_back("-" + Feature.drop_front(3).str());
1232 else
1233 Ret.Features.push_back("+" + Feature.str());
1234 }
1235 }
1236 FeatureBits.toLLVMFeatureList(Ret.Features);
1237 return Ret;
1238}
1239
1240bool AArch64TargetInfo::hasBFloat16Type() const {
1241 return true;
1242}
1243
1244TargetInfo::CallingConvCheckResult
1245AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
1246 switch (CC) {
1247 case CC_C:
1248 case CC_Swift:
1249 case CC_SwiftAsync:
1250 case CC_PreserveMost:
1251 case CC_PreserveAll:
1252 case CC_PreserveNone:
1253 case CC_OpenCLKernel:
1254 case CC_AArch64VectorCall:
1255 case CC_AArch64SVEPCS:
1256 case CC_Win64:
1257 return CCCR_OK;
1258 default:
1259 return CCCR_Warning;
1260 }
1261}
1262
1263bool AArch64TargetInfo::isCLZForZeroUndef() const { return false; }
1264
1265TargetInfo::BuiltinVaListKind AArch64TargetInfo::getBuiltinVaListKind() const {
1266 return TargetInfo::AArch64ABIBuiltinVaList;
1267}
1268
1269const char *const AArch64TargetInfo::GCCRegNames[] = {
1270 // clang-format off
1271
1272 // 32-bit Integer registers
1273 "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11",
1274 "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22",
1275 "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "wsp",
1276
1277 // 64-bit Integer registers
1278 "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
1279 "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22",
1280 "x23", "x24", "x25", "x26", "x27", "x28", "fp", "lr", "sp",
1281
1282 // 32-bit floating point registers
1283 "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
1284 "s12", "s13", "s14", "s15", "s16", "s17", "s18", "s19", "s20", "s21", "s22",
1285 "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
1286
1287 // 64-bit floating point registers
1288 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11",
1289 "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
1290 "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
1291
1292 // Neon vector registers
1293 "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
1294 "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22",
1295 "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
1296
1297 // SVE vector registers
1298 "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10",
1299 "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21",
1300 "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31",
1301
1302 // SVE predicate registers
1303 "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9", "p10",
1304 "p11", "p12", "p13", "p14", "p15",
1305
1306 // SVE predicate-as-counter registers
1307 "pn0", "pn1", "pn2", "pn3", "pn4", "pn5", "pn6", "pn7", "pn8",
1308 "pn9", "pn10", "pn11", "pn12", "pn13", "pn14", "pn15",
1309
1310 // SME registers
1311 "za", "zt0",
1312
1313 // clang-format on
1314};
1315
1316ArrayRef<const char *> AArch64TargetInfo::getGCCRegNames() const {
1317 return llvm::ArrayRef(GCCRegNames);
1318}
1319
1320const TargetInfo::GCCRegAlias AArch64TargetInfo::GCCRegAliases[] = {
1321 {{"w31"}, "wsp"},
1322 {{"x31"}, "sp"},
1323 // GCC rN registers are aliases of xN registers.
1324 {{"r0"}, "x0"},
1325 {{"r1"}, "x1"},
1326 {{"r2"}, "x2"},
1327 {{"r3"}, "x3"},
1328 {{"r4"}, "x4"},
1329 {{"r5"}, "x5"},
1330 {{"r6"}, "x6"},
1331 {{"r7"}, "x7"},
1332 {{"r8"}, "x8"},
1333 {{"r9"}, "x9"},
1334 {{"r10"}, "x10"},
1335 {{"r11"}, "x11"},
1336 {{"r12"}, "x12"},
1337 {{"r13"}, "x13"},
1338 {{"r14"}, "x14"},
1339 {{"r15"}, "x15"},
1340 {{"r16"}, "x16"},
1341 {{"r17"}, "x17"},
1342 {{"r18"}, "x18"},
1343 {{"r19"}, "x19"},
1344 {{"r20"}, "x20"},
1345 {{"r21"}, "x21"},
1346 {{"r22"}, "x22"},
1347 {{"r23"}, "x23"},
1348 {{"r24"}, "x24"},
1349 {{"r25"}, "x25"},
1350 {{"r26"}, "x26"},
1351 {{"r27"}, "x27"},
1352 {{"r28"}, "x28"},
1353 {{"r29", "x29"}, "fp"},
1354 {{"r30", "x30"}, "lr"},
1355 // The S/D/Q and W/X registers overlap, but aren't really aliases; we
1356 // don't want to substitute one of these for a different-sized one.
1357};
1358
1359ArrayRef<TargetInfo::GCCRegAlias> AArch64TargetInfo::getGCCRegAliases() const {
1360 return llvm::ArrayRef(GCCRegAliases);
1361}
1362
1363// Returns the length of cc constraint.
1364static unsigned matchAsmCCConstraint(const char *Name) {
1365 constexpr unsigned len = 5;
1366 auto RV = llvm::StringSwitch<unsigned>(Name)
1367 .Case("@cceq", len)
1368 .Case("@ccne", len)
1369 .Case("@cchs", len)
1370 .Case("@cccs", len)
1371 .Case("@cccc", len)
1372 .Case("@cclo", len)
1373 .Case("@ccmi", len)
1374 .Case("@ccpl", len)
1375 .Case("@ccvs", len)
1376 .Case("@ccvc", len)
1377 .Case("@cchi", len)
1378 .Case("@ccls", len)
1379 .Case("@ccge", len)
1380 .Case("@cclt", len)
1381 .Case("@ccgt", len)
1382 .Case("@ccle", len)
1383 .Default(0);
1384 return RV;
1385}
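// Illustrative usage (editorial, not in the upstream file): these constraints
// back inline-asm flag outputs, e.g.
//   bool eq;
//   asm("cmp %w1, %w2" : "=@cceq"(eq) : "r"(a), "r"(b) : "cc");
// "@cceq" is matched above (length 5) and wrapped as "{@cceq}" by
// convertConstraint() below.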
1386
1387std::string
1388AArch64TargetInfo::convertConstraint(const char *&Constraint) const {
1389 std::string R;
1390 switch (*Constraint) {
1391 case 'U': // Three-character constraint; add "@3" hint for later parsing.
1392 R = std::string("@3") + std::string(Constraint, 3);
1393 Constraint += 2;
1394 break;
1395 case '@':
1396 if (const unsigned Len = matchAsmCCConstraint(Constraint)) {
1397 std::string Converted = "{" + std::string(Constraint, Len) + "}";
1398 Constraint += Len - 1;
1399 return Converted;
1400 }
1401 return std::string(1, *Constraint);
1402 default:
1403 R = TargetInfo::convertConstraint(Constraint);
1404 break;
1405 }
1406 return R;
1407}
1408
1409bool AArch64TargetInfo::validateAsmConstraint(
1410 const char *&Name, TargetInfo::ConstraintInfo &Info) const {
1411 switch (*Name) {
1412 default:
1413 return false;
1414 case 'w': // Floating point and SIMD registers (V0-V31)
1415 Info.setAllowsRegister();
1416 return true;
1417 case 'I': // Constant that can be used with an ADD instruction
1418 case 'J': // Constant that can be used with a SUB instruction
1419 case 'K': // Constant that can be used with a 32-bit logical instruction
1420 case 'L': // Constant that can be used with a 64-bit logical instruction
1421 case 'M': // Constant that can be used as a 32-bit MOV immediate
1422 case 'N': // Constant that can be used as a 64-bit MOV immediate
1423 case 'Y': // Floating point constant zero
1424 case 'Z': // Integer constant zero
1425 return true;
1426 case 'Q': // A memory reference with base register and no offset
1427 Info.setAllowsMemory();
1428 return true;
1429 case 'S': // A symbolic address
1430 Info.setAllowsRegister();
1431 return true;
1432 case 'U':
1433 if (Name[1] == 'p' &&
1434 (Name[2] == 'l' || Name[2] == 'a' || Name[2] == 'h')) {
1435 // SVE predicate registers ("Upa"=P0-15, "Upl"=P0-P7, "Uph"=P8-P15)
1436 Info.setAllowsRegister();
1437 Name += 2;
1438 return true;
1439 }
1440 if (Name[1] == 'c' && (Name[2] == 'i' || Name[2] == 'j')) {
1441 // Gpr registers ("Uci"=w8-11, "Ucj"=w12-15)
1442 Info.setAllowsRegister();
1443 Name += 2;
1444 return true;
1445 }
1446 // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes.
1447 // Utf: A memory address suitable for ldp/stp in TF mode.
1448 // Usa: An absolute symbolic address.
1449 // Ush: The high part (bits 32:12) of a pc-relative symbolic address.
1450
1451 // Better to return an error saying that it's an unrecognised constraint
1452 // even if this is a valid constraint in gcc.
1453 return false;
1454 case 'z': // Zero register, wzr or xzr
1455 Info.setAllowsRegister();
1456 return true;
1457 case 'x': // Floating point and SIMD registers (V0-V15)
1458 Info.setAllowsRegister();
1459 return true;
1460 case 'y': // SVE registers (V0-V7)
1461 Info.setAllowsRegister();
1462 return true;
1463 case '@':
1464 // CC condition
1465 if (const unsigned Len = matchAsmCCConstraint(Name)) {
1466 Name += Len - 1;
1467 Info.setAllowsRegister();
1468 return true;
1469 }
1470 }
1471 return false;
1472}
1473
1474bool AArch64TargetInfo::validateConstraintModifier(
1475 StringRef Constraint, char Modifier, unsigned Size,
1476 std::string &SuggestedModifier) const {
1477 // Strip off constraint modifiers.
1478 Constraint = Constraint.ltrim("=+&");
1479
1480 switch (Constraint[0]) {
1481 default:
1482 return true;
1483 case 'z':
1484 case 'r': {
1485 switch (Modifier) {
1486 case 'x':
1487 case 'w':
1488 // For now assume that the person knows what they're
1489 // doing with the modifier.
1490 return true;
1491 default:
1492 // By default an 'r' constraint will be in the 'x'
1493 // registers.
1494 if (Size == 64)
1495 return true;
1496
1497 if (Size == 512)
1498 return HasLS64;
1499
1500 SuggestedModifier = "w";
1501 return false;
1502 }
1503 }
1504 }
1505}
1506
1507std::string_view AArch64TargetInfo::getClobbers() const { return ""; }
1508
1509int AArch64TargetInfo::getEHDataRegisterNumber(unsigned RegNo) const {
1510 if (RegNo == 0)
1511 return 0;
1512 if (RegNo == 1)
1513 return 1;
1514 return -1;
1515}
1516
1517bool AArch64TargetInfo::validatePointerAuthKey(
1518 const llvm::APSInt &value) const {
1519 return 0 <= value && value <= 3;
1520}
1521
1522bool AArch64TargetInfo::hasInt128Type() const { return true; }
1523
1524AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
1525 const TargetOptions &Opts)
1526 : AArch64TargetInfo(Triple, Opts) {}
1527
1528void AArch64leTargetInfo::setDataLayout() {
1529 if (getTriple().isOSBinFormatMachO()) {
1530 if(getTriple().isArch32Bit())
1531 resetDataLayout("e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-"
1532 "i128:128-n32:64-S128-Fn32",
1533 "_");
1534 else
1535 resetDataLayout("e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-"
1536 "n32:64-S128-Fn32",
1537 "_");
1538 } else
1539 resetDataLayout("e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-"
1540 "i64:64-i128:128-n32:64-S128-Fn32");
1541}
1542
1543void AArch64leTargetInfo::getTargetDefines(const LangOptions &Opts,
1544 MacroBuilder &Builder) const {
1545 Builder.defineMacro("__AARCH64EL__");
1546 AArch64TargetInfo::getTargetDefines(Opts, Builder);
1547}
1548
1549AArch64beTargetInfo::AArch64beTargetInfo(const llvm::Triple &Triple,
1550 const TargetOptions &Opts)
1551 : AArch64TargetInfo(Triple, Opts) {}
1552
1553void AArch64beTargetInfo::getTargetDefines(const LangOptions &Opts,
1554 MacroBuilder &Builder) const {
1555 Builder.defineMacro("__AARCH64EB__");
1556 Builder.defineMacro("__AARCH_BIG_ENDIAN");
1557 Builder.defineMacro("__ARM_BIG_ENDIAN");
1558 AArch64TargetInfo::getTargetDefines(Opts, Builder);
1559}
1560
1561void AArch64beTargetInfo::setDataLayout() {
1562 assert(!getTriple().isOSBinFormatMachO());
1563 resetDataLayout("E-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-"
1564 "i64:64-i128:128-n32:64-S128-Fn32");
1565}
1566
1567WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
1568 const TargetOptions &Opts)
1569 : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {
1570
1571 // This is an LLP64 platform.
1572 // int:4, long:4, long long:8, long double:8.
1573 IntWidth = IntAlign = 32;
1574 LongWidth = LongAlign = 32;
1575 DoubleAlign = LongLongAlign = 64;
1576 LongDoubleWidth = LongDoubleAlign = 64;
1577 LongDoubleFormat = &llvm::APFloat::IEEEdouble();
1578 IntMaxType = SignedLongLong;
1579 Int64Type = SignedLongLong;
1580 SizeType = UnsignedLongLong;
1581 PtrDiffType = SignedLongLong;
1582 IntPtrType = SignedLongLong;
1583}
1584
1585void WindowsARM64TargetInfo::setDataLayout() {
1586 resetDataLayout(Triple.isOSBinFormatMachO()
1587 ? "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:"
1588 "128-n32:64-S128-Fn32"
1589 : "e-m:w-p270:32:32-p271:32:32-p272:64:64-p:64:64-i32:32-"
1590 "i64:64-i128:128-n32:64-S128-Fn32",
1591 Triple.isOSBinFormatMachO() ? "_" : "");
1592}
1593
1594TargetInfo::BuiltinVaListKind
1595WindowsARM64TargetInfo::getBuiltinVaListKind() const {
1596 return TargetInfo::CharPtrBuiltinVaList;
1597}
1598
1599TargetInfo::CallingConvCheckResult
1600WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
1601 switch (CC) {
1602 case CC_X86VectorCall:
1603 if (getTriple().isWindowsArm64EC())
1604 return CCCR_OK;
1605 return CCCR_Ignore;
1606 case CC_X86StdCall:
1607 case CC_X86ThisCall:
1608 case CC_X86FastCall:
1609 return CCCR_Ignore;
1610 case CC_C:
1611 case CC_OpenCLKernel:
1612 case CC_PreserveMost:
1613 case CC_PreserveAll:
1614 case CC_PreserveNone:
1615 case CC_Swift:
1616 case CC_SwiftAsync:
1617 case CC_Win64:
1618 return CCCR_OK;
1619 default:
1620 return CCCR_Warning;
1621 }
1622}
1623
1624MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
1625 const TargetOptions &Opts)
1626 : WindowsARM64TargetInfo(Triple, Opts) {
1627 TheCXXABI.set(TargetCXXABI::Microsoft);
1628}
1629
1630void MicrosoftARM64TargetInfo::getVisualStudioDefines(const LangOptions &Opts,
1631 MacroBuilder &Builder) const {
1632 WindowsARM64TargetInfo::getVisualStudioDefines(Opts, Builder);
1633 if (getTriple().isWindowsArm64EC()) {
1634 Builder.defineMacro("_M_X64", "100");
1635 Builder.defineMacro("_M_AMD64", "100");
1636 Builder.defineMacro("_M_ARM64EC", "1");
1637 } else {
1638 Builder.defineMacro("_M_ARM64", "1");
1639 }
1640}
1641
1642TargetInfo::CallingConvKind
1643MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
1644 return CCK_MicrosoftWin64;
1645}
1646
1647unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize,
1648 bool HasNonWeakDef) const {
1649 unsigned Align =
1650 WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize, HasNonWeakDef);
1651
1652 // MSVC does size based alignment for arm64 based on alignment section in
1653 // below document, replicate that to keep alignment consistent with object
1654 // files compiled by MSVC.
1655 // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
1656 if (TypeSize >= 512) { // TypeSize >= 64 bytes
1657 Align = std::max(Align, 128u); // align type at least 16 bytes
1658 } else if (TypeSize >= 64) { // TypeSize >= 8 bytes
1659 Align = std::max(Align, 64u); // align type at least 8 bytes
1660 } else if (TypeSize >= 16) { // TypeSize >= 2 bytes
1661 Align = std::max(Align, 32u); // align type at least 4 bytes
1662 }
1663 return Align;
1664}
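// Worked example (editorial, not in the upstream file): a global of type
//   struct Buf { char data[64]; };
// is 512 bits, so the code above raises its minimum global alignment to 128
// bits (16 bytes), matching MSVC's size-based alignment for arm64.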
1665
1666MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
1667 const TargetOptions &Opts)
1668 : WindowsARM64TargetInfo(Triple, Opts) {
1669 TheCXXABI.set(TargetCXXABI::GenericAArch64);
1670}
1671
1672AppleMachOAArch64TargetInfo::AppleMachOAArch64TargetInfo(
1673 const llvm::Triple &Triple, const TargetOptions &Opts)
1674 : AppleMachOTargetInfo<AArch64leTargetInfo>(Triple, Opts) {}
1675
1676DarwinAArch64TargetInfo::DarwinAArch64TargetInfo(const llvm::Triple &Triple,
1677 const TargetOptions &Opts)
1678 : DarwinTargetInfo<AArch64leTargetInfo>(Triple, Opts) {
1679 Int64Type = SignedLongLong;
1680 if (getTriple().isArch32Bit())
1681 IntMaxType = SignedLongLong;
1682
1683 WCharType = SignedInt;
1684 UseSignedCharForObjCBool = false;
1685
1686 LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
1687 LongDoubleFormat = &llvm::APFloat::IEEEdouble();
1688
1689 UseZeroLengthBitfieldAlignment = false;
1690
1691 if (getTriple().isArch32Bit()) {
1692 UseBitFieldTypeAlignment = false;
1693 ZeroLengthBitfieldBoundary = 32;
1694 UseZeroLengthBitfieldAlignment = true;
1695 TheCXXABI.set(TargetCXXABI::WatchOS);
1696 } else
1697 TheCXXABI.set(TargetCXXABI::AppleARM64);
1698}
1699
1700static void getAppleMachOAArch64Defines(MacroBuilder &Builder,
1701 const LangOptions &Opts,
1702 const llvm::Triple &Triple) {
1703 Builder.defineMacro("__AARCH64_SIMD__");
1704 if (Triple.isArch32Bit())
1705 Builder.defineMacro("__ARM64_ARCH_8_32__");
1706 else
1707 Builder.defineMacro("__ARM64_ARCH_8__");
1708 Builder.defineMacro("__ARM_NEON__");
1709 Builder.defineMacro("__REGISTER_PREFIX__", "");
1710 Builder.defineMacro("__arm64", "1");
1711 Builder.defineMacro("__arm64__", "1");
1712
1713 if (Triple.isArm64e())
1714 Builder.defineMacro("__arm64e__", "1");
1715}
1716
1717void AppleMachOAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
1718 const llvm::Triple &Triple,
1719 MacroBuilder &Builder) const {
1720 getAppleMachOAArch64Defines(Builder, Opts, Triple);
1721 AppleMachOTargetInfo<AArch64leTargetInfo>::getOSDefines(Opts, Triple,
1722 Builder);
1723}
1724
1725void DarwinAArch64TargetInfo::getOSDefines(const LangOptions &Opts,
1726 const llvm::Triple &Triple,
1727 MacroBuilder &Builder) const {
1728 getAppleMachOAArch64Defines(Builder, Opts, Triple);
1729 DarwinTargetInfo<AArch64leTargetInfo>::getOSDefines(Opts, Triple, Builder);
1730}
1731
1732TargetInfo::BuiltinVaListKind
1733DarwinAArch64TargetInfo::getBuiltinVaListKind() const {
1734 return TargetInfo::CharPtrBuiltinVaList;
1735}
Defines the Diagnostic-related interfaces.
static unsigned matchAsmCCConstraint(const char *Name)
Definition: AArch64.cpp:1364
#define ARM_ACLE_VERSION(Y, Q, P)
static constexpr Builtin::Info BuiltinInfo[]
Definition: AArch64.cpp:29
static constexpr Builtin::Info BuiltinInfo[]
Definition: Builtins.cpp:32
Defines the clang::LangOptions interface.
Enumerates target-specific builtins in their own namespaces within namespace clang.
__device__ __2f16 float c
Concrete class used by the front-end to report problems and issues.
Definition: Diagnostic.h:231
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1493
@ NonLeaf
Sign the return address of functions that spill LR.
@ All
Sign the return address of all functions,.
@ BKey
Return address signing uses APIB key.
@ AKey
Return address signing uses APIA key.
Keeps track of the various options that can be enabled, which controls the dialect of C or C++ that i...
Definition: LangOptions.h:499
bool isSignReturnAddressWithAKey() const
Check if return address signing uses AKey.
Definition: LangOptions.h:746
bool hasSignReturnAddress() const
Check if return address signing is enabled.
Definition: LangOptions.h:741
bool isSignReturnAddressScopeAll() const
Check if leaf functions are also signed.
Definition: LangOptions.h:751
void set(Kind kind)
Definition: TargetCXXABI.h:76
LangOptions::SignReturnAddressScopeKind SignReturnAddr
Definition: TargetInfo.h:1411
LangOptions::SignReturnAddressKeyKind SignKey
Definition: TargetInfo.h:1412
Exposes information about the current target.
Definition: TargetInfo.h:220
TargetOptions & getTargetOpts() const
Retrieve the target options.
Definition: TargetInfo.h:311
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
Definition: TargetInfo.h:1262
const LangASMap * AddrSpaceMap
Definition: TargetInfo.h:250
unsigned HasAArch64SVETypes
Definition: TargetInfo.h:266
void resetDataLayout(StringRef DL, const char *UserLabelPrefix="")
Definition: TargetInfo.cpp:190
BuiltinVaListKind
The different kinds of __builtin_va_list types defined by the target implementation.
Definition: TargetInfo.h:318
@ AArch64ABIBuiltinVaList
__builtin_va_list as defined by the AArch64 ABI http://infocenter.arm.com/help/topic/com....
Definition: TargetInfo.h:327
@ CharPtrBuiltinVaList
typedef char* __builtin_va_list;
Definition: TargetInfo.h:320
const char * MCountName
Definition: TargetInfo.h:247
unsigned HasUnalignedAccess
Definition: TargetInfo.h:275
unsigned char MaxAtomicPromoteWidth
Definition: TargetInfo.h:244
virtual std::string convertConstraint(const char *&Constraint) const
Definition: TargetInfo.h:1239
unsigned char MaxAtomicInlineWidth
Definition: TargetInfo.h:244
TargetCXXABI TheCXXABI
Definition: TargetInfo.h:249
unsigned HasBuiltinMSVaList
Definition: TargetInfo.h:263
Options for controlling the target.
Definition: TargetOptions.h:26
llvm::EABI EABIVersion
The EABI version to use.
Definition: TargetOptions.h:48
std::vector< std::string > FeaturesAsWritten
The list of target specific features to enable or disable, as written on the command line.
Definition: TargetOptions.h:54
llvm::StringMap< bool > FeatureMap
The map of which features have been enabled disabled based on the command line.
Definition: TargetOptions.h:62
void getTargetDefinesARMV95A(const LangOptions &Opts, MacroBuilder &Builder) const
Definition: AArch64.cpp:381
std::string_view getClobbers() const override
Returns a string of target-specific clobbers, in LLVM format.
Definition: AArch64.cpp:1507
bool hasFeature(StringRef Feature) const override
Determine whether the given target has the given feature.
Definition: AArch64.cpp:738
std::string convertConstraint(const char *&Constraint) const override
Definition: AArch64.cpp:1388
ArrayRef< TargetInfo::GCCRegAlias > getGCCRegAliases() const override
Definition: AArch64.cpp:1359
bool hasBFloat16Type() const override
Determine whether the _BFloat16 type is supported on this target.
Definition: AArch64.cpp:1240
ParsedTargetAttr parseTargetAttr(StringRef Str) const override
Definition: AArch64.cpp:1141
void getTargetDefinesARMV96A(const LangOptions &Opts, MacroBuilder &Builder) const
Definition: AArch64.cpp:387
AArch64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
Definition: AArch64.cpp:132
ArrayRef< const char * > getGCCRegNames() const override
Definition: AArch64.cpp:1316
bool validateGlobalRegisterVariable(StringRef RegName, unsigned RegSize, bool &HasSizeMismatch) const override
Validate register name used for global register variables.
Definition: AArch64.cpp:233
bool handleTargetFeatures(std::vector< std::string > &Features, DiagnosticsEngine &Diags) override
Perform initialization based on the user configured set of features (e.g., +sse4).
Definition: AArch64.cpp:824
void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override
===-— Other target property query methods -----------------------—===//
Definition: AArch64.cpp:393
void setFeatureEnabled(llvm::StringMap< bool > &Features, StringRef Name, bool Enabled) const override
Enable or disable a specific target feature; the feature name must be valid.
Definition: AArch64.cpp:795
void getTargetDefinesARMV89A(const LangOptions &Opts, MacroBuilder &Builder) const
Definition: AArch64.cpp:345
void getTargetDefinesARMV92A(const LangOptions &Opts, MacroBuilder &Builder) const
Definition: AArch64.cpp:363
uint64_t getFMVPriority(ArrayRef< StringRef > Features) const override
Definition: AArch64.cpp:717
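The priority feeds AArch64 function multiversioning, where the resolver prefers the version whose feature set ranks highest among those the running CPU supports. A short illustration of the source-level feature names involved (function name hypothetical):

// AArch64 function multiversioning; the resolver picks the
// highest-priority version available on the running CPU.
__attribute__((target_version("sve2")))    int dot_kernel(void) { return 2; }
__attribute__((target_version("sve")))     int dot_kernel(void) { return 1; }
__attribute__((target_version("default"))) int dot_kernel(void) { return 0; }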
bool validateTarget(DiagnosticsEngine &Diags) const override
Check that the target is valid after it has been fully initialized.
Definition: AArch64.cpp:217
void getTargetDefinesARMV93A(const LangOptions &Opts, MacroBuilder &Builder) const
Definition: AArch64.cpp:369
bool setABI(const std::string &Name) override
Use the specified ABI.
Definition: AArch64.cpp:208
void getTargetDefinesARMV84A(const LangOptions &Opts, MacroBuilder &Builder) const
Definition: AArch64.cpp:310
bool isValidCPUName(StringRef Name) const override
Determine whether this TargetInfo supports the given CPU name.
Definition: AArch64.cpp:278
void fillValidCPUList(SmallVectorImpl< StringRef > &Values) const override
Fill a SmallVectorImpl with the valid values for setCPU.
Definition: AArch64.cpp:286
StringRef getABI() const override
Get the ABI currently in use.
Definition: AArch64.cpp:206
bool hasInt128Type() const override
Determine whether the __int128 type is supported on this target.
Definition: AArch64.cpp:1522
void getTargetDefinesARMV88A(const LangOptions &Opts, MacroBuilder &Builder) const
Definition: AArch64.cpp:339
bool validateBranchProtection(StringRef Spec, StringRef Arch, BranchProtectionInfo &BPI, StringRef &Err) const override
Determine if this TargetInfo supports the given branch protection specification.
Definition: AArch64.cpp:254
void getTargetDefinesARMV87A(const LangOptions &Opts, MacroBuilder &Builder) const
Definition: AArch64.cpp:333
void getTargetDefinesARMV9A(const LangOptions &Opts, MacroBuilder &Builder) const
Definition: AArch64.cpp:351
CallingConvCheckResult checkCallingConvention(CallingConv CC) const override
Determines whether a given calling convention is valid for the target.
Definition: AArch64.cpp:1245
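Two of the conventions this hook accepts correspond directly to source-level attributes; a small sketch with the spellings clang uses on AArch64 (function names hypothetical):

// aarch64_vector_pcs selects the vector PCS (CC_AArch64VectorCall);
// preserve_most maps to CC_PreserveMost.
__attribute__((aarch64_vector_pcs)) void simd_helper(void);
__attribute__((preserve_most)) void rarely_taken_path(void);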
void getTargetDefinesARMV91A(const LangOptions &Opts, MacroBuilder &Builder) const
Definition: AArch64.cpp:357
BuiltinVaListKind getBuiltinVaListKind() const override
Returns the kind of __builtin_va_list type that should be used with this target.
Definition: AArch64.cpp:1265
int getEHDataRegisterNumber(unsigned RegNo) const override
Return the register number that __builtin_eh_return_regno would return with the specified argument.
Definition: AArch64.cpp:1509
void getTargetDefinesARMV81A(const LangOptions &Opts, MacroBuilder &Builder) const
Definition: AArch64.cpp:291
ArrayRef< Builtin::Info > getTargetBuiltins() const override
Return information about target-specific builtins for the current primary target, and info about whic...
Definition: AArch64.cpp:700
void getTargetDefinesARMV86A(const LangOptions &Opts, MacroBuilder &Builder) const
Definition: AArch64.cpp:323
bool setCPU(const std::string &Name) override
Target the specified CPU.
Definition: AArch64.cpp:282
std::optional< std::pair< unsigned, unsigned > > getVScaleRange(const LangOptions &LangOpts) const override
Returns the target-specific minimum and maximum values for the vscale range.
Definition: AArch64.cpp:706
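On AArch64 the range typically comes from the SVE vector-bits setting; for example, compiling with -msve-vector-bits=256 pins vscale to 256/128 = 2, which is what allows fixed-length SVE types like the sketch below (built on the ACLE arm_sve.h intrinsics; the typedef and function names are illustrative).

#include <arm_sve.h>

// Assumes e.g. -march=armv8-a+sve -msve-vector-bits=256.
typedef svint32_t fixed_i32 __attribute__((arm_sve_vector_bits(256)));

fixed_i32 add_vectors(fixed_i32 a, fixed_i32 b) {
  return svadd_s32_x(svptrue_b32(), a, b);  // element-wise add, all lanes active
}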
void getTargetDefinesARMV94A(const LangOptions &Opts, MacroBuilder &Builder) const
Definition: AArch64.cpp:375
bool validateConstraintModifier(StringRef Constraint, char Modifier, unsigned Size, std::string &SuggestedModifier) const override
Definition: AArch64.cpp:1474
bool isCLZForZeroUndef() const override
The __builtin_clz* and __builtin_ctz* built-in functions are specified to have undefined results for ...
Definition: AArch64.cpp:1263
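Because AArch64's CLZ instruction is defined for a zero input (it returns the operand width), this override reports zero as well defined; portable code should still guard the input, and on AArch64 the guard can typically be folded away. A small sketch:

// Portable pattern: guard zero explicitly. On AArch64 the backend knows
// CLZ(0) == 32, so the select usually folds into a single clz.
unsigned leading_zeros32(unsigned v) {
  return v ? (unsigned)__builtin_clz(v) : 32u;
}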
void getTargetDefinesARMV82A(const LangOptions &Opts, MacroBuilder &Builder) const
Definition: AArch64.cpp:296
void getTargetDefinesARMV83A(const LangOptions &Opts, MacroBuilder &Builder) const
Definition: AArch64.cpp:302
bool validateCpuSupports(StringRef FeatureStr) const override
Definition: AArch64.cpp:728
bool validatePointerAuthKey(const llvm::APSInt &value) const override
Determine whether the given pointer-authentication key is valid.
Definition: AArch64.cpp:1517
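The valid keys are the four AArch64 pointer-authentication keys, numbered 0 through 3 (IA, IB, DA, DB). A hedged sketch using the <ptrauth.h> names; it assumes a target or configuration with the pointer-auth intrinsics enabled (e.g. arm64e or -fptrauth-intrinsics).

#include <ptrauth.h>

// Sign a code pointer with the IA key (key value 0); a key outside 0..3
// is rejected by validatePointerAuthKey.
void *sign_code_ptr(void *p) {
  return __builtin_ptrauth_sign_unauthenticated(p, ptrauth_key_asia, 0);
}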
bool validateAsmConstraint(const char *&Name, TargetInfo::ConstraintInfo &Info) const override
Definition: AArch64.cpp:1409
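For reference, a user-level inline-asm sketch exercising the AArch64-specific "w" constraint (any FP/SIMD register); the %d operand modifier prints the 64-bit register form. Constraint letters beyond the generic ones are what this override recognizes.

// "w" asks for an FP/SIMD register; %dN prints it as d0..d31.
double fadd_via_asm(double a, double b) {
  double r;
  __asm__("fadd %d0, %d1, %d2" : "=w"(r) : "w"(a), "w"(b));
  return r;
}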
bool doesFeatureAffectCodeGen(StringRef Name) const override
Returns true if the feature has an impact on target code generation.
Definition: AArch64.cpp:721
void getTargetDefinesARMV85A(const LangOptions &Opts, MacroBuilder &Builder) const
Definition: AArch64.cpp:316
void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override
===--- Other target property query methods -----------------------===//
Definition: AArch64.cpp:1553
AArch64beTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
Definition: AArch64.cpp:1549
AArch64leTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
Definition: AArch64.cpp:1524
void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override
===--- Other target property query methods -----------------------===//
Definition: AArch64.cpp:1543
void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override
Definition: AArch64.cpp:1717
AppleMachOAArch64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
Definition: AArch64.cpp:1672
void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override
Definition: OSTargets.h:48
DarwinAArch64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
Definition: AArch64.cpp:1676
void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override
Definition: AArch64.cpp:1725
BuiltinVaListKind getBuiltinVaListKind() const override
Definition: AArch64.cpp:1733
void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple, MacroBuilder &Builder) const override
Definition: OSTargets.h:71
unsigned getMinGlobalAlign(uint64_t TypeSize, bool HasNonWeakDef) const override
Definition: AArch64.cpp:1647
MicrosoftARM64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
Definition: AArch64.cpp:1624
TargetInfo::CallingConvKind getCallingConvKind(bool ClangABICompat4) const override
Definition: AArch64.cpp:1643
void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override
Definition: AArch64.cpp:1630
MinGWARM64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
Definition: AArch64.cpp:1666
void getTargetDefines(const LangOptions &Opts, MacroBuilder &Builder) const override
Definition: OSTargets.h:30
BuiltinVaListKind getBuiltinVaListKind() const override
Definition: AArch64.cpp:1595
WindowsARM64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
Definition: AArch64.cpp:1567
CallingConvCheckResult checkCallingConvention(CallingConv CC) const override
Definition: AArch64.cpp:1600
Defines the clang::TargetInfo interface.
void getAppleMachOAArch64Defines(MacroBuilder &Builder, const LangOptions &Opts, const llvm::Triple &Triple)
Definition: AArch64.cpp:1700
static const unsigned ARM64AddrSpaceMap[]
Definition: AArch64.h:26
The JSON file list parser is used to communicate input to InstallAPI.
CallingConv
CallingConv - Specifies the calling convention that a function uses.
Definition: Specifiers.h:278
@ CC_Swift
Definition: Specifiers.h:293
@ CC_OpenCLKernel
Definition: Specifiers.h:292
@ CC_PreserveMost
Definition: Specifiers.h:295
@ CC_Win64
Definition: Specifiers.h:285
@ CC_X86ThisCall
Definition: Specifiers.h:282
@ CC_AArch64VectorCall
Definition: Specifiers.h:297
@ CC_PreserveNone
Definition: Specifiers.h:301
@ CC_C
Definition: Specifiers.h:279
@ CC_SwiftAsync
Definition: Specifiers.h:294
@ CC_X86VectorCall
Definition: Specifiers.h:283
@ CC_AArch64SVEPCS
Definition: Specifiers.h:298
@ CC_X86StdCall
Definition: Specifiers.h:280
@ CC_PreserveAll
Definition: Specifiers.h:296
@ CC_X86FastCall
Definition: Specifiers.h:281
Contains information gathered from parsing the contents of TargetAttr.
Definition: TargetInfo.h:58
unsigned UseZeroLengthBitfieldAlignment
Whether zero-length bit-fields (e.g., int : 0;) force alignment of the next bit-field.
Definition: TargetInfo.h:185
const llvm::fltSemantics * LongDoubleFormat
Definition: TargetInfo.h:141
std::optional< unsigned > BitIntMaxAlign
Definition: TargetInfo.h:104
unsigned UseBitFieldTypeAlignment
Control whether the alignment of bit-field types is respected when laying out structures.
Definition: TargetInfo.h:176
const llvm::fltSemantics * BFloat16Format
Definition: TargetInfo.h:140