15#include "llvm/IR/InlineAsm.h"
16#include "llvm/IR/IntrinsicsPowerPC.h"
17#include "llvm/Support/ScopedPrinter.h"
20using namespace CodeGen;
29 raw_svector_ostream AsmOS(
Asm);
30 llvm::IntegerType *RetType = CGF.
Int32Ty;
33 case clang::PPC::BI__builtin_ppc_ldarx:
37 case clang::PPC::BI__builtin_ppc_lwarx:
41 case clang::PPC::BI__builtin_ppc_lharx:
45 case clang::PPC::BI__builtin_ppc_lbarx:
50 llvm_unreachable(
"Expected only PowerPC load reserve intrinsics");
53 AsmOS <<
"$0, ${1:y}";
55 std::string Constraints =
"=r,*Z,~{memory}";
57 if (!MachineClobbers.empty()) {
59 Constraints += MachineClobbers;
63 llvm::FunctionType *FTy = llvm::FunctionType::get(RetType, {PtrType},
false);
66 llvm::InlineAsm::get(FTy,
Asm, Constraints,
true);
67 llvm::CallInst *CI = CGF.
Builder.CreateCall(IA, {
Addr});
69 0, Attribute::get(CGF.
getLLVMContext(), Attribute::ElementType, RetType));
87 Intrinsic::ID ID = Intrinsic::not_intrinsic;
89#include "llvm/TargetParser/PPCTargetParser.def"
90 auto GenAIXPPCBuiltinCpuExpr = [&](
unsigned SupportMethod,
unsigned FieldIdx,
91 unsigned Mask, CmpInst::Predicate CompOp,
92 unsigned OpValue) ->
Value * {
93 if (SupportMethod == BUILTIN_PPC_FALSE)
96 if (SupportMethod == BUILTIN_PPC_TRUE)
99 assert(SupportMethod <= SYS_CALL &&
"Invalid value for SupportMethod.");
101 llvm::Value *FieldValue =
nullptr;
102 if (SupportMethod == USE_SYS_CONF) {
103 llvm::Type *STy = llvm::StructType::get(PPC_SYSTEMCONFIG_TYPE);
104 llvm::Constant *SysConf =
108 llvm::Value *Idxs[] = {ConstantInt::get(
Int32Ty, 0),
109 ConstantInt::get(
Int32Ty, FieldIdx)};
114 }
else if (SupportMethod == SYS_CALL) {
115 llvm::FunctionType *FTy =
117 llvm::FunctionCallee
Func =
124 "SupportMethod value is not defined in PPCTargetParser.def.");
127 FieldValue =
Builder.CreateAnd(FieldValue, Mask);
129 llvm::Type *ValueType = FieldValue->getType();
130 bool IsValueType64Bit = ValueType->isIntegerTy(64);
132 (IsValueType64Bit || ValueType->isIntegerTy(32)) &&
133 "Only 32/64-bit integers are supported in GenAIXPPCBuiltinCpuExpr().");
141 default:
return nullptr;
143 case Builtin::BI__builtin_cpu_is: {
145 StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
148 typedef std::tuple<unsigned, unsigned, unsigned, unsigned>
CPUInfo;
150 auto [LinuxSupportMethod, LinuxIDValue, AIXSupportMethod, AIXIDValue] =
151 static_cast<CPUInfo>(StringSwitch<CPUInfo>(CPUStr)
152#define PPC_CPU(NAME, Linux_SUPPORT_METHOD, LinuxID, AIX_SUPPORT_METHOD, \
154 .Case(NAME, {Linux_SUPPORT_METHOD, LinuxID, AIX_SUPPORT_METHOD, AIXID})
155#include "llvm/TargetParser/PPCTargetParser.def"
156 .Default({BUILTIN_PPC_UNSUPPORTED, 0,
157 BUILTIN_PPC_UNSUPPORTED, 0}));
159 if (Triple.isOSAIX()) {
160 assert((AIXSupportMethod != BUILTIN_PPC_UNSUPPORTED) &&
161 "Invalid CPU name. Missed by SemaChecking?");
162 return GenAIXPPCBuiltinCpuExpr(AIXSupportMethod, AIX_SYSCON_IMPL_IDX, 0,
163 ICmpInst::ICMP_EQ, AIXIDValue);
166 assert(Triple.isOSLinux() &&
167 "__builtin_cpu_is() is only supported for AIX and Linux.");
169 assert((LinuxSupportMethod != BUILTIN_PPC_UNSUPPORTED) &&
170 "Invalid CPU name. Missed by SemaChecking?");
172 if (LinuxSupportMethod == BUILTIN_PPC_FALSE)
175 Value *Op0 = llvm::ConstantInt::get(
Int32Ty, PPC_FAWORD_CPUID);
177 Value *TheCall =
Builder.CreateCall(F, {Op0},
"cpu_is");
178 return Builder.CreateICmpEQ(TheCall,
179 llvm::ConstantInt::get(
Int32Ty, LinuxIDValue));
181 case Builtin::BI__builtin_cpu_supports: {
184 StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
185 if (Triple.isOSAIX()) {
189 auto [SupportMethod, FieldIdx, Mask, CompOp,
Value] =
190 static_cast<CPUSupportType
>(StringSwitch<CPUSupportType>(CPUStr)
191#define PPC_AIX_FEATURE(NAME, DESC, SUPPORT_METHOD, INDEX, MASK, COMP_OP, \
193 .Case(NAME, {SUPPORT_METHOD, INDEX, MASK, COMP_OP, VALUE})
194#include "llvm/TargetParser/PPCTargetParser.def"
195 .Default({BUILTIN_PPC_FALSE, 0, 0,
196 CmpInst::Predicate(), 0}));
197 return GenAIXPPCBuiltinCpuExpr(SupportMethod, FieldIdx, Mask, CompOp,
201 assert(Triple.isOSLinux() &&
202 "__builtin_cpu_supports() is only supported for AIX and Linux.");
203 auto [FeatureWord, BitMask] =
204 StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
205#define
PPC_LNX_FEATURE(Name, Description, EnumName, Bitmask, FA_WORD) \
206 .Case(Name, {FA_WORD, Bitmask})
207#include
"llvm/TargetParser/PPCTargetParser.def"
211 Value *Op0 = llvm::ConstantInt::get(
Int32Ty, FeatureWord);
213 Value *TheCall =
Builder.CreateCall(F, {Op0},
"cpu_supports");
215 Builder.CreateAnd(TheCall, llvm::ConstantInt::get(
Int32Ty, BitMask));
216 return Builder.CreateICmpNE(Mask, llvm::Constant::getNullValue(
Int32Ty));
217#undef PPC_FAWORD_HWCAP
218#undef PPC_FAWORD_HWCAP2
219#undef PPC_FAWORD_CPUID
224 case PPC::BI__builtin_ppc_get_timebase:
228 case PPC::BI__builtin_altivec_lvx:
229 case PPC::BI__builtin_altivec_lvxl:
230 case PPC::BI__builtin_altivec_lvebx:
231 case PPC::BI__builtin_altivec_lvehx:
232 case PPC::BI__builtin_altivec_lvewx:
233 case PPC::BI__builtin_altivec_lvsl:
234 case PPC::BI__builtin_altivec_lvsr:
235 case PPC::BI__builtin_vsx_lxvd2x:
236 case PPC::BI__builtin_vsx_lxvw4x:
237 case PPC::BI__builtin_vsx_lxvd2x_be:
238 case PPC::BI__builtin_vsx_lxvw4x_be:
239 case PPC::BI__builtin_vsx_lxvl:
240 case PPC::BI__builtin_vsx_lxvll:
245 if (!(BuiltinID == PPC::BI__builtin_vsx_lxvl ||
246 BuiltinID == PPC::BI__builtin_vsx_lxvll)) {
252 default: llvm_unreachable(
"Unsupported ld/lvsl/lvsr intrinsic!");
253 case PPC::BI__builtin_altivec_lvx:
254 ID = Intrinsic::ppc_altivec_lvx;
256 case PPC::BI__builtin_altivec_lvxl:
257 ID = Intrinsic::ppc_altivec_lvxl;
259 case PPC::BI__builtin_altivec_lvebx:
260 ID = Intrinsic::ppc_altivec_lvebx;
262 case PPC::BI__builtin_altivec_lvehx:
263 ID = Intrinsic::ppc_altivec_lvehx;
265 case PPC::BI__builtin_altivec_lvewx:
266 ID = Intrinsic::ppc_altivec_lvewx;
268 case PPC::BI__builtin_altivec_lvsl:
269 ID = Intrinsic::ppc_altivec_lvsl;
271 case PPC::BI__builtin_altivec_lvsr:
272 ID = Intrinsic::ppc_altivec_lvsr;
274 case PPC::BI__builtin_vsx_lxvd2x:
275 ID = Intrinsic::ppc_vsx_lxvd2x;
277 case PPC::BI__builtin_vsx_lxvw4x:
278 ID = Intrinsic::ppc_vsx_lxvw4x;
280 case PPC::BI__builtin_vsx_lxvd2x_be:
281 ID = Intrinsic::ppc_vsx_lxvd2x_be;
283 case PPC::BI__builtin_vsx_lxvw4x_be:
284 ID = Intrinsic::ppc_vsx_lxvw4x_be;
286 case PPC::BI__builtin_vsx_lxvl:
287 ID = Intrinsic::ppc_vsx_lxvl;
289 case PPC::BI__builtin_vsx_lxvll:
290 ID = Intrinsic::ppc_vsx_lxvll;
294 return Builder.CreateCall(F, Ops,
"");
298 case PPC::BI__builtin_altivec_stvx:
299 case PPC::BI__builtin_altivec_stvxl:
300 case PPC::BI__builtin_altivec_stvebx:
301 case PPC::BI__builtin_altivec_stvehx:
302 case PPC::BI__builtin_altivec_stvewx:
303 case PPC::BI__builtin_vsx_stxvd2x:
304 case PPC::BI__builtin_vsx_stxvw4x:
305 case PPC::BI__builtin_vsx_stxvd2x_be:
306 case PPC::BI__builtin_vsx_stxvw4x_be:
307 case PPC::BI__builtin_vsx_stxvl:
308 case PPC::BI__builtin_vsx_stxvll:
314 if (!(BuiltinID == PPC::BI__builtin_vsx_stxvl ||
315 BuiltinID == PPC::BI__builtin_vsx_stxvll)) {
321 default: llvm_unreachable(
"Unsupported st intrinsic!");
322 case PPC::BI__builtin_altivec_stvx:
323 ID = Intrinsic::ppc_altivec_stvx;
325 case PPC::BI__builtin_altivec_stvxl:
326 ID = Intrinsic::ppc_altivec_stvxl;
328 case PPC::BI__builtin_altivec_stvebx:
329 ID = Intrinsic::ppc_altivec_stvebx;
331 case PPC::BI__builtin_altivec_stvehx:
332 ID = Intrinsic::ppc_altivec_stvehx;
334 case PPC::BI__builtin_altivec_stvewx:
335 ID = Intrinsic::ppc_altivec_stvewx;
337 case PPC::BI__builtin_vsx_stxvd2x:
338 ID = Intrinsic::ppc_vsx_stxvd2x;
340 case PPC::BI__builtin_vsx_stxvw4x:
341 ID = Intrinsic::ppc_vsx_stxvw4x;
343 case PPC::BI__builtin_vsx_stxvd2x_be:
344 ID = Intrinsic::ppc_vsx_stxvd2x_be;
346 case PPC::BI__builtin_vsx_stxvw4x_be:
347 ID = Intrinsic::ppc_vsx_stxvw4x_be;
349 case PPC::BI__builtin_vsx_stxvl:
350 ID = Intrinsic::ppc_vsx_stxvl;
352 case PPC::BI__builtin_vsx_stxvll:
353 ID = Intrinsic::ppc_vsx_stxvll;
357 return Builder.CreateCall(F, Ops,
"");
359 case PPC::BI__builtin_vsx_ldrmb: {
365 int64_t NumBytes = cast<ConstantInt>(Op1)->getZExtValue();
370 if (NumBytes == 16) {
378 for (
int Idx = 0; Idx < 16; Idx++)
379 RevMask.push_back(15 - Idx);
380 return Builder.CreateShuffleVector(LD, LD, RevMask);
384 llvm::Function *Lvs =
CGM.
getIntrinsic(IsLE ? Intrinsic::ppc_altivec_lvsr
385 : Intrinsic::ppc_altivec_lvsl);
386 llvm::Function *Vperm =
CGM.
getIntrinsic(Intrinsic::ppc_altivec_vperm);
388 Int8Ty, Op0, ConstantInt::get(Op1->
getType(), NumBytes - 1));
393 Op0 = IsLE ? HiLd : LoLd;
394 Op1 = IsLE ? LoLd : HiLd;
395 Value *AllElts =
Builder.CreateCall(Vperm, {Op0, Op1, Mask1},
"shuffle1");
396 Constant *
Zero = llvm::Constant::getNullValue(IsLE ? ResTy : AllElts->
getType());
400 for (
int Idx = 0; Idx < 16; Idx++) {
401 int Val = (NumBytes - Idx - 1 >= 0) ? (NumBytes - Idx - 1)
402 : 16 - (NumBytes - Idx);
403 Consts.push_back(Val);
405 return Builder.CreateShuffleVector(
Builder.CreateBitCast(AllElts, ResTy),
409 for (
int Idx = 0; Idx < 16; Idx++)
410 Consts.push_back(
Builder.getInt8(NumBytes + Idx));
411 Value *Mask2 = ConstantVector::get(Consts);
413 Builder.CreateCall(Vperm, {Zero, AllElts, Mask2},
"shuffle2"), ResTy);
415 case PPC::BI__builtin_vsx_strmb: {
419 int64_t NumBytes = cast<ConstantInt>(Op1)->getZExtValue();
421 auto StoreSubVec = [&](
unsigned Width,
unsigned Offset,
unsigned EltNo) {
428 for (
int Idx = 0; Idx < 16; Idx++)
429 RevMask.push_back(15 - Idx);
430 StVec =
Builder.CreateShuffleVector(Op2, Op2, RevMask);
436 unsigned NumElts = 0;
439 llvm_unreachable(
"width for stores must be a power of 2");
458 Op2, llvm::FixedVectorType::get(ConvTy, NumElts));
462 if (IsLE && Width > 1) {
464 Elt =
Builder.CreateCall(F, Elt);
470 unsigned RemainingBytes = NumBytes;
473 return StoreSubVec(16, 0, 0);
475 Result = StoreSubVec(8, NumBytes - 8, IsLE ? 0 : 1);
479 if (RemainingBytes >= 4) {
480 Result = StoreSubVec(4, NumBytes - Stored - 4,
481 IsLE ? (Stored >> 2) : 3 - (Stored >> 2));
485 if (RemainingBytes >= 2) {
486 Result = StoreSubVec(2, NumBytes - Stored - 2,
487 IsLE ? (Stored >> 1) : 7 - (Stored >> 1));
493 StoreSubVec(1, NumBytes - Stored - 1, IsLE ? Stored : 15 - Stored);
497 case PPC::BI__builtin_vsx_xvsqrtsp:
498 case PPC::BI__builtin_vsx_xvsqrtdp: {
501 if (
Builder.getIsFPConstrained()) {
503 Intrinsic::experimental_constrained_sqrt, ResultType);
504 return Builder.CreateConstrainedFPCall(F,
X);
511 case PPC::BI__builtin_altivec_vclzb:
512 case PPC::BI__builtin_altivec_vclzh:
513 case PPC::BI__builtin_altivec_vclzw:
514 case PPC::BI__builtin_altivec_vclzd: {
517 Value *Undef = ConstantInt::get(
Builder.getInt1Ty(),
false);
519 return Builder.CreateCall(F, {
X, Undef});
521 case PPC::BI__builtin_altivec_vctzb:
522 case PPC::BI__builtin_altivec_vctzh:
523 case PPC::BI__builtin_altivec_vctzw:
524 case PPC::BI__builtin_altivec_vctzd: {
527 Value *Undef = ConstantInt::get(
Builder.getInt1Ty(),
false);
529 return Builder.CreateCall(F, {
X, Undef});
531 case PPC::BI__builtin_altivec_vinsd:
532 case PPC::BI__builtin_altivec_vinsw:
533 case PPC::BI__builtin_altivec_vinsd_elt:
534 case PPC::BI__builtin_altivec_vinsw_elt: {
540 bool IsUnaligned = (BuiltinID == PPC::BI__builtin_altivec_vinsw ||
541 BuiltinID == PPC::BI__builtin_altivec_vinsd);
543 bool Is32bit = (BuiltinID == PPC::BI__builtin_altivec_vinsw ||
544 BuiltinID == PPC::BI__builtin_altivec_vinsw_elt);
547 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
549 "Third Arg to vinsw/vinsd intrinsic must be a constant integer!");
553 int ValidMaxValue = 0;
555 ValidMaxValue = (Is32bit) ? 12 : 8;
557 ValidMaxValue = (Is32bit) ? 3 : 1;
560 int64_t ConstArg = ArgCI->getSExtValue();
563 std::string RangeErrMsg = IsUnaligned ?
"byte" :
"element";
564 RangeErrMsg +=
" number " + llvm::to_string(ConstArg);
565 RangeErrMsg +=
" is outside of the valid range [0, ";
566 RangeErrMsg += llvm::to_string(ValidMaxValue) +
"]";
569 if (ConstArg < 0 || ConstArg > ValidMaxValue)
574 ConstArg *= Is32bit ? 4 : 8;
577 ConstArg = (Is32bit ? 12 : 8) - ConstArg;
580 ID = Is32bit ? Intrinsic::ppc_altivec_vinsw : Intrinsic::ppc_altivec_vinsd;
581 Op2 = ConstantInt::getSigned(
Int32Ty, ConstArg);
585 ?
Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(
Int32Ty, 4))
587 llvm::FixedVectorType::get(
Int64Ty, 2));
591 case PPC::BI__builtin_altivec_vadduqm:
592 case PPC::BI__builtin_altivec_vsubuqm: {
595 llvm::Type *Int128Ty = llvm::IntegerType::get(
getLLVMContext(), 128);
596 Op0 =
Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int128Ty, 1));
597 Op1 =
Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int128Ty, 1));
598 if (BuiltinID == PPC::BI__builtin_altivec_vadduqm)
599 return Builder.CreateAdd(Op0, Op1,
"vadduqm");
601 return Builder.CreateSub(Op0, Op1,
"vsubuqm");
603 case PPC::BI__builtin_altivec_vaddcuq_c:
604 case PPC::BI__builtin_altivec_vsubcuq_c: {
608 llvm::Type *V1I128Ty = llvm::FixedVectorType::get(
610 Ops.push_back(
Builder.CreateBitCast(Op0, V1I128Ty));
611 Ops.push_back(
Builder.CreateBitCast(Op1, V1I128Ty));
612 ID = (BuiltinID == PPC::BI__builtin_altivec_vaddcuq_c)
613 ? Intrinsic::ppc_altivec_vaddcuq
614 : Intrinsic::ppc_altivec_vsubcuq;
617 case PPC::BI__builtin_altivec_vaddeuqm_c:
618 case PPC::BI__builtin_altivec_vaddecuq_c:
619 case PPC::BI__builtin_altivec_vsubeuqm_c:
620 case PPC::BI__builtin_altivec_vsubecuq_c: {
625 llvm::Type *V1I128Ty = llvm::FixedVectorType::get(
627 Ops.push_back(
Builder.CreateBitCast(Op0, V1I128Ty));
628 Ops.push_back(
Builder.CreateBitCast(Op1, V1I128Ty));
629 Ops.push_back(
Builder.CreateBitCast(Op2, V1I128Ty));
632 llvm_unreachable(
"Unsupported intrinsic!");
633 case PPC::BI__builtin_altivec_vaddeuqm_c:
634 ID = Intrinsic::ppc_altivec_vaddeuqm;
636 case PPC::BI__builtin_altivec_vaddecuq_c:
637 ID = Intrinsic::ppc_altivec_vaddecuq;
639 case PPC::BI__builtin_altivec_vsubeuqm_c:
640 ID = Intrinsic::ppc_altivec_vsubeuqm;
642 case PPC::BI__builtin_altivec_vsubecuq_c:
643 ID = Intrinsic::ppc_altivec_vsubecuq;
648 case PPC::BI__builtin_ppc_rldimi:
649 case PPC::BI__builtin_ppc_rlwimi: {
656 if (BuiltinID == PPC::BI__builtin_ppc_rldimi &&
666 ? Intrinsic::ppc_rldimi
667 : Intrinsic::ppc_rlwimi),
668 {Op0, Op1, Op2, Op3});
670 case PPC::BI__builtin_ppc_rlwnm: {
677 case PPC::BI__builtin_ppc_poppar4:
678 case PPC::BI__builtin_ppc_poppar8: {
680 llvm::Type *ArgType = Op0->
getType();
686 if (
Result->getType() != ResultType)
691 case PPC::BI__builtin_ppc_cmpb: {
697 return Builder.CreateCall(F, {Op0, Op1},
"cmpb");
717 Constant *ShiftAmt = ConstantInt::get(
Int64Ty, 32);
727 return Builder.CreateOr(ResLo, ResHi);
730 case PPC::BI__builtin_vsx_xvcpsgnsp:
731 case PPC::BI__builtin_vsx_xvcpsgndp: {
735 ID = Intrinsic::copysign;
737 return Builder.CreateCall(F, {
X, Y});
740 case PPC::BI__builtin_vsx_xvrspip:
741 case PPC::BI__builtin_vsx_xvrdpip:
742 case PPC::BI__builtin_vsx_xvrdpim:
743 case PPC::BI__builtin_vsx_xvrspim:
744 case PPC::BI__builtin_vsx_xvrdpi:
745 case PPC::BI__builtin_vsx_xvrspi:
746 case PPC::BI__builtin_vsx_xvrdpic:
747 case PPC::BI__builtin_vsx_xvrspic:
748 case PPC::BI__builtin_vsx_xvrdpiz:
749 case PPC::BI__builtin_vsx_xvrspiz: {
752 if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
753 BuiltinID == PPC::BI__builtin_vsx_xvrspim)
754 ID =
Builder.getIsFPConstrained()
755 ? Intrinsic::experimental_constrained_floor
757 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
758 BuiltinID == PPC::BI__builtin_vsx_xvrspi)
759 ID =
Builder.getIsFPConstrained()
760 ? Intrinsic::experimental_constrained_round
762 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
763 BuiltinID == PPC::BI__builtin_vsx_xvrspic)
764 ID =
Builder.getIsFPConstrained()
765 ? Intrinsic::experimental_constrained_rint
767 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
768 BuiltinID == PPC::BI__builtin_vsx_xvrspip)
769 ID =
Builder.getIsFPConstrained()
770 ? Intrinsic::experimental_constrained_ceil
772 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
773 BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
774 ID =
Builder.getIsFPConstrained()
775 ? Intrinsic::experimental_constrained_trunc
778 return Builder.getIsFPConstrained() ?
Builder.CreateConstrainedFPCall(F,
X)
783 case PPC::BI__builtin_vsx_xvabsdp:
784 case PPC::BI__builtin_vsx_xvabssp: {
792 case PPC::BI__builtin_ppc_recipdivf:
793 case PPC::BI__builtin_ppc_recipdivd:
794 case PPC::BI__builtin_ppc_rsqrtf:
795 case PPC::BI__builtin_ppc_rsqrtd: {
796 FastMathFlags FMF =
Builder.getFastMathFlags();
797 Builder.getFastMathFlags().setFast();
801 if (BuiltinID == PPC::BI__builtin_ppc_recipdivf ||
802 BuiltinID == PPC::BI__builtin_ppc_recipdivd) {
805 Builder.getFastMathFlags() &= (FMF);
808 auto *One = ConstantFP::get(ResultType, 1.0);
811 Builder.getFastMathFlags() &= (FMF);
814 case PPC::BI__builtin_ppc_alignx: {
817 ConstantInt *AlignmentCI = cast<ConstantInt>(Op0);
818 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
819 AlignmentCI = ConstantInt::get(AlignmentCI->getIntegerType(),
820 llvm::Value::MaximumAlignment);
824 AlignmentCI,
nullptr);
827 case PPC::BI__builtin_ppc_rdlam: {
831 llvm::Type *Ty = Op0->
getType();
832 Value *ShiftAmt =
Builder.CreateIntCast(Op1, Ty,
false);
834 Value *Rotate =
Builder.CreateCall(F, {Op0, Op0, ShiftAmt});
835 return Builder.CreateAnd(Rotate, Op2);
837 case PPC::BI__builtin_ppc_load2r: {
844 case PPC::BI__builtin_ppc_fnmsub:
845 case PPC::BI__builtin_ppc_fnmsubs:
846 case PPC::BI__builtin_vsx_xvmaddadp:
847 case PPC::BI__builtin_vsx_xvmaddasp:
848 case PPC::BI__builtin_vsx_xvnmaddadp:
849 case PPC::BI__builtin_vsx_xvnmaddasp:
850 case PPC::BI__builtin_vsx_xvmsubadp:
851 case PPC::BI__builtin_vsx_xvmsubasp:
852 case PPC::BI__builtin_vsx_xvnmsubadp:
853 case PPC::BI__builtin_vsx_xvnmsubasp: {
859 if (
Builder.getIsFPConstrained())
860 F =
CGM.
getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
864 case PPC::BI__builtin_vsx_xvmaddadp:
865 case PPC::BI__builtin_vsx_xvmaddasp:
866 if (
Builder.getIsFPConstrained())
867 return Builder.CreateConstrainedFPCall(F, {
X, Y, Z});
869 return Builder.CreateCall(F, {
X, Y, Z});
870 case PPC::BI__builtin_vsx_xvnmaddadp:
871 case PPC::BI__builtin_vsx_xvnmaddasp:
872 if (
Builder.getIsFPConstrained())
874 Builder.CreateConstrainedFPCall(F, {X, Y, Z}),
"neg");
876 return Builder.CreateFNeg(
Builder.CreateCall(F, {X, Y, Z}),
"neg");
877 case PPC::BI__builtin_vsx_xvmsubadp:
878 case PPC::BI__builtin_vsx_xvmsubasp:
879 if (
Builder.getIsFPConstrained())
880 return Builder.CreateConstrainedFPCall(
881 F, {
X, Y,
Builder.CreateFNeg(Z,
"neg")});
884 case PPC::BI__builtin_ppc_fnmsub:
885 case PPC::BI__builtin_ppc_fnmsubs:
886 case PPC::BI__builtin_vsx_xvnmsubadp:
887 case PPC::BI__builtin_vsx_xvnmsubasp:
888 if (
Builder.getIsFPConstrained())
890 Builder.CreateConstrainedFPCall(
891 F, {X, Y, Builder.CreateFNeg(Z,
"neg")}),
897 llvm_unreachable(
"Unknown FMA operation");
901 case PPC::BI__builtin_vsx_insertword: {
909 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
911 "Third arg to xxinsertw intrinsic must be constant integer");
912 const int64_t MaxIndex = 12;
913 int64_t Index = std::clamp(ArgCI->getSExtValue(), (int64_t)0, MaxIndex);
924 Op1 =
Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(
Int64Ty, 2));
928 Op0 =
Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(
Int64Ty, 2));
929 Op0 =
Builder.CreateShuffleVector(Op0, Op0, {1, 0});
932 Index = MaxIndex - Index;
936 Op0 =
Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(
Int32Ty, 4));
937 Op2 = ConstantInt::getSigned(
Int32Ty, Index);
938 return Builder.CreateCall(F, {Op0, Op1, Op2});
941 case PPC::BI__builtin_vsx_extractuword: {
947 Op0 =
Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(
Int64Ty, 2));
951 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op1);
953 "Second Arg to xxextractuw intrinsic must be a constant integer!");
954 const int64_t MaxIndex = 12;
955 int64_t Index = std::clamp(ArgCI->getSExtValue(), (int64_t)0, MaxIndex);
959 Index = MaxIndex - Index;
960 Op1 = ConstantInt::getSigned(
Int32Ty, Index);
969 Op1 = ConstantInt::getSigned(
Int32Ty, Index);
970 return Builder.CreateCall(F, {Op0, Op1});
974 case PPC::BI__builtin_vsx_xxpermdi: {
978 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
979 assert(ArgCI &&
"Third arg must be constant integer!");
981 unsigned Index = ArgCI->getZExtValue();
982 Op0 =
Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(
Int64Ty, 2));
983 Op1 =
Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(
Int64Ty, 2));
988 int ElemIdx0 = (Index & 2) >> 1;
989 int ElemIdx1 = 2 + (Index & 1);
991 int ShuffleElts[2] = {ElemIdx0, ElemIdx1};
992 Value *ShuffleCall =
Builder.CreateShuffleVector(Op0, Op1, ShuffleElts);
995 return Builder.CreateBitCast(ShuffleCall, RetTy);
998 case PPC::BI__builtin_vsx_xxsldwi: {
1002 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
1003 assert(ArgCI &&
"Third argument must be a compile time constant");
1004 unsigned Index = ArgCI->getZExtValue() & 0x3;
1005 Op0 =
Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(
Int32Ty, 4));
1006 Op1 =
Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(
Int32Ty, 4));
1017 ElemIdx0 = (8 - Index) % 8;
1018 ElemIdx1 = (9 - Index) % 8;
1019 ElemIdx2 = (10 - Index) % 8;
1020 ElemIdx3 = (11 - Index) % 8;
1024 ElemIdx1 = Index + 1;
1025 ElemIdx2 = Index + 2;
1026 ElemIdx3 = Index + 3;
1029 int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3};
1030 Value *ShuffleCall =
Builder.CreateShuffleVector(Op0, Op1, ShuffleElts);
1033 return Builder.CreateBitCast(ShuffleCall, RetTy);
1036 case PPC::BI__builtin_pack_vector_int128: {
1040 Value *PoisonValue =
1041 llvm::PoisonValue::get(llvm::FixedVectorType::get(Op0->
getType(), 2));
1043 PoisonValue, Op0, (uint64_t)(isLittleEndian ? 1 : 0));
1044 Res =
Builder.CreateInsertElement(Res, Op1,
1045 (uint64_t)(isLittleEndian ? 0 : 1));
1049 case PPC::BI__builtin_unpack_vector_int128: {
1052 ConstantInt *Index = cast<ConstantInt>(Op1);
1058 ConstantInt::get(Index->getIntegerType(), 1 - Index->getZExtValue());
1060 return Builder.CreateExtractElement(Unpacked, Index);
1063 case PPC::BI__builtin_ppc_sthcx: {
1067 return Builder.CreateCall(F, {Op0, Op1});
1076#define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate, Feature) \
1077 case PPC::BI__builtin_##Name:
1078#include "clang/Basic/BuiltinsPPC.def"
1081 for (
unsigned i = 0, e =
E->getNumArgs(); i != e; i++)
1091 if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc ||
1092 BuiltinID == PPC::BI__builtin_vsx_disassemble_pair ||
1093 BuiltinID == PPC::BI__builtin_mma_disassemble_pair) {
1094 unsigned NumVecs = 2;
1095 auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair;
1096 if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) {
1098 Intrinsic = Intrinsic::ppc_mma_disassemble_acc;
1104 llvm::Type *VTy = llvm::FixedVectorType::get(
Int8Ty, 16);
1105 Value *Ptr = Ops[0];
1106 for (
unsigned i=0; i<NumVecs; i++) {
1108 llvm::ConstantInt* Index = llvm::ConstantInt::get(
IntTy, i);
1114 if (BuiltinID == PPC::BI__builtin_vsx_build_pair ||
1115 BuiltinID == PPC::BI__builtin_mma_build_acc) {
1123 std::reverse(Ops.begin() + 1, Ops.end());
1126 switch (BuiltinID) {
1127 #define CUSTOM_BUILTIN(Name, Intr, Types, Acc, Feature) \
1128 case PPC::BI__builtin_##Name: \
1129 ID = Intrinsic::ppc_##Intr; \
1132 #include "clang/Basic/BuiltinsPPC.def"
1134 if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
1135 BuiltinID == PPC::BI__builtin_vsx_stxvp ||
1136 BuiltinID == PPC::BI__builtin_mma_lxvp ||
1137 BuiltinID == PPC::BI__builtin_mma_stxvp) {
1138 if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
1139 BuiltinID == PPC::BI__builtin_mma_lxvp) {
1146 return Builder.CreateCall(F, Ops,
"");
1152 CallOps.push_back(Acc);
1154 if (BuiltinID == PPC::BI__builtin_mma_dmmr ||
1155 BuiltinID == PPC::BI__builtin_mma_dmxor ||
1156 BuiltinID == PPC::BI__builtin_mma_disassemble_dmr) {
1160 if (BuiltinID == PPC::BI__builtin_mma_disassemble_dmr)
1162 for (
unsigned i=1; i<Ops.size(); i++)
1163 CallOps.push_back(Ops[i]);
1169 case PPC::BI__builtin_ppc_compare_and_swap:
1170 case PPC::BI__builtin_ppc_compare_and_swaplp: {
1179 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Monotonic,
true);
1187 Value *LoadedVal = Pair.first.getScalarVal();
1191 case PPC::BI__builtin_ppc_fetch_and_add:
1192 case PPC::BI__builtin_ppc_fetch_and_addlp: {
1194 llvm::AtomicOrdering::Monotonic);
1196 case PPC::BI__builtin_ppc_fetch_and_and:
1197 case PPC::BI__builtin_ppc_fetch_and_andlp: {
1199 llvm::AtomicOrdering::Monotonic);
1202 case PPC::BI__builtin_ppc_fetch_and_or:
1203 case PPC::BI__builtin_ppc_fetch_and_orlp: {
1205 llvm::AtomicOrdering::Monotonic);
1207 case PPC::BI__builtin_ppc_fetch_and_swap:
1208 case PPC::BI__builtin_ppc_fetch_and_swaplp: {
1210 llvm::AtomicOrdering::Monotonic);
1212 case PPC::BI__builtin_ppc_ldarx:
1213 case PPC::BI__builtin_ppc_lwarx:
1214 case PPC::BI__builtin_ppc_lharx:
1215 case PPC::BI__builtin_ppc_lbarx:
1217 case PPC::BI__builtin_ppc_mfspr: {
1223 return Builder.CreateCall(F, {Op0});
1225 case PPC::BI__builtin_ppc_mtspr: {
1232 return Builder.CreateCall(F, {Op0, Op1});
1234 case PPC::BI__builtin_ppc_popcntb: {
1236 llvm::Type *ArgType = ArgValue->
getType();
1238 return Builder.CreateCall(F, {ArgValue},
"popcntb");
1240 case PPC::BI__builtin_ppc_mtfsf: {
1247 return Builder.CreateCall(F, {Op0, Cast},
"");
1250 case PPC::BI__builtin_ppc_swdiv_nochk:
1251 case PPC::BI__builtin_ppc_swdivs_nochk: {
1254 FastMathFlags FMF =
Builder.getFastMathFlags();
1255 Builder.getFastMathFlags().setFast();
1256 Value *FDiv =
Builder.CreateFDiv(Op0, Op1,
"swdiv_nochk");
1257 Builder.getFastMathFlags() &= (FMF);
1260 case PPC::BI__builtin_ppc_fric:
1262 *
this,
E, Intrinsic::rint,
1263 Intrinsic::experimental_constrained_rint))
1265 case PPC::BI__builtin_ppc_frim:
1266 case PPC::BI__builtin_ppc_frims:
1268 *
this,
E, Intrinsic::floor,
1269 Intrinsic::experimental_constrained_floor))
1271 case PPC::BI__builtin_ppc_frin:
1272 case PPC::BI__builtin_ppc_frins:
1274 *
this,
E, Intrinsic::round,
1275 Intrinsic::experimental_constrained_round))
1277 case PPC::BI__builtin_ppc_frip:
1278 case PPC::BI__builtin_ppc_frips:
1280 *
this,
E, Intrinsic::ceil,
1281 Intrinsic::experimental_constrained_ceil))
1283 case PPC::BI__builtin_ppc_friz:
1284 case PPC::BI__builtin_ppc_frizs:
1286 *
this,
E, Intrinsic::trunc,
1287 Intrinsic::experimental_constrained_trunc))
1289 case PPC::BI__builtin_ppc_fsqrt:
1290 case PPC::BI__builtin_ppc_fsqrts:
1292 *
this,
E, Intrinsic::sqrt,
1293 Intrinsic::experimental_constrained_sqrt))
1295 case PPC::BI__builtin_ppc_test_data_class: {
1300 {Op0, Op1},
"test_data_class");
1302 case PPC::BI__builtin_ppc_maxfe: {
1308 {Op0, Op1, Op2, Op3});
1310 case PPC::BI__builtin_ppc_maxfl: {
1316 {Op0, Op1, Op2, Op3});
1318 case PPC::BI__builtin_ppc_maxfs: {
1324 {Op0, Op1, Op2, Op3});
1326 case PPC::BI__builtin_ppc_minfe: {
1332 {Op0, Op1, Op2, Op3});
1334 case PPC::BI__builtin_ppc_minfl: {
1340 {Op0, Op1, Op2, Op3});
1342 case PPC::BI__builtin_ppc_minfs: {
1348 {Op0, Op1, Op2, Op3});
1350 case PPC::BI__builtin_ppc_swdiv:
1351 case PPC::BI__builtin_ppc_swdivs: {
1354 return Builder.CreateFDiv(Op0, Op1,
"swdiv");
1356 case PPC::BI__builtin_ppc_set_fpscr_rn:
1358 {EmitScalarExpr(E->getArg(0))});
1359 case PPC::BI__builtin_ppc_mffs:
#define PPC_LNX_FEATURE(NAME, DESC, ENUMNAME, ENUMVAL, HWCAPN)
static constexpr SparcCPUInfo CPUInfo[]
static void Accumulate(SMap &SM, CFGBlock *B)
Value * MakeBinaryAtomicValue(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, AtomicOrdering Ordering)
Utility to insert an atomic instruction based on Intrinsic::ID and the expression node.
static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static llvm::Value * emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF, unsigned BuiltinID, const CallExpr *E)
Enumerates target-specific builtins in their own namespaces within namespace clang.
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this one is potentially signed.
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessary.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code for a function.
llvm::Type * ConvertType(QualType T)
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
Emit a compare-and-exchange op for atomic type.
@ Default
! No language constraints on evaluation order.
const TargetInfo & getTarget() const
llvm::Value * EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our best estimate of the alignment of the pointee.
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type, returning the result.
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
llvm::LLVMContext & getLLVMContext()
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
const llvm::DataLayout & getDataLayout() const
void Error(SourceLocation loc, StringRef error)
Emit a general error that something can't be done.
llvm::Constant * CreateRuntimeVariable(llvm::Type *Ty, StringRef Name)
Create a new runtime global variable with the specified type and name.
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
LValue - This represents an lvalue references.
static RValue get(llvm::Value *V)
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
This represents one expression.
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point.
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic expression.
A (possibly-)qualified type.
Encodes a location in the source.
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool isLittleEndian() const
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee type.
The JSON file list parser is used to communicate input to InstallAPI.
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
Diagnostic wrappers for TextAPI types for error reporting.
llvm::PointerType * VoidPtrTy
llvm::IntegerType * Int64Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::IntegerType * Int32Ty
llvm::IntegerType * IntTy
int
llvm::IntegerType * Int16Ty
llvm::PointerType * UnqualPtrTy