  NEONMAP1(__a32_vcvt_bf16_f32, arm_neon_vcvtfp2bf, 0),
  NEONMAP1(vabsq_v, arm_neon_vabs, 0),
  NEONMAP1(vaesdq_u8, arm_neon_aesd, 0),
  NEONMAP1(vaeseq_u8, arm_neon_aese, 0),
  NEONMAP1(vaesimcq_u8, arm_neon_aesimc, 0),
  NEONMAP1(vaesmcq_u8, arm_neon_aesmc, 0),
  NEONMAP1(vbfdot_f32, arm_neon_bfdot, 0),
  NEONMAP1(vbfdotq_f32, arm_neon_bfdot, 0),
  NEONMAP1(vbfmlalbq_f32, arm_neon_bfmlalb, 0),
  NEONMAP1(vbfmlaltq_f32, arm_neon_bfmlalt, 0),
  NEONMAP1(vbfmmlaq_f32, arm_neon_bfmmla, 0),
  NEONMAP1(vcage_v, arm_neon_vacge, 0),
  NEONMAP1(vcageq_v, arm_neon_vacge, 0),
  NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
  NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
  NEONMAP1(vcale_v, arm_neon_vacge, 0),
  NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
  NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
  NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
  NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
  NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
  NEONMAP1(vcvt_n_f16_s16, arm_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvt_n_f16_u16, arm_neon_vcvtfxu2fp, 0),
  NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvt_n_s16_f16, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_u16_f16, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvta_s16_f16, arm_neon_vcvtas, 0),
  NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvta_u16_f16, arm_neon_vcvtau, 0),
  NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtaq_s16_f16, arm_neon_vcvtas, 0),
  NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvtaq_u16_f16, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0),
  NEONMAP1(vcvtm_s16_f16, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtm_u16_f16, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtmq_s16_f16, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtmq_u16_f16, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtn_s16_f16, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtn_u16_f16, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtnq_s16_f16, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtnq_u16_f16, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtp_s16_f16, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtp_u16_f16, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtpq_s16_f16, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtpq_u16_f16, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtq_n_f16_s16, arm_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvtq_n_f16_u16, arm_neon_vcvtfxu2fp, 0),
  NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvtq_n_s16_f16, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_u16_f16, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vdot_s32, arm_neon_sdot, 0),
  NEONMAP1(vdot_u32, arm_neon_udot, 0),
  NEONMAP1(vdotq_s32, arm_neon_sdot, 0),
  NEONMAP1(vdotq_u32, arm_neon_udot, 0),
  NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
  NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
  NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
  NEONMAP1(vld1q_v, arm_neon_vld1, 0),
  NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
  NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
  NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
  NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
  NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
  NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
  NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
  NEONMAP1(vld2q_v, arm_neon_vld2, 0),
  NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
  NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
  NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
  NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
  NEONMAP1(vld3q_v, arm_neon_vld3, 0),
  NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
  NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
  NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
  NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
  NEONMAP1(vld4q_v, arm_neon_vld4, 0),
  NEONMAP1(vmmlaq_s32, arm_neon_smmla, 0),
  NEONMAP1(vmmlaq_u32, arm_neon_ummla, 0),
  NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0),
  NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0),
  NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
  NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
  NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
  NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
  NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
  NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
  NEONMAP1(vsha1su0q_u32, arm_neon_sha1su0, 0),
  NEONMAP1(vsha1su1q_u32, arm_neon_sha1su1, 0),
  NEONMAP1(vsha256h2q_u32, arm_neon_sha256h2, 0),
  NEONMAP1(vsha256hq_u32, arm_neon_sha256h, 0),
  NEONMAP1(vsha256su0q_u32, arm_neon_sha256su0, 0),
  NEONMAP1(vsha256su1q_u32, arm_neon_sha256su1, 0),
  NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
  NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
  NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
  NEONMAP1(vst1q_v, arm_neon_vst1, 0),
  NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
  NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
  NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
  NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
  NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
  NEONMAP1(vst2q_v, arm_neon_vst2, 0),
  NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
  NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
  NEONMAP1(vst3q_v, arm_neon_vst3, 0),
  NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
  NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
  NEONMAP1(vst4q_v, arm_neon_vst4, 0),
  NEONMAP1(vusdot_s32, arm_neon_usdot, 0),
  NEONMAP1(vusdotq_s32, arm_neon_usdot, 0),
  NEONMAP1(vusmmlaq_s32, arm_neon_usmmla, 0),
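  // The entries above are from the AArch32 map; the AArch64 (A64) entries
  // below follow the same scheme. The NEONMAP1/NEONMAP2 macros, defined
  // earlier in this file, expand to ARMVectorIntrinsicInfo table entries,
  // roughly (a sketch; see the macro definitions for the exact field order):
  //
  //   #define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier)
  //     { #NameBase, NEON::BI__builtin_neon_##NameBase,
  //       Intrinsic::LLVMIntrinsic, 0, TypeModifier }
  //
  // NEONMAP2 additionally supplies an AltLLVMIntrinsic, used when two
  // flavours of a builtin (signed/unsigned, or FP/integer) lower to
  // different LLVM intrinsics.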
  NEONMAP1(vabs_v, aarch64_neon_abs, 0),
  NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
  NEONMAP1(vaesdq_u8, aarch64_crypto_aesd, 0),
  NEONMAP1(vaeseq_u8, aarch64_crypto_aese, 0),
  NEONMAP1(vaesimcq_u8, aarch64_crypto_aesimc, 0),
  NEONMAP1(vaesmcq_u8, aarch64_crypto_aesmc, 0),
  NEONMAP1(vbfdot_f32, aarch64_neon_bfdot, 0),
  NEONMAP1(vbfdotq_f32, aarch64_neon_bfdot, 0),
  NEONMAP1(vbfmlalbq_f32, aarch64_neon_bfmlalb, 0),
  NEONMAP1(vbfmlaltq_f32, aarch64_neon_bfmlalt, 0),
  NEONMAP1(vbfmmlaq_f32, aarch64_neon_bfmmla, 0),
  NEONMAP1(vcage_v, aarch64_neon_facge, 0),
  NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
  NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcale_v, aarch64_neon_facge, 0),
  NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
  NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
  NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
  NEONMAP1(vcvt_n_f16_s16, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvt_n_f16_u16, aarch64_neon_vcvtfxu2fp, 0),
  NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvt_n_s16_f16, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_u16_f16, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_f16_s16, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvtq_n_f16_u16, aarch64_neon_vcvtfxu2fp, 0),
  NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvtq_n_s16_f16, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_u16_f16, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vdot_s32, aarch64_neon_sdot, 0),
  NEONMAP1(vdot_u32, aarch64_neon_udot, 0),
  NEONMAP1(vdotq_s32, aarch64_neon_sdot, 0),
  NEONMAP1(vdotq_u32, aarch64_neon_udot, 0),
  NEONMAP1(vfmlal_high_f16, aarch64_neon_fmlal2, 0),
  NEONMAP1(vfmlal_low_f16, aarch64_neon_fmlal, 0),
  NEONMAP1(vfmlalq_high_f16, aarch64_neon_fmlal2, 0),
  NEONMAP1(vfmlalq_low_f16, aarch64_neon_fmlal, 0),
  NEONMAP1(vfmlsl_high_f16, aarch64_neon_fmlsl2, 0),
  NEONMAP1(vfmlsl_low_f16, aarch64_neon_fmlsl, 0),
  NEONMAP1(vfmlslq_high_f16, aarch64_neon_fmlsl2, 0),
  NEONMAP1(vfmlslq_low_f16, aarch64_neon_fmlsl, 0),
  NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
  NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
  NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
  NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
  NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
  NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
  NEONMAP1(vmmlaq_s32, aarch64_neon_smmla, 0),
  NEONMAP1(vmmlaq_u32, aarch64_neon_ummla, 0),
  NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
  NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
  NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0),
  NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
  NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0),
  NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
  NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
  NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
  NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0),
  NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
  NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
  NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
  NEONMAP1(vrax1q_u64, aarch64_crypto_rax1, 0),
  NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
  NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
  NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
  NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
  NEONMAP1(vsha1su0q_u32, aarch64_crypto_sha1su0, 0),
  NEONMAP1(vsha1su1q_u32, aarch64_crypto_sha1su1, 0),
  NEONMAP1(vsha256h2q_u32, aarch64_crypto_sha256h2, 0),
  NEONMAP1(vsha256hq_u32, aarch64_crypto_sha256h, 0),
  NEONMAP1(vsha256su0q_u32, aarch64_crypto_sha256su0, 0),
  NEONMAP1(vsha256su1q_u32, aarch64_crypto_sha256su1, 0),
  NEONMAP1(vsha512h2q_u64, aarch64_crypto_sha512h2, 0),
  NEONMAP1(vsha512hq_u64, aarch64_crypto_sha512h, 0),
  NEONMAP1(vsha512su0q_u64, aarch64_crypto_sha512su0, 0),
  NEONMAP1(vsha512su1q_u64, aarch64_crypto_sha512su1, 0),
  NEONMAP1(vsm3partw1q_u32, aarch64_crypto_sm3partw1, 0),
  NEONMAP1(vsm3partw2q_u32, aarch64_crypto_sm3partw2, 0),
  NEONMAP1(vsm3ss1q_u32, aarch64_crypto_sm3ss1, 0),
  NEONMAP1(vsm3tt1aq_u32, aarch64_crypto_sm3tt1a, 0),
  NEONMAP1(vsm3tt1bq_u32, aarch64_crypto_sm3tt1b, 0),
  NEONMAP1(vsm3tt2aq_u32, aarch64_crypto_sm3tt2a, 0),
  NEONMAP1(vsm3tt2bq_u32, aarch64_crypto_sm3tt2b, 0),
  NEONMAP1(vsm4ekeyq_u32, aarch64_crypto_sm4ekey, 0),
  NEONMAP1(vsm4eq_u32, aarch64_crypto_sm4e, 0),
  NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
  NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
  NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
  NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
  NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
  NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
  NEONMAP1(vusdot_s32, aarch64_neon_usdot, 0),
  NEONMAP1(vusdotq_s32, aarch64_neon_usdot, 0),
  NEONMAP1(vusmmlaq_s32, aarch64_neon_usmmla, 0),
  NEONMAP1(vxarq_u64, aarch64_crypto_xar, 0),
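  // The entries below come from the AArch64 scalar (SISD) intrinsic map:
  // these builtins operate on single elements rather than whole vectors,
  // which is why they map to aarch64_sisd_* and *_scalar intrinsics.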
  NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
  NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
  NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
  NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
  NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
  NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
  NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
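// EmitCommonNeonBuiltinExpr handles the NEON builtins whose lowering is
// shared between AArch32 and AArch64. The per-target entry points look the
// builtin up in the tables above and forward the table data; a sketch of
// that call site (names as used elsewhere in this file):
//
//   const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
//       IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
//   if (Builtin)
//     return EmitCommonNeonBuiltinExpr(
//         Builtin->BuiltinID, Builtin->LLVMIntrinsic,
//         Builtin->AltLLVMIntrinsic, Builtin->NameHint,
//         Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);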
Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
    unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
    const char *NameHint, unsigned Modifier, const CallExpr *E,
    SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
    llvm::Triple::ArchType Arch) {
  // Get the last argument, which specifies the vector type.
  const Expr *Arg = E->getArg(E->getNumArgs() - 1);
  std::optional<llvm::APSInt> NeonTypeConst =
      Arg->getIntegerConstantExpr(getContext());
  if (!NeonTypeConst)
    return nullptr;

  // Determine the type of this overloaded NEON intrinsic.
  NeonTypeFlags Type(NeonTypeConst->getZExtValue());
  const bool Usgn = Type.isUnsigned();
  const bool Quad = Type.isQuad();
  const bool Floating = Type.isFloatingPoint();
  const bool HasFastHalfType = getTarget().hasFastHalfType();
  const bool AllowBFloatArgsAndRet =
      getTargetHooks().getABIInfo().allowBFloatArgsAndRet();

  llvm::FixedVectorType *VTy =
      GetNeonType(this, Type, HasFastHalfType, false, AllowBFloatArgsAndRet);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return nullptr;

  auto getAlignmentValue32 = [&](Address addr) -> Value * {
    return Builder.getInt32(addr.getAlignment().getQuantity());
  };

  unsigned Int = LLVMIntrinsic;
  if ((Modifier & UnsignedAlts) && !Usgn)
    Int = AltLLVMIntrinsic;
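  // When a builtin has signed and unsigned forms lowering to different LLVM
  // intrinsics, the table carries the unsigned one as LLVMIntrinsic and the
  // signed one as AltLLVMIntrinsic, tagged with the UnsignedAlts modifier;
  // e.g. an entry such as
  //   NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds,
  //            Add1ArgType | UnsignedAlts)
  // makes the selection above pick vhadds for signed element types.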
  switch (BuiltinID) {
  default: break;
  case NEON::BI__builtin_neon_splat_lane_v:
  case NEON::BI__builtin_neon_splat_laneq_v:
  case NEON::BI__builtin_neon_splatq_lane_v:
  case NEON::BI__builtin_neon_splatq_laneq_v: {
    auto NumElements = VTy->getElementCount();
    if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v)
      NumElements = NumElements * 2;
    if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v)
      NumElements = NumElements.divideCoefficientBy(2);

    Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
    return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
  }
  case NEON::BI__builtin_neon_vpadd_v:
  case NEON::BI__builtin_neon_vpaddq_v:
    // We don't allow fp/int overloading of intrinsics.
    if (VTy->getElementType()->isFloatingPointTy() &&
        Int == Intrinsic::aarch64_neon_addp)
      Int = Intrinsic::aarch64_neon_faddp;
    break;
  case NEON::BI__builtin_neon_vabs_v:
  case NEON::BI__builtin_neon_vabsq_v:
    if (VTy->getElementType()->isFloatingPointTy())
      return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
  case NEON::BI__builtin_neon_vadd_v:
  case NEON::BI__builtin_neon_vaddq_v: {
    llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, Quad ? 16 : 8);
    Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
    return Builder.CreateBitCast(Ops[0], Ty);
  }
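  // Note on the vadd case above: this path is used for the polynomial forms
  // (vadd_p8 and friends). Addition of GF(2) polynomials is carry-less, so
  // it is simply a bitwise XOR of the underlying bytes.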
  case NEON::BI__builtin_neon_vaddhn_v: {
    llvm::FixedVectorType *SrcTy =
        llvm::FixedVectorType::getExtendedElementVectorType(VTy);

    // %sum = add <4 x i32> %lhs, %rhs
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
    Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");

    // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
    Constant *ShiftAmt =
        ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");

    // %res = trunc <4 x i32> %high to <4 x i16>
    return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
  }
  case NEON::BI__builtin_neon_vcale_v:
  case NEON::BI__builtin_neon_vcaleq_v:
  case NEON::BI__builtin_neon_vcalt_v:
  case NEON::BI__builtin_neon_vcaltq_v:
    std::swap(Ops[0], Ops[1]);
    [[fallthrough]];
  case NEON::BI__builtin_neon_vcage_v:
  case NEON::BI__builtin_neon_vcageq_v:
  case NEON::BI__builtin_neon_vcagt_v:
  case NEON::BI__builtin_neon_vcagtq_v: {
    llvm::Type *Ty;
    switch (VTy->getScalarSizeInBits()) {
    default: llvm_unreachable("unexpected type");
    case 32:
      Ty = FloatTy;
      break;
    case 64:
      Ty = DoubleTy;
      break;
    case 16:
      Ty = HalfTy;
      break;
    }
    auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
    llvm::Type *Tys[] = { VTy, VecFlt };
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    return EmitNeonCall(F, Ops, NameHint);
  }
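  // vcale/vcalt have no intrinsics of their own: "|a| <= |b|" is emitted as
  // the absolute-compare facge/facgt (vacge/vacgt on AArch32) with the
  // operands swapped, which is why the maps above route all four builtins to
  // the same two intrinsics.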
  case NEON::BI__builtin_neon_vceqz_v:
  case NEON::BI__builtin_neon_vceqzq_v:
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], Ty, Floating ? ICmpInst::FCMP_OEQ : ICmpInst::ICMP_EQ,
        "vceqz");
  case NEON::BI__builtin_neon_vcgez_v:
  case NEON::BI__builtin_neon_vcgezq_v:
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], Ty, Floating ? ICmpInst::FCMP_OGE : ICmpInst::ICMP_SGE,
        "vcgez");
  case NEON::BI__builtin_neon_vclez_v:
  case NEON::BI__builtin_neon_vclezq_v:
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], Ty, Floating ? ICmpInst::FCMP_OLE : ICmpInst::ICMP_SLE,
        "vclez");
  case NEON::BI__builtin_neon_vcgtz_v:
  case NEON::BI__builtin_neon_vcgtzq_v:
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], Ty, Floating ? ICmpInst::FCMP_OGT : ICmpInst::ICMP_SGT,
        "vcgtz");
  case NEON::BI__builtin_neon_vcltz_v:
  case NEON::BI__builtin_neon_vcltzq_v:
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], Ty, Floating ? ICmpInst::FCMP_OLT : ICmpInst::ICMP_SLT,
        "vcltz");
  case NEON::BI__builtin_neon_vclz_v:
  case NEON::BI__builtin_neon_vclzq_v:
    // We generate a target-independent intrinsic, which needs a second
    // argument for whether or not clz of zero is undefined; on ARM it isn't.
    Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
    break;
  case NEON::BI__builtin_neon_vcvt_f32_v:
  case NEON::BI__builtin_neon_vcvtq_f32_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
                     HasFastHalfType);
    return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case NEON::BI__builtin_neon_vcvt_f16_s16:
  case NEON::BI__builtin_neon_vcvt_f16_u16:
  case NEON::BI__builtin_neon_vcvtq_f16_s16:
  case NEON::BI__builtin_neon_vcvtq_f16_u16:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
                     HasFastHalfType);
    return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case NEON::BI__builtin_neon_vcvt_n_f16_s16:
  case NEON::BI__builtin_neon_vcvt_n_f16_u16:
  case NEON::BI__builtin_neon_vcvtq_n_f16_s16:
  case NEON::BI__builtin_neon_vcvtq_n_f16_u16: {
    llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case NEON::BI__builtin_neon_vcvt_n_f32_v:
  case NEON::BI__builtin_neon_vcvt_n_f64_v:
  case NEON::BI__builtin_neon_vcvtq_n_f32_v:
  case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
    llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
    Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case NEON::BI__builtin_neon_vcvt_n_s16_f16:
  case NEON::BI__builtin_neon_vcvt_n_s32_v:
  case NEON::BI__builtin_neon_vcvt_n_u16_f16:
  case NEON::BI__builtin_neon_vcvt_n_u32_v:
  case NEON::BI__builtin_neon_vcvt_n_s64_v:
  case NEON::BI__builtin_neon_vcvt_n_u64_v:
  case NEON::BI__builtin_neon_vcvtq_n_s16_f16:
  case NEON::BI__builtin_neon_vcvtq_n_s32_v:
  case NEON::BI__builtin_neon_vcvtq_n_u16_f16:
  case NEON::BI__builtin_neon_vcvtq_n_u32_v:
  case NEON::BI__builtin_neon_vcvtq_n_s64_v:
  case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n", 1, true);
  }
  case NEON::BI__builtin_neon_vcvt_s32_v:
  case NEON::BI__builtin_neon_vcvt_u32_v:
  case NEON::BI__builtin_neon_vcvt_s64_v:
  case NEON::BI__builtin_neon_vcvt_u64_v:
  case NEON::BI__builtin_neon_vcvt_s16_f16:
  case NEON::BI__builtin_neon_vcvt_u16_f16:
  case NEON::BI__builtin_neon_vcvtq_s32_v:
  case NEON::BI__builtin_neon_vcvtq_u32_v:
  case NEON::BI__builtin_neon_vcvtq_s64_v:
  case NEON::BI__builtin_neon_vcvtq_u64_v:
  case NEON::BI__builtin_neon_vcvtq_s16_f16:
  case NEON::BI__builtin_neon_vcvtq_u16_f16: {
    Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
    return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
                : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
  }
  case NEON::BI__builtin_neon_vcvta_s16_f16:
  case NEON::BI__builtin_neon_vcvta_s32_v:
  case NEON::BI__builtin_neon_vcvta_s64_v:
  case NEON::BI__builtin_neon_vcvta_u16_f16:
  case NEON::BI__builtin_neon_vcvta_u32_v:
  case NEON::BI__builtin_neon_vcvta_u64_v:
  case NEON::BI__builtin_neon_vcvtaq_s16_f16:
  case NEON::BI__builtin_neon_vcvtaq_s32_v:
  case NEON::BI__builtin_neon_vcvtaq_s64_v:
  case NEON::BI__builtin_neon_vcvtaq_u16_f16:
  case NEON::BI__builtin_neon_vcvtaq_u32_v:
  case NEON::BI__builtin_neon_vcvtaq_u64_v:
  case NEON::BI__builtin_neon_vcvtn_s16_f16:
  case NEON::BI__builtin_neon_vcvtn_s32_v:
  case NEON::BI__builtin_neon_vcvtn_s64_v:
  case NEON::BI__builtin_neon_vcvtn_u16_f16:
  case NEON::BI__builtin_neon_vcvtn_u32_v:
  case NEON::BI__builtin_neon_vcvtn_u64_v:
  case NEON::BI__builtin_neon_vcvtnq_s16_f16:
  case NEON::BI__builtin_neon_vcvtnq_s32_v:
  case NEON::BI__builtin_neon_vcvtnq_s64_v:
  case NEON::BI__builtin_neon_vcvtnq_u16_f16:
  case NEON::BI__builtin_neon_vcvtnq_u32_v:
  case NEON::BI__builtin_neon_vcvtnq_u64_v:
  case NEON::BI__builtin_neon_vcvtp_s16_f16:
  case NEON::BI__builtin_neon_vcvtp_s32_v:
  case NEON::BI__builtin_neon_vcvtp_s64_v:
  case NEON::BI__builtin_neon_vcvtp_u16_f16:
  case NEON::BI__builtin_neon_vcvtp_u32_v:
  case NEON::BI__builtin_neon_vcvtp_u64_v:
  case NEON::BI__builtin_neon_vcvtpq_s16_f16:
  case NEON::BI__builtin_neon_vcvtpq_s32_v:
  case NEON::BI__builtin_neon_vcvtpq_s64_v:
  case NEON::BI__builtin_neon_vcvtpq_u16_f16:
  case NEON::BI__builtin_neon_vcvtpq_u32_v:
  case NEON::BI__builtin_neon_vcvtpq_u64_v:
  case NEON::BI__builtin_neon_vcvtm_s16_f16:
  case NEON::BI__builtin_neon_vcvtm_s32_v:
  case NEON::BI__builtin_neon_vcvtm_s64_v:
  case NEON::BI__builtin_neon_vcvtm_u16_f16:
  case NEON::BI__builtin_neon_vcvtm_u32_v:
  case NEON::BI__builtin_neon_vcvtm_u64_v:
  case NEON::BI__builtin_neon_vcvtmq_s16_f16:
  case NEON::BI__builtin_neon_vcvtmq_s32_v:
  case NEON::BI__builtin_neon_vcvtmq_s64_v:
  case NEON::BI__builtin_neon_vcvtmq_u16_f16:
  case NEON::BI__builtin_neon_vcvtmq_u32_v:
  case NEON::BI__builtin_neon_vcvtmq_u64_v: {
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
  }
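  // Mnemonics for the explicit-rounding conversions handled above: vcvta
  // rounds to nearest with ties away from zero, vcvtm toward minus infinity
  // (floor), vcvtn to nearest with ties to even, and vcvtp toward plus
  // infinity (ceil); the s/u suffix in the mapped intrinsic selects the
  // signed or unsigned integer result.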
  case NEON::BI__builtin_neon_vcvtx_f32_v: {
    llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty};
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vext_v:
  case NEON::BI__builtin_neon_vextq_v: {
    int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
    SmallVector<int, 16> Indices;
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
      Indices.push_back(i+CV);

    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
  }
  case NEON::BI__builtin_neon_vfma_v:
  case NEON::BI__builtin_neon_vfmaq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);

    // NEON intrinsic puts accumulator first, unlike the LLVM fma.
    return emitCallMaybeConstrainedFPBuiltin(
        *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
        {Ops[1], Ops[2], Ops[0]});
  }
  case NEON::BI__builtin_neon_vld1_v:
  case NEON::BI__builtin_neon_vld1q_v: {
    llvm::Type *Tys[] = {Ty, Int8PtrTy};
    Ops.push_back(getAlignmentValue32(PtrOp0));
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
  }
  case NEON::BI__builtin_neon_vld1_x2_v:
  case NEON::BI__builtin_neon_vld1q_x2_v:
  case NEON::BI__builtin_neon_vld1_x3_v:
  case NEON::BI__builtin_neon_vld1q_x3_v:
  case NEON::BI__builtin_neon_vld1_x4_v:
  case NEON::BI__builtin_neon_vld1q_x4_v: {
    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
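  // vld1xN returns an LLVM struct of N vectors; because the C-level
  // intrinsic returns an aggregate (e.g. int8x8x2_t) by value, the struct is
  // stored through the sret-style pointer that arrives in Ops[0].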
  case NEON::BI__builtin_neon_vld2_v:
  case NEON::BI__builtin_neon_vld2q_v:
  case NEON::BI__builtin_neon_vld3_v:
  case NEON::BI__builtin_neon_vld3q_v:
  case NEON::BI__builtin_neon_vld4_v:
  case NEON::BI__builtin_neon_vld4q_v:
  case NEON::BI__builtin_neon_vld2_dup_v:
  case NEON::BI__builtin_neon_vld2q_dup_v:
  case NEON::BI__builtin_neon_vld3_dup_v:
  case NEON::BI__builtin_neon_vld3q_dup_v:
  case NEON::BI__builtin_neon_vld4_dup_v:
  case NEON::BI__builtin_neon_vld4q_dup_v: {
    llvm::Type *Tys[] = {Ty, Int8PtrTy};
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    Value *Align = getAlignmentValue32(PtrOp1);
    Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld1_dup_v:
  case NEON::BI__builtin_neon_vld1q_dup_v: {
    Value *V = PoisonValue::get(Ty);
    PtrOp0 = PtrOp0.withElementType(VTy->getElementType());
    LoadInst *Ld = Builder.CreateLoad(PtrOp0);
    llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
    Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
    return EmitNeonSplat(Ops[0], CI);
  }
  case NEON::BI__builtin_neon_vld2_lane_v:
  case NEON::BI__builtin_neon_vld2q_lane_v:
  case NEON::BI__builtin_neon_vld3_lane_v:
  case NEON::BI__builtin_neon_vld3q_lane_v:
  case NEON::BI__builtin_neon_vld4_lane_v:
  case NEON::BI__builtin_neon_vld4q_lane_v: {
    llvm::Type *Tys[] = {Ty, Int8PtrTy};
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    for (unsigned I = 2; I < Ops.size() - 1; ++I)
      Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
    Ops.push_back(getAlignmentValue32(PtrOp1));
    Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), NameHint);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vmovl_v: {
    llvm::FixedVectorType *DTy =
        llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
    if (Usgn)
      return Builder.CreateZExt(Ops[0], Ty, "vmovl");
    return Builder.CreateSExt(Ops[0], Ty, "vmovl");
  }
  case NEON::BI__builtin_neon_vmovn_v: {
    llvm::FixedVectorType *QTy =
        llvm::FixedVectorType::getExtendedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
    return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
  }
  case NEON::BI__builtin_neon_vmull_v:
    // FIXME: the integer vmull operations could be emitted in terms of pure
    // LLVM IR (2 exts followed by a mul). Unfortunately, though, this
    // requires hoisting the exts outside the function, since they're
    // type-specific.
    Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
    Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
  case NEON::BI__builtin_neon_vpadal_v:
  case NEON::BI__builtin_neon_vpadalq_v: {
    // The source operand type has twice as many elements of half the size.
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    auto *NarrowTy =
        llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
    llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vpaddl_v:
  case NEON::BI__builtin_neon_vpaddlq_v: {
    // The source operand type has twice as many elements of half the size.
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    auto *NarrowTy =
        llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
    llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
  }
  case NEON::BI__builtin_neon_vqdmlal_v:
  case NEON::BI__builtin_neon_vqdmlsl_v: {
    SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
    Ops[1] =
        EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
    Ops.resize(2);
    return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vqdmulhq_lane_v:
  case NEON::BI__builtin_neon_vqdmulh_lane_v:
  case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
  case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
    auto *RTy = cast<llvm::FixedVectorType>(Ty);
    if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
        BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
      RTy = llvm::FixedVectorType::get(RTy->getElementType(),
                                       RTy->getNumElements() * 2);
    llvm::Type *Tys[2] = {
        RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
                                             /*isQuad*/ false))};
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vqdmulhq_laneq_v:
  case NEON::BI__builtin_neon_vqdmulh_laneq_v:
  case NEON::BI__builtin_neon_vqrdmulhq_laneq_v:
  case NEON::BI__builtin_neon_vqrdmulh_laneq_v: {
    llvm::Type *Tys[2] = {
        Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
                                            /*isQuad*/ true))};
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vqshl_n_v:
  case NEON::BI__builtin_neon_vqshlq_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n", 1, false);
  case NEON::BI__builtin_neon_vqshlu_n_v:
  case NEON::BI__builtin_neon_vqshluq_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n", 1, false);
  case NEON::BI__builtin_neon_vrecpe_v:
  case NEON::BI__builtin_neon_vrecpeq_v:
  case NEON::BI__builtin_neon_vrsqrte_v:
  case NEON::BI__builtin_neon_vrsqrteq_v:
    Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
  case NEON::BI__builtin_neon_vrndi_v:
  case NEON::BI__builtin_neon_vrndiq_v:
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_nearbyint
              : Intrinsic::nearbyint;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
  case NEON::BI__builtin_neon_vrshr_n_v:
  case NEON::BI__builtin_neon_vrshrq_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
  case NEON::BI__builtin_neon_vsha512hq_u64:
  case NEON::BI__builtin_neon_vsha512h2q_u64:
  case NEON::BI__builtin_neon_vsha512su0q_u64:
  case NEON::BI__builtin_neon_vsha512su1q_u64: {
    Function *F = CGM.getIntrinsic(Int);
    return EmitNeonCall(F, Ops, "");
  }
  case NEON::BI__builtin_neon_vshl_n_v:
  case NEON::BI__builtin_neon_vshlq_n_v:
    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
    return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
                             "vshl_n");
  case NEON::BI__builtin_neon_vshll_n_v: {
    llvm::FixedVectorType *SrcTy =
        llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    if (Usgn)
      Ops[0] = Builder.CreateZExt(Ops[0], VTy);
    else
      Ops[0] = Builder.CreateSExt(Ops[0], VTy);
    Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
    return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
  }
  case NEON::BI__builtin_neon_vshrn_n_v: {
    llvm::FixedVectorType *SrcTy =
        llvm::FixedVectorType::getExtendedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
    if (Usgn)
      Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
    else
      Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
    return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
  }
  case NEON::BI__builtin_neon_vshr_n_v:
  case NEON::BI__builtin_neon_vshrq_n_v:
    return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
  case NEON::BI__builtin_neon_vst1_v:
  case NEON::BI__builtin_neon_vst1q_v:
  case NEON::BI__builtin_neon_vst2_v:
  case NEON::BI__builtin_neon_vst2q_v:
  case NEON::BI__builtin_neon_vst3_v:
  case NEON::BI__builtin_neon_vst3q_v:
  case NEON::BI__builtin_neon_vst4_v:
  case NEON::BI__builtin_neon_vst4q_v:
  case NEON::BI__builtin_neon_vst2_lane_v:
  case NEON::BI__builtin_neon_vst2q_lane_v:
  case NEON::BI__builtin_neon_vst3_lane_v:
  case NEON::BI__builtin_neon_vst3q_lane_v:
  case NEON::BI__builtin_neon_vst4_lane_v:
  case NEON::BI__builtin_neon_vst4q_lane_v: {
    llvm::Type *Tys[] = {Int8PtrTy, Ty};
    Ops.push_back(getAlignmentValue32(PtrOp0));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
  }
  case NEON::BI__builtin_neon_vsm3partw1q_u32:
  case NEON::BI__builtin_neon_vsm3partw2q_u32:
  case NEON::BI__builtin_neon_vsm3ss1q_u32:
  case NEON::BI__builtin_neon_vsm4ekeyq_u32:
  case NEON::BI__builtin_neon_vsm4eq_u32: {
    Function *F = CGM.getIntrinsic(Int);
    return EmitNeonCall(F, Ops, "");
  }
  case NEON::BI__builtin_neon_vsm3tt1aq_u32:
  case NEON::BI__builtin_neon_vsm3tt1bq_u32:
  case NEON::BI__builtin_neon_vsm3tt2aq_u32:
  case NEON::BI__builtin_neon_vsm3tt2bq_u32: {
    Function *F = CGM.getIntrinsic(Int);
    Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
    return EmitNeonCall(F, Ops, "");
  }
  case NEON::BI__builtin_neon_vst1_x2_v:
  case NEON::BI__builtin_neon_vst1q_x2_v:
  case NEON::BI__builtin_neon_vst1_x3_v:
  case NEON::BI__builtin_neon_vst1q_x3_v:
  case NEON::BI__builtin_neon_vst1_x4_v:
  case NEON::BI__builtin_neon_vst1q_x4_v: {
    // TODO: Currently in AArch32 mode the pointer operand comes first,
    // whereas in AArch64 it comes last. We may want to stick to one or the
    // other.
    if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
        Arch == llvm::Triple::aarch64_32) {
      llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
      return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
    }
    llvm::Type *Tys[2] = {UnqualPtrTy, VTy};
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
  }
  case NEON::BI__builtin_neon_vsubhn_v: {
    llvm::FixedVectorType *SrcTy =
        llvm::FixedVectorType::getExtendedElementVectorType(VTy);

    // %sum = sub <4 x i32> %lhs, %rhs
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
    Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");

    // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
    Constant *ShiftAmt =
        ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");

    // %res = trunc <4 x i32> %high to <4 x i16>
    return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
  }
  case NEON::BI__builtin_neon_vtrn_v:
  case NEON::BI__builtin_neon_vtrnq_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<int, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(i+vi);
        Indices.push_back(i+e+vi);
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vtst_v:
  case NEON::BI__builtin_neon_vtstq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
    Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
                                ConstantAggregateZero::get(Ty));
    return Builder.CreateSExt(Ops[0], Ty, "vtst");
  }
  case NEON::BI__builtin_neon_vuzp_v:
  case NEON::BI__builtin_neon_vuzpq_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<int, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
        Indices.push_back(2*i+vi);

      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vxarq_u64: {
    Function *F = CGM.getIntrinsic(Int);
    Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
    return EmitNeonCall(F, Ops, "");
  }
  case NEON::BI__builtin_neon_vzip_v:
  case NEON::BI__builtin_neon_vzipq_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<int, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back((i + vi*e) >> 1);
        Indices.push_back(((i + vi*e) >> 1)+e);
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vdot_s32:
  case NEON::BI__builtin_neon_vdot_u32:
  case NEON::BI__builtin_neon_vdotq_s32:
  case NEON::BI__builtin_neon_vdotq_u32: {
    llvm::Type *InputTy =
        llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vdot");
  }
  case NEON::BI__builtin_neon_vfmlal_low_f16:
  case NEON::BI__builtin_neon_vfmlalq_low_f16: {
    llvm::Type *InputTy =
        llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops,
                        "vfmlal_low");
  }
  case NEON::BI__builtin_neon_vfmlsl_low_f16:
  case NEON::BI__builtin_neon_vfmlslq_low_f16: {
    llvm::Type *InputTy =
        llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops,
                        "vfmlsl_low");
  }
  case NEON::BI__builtin_neon_vfmlal_high_f16:
  case NEON::BI__builtin_neon_vfmlalq_high_f16: {
    llvm::Type *InputTy =
        llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops,
                        "vfmlal_high");
  }
  case NEON::BI__builtin_neon_vfmlsl_high_f16:
  case NEON::BI__builtin_neon_vfmlslq_high_f16: {
    llvm::Type *InputTy =
        llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops,
                        "vfmlsl_high");
  }
  case NEON::BI__builtin_neon_vmmlaq_s32:
  case NEON::BI__builtin_neon_vmmlaq_u32: {
    llvm::Type *InputTy =
        llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vmmla");
  }
  case NEON::BI__builtin_neon_vusmmlaq_s32: {
    llvm::Type *InputTy =
        llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vusmmla");
  }
  case NEON::BI__builtin_neon_vusdot_s32:
  case NEON::BI__builtin_neon_vusdotq_s32: {
    llvm::Type *InputTy =
        llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vusdot");
  }
  case NEON::BI__builtin_neon_vbfdot_f32:
  case NEON::BI__builtin_neon_vbfdotq_f32: {
    llvm::Type *InputTy =
        llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vbfdot");
  }
  case NEON::BI__builtin_neon___a32_vcvt_bf16_f32: {
    llvm::Type *Tys[1] = { Ty };
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvtfp2bf");
  }
  }

  assert(Int && "Expected valid intrinsic number");
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E,
                                           ReturnValueSlot ReturnValue,
                                           llvm::Triple::ArchType Arch) {
  if (auto Hint = GetValueForARMHint(BuiltinID))
    return Hint;

  if (BuiltinID == clang::ARM::BI__emit) {
    bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(VoidTy, /*Variadic=*/false);

    Expr::EvalResult Result;
    if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
      llvm_unreachable("Sema will ensure that the parameter is constant");

    llvm::APSInt Value = Result.Val.getInt();
    uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();

    llvm::InlineAsm *Emit =
        IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
                                 /*hasSideEffects=*/true)
                : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
                                 /*hasSideEffects=*/true);

    return Builder.CreateCall(Emit);
  }
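  // Usage sketch: __emit(0xbf00) in Thumb mode emits that literal encoding
  // via inline asm (".inst.n 0xbf00", the Thumb NOP). The asm has no
  // operands, and hasSideEffects is set so the instruction is never dropped.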
  if (BuiltinID == clang::ARM::BI__builtin_arm_dbg) {
    Value *Option = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_prefetch) {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *RW = EmitScalarExpr(E->getArg(1));
    Value *IsData = EmitScalarExpr(E->getArg(2));

    // Locality is not supported on ARM target
    Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);

    Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
    return Builder.CreateCall(F, {Address, RW, Locality, IsData});
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_rbit) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_clz ||
      BuiltinID == clang::ARM::BI__builtin_arm_clz64) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Arg->getType());
    Value *Res = Builder.CreateCall(F, {Arg, Builder.getInt1(false)});
    if (BuiltinID == clang::ARM::BI__builtin_arm_clz64)
      Res = Builder.CreateTrunc(Res, Int32Ty);
    return Res;
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_cls) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
  }
  if (BuiltinID == clang::ARM::BI__builtin_arm_cls64) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg,
                              "cls64");
  }

  if (BuiltinID == clang::ARM::BI__clear_cache) {
    assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
    const FunctionDecl *FD = E->getDirectCallee();
    Value *Ops[2];
    for (unsigned i = 0; i < 2; i++)
      Ops[i] = EmitScalarExpr(E->getArg(i));
    llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
    llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
    StringRef Name = FD->getName();
    return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_mcrr ||
      BuiltinID == clang::ARM::BI__builtin_arm_mcrr2) {
    Function *F;

    switch (BuiltinID) {
    default: llvm_unreachable("unexpected builtin");
    case clang::ARM::BI__builtin_arm_mcrr:
      F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
      break;
    case clang::ARM::BI__builtin_arm_mcrr2:
      F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
      break;
    }

    // The MCRR{2} instruction has 5 operands but the intrinsic has 4: the
    // builtin takes the 32-bit Rt and Rt2 halves of the instruction as a
    // single unsigned 64-bit argument, which is split here.
    // Coproc, Opc1, Rt, Rt2, CRm
    Value *Coproc = EmitScalarExpr(E->getArg(0));
    Value *Opc1 = EmitScalarExpr(E->getArg(1));
    Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
    Value *CRm = EmitScalarExpr(E->getArg(3));

    Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
    Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
    Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1);
    Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty);

    return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_mrrc ||
      BuiltinID == clang::ARM::BI__builtin_arm_mrrc2) {
    Function *F;

    switch (BuiltinID) {
    default: llvm_unreachable("unexpected builtin");
    case clang::ARM::BI__builtin_arm_mrrc:
      F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
      break;
    case clang::ARM::BI__builtin_arm_mrrc2:
      F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
      break;
    }

    Value *Coproc = EmitScalarExpr(E->getArg(0));
    Value *Opc1 = EmitScalarExpr(E->getArg(1));
    Value *CRm = EmitScalarExpr(E->getArg(2));
    Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});

    // The intrinsic returns an unsigned 64-bit integer represented as two
    // 32-bit integers, recombined here.
    Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
    Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
    Rt = Builder.CreateZExt(Rt, Int64Ty);
    Rt1 = Builder.CreateZExt(Rt1, Int64Ty);

    Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
    RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
    RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);

    return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_ldrexd ||
      ((BuiltinID == clang::ARM::BI__builtin_arm_ldrex ||
        BuiltinID == clang::ARM::BI__builtin_arm_ldaex) &&
       getContext().getTypeSize(E->getType()) == 64) ||
      BuiltinID == clang::ARM::BI__ldrexd) {
    Function *F;

    switch (BuiltinID) {
    default: llvm_unreachable("unexpected builtin");
    case clang::ARM::BI__builtin_arm_ldaex:
      F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
      break;
    case clang::ARM::BI__builtin_arm_ldrexd:
    case clang::ARM::BI__builtin_arm_ldrex:
    case clang::ARM::BI__ldrexd:
      F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
      break;
    }

    Value *LdPtr = EmitScalarExpr(E->getArg(0));
    Value *Val = Builder.CreateCall(F, LdPtr, "ldrexd");

    Value *Val0 = Builder.CreateExtractValue(Val, 1);
    Value *Val1 = Builder.CreateExtractValue(Val, 0);
    Val0 = Builder.CreateZExt(Val0, Int64Ty);
    Val1 = Builder.CreateZExt(Val1, Int64Ty);

    Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
    Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
    Val = Builder.CreateOr(Val, Val1);
    return Builder.CreateBitCast(Val, ConvertType(E->getType()));
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_ldrex ||
      BuiltinID == clang::ARM::BI__builtin_arm_ldaex) {
    Value *LoadAddr = EmitScalarExpr(E->getArg(0));

    QualType Ty = E->getType();
    llvm::Type *RealResTy = ConvertType(Ty);
    llvm::Type *IntTy =
        llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));

    Function *F = CGM.getIntrinsic(
        BuiltinID == clang::ARM::BI__builtin_arm_ldaex ? Intrinsic::arm_ldaex
                                                       : Intrinsic::arm_ldrex,
        UnqualPtrTy);
    CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
    Val->addParamAttr(
        0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));

    if (RealResTy->isPointerTy())
      return Builder.CreateIntToPtr(Val, RealResTy);

    llvm::Type *IntResTy = llvm::IntegerType::get(
        getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
    return Builder.CreateBitCast(Builder.CreateTruncOrBitCast(Val, IntResTy),
                                 RealResTy);
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_strexd ||
      ((BuiltinID == clang::ARM::BI__builtin_arm_stlex ||
        BuiltinID == clang::ARM::BI__builtin_arm_strex) &&
       getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
    Function *F = CGM.getIntrinsic(
        BuiltinID == clang::ARM::BI__builtin_arm_stlex ? Intrinsic::arm_stlexd
                                                       : Intrinsic::arm_strexd);
    llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);

    Address Tmp = CreateMemTemp(E->getArg(0)->getType());
    Value *Val = EmitScalarExpr(E->getArg(0));
    Builder.CreateStore(Val, Tmp);

    Address LdPtr = Tmp.withElementType(STy);
    Val = Builder.CreateLoad(LdPtr);

    Value *Arg0 = Builder.CreateExtractValue(Val, 0);
    Value *Arg1 = Builder.CreateExtractValue(Val, 1);
    Value *StPtr = EmitScalarExpr(E->getArg(1));
    return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_strex ||
      BuiltinID == clang::ARM::BI__builtin_arm_stlex) {
    Value *StoreVal = EmitScalarExpr(E->getArg(0));
    Value *StoreAddr = EmitScalarExpr(E->getArg(1));

    QualType Ty = E->getArg(0)->getType();
    llvm::Type *StoreTy =
        llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));

    if (StoreVal->getType()->isPointerTy())
      StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
    else {
      llvm::Type *IntTy = llvm::IntegerType::get(
          getLLVMContext(),
          CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
      StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
      StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
    }

    Function *F = CGM.getIntrinsic(
        BuiltinID == clang::ARM::BI__builtin_arm_stlex ? Intrinsic::arm_stlex
                                                       : Intrinsic::arm_strex,
        StoreAddr->getType());

    CallInst *CI = Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
    CI->addParamAttr(
        1, Attribute::get(getLLVMContext(), Attribute::ElementType, StoreTy));
    return CI;
  }

  if (BuiltinID == clang::ARM::BI__builtin_arm_clrex) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
    return Builder.CreateCall(F);
  }

  // CRC32
  Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
  switch (BuiltinID) {
  case clang::ARM::BI__builtin_arm_crc32b:
    CRCIntrinsicID = Intrinsic::arm_crc32b;
    break;
  case clang::ARM::BI__builtin_arm_crc32cb:
    CRCIntrinsicID = Intrinsic::arm_crc32cb;
    break;
  case clang::ARM::BI__builtin_arm_crc32h:
    CRCIntrinsicID = Intrinsic::arm_crc32h;
    break;
  case clang::ARM::BI__builtin_arm_crc32ch:
    CRCIntrinsicID = Intrinsic::arm_crc32ch;
    break;
  case clang::ARM::BI__builtin_arm_crc32w:
  case clang::ARM::BI__builtin_arm_crc32d:
    CRCIntrinsicID = Intrinsic::arm_crc32w;
    break;
  case clang::ARM::BI__builtin_arm_crc32cw:
  case clang::ARM::BI__builtin_arm_crc32cd:
    CRCIntrinsicID = Intrinsic::arm_crc32cw;
    break;
  }

  if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    Value *Arg1 = EmitScalarExpr(E->getArg(1));

    // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
    // intrinsics; the call needs the argument split into two halves.
    if (BuiltinID == clang::ARM::BI__builtin_arm_crc32d ||
        BuiltinID == clang::ARM::BI__builtin_arm_crc32cd) {
      Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
      Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
      Value *Arg1b = Builder.CreateLShr(Arg1, C1);
      Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);

      Function *F = CGM.getIntrinsic(CRCIntrinsicID);
      Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
      return Builder.CreateCall(F, {Res, Arg1b});
    }

    Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
    Function *F = CGM.getIntrinsic(CRCIntrinsicID);
    return Builder.CreateCall(F, {Arg0, Arg1});
  }
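  // AArch32 has no 64-bit CRC data instruction, so (as the code above shows)
  // crc32(c)d is lowered to two chained crc32(c)w calls: the first consumes
  // the low 32 bits of the data word, and its result seeds the second call
  // over the high 32 bits.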
  if (BuiltinID == clang::ARM::BI__builtin_arm_rsr ||
      BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == clang::ARM::BI__builtin_arm_rsrp ||
      BuiltinID == clang::ARM::BI__builtin_arm_wsr ||
      BuiltinID == clang::ARM::BI__builtin_arm_wsr64 ||
      BuiltinID == clang::ARM::BI__builtin_arm_wsrp) {

    SpecialRegisterAccessKind AccessKind = Write;
    if (BuiltinID == clang::ARM::BI__builtin_arm_rsr ||
        BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
        BuiltinID == clang::ARM::BI__builtin_arm_rsrp)
      AccessKind = VolatileRead;

    bool IsPointerBuiltin = BuiltinID == clang::ARM::BI__builtin_arm_rsrp ||
                            BuiltinID == clang::ARM::BI__builtin_arm_wsrp;

    bool Is64Bit = BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
                   BuiltinID == clang::ARM::BI__builtin_arm_wsr64;

    llvm::Type *ValueType;
    llvm::Type *RegisterType;
    if (IsPointerBuiltin) {
      ValueType = VoidPtrTy;
      RegisterType = Int32Ty;
    } else if (Is64Bit) {
      ValueType = RegisterType = Int64Ty;
    } else {
      ValueType = RegisterType = Int32Ty;
    }

    return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
                                      AccessKind);
  }

  if (BuiltinID == ARM::BI__builtin_sponentry) {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
    return Builder.CreateCall(F);
  }

  // Some intrinsics are equivalent - if they are use the base intrinsic ID.
  auto It = llvm::find_if(NEONEquivalentIntrinsicMap, [BuiltinID](auto &P) {
    return P.first == BuiltinID;
  });
  if (It != end(NEONEquivalentIntrinsicMap))
    BuiltinID = It->second;

  // Find out if any arguments are required to be integer constant
  // expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  auto getAlignmentValue32 = [&](Address addr) -> Value * {
    return Builder.getInt32(addr.getAlignment().getQuantity());
  };

  Address PtrOp0 = Address::invalid();
  Address PtrOp1 = Address::invalid();
  SmallVector<Value*, 4> Ops;
  bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
  unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
  for (unsigned i = 0, e = NumArgs; i != e; i++) {
    if (i == 0) {
      switch (BuiltinID) {
      case NEON::BI__builtin_neon_vld1_v:
      case NEON::BI__builtin_neon_vld1q_v:
      case NEON::BI__builtin_neon_vld1q_lane_v:
      case NEON::BI__builtin_neon_vld1_lane_v:
      case NEON::BI__builtin_neon_vld1_dup_v:
      case NEON::BI__builtin_neon_vld1q_dup_v:
      case NEON::BI__builtin_neon_vst1_v:
      case NEON::BI__builtin_neon_vst1q_v:
      case NEON::BI__builtin_neon_vst1q_lane_v:
      case NEON::BI__builtin_neon_vst1_lane_v:
      case NEON::BI__builtin_neon_vst2_v:
      case NEON::BI__builtin_neon_vst2q_v:
      case NEON::BI__builtin_neon_vst2_lane_v:
      case NEON::BI__builtin_neon_vst2q_lane_v:
      case NEON::BI__builtin_neon_vst3_v:
      case NEON::BI__builtin_neon_vst3q_v:
      case NEON::BI__builtin_neon_vst3_lane_v:
      case NEON::BI__builtin_neon_vst3q_lane_v:
      case NEON::BI__builtin_neon_vst4_v:
      case NEON::BI__builtin_neon_vst4q_v:
      case NEON::BI__builtin_neon_vst4_lane_v:
      case NEON::BI__builtin_neon_vst4q_lane_v:
        // Get the alignment for the argument in addition to the value;
        // we'll use it later.
        PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
        Ops.push_back(PtrOp0.emitRawPointer(*this));
        continue;
      }
    }
    if (i == 1) {
      switch (BuiltinID) {
      case NEON::BI__builtin_neon_vld2_v:
      case NEON::BI__builtin_neon_vld2q_v:
      case NEON::BI__builtin_neon_vld3_v:
      case NEON::BI__builtin_neon_vld3q_v:
      case NEON::BI__builtin_neon_vld4_v:
      case NEON::BI__builtin_neon_vld4q_v:
      case NEON::BI__builtin_neon_vld2_lane_v:
      case NEON::BI__builtin_neon_vld2q_lane_v:
      case NEON::BI__builtin_neon_vld3_lane_v:
      case NEON::BI__builtin_neon_vld3q_lane_v:
      case NEON::BI__builtin_neon_vld4_lane_v:
      case NEON::BI__builtin_neon_vld4q_lane_v:
      case NEON::BI__builtin_neon_vld2_dup_v:
      case NEON::BI__builtin_neon_vld2q_dup_v:
      case NEON::BI__builtin_neon_vld3_dup_v:
      case NEON::BI__builtin_neon_vld3q_dup_v:
      case NEON::BI__builtin_neon_vld4_dup_v:
      case NEON::BI__builtin_neon_vld4q_dup_v:
        // Get the alignment for the argument in addition to the value;
        // we'll use it later.
        PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
        Ops.push_back(PtrOp1.emitRawPointer(*this));
        continue;
      }
    }

    Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
  }

  switch (BuiltinID) {
  default: break;

  case NEON::BI__builtin_neon_vget_lane_i8:
  case NEON::BI__builtin_neon_vget_lane_i16:
  case NEON::BI__builtin_neon_vget_lane_i32:
  case NEON::BI__builtin_neon_vget_lane_i64:
  case NEON::BI__builtin_neon_vget_lane_bf16:
  case NEON::BI__builtin_neon_vget_lane_f32:
  case NEON::BI__builtin_neon_vgetq_lane_i8:
  case NEON::BI__builtin_neon_vgetq_lane_i16:
  case NEON::BI__builtin_neon_vgetq_lane_i32:
  case NEON::BI__builtin_neon_vgetq_lane_i64:
  case NEON::BI__builtin_neon_vgetq_lane_bf16:
  case NEON::BI__builtin_neon_vgetq_lane_f32:
  case NEON::BI__builtin_neon_vduph_lane_bf16:
  case NEON::BI__builtin_neon_vduph_laneq_bf16:
    return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");

  case NEON::BI__builtin_neon_vrndns_f32: {
    Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *Tys[] = {Arg->getType()};
    Function *F = CGM.getIntrinsic(Intrinsic::roundeven, Tys);
    return Builder.CreateCall(F, {Arg}, "vrndn");
  }

  case NEON::BI__builtin_neon_vset_lane_i8:
  case NEON::BI__builtin_neon_vset_lane_i16:
  case NEON::BI__builtin_neon_vset_lane_i32:
  case NEON::BI__builtin_neon_vset_lane_i64:
  case NEON::BI__builtin_neon_vset_lane_bf16:
  case NEON::BI__builtin_neon_vset_lane_f32:
  case NEON::BI__builtin_neon_vsetq_lane_i8:
  case NEON::BI__builtin_neon_vsetq_lane_i16:
  case NEON::BI__builtin_neon_vsetq_lane_i32:
  case NEON::BI__builtin_neon_vsetq_lane_i64:
  case NEON::BI__builtin_neon_vsetq_lane_bf16:
  case NEON::BI__builtin_neon_vsetq_lane_f32:
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");

  case NEON::BI__builtin_neon_vsha1h_u32:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
                        "vsha1h");
  case NEON::BI__builtin_neon_vsha1cq_u32:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
                        "vsha1h");
  case NEON::BI__builtin_neon_vsha1pq_u32:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
                        "vsha1h");
  case NEON::BI__builtin_neon_vsha1mq_u32:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
                        "vsha1h");

  case NEON::BI__builtin_neon_vcvth_bf16_f32: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops,
                        "vcvtbfp2bf");
  }

  // The ARM _MoveToCoprocessor builtins put the input register value as
  // the first argument, but the LLVM intrinsic expects it as the third one.
  case clang::ARM::BI_MoveToCoprocessor:
  case clang::ARM::BI_MoveToCoprocessor2: {
    Function *F = CGM.getIntrinsic(BuiltinID == clang::ARM::BI_MoveToCoprocessor
                                       ? Intrinsic::arm_mcr
                                       : Intrinsic::arm_mcr2);
    return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
                                  Ops[3], Ops[4], Ops[5]});
  }
  }

  // Get the last argument, which specifies the vector type.
  assert(HasExtraArg);
  const Expr *Arg = E->getArg(E->getNumArgs() - 1);
  std::optional<llvm::APSInt> Result =
      Arg->getIntegerConstantExpr(getContext());
  if (!Result)
    return nullptr;

  if (BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_f ||
      BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_d) {
    // Determine the overloaded type of this builtin.
    llvm::Type *Ty;
    if (BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_f)
      Ty = FloatTy;
    else
      Ty = DoubleTy;

    // Determine whether this is an unsigned conversion or not.
    bool usgn = Result->getZExtValue() == 1;
    unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;

    // Call the appropriate intrinsic.
    Function *F = CGM.getIntrinsic(Int, Ty);
    return Builder.CreateCall(F, Ops, "vcvtr");
  }

  // Determine the type of this overloaded NEON intrinsic.
  NeonTypeFlags Type = Result->getZExtValue();
  bool usgn = Type.isUnsigned();
  bool rightShift = false;

  llvm::FixedVectorType *VTy =
      GetNeonType(this, Type, getTarget().hasFastHalfType(), false,
                  getTarget().hasBFloat16Type());
  llvm::Type *Ty = VTy;
  if (!Ty)
    return nullptr;

  // Many NEON builtins have identical semantics and uses in ARM and
  // AArch64. Emit these in a single function.
  auto IntrinsicMap = ArrayRef(ARMSIMDIntrinsicMap);
  const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
      IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
  if (Builtin)
    return EmitCommonNeonBuiltinExpr(
        Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
        Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);

  unsigned Int;
  switch (BuiltinID) {
  default: return nullptr;
  case NEON::BI__builtin_neon_vld1q_lane_v:
    // Handle 64-bit integer elements as a special case.  Use shuffles of
    // one-element vectors to avoid poor code for i64 in the backend.
    if (VTy->getElementType()->isIntegerTy(64)) {
      // Extract the other lane.
      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
      int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
      Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
      // Load the value as a one-element vector.
      Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1);
      llvm::Type *Tys[] = {Ty, Int8PtrTy};
      Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
      Value *Align = getAlignmentValue32(PtrOp0);
      Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
      // Combine them.
      int Indices[] = {1 - Lane, Lane};
      return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane");
    }
    [[fallthrough]];
  case NEON::BI__builtin_neon_vld1_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    PtrOp0 = PtrOp0.withElementType(VTy->getElementType());
    Value *Ld = Builder.CreateLoad(PtrOp0);
    return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
  }
  case NEON::BI__builtin_neon_vqrshrn_n_v:
    Int =
        usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
                        1, true);
  case NEON::BI__builtin_neon_vqrshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
                        Ops, "vqrshrun_n", 1, true);
  case NEON::BI__builtin_neon_vqshrn_n_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
                        1, true);
  case NEON::BI__builtin_neon_vqshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
                        Ops, "vqshrun_n", 1, true);
  case NEON::BI__builtin_neon_vrecpe_v:
  case NEON::BI__builtin_neon_vrecpeq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
                        Ops, "vrecpe");
  case NEON::BI__builtin_neon_vrshrn_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
                        Ops, "vrshrn_n", 1, true);
  case NEON::BI__builtin_neon_vrsra_n_v:
  case NEON::BI__builtin_neon_vrsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
    return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
  case NEON::BI__builtin_neon_vsri_n_v:
  case NEON::BI__builtin_neon_vsriq_n_v:
    rightShift = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vsli_n_v:
  case NEON::BI__builtin_neon_vsliq_n_v:
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
                        Ops, "vsli_n");
  case NEON::BI__builtin_neon_vsra_n_v:
  case NEON::BI__builtin_neon_vsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  case NEON::BI__builtin_neon_vst1q_lane_v:
    // Handle 64-bit integer elements as a special case.  Use a shuffle to
    // get a one-element vector and avoid poor code for i64 in the backend.
    if (VTy->getElementType()->isIntegerTy(64)) {
      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
      Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
      Ops[2] = getAlignmentValue32(PtrOp0);
      llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
      return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
                                                 Tys), Ops);
    }
    [[fallthrough]];
  case NEON::BI__builtin_neon_vst1_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
    return Builder.CreateStore(Ops[1],
                               PtrOp0.withElementType(Ops[1]->getType()));
  }
  // vtbl zeroes lanes whose table index is out of range; vtbx instead leaves
  // those lanes as the destination's previous contents.
  case NEON::BI__builtin_neon_vtbl1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
                        Ops, "vtbl1");
  case NEON::BI__builtin_neon_vtbl2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
                        Ops, "vtbl2");
  case NEON::BI__builtin_neon_vtbl3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
                        Ops, "vtbl3");
  case NEON::BI__builtin_neon_vtbl4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
                        Ops, "vtbl4");
  case NEON::BI__builtin_neon_vtbx1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
                        Ops, "vtbx1");
  case NEON::BI__builtin_neon_vtbx2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
                        Ops, "vtbx2");
  case NEON::BI__builtin_neon_vtbx3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
                        Ops, "vtbx3");
  case NEON::BI__builtin_neon_vtbx4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
                        Ops, "vtbx4");
  }
}
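// EmitAArch64BuiltinExpr is the AArch64 analogue of EmitARMBuiltinExpr:
// fixed-ID builtins (hints, system operations, exclusives, CRC, MTE) are
// handled first with explicit checks, and NEON builtins are routed through
// the AArch64 tables above.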
Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E,
                                               llvm::Triple::ArchType Arch) {
  if (BuiltinID == Builtin::BI__builtin_cpu_supports)
    return EmitAArch64CpuSupports(E);

  unsigned HintID = static_cast<unsigned>(-1);
  switch (BuiltinID) {
  default: break;
  case clang::AArch64::BI__builtin_arm_nop:
    HintID = 0;
    break;
  case clang::AArch64::BI__builtin_arm_yield:
  case clang::AArch64::BI__yield:
    HintID = 1;
    break;
  case clang::AArch64::BI__builtin_arm_wfe:
  case clang::AArch64::BI__wfe:
    HintID = 2;
    break;
  case clang::AArch64::BI__builtin_arm_wfi:
  case clang::AArch64::BI__wfi:
    HintID = 3;
    break;
  case clang::AArch64::BI__builtin_arm_sev:
  case clang::AArch64::BI__sev:
    HintID = 4;
    break;
  case clang::AArch64::BI__builtin_arm_sevl:
  case clang::AArch64::BI__sevl:
    HintID = 5;
    break;
  }

  if (HintID != static_cast<unsigned>(-1)) {
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
    return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
  }
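  // Each of these builtins is just the AArch64 "hint #n" instruction with a
  // fixed immediate; e.g. __wfe() lowers to
  //   call void @llvm.aarch64.hint(i32 2)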
  if (BuiltinID == clang::AArch64::BI__builtin_arm_trap) {
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_break);
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(F, Builder.CreateZExt(Arg, CGM.Int32Ty));
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_get_sme_state) {
    // Create call to __arm_sme_state and store the results to the two
    // pointers.
    CallInst *CI = EmitRuntimeCall(CGM.CreateRuntimeFunction(
        llvm::FunctionType::get(StructType::get(CGM.Int64Ty, CGM.Int64Ty), {},
                                false),
        "__arm_sme_state"));
    auto Attrs = AttributeList().addFnAttribute(getLLVMContext(),
                                                "aarch64_pstate_sm_compatible");
    CI->setAttributes(Attrs);
    CI->setCallingConv(
        llvm::CallingConv::
            AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2);
    Builder.CreateStore(Builder.CreateExtractValue(CI, 0),
                        EmitPointerWithAlignment(E->getArg(0)));
    return Builder.CreateStore(Builder.CreateExtractValue(CI, 1),
                               EmitPointerWithAlignment(E->getArg(1)));
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_rbit) {
    assert((getContext().getTypeSize(E->getType()) == 32) &&
           "rbit of unusual size!");
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
  }
  if (BuiltinID == clang::AArch64::BI__builtin_arm_rbit64) {
    assert((getContext().getTypeSize(E->getType()) == 64) &&
           "rbit of unusual size!");
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_clz ||
      BuiltinID == clang::AArch64::BI__builtin_arm_clz64) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Arg->getType());
    Value *Res = Builder.CreateCall(F, {Arg, Builder.getInt1(false)});
    if (BuiltinID == clang::AArch64::BI__builtin_arm_clz64)
      Res = Builder.CreateTrunc(Res, Int32Ty);
    return Res;
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_cls) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg,
                              "cls");
  }
  if (BuiltinID == clang::AArch64::BI__builtin_arm_cls64) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg,
                              "cls");
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_rint32zf ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rint32z) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *Ty = Arg->getType();
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32z, Ty),
                              Arg, "frint32z");
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_rint64zf ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rint64z) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *Ty = Arg->getType();
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64z, Ty),
                              Arg, "frint64z");
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_rint32xf ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rint32x) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *Ty = Arg->getType();
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32x, Ty),
                              Arg, "frint32x");
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_rint64xf ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rint64x) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *Ty = Arg->getType();
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64x, Ty),
                              Arg, "frint64x");
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_jcvt) {
    assert((getContext().getTypeSize(E->getType()) == 32) &&
           "__jcvt of unusual size!");
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b ||
      BuiltinID == clang::AArch64::BI__builtin_arm_st64b ||
      BuiltinID == clang::AArch64::BI__builtin_arm_st64bv ||
      BuiltinID == clang::AArch64::BI__builtin_arm_st64bv0) {
    llvm::Value *MemAddr = EmitScalarExpr(E->getArg(0));
    llvm::Value *ValPtr = EmitScalarExpr(E->getArg(1));

    if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b) {
      // Load from the address via an LLVM intrinsic, receiving a
      // tuple of 8 i64 words, and store each one to ValPtr.
      Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ld64b);
      llvm::Value *Val = Builder.CreateCall(F, MemAddr);
      llvm::Value *ToRet;
      for (size_t i = 0; i < 8; i++) {
        llvm::Value *ValOffsetPtr =
            Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
        Address Addr =
            Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(8));
        ToRet = Builder.CreateStore(Builder.CreateExtractValue(Val, i), Addr);
      }
      return ToRet;
    } else {
      // Load 8 i64 words from ValPtr, and store them to the address
      // via an LLVM intrinsic.
      SmallVector<llvm::Value *, 9> Args;
      Args.push_back(MemAddr);
      for (size_t i = 0; i < 8; i++) {
        llvm::Value *ValOffsetPtr =
            Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
        Address Addr =
            Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(8));
        Args.push_back(Builder.CreateLoad(Addr));
      }

      auto Intr = (BuiltinID == clang::AArch64::BI__builtin_arm_st64b
                       ? Intrinsic::aarch64_st64b
                   : BuiltinID == clang::AArch64::BI__builtin_arm_st64bv
                       ? Intrinsic::aarch64_st64bv
                       : Intrinsic::aarch64_st64bv0);
      Function *F = CGM.getIntrinsic(Intr);
      return Builder.CreateCall(F, Args);
    }
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_rndr ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rndrrs) {
    auto Intr = (BuiltinID == clang::AArch64::BI__builtin_arm_rndr
                     ? Intrinsic::aarch64_rndr
                     : Intrinsic::aarch64_rndrrs);
    Function *F = CGM.getIntrinsic(Intr);
    llvm::Value *Val = Builder.CreateCall(F);
    Value *RandomValue = Builder.CreateExtractValue(Val, 0);
    Value *Status = Builder.CreateExtractValue(Val, 1);

    Address MemAddress = EmitPointerWithAlignment(E->getArg(0));
    Builder.CreateStore(RandomValue, MemAddress);
    Status = Builder.CreateZExt(Status, Int32Ty);
    return Status;
  }
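  // The rndr/rndrrs intrinsics return an {i64, i1} pair: the random value is
  // stored through the builtin's pointer argument, and the i1 success flag
  // is widened to the builtin's i32 result.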
5228 if (BuiltinID == clang::AArch64::BI__clear_cache) {
5229 assert(E->
getNumArgs() == 2 &&
"__clear_cache takes 2 arguments");
5232 for (
unsigned i = 0; i < 2; i++)
5234 llvm::Type *Ty =
CGM.getTypes().ConvertType(FD->
getType());
5236 StringRef Name = FD->
getName();
if ((BuiltinID == clang::AArch64::BI__builtin_arm_ldrex ||
     BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) &&
    getContext().getTypeSize(E->getType()) == 128) {
  Function *F =
      CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
                           ? Intrinsic::aarch64_ldaxp
                           : Intrinsic::aarch64_ldxp);

  Value *LdPtr = EmitScalarExpr(E->getArg(0));
  Value *Val = Builder.CreateCall(F, LdPtr, "ldxp");

  Value *Val0 = Builder.CreateExtractValue(Val, 1);
  Value *Val1 = Builder.CreateExtractValue(Val, 0);
  llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
  Val0 = Builder.CreateZExt(Val0, Int128Ty);
  Val1 = Builder.CreateZExt(Val1, Int128Ty);

  Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
  Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
  Val = Builder.CreateOr(Val, Val1);
  return Builder.CreateBitCast(Val, ConvertType(E->getType()));
} else if (BuiltinID == clang::AArch64::BI__builtin_arm_ldrex ||
           BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) {
  Value *LoadAddr = EmitScalarExpr(E->getArg(0));

  QualType Ty = E->getType();
  llvm::Type *RealResTy = ConvertType(Ty);
  llvm::Type *IntTy =
      llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));

  Function *F =
      CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
                           ? Intrinsic::aarch64_ldaxr
                           : Intrinsic::aarch64_ldxr,
                       UnqualPtrTy);
  CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
  Val->addParamAttr(
      0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));

  if (RealResTy->isPointerTy())
    return Builder.CreateIntToPtr(Val, RealResTy);

  llvm::Type *IntResTy = llvm::IntegerType::get(
      getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
  return Builder.CreateBitCast(Builder.CreateTruncOrBitCast(Val, IntResTy),
                               RealResTy);
}
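// The matching exclusive stores: 128-bit values are split into two i64
// halves for stxp/stlxp, narrower values are widened to i64 for stxr/stlxr.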
if ((BuiltinID == clang::AArch64::BI__builtin_arm_strex ||
     BuiltinID == clang::AArch64::BI__builtin_arm_stlex) &&
    getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
  Function *F =
      CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_stlex
                           ? Intrinsic::aarch64_stlxp
                           : Intrinsic::aarch64_stxp);
  llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);

  Address Tmp = CreateMemTemp(E->getArg(0)->getType());
  EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);

  Tmp = Tmp.withElementType(STy);
  llvm::Value *Val = Builder.CreateLoad(Tmp);

  Value *Arg0 = Builder.CreateExtractValue(Val, 0);
  Value *Arg1 = Builder.CreateExtractValue(Val, 1);
  Value *StPtr = EmitScalarExpr(E->getArg(1));
  return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
}

if (BuiltinID == clang::AArch64::BI__builtin_arm_strex ||
    BuiltinID == clang::AArch64::BI__builtin_arm_stlex) {
  Value *StoreVal = EmitScalarExpr(E->getArg(0));
  Value *StoreAddr = EmitScalarExpr(E->getArg(1));

  QualType Ty = E->getArg(0)->getType();
  llvm::Type *StoreTy =
      llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));

  if (StoreVal->getType()->isPointerTy())
    StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
  else {
    llvm::Type *IntTy = llvm::IntegerType::get(
        getLLVMContext(),
        CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
    StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
    StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
  }

  Function *F =
      CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_stlex
                           ? Intrinsic::aarch64_stlxr
                           : Intrinsic::aarch64_stxr,
                       StoreAddr->getType());
  CallInst *CI = Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
  CI->addParamAttr(
      1, Attribute::get(getLLVMContext(), Attribute::ElementType, StoreTy));
  return CI;
}
if (BuiltinID == clang::AArch64::BI__getReg) {
  Expr::EvalResult Result;
  if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
    llvm_unreachable("Sema will ensure that the parameter is constant");

  llvm::APSInt Value = Result.Val.getInt();
  LLVMContext &Context = CGM.getLLVMContext();
  std::string Reg = Value == 31 ? "sp" : "x" + toString(Value, 10);

  llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
  llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
  llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);

  llvm::Function *F = CGM.getIntrinsic(Intrinsic::read_register, {Int64Ty});
  return Builder.CreateCall(F, Metadata);
}
if (BuiltinID == clang::AArch64::BI__break) {
  Expr::EvalResult Result;
  if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
    llvm_unreachable("Sema will ensure that the parameter is constant");

  llvm::Function *F = CGM.getIntrinsic(Intrinsic::aarch64_break);
  return Builder.CreateCall(F, {EmitScalarExpr(E->getArg(0))});
}

if (BuiltinID == clang::AArch64::BI__builtin_arm_clrex) {
  Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
  return Builder.CreateCall(F);
}

if (BuiltinID == clang::AArch64::BI_ReadWriteBarrier)
  return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
                             llvm::SyncScope::SingleThread);
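// CRC32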
Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
case clang::AArch64::BI__builtin_arm_crc32b:
  CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
case clang::AArch64::BI__builtin_arm_crc32cb:
  CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
case clang::AArch64::BI__builtin_arm_crc32h:
  CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
case clang::AArch64::BI__builtin_arm_crc32ch:
  CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
case clang::AArch64::BI__builtin_arm_crc32w:
  CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
case clang::AArch64::BI__builtin_arm_crc32cw:
  CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
case clang::AArch64::BI__builtin_arm_crc32d:
  CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
case clang::AArch64::BI__builtin_arm_crc32cd:
  CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
}

if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
  Value *Arg0 = EmitScalarExpr(E->getArg(0));
  Value *Arg1 = EmitScalarExpr(E->getArg(1));
  Function *F = CGM.getIntrinsic(CRCIntrinsicID);

  llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
  Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);

  return Builder.CreateCall(F, {Arg0, Arg1});
}
if (BuiltinID == AArch64::BI__builtin_arm_mops_memset_tag) {
  Value *Dst = EmitScalarExpr(E->getArg(0));
  Value *Val = EmitScalarExpr(E->getArg(1));
  Value *Size = EmitScalarExpr(E->getArg(2));
  Val = Builder.CreateTrunc(Val, Int8Ty);
  Size = Builder.CreateIntCast(Size, Int64Ty, false);
  return Builder.CreateCall(
      CGM.getIntrinsic(Intrinsic::aarch64_mops_memset_tag), {Dst, Val, Size});
}
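// Memory Tagging Extensions (MTE) Intrinsics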
Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
case clang::AArch64::BI__builtin_arm_irg:
  MTEIntrinsicID = Intrinsic::aarch64_irg; break;
case clang::AArch64::BI__builtin_arm_addg:
  MTEIntrinsicID = Intrinsic::aarch64_addg; break;
case clang::AArch64::BI__builtin_arm_gmi:
  MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
case clang::AArch64::BI__builtin_arm_ldg:
  MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
case clang::AArch64::BI__builtin_arm_stg:
  MTEIntrinsicID = Intrinsic::aarch64_stg; break;
case clang::AArch64::BI__builtin_arm_subp:
  MTEIntrinsicID = Intrinsic::aarch64_subp; break;
}

if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
  if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
    Value *Pointer = EmitScalarExpr(E->getArg(0));
    Value *Mask = EmitScalarExpr(E->getArg(1));

    Mask = Builder.CreateZExt(Mask, Int64Ty);
    return Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID),
                              {Pointer, Mask});
  }
  if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
    Value *Pointer = EmitScalarExpr(E->getArg(0));
    Value *TagOffset = EmitScalarExpr(E->getArg(1));

    TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
    return Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID),
                              {Pointer, TagOffset});
  }
  if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
    Value *Pointer = EmitScalarExpr(E->getArg(0));
    Value *ExcludedMask = EmitScalarExpr(E->getArg(1));

    ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
    return Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID),
                              {Pointer, ExcludedMask});
  }
  // Although it is possible to supply a different return
  // address (first arg) to this intrinsic, for now we set
  // return address same as input address.
  if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
    Value *TagAddress = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID),
                              {TagAddress, TagAddress});
  }
  // Although it is possible to supply a different tag (to set)
  // to this intrinsic (as first arg), for now we supply
  // the tag that is in input address arg (common use case).
  if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
    Value *TagAddress = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID),
                              {TagAddress, TagAddress});
  }
  if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
    Value *PointerA = EmitScalarExpr(E->getArg(0));
    Value *PointerB = EmitScalarExpr(E->getArg(1));
    return Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID),
                              {PointerA, PointerB});
  }
}
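// Special-register reads and writes: pick the value/register types from the
// builtin's width and defer to EmitSpecialRegisterBuiltin.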
if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
    BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 ||
    BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 ||
    BuiltinID == clang::AArch64::BI__builtin_arm_rsrp ||
    BuiltinID == clang::AArch64::BI__builtin_arm_wsr ||
    BuiltinID == clang::AArch64::BI__builtin_arm_wsr64 ||
    BuiltinID == clang::AArch64::BI__builtin_arm_wsr128 ||
    BuiltinID == clang::AArch64::BI__builtin_arm_wsrp) {

  SpecialRegisterAccessKind AccessKind = Write;
  if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rsrp)
    AccessKind = VolatileRead;

  bool IsPointerBuiltin = BuiltinID == clang::AArch64::BI__builtin_arm_rsrp ||
                          BuiltinID == clang::AArch64::BI__builtin_arm_wsrp;

  bool Is32Bit = BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
                 BuiltinID == clang::AArch64::BI__builtin_arm_wsr;

  bool Is128Bit = BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 ||
                  BuiltinID == clang::AArch64::BI__builtin_arm_wsr128;

  llvm::Type *ValueType;
  llvm::Type *RegisterType = Int64Ty;
  if (Is32Bit) {
    ValueType = Int32Ty;
  } else if (Is128Bit) {
    llvm::Type *Int128Ty =
        llvm::IntegerType::getInt128Ty(CGM.getLLVMContext());
    ValueType = Int128Ty;
    RegisterType = Int128Ty;
  } else if (IsPointerBuiltin) {
    ValueType = VoidPtrTy;
  } else {
    ValueType = Int64Ty;
  }

  return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
                                    AccessKind);
}
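// MSVC's _ReadStatusReg/_WriteStatusReg (and __sys) receive the system
// register encoded as one integer; rebuild the "o0:o1:CRn:CRm:o2" string
// form that the read/write_register intrinsics expect.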
if (BuiltinID == clang::AArch64::BI_ReadStatusReg ||
    BuiltinID == clang::AArch64::BI_WriteStatusReg ||
    BuiltinID == clang::AArch64::BI__sys) {
  LLVMContext &Context = CGM.getLLVMContext();

  unsigned SysReg =
      E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();

  std::string SysRegStr;
  unsigned SysRegOp0 = (BuiltinID == clang::AArch64::BI_ReadStatusReg ||
                        BuiltinID == clang::AArch64::BI_WriteStatusReg)
                           ? ((1 << 1) | ((SysReg >> 14) & 1))
                           : 1;
  llvm::raw_string_ostream(SysRegStr)
      << SysRegOp0 << ":" << ((SysReg >> 11) & 7) << ":"
      << ((SysReg >> 7) & 15) << ":" << ((SysReg >> 3) & 15) << ":"
      << (SysReg & 7);

  llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
  llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
  llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);

  llvm::Type *RegisterType = Int64Ty;
  llvm::Type *Types[] = { RegisterType };

  if (BuiltinID == clang::AArch64::BI_ReadStatusReg) {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::read_register, Types);

    return Builder.CreateCall(F, Metadata);
  }

  llvm::Function *F = CGM.getIntrinsic(Intrinsic::write_register, Types);
  llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
  llvm::Value *Result = Builder.CreateCall(F, {Metadata, ArgValue});
  if (BuiltinID == clang::AArch64::BI__sys) {
    // __sys has no meaningful result value; return zero for convenience.
    Result = ConstantInt::get(Int32Ty, 0);
  }
  return Result;
}
if (BuiltinID == clang::AArch64::BI_AddressOfReturnAddress) {
  llvm::Function *F =
      CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
  return Builder.CreateCall(F);
}

if (BuiltinID == clang::AArch64::BI__builtin_sponentry) {
  llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
  return Builder.CreateCall(F);
}
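// __mulh/__umulh: widen both operands to i128, multiply, and return the
// high 64 bits of the product.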
if (BuiltinID == clang::AArch64::BI__mulh ||
    BuiltinID == clang::AArch64::BI__umulh) {
  llvm::Type *ResType = ConvertType(E->getType());
  llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);

  bool IsSigned = BuiltinID == clang::AArch64::BI__mulh;
  Value *LHS =
      Builder.CreateIntCast(EmitScalarExpr(E->getArg(0)), Int128Ty, IsSigned);
  Value *RHS =
      Builder.CreateIntCast(EmitScalarExpr(E->getArg(1)), Int128Ty, IsSigned);

  Value *MulResult, *HigherBits;
  if (IsSigned) {
    MulResult = Builder.CreateNSWMul(LHS, RHS);
    HigherBits = Builder.CreateAShr(MulResult, 64);
  } else {
    MulResult = Builder.CreateNUWMul(LHS, RHS);
    HigherBits = Builder.CreateLShr(MulResult, 64);
  }
  HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);

  return HigherBits;
}
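// MSVC x18 intrinsics access thread data relative to the platform register:
// each body computes x18 + offset and performs a byte-aligned access of the
// builtin's width.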
if (BuiltinID == AArch64::BI__writex18byte ||
    BuiltinID == AArch64::BI__writex18word ||
    BuiltinID == AArch64::BI__writex18dword ||
    BuiltinID == AArch64::BI__writex18qword) {
  // Process the args first.
  Value *OffsetArg = EmitScalarExpr(E->getArg(0));
  Value *DataArg = EmitScalarExpr(E->getArg(1));

  // Read x18 as i8* and store the value at x18 + offset.
  llvm::Value *X18 = readX18AsPtr(*this);
  Value *Offset = Builder.CreateZExt(OffsetArg, Int64Ty);
  Value *Ptr = Builder.CreateGEP(Int8Ty, X18, Offset);
  return Builder.CreateAlignedStore(DataArg, Ptr, CharUnits::One());
}

if (BuiltinID == AArch64::BI__readx18byte ||
    BuiltinID == AArch64::BI__readx18word ||
    BuiltinID == AArch64::BI__readx18dword ||
    BuiltinID == AArch64::BI__readx18qword) {
  // Read x18 as i8* and load from x18 + offset.
  Value *OffsetArg = EmitScalarExpr(E->getArg(0));
  llvm::Value *X18 = readX18AsPtr(*this);
  Value *Offset = Builder.CreateZExt(OffsetArg, Int64Ty);
  Value *Ptr = Builder.CreateGEP(Int8Ty, X18, Offset);
  llvm::Type *IntTy = ConvertType(E->getType());
  return Builder.CreateAlignedLoad(IntTy, Ptr, CharUnits::One());
}

if (BuiltinID == AArch64::BI__addx18byte ||
    BuiltinID == AArch64::BI__addx18word ||
    BuiltinID == AArch64::BI__addx18dword ||
    BuiltinID == AArch64::BI__addx18qword ||
    BuiltinID == AArch64::BI__incx18byte ||
    BuiltinID == AArch64::BI__incx18word ||
    BuiltinID == AArch64::BI__incx18dword ||
    BuiltinID == AArch64::BI__incx18qword) {
  llvm::Type *IntTy;
  bool isIncrement;
  switch (BuiltinID) {
  case AArch64::BI__incx18byte:  IntTy = Int8Ty;  isIncrement = true; break;
  case AArch64::BI__incx18word:  IntTy = Int16Ty; isIncrement = true; break;
  case AArch64::BI__incx18dword: IntTy = Int32Ty; isIncrement = true; break;
  case AArch64::BI__incx18qword: IntTy = Int64Ty; isIncrement = true; break;
  default:
    IntTy = ConvertType(E->getType());
    isIncrement = false;
    break;
  }
  Value *OffsetArg = EmitScalarExpr(E->getArg(0));
  Value *ValToAdd =
      isIncrement ? ConstantInt::get(IntTy, 1) : EmitScalarExpr(E->getArg(1));

  // Load from x18 + offset, add, and store the sum back.
  llvm::Value *X18 = readX18AsPtr(*this);
  Value *Offset = Builder.CreateZExt(OffsetArg, Int64Ty);
  Value *Ptr = Builder.CreateGEP(Int8Ty, X18, Offset);
  LoadInst *Load = Builder.CreateAlignedLoad(IntTy, Ptr, CharUnits::One());
  Value *AddResult = Builder.CreateAdd(Load, ValToAdd);
  return Builder.CreateAlignedStore(AddResult, Ptr, CharUnits::One());
}
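// MSVC bit-pattern copies and bit-counting helpers.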
if (BuiltinID == AArch64::BI_CopyDoubleFromInt64 ||
    BuiltinID == AArch64::BI_CopyFloatFromInt32 ||
    BuiltinID == AArch64::BI_CopyInt32FromFloat ||
    BuiltinID == AArch64::BI_CopyInt64FromDouble) {
  Value *Arg = EmitScalarExpr(E->getArg(0));
  llvm::Type *RetTy = ConvertType(E->getType());
  return Builder.CreateBitCast(Arg, RetTy);
}

if (BuiltinID == AArch64::BI_CountLeadingOnes ||
    BuiltinID == AArch64::BI_CountLeadingOnes64 ||
    BuiltinID == AArch64::BI_CountLeadingZeros ||
    BuiltinID == AArch64::BI_CountLeadingZeros64) {
  Value *Arg = EmitScalarExpr(E->getArg(0));
  llvm::Type *ArgType = Arg->getType();

  if (BuiltinID == AArch64::BI_CountLeadingOnes ||
      BuiltinID == AArch64::BI_CountLeadingOnes64)
    Arg = Builder.CreateXor(Arg, Constant::getAllOnesValue(ArgType));

  Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
  Value *Result = Builder.CreateCall(F, {Arg, Builder.getInt1(false)});

  if (BuiltinID == AArch64::BI_CountLeadingOnes64 ||
      BuiltinID == AArch64::BI_CountLeadingZeros64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

if (BuiltinID == AArch64::BI_CountLeadingSigns ||
    BuiltinID == AArch64::BI_CountLeadingSigns64) {
  Value *Arg = EmitScalarExpr(E->getArg(0));

  Function *F = (BuiltinID == AArch64::BI_CountLeadingSigns)
                    ? CGM.getIntrinsic(Intrinsic::aarch64_cls)
                    : CGM.getIntrinsic(Intrinsic::aarch64_cls64);

  Value *Result = Builder.CreateCall(F, Arg, "cls");
  if (BuiltinID == AArch64::BI_CountLeadingSigns64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

if (BuiltinID == AArch64::BI_CountOneBits ||
    BuiltinID == AArch64::BI_CountOneBits64) {
  Value *ArgValue = EmitScalarExpr(E->getArg(0));
  llvm::Type *ArgType = ArgValue->getType();
  Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

  Value *Result = Builder.CreateCall(F, ArgValue);
  if (BuiltinID == AArch64::BI_CountOneBits64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

if (BuiltinID == AArch64::BI__prefetch) {
  Value *Address = EmitScalarExpr(E->getArg(0));
  Value *RW = llvm::ConstantInt::get(Int32Ty, 0);
  Value *Locality = ConstantInt::get(Int32Ty, 3);
  Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
  Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
  return Builder.CreateCall(F, {Address, RW, Locality, Data});
}

if (BuiltinID == AArch64::BI__hlt) {
  Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hlt);
  Builder.CreateCall(F, {EmitScalarExpr(E->getArg(0))});

  // Return 0 for convenience, even though MSVC returns some other undefined
  // value.
  return ConstantInt::get(Builder.getInt32Ty(), 0);
}
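// Lower the remaining scalar bf16 conversion directly, then give the MSVC
// intrinsic translation a chance before any NEON argument evaluation (to
// prevent double evaluation), and fold equivalent NEON builtins to their
// base intrinsic ID.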
if (BuiltinID == NEON::BI__builtin_neon_vcvth_bf16_f32)
  return Builder.CreateFPTrunc(
      Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)),
                            Builder.getFloatTy()),
      Builder.getBFloatTy());

if (std::optional<MSVCIntrin> MsvcIntId =
        translateAarch64ToMsvcIntrin(BuiltinID))
  return EmitMSVCBuiltinExpr(*MsvcIntId, E);

auto It = llvm::find_if(NEONEquivalentIntrinsicMap, [BuiltinID](auto &P) {
  return P.first == BuiltinID;
});
if (It != end(NEONEquivalentIntrinsicMap))
  BuiltinID = It->second;
// Find out if any arguments are required to be integer constant
// expressions.
unsigned ICEArguments = 0;
ASTContext::GetBuiltinTypeError Error;
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");

llvm::SmallVector<Value *, 4> Ops;
Address PtrOp0 = Address::invalid();
for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
  if (i == 0) {
    switch (BuiltinID) {
    case NEON::BI__builtin_neon_vld1_v:
    case NEON::BI__builtin_neon_vld1q_v:
    case NEON::BI__builtin_neon_vld1_dup_v:
    case NEON::BI__builtin_neon_vld1q_dup_v:
    case NEON::BI__builtin_neon_vld1_lane_v:
    case NEON::BI__builtin_neon_vld1q_lane_v:
    case NEON::BI__builtin_neon_vst1_v:
    case NEON::BI__builtin_neon_vst1q_v:
    case NEON::BI__builtin_neon_vst1_lane_v:
    case NEON::BI__builtin_neon_vst1q_lane_v:
    case NEON::BI__builtin_neon_vldap1_lane_s64:
    case NEON::BI__builtin_neon_vldap1q_lane_s64:
    case NEON::BI__builtin_neon_vstl1_lane_s64:
    case NEON::BI__builtin_neon_vstl1q_lane_s64:
      // Get the alignment for the argument in addition to the value;
      // we'll use it later.
      PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
      Ops.push_back(PtrOp0.emitRawPointer(*this));
      continue;
    }
  }
  Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
}

auto SISDMap = ArrayRef(AArch64SISDIntrinsicMap);
const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
    SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);

if (Builtin) {
  Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
  Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
  assert(Result && "SISD intrinsic should have been handled");
  return Result;
}

const Expr *Arg = E->getArg(E->getNumArgs() - 1);
NeonTypeFlags Type(0);
if (std::optional<llvm::APSInt> Result =
        Arg->getIntegerConstantExpr(getContext()))
  // Determine the type of this overloaded NEON intrinsic.
  Type = NeonTypeFlags(Result->getZExtValue());

bool usgn = Type.isUnsigned();
bool quad = Type.isQuad();
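// Handle non-overloaded intrinsics first.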
switch (BuiltinID) {
default: break;
case NEON::BI__builtin_neon_vabsh_f16:
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
case NEON::BI__builtin_neon_vaddq_p128: {
  llvm::Type *Ty = GetNeonType(this, NeonTypeFlags::Poly128);
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
  llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
  return Builder.CreateBitCast(Ops[0], Int128Ty);
}
case NEON::BI__builtin_neon_vldrq_p128: {
  llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
  Value *Ptr = EmitScalarExpr(E->getArg(0));
  return Builder.CreateAlignedLoad(Int128Ty, Ptr,
                                   CharUnits::fromQuantity(16));
}
case NEON::BI__builtin_neon_vstrq_p128: {
  Value *Ptr = Ops[0];
  return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
}
case NEON::BI__builtin_neon_vcvts_f32_u32:
case NEON::BI__builtin_neon_vcvtd_f64_u64:
  usgn = true;
  [[fallthrough]];
case NEON::BI__builtin_neon_vcvts_f32_s32:
case NEON::BI__builtin_neon_vcvtd_f64_s64: {
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
  llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
  llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
  Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
  if (usgn)
    return Builder.CreateUIToFP(Ops[0], FTy);
  return Builder.CreateSIToFP(Ops[0], FTy);
}
case NEON::BI__builtin_neon_vcvth_f16_u16:
case NEON::BI__builtin_neon_vcvth_f16_u32:
case NEON::BI__builtin_neon_vcvth_f16_u64:
  usgn = true;
  [[fallthrough]];
case NEON::BI__builtin_neon_vcvth_f16_s16:
case NEON::BI__builtin_neon_vcvth_f16_s32:
case NEON::BI__builtin_neon_vcvth_f16_s64: {
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  llvm::Type *FTy = HalfTy;
  llvm::Type *InTy;
  if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
    InTy = Int64Ty;
  else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
    InTy = Int32Ty;
  else
    InTy = Int16Ty;
  Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
  if (usgn)
    return Builder.CreateUIToFP(Ops[0], FTy);
  return Builder.CreateSIToFP(Ops[0], FTy);
}
case NEON::BI__builtin_neon_vcvtah_u16_f16:
case NEON::BI__builtin_neon_vcvtmh_u16_f16:
case NEON::BI__builtin_neon_vcvtnh_u16_f16:
case NEON::BI__builtin_neon_vcvtph_u16_f16:
case NEON::BI__builtin_neon_vcvth_u16_f16:
case NEON::BI__builtin_neon_vcvtah_s16_f16:
case NEON::BI__builtin_neon_vcvtmh_s16_f16:
case NEON::BI__builtin_neon_vcvtnh_s16_f16:
case NEON::BI__builtin_neon_vcvtph_s16_f16:
case NEON::BI__builtin_neon_vcvth_s16_f16: {
  unsigned Int;
  llvm::Type *InTy = Int32Ty;
  llvm::Type *FTy = HalfTy;
  llvm::Type *Tys[2] = {InTy, FTy};
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  switch (BuiltinID) {
  default: llvm_unreachable("missing builtin ID in switch!");
  case NEON::BI__builtin_neon_vcvtah_u16_f16:
    Int = Intrinsic::aarch64_neon_fcvtau; break;
  case NEON::BI__builtin_neon_vcvtmh_u16_f16:
    Int = Intrinsic::aarch64_neon_fcvtmu; break;
  case NEON::BI__builtin_neon_vcvtnh_u16_f16:
    Int = Intrinsic::aarch64_neon_fcvtnu; break;
  case NEON::BI__builtin_neon_vcvtph_u16_f16:
    Int = Intrinsic::aarch64_neon_fcvtpu; break;
  case NEON::BI__builtin_neon_vcvth_u16_f16:
    Int = Intrinsic::aarch64_neon_fcvtzu; break;
  case NEON::BI__builtin_neon_vcvtah_s16_f16:
    Int = Intrinsic::aarch64_neon_fcvtas; break;
  case NEON::BI__builtin_neon_vcvtmh_s16_f16:
    Int = Intrinsic::aarch64_neon_fcvtms; break;
  case NEON::BI__builtin_neon_vcvtnh_s16_f16:
    Int = Intrinsic::aarch64_neon_fcvtns; break;
  case NEON::BI__builtin_neon_vcvtph_s16_f16:
    Int = Intrinsic::aarch64_neon_fcvtps; break;
  case NEON::BI__builtin_neon_vcvth_s16_f16:
    Int = Intrinsic::aarch64_neon_fcvtzs; break;
  }
  Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
  return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vcaleh_f16:
case NEON::BI__builtin_neon_vcalth_f16:
case NEON::BI__builtin_neon_vcageh_f16:
case NEON::BI__builtin_neon_vcagth_f16: {
  unsigned Int;
  llvm::Type *InTy = Int32Ty;
  llvm::Type *FTy = HalfTy;
  llvm::Type *Tys[2] = {InTy, FTy};
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  switch (BuiltinID) {
  default: llvm_unreachable("missing builtin ID in switch!");
  case NEON::BI__builtin_neon_vcageh_f16:
    Int = Intrinsic::aarch64_neon_facge; break;
  case NEON::BI__builtin_neon_vcagth_f16:
    Int = Intrinsic::aarch64_neon_facgt; break;
  case NEON::BI__builtin_neon_vcaleh_f16:
    Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
  case NEON::BI__builtin_neon_vcalth_f16:
    Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
  }
  Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
  return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vcvth_n_s16_f16:
case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
  unsigned Int;
  llvm::Type *InTy = Int32Ty;
  llvm::Type *FTy = HalfTy;
  llvm::Type *Tys[2] = {InTy, FTy};
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  switch (BuiltinID) {
  default: llvm_unreachable("missing builtin ID in switch!");
  case NEON::BI__builtin_neon_vcvth_n_s16_f16:
    Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
  case NEON::BI__builtin_neon_vcvth_n_u16_f16:
    Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
  }
  Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
  return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vcvth_n_f16_s16:
case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
  unsigned Int;
  llvm::Type *FTy = HalfTy;
  llvm::Type *InTy = Int32Ty;
  llvm::Type *Tys[2] = {FTy, InTy};
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  switch (BuiltinID) {
  default: llvm_unreachable("missing builtin ID in switch!");
  case NEON::BI__builtin_neon_vcvth_n_f16_s16:
    Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
    Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
    break;
  case NEON::BI__builtin_neon_vcvth_n_f16_u16:
    Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
    Ops[0] = Builder.CreateZExt(Ops[0], InTy);
    break;
  }
  return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
}
case NEON::BI__builtin_neon_vpaddd_s64: {
  auto *Ty = llvm::FixedVectorType::get(Int64Ty, 2);
  Value *Vec = EmitScalarExpr(E->getArg(0));
  // The vector is v2f64, so make sure it's bitcast to a v2i64.
  Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
  llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
  llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
  Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
  Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
  // Pairwise addition of a v2i64 into a scalar i64.
  return Builder.CreateAdd(Op0, Op1, "vpaddd");
}
case NEON::BI__builtin_neon_vpaddd_f64: {
  auto *Ty = llvm::FixedVectorType::get(DoubleTy, 2);
  Value *Vec = EmitScalarExpr(E->getArg(0));
  // The vector is v2f64, so make sure it's bitcast to a v2f64.
  Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
  llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
  llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
  Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
  Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
  // Pairwise addition of a v2f64 into a scalar f64.
  return Builder.CreateFAdd(Op0, Op1, "vpaddd");
}
case NEON::BI__builtin_neon_vpadds_f32: {
  auto *Ty = llvm::FixedVectorType::get(FloatTy, 2);
  Value *Vec = EmitScalarExpr(E->getArg(0));
  // The vector is v2f32, so make sure it's bitcast to a v2f32.
  Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
  llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
  llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
  Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
  Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
  // Pairwise addition of a v2f32 into a scalar f32.
  return Builder.CreateFAdd(Op0, Op1, "vpaddd");
}
case NEON::BI__builtin_neon_vceqzd_s64:
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitAArch64CompareBuiltinExpr(
      Ops[0], ConvertType(E->getCallReturnType(getContext())),
      ICmpInst::ICMP_EQ, "vceqz");
case NEON::BI__builtin_neon_vceqzd_f64:
case NEON::BI__builtin_neon_vceqzs_f32:
case NEON::BI__builtin_neon_vceqzh_f16:
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitAArch64CompareBuiltinExpr(
      Ops[0], ConvertType(E->getCallReturnType(getContext())),
      ICmpInst::FCMP_OEQ, "vceqz");
case NEON::BI__builtin_neon_vcgezd_s64:
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitAArch64CompareBuiltinExpr(
      Ops[0], ConvertType(E->getCallReturnType(getContext())),
      ICmpInst::ICMP_SGE, "vcgez");
case NEON::BI__builtin_neon_vcgezd_f64:
case NEON::BI__builtin_neon_vcgezs_f32:
case NEON::BI__builtin_neon_vcgezh_f16:
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitAArch64CompareBuiltinExpr(
      Ops[0], ConvertType(E->getCallReturnType(getContext())),
      ICmpInst::FCMP_OGE, "vcgez");
case NEON::BI__builtin_neon_vclezd_s64:
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitAArch64CompareBuiltinExpr(
      Ops[0], ConvertType(E->getCallReturnType(getContext())),
      ICmpInst::ICMP_SLE, "vclez");
case NEON::BI__builtin_neon_vclezd_f64:
case NEON::BI__builtin_neon_vclezs_f32:
case NEON::BI__builtin_neon_vclezh_f16:
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitAArch64CompareBuiltinExpr(
      Ops[0], ConvertType(E->getCallReturnType(getContext())),
      ICmpInst::FCMP_OLE, "vclez");
case NEON::BI__builtin_neon_vcgtzd_s64:
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitAArch64CompareBuiltinExpr(
      Ops[0], ConvertType(E->getCallReturnType(getContext())),
      ICmpInst::ICMP_SGT, "vcgtz");
case NEON::BI__builtin_neon_vcgtzd_f64:
case NEON::BI__builtin_neon_vcgtzs_f32:
case NEON::BI__builtin_neon_vcgtzh_f16:
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitAArch64CompareBuiltinExpr(
      Ops[0], ConvertType(E->getCallReturnType(getContext())),
      ICmpInst::FCMP_OGT, "vcgtz");
case NEON::BI__builtin_neon_vcltzd_s64:
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitAArch64CompareBuiltinExpr(
      Ops[0], ConvertType(E->getCallReturnType(getContext())),
      ICmpInst::ICMP_SLT, "vcltz");
case NEON::BI__builtin_neon_vcltzd_f64:
case NEON::BI__builtin_neon_vcltzs_f32:
case NEON::BI__builtin_neon_vcltzh_f16:
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitAArch64CompareBuiltinExpr(
      Ops[0], ConvertType(E->getCallReturnType(getContext())),
      ICmpInst::FCMP_OLT, "vcltz");

case NEON::BI__builtin_neon_vceqzd_u64: {
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
  Ops[0] =
      Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
  return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
}
case NEON::BI__builtin_neon_vceqd_f64:
case NEON::BI__builtin_neon_vcled_f64:
case NEON::BI__builtin_neon_vcltd_f64:
case NEON::BI__builtin_neon_vcged_f64:
case NEON::BI__builtin_neon_vcgtd_f64: {
  llvm::CmpInst::Predicate P;
  switch (BuiltinID) {
  default: llvm_unreachable("missing builtin ID in switch!");
  case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
  case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
  case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
  case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
  case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
  }
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
  Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
  if (P == llvm::FCmpInst::FCMP_OEQ)
    Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
  else
    Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
  return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
}
case NEON::BI__builtin_neon_vceqs_f32:
case NEON::BI__builtin_neon_vcles_f32:
case NEON::BI__builtin_neon_vclts_f32:
case NEON::BI__builtin_neon_vcges_f32:
case NEON::BI__builtin_neon_vcgts_f32: {
  llvm::CmpInst::Predicate P;
  switch (BuiltinID) {
  default: llvm_unreachable("missing builtin ID in switch!");
  case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
  case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
  case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
  case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
  case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
  }
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
  Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
  if (P == llvm::FCmpInst::FCMP_OEQ)
    Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
  else
    Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
  return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
}
case NEON::BI__builtin_neon_vceqh_f16:
case NEON::BI__builtin_neon_vcleh_f16:
case NEON::BI__builtin_neon_vclth_f16:
case NEON::BI__builtin_neon_vcgeh_f16:
case NEON::BI__builtin_neon_vcgth_f16: {
  llvm::CmpInst::Predicate P;
  switch (BuiltinID) {
  default: llvm_unreachable("missing builtin ID in switch!");
  case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
  case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
  case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
  case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
  case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
  }
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
  Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
  if (P == llvm::FCmpInst::FCMP_OEQ)
    Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
  else
    Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
  return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
}
case NEON::BI__builtin_neon_vceqd_s64:
case NEON::BI__builtin_neon_vceqd_u64:
case NEON::BI__builtin_neon_vcgtd_s64:
case NEON::BI__builtin_neon_vcgtd_u64:
case NEON::BI__builtin_neon_vcltd_s64:
case NEON::BI__builtin_neon_vcltd_u64:
case NEON::BI__builtin_neon_vcged_u64:
case NEON::BI__builtin_neon_vcged_s64:
case NEON::BI__builtin_neon_vcled_u64:
case NEON::BI__builtin_neon_vcled_s64: {
  llvm::CmpInst::Predicate P;
  switch (BuiltinID) {
  default: llvm_unreachable("missing builtin ID in switch!");
  case NEON::BI__builtin_neon_vceqd_s64:
  case NEON::BI__builtin_neon_vceqd_u64: P = llvm::ICmpInst::ICMP_EQ; break;
  case NEON::BI__builtin_neon_vcgtd_s64: P = llvm::ICmpInst::ICMP_SGT; break;
  case NEON::BI__builtin_neon_vcgtd_u64: P = llvm::ICmpInst::ICMP_UGT; break;
  case NEON::BI__builtin_neon_vcltd_s64: P = llvm::ICmpInst::ICMP_SLT; break;
  case NEON::BI__builtin_neon_vcltd_u64: P = llvm::ICmpInst::ICMP_ULT; break;
  case NEON::BI__builtin_neon_vcged_u64: P = llvm::ICmpInst::ICMP_UGE; break;
  case NEON::BI__builtin_neon_vcged_s64: P = llvm::ICmpInst::ICMP_SGE; break;
  case NEON::BI__builtin_neon_vcled_u64: P = llvm::ICmpInst::ICMP_ULE; break;
  case NEON::BI__builtin_neon_vcled_s64: P = llvm::ICmpInst::ICMP_SLE; break;
  }
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
  Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
  Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
  return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
}
case NEON::BI__builtin_neon_vtstd_s64:
case NEON::BI__builtin_neon_vtstd_u64: {
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
  Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
  Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
  Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
                              llvm::Constant::getNullValue(Int64Ty));
  return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
}
case NEON::BI__builtin_neon_vset_lane_i8:
case NEON::BI__builtin_neon_vset_lane_i16:
case NEON::BI__builtin_neon_vset_lane_i32:
case NEON::BI__builtin_neon_vset_lane_i64:
case NEON::BI__builtin_neon_vset_lane_bf16:
case NEON::BI__builtin_neon_vset_lane_f32:
case NEON::BI__builtin_neon_vsetq_lane_i8:
case NEON::BI__builtin_neon_vsetq_lane_i16:
case NEON::BI__builtin_neon_vsetq_lane_i32:
case NEON::BI__builtin_neon_vsetq_lane_i64:
case NEON::BI__builtin_neon_vsetq_lane_bf16:
case NEON::BI__builtin_neon_vsetq_lane_f32:
  Ops.push_back(EmitScalarExpr(E->getArg(2)));
  return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vset_lane_f64:
  // The vector type needs a cast for the v1f64 variant.
  Ops[1] =
      Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1));
  Ops.push_back(EmitScalarExpr(E->getArg(2)));
  return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vset_lane_mf8:
case NEON::BI__builtin_neon_vsetq_lane_mf8:
  Ops.push_back(EmitScalarExpr(E->getArg(2)));
  // The input vector type needs a cast to scalar type.
  Ops[0] =
      Builder.CreateBitCast(Ops[0], llvm::Type::getInt8Ty(getLLVMContext()));
  return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vsetq_lane_f64:
  // The vector type needs a cast for the v2f64 variant.
  Ops[1] =
      Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2));
  Ops.push_back(EmitScalarExpr(E->getArg(2)));
  return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");

case NEON::BI__builtin_neon_vget_lane_i8:
case NEON::BI__builtin_neon_vdupb_lane_i8:
  Ops[0] =
      Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8));
  return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                      "vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_i8:
case NEON::BI__builtin_neon_vdupb_laneq_i8:
  Ops[0] =
      Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16));
  return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                      "vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_mf8:
case NEON::BI__builtin_neon_vdupb_lane_mf8:
case NEON::BI__builtin_neon_vgetq_lane_mf8:
case NEON::BI__builtin_neon_vdupb_laneq_mf8:
  return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                      "vget_lane");
case NEON::BI__builtin_neon_vget_lane_i16:
case NEON::BI__builtin_neon_vduph_lane_i16:
  Ops[0] =
      Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4));
  return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                      "vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_i16:
case NEON::BI__builtin_neon_vduph_laneq_i16:
  Ops[0] =
      Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8));
  return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                      "vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i32:
case NEON::BI__builtin_neon_vdups_lane_i32:
  Ops[0] =
      Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2));
  return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                      "vget_lane");
case NEON::BI__builtin_neon_vdups_lane_f32:
  Ops[0] =
      Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
  return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                      "vdups_lane");
case NEON::BI__builtin_neon_vgetq_lane_i32:
case NEON::BI__builtin_neon_vdups_laneq_i32:
  Ops[0] =
      Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
  return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                      "vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i64:
case NEON::BI__builtin_neon_vdupd_lane_i64:
  Ops[0] =
      Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1));
  return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                      "vget_lane");
case NEON::BI__builtin_neon_vdupd_lane_f64:
  Ops[0] =
      Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
  return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                      "vdupd_lane");
case NEON::BI__builtin_neon_vgetq_lane_i64:
case NEON::BI__builtin_neon_vdupd_laneq_i64:
  Ops[0] =
      Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
  return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                      "vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_f32:
  Ops[0] =
      Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
  return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                      "vget_lane");
case NEON::BI__builtin_neon_vget_lane_f64:
  Ops[0] =
      Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
  return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                      "vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_f32:
case NEON::BI__builtin_neon_vdups_laneq_f32:
  Ops[0] =
      Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4));
  return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                      "vgetq_lane");
case NEON::BI__builtin_neon_vgetq_lane_f64:
case NEON::BI__builtin_neon_vdupd_laneq_f64:
  Ops[0] =
      Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2));
  return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                      "vgetq_lane");
case NEON::BI__builtin_neon_vaddh_f16:
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
case NEON::BI__builtin_neon_vsubh_f16:
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
case NEON::BI__builtin_neon_vmulh_f16:
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
case NEON::BI__builtin_neon_vdivh_f16:
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
case NEON::BI__builtin_neon_vfmah_f16:
  // NEON intrinsic puts accumulator first, unlike the LLVM fma.
  return emitCallMaybeConstrainedFPBuiltin(
      *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
      {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
case NEON::BI__builtin_neon_vfmsh_f16: {
  Value *Neg = Builder.CreateFNeg(EmitScalarExpr(E->getArg(1)), "vsubh");

  // NEON intrinsic puts accumulator first, unlike the LLVM fma.
  return emitCallMaybeConstrainedFPBuiltin(
      *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
      {Neg, EmitScalarExpr(E->getArg(2)), Ops[0]});
}
case NEON::BI__builtin_neon_vaddd_s64:
case NEON::BI__builtin_neon_vaddd_u64:
  return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
case NEON::BI__builtin_neon_vsubd_s64:
case NEON::BI__builtin_neon_vsubd_u64:
  return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
case NEON::BI__builtin_neon_vqdmlalh_s16:
case NEON::BI__builtin_neon_vqdmlslh_s16: {
  SmallVector<Value *, 2> ProductOps;
  ProductOps.push_back(vectorWrapScalar16(Ops[1]));
  ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
  auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
  Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
                        ProductOps, "vqdmlXl");
  Constant *CI = ConstantInt::get(SizeTy, 0);
  Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");

  unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
                          ? Intrinsic::aarch64_neon_sqadd
                          : Intrinsic::aarch64_neon_sqsub;
  return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
}
case NEON::BI__builtin_neon_vqshlud_n_s64: {
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
  return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
                      Ops, "vqshlu_n");
}
case NEON::BI__builtin_neon_vqshld_n_u64:
case NEON::BI__builtin_neon_vqshld_n_s64: {
  unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
                     ? Intrinsic::aarch64_neon_uqshl
                     : Intrinsic::aarch64_neon_sqshl;
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
  return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
}
case NEON::BI__builtin_neon_vrshrd_n_u64:
case NEON::BI__builtin_neon_vrshrd_n_s64: {
  unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
                     ? Intrinsic::aarch64_neon_urshl
                     : Intrinsic::aarch64_neon_srshl;
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
  Ops[1] = ConstantInt::get(Int64Ty, -SV);
  return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
}
case NEON::BI__builtin_neon_vrsrad_n_u64:
case NEON::BI__builtin_neon_vrsrad_n_s64: {
  unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
                     ? Intrinsic::aarch64_neon_urshl
                     : Intrinsic::aarch64_neon_srshl;
  Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
  Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
  Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
                              {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
  return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
}
case NEON::BI__builtin_neon_vshld_n_s64:
case NEON::BI__builtin_neon_vshld_n_u64: {
  llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
  return Builder.CreateShl(
      Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
}
case NEON::BI__builtin_neon_vshrd_n_s64: {
  llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
  return Builder.CreateAShr(
      Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
                                                 Amt->getZExtValue())),
      "shrd_n");
}
case NEON::BI__builtin_neon_vshrd_n_u64: {
  llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
  uint64_t ShiftAmt = Amt->getZExtValue();
  // Right-shifting an unsigned value by its size yields 0.
  if (ShiftAmt == 64)
    return ConstantInt::get(Int64Ty, 0);
  return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
                            "shrd_n");
}
case NEON::BI__builtin_neon_vsrad_n_s64: {
  llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
  Ops[1] = Builder.CreateAShr(
      Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
                                                 Amt->getZExtValue())),
      "shrd_n");
  return Builder.CreateAdd(Ops[0], Ops[1]);
}
case NEON::BI__builtin_neon_vsrad_n_u64: {
  llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
  uint64_t ShiftAmt = Amt->getZExtValue();
  // Right-shifting an unsigned value by its size yields 0.
  // As Op + 0 = Op, return Ops[0] directly.
  if (ShiftAmt == 64)
    return Ops[0];
  Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
                              "shrd_n");
  return Builder.CreateAdd(Ops[0], Ops[1]);
}
case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
  Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
                                        "lane");
  SmallVector<Value *, 2> ProductOps;
  ProductOps.push_back(vectorWrapScalar16(Ops[1]));
  ProductOps.push_back(vectorWrapScalar16(Ops[2]));
  auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
  Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
                        ProductOps, "vqdmlXl");
  Constant *CI = ConstantInt::get(SizeTy, 0);
  Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
  Ops.pop_back();

  unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
                     BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
                        ? Intrinsic::aarch64_neon_sqadd
                        : Intrinsic::aarch64_neon_sqsub;
  return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
}
case NEON::BI__builtin_neon_vqdmlals_s32:
case NEON::BI__builtin_neon_vqdmlsls_s32: {
  SmallVector<Value *, 2> ProductOps;
  ProductOps.push_back(Ops[1]);
  ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
  Ops[1] =
      EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
                   ProductOps, "vqdmlXl");

  unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
                          ? Intrinsic::aarch64_neon_sqadd
                          : Intrinsic::aarch64_neon_sqsub;
  return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
}
case NEON::BI__builtin_neon_vqdmlals_lane_s32:
case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
  Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
                                        "lane");
  SmallVector<Value *, 2> ProductOps;
  ProductOps.push_back(Ops[1]);
  ProductOps.push_back(Ops[2]);
  Ops[1] =
      EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
                   ProductOps, "vqdmlXl");
  Ops.pop_back();

  unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
                     BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
                        ? Intrinsic::aarch64_neon_sqadd
                        : Intrinsic::aarch64_neon_sqsub;
  return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
}
case NEON::BI__builtin_neon_vget_lane_bf16:
case NEON::BI__builtin_neon_vduph_lane_bf16:
case NEON::BI__builtin_neon_vduph_lane_f16: {
  return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                      "vget_lane");
}
case NEON::BI__builtin_neon_vgetq_lane_bf16:
case NEON::BI__builtin_neon_vduph_laneq_bf16:
case NEON::BI__builtin_neon_vduph_laneq_f16: {
  return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                      "vgetq_lane");
}
case NEON::BI__builtin_neon_vcvt_bf16_f32: {
  llvm::Type *V4F32 = FixedVectorType::get(Builder.getFloatTy(), 4);
  llvm::Type *V4BF16 = FixedVectorType::get(Builder.getBFloatTy(), 4);
  return Builder.CreateFPTrunc(Builder.CreateBitCast(Ops[0], V4F32), V4BF16);
}
case NEON::BI__builtin_neon_vcvtq_low_bf16_f32: {
  SmallVector<int, 16> ConcatMask(8);
  std::iota(ConcatMask.begin(), ConcatMask.end(), 0);
  llvm::Type *V4F32 = FixedVectorType::get(Builder.getFloatTy(), 4);
  llvm::Type *V4BF16 = FixedVectorType::get(Builder.getBFloatTy(), 4);
  llvm::Value *Trunc =
      Builder.CreateFPTrunc(Builder.CreateBitCast(Ops[0], V4F32), V4BF16);
  return Builder.CreateShuffleVector(
      Trunc, ConstantAggregateZero::get(V4BF16), ConcatMask);
}
case NEON::BI__builtin_neon_vcvtq_high_bf16_f32: {
  SmallVector<int, 16> ConcatMask(8);
  std::iota(ConcatMask.begin(), ConcatMask.end(), 0);
  SmallVector<int, 16> LoMask(4);
  std::iota(LoMask.begin(), LoMask.end(), 0);
  llvm::Type *V4F32 = FixedVectorType::get(Builder.getFloatTy(), 4);
  llvm::Type *V4BF16 = FixedVectorType::get(Builder.getBFloatTy(), 4);
  llvm::Type *V8BF16 = FixedVectorType::get(Builder.getBFloatTy(), 8);
  llvm::Value *Inactive = Builder.CreateShuffleVector(
      Builder.CreateBitCast(Ops[0], V8BF16), LoMask);
  llvm::Value *Trunc =
      Builder.CreateFPTrunc(Builder.CreateBitCast(Ops[1], V4F32), V4BF16);
  return Builder.CreateShuffleVector(Inactive, Trunc, ConcatMask);
}
case clang::AArch64::BI_InterlockedAdd:
case clang::AArch64::BI_InterlockedAdd_acq:
case clang::AArch64::BI_InterlockedAdd_rel:
case clang::AArch64::BI_InterlockedAdd_nf:
case clang::AArch64::BI_InterlockedAdd64:
case clang::AArch64::BI_InterlockedAdd64_acq:
case clang::AArch64::BI_InterlockedAdd64_rel:
case clang::AArch64::BI_InterlockedAdd64_nf: {
  Address DestAddr = CheckAtomicAlignment(*this, E);
  Value *Val = EmitScalarExpr(E->getArg(1));
  llvm::AtomicOrdering Ordering;
  switch (BuiltinID) {
  case clang::AArch64::BI_InterlockedAdd:
  case clang::AArch64::BI_InterlockedAdd64:
    Ordering = llvm::AtomicOrdering::SequentiallyConsistent;
    break;
  case clang::AArch64::BI_InterlockedAdd_acq:
  case clang::AArch64::BI_InterlockedAdd64_acq:
    Ordering = llvm::AtomicOrdering::Acquire;
    break;
  case clang::AArch64::BI_InterlockedAdd_rel:
  case clang::AArch64::BI_InterlockedAdd64_rel:
    Ordering = llvm::AtomicOrdering::Release;
    break;
  case clang::AArch64::BI_InterlockedAdd_nf:
  case clang::AArch64::BI_InterlockedAdd64_nf:
    Ordering = llvm::AtomicOrdering::Monotonic;
    break;
  default:
    llvm_unreachable("missing builtin ID in switch!");
  }
  AtomicRMWInst *RMWI =
      Builder.CreateAtomicRMW(AtomicRMWInst::Add, DestAddr, Val, Ordering);
  return Builder.CreateAdd(RMWI, Val);
}
}
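// From here on the builtin is an overloaded NEON vector operation: resolve
// the vector type from the NeonTypeFlags, defer to the common ARM/AArch64
// emitter when the builtin is in the shared map, and otherwise handle the
// AArch64-specific cases below.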
llvm::FixedVectorType *VTy = GetNeonType(this, Type);
llvm::Type *Ty = VTy;
if (!Ty)
  return nullptr;

// Not all intrinsics handled by the common case work for AArch64 yet, so only
// defer to common code if it's been added to our special map.
Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
                                      AArch64SIMDIntrinsicsProvenSorted);
if (Builtin)
  return EmitCommonNeonBuiltinExpr(
      Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
      Builtin->NameHint, Builtin->TypeModifier, E, Ops,
      /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);

if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
  return V;

unsigned Int;
bool ExtractLow = false;
bool ExtendLaneArg = false;
switch (BuiltinID) {
default:
  return nullptr;
case NEON::BI__builtin_neon_vbsl_v:
case NEON::BI__builtin_neon_vbslq_v: {
  llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
  Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
  Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
  Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");

  Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
  Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
  Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
  return Builder.CreateBitCast(Ops[0], Ty);
}
case NEON::BI__builtin_neon_vfma_lane_v:
case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
  // The ARM builtins (and instructions) have the addend as the first
  // operand, but the 'fma' intrinsics have it last. Swap it around here.
  Value *Addend = Ops[0];
  Value *Multiplicand = Ops[1];
  Value *LaneSource = Ops[2];
  Ops[0] = Multiplicand;
  Ops[1] = LaneSource;
  Ops[2] = Addend;

  // Now adjust things to handle the lane access.
  auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v
                       ? llvm::FixedVectorType::get(VTy->getElementType(),
                                                    VTy->getNumElements() / 2)
                       : VTy;
  llvm::Constant *cst = cast<Constant>(Ops[3]);
  Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst);
  Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
  Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");

  Ops.pop_back();
  Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma
                                     : Intrinsic::fma;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
}
case NEON::BI__builtin_neon_vfma_laneq_v: {
  auto *VTy = cast<llvm::FixedVectorType>(Ty);
  // v1f64 fma should be mapped to Neon scalar f64 fma
  if (VTy && VTy->getElementType() == DoubleTy) {
    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
    llvm::FixedVectorType *VTy =
        GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
    Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
    Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
    Value *Result = emitCallMaybeConstrainedFPBuiltin(
        *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma,
        DoubleTy, {Ops[1], Ops[2], Ops[0]});
    return Builder.CreateBitCast(Result, Ty);
  }
  Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  Ops[1] = Builder.CreateBitCast(Ops[1], Ty);

  auto *STy = llvm::FixedVectorType::get(VTy->getElementType(),
                                         VTy->getNumElements() * 2);
  Ops[2] = Builder.CreateBitCast(Ops[2], STy);
  Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(),
                                             cast<ConstantInt>(Ops[3]));
  Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");

  return emitCallMaybeConstrainedFPBuiltin(
      *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
      {Ops[2], Ops[1], Ops[0]});
}
case NEON::BI__builtin_neon_vfmaq_laneq_v: {
  Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  Ops[1] = Builder.CreateBitCast(Ops[1], Ty);

  Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
  Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
  return emitCallMaybeConstrainedFPBuiltin(
      *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
      {Ops[2], Ops[1], Ops[0]});
}
case NEON::BI__builtin_neon_vfmah_lane_f16:
case NEON::BI__builtin_neon_vfmas_lane_f32:
case NEON::BI__builtin_neon_vfmah_laneq_f16:
case NEON::BI__builtin_neon_vfmas_laneq_f32:
case NEON::BI__builtin_neon_vfmad_lane_f64:
case NEON::BI__builtin_neon_vfmad_laneq_f64: {
  Ops.push_back(EmitScalarExpr(E->getArg(3)));
  llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
  Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
  return emitCallMaybeConstrainedFPBuiltin(
      *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
      {Ops[1], Ops[2], Ops[0]});
}
case NEON::BI__builtin_neon_vmull_v:
  // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
  Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
  if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
case NEON::BI__builtin_neon_vmax_v:
case NEON::BI__builtin_neon_vmaxq_v:
  // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
  Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
  if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
case NEON::BI__builtin_neon_vmaxh_f16: {
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  Int = Intrinsic::aarch64_neon_fmax;
  return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
}
case NEON::BI__builtin_neon_vmin_v:
case NEON::BI__builtin_neon_vminq_v:
  // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
  Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
  if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
case NEON::BI__builtin_neon_vminh_f16: {
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  Int = Intrinsic::aarch64_neon_fmin;
  return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
}
case NEON::BI__builtin_neon_vabd_v:
case NEON::BI__builtin_neon_vabdq_v:
  // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
  Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
  if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
case NEON::BI__builtin_neon_vpadal_v:
case NEON::BI__builtin_neon_vpadalq_v: {
  unsigned ArgElts = VTy->getNumElements();
  llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
  unsigned BitWidth = EltTy->getBitWidth();
  auto *ArgTy = llvm::FixedVectorType::get(
      llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts);
  llvm::Type* Tys[2] = { VTy, ArgTy };
  Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
  SmallVector<llvm::Value*, 1> TmpOps;
  TmpOps.push_back(Ops[1]);
  Function *F = CGM.getIntrinsic(Int, Tys);
  llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
  llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
  return Builder.CreateAdd(tmp, addend);
}
case NEON::BI__builtin_neon_vpmin_v:
case NEON::BI__builtin_neon_vpminq_v:
  // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
  Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
  if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
case NEON::BI__builtin_neon_vpmax_v:
case NEON::BI__builtin_neon_vpmaxq_v:
  // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
  Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
  if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
case NEON::BI__builtin_neon_vminnm_v:
case NEON::BI__builtin_neon_vminnmq_v:
  Int = Intrinsic::aarch64_neon_fminnm;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
case NEON::BI__builtin_neon_vminnmh_f16:
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  Int = Intrinsic::aarch64_neon_fminnm;
  return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
case NEON::BI__builtin_neon_vmaxnm_v:
case NEON::BI__builtin_neon_vmaxnmq_v:
  Int = Intrinsic::aarch64_neon_fmaxnm;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
case NEON::BI__builtin_neon_vmaxnmh_f16:
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  Int = Intrinsic::aarch64_neon_fmaxnm;
  return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
case NEON::BI__builtin_neon_vrecpss_f32: {
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
                      Ops, "vrecps");
}
case NEON::BI__builtin_neon_vrecpsd_f64:
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
                      Ops, "vrecps");
case NEON::BI__builtin_neon_vrecpsh_f16:
  Ops.push_back(EmitScalarExpr(E->getArg(1)));
  return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
                      Ops, "vrecps");
case NEON::BI__builtin_neon_vqshrun_n_v:
  Int = Intrinsic::aarch64_neon_sqshrun;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
case NEON::BI__builtin_neon_vqrshrun_n_v:
  Int = Intrinsic::aarch64_neon_sqrshrun;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
case NEON::BI__builtin_neon_vqshrn_n_v:
  Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
case NEON::BI__builtin_neon_vrshrn_n_v:
  Int = Intrinsic::aarch64_neon_rshrn;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
case NEON::BI__builtin_neon_vqrshrn_n_v:
  Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
case NEON::BI__builtin_neon_vrndah_f16: {
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  Int = Builder.getIsFPConstrained()
            ? Intrinsic::experimental_constrained_round
            : Intrinsic::round;
  return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
}
case NEON::BI__builtin_neon_vrnda_v:
case NEON::BI__builtin_neon_vrndaq_v: {
  Int = Builder.getIsFPConstrained()
            ? Intrinsic::experimental_constrained_round
            : Intrinsic::round;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
}
case NEON::BI__builtin_neon_vrndih_f16: {
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  Int = Builder.getIsFPConstrained()
            ? Intrinsic::experimental_constrained_nearbyint
            : Intrinsic::nearbyint;
  return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
}
case NEON::BI__builtin_neon_vrndmh_f16: {
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  Int = Builder.getIsFPConstrained()
            ? Intrinsic::experimental_constrained_floor
            : Intrinsic::floor;
  return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
}
case NEON::BI__builtin_neon_vrndm_v:
case NEON::BI__builtin_neon_vrndmq_v: {
  Int = Builder.getIsFPConstrained()
            ? Intrinsic::experimental_constrained_floor
            : Intrinsic::floor;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
}
case NEON::BI__builtin_neon_vrndnh_f16: {
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  Int = Builder.getIsFPConstrained()
            ? Intrinsic::experimental_constrained_roundeven
            : Intrinsic::roundeven;
  return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
}
case NEON::BI__builtin_neon_vrndn_v:
case NEON::BI__builtin_neon_vrndnq_v: {
  Int = Builder.getIsFPConstrained()
            ? Intrinsic::experimental_constrained_roundeven
            : Intrinsic::roundeven;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
}
case NEON::BI__builtin_neon_vrndns_f32: {
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  Int = Builder.getIsFPConstrained()
            ? Intrinsic::experimental_constrained_roundeven
            : Intrinsic::roundeven;
  return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
}
case NEON::BI__builtin_neon_vrndph_f16: {
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  Int = Builder.getIsFPConstrained()
            ? Intrinsic::experimental_constrained_ceil
            : Intrinsic::ceil;
  return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
}
case NEON::BI__builtin_neon_vrndp_v:
case NEON::BI__builtin_neon_vrndpq_v: {
  Int = Builder.getIsFPConstrained()
            ? Intrinsic::experimental_constrained_ceil
            : Intrinsic::ceil;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
}
case NEON::BI__builtin_neon_vrndxh_f16: {
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  Int = Builder.getIsFPConstrained()
            ? Intrinsic::experimental_constrained_rint
            : Intrinsic::rint;
  return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
}
case NEON::BI__builtin_neon_vrndx_v:
case NEON::BI__builtin_neon_vrndxq_v: {
  Int = Builder.getIsFPConstrained()
            ? Intrinsic::experimental_constrained_rint
            : Intrinsic::rint;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
}
case NEON::BI__builtin_neon_vrndh_f16: {
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  Int = Builder.getIsFPConstrained()
            ? Intrinsic::experimental_constrained_trunc
            : Intrinsic::trunc;
  return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
}
case NEON::BI__builtin_neon_vrnd32x_f32:
case NEON::BI__builtin_neon_vrnd32xq_f32:
case NEON::BI__builtin_neon_vrnd32x_f64:
case NEON::BI__builtin_neon_vrnd32xq_f64: {
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  Int = Intrinsic::aarch64_neon_frint32x;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32x");
}
case NEON::BI__builtin_neon_vrnd32z_f32:
case NEON::BI__builtin_neon_vrnd32zq_f32:
case NEON::BI__builtin_neon_vrnd32z_f64:
case NEON::BI__builtin_neon_vrnd32zq_f64: {
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  Int = Intrinsic::aarch64_neon_frint32z;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32z");
}
case NEON::BI__builtin_neon_vrnd64x_f32:
case NEON::BI__builtin_neon_vrnd64xq_f32:
case NEON::BI__builtin_neon_vrnd64x_f64:
case NEON::BI__builtin_neon_vrnd64xq_f64: {
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  Int = Intrinsic::aarch64_neon_frint64x;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64x");
}
case NEON::BI__builtin_neon_vrnd64z_f32:
case NEON::BI__builtin_neon_vrnd64zq_f32:
case NEON::BI__builtin_neon_vrnd64z_f64:
case NEON::BI__builtin_neon_vrnd64zq_f64: {
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  Int = Intrinsic::aarch64_neon_frint64z;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z");
}
case NEON::BI__builtin_neon_vrnd_v:
case NEON::BI__builtin_neon_vrndq_v: {
  Int = Builder.getIsFPConstrained()
            ? Intrinsic::experimental_constrained_trunc
            : Intrinsic::trunc;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
}
case NEON::BI__builtin_neon_vcvt_f64_v:
case NEON::BI__builtin_neon_vcvtq_f64_v:
  Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
  return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
              : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
case NEON::BI__builtin_neon_vcvt_f64_f32: {
  assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
         "unexpected vcvt_f64_f32 builtin");
  NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
  Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));

  return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
}
case NEON::BI__builtin_neon_vcvt_f32_f64: {
  assert(Type.getEltType() == NeonTypeFlags::Float32 &&
         "unexpected vcvt_f32_f64 builtin");
  NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
  Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));

  return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
}
case NEON::BI__builtin_neon_vcvt_s32_v:
case NEON::BI__builtin_neon_vcvt_u32_v:
case NEON::BI__builtin_neon_vcvt_s64_v:
case NEON::BI__builtin_neon_vcvt_u64_v:
case NEON::BI__builtin_neon_vcvt_s16_f16:
case NEON::BI__builtin_neon_vcvt_u16_f16:
case NEON::BI__builtin_neon_vcvtq_s32_v:
case NEON::BI__builtin_neon_vcvtq_u32_v:
case NEON::BI__builtin_neon_vcvtq_s64_v:
case NEON::BI__builtin_neon_vcvtq_u64_v:
case NEON::BI__builtin_neon_vcvtq_s16_f16:
case NEON::BI__builtin_neon_vcvtq_u16_f16: {
  Int =
      usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs;
  llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
  return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz");
}
case NEON::BI__builtin_neon_vcvta_s16_f16:
case NEON::BI__builtin_neon_vcvta_u16_f16:
case NEON::BI__builtin_neon_vcvta_s32_v:
case NEON::BI__builtin_neon_vcvtaq_s16_f16:
case NEON::BI__builtin_neon_vcvtaq_s32_v:
case NEON::BI__builtin_neon_vcvta_u32_v:
case NEON::BI__builtin_neon_vcvtaq_u16_f16:
case NEON::BI__builtin_neon_vcvtaq_u32_v:
case NEON::BI__builtin_neon_vcvta_s64_v:
case NEON::BI__builtin_neon_vcvtaq_s64_v:
case NEON::BI__builtin_neon_vcvta_u64_v:
case NEON::BI__builtin_neon_vcvtaq_u64_v: {
  Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
  llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
  return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
}
case NEON::BI__builtin_neon_vcvtm_s16_f16:
case NEON::BI__builtin_neon_vcvtm_s32_v:
case NEON::BI__builtin_neon_vcvtmq_s16_f16:
case NEON::BI__builtin_neon_vcvtmq_s32_v:
case NEON::BI__builtin_neon_vcvtm_u16_f16:
case NEON::BI__builtin_neon_vcvtm_u32_v:
case NEON::BI__builtin_neon_vcvtmq_u16_f16:
case NEON::BI__builtin_neon_vcvtmq_u32_v:
case NEON::BI__builtin_neon_vcvtm_s64_v:
case NEON::BI__builtin_neon_vcvtmq_s64_v:
case NEON::BI__builtin_neon_vcvtm_u64_v:
case NEON::BI__builtin_neon_vcvtmq_u64_v: {
  Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
  llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
  return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
}
case NEON::BI__builtin_neon_vcvtn_s16_f16:
case NEON::BI__builtin_neon_vcvtn_s32_v:
case NEON::BI__builtin_neon_vcvtnq_s16_f16:
case NEON::BI__builtin_neon_vcvtnq_s32_v:
case NEON::BI__builtin_neon_vcvtn_u16_f16:
case NEON::BI__builtin_neon_vcvtn_u32_v:
case NEON::BI__builtin_neon_vcvtnq_u16_f16:
case NEON::BI__builtin_neon_vcvtnq_u32_v:
case NEON::BI__builtin_neon_vcvtn_s64_v:
case NEON::BI__builtin_neon_vcvtnq_s64_v:
case NEON::BI__builtin_neon_vcvtn_u64_v:
case NEON::BI__builtin_neon_vcvtnq_u64_v: {
  Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
  llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
  return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
}
case NEON::BI__builtin_neon_vcvtp_s16_f16:
case NEON::BI__builtin_neon_vcvtp_s32_v:
case NEON::BI__builtin_neon_vcvtpq_s16_f16:
case NEON::BI__builtin_neon_vcvtpq_s32_v:
case NEON::BI__builtin_neon_vcvtp_u16_f16:
case NEON::BI__builtin_neon_vcvtp_u32_v:
case NEON::BI__builtin_neon_vcvtpq_u16_f16:
case NEON::BI__builtin_neon_vcvtpq_u32_v:
case NEON::BI__builtin_neon_vcvtp_s64_v:
case NEON::BI__builtin_neon_vcvtpq_s64_v:
case NEON::BI__builtin_neon_vcvtp_u64_v:
case NEON::BI__builtin_neon_vcvtpq_u64_v: {
  Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
  llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
  return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
}
case NEON::BI__builtin_neon_vmulx_v:
case NEON::BI__builtin_neon_vmulxq_v: {
  Int = Intrinsic::aarch64_neon_fmulx;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
}
case NEON::BI__builtin_neon_vmulxh_lane_f16:
case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
  // vmulx_lane should be mapped to Neon scalar mulx after
  // extracting the scalar element
  Ops.push_back(EmitScalarExpr(E->getArg(2)));
  Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
  Ops.pop_back();
  Int = Intrinsic::aarch64_neon_fmulx;
  return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
}
case NEON::BI__builtin_neon_vmul_lane_v:
case NEON::BI__builtin_neon_vmul_laneq_v: {
  // v1f64 vmul_lane should be mapped to Neon scalar mul lane
  bool Quad = false;
  if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
    Quad = true;
  Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
  llvm::FixedVectorType *VTy =
      GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
  Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
  Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
  Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
  return Builder.CreateBitCast(Result, Ty);
}
case NEON::BI__builtin_neon_vnegd_s64:
  return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
case NEON::BI__builtin_neon_vnegh_f16:
  return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
case NEON::BI__builtin_neon_vpmaxnm_v:
case NEON::BI__builtin_neon_vpmaxnmq_v: {
  Int = Intrinsic::aarch64_neon_fmaxnmp;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
}
case NEON::BI__builtin_neon_vpminnm_v:
case NEON::BI__builtin_neon_vpminnmq_v: {
  Int = Intrinsic::aarch64_neon_fminnmp;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
}
case NEON::BI__builtin_neon_vsqrth_f16: {
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  Int = Builder.getIsFPConstrained()
            ? Intrinsic::experimental_constrained_sqrt
            : Intrinsic::sqrt;
  return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
}
case NEON::BI__builtin_neon_vsqrt_v:
case NEON::BI__builtin_neon_vsqrtq_v: {
  Int = Builder.getIsFPConstrained()
            ? Intrinsic::experimental_constrained_sqrt
            : Intrinsic::sqrt;
  Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
}
case NEON::BI__builtin_neon_vrbit_v:
case NEON::BI__builtin_neon_vrbitq_v: {
  Int = Intrinsic::bitreverse;
  return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
}
case NEON::BI__builtin_neon_vmaxv_f16: {
  Int = Intrinsic::aarch64_neon_fmaxv;
  Ty = HalfTy;
  VTy = llvm::FixedVectorType::get(HalfTy, 4);
  llvm::Type *Tys[2] = { Ty, VTy };
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
}
case NEON::BI__builtin_neon_vmaxvq_f16: {
  Int = Intrinsic::aarch64_neon_fmaxv;
  Ty = HalfTy;
  VTy = llvm::FixedVectorType::get(HalfTy, 8);
  llvm::Type *Tys[2] = { Ty, VTy };
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
}
case NEON::BI__builtin_neon_vminv_f16: {
  Int = Intrinsic::aarch64_neon_fminv;
  Ty = HalfTy;
  VTy = llvm::FixedVectorType::get(HalfTy, 4);
  llvm::Type *Tys[2] = { Ty, VTy };
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
}
case NEON::BI__builtin_neon_vminvq_f16: {
  Int = Intrinsic::aarch64_neon_fminv;
  Ty = HalfTy;
  VTy = llvm::FixedVectorType::get(HalfTy, 8);
  llvm::Type *Tys[2] = { Ty, VTy };
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
}
case NEON::BI__builtin_neon_vmaxnmv_f16: {
  Int = Intrinsic::aarch64_neon_fmaxnmv;
  Ty = HalfTy;
  VTy = llvm::FixedVectorType::get(HalfTy, 4);
  llvm::Type *Tys[2] = { Ty, VTy };
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
}
case NEON::BI__builtin_neon_vmaxnmvq_f16: {
  Int = Intrinsic::aarch64_neon_fmaxnmv;
  Ty = HalfTy;
  VTy = llvm::FixedVectorType::get(HalfTy, 8);
  llvm::Type *Tys[2] = { Ty, VTy };
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
}
case NEON::BI__builtin_neon_vminnmv_f16: {
  Int = Intrinsic::aarch64_neon_fminnmv;
  Ty = HalfTy;
  VTy = llvm::FixedVectorType::get(HalfTy, 4);
  llvm::Type *Tys[2] = { Ty, VTy };
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
}
case NEON::BI__builtin_neon_vminnmvq_f16: {
  Int = Intrinsic::aarch64_neon_fminnmv;
  Ty = HalfTy;
  VTy = llvm::FixedVectorType::get(HalfTy, 8);
  llvm::Type *Tys[2] = { Ty, VTy };
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
}
case NEON::BI__builtin_neon_vmul_n_f64: {
  Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
  Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
  return Builder.CreateFMul(Ops[0], RHS);
}
case NEON::BI__builtin_neon_vaddlv_u8: {
  Int = Intrinsic::aarch64_neon_uaddlv;
  Ty = Int32Ty;
  VTy = llvm::FixedVectorType::get(Int8Ty, 8);
  llvm::Type *Tys[2] = { Ty, VTy };
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddlv_u16: {
  Int = Intrinsic::aarch64_neon_uaddlv;
  Ty = Int32Ty;
  VTy = llvm::FixedVectorType::get(Int16Ty, 4);
  llvm::Type *Tys[2] = { Ty, VTy };
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vaddlvq_u8: {
  Int = Intrinsic::aarch64_neon_uaddlv;
  Ty = Int32Ty;
  VTy = llvm::FixedVectorType::get(Int8Ty, 16);
  llvm::Type *Tys[2] = { Ty, VTy };
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddlvq_u16: {
  Int = Intrinsic::aarch64_neon_uaddlv;
  Ty = Int32Ty;
  VTy = llvm::FixedVectorType::get(Int16Ty, 8);
  llvm::Type *Tys[2] = { Ty, VTy };
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vaddlv_s8: {
  Int = Intrinsic::aarch64_neon_saddlv;
  Ty = Int32Ty;
  VTy = llvm::FixedVectorType::get(Int8Ty, 8);
  llvm::Type *Tys[2] = { Ty, VTy };
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddlv_s16: {
  Int = Intrinsic::aarch64_neon_saddlv;
  Ty = Int32Ty;
  VTy = llvm::FixedVectorType::get(Int16Ty, 4);
  llvm::Type *Tys[2] = { Ty, VTy };
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vaddlvq_s8: {
  Int = Intrinsic::aarch64_neon_saddlv;
  Ty = Int32Ty;
  VTy = llvm::FixedVectorType::get(Int8Ty, 16);
  llvm::Type *Tys[2] = { Ty, VTy };
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddlvq_s16: {
  Int = Intrinsic::aarch64_neon_saddlv;
  Ty = Int32Ty;
  VTy = llvm::FixedVectorType::get(Int16Ty, 8);
  llvm::Type *Tys[2] = { Ty, VTy };
  Ops.push_back(EmitScalarExpr(E->getArg(0)));
  return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vsri_n_v:
case NEON::BI__builtin_neon_vsriq_n_v: {
  Int = Intrinsic::aarch64_neon_vsri;
  llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
  return EmitNeonCall(Intrin, Ops, "vsri_n");
}
case NEON::BI__builtin_neon_vsli_n_v:
case NEON::BI__builtin_neon_vsliq_n_v: {
  Int = Intrinsic::aarch64_neon_vsli;
  llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
  return EmitNeonCall(Intrin, Ops, "vsli_n");
}
case NEON::BI__builtin_neon_vsra_n_v:
case NEON::BI__builtin_neon_vsraq_n_v:
  Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
  Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
  return Builder.CreateAdd(Ops[0], Ops[1]);
case NEON::BI__builtin_neon_vrsra_n_v:
case NEON::BI__builtin_neon_vrsraq_n_v: {
  Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
  SmallVector<llvm::Value *, 2> TmpOps;
  TmpOps.push_back(Ops[1]);
  TmpOps.push_back(Ops[2]);
  Function *F = CGM.getIntrinsic(Int, Ty);
  llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
  Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
  return Builder.CreateAdd(Ops[0], tmp);
}
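// Vector loads and stores, including lane, dup, and multi-vector
// (vld2/vld3/vld4) forms; the multi-vector results are returned through the
// pointer held in Ops[0].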
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v: {
  return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment());
}
case NEON::BI__builtin_neon_vst1_v:
case NEON::BI__builtin_neon_vst1q_v:
  Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
  return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
case NEON::BI__builtin_neon_vld1_lane_v:
case NEON::BI__builtin_neon_vld1q_lane_v: {
  Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
                                     PtrOp0.getAlignment());
  return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
}
case NEON::BI__builtin_neon_vldap1_lane_s64:
case NEON::BI__builtin_neon_vldap1q_lane_s64: {
  Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  llvm::LoadInst *LI = Builder.CreateAlignedLoad(
      VTy->getElementType(), Ops[0], PtrOp0.getAlignment());
  LI->setAtomic(llvm::AtomicOrdering::Acquire);
  Ops[0] = LI;
  return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vldap1_lane");
}
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v: {
  Value *V = PoisonValue::get(Ty);
  Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
                                     PtrOp0.getAlignment());
  llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
  Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
  return EmitNeonSplat(Ops[0], CI);
}
case NEON::BI__builtin_neon_vst1_lane_v:
case NEON::BI__builtin_neon_vst1q_lane_v:
  Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
  return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
case NEON::BI__builtin_neon_vstl1_lane_s64:
case NEON::BI__builtin_neon_vstl1q_lane_s64: {
  Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
  Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
  llvm::StoreInst *SI =
      Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
  SI->setAtomic(llvm::AtomicOrdering::Release);
  return SI;
}
7336 case NEON::BI__builtin_neon_vld2_v:
7337 case NEON::BI__builtin_neon_vld2q_v: {
7339 Function *F =
CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
7340 Ops[1] =
Builder.CreateCall(F, Ops[1],
"vld2");
7341 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
7343 case NEON::BI__builtin_neon_vld3_v:
7344 case NEON::BI__builtin_neon_vld3q_v: {
7346 Function *F =
CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
7347 Ops[1] =
Builder.CreateCall(F, Ops[1],
"vld3");
7348 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
7350 case NEON::BI__builtin_neon_vld4_v:
7351 case NEON::BI__builtin_neon_vld4q_v: {
7353 Function *F =
CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
7354 Ops[1] =
Builder.CreateCall(F, Ops[1],
"vld4");
7355 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
7357 case NEON::BI__builtin_neon_vld2_dup_v:
7358 case NEON::BI__builtin_neon_vld2q_dup_v: {
7360 Function *F =
CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
7361 Ops[1] =
Builder.CreateCall(F, Ops[1],
"vld2");
7362 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  case NEON::BI__builtin_neon_vld3_dup_v:
  case NEON::BI__builtin_neon_vld3q_dup_v: {
    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld4_dup_v:
  case NEON::BI__builtin_neon_vld4q_dup_v: {
    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
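  // For the lane variants the builtin arguments arrive as
  // (sret, ptr, vec..., lane); std::rotate moves the load pointer to the end
  // so that Ops[1..] matches the ld2lane/ld3lane/ld4lane intrinsic signature
  // (vec..., lane, ptr), while Ops[0] keeps the sret slot.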
  case NEON::BI__builtin_neon_vld2_lane_v:
  case NEON::BI__builtin_neon_vld2q_lane_v: {
    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
    std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
    Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld2_lane");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld3_lane_v:
  case NEON::BI__builtin_neon_vld3q_lane_v: {
    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
    std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
    Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld3_lane");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld4_lane_v:
  case NEON::BI__builtin_neon_vld4q_lane_v: {
    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
    std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
    Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld4_lane");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
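  // For the structured stores the pointer arrives first; rotating it to the
  // back yields the (vec..., lane, ptr) operand order the st2/st3/st4
  // intrinsics expect.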
  case NEON::BI__builtin_neon_vst2_v:
  case NEON::BI__builtin_neon_vst2q_v: {
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
    llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst2_lane_v:
  case NEON::BI__builtin_neon_vst2q_lane_v: {
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
    Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
    llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst3_v:
  case NEON::BI__builtin_neon_vst3q_v: {
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
    llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst3_lane_v:
  case NEON::BI__builtin_neon_vst3q_lane_v: {
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
    Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
    llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst4_v:
  case NEON::BI__builtin_neon_vst4q_v: {
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
    llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst4_lane_v:
  case NEON::BI__builtin_neon_vst4q_lane_v: {
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
    Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
    llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
                        Ops, "");
  }
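  // vtrn/vuzp/vzip return two result vectors through the sret pointer in
  // Ops[0]. Each output register is a single shufflevector over the two
  // inputs; the vi loop builds the even (vi==0) and odd (vi==1) mask variant
  // and stores the shuffle to element vi of the sret struct.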
  case NEON::BI__builtin_neon_vtrn_v:
  case NEON::BI__builtin_neon_vtrnq_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<int, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(i+vi);
        Indices.push_back(i+e+vi);
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vuzp_v:
  case NEON::BI__builtin_neon_vuzpq_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<int, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
        Indices.push_back(2*i+vi);

      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vzip_v:
  case NEON::BI__builtin_neon_vzipq_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<int, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back((i + vi*e) >> 1);
        Indices.push_back(((i + vi*e) >> 1)+e);
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vqtbl1q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
                        Ops, "vtbl1");
  }
  case NEON::BI__builtin_neon_vqtbl2q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
                        Ops, "vtbl2");
  }
  case NEON::BI__builtin_neon_vqtbl3q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
                        Ops, "vtbl3");
  }
  case NEON::BI__builtin_neon_vqtbl4q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
                        Ops, "vtbl4");
  }
  case NEON::BI__builtin_neon_vqtbx1q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
                        Ops, "vtbx1");
  }
  case NEON::BI__builtin_neon_vqtbx2q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
                        Ops, "vtbx2");
  }
  case NEON::BI__builtin_neon_vqtbx3q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
                        Ops, "vtbx3");
  }
  case NEON::BI__builtin_neon_vqtbx4q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
                        Ops, "vtbx4");
  }
  case NEON::BI__builtin_neon_vsqadd_v:
  case NEON::BI__builtin_neon_vsqaddq_v: {
    Int = Intrinsic::aarch64_neon_usqadd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
  }
  case NEON::BI__builtin_neon_vuqadd_v:
  case NEON::BI__builtin_neon_vuqaddq_v: {
    Int = Intrinsic::aarch64_neon_suqadd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
  }
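  // FEAT_LUT table lookups (LUTI2/LUTI4). The intrinsics are overloaded on
  // the result vector type and on the first (table) operand, modelled as an
  // 8-byte vector for the 64-bit forms and a 16-byte vector for the q forms.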
  case NEON::BI__builtin_neon_vluti2_laneq_mf8:
  case NEON::BI__builtin_neon_vluti2_laneq_bf16:
  case NEON::BI__builtin_neon_vluti2_laneq_f16:
  case NEON::BI__builtin_neon_vluti2_laneq_p16:
  case NEON::BI__builtin_neon_vluti2_laneq_p8:
  case NEON::BI__builtin_neon_vluti2_laneq_s16:
  case NEON::BI__builtin_neon_vluti2_laneq_s8:
  case NEON::BI__builtin_neon_vluti2_laneq_u16:
  case NEON::BI__builtin_neon_vluti2_laneq_u8: {
    Int = Intrinsic::aarch64_neon_vluti2_laneq;
    llvm::Type *Tys[2];
    Tys[0] = Ty;
    Tys[1] = llvm::FixedVectorType::get(Int8Ty, /*length*/ 8);
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vluti2_laneq");
  }
  case NEON::BI__builtin_neon_vluti2q_laneq_mf8:
  case NEON::BI__builtin_neon_vluti2q_laneq_bf16:
  case NEON::BI__builtin_neon_vluti2q_laneq_f16:
  case NEON::BI__builtin_neon_vluti2q_laneq_p16:
  case NEON::BI__builtin_neon_vluti2q_laneq_p8:
  case NEON::BI__builtin_neon_vluti2q_laneq_s16:
  case NEON::BI__builtin_neon_vluti2q_laneq_s8:
  case NEON::BI__builtin_neon_vluti2q_laneq_u16:
  case NEON::BI__builtin_neon_vluti2q_laneq_u8: {
    Int = Intrinsic::aarch64_neon_vluti2_laneq;
    llvm::Type *Tys[2];
    Tys[0] = Ty;
    Tys[1] = llvm::FixedVectorType::get(Int8Ty, /*length*/ 16);
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vluti2_laneq");
  }
  case NEON::BI__builtin_neon_vluti2_lane_mf8:
  case NEON::BI__builtin_neon_vluti2_lane_bf16:
  case NEON::BI__builtin_neon_vluti2_lane_f16:
  case NEON::BI__builtin_neon_vluti2_lane_p16:
  case NEON::BI__builtin_neon_vluti2_lane_p8:
  case NEON::BI__builtin_neon_vluti2_lane_s16:
  case NEON::BI__builtin_neon_vluti2_lane_s8:
  case NEON::BI__builtin_neon_vluti2_lane_u16:
  case NEON::BI__builtin_neon_vluti2_lane_u8: {
    Int = Intrinsic::aarch64_neon_vluti2_lane;
    llvm::Type *Tys[2];
    Tys[0] = Ty;
    Tys[1] = llvm::FixedVectorType::get(Int8Ty, /*length*/ 8);
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vluti2_lane");
  }
  case NEON::BI__builtin_neon_vluti2q_lane_mf8:
  case NEON::BI__builtin_neon_vluti2q_lane_bf16:
  case NEON::BI__builtin_neon_vluti2q_lane_f16:
  case NEON::BI__builtin_neon_vluti2q_lane_p16:
  case NEON::BI__builtin_neon_vluti2q_lane_p8:
  case NEON::BI__builtin_neon_vluti2q_lane_s16:
  case NEON::BI__builtin_neon_vluti2q_lane_s8:
  case NEON::BI__builtin_neon_vluti2q_lane_u16:
  case NEON::BI__builtin_neon_vluti2q_lane_u8: {
    Int = Intrinsic::aarch64_neon_vluti2_lane;
    llvm::Type *Tys[2];
    Tys[0] = Ty;
    Tys[1] = llvm::FixedVectorType::get(Int8Ty, /*length*/ 16);
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vluti2_lane");
  }
  case NEON::BI__builtin_neon_vluti4q_lane_mf8:
  case NEON::BI__builtin_neon_vluti4q_lane_p8:
  case NEON::BI__builtin_neon_vluti4q_lane_s8:
  case NEON::BI__builtin_neon_vluti4q_lane_u8: {
    Int = Intrinsic::aarch64_neon_vluti4q_lane;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vluti4q_lane");
  }
  case NEON::BI__builtin_neon_vluti4q_laneq_mf8:
  case NEON::BI__builtin_neon_vluti4q_laneq_p8:
  case NEON::BI__builtin_neon_vluti4q_laneq_s8:
  case NEON::BI__builtin_neon_vluti4q_laneq_u8: {
    Int = Intrinsic::aarch64_neon_vluti4q_laneq;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vluti4q_laneq");
  }
  case NEON::BI__builtin_neon_vluti4q_lane_bf16_x2:
  case NEON::BI__builtin_neon_vluti4q_lane_f16_x2:
  case NEON::BI__builtin_neon_vluti4q_lane_p16_x2:
  case NEON::BI__builtin_neon_vluti4q_lane_s16_x2:
  case NEON::BI__builtin_neon_vluti4q_lane_u16_x2: {
    Int = Intrinsic::aarch64_neon_vluti4q_lane_x2;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vluti4q_lane_x2");
  }
  case NEON::BI__builtin_neon_vluti4q_laneq_bf16_x2:
  case NEON::BI__builtin_neon_vluti4q_laneq_f16_x2:
  case NEON::BI__builtin_neon_vluti4q_laneq_p16_x2:
  case NEON::BI__builtin_neon_vluti4q_laneq_s16_x2:
  case NEON::BI__builtin_neon_vluti4q_laneq_u16_x2: {
    Int = Intrinsic::aarch64_neon_vluti4q_laneq_x2;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vluti4q_laneq_x2");
  }
  case NEON::BI__builtin_neon_vmmlaq_f16_mf8_fpm:
    return EmitFP8NeonCall(Intrinsic::aarch64_neon_fmmla,
                           {llvm::FixedVectorType::get(HalfTy, 8),
                            llvm::FixedVectorType::get(Int8Ty, 16)},
                           Ops, E, "fmmla");
  case NEON::BI__builtin_neon_vmmlaq_f32_mf8_fpm:
    return EmitFP8NeonCall(Intrinsic::aarch64_neon_fmmla,
                           {llvm::FixedVectorType::get(FloatTy, 4),
                            llvm::FixedVectorType::get(Int8Ty, 16)},
                           Ops, E, "fmmla");
  case NEON::BI__builtin_neon_vcvt1_low_bf16_mf8_fpm:
    ExtractLow = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vcvt1_bf16_mf8_fpm:
  case NEON::BI__builtin_neon_vcvt1_high_bf16_mf8_fpm:
    return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_cvtl1,
                              llvm::FixedVectorType::get(BFloatTy, 8),
                              Ops[0]->getType(), ExtractLow, Ops, E, "vbfcvt1");
  case NEON::BI__builtin_neon_vcvt2_low_bf16_mf8_fpm:
    ExtractLow = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vcvt2_bf16_mf8_fpm:
  case NEON::BI__builtin_neon_vcvt2_high_bf16_mf8_fpm:
    return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_cvtl2,
                              llvm::FixedVectorType::get(BFloatTy, 8),
                              Ops[0]->getType(), ExtractLow, Ops, E, "vbfcvt2");
  case NEON::BI__builtin_neon_vcvt1_low_f16_mf8_fpm:
    ExtractLow = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vcvt1_f16_mf8_fpm:
  case NEON::BI__builtin_neon_vcvt1_high_f16_mf8_fpm:
    return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_cvtl1,
                              llvm::FixedVectorType::get(HalfTy, 8),
                              Ops[0]->getType(), ExtractLow, Ops, E, "vbfcvt1");
  case NEON::BI__builtin_neon_vcvt2_low_f16_mf8_fpm:
    ExtractLow = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vcvt2_f16_mf8_fpm:
  case NEON::BI__builtin_neon_vcvt2_high_f16_mf8_fpm:
    return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_cvtl2,
                              llvm::FixedVectorType::get(HalfTy, 8),
                              Ops[0]->getType(), ExtractLow, Ops, E, "vbfcvt2");
  case NEON::BI__builtin_neon_vcvt_mf8_f32_fpm:
    return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_fcvtn,
                              llvm::FixedVectorType::get(Int8Ty, 8),
                              Ops[0]->getType(), false, Ops, E, "vfcvtn");
  case NEON::BI__builtin_neon_vcvt_mf8_f16_fpm:
    return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_fcvtn,
                              llvm::FixedVectorType::get(Int8Ty, 8),
                              llvm::FixedVectorType::get(HalfTy, 4), false, Ops,
                              E, "vfcvtn");
  case NEON::BI__builtin_neon_vcvtq_mf8_f16_fpm:
    return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_fcvtn,
                              llvm::FixedVectorType::get(Int8Ty, 16),
                              llvm::FixedVectorType::get(HalfTy, 8), false, Ops,
                              E, "vfcvtn");
  case NEON::BI__builtin_neon_vcvt_high_mf8_f32_fpm: {
    // The narrowing fcvtn2 writes the high half of a 16-byte result, so the
    // incoming 8-byte vector is first placed in the low half of a poison
    // 16-byte vector.
    llvm::Type *Ty = llvm::FixedVectorType::get(Int8Ty, 16);
    Ops[0] = Builder.CreateInsertVector(Ty, PoisonValue::get(Ty), Ops[0],
                                        Builder.getInt64(0));
    return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_fcvtn2, Ty,
                              Ops[1]->getType(), false, Ops, E, "vfcvtn2");
  }
  case NEON::BI__builtin_neon_vdot_f16_mf8_fpm:
  case NEON::BI__builtin_neon_vdotq_f16_mf8_fpm:
    return EmitFP8NeonFDOTCall(Intrinsic::aarch64_neon_fp8_fdot2, false,
                               HalfTy, Ops, E, "fdot2");
  case NEON::BI__builtin_neon_vdot_lane_f16_mf8_fpm:
  case NEON::BI__builtin_neon_vdotq_lane_f16_mf8_fpm:
    ExtendLaneArg = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vdot_laneq_f16_mf8_fpm:
  case NEON::BI__builtin_neon_vdotq_laneq_f16_mf8_fpm:
    return EmitFP8NeonFDOTCall(Intrinsic::aarch64_neon_fp8_fdot2_lane,
                               ExtendLaneArg, HalfTy, Ops, E, "fdot2_lane");
  case NEON::BI__builtin_neon_vdot_f32_mf8_fpm:
  case NEON::BI__builtin_neon_vdotq_f32_mf8_fpm:
    return EmitFP8NeonFDOTCall(Intrinsic::aarch64_neon_fp8_fdot4, false,
                               FloatTy, Ops, E, "fdot4");
  case NEON::BI__builtin_neon_vdot_lane_f32_mf8_fpm:
  case NEON::BI__builtin_neon_vdotq_lane_f32_mf8_fpm:
    ExtendLaneArg = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vdot_laneq_f32_mf8_fpm:
  case NEON::BI__builtin_neon_vdotq_laneq_f32_mf8_fpm:
    return EmitFP8NeonFDOTCall(Intrinsic::aarch64_neon_fp8_fdot4_lane,
                               ExtendLaneArg, FloatTy, Ops, E, "fdot4_lane");
  case NEON::BI__builtin_neon_vmlalbq_f16_mf8_fpm:
    return EmitFP8NeonCall(Intrinsic::aarch64_neon_fp8_fmlalb,
                           {llvm::FixedVectorType::get(HalfTy, 8)}, Ops, E,
                           "vmlal");
  case NEON::BI__builtin_neon_vmlaltq_f16_mf8_fpm:
    return EmitFP8NeonCall(Intrinsic::aarch64_neon_fp8_fmlalt,
                           {llvm::FixedVectorType::get(HalfTy, 8)}, Ops, E,
                           "vmlal");
  case NEON::BI__builtin_neon_vmlallbbq_f32_mf8_fpm:
    return EmitFP8NeonCall(Intrinsic::aarch64_neon_fp8_fmlallbb,
                           {llvm::FixedVectorType::get(FloatTy, 4)}, Ops, E,
                           "vmlall");
  case NEON::BI__builtin_neon_vmlallbtq_f32_mf8_fpm:
    return EmitFP8NeonCall(Intrinsic::aarch64_neon_fp8_fmlallbt,
                           {llvm::FixedVectorType::get(FloatTy, 4)}, Ops, E,
                           "vmlall");
  case NEON::BI__builtin_neon_vmlalltbq_f32_mf8_fpm:
    return EmitFP8NeonCall(Intrinsic::aarch64_neon_fp8_fmlalltb,
                           {llvm::FixedVectorType::get(FloatTy, 4)}, Ops, E,
                           "vmlall");
  case NEON::BI__builtin_neon_vmlallttq_f32_mf8_fpm:
    return EmitFP8NeonCall(Intrinsic::aarch64_neon_fp8_fmlalltt,
                           {llvm::FixedVectorType::get(FloatTy, 4)}, Ops, E,
                           "vmlall");
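  // Lane forms of the FP8 multiply-add-long builtins; as with fdot, each
  // _lane case sets ExtendLaneArg and falls through to its _laneq handler.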
  case NEON::BI__builtin_neon_vmlalbq_lane_f16_mf8_fpm:
    ExtendLaneArg = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vmlalbq_laneq_f16_mf8_fpm:
    return EmitFP8NeonFMLACall(Intrinsic::aarch64_neon_fp8_fmlalb_lane,
                               ExtendLaneArg, HalfTy, Ops, E, "vmlal_lane");
  case NEON::BI__builtin_neon_vmlaltq_lane_f16_mf8_fpm:
    ExtendLaneArg = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vmlaltq_laneq_f16_mf8_fpm:
    return EmitFP8NeonFMLACall(Intrinsic::aarch64_neon_fp8_fmlalt_lane,
                               ExtendLaneArg, HalfTy, Ops, E, "vmlal_lane");
  case NEON::BI__builtin_neon_vmlallbbq_lane_f32_mf8_fpm:
    ExtendLaneArg = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vmlallbbq_laneq_f32_mf8_fpm:
    return EmitFP8NeonFMLACall(Intrinsic::aarch64_neon_fp8_fmlallbb_lane,
                               ExtendLaneArg, FloatTy, Ops, E, "vmlall_lane");
  case NEON::BI__builtin_neon_vmlallbtq_lane_f32_mf8_fpm:
    ExtendLaneArg = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vmlallbtq_laneq_f32_mf8_fpm:
    return EmitFP8NeonFMLACall(Intrinsic::aarch64_neon_fp8_fmlallbt_lane,
                               ExtendLaneArg, FloatTy, Ops, E, "vmlall_lane");
  case NEON::BI__builtin_neon_vmlalltbq_lane_f32_mf8_fpm:
    ExtendLaneArg = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vmlalltbq_laneq_f32_mf8_fpm:
    return EmitFP8NeonFMLACall(Intrinsic::aarch64_neon_fp8_fmlalltb_lane,
                               ExtendLaneArg, FloatTy, Ops, E, "vmlall_lane");
  case NEON::BI__builtin_neon_vmlallttq_lane_f32_mf8_fpm:
    ExtendLaneArg = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vmlallttq_laneq_f32_mf8_fpm:
    return EmitFP8NeonFMLACall(Intrinsic::aarch64_neon_fp8_fmlalltt_lane,
                               ExtendLaneArg, FloatTy, Ops, E, "vmlall_lane");
  case NEON::BI__builtin_neon_vamin_f16:
  case NEON::BI__builtin_neon_vaminq_f16:
  case NEON::BI__builtin_neon_vamin_f32:
  case NEON::BI__builtin_neon_vaminq_f32:
  case NEON::BI__builtin_neon_vaminq_f64: {
    Int = Intrinsic::aarch64_neon_famin;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "famin");
  }
  case NEON::BI__builtin_neon_vamax_f16:
  case NEON::BI__builtin_neon_vamaxq_f16:
  case NEON::BI__builtin_neon_vamax_f32:
  case NEON::BI__builtin_neon_vamaxq_f32:
  case NEON::BI__builtin_neon_vamaxq_f64: {
    Int = Intrinsic::aarch64_neon_famax;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "famax");
  }
  case NEON::BI__builtin_neon_vscale_f16:
  case NEON::BI__builtin_neon_vscaleq_f16:
  case NEON::BI__builtin_neon_vscale_f32:
  case NEON::BI__builtin_neon_vscaleq_f32:
  case NEON::BI__builtin_neon_vscaleq_f64: {
    Int = Intrinsic::aarch64_neon_fp8_fscale;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fscale");
  }