NEONMAP1(__a32_vcvt_bf16_f32, arm_neon_vcvtfp2bf, 0),
NEONMAP1(vabsq_v, arm_neon_vabs, 0),
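// Each NEONMAP1(builtin, intrinsic, modifier) entry binds one overloaded NEON
// builtin to an LLVM intrinsic; NEONMAP2 rows carry a second, alternate
// intrinsic that the common emitter (EmitCommonNeonBuiltinExpr, below) selects
// by signedness or element type. The vaes* builtins map straight onto the
// Armv8 Crypto AES instructions.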
NEONMAP1(vaesdq_u8, arm_neon_aesd, 0),
NEONMAP1(vaeseq_u8, arm_neon_aese, 0),
NEONMAP1(vaesimcq_u8, arm_neon_aesimc, 0),
NEONMAP1(vaesmcq_u8, arm_neon_aesmc, 0),
NEONMAP1(vbfdot_f32, arm_neon_bfdot, 0),
NEONMAP1(vbfdotq_f32, arm_neon_bfdot, 0),
NEONMAP1(vbfmlalbq_f32, arm_neon_bfmlalb, 0),
NEONMAP1(vbfmlaltq_f32, arm_neon_bfmlalt, 0),
NEONMAP1(vbfmmlaq_f32, arm_neon_bfmmla, 0),
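// Absolute compares: only the "ge"/"gt" forms exist in hardware, so the
// "le"/"lt" builtins reuse the same intrinsics; the operand swap happens at
// emission time (see the vcale/vcalt cases in EmitCommonNeonBuiltinExpr).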
NEONMAP1(vcage_v, arm_neon_vacge, 0),
NEONMAP1(vcageq_v, arm_neon_vacge, 0),
NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
NEONMAP1(vcale_v, arm_neon_vacge, 0),
NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
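// Conversions. The fixed-point _n_ rows that use NEONMAP2 pair the unsigned
// intrinsic (vcvtfxu2fp) with the signed one (vcvtfxs2fp); the emitter picks
// one based on the Usgn flag of the overload type.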
NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
NEONMAP1(vcvt_n_f16_s16, arm_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvt_n_f16_u16, arm_neon_vcvtfxu2fp, 0),
NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvt_n_s16_f16, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_u16_f16, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
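// Rounding conversions: vcvta rounds to nearest with ties away from zero,
// vcvtm toward minus infinity, vcvtn to nearest with ties to even, and vcvtp
// toward plus infinity, each in signed/unsigned and 64-/128-bit forms.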
NEONMAP1(vcvta_s16_f16, arm_neon_vcvtas, 0),
NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvta_u16_f16, arm_neon_vcvtau, 0),
NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_s16_f16, arm_neon_vcvtas, 0),
NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvtaq_u16_f16, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0),
NEONMAP1(vcvtm_s16_f16, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_u16_f16, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtmq_s16_f16, arm_neon_vcvtms, 0),
NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtmq_u16_f16, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtn_s16_f16, arm_neon_vcvtns, 0),
NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtn_u16_f16, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtnq_s16_f16, arm_neon_vcvtns, 0),
NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtnq_u16_f16, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtp_s16_f16, arm_neon_vcvtps, 0),
NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtp_u16_f16, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtpq_s16_f16, arm_neon_vcvtps, 0),
NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtpq_u16_f16, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtq_n_f16_s16, arm_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvtq_n_f16_u16, arm_neon_vcvtfxu2fp, 0),
NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvtq_n_s16_f16, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_u16_f16, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
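// Dot products: sdot/udot compute four i8*i8 products per 32-bit lane and
// accumulate them into the i32 vector operand.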
NEONMAP1(vdot_s32, arm_neon_sdot, 0),
NEONMAP1(vdot_u32, arm_neon_udot, 0),
NEONMAP1(vdotq_s32, arm_neon_sdot, 0),
NEONMAP1(vdotq_u32, arm_neon_udot, 0),
NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
NEONMAP1(vld1q_v, arm_neon_vld1, 0),
NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
NEONMAP1(vld2q_v, arm_neon_vld2, 0),
NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
NEONMAP1(vld3q_v, arm_neon_vld3, 0),
NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
NEONMAP1(vld4q_v, arm_neon_vld4, 0),
NEONMAP1(vmmlaq_s32, arm_neon_smmla, 0),
NEONMAP1(vmmlaq_u32, arm_neon_ummla, 0),
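// Saturating doubling multiply-accumulate has no single intrinsic here: the
// NEONMAP2 rows pair vqdmull with a saturating add/subtract, and the emitter
// chains the two calls (see the vqdmlal_v/vqdmlsl_v case below).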
NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0),
NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0),
NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
NEONMAP1(vsha1su0q_u32, arm_neon_sha1su0, 0),
NEONMAP1(vsha1su1q_u32, arm_neon_sha1su1, 0),
NEONMAP1(vsha256h2q_u32, arm_neon_sha256h2, 0),
NEONMAP1(vsha256hq_u32, arm_neon_sha256h, 0),
NEONMAP1(vsha256su0q_u32, arm_neon_sha256su0, 0),
NEONMAP1(vsha256su1q_u32, arm_neon_sha256su1, 0),
NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
NEONMAP1(vst1q_v, arm_neon_vst1, 0),
NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
NEONMAP1(vst2q_v, arm_neon_vst2, 0),
NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
NEONMAP1(vst3q_v, arm_neon_vst3, 0),
NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
NEONMAP1(vst4q_v, arm_neon_vst4, 0),
NEONMAP1(vusdot_s32, arm_neon_usdot, 0),
NEONMAP1(vusdotq_s32, arm_neon_usdot, 0),
NEONMAP1(vusmmlaq_s32, arm_neon_usmmla, 0),
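// --- AArch64 entries: the same builtins lower to aarch64_neon_* and
// aarch64_crypto_* intrinsics on A64. ---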
NEONMAP1(vabs_v, aarch64_neon_abs, 0),
NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
NEONMAP1(vaesdq_u8, aarch64_crypto_aesd, 0),
NEONMAP1(vaeseq_u8, aarch64_crypto_aese, 0),
NEONMAP1(vaesimcq_u8, aarch64_crypto_aesimc, 0),
NEONMAP1(vaesmcq_u8, aarch64_crypto_aesmc, 0),
NEONMAP1(vbfdot_f32, aarch64_neon_bfdot, 0),
NEONMAP1(vbfdotq_f32, aarch64_neon_bfdot, 0),
NEONMAP1(vbfmlalbq_f32, aarch64_neon_bfmlalb, 0),
NEONMAP1(vbfmlaltq_f32, aarch64_neon_bfmlalt, 0),
NEONMAP1(vbfmmlaq_f32, aarch64_neon_bfmmla, 0),
NEONMAP1(vcage_v, aarch64_neon_facge, 0),
NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
NEONMAP1(vcale_v, aarch64_neon_facge, 0),
NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
NEONMAP1(vcvt_n_f16_s16, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvt_n_f16_u16, aarch64_neon_vcvtfxu2fp, 0),
NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvt_n_s16_f16, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_u16_f16, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_f16_s16, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvtq_n_f16_u16, aarch64_neon_vcvtfxu2fp, 0),
NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvtq_n_s16_f16, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_u16_f16, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vdot_s32, aarch64_neon_sdot, 0),
NEONMAP1(vdot_u32, aarch64_neon_udot, 0),
NEONMAP1(vdotq_s32, aarch64_neon_sdot, 0),
NEONMAP1(vdotq_u32, aarch64_neon_udot, 0),
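// FP16 fused multiply-add long: the _low builtins use fmlal/fmlsl on the low
// halves of the f16 inputs, while the _high builtins use the fmlal2/fmlsl2
// forms that read the high halves.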
NEONMAP1(vfmlal_high_f16, aarch64_neon_fmlal2, 0),
NEONMAP1(vfmlal_low_f16, aarch64_neon_fmlal, 0),
NEONMAP1(vfmlalq_high_f16, aarch64_neon_fmlal2, 0),
NEONMAP1(vfmlalq_low_f16, aarch64_neon_fmlal, 0),
NEONMAP1(vfmlsl_high_f16, aarch64_neon_fmlsl2, 0),
NEONMAP1(vfmlsl_low_f16, aarch64_neon_fmlsl, 0),
NEONMAP1(vfmlslq_high_f16, aarch64_neon_fmlsl2, 0),
NEONMAP1(vfmlslq_low_f16, aarch64_neon_fmlsl, 0),
NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
NEONMAP1(vmmlaq_s32, aarch64_neon_smmla, 0),
NEONMAP1(vmmlaq_u32, aarch64_neon_ummla, 0),
NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0),
NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0),
NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0),
NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
NEONMAP1(vrax1q_u64, aarch64_crypto_rax1, 0),
NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
NEONMAP1(vsha1su0q_u32, aarch64_crypto_sha1su0, 0),
NEONMAP1(vsha1su1q_u32, aarch64_crypto_sha1su1, 0),
NEONMAP1(vsha256h2q_u32, aarch64_crypto_sha256h2, 0),
NEONMAP1(vsha256hq_u32, aarch64_crypto_sha256h, 0),
NEONMAP1(vsha256su0q_u32, aarch64_crypto_sha256su0, 0),
NEONMAP1(vsha256su1q_u32, aarch64_crypto_sha256su1, 0),
NEONMAP1(vsha512h2q_u64, aarch64_crypto_sha512h2, 0),
NEONMAP1(vsha512hq_u64, aarch64_crypto_sha512h, 0),
NEONMAP1(vsha512su0q_u64, aarch64_crypto_sha512su0, 0),
NEONMAP1(vsha512su1q_u64, aarch64_crypto_sha512su1, 0),
NEONMAP1(vsm3partw1q_u32, aarch64_crypto_sm3partw1, 0),
NEONMAP1(vsm3partw2q_u32, aarch64_crypto_sm3partw2, 0),
NEONMAP1(vsm3ss1q_u32, aarch64_crypto_sm3ss1, 0),
NEONMAP1(vsm3tt1aq_u32, aarch64_crypto_sm3tt1a, 0),
NEONMAP1(vsm3tt1bq_u32, aarch64_crypto_sm3tt1b, 0),
NEONMAP1(vsm3tt2aq_u32, aarch64_crypto_sm3tt2a, 0),
NEONMAP1(vsm3tt2bq_u32, aarch64_crypto_sm3tt2b, 0),
NEONMAP1(vsm4ekeyq_u32, aarch64_crypto_sm4ekey, 0),
NEONMAP1(vsm4eq_u32, aarch64_crypto_sm4e, 0),
NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
NEONMAP1(vusdot_s32, aarch64_neon_usdot, 0),
NEONMAP1(vusdotq_s32, aarch64_neon_usdot, 0),
NEONMAP1(vusmmlaq_s32, aarch64_neon_usmmla, 0),
NEONMAP1(vxarq_u64, aarch64_crypto_xar, 0),
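// --- AArch64 scalar (SISD) entries. ---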
NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
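// The tables above are consulted by the ARM and AArch64 builtin emitters: a
// matching row hands its LLVMIntrinsic, AltLLVMIntrinsic, name hint and type
// modifier to EmitCommonNeonBuiltinExpr below, which handles the builtins
// whose semantics are shared between the two architectures.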
Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
    unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
    const char *NameHint, unsigned Modifier, const CallExpr *E,
    SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
    llvm::Triple::ArchType Arch) {
  // Get the last argument, which specifies the vector type.
  const Expr *Arg = E->getArg(E->getNumArgs() - 1);
  std::optional<llvm::APSInt> NeonTypeConst =
      Arg->getIntegerConstantExpr(getContext());
  if (!NeonTypeConst)
    return nullptr;

  // Determine the type of this overloaded NEON intrinsic.
  NeonTypeFlags Type(NeonTypeConst->getZExtValue());
  const bool Usgn = Type.isUnsigned();
  const bool Quad = Type.isQuad();
  const bool Floating = Type.isFloatingPoint();
  const bool AllowBFloatArgsAndRet =
      getTargetHooks().getABIInfo().allowBFloatArgsAndRet();
  llvm::FixedVectorType *VTy =
      GetNeonType(this, Type, HasFastHalfType, false, AllowBFloatArgsAndRet);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return nullptr;
  auto getAlignmentValue32 = [&](Address addr) -> Value * {
    return Builder.getInt32(addr.getAlignment().getQuantity());
  };
  unsigned Int = LLVMIntrinsic;
  if ((Modifier & UnsignedAlts) && !Usgn)
    Int = AltLLVMIntrinsic;

  switch (BuiltinID) {
  default: break;
  case NEON::BI__builtin_neon_splat_lane_v:
  case NEON::BI__builtin_neon_splat_laneq_v:
  case NEON::BI__builtin_neon_splatq_lane_v:
  case NEON::BI__builtin_neon_splatq_laneq_v: {
    auto NumElements = VTy->getElementCount();
    if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v)
      NumElements = NumElements * 2;
    if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v)
      NumElements = NumElements.divideCoefficientBy(2);

    Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
    return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
  }
  case NEON::BI__builtin_neon_vpadd_v:
  case NEON::BI__builtin_neon_vpaddq_v:
    // We don't allow fp/int overloading of intrinsics.
    if (VTy->getElementType()->isFloatingPointTy() &&
        Int == Intrinsic::aarch64_neon_addp)
      Int = Intrinsic::aarch64_neon_faddp;
    break;
  case NEON::BI__builtin_neon_vabs_v:
  case NEON::BI__builtin_neon_vabsq_v:
    if (VTy->getElementType()->isFloatingPointTy())
      return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
  case NEON::BI__builtin_neon_vadd_v:
  case NEON::BI__builtin_neon_vaddq_v: {
    llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, Quad ? 16 : 8);
    Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
    return Builder.CreateBitCast(Ops[0], Ty);
  }
  case NEON::BI__builtin_neon_vaddhn_v: {
    llvm::FixedVectorType *SrcTy =
        llvm::FixedVectorType::getExtendedElementVectorType(VTy);

    // %sum = add <4 x i32> %lhs, %rhs
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
    Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");

    // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
    Constant *ShiftAmt =
        ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");

    // %res = trunc <4 x i32> %high to <4 x i16>
    return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
  }
  case NEON::BI__builtin_neon_vcale_v:
  case NEON::BI__builtin_neon_vcaleq_v:
  case NEON::BI__builtin_neon_vcalt_v:
  case NEON::BI__builtin_neon_vcaltq_v:
    // Only one direction of comparisons actually exists: cale/calt are
    // cage/cagt with the operands swapped.
    std::swap(Ops[0], Ops[1]);
    [[fallthrough]];
  case NEON::BI__builtin_neon_vcage_v:
  case NEON::BI__builtin_neon_vcageq_v:
  case NEON::BI__builtin_neon_vcagt_v:
  case NEON::BI__builtin_neon_vcagtq_v: {
    llvm::Type *Ty;
    switch (VTy->getScalarSizeInBits()) {
    default: llvm_unreachable("unexpected type");
    case 32:
      Ty = FloatTy;
      break;
    case 64:
      Ty = DoubleTy;
      break;
    case 16:
      Ty = HalfTy;
      break;
    }
    auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
    llvm::Type *Tys[] = { VTy, VecFlt };
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    return EmitNeonCall(F, Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vceqz_v:
  case NEON::BI__builtin_neon_vceqzq_v:
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], Ty, Floating ? ICmpInst::FCMP_OEQ : ICmpInst::ICMP_EQ,
        "vceqz");
  case NEON::BI__builtin_neon_vcgez_v:
  case NEON::BI__builtin_neon_vcgezq_v:
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], Ty, Floating ? ICmpInst::FCMP_OGE : ICmpInst::ICMP_SGE,
        "vcgez");
  case NEON::BI__builtin_neon_vclez_v:
  case NEON::BI__builtin_neon_vclezq_v:
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], Ty, Floating ? ICmpInst::FCMP_OLE : ICmpInst::ICMP_SLE,
        "vclez");
  case NEON::BI__builtin_neon_vcgtz_v:
  case NEON::BI__builtin_neon_vcgtzq_v:
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], Ty, Floating ? ICmpInst::FCMP_OGT : ICmpInst::ICMP_SGT,
        "vcgtz");
  case NEON::BI__builtin_neon_vcltz_v:
  case NEON::BI__builtin_neon_vcltzq_v:
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], Ty, Floating ? ICmpInst::FCMP_OLT : ICmpInst::ICMP_SLT,
        "vcltz");
  case NEON::BI__builtin_neon_vclz_v:
  case NEON::BI__builtin_neon_vclzq_v:
    // We generate a target-independent intrinsic, which needs a second
    // argument for whether or not clz of zero is undefined; on ARM it isn't.
    Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
    break;
  case NEON::BI__builtin_neon_vcvt_f32_v:
  case NEON::BI__builtin_neon_vcvtq_f32_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
                     HasFastHalfType);
    return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case NEON::BI__builtin_neon_vcvt_f16_s16:
  case NEON::BI__builtin_neon_vcvt_f16_u16:
  case NEON::BI__builtin_neon_vcvtq_f16_s16:
  case NEON::BI__builtin_neon_vcvtq_f16_u16:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
                     HasFastHalfType);
    return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case NEON::BI__builtin_neon_vcvt_n_f16_s16:
  case NEON::BI__builtin_neon_vcvt_n_f16_u16:
  case NEON::BI__builtin_neon_vcvtq_n_f16_s16:
  case NEON::BI__builtin_neon_vcvtq_n_f16_u16: {
    llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case NEON::BI__builtin_neon_vcvt_n_f32_v:
  case NEON::BI__builtin_neon_vcvt_n_f64_v:
  case NEON::BI__builtin_neon_vcvtq_n_f32_v:
  case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
    llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
    Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case NEON::BI__builtin_neon_vcvt_n_s16_f16:
  case NEON::BI__builtin_neon_vcvt_n_s32_v:
  case NEON::BI__builtin_neon_vcvt_n_u16_f16:
  case NEON::BI__builtin_neon_vcvt_n_u32_v:
  case NEON::BI__builtin_neon_vcvt_n_s64_v:
  case NEON::BI__builtin_neon_vcvt_n_u64_v:
  case NEON::BI__builtin_neon_vcvtq_n_s16_f16:
  case NEON::BI__builtin_neon_vcvtq_n_s32_v:
  case NEON::BI__builtin_neon_vcvtq_n_u16_f16:
  case NEON::BI__builtin_neon_vcvtq_n_u32_v:
  case NEON::BI__builtin_neon_vcvtq_n_s64_v:
  case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    return EmitNeonCall(F, Ops, "vcvt_n");
  }
  case NEON::BI__builtin_neon_vcvt_s32_v:
  case NEON::BI__builtin_neon_vcvt_u32_v:
  case NEON::BI__builtin_neon_vcvt_s64_v:
  case NEON::BI__builtin_neon_vcvt_u64_v:
  case NEON::BI__builtin_neon_vcvt_s16_f16:
  case NEON::BI__builtin_neon_vcvt_u16_f16:
  case NEON::BI__builtin_neon_vcvtq_s32_v:
  case NEON::BI__builtin_neon_vcvtq_u32_v:
  case NEON::BI__builtin_neon_vcvtq_s64_v:
  case NEON::BI__builtin_neon_vcvtq_u64_v:
  case NEON::BI__builtin_neon_vcvtq_s16_f16:
  case NEON::BI__builtin_neon_vcvtq_u16_f16: {
    Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
    return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
                : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
  }
  case NEON::BI__builtin_neon_vcvta_s16_f16:
  case NEON::BI__builtin_neon_vcvta_s32_v:
  case NEON::BI__builtin_neon_vcvta_s64_v:
  case NEON::BI__builtin_neon_vcvta_u16_f16:
  case NEON::BI__builtin_neon_vcvta_u32_v:
  case NEON::BI__builtin_neon_vcvta_u64_v:
  case NEON::BI__builtin_neon_vcvtaq_s16_f16:
  case NEON::BI__builtin_neon_vcvtaq_s32_v:
  case NEON::BI__builtin_neon_vcvtaq_s64_v:
  case NEON::BI__builtin_neon_vcvtaq_u16_f16:
  case NEON::BI__builtin_neon_vcvtaq_u32_v:
  case NEON::BI__builtin_neon_vcvtaq_u64_v:
  case NEON::BI__builtin_neon_vcvtn_s16_f16:
  case NEON::BI__builtin_neon_vcvtn_s32_v:
  case NEON::BI__builtin_neon_vcvtn_s64_v:
  case NEON::BI__builtin_neon_vcvtn_u16_f16:
  case NEON::BI__builtin_neon_vcvtn_u32_v:
  case NEON::BI__builtin_neon_vcvtn_u64_v:
  case NEON::BI__builtin_neon_vcvtnq_s16_f16:
  case NEON::BI__builtin_neon_vcvtnq_s32_v:
  case NEON::BI__builtin_neon_vcvtnq_s64_v:
  case NEON::BI__builtin_neon_vcvtnq_u16_f16:
  case NEON::BI__builtin_neon_vcvtnq_u32_v:
  case NEON::BI__builtin_neon_vcvtnq_u64_v:
  case NEON::BI__builtin_neon_vcvtp_s16_f16:
  case NEON::BI__builtin_neon_vcvtp_s32_v:
  case NEON::BI__builtin_neon_vcvtp_s64_v:
  case NEON::BI__builtin_neon_vcvtp_u16_f16:
  case NEON::BI__builtin_neon_vcvtp_u32_v:
  case NEON::BI__builtin_neon_vcvtp_u64_v:
  case NEON::BI__builtin_neon_vcvtpq_s16_f16:
  case NEON::BI__builtin_neon_vcvtpq_s32_v:
  case NEON::BI__builtin_neon_vcvtpq_s64_v:
  case NEON::BI__builtin_neon_vcvtpq_u16_f16:
  case NEON::BI__builtin_neon_vcvtpq_u32_v:
  case NEON::BI__builtin_neon_vcvtpq_u64_v:
  case NEON::BI__builtin_neon_vcvtm_s16_f16:
  case NEON::BI__builtin_neon_vcvtm_s32_v:
  case NEON::BI__builtin_neon_vcvtm_s64_v:
  case NEON::BI__builtin_neon_vcvtm_u16_f16:
  case NEON::BI__builtin_neon_vcvtm_u32_v:
  case NEON::BI__builtin_neon_vcvtm_u64_v:
  case NEON::BI__builtin_neon_vcvtmq_s16_f16:
  case NEON::BI__builtin_neon_vcvtmq_s32_v:
  case NEON::BI__builtin_neon_vcvtmq_s64_v:
  case NEON::BI__builtin_neon_vcvtmq_u16_f16:
  case NEON::BI__builtin_neon_vcvtmq_u32_v:
  case NEON::BI__builtin_neon_vcvtmq_u64_v: {
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vcvtx_f32_v: {
    llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty};
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vext_v:
  case NEON::BI__builtin_neon_vextq_v: {
    int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
    SmallVector<int, 16> Indices;
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
      Indices.push_back(i+CV);

    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
  }
  case NEON::BI__builtin_neon_vfma_v:
  case NEON::BI__builtin_neon_vfmaq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);

    // NEON intrinsic puts accumulator first, unlike the LLVM fma.
    return emitCallMaybeConstrainedFPBuiltin(
        *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
        {Ops[1], Ops[2], Ops[0]});
  }
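  // For example, vfmaq_f32(acc, x, y) lowers to
  //   call <4 x float> @llvm.fma.v4f32(<4 x float> %x, <4 x float> %y,
  //                                    <4 x float> %acc)
  // (or the experimental_constrained_fma form under strict FP).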
  case NEON::BI__builtin_neon_vld1_v:
  case NEON::BI__builtin_neon_vld1q_v: {
    llvm::Type *Tys[] = {Ty, Int8PtrTy};
    Ops.push_back(getAlignmentValue32(PtrOp0));
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
  }
  case NEON::BI__builtin_neon_vld1_x2_v:
  case NEON::BI__builtin_neon_vld1q_x2_v:
  case NEON::BI__builtin_neon_vld1_x3_v:
  case NEON::BI__builtin_neon_vld1q_x3_v:
  case NEON::BI__builtin_neon_vld1_x4_v:
  case NEON::BI__builtin_neon_vld1q_x4_v: {
    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld2_v:
  case NEON::BI__builtin_neon_vld2q_v:
  case NEON::BI__builtin_neon_vld3_v:
  case NEON::BI__builtin_neon_vld3q_v:
  case NEON::BI__builtin_neon_vld4_v:
  case NEON::BI__builtin_neon_vld4q_v:
  case NEON::BI__builtin_neon_vld2_dup_v:
  case NEON::BI__builtin_neon_vld2q_dup_v:
  case NEON::BI__builtin_neon_vld3_dup_v:
  case NEON::BI__builtin_neon_vld3q_dup_v:
  case NEON::BI__builtin_neon_vld4_dup_v:
  case NEON::BI__builtin_neon_vld4q_dup_v: {
    llvm::Type *Tys[] = {Ty, Int8PtrTy};
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    Value *Align = getAlignmentValue32(PtrOp1);
    Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld1_dup_v:
  case NEON::BI__builtin_neon_vld1q_dup_v: {
    Value *V = PoisonValue::get(Ty);
    PtrOp0 = PtrOp0.withElementType(VTy->getElementType());
    LoadInst *Ld = Builder.CreateLoad(PtrOp0);
    llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
    Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
    return EmitNeonSplat(Ops[0], CI);
  }
  case NEON::BI__builtin_neon_vld2_lane_v:
  case NEON::BI__builtin_neon_vld2q_lane_v:
  case NEON::BI__builtin_neon_vld3_lane_v:
  case NEON::BI__builtin_neon_vld3q_lane_v:
  case NEON::BI__builtin_neon_vld4_lane_v:
  case NEON::BI__builtin_neon_vld4q_lane_v: {
    llvm::Type *Tys[] = {Ty, Int8PtrTy};
    Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
    for (unsigned I = 2; I < Ops.size() - 1; ++I)
      Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
    Ops.push_back(getAlignmentValue32(PtrOp1));
    Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), NameHint);
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vmovl_v: {
    llvm::FixedVectorType *DTy =
        llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
    if (Usgn)
      return Builder.CreateZExt(Ops[0], Ty, "vmovl");
    return Builder.CreateSExt(Ops[0], Ty, "vmovl");
  }
  case NEON::BI__builtin_neon_vmovn_v: {
    llvm::FixedVectorType *QTy =
        llvm::FixedVectorType::getExtendedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
    return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
  }
  case NEON::BI__builtin_neon_vmull_v:
    // The integer vmull operations are emitted via intrinsics rather than as
    // two exts and a mul, to keep the widening next to the multiply in the
    // backend.
    Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
    Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
  case NEON::BI__builtin_neon_vpadal_v:
  case NEON::BI__builtin_neon_vpadalq_v: {
    // The source operand type has twice as many elements of half the size.
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy =
        llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    auto *NarrowTy =
        llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
    llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vpaddl_v:
  case NEON::BI__builtin_neon_vpaddlq_v: {
    // The source operand type has twice as many elements of half the size.
    unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
    auto *NarrowTy =
        llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
    llvm::Type *Tys[2] = { Ty, NarrowTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
  }
  case NEON::BI__builtin_neon_vqdmlal_v:
  case NEON::BI__builtin_neon_vqdmlsl_v: {
    SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
    Ops[1] =
        EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
    Ops.resize(2);
    return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vqdmulhq_lane_v:
  case NEON::BI__builtin_neon_vqdmulh_lane_v:
  case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
  case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
    auto *RTy = cast<llvm::FixedVectorType>(Ty);
    if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
        BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
      RTy = llvm::FixedVectorType::get(RTy->getElementType(),
                                       RTy->getNumElements() * 2);
    llvm::Type *Tys[2] = {
        RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
                                             /*isQuad*/ false))};
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vqdmulhq_laneq_v:
  case NEON::BI__builtin_neon_vqdmulh_laneq_v:
  case NEON::BI__builtin_neon_vqrdmulhq_laneq_v:
  case NEON::BI__builtin_neon_vqrdmulh_laneq_v: {
    llvm::Type *Tys[2] = {
        Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
                                            /*isQuad*/ true))};
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
  }
  case NEON::BI__builtin_neon_vqshl_n_v:
  case NEON::BI__builtin_neon_vqshlq_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n", 1, false);
  case NEON::BI__builtin_neon_vqshlu_n_v:
  case NEON::BI__builtin_neon_vqshluq_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n", 1, false);
  case NEON::BI__builtin_neon_vrecpe_v:
  case NEON::BI__builtin_neon_vrecpeq_v:
  case NEON::BI__builtin_neon_vrsqrte_v:
  case NEON::BI__builtin_neon_vrsqrteq_v:
    Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
  case NEON::BI__builtin_neon_vrndi_v:
  case NEON::BI__builtin_neon_vrndiq_v:
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_nearbyint
              : Intrinsic::nearbyint;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
  case NEON::BI__builtin_neon_vrshr_n_v:
  case NEON::BI__builtin_neon_vrshrq_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
  case NEON::BI__builtin_neon_vsha512hq_u64:
  case NEON::BI__builtin_neon_vsha512h2q_u64:
  case NEON::BI__builtin_neon_vsha512su0q_u64:
  case NEON::BI__builtin_neon_vsha512su1q_u64: {
    Function *F = CGM.getIntrinsic(Int);
    return EmitNeonCall(F, Ops, "");
  }
  case NEON::BI__builtin_neon_vshl_n_v:
  case NEON::BI__builtin_neon_vshlq_n_v:
    Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
    return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
                             "vshl_n");
  case NEON::BI__builtin_neon_vshll_n_v: {
    llvm::FixedVectorType *SrcTy =
        llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    if (Usgn)
      Ops[0] = Builder.CreateZExt(Ops[0], VTy);
    else
      Ops[0] = Builder.CreateSExt(Ops[0], VTy);
    Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
    return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
  }
  case NEON::BI__builtin_neon_vshrn_n_v: {
    llvm::FixedVectorType *SrcTy =
        llvm::FixedVectorType::getExtendedElementVectorType(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = ConstantInt::get(SrcTy, cast<ConstantInt>(Ops[1])->getZExtValue());
    if (Usgn)
      Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
    else
      Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
    return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
  }
  case NEON::BI__builtin_neon_vshr_n_v:
  case NEON::BI__builtin_neon_vshrq_n_v:
    return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
  case NEON::BI__builtin_neon_vst1_v:
  case NEON::BI__builtin_neon_vst1q_v:
  case NEON::BI__builtin_neon_vst2_v:
  case NEON::BI__builtin_neon_vst2q_v:
  case NEON::BI__builtin_neon_vst3_v:
  case NEON::BI__builtin_neon_vst3q_v:
  case NEON::BI__builtin_neon_vst4_v:
  case NEON::BI__builtin_neon_vst4q_v:
  case NEON::BI__builtin_neon_vst2_lane_v:
  case NEON::BI__builtin_neon_vst2q_lane_v:
  case NEON::BI__builtin_neon_vst3_lane_v:
  case NEON::BI__builtin_neon_vst3q_lane_v:
  case NEON::BI__builtin_neon_vst4_lane_v:
  case NEON::BI__builtin_neon_vst4q_lane_v: {
    llvm::Type *Tys[] = {Int8PtrTy, Ty};
    Ops.push_back(getAlignmentValue32(PtrOp0));
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
  }
  case NEON::BI__builtin_neon_vsm3partw1q_u32:
  case NEON::BI__builtin_neon_vsm3partw2q_u32:
  case NEON::BI__builtin_neon_vsm3ss1q_u32:
  case NEON::BI__builtin_neon_vsm4ekeyq_u32:
  case NEON::BI__builtin_neon_vsm4eq_u32: {
    Function *F = CGM.getIntrinsic(Int);
    return EmitNeonCall(F, Ops, "");
  }
  case NEON::BI__builtin_neon_vsm3tt1aq_u32:
  case NEON::BI__builtin_neon_vsm3tt1bq_u32:
  case NEON::BI__builtin_neon_vsm3tt2aq_u32:
  case NEON::BI__builtin_neon_vsm3tt2bq_u32: {
    Function *F = CGM.getIntrinsic(Int);
    Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
    return EmitNeonCall(F, Ops, "");
  }
  case NEON::BI__builtin_neon_vst1_x2_v:
  case NEON::BI__builtin_neon_vst1q_x2_v:
  case NEON::BI__builtin_neon_vst1_x3_v:
  case NEON::BI__builtin_neon_vst1q_x3_v:
  case NEON::BI__builtin_neon_vst1_x4_v:
  case NEON::BI__builtin_neon_vst1q_x4_v: {
    // In AArch32 mode the pointer operand comes first; in AArch64 mode it
    // comes last, so rotate it into place.
    if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
        Arch == llvm::Triple::aarch64_32) {
      llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
      return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
    }
    llvm::Type *Tys[2] = {UnqualPtrTy, VTy};
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
  }
  case NEON::BI__builtin_neon_vsubhn_v: {
    llvm::FixedVectorType *SrcTy =
        llvm::FixedVectorType::getExtendedElementVectorType(VTy);

    // %diff = sub <4 x i32> %lhs, %rhs
    Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
    Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");

    // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
    Constant *ShiftAmt =
        ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
    Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");

    // %res = trunc <4 x i32> %high to <4 x i16>
    return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
  }
  case NEON::BI__builtin_neon_vtrn_v:
  case NEON::BI__builtin_neon_vtrnq_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<int, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(i+vi);
        Indices.push_back(i+e+vi);
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vtst_v:
  case NEON::BI__builtin_neon_vtstq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
    Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
                                ConstantAggregateZero::get(Ty));
    return Builder.CreateSExt(Ops[0], Ty, "vtst");
  }
  case NEON::BI__builtin_neon_vuzp_v:
  case NEON::BI__builtin_neon_vuzpq_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<int, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
        Indices.push_back(2*i+vi);

      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vxarq_u64: {
    Function *F = CGM.getIntrinsic(Int);
    Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
    return EmitNeonCall(F, Ops, "");
  }
  case NEON::BI__builtin_neon_vzip_v:
  case NEON::BI__builtin_neon_vzipq_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<int, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back((i + vi*e) >> 1);
        Indices.push_back(((i + vi*e) >> 1)+e);
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vdot_s32:
  case NEON::BI__builtin_neon_vdot_u32:
  case NEON::BI__builtin_neon_vdotq_s32:
  case NEON::BI__builtin_neon_vdotq_u32: {
    auto *InputTy =
        llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
  }
  case NEON::BI__builtin_neon_vfmlal_low_f16:
  case NEON::BI__builtin_neon_vfmlalq_low_f16: {
    auto *InputTy =
        llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
  }
  case NEON::BI__builtin_neon_vfmlsl_low_f16:
  case NEON::BI__builtin_neon_vfmlslq_low_f16: {
    auto *InputTy =
        llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
  }
  case NEON::BI__builtin_neon_vfmlal_high_f16:
  case NEON::BI__builtin_neon_vfmlalq_high_f16: {
    auto *InputTy =
        llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
  }
  case NEON::BI__builtin_neon_vfmlsl_high_f16:
  case NEON::BI__builtin_neon_vfmlslq_high_f16: {
    auto *InputTy =
        llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
  }
  case NEON::BI__builtin_neon_vmmlaq_s32:
  case NEON::BI__builtin_neon_vmmlaq_u32: {
    auto *InputTy =
        llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vmmla");
  }
  case NEON::BI__builtin_neon_vusmmlaq_s32: {
    auto *InputTy =
        llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vusmmla");
  }
  case NEON::BI__builtin_neon_vusdot_s32:
  case NEON::BI__builtin_neon_vusdotq_s32: {
    auto *InputTy =
        llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vusdot");
  }
  case NEON::BI__builtin_neon_vbfdot_f32:
  case NEON::BI__builtin_neon_vbfdotq_f32: {
    llvm::Type *InputTy =
        llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16);
    llvm::Type *Tys[2] = { Ty, InputTy };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vbfdot");
  }
  case NEON::BI__builtin_neon___a32_vcvt_bf16_f32: {
    llvm::Type *Tys[1] = { Ty };
    Function *F = CGM.getIntrinsic(Int, Tys);
    return EmitNeonCall(F, Ops, "vcvtfp2bf");
  }
  }

  assert(Int && "Expected valid intrinsic number");

  // Determine the type(s) of this overloaded AArch64 intrinsic.
  Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);

  Value *Result = EmitNeonCall(F, Ops, NameHint);
  llvm::Type *ResultType = ConvertType(E->getType());
  // AArch64 intrinsic one-element vector type cast to
  // scalar type expected by the builtin
  return Builder.CreateBitCast(Result, ResultType, NameHint);
}
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
                                           const CallExpr *E,
                                           ReturnValueSlot ReturnValue,
                                           llvm::Triple::ArchType Arch) {
  if (auto Hint = GetValueForARMHint(BuiltinID))
    return Hint;
  if (BuiltinID == clang::ARM::BI__emit) {
    bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(VoidTy, /*Variadic=*/false);

    Expr::EvalResult Result;
    if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
      llvm_unreachable("Sema will ensure that the parameter is constant");

    llvm::APSInt Value = Result.Val.getInt();
    uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();

    llvm::InlineAsm *Emit =
        IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
                                 /*hasSideEffects=*/true)
                : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
                                 /*hasSideEffects=*/true);

    return Builder.CreateCall(Emit);
  }
  if (BuiltinID == clang::ARM::BI__builtin_arm_dbg) {
    Value *Option = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
  }
  if (BuiltinID == clang::ARM::BI__builtin_arm_prefetch) {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *RW = EmitScalarExpr(E->getArg(1));
    Value *IsData = EmitScalarExpr(E->getArg(2));

    // Locality is not supported on ARM target
    Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);

    Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
    return Builder.CreateCall(F, {Address, RW, Locality, IsData});
  }
  if (BuiltinID == clang::ARM::BI__builtin_arm_rbit) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
  }
  if (BuiltinID == clang::ARM::BI__builtin_arm_clz ||
      BuiltinID == clang::ARM::BI__builtin_arm_clz64) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Arg->getType());
    Value *Res = Builder.CreateCall(F, {Arg, Builder.getInt1(false)});
    if (BuiltinID == clang::ARM::BI__builtin_arm_clz64)
      Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
    return Res;
  }
  if (BuiltinID == clang::ARM::BI__builtin_arm_cls) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
  }
  if (BuiltinID == clang::ARM::BI__builtin_arm_cls64) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg,
                              "cls");
  }
  if (BuiltinID == clang::ARM::BI__clear_cache) {
    assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
    const FunctionDecl *FD = E->getDirectCallee();
    Value *Ops[2];
    for (unsigned i = 0; i < 2; i++)
      Ops[i] = EmitScalarExpr(E->getArg(i));
    llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
    llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
    StringRef Name = FD->getName();
    return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
  }
  if (BuiltinID == clang::ARM::BI__builtin_arm_mcrr ||
      BuiltinID == clang::ARM::BI__builtin_arm_mcrr2) {
    Function *F;
    switch (BuiltinID) {
    default: llvm_unreachable("unexpected builtin");
    case clang::ARM::BI__builtin_arm_mcrr:
      F = CGM.getIntrinsic(Intrinsic::arm_mcrr); break;
    case clang::ARM::BI__builtin_arm_mcrr2:
      F = CGM.getIntrinsic(Intrinsic::arm_mcrr2); break;
    }

    // The builtin packs Rt and Rt2 into a single u64, but the intrinsic
    // wants them as two i32s, so split the value here.
    Value *Coproc = EmitScalarExpr(E->getArg(0));
    Value *Opc1 = EmitScalarExpr(E->getArg(1));
    Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
    Value *CRm = EmitScalarExpr(E->getArg(3));
    Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
    Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
    Value *Rt2 = Builder.CreateTruncOrBitCast(Builder.CreateLShr(RtAndRt2, C1),
                                              Int32Ty);

    return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
  }
  if (BuiltinID == clang::ARM::BI__builtin_arm_mrrc ||
      BuiltinID == clang::ARM::BI__builtin_arm_mrrc2) {
    Function *F;
    switch (BuiltinID) {
    default: llvm_unreachable("unexpected builtin");
    case clang::ARM::BI__builtin_arm_mrrc:
      F = CGM.getIntrinsic(Intrinsic::arm_mrrc); break;
    case clang::ARM::BI__builtin_arm_mrrc2:
      F = CGM.getIntrinsic(Intrinsic::arm_mrrc2); break;
    }

    Value *Coproc = EmitScalarExpr(E->getArg(0));
    Value *Opc1 = EmitScalarExpr(E->getArg(1));
    Value *CRm = EmitScalarExpr(E->getArg(2));
    Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});

    // Returns an unsigned 64 bit integer, represented
    // as two 32 bit integers.
    Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
    Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
    Rt = Builder.CreateZExt(Rt, Int64Ty);
    Rt1 = Builder.CreateZExt(Rt1, Int64Ty);

    Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
    RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
    RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);

    return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
  }
  if (BuiltinID == clang::ARM::BI__builtin_arm_ldrexd ||
      ((BuiltinID == clang::ARM::BI__builtin_arm_ldrex ||
        BuiltinID == clang::ARM::BI__builtin_arm_ldaex) &&
       getContext().getTypeSize(E->getType()) == 64) ||
      BuiltinID == clang::ARM::BI__ldrexd) {
    Function *F;
    switch (BuiltinID) {
    default: llvm_unreachable("unexpected builtin");
    case clang::ARM::BI__builtin_arm_ldaex:
      F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
      break;
    case clang::ARM::BI__builtin_arm_ldrexd:
    case clang::ARM::BI__builtin_arm_ldrex:
    case clang::ARM::BI__ldrexd:
      F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
      break;
    }

    Value *LdPtr = EmitScalarExpr(E->getArg(0));
    Value *Val = Builder.CreateCall(F, LdPtr, "ldrexd");

    Value *Val0 = Builder.CreateExtractValue(Val, 1);
    Value *Val1 = Builder.CreateExtractValue(Val, 0);
    Val0 = Builder.CreateZExt(Val0, Int64Ty);
    Val1 = Builder.CreateZExt(Val1, Int64Ty);

    Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
    Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
    Val = Builder.CreateOr(Val, Val1);
    return Builder.CreateBitCast(Val, ConvertType(E->getType()));
  }
  if (BuiltinID == clang::ARM::BI__builtin_arm_ldrex ||
      BuiltinID == clang::ARM::BI__builtin_arm_ldaex) {
    Value *LoadAddr = EmitScalarExpr(E->getArg(0));

    QualType Ty = E->getType();
    llvm::Type *RealResTy = ConvertType(Ty);
    llvm::Type *IntTy =
        llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));

    Function *F = CGM.getIntrinsic(
        BuiltinID == clang::ARM::BI__builtin_arm_ldaex ? Intrinsic::arm_ldaex
                                                       : Intrinsic::arm_ldrex,
        UnqualPtrTy);
    CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
    Val->addParamAttr(
        0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));

    if (RealResTy->isPointerTy())
      return Builder.CreateIntToPtr(Val, RealResTy);

    llvm::Type *IntResTy = llvm::IntegerType::get(
        getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
    return Builder.CreateBitCast(Builder.CreateTruncOrBitCast(Val, IntResTy),
                                 RealResTy);
  }
  if (BuiltinID == clang::ARM::BI__builtin_arm_strexd ||
      ((BuiltinID == clang::ARM::BI__builtin_arm_stlex ||
        BuiltinID == clang::ARM::BI__builtin_arm_strex) &&
       getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
    Function *F = CGM.getIntrinsic(
        BuiltinID == clang::ARM::BI__builtin_arm_stlex ? Intrinsic::arm_stlexd
                                                       : Intrinsic::arm_strexd);
    llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);

    Address Tmp = CreateMemTemp(E->getArg(0)->getType());
    Value *Val = EmitScalarExpr(E->getArg(0));
    Builder.CreateStore(Val, Tmp);

    Address LdPtr = Tmp.withElementType(STy);
    Val = Builder.CreateLoad(LdPtr);

    Value *Arg0 = Builder.CreateExtractValue(Val, 0);
    Value *Arg1 = Builder.CreateExtractValue(Val, 1);
    Value *StPtr = EmitScalarExpr(E->getArg(1));
    return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
  }
  if (BuiltinID == clang::ARM::BI__builtin_arm_strex ||
      BuiltinID == clang::ARM::BI__builtin_arm_stlex) {
    Value *StoreVal = EmitScalarExpr(E->getArg(0));
    Value *StoreAddr = EmitScalarExpr(E->getArg(1));

    QualType Ty = E->getArg(0)->getType();
    llvm::Type *StoreTy =
        llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));

    if (StoreVal->getType()->isPointerTy())
      StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
    else {
      llvm::Type *IntTy = llvm::IntegerType::get(
          getLLVMContext(),
          CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
      StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
      StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
    }

    Function *F = CGM.getIntrinsic(
        BuiltinID == clang::ARM::BI__builtin_arm_stlex ? Intrinsic::arm_stlex
                                                       : Intrinsic::arm_strex,
        StoreAddr->getType());

    CallInst *CI = Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
    CI->addParamAttr(
        1, Attribute::get(getLLVMContext(), Attribute::ElementType, StoreTy));
    return CI;
  }
  if (BuiltinID == clang::ARM::BI__builtin_arm_clrex) {
    Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
    return Builder.CreateCall(F);
  }
  // CRC32
  Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
  switch (BuiltinID) {
  case clang::ARM::BI__builtin_arm_crc32b:
    CRCIntrinsicID = Intrinsic::arm_crc32b; break;
  case clang::ARM::BI__builtin_arm_crc32cb:
    CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
  case clang::ARM::BI__builtin_arm_crc32h:
    CRCIntrinsicID = Intrinsic::arm_crc32h; break;
  case clang::ARM::BI__builtin_arm_crc32ch:
    CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
  case clang::ARM::BI__builtin_arm_crc32w:
  case clang::ARM::BI__builtin_arm_crc32d:
    CRCIntrinsicID = Intrinsic::arm_crc32w; break;
  case clang::ARM::BI__builtin_arm_crc32cw:
  case clang::ARM::BI__builtin_arm_crc32cd:
    CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
  }

  if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    Value *Arg1 = EmitScalarExpr(E->getArg(1));

    // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
    // intrinsics: perform the conversion to uint32 and call twice.
    if (BuiltinID == clang::ARM::BI__builtin_arm_crc32d ||
        BuiltinID == clang::ARM::BI__builtin_arm_crc32cd) {
      Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
      Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
      Value *Arg1b = Builder.CreateLShr(Arg1, C1);
      Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);

      Function *F = CGM.getIntrinsic(CRCIntrinsicID);
      Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
      return Builder.CreateCall(F, {Res, Arg1b});
    } else {
      Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);

      Function *F = CGM.getIntrinsic(CRCIntrinsicID);
      return Builder.CreateCall(F, {Arg0, Arg1});
    }
  }
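  // AArch32 has no 64-bit CRC32 instruction, so crc32d/crc32cd are synthesized
  // by feeding the two 32-bit halves through the w-form intrinsic in sequence;
  // the AArch64 emitter below maps them to the native crc32x/crc32cx instead.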
  if (BuiltinID == clang::ARM::BI__builtin_arm_rsr ||
      BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
      BuiltinID == clang::ARM::BI__builtin_arm_rsrp ||
      BuiltinID == clang::ARM::BI__builtin_arm_wsr ||
      BuiltinID == clang::ARM::BI__builtin_arm_wsr64 ||
      BuiltinID == clang::ARM::BI__builtin_arm_wsrp) {
    SpecialRegisterAccessKind AccessKind = Write;
    if (BuiltinID == clang::ARM::BI__builtin_arm_rsr ||
        BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
        BuiltinID == clang::ARM::BI__builtin_arm_rsrp)
      AccessKind = VolatileRead;

    bool IsPointerBuiltin = BuiltinID == clang::ARM::BI__builtin_arm_rsrp ||
                            BuiltinID == clang::ARM::BI__builtin_arm_wsrp;

    bool Is64Bit = BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
                   BuiltinID == clang::ARM::BI__builtin_arm_wsr64;

    llvm::Type *ValueType;
    llvm::Type *RegisterType;
    if (IsPointerBuiltin) {
      ValueType = VoidPtrTy;
      RegisterType = Int32Ty;
    } else if (Is64Bit) {
      ValueType = RegisterType = Int64Ty;
    } else {
      ValueType = RegisterType = Int32Ty;
    }

    return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
                                      AccessKind);
  }
  if (BuiltinID == ARM::BI__builtin_sponentry) {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
    return Builder.CreateCall(F);
  }
  // Some intrinsics are equivalent; if so, use the base intrinsic ID.
  auto It = llvm::find_if(NEONEquivalentIntrinsicMap, [BuiltinID](auto &P) {
    return P.first == BuiltinID;
  });
  if (It != end(NEONEquivalentIntrinsicMap))
    BuiltinID = It->second;

  // Find out if any arguments are required to be integer constant
  // expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");
  auto getAlignmentValue32 = [&](Address addr) -> Value * {
    return Builder.getInt32(addr.getAlignment().getQuantity());
  };

  Address PtrOp0 = Address::invalid();
  Address PtrOp1 = Address::invalid();
  SmallVector<Value *, 4> Ops;
  bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
  unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
  for (unsigned i = 0, e = NumArgs; i != e; i++) {
    if (i == 0) {
      switch (BuiltinID) {
      case NEON::BI__builtin_neon_vld1_v:
      case NEON::BI__builtin_neon_vld1q_v:
      case NEON::BI__builtin_neon_vld1q_lane_v:
      case NEON::BI__builtin_neon_vld1_lane_v:
      case NEON::BI__builtin_neon_vld1_dup_v:
      case NEON::BI__builtin_neon_vld1q_dup_v:
      case NEON::BI__builtin_neon_vst1_v:
      case NEON::BI__builtin_neon_vst1q_v:
      case NEON::BI__builtin_neon_vst1q_lane_v:
      case NEON::BI__builtin_neon_vst1_lane_v:
      case NEON::BI__builtin_neon_vst2_v:
      case NEON::BI__builtin_neon_vst2q_v:
      case NEON::BI__builtin_neon_vst2_lane_v:
      case NEON::BI__builtin_neon_vst2q_lane_v:
      case NEON::BI__builtin_neon_vst3_v:
      case NEON::BI__builtin_neon_vst3q_v:
      case NEON::BI__builtin_neon_vst3_lane_v:
      case NEON::BI__builtin_neon_vst3q_lane_v:
      case NEON::BI__builtin_neon_vst4_v:
      case NEON::BI__builtin_neon_vst4q_v:
      case NEON::BI__builtin_neon_vst4_lane_v:
      case NEON::BI__builtin_neon_vst4q_lane_v:
        // Get the alignment for the argument in addition to the value;
        // we'll use it later.
        PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
        Ops.push_back(PtrOp0.emitRawPointer(*this));
        continue;
      }
    }
    if (i == 1) {
      switch (BuiltinID) {
      case NEON::BI__builtin_neon_vld2_v:
      case NEON::BI__builtin_neon_vld2q_v:
      case NEON::BI__builtin_neon_vld3_v:
      case NEON::BI__builtin_neon_vld3q_v:
      case NEON::BI__builtin_neon_vld4_v:
      case NEON::BI__builtin_neon_vld4q_v:
      case NEON::BI__builtin_neon_vld2_lane_v:
      case NEON::BI__builtin_neon_vld2q_lane_v:
      case NEON::BI__builtin_neon_vld3_lane_v:
      case NEON::BI__builtin_neon_vld3q_lane_v:
      case NEON::BI__builtin_neon_vld4_lane_v:
      case NEON::BI__builtin_neon_vld4q_lane_v:
      case NEON::BI__builtin_neon_vld2_dup_v:
      case NEON::BI__builtin_neon_vld2q_dup_v:
      case NEON::BI__builtin_neon_vld3_dup_v:
      case NEON::BI__builtin_neon_vld3q_dup_v:
      case NEON::BI__builtin_neon_vld4_dup_v:
      case NEON::BI__builtin_neon_vld4q_dup_v:
        // Get the alignment for the argument in addition to the value;
        // we'll use it later.
        PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
        Ops.push_back(PtrOp1.emitRawPointer(*this));
        continue;
      }
    }

    Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
  }
  switch (BuiltinID) {
  default: break;
  case NEON::BI__builtin_neon_vget_lane_i8:
  case NEON::BI__builtin_neon_vget_lane_i16:
  case NEON::BI__builtin_neon_vget_lane_i32:
  case NEON::BI__builtin_neon_vget_lane_i64:
  case NEON::BI__builtin_neon_vget_lane_bf16:
  case NEON::BI__builtin_neon_vget_lane_f32:
  case NEON::BI__builtin_neon_vgetq_lane_i8:
  case NEON::BI__builtin_neon_vgetq_lane_i16:
  case NEON::BI__builtin_neon_vgetq_lane_i32:
  case NEON::BI__builtin_neon_vgetq_lane_i64:
  case NEON::BI__builtin_neon_vgetq_lane_bf16:
  case NEON::BI__builtin_neon_vgetq_lane_f32:
  case NEON::BI__builtin_neon_vduph_lane_bf16:
  case NEON::BI__builtin_neon_vduph_laneq_bf16:
    return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
  case NEON::BI__builtin_neon_vrndns_f32: {
    Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *Tys[] = {Arg->getType()};
    Function *F = CGM.getIntrinsic(Intrinsic::roundeven, Tys);
    return Builder.CreateCall(F, {Arg}, "vrndn"); }
  case NEON::BI__builtin_neon_vset_lane_i8:
  case NEON::BI__builtin_neon_vset_lane_i16:
  case NEON::BI__builtin_neon_vset_lane_i32:
  case NEON::BI__builtin_neon_vset_lane_i64:
  case NEON::BI__builtin_neon_vset_lane_bf16:
  case NEON::BI__builtin_neon_vset_lane_f32:
  case NEON::BI__builtin_neon_vsetq_lane_i8:
  case NEON::BI__builtin_neon_vsetq_lane_i16:
  case NEON::BI__builtin_neon_vsetq_lane_i32:
  case NEON::BI__builtin_neon_vsetq_lane_i64:
  case NEON::BI__builtin_neon_vsetq_lane_bf16:
  case NEON::BI__builtin_neon_vsetq_lane_f32:
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  case NEON::BI__builtin_neon_vsha1h_u32:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
                        "vsha1h");
  case NEON::BI__builtin_neon_vsha1cq_u32:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
                        "vsha1h");
  case NEON::BI__builtin_neon_vsha1pq_u32:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
                        "vsha1h");
  case NEON::BI__builtin_neon_vsha1mq_u32:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
                        "vsha1h");

  case NEON::BI__builtin_neon_vcvth_bf16_f32: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops,
                        "vcvtbfp2bf");
  }
  case clang::ARM::BI_MoveToCoprocessor:
  case clang::ARM::BI_MoveToCoprocessor2: {
    Function *F = CGM.getIntrinsic(BuiltinID == clang::ARM::BI_MoveToCoprocessor
                                       ? Intrinsic::arm_mcr
                                       : Intrinsic::arm_mcr2);
    return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
                                  Ops[3], Ops[4], Ops[5]});
  }
  }
  assert(HasExtraArg);
  const Expr *Arg = E->getArg(E->getNumArgs()-1);
  std::optional<llvm::APSInt> Result =
      Arg->getIntegerConstantExpr(getContext());
  if (!Result)
    return nullptr;
  if (BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_f ||
      BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_d) {
    // Determine the overloaded type of this builtin.
    llvm::Type *Ty;
    if (BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_f)
      Ty = FloatTy;
    else
      Ty = DoubleTy;

    // Determine whether this is an unsigned conversion or not.
    bool usgn = Result->getZExtValue() == 1;
    unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;

    // Call the appropriate intrinsic.
    Function *F = CGM.getIntrinsic(Int, Ty);
    return Builder.CreateCall(F, Ops, "vcvtr");
  }
  // Determine the type of this overloaded NEON intrinsic.
  NeonTypeFlags Type = Result->getZExtValue();
  bool usgn = Type.isUnsigned();
  bool rightShift = false;

  llvm::FixedVectorType *VTy =
      GetNeonType(this, Type, HasFastHalfType, false,
                  getTarget().hasBFloat16Type());
  llvm::Type *Ty = VTy;
  if (!Ty)
    return nullptr;

  // Many NEON builtins have identical semantics on ARM and AArch64; those
  // are dispatched through the shared map and common emitter.
  auto IntrinsicMap = ArrayRef(ARMSIMDIntrinsicMap);
  const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
      IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
  if (Builtin)
    return EmitCommonNeonBuiltinExpr(
        Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
        Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);

  unsigned Int;
  switch (BuiltinID) {
  default: return nullptr;
  case NEON::BI__builtin_neon_vld1q_lane_v:
    // Handle 64-bit integer elements as a special case.  Use shuffles of
    // one-element vectors to avoid poor code for i64 in the backend.
    if (VTy->getElementType()->isIntegerTy(64)) {
      // Extract the other lane.
      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
      int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
      Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
      // Load the value as a one-element vector.
      Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1);
      llvm::Type *Tys[] = {Ty, Int8PtrTy};
      Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
      Value *Align = getAlignmentValue32(PtrOp0);
      Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
      // Combine them.
      int Indices[] = {1 - Lane, Lane};
      return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane");
    }
    [[fallthrough]];
  case NEON::BI__builtin_neon_vld1_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    PtrOp0 = PtrOp0.withElementType(VTy->getElementType());
    Value *Ld = Builder.CreateLoad(PtrOp0);
    return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
  }
  case NEON::BI__builtin_neon_vqrshrn_n_v:
    Int =
        usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
                        1, true);
  case NEON::BI__builtin_neon_vqrshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
                        Ops, "vqrshrun_n", 1, true);
  case NEON::BI__builtin_neon_vqshrn_n_v:
    Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
                        1, true);
  case NEON::BI__builtin_neon_vqshrun_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
                        Ops, "vqshrun_n", 1, true);
  case NEON::BI__builtin_neon_vrecpe_v:
  case NEON::BI__builtin_neon_vrecpeq_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
                        Ops, "vrecpe");
  case NEON::BI__builtin_neon_vrshrn_n_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
                        Ops, "vrshrn_n", 1, true);
  case NEON::BI__builtin_neon_vrsra_n_v:
  case NEON::BI__builtin_neon_vrsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
    Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
    Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
    return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
  case NEON::BI__builtin_neon_vsri_n_v:
  case NEON::BI__builtin_neon_vsriq_n_v:
    rightShift = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vsli_n_v:
  case NEON::BI__builtin_neon_vsliq_n_v:
    Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
                        Ops, "vsli_n");
  case NEON::BI__builtin_neon_vsra_n_v:
  case NEON::BI__builtin_neon_vsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  case NEON::BI__builtin_neon_vst1q_lane_v:
    // Handle 64-bit integer elements as a special case.  Use a shuffle to get
    // a one-element vector and avoid poor code for i64 in the backend.
    if (VTy->getElementType()->isIntegerTy(64)) {
      Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
      Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
      Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
      Ops[2] = getAlignmentValue32(PtrOp0);
      llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
      return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
                                                 Tys), Ops);
    }
    [[fallthrough]];
  case NEON::BI__builtin_neon_vst1_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
    return Builder.CreateStore(Ops[1],
                               PtrOp0.withElementType(Ops[1]->getType()));
  }
  case NEON::BI__builtin_neon_vtbl1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
                        Ops, "vtbl1");
  case NEON::BI__builtin_neon_vtbl2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
                        Ops, "vtbl2");
  case NEON::BI__builtin_neon_vtbl3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
                        Ops, "vtbl3");
  case NEON::BI__builtin_neon_vtbl4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
                        Ops, "vtbl4");
  case NEON::BI__builtin_neon_vtbx1_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
                        Ops, "vtbx1");
  case NEON::BI__builtin_neon_vtbx2_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
                        Ops, "vtbx2");
  case NEON::BI__builtin_neon_vtbx3_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
                        Ops, "vtbx3");
  case NEON::BI__builtin_neon_vtbx4_v:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
                        Ops, "vtbx4");
  }
}
Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E,
                                               llvm::Triple::ArchType Arch) {
  if (BuiltinID == Builtin::BI__builtin_cpu_supports)
    return EmitAArch64CpuSupports(E);
  unsigned HintID = static_cast<unsigned>(-1);
  switch (BuiltinID) {
  default: break;
  case clang::AArch64::BI__builtin_arm_nop:
    HintID = 0;
    break;
  case clang::AArch64::BI__builtin_arm_yield:
  case clang::AArch64::BI__yield:
    HintID = 1;
    break;
  case clang::AArch64::BI__builtin_arm_wfe:
  case clang::AArch64::BI__wfe:
    HintID = 2;
    break;
  case clang::AArch64::BI__builtin_arm_wfi:
  case clang::AArch64::BI__wfi:
    HintID = 3;
    break;
  case clang::AArch64::BI__builtin_arm_sev:
  case clang::AArch64::BI__sev:
    HintID = 4;
    break;
  case clang::AArch64::BI__builtin_arm_sevl:
  case clang::AArch64::BI__sevl:
    HintID = 5;
    break;
  }

  if (HintID != static_cast<unsigned>(-1)) {
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
    return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
  }
  if (BuiltinID == clang::AArch64::BI__builtin_arm_trap) {
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_break);
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(F, Builder.CreateZExt(Arg, CGM.Int32Ty));
  }
  if (BuiltinID == clang::AArch64::BI__builtin_arm_get_sme_state) {
    // Create call to __arm_sme_state and store the results to the two
    // pointers.
    CallInst *CI = EmitRuntimeCall(CGM.CreateRuntimeFunction(
        llvm::FunctionType::get(StructType::get(CGM.Int64Ty, CGM.Int64Ty), {},
                                false),
        "__arm_sme_state"));
    auto Attrs = AttributeList().addFnAttribute(getLLVMContext(),
                                                "aarch64_pstate_sm_compatible");
    CI->setAttributes(Attrs);
    CI->setCallingConv(
        llvm::CallingConv::
            AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2);
    Builder.CreateStore(Builder.CreateExtractValue(CI, 0),
                        EmitPointerWithAlignment(E->getArg(0)));
    return Builder.CreateStore(Builder.CreateExtractValue(CI, 1),
                               EmitPointerWithAlignment(E->getArg(1)));
  }
  if (BuiltinID == clang::AArch64::BI__builtin_arm_rbit) {
    assert((getContext().getTypeSize(E->getType()) == 32) &&
           "rbit of unusual size!");
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
  }
  if (BuiltinID == clang::AArch64::BI__builtin_arm_rbit64) {
    assert((getContext().getTypeSize(E->getType()) == 64) &&
           "rbit of unusual size!");
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
  }
  if (BuiltinID == clang::AArch64::BI__builtin_arm_clz ||
      BuiltinID == clang::AArch64::BI__builtin_arm_clz64) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Arg->getType());
    Value *Res = Builder.CreateCall(F, {Arg, Builder.getInt1(false)});
    if (BuiltinID == clang::AArch64::BI__builtin_arm_clz64)
      Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
    return Res;
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_cls) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg,
                              "cls");
  }
  if (BuiltinID == clang::AArch64::BI__builtin_arm_cls64) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg,
                              "cls");
  }
  if (BuiltinID == clang::AArch64::BI__builtin_arm_rint32zf ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rint32z) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *Ty = Arg->getType();
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32z, Ty),
                              Arg, "frint32z");
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_rint64zf ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rint64z) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *Ty = Arg->getType();
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64z, Ty),
                              Arg, "frint64z");
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_rint32xf ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rint32x) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *Ty = Arg->getType();
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32x, Ty),
                              Arg, "frint32x");
  }

  if (BuiltinID == clang::AArch64::BI__builtin_arm_rint64xf ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rint64x) {
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *Ty = Arg->getType();
    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64x, Ty),
                              Arg, "frint64x");
  }
  if (BuiltinID == clang::AArch64::BI__builtin_arm_jcvt) {
    assert((getContext().getTypeSize(E->getType()) == 32) &&
           "__jcvt of unusual size!");
    llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
  }
  if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b ||
      BuiltinID == clang::AArch64::BI__builtin_arm_st64b ||
      BuiltinID == clang::AArch64::BI__builtin_arm_st64bv ||
      BuiltinID == clang::AArch64::BI__builtin_arm_st64bv0) {
    llvm::Value *MemAddr = EmitScalarExpr(E->getArg(0));
    llvm::Value *ValPtr = EmitScalarExpr(E->getArg(1));

    if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b) {
      // Load from the address via an LLVM intrinsic, receiving a
      // tuple of 8 i64 words, and store each one to ValPtr.
      Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ld64b);
      llvm::Value *Val = Builder.CreateCall(F, MemAddr);
      llvm::Value *ToRet;
      for (size_t i = 0; i < 8; i++) {
        llvm::Value *ValOffsetPtr =
            Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
        Address Addr =
            Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(8));
        ToRet = Builder.CreateStore(Builder.CreateExtractValue(Val, i), Addr);
      }
      return ToRet;
    } else {
      // Load 8 i64 words from ValPtr and pass them, with the address, to
      // the store intrinsic.
      SmallVector<llvm::Value *, 9> Args;
      Args.push_back(MemAddr);
      for (size_t i = 0; i < 8; i++) {
        llvm::Value *ValOffsetPtr =
            Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
        Address Addr =
            Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(8));
        Args.push_back(Builder.CreateLoad(Addr));
      }

      auto Intr = (BuiltinID == clang::AArch64::BI__builtin_arm_st64b
                       ? Intrinsic::aarch64_st64b
                   : BuiltinID == clang::AArch64::BI__builtin_arm_st64bv
                       ? Intrinsic::aarch64_st64bv
                       : Intrinsic::aarch64_st64bv0);
      Function *F = CGM.getIntrinsic(Intr);
      return Builder.CreateCall(F, Args);
    }
  }
  if (BuiltinID == clang::AArch64::BI__builtin_arm_rndr ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rndrrs) {
    auto Intr = (BuiltinID == clang::AArch64::BI__builtin_arm_rndr
                     ? Intrinsic::aarch64_rndr
                     : Intrinsic::aarch64_rndrrs);
    Function *F = CGM.getIntrinsic(Intr);
    llvm::Value *Val = Builder.CreateCall(F);
    Value *RandomValue = Builder.CreateExtractValue(Val, 0);
    Value *Status = Builder.CreateExtractValue(Val, 1);

    Address MemAddress = EmitPointerWithAlignment(E->getArg(0));
    Builder.CreateStore(RandomValue, MemAddress);
    Status = Builder.CreateZExt(Status, Int32Ty);
    return Status;
  }
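  // Both intrinsics return {i64, i1}: the random value is stored through the
  // argument pointer, and the i1 success flag is widened to become the i32
  // return of __builtin_arm_rndr{,rs}.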
  if (BuiltinID == clang::AArch64::BI__clear_cache) {
    assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
    const FunctionDecl *FD = E->getDirectCallee();
    Value *Ops[2];
    for (unsigned i = 0; i < 2; i++)
      Ops[i] = EmitScalarExpr(E->getArg(i));
    llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
    llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
    StringRef Name = FD->getName();
    return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
  }
  if ((BuiltinID == clang::AArch64::BI__builtin_arm_ldrex ||
       BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) &&
      getContext().getTypeSize(E->getType()) == 128) {
    Function *F =
        CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
                             ? Intrinsic::aarch64_ldaxp
                             : Intrinsic::aarch64_ldxp);

    Value *LdPtr = EmitScalarExpr(E->getArg(0));
    Value *Val = Builder.CreateCall(F, LdPtr, "ldxp");

    Value *Val0 = Builder.CreateExtractValue(Val, 1);
    Value *Val1 = Builder.CreateExtractValue(Val, 0);
    llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
    Val0 = Builder.CreateZExt(Val0, Int128Ty);
    Val1 = Builder.CreateZExt(Val1, Int128Ty);

    Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
    Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
    Val = Builder.CreateOr(Val, Val1);
    return Builder.CreateBitCast(Val, ConvertType(E->getType()));
  } else if (BuiltinID == clang::AArch64::BI__builtin_arm_ldrex ||
             BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) {
    Value *LoadAddr = EmitScalarExpr(E->getArg(0));

    QualType Ty = E->getType();
    llvm::Type *RealResTy = ConvertType(Ty);
    llvm::Type *IntTy =
        llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));

    Function *F =
        CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
                             ? Intrinsic::aarch64_ldaxr
                             : Intrinsic::aarch64_ldxr,
                         UnqualPtrTy);
    CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
    Val->addParamAttr(
        0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));

    if (RealResTy->isPointerTy())
      return Builder.CreateIntToPtr(Val, RealResTy);

    llvm::Type *IntResTy = llvm::IntegerType::get(
        getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
    return Builder.CreateBitCast(Builder.CreateTruncOrBitCast(Val, IntResTy),
                                 RealResTy);
  }
  if ((BuiltinID == clang::AArch64::BI__builtin_arm_strex ||
       BuiltinID == clang::AArch64::BI__builtin_arm_stlex) &&
      getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
    Function *F =
        CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_stlex
                             ? Intrinsic::aarch64_stlxp
                             : Intrinsic::aarch64_stxp);
    llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);

    Address Tmp = CreateMemTemp(E->getArg(0)->getType());
    EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);

    Tmp = Tmp.withElementType(STy);
    llvm::Value *Val = Builder.CreateLoad(Tmp);

    Value *Arg0 = Builder.CreateExtractValue(Val, 0);
    Value *Arg1 = Builder.CreateExtractValue(Val, 1);
    Value *StPtr = EmitScalarExpr(E->getArg(1));
    return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
  }
  if (BuiltinID == clang::AArch64::BI__builtin_arm_strex ||
      BuiltinID == clang::AArch64::BI__builtin_arm_stlex) {
    Value *StoreVal = EmitScalarExpr(E->getArg(0));
    Value *StoreAddr = EmitScalarExpr(E->getArg(1));

    QualType Ty = E->getArg(0)->getType();
    llvm::Type *StoreTy =
        llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));

    if (StoreVal->getType()->isPointerTy())
      StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
    else {
      llvm::Type *IntTy = llvm::IntegerType::get(
          getLLVMContext(),
          CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
      StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
      StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
    }

    Function *F =
        CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_stlex
                             ? Intrinsic::aarch64_stlxr
                             : Intrinsic::aarch64_stxr,
                         StoreAddr->getType());
    CallInst *CI = Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
    CI->addParamAttr(
        1, Attribute::get(getLLVMContext(), Attribute::ElementType, StoreTy));
    return CI;
  }
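  // The exclusive builtins map onto LDXR/LDAXR and STXR/STLXR (or the
  // LDXP/STXP pair forms for 128-bit types above). A typical C-level use is
  // a manual retry loop; a rough sketch (hypothetical user code, not part of
  // this file):
  //
  //   uint64_t old;
  //   do {
  //     old = __builtin_arm_ldrex(p);              // load-exclusive
  //   } while (__builtin_arm_strex(old + 1, p));   // nonzero => retry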
  if (BuiltinID == clang::AArch64::BI__getReg) {
    Expr::EvalResult Result;
    if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
      llvm_unreachable("Sema will ensure that the parameter is constant");

    llvm::APSInt Value = Result.Val.getInt();
    LLVMContext &Context = CGM.getLLVMContext();
    std::string Reg = Value == 31 ? "sp" : "x" + toString(Value, 10);

    llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
    llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
    llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);

    llvm::Function *F = CGM.getIntrinsic(Intrinsic::read_register, {Int64Ty});
    return Builder.CreateCall(F, Metadata);
  }
  if (BuiltinID == clang::AArch64::BI__break) {
    Expr::EvalResult Result;
    if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
      llvm_unreachable("Sema will ensure that the parameter is constant");

    llvm::Function *F = CGM.getIntrinsic(Intrinsic::aarch64_break);
    return Builder.CreateCall(F, {EmitScalarExpr(E->getArg(0))});
  }
  if (BuiltinID == clang::AArch64::BI__builtin_arm_clrex) {
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
    return Builder.CreateCall(F);
  }

  if (BuiltinID == clang::AArch64::BI_ReadWriteBarrier)
    return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
                               llvm::SyncScope::SingleThread);
  // CRC32
  Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
  switch (BuiltinID) {
  case clang::AArch64::BI__builtin_arm_crc32b:
    CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
  case clang::AArch64::BI__builtin_arm_crc32cb:
    CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
  case clang::AArch64::BI__builtin_arm_crc32h:
    CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
  case clang::AArch64::BI__builtin_arm_crc32ch:
    CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
  case clang::AArch64::BI__builtin_arm_crc32w:
    CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
  case clang::AArch64::BI__builtin_arm_crc32cw:
    CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
  case clang::AArch64::BI__builtin_arm_crc32d:
    CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
  case clang::AArch64::BI__builtin_arm_crc32cd:
    CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
  }
  if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    Value *Arg1 = EmitScalarExpr(E->getArg(1));
    Function *F = CGM.getIntrinsic(CRCIntrinsicID);

    llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
    Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);

    return Builder.CreateCall(F, {Arg0, Arg1});
  }
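  // All eight CRC32 builtins funnel through the same emission path; only the
  // width of the data operand differs, hence the ZExtOrBitCast above. An
  // illustrative call (hypothetical user code):
  //
  //   uint32_t crc = __builtin_arm_crc32b(crc, byte);  // -> aarch64.crc32b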
  if (BuiltinID == AArch64::BI__builtin_arm_mops_memset_tag) {
    Value *Dst = EmitScalarExpr(E->getArg(0));
    Value *Val = EmitScalarExpr(E->getArg(1));
    Value *Size = EmitScalarExpr(E->getArg(2));
    return Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::aarch64_mops_memset_tag), {Dst, Val, Size});
  }
  // Memory Tagging Extensions (MTE) Intrinsics
  Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
  switch (BuiltinID) {
  case clang::AArch64::BI__builtin_arm_irg:
    MTEIntrinsicID = Intrinsic::aarch64_irg; break;
  case clang::AArch64::BI__builtin_arm_addg:
    MTEIntrinsicID = Intrinsic::aarch64_addg; break;
  case clang::AArch64::BI__builtin_arm_gmi:
    MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
  case clang::AArch64::BI__builtin_arm_ldg:
    MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
  case clang::AArch64::BI__builtin_arm_stg:
    MTEIntrinsicID = Intrinsic::aarch64_stg; break;
  case clang::AArch64::BI__builtin_arm_subp:
    MTEIntrinsicID = Intrinsic::aarch64_subp; break;
  }
  if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
    if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
      Value *Pointer = EmitScalarExpr(E->getArg(0));
      Value *Mask = EmitScalarExpr(E->getArg(1));

      Mask = Builder.CreateZExt(Mask, Int64Ty);
      return Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID),
                                {Pointer, Mask});
    }
    if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
      Value *Pointer = EmitScalarExpr(E->getArg(0));
      Value *TagOffset = EmitScalarExpr(E->getArg(1));

      TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
      return Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID),
                                {Pointer, TagOffset});
    }
    if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
      Value *Pointer = EmitScalarExpr(E->getArg(0));
      Value *ExcludedMask = EmitScalarExpr(E->getArg(1));

      ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
      return Builder.CreateCall(
          CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
    }
    if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
      Value *TagAddress = EmitScalarExpr(E->getArg(0));
      return Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID),
                                {TagAddress, TagAddress});
    }
    if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
      Value *TagAddress = EmitScalarExpr(E->getArg(0));
      return Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID),
                                {TagAddress, TagAddress});
    }
    if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
      Value *PointerA = EmitScalarExpr(E->getArg(0));
      Value *PointerB = EmitScalarExpr(E->getArg(1));
      return Builder.CreateCall(
          CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
    }
  }
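  // MTE summary: irg inserts a random allocation tag into a pointer, addg
  // adjusts a pointer's tag by an offset, gmi accumulates an excluded-tag
  // mask, ldg reads the memory tag at an address back into a pointer, stg
  // writes a pointer's tag to memory, and subp computes the signed
  // difference of two tagged pointers.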
  if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 ||
      BuiltinID == clang::AArch64::BI__builtin_arm_rsrp ||
      BuiltinID == clang::AArch64::BI__builtin_arm_wsr ||
      BuiltinID == clang::AArch64::BI__builtin_arm_wsr64 ||
      BuiltinID == clang::AArch64::BI__builtin_arm_wsr128 ||
      BuiltinID == clang::AArch64::BI__builtin_arm_wsrp) {

    SpecialRegisterAccessKind AccessKind = Write;
    if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
        BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 ||
        BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 ||
        BuiltinID == clang::AArch64::BI__builtin_arm_rsrp)
      AccessKind = VolatileRead;

    bool IsPointerBuiltin = BuiltinID == clang::AArch64::BI__builtin_arm_rsrp ||
                            BuiltinID == clang::AArch64::BI__builtin_arm_wsrp;

    bool Is32Bit = BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
                   BuiltinID == clang::AArch64::BI__builtin_arm_wsr;

    bool Is128Bit = BuiltinID == clang::AArch64::BI__builtin_arm_rsr128 ||
                    BuiltinID == clang::AArch64::BI__builtin_arm_wsr128;

    llvm::Type *ValueType;
    llvm::Type *RegisterType = Int64Ty;
    if (Is32Bit) {
      ValueType = Int32Ty;
    } else if (Is128Bit) {
      llvm::Type *Int128Ty =
          llvm::IntegerType::getInt128Ty(CGM.getLLVMContext());
      ValueType = Int128Ty;
      RegisterType = Int128Ty;
    } else if (IsPointerBuiltin) {
      ValueType = VoidPtrTy;
    } else {
      ValueType = Int64Ty;
    }

    return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
                                      AccessKind);
  }
  if (BuiltinID == clang::AArch64::BI_ReadStatusReg ||
      BuiltinID == clang::AArch64::BI_WriteStatusReg ||
      BuiltinID == clang::AArch64::BI__sys) {
    LLVMContext &Context = CGM.getLLVMContext();

    unsigned SysReg =
        E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();

    std::string SysRegStr;
    unsigned SysRegOp0 = (BuiltinID == clang::AArch64::BI_ReadStatusReg ||
                          BuiltinID == clang::AArch64::BI_WriteStatusReg)
                             ? ((1 << 1) | ((SysReg >> 14) & 1))
                             : 1;
    llvm::raw_string_ostream(SysRegStr)
        << SysRegOp0 << ":" << ((SysReg >> 11) & 7) << ":"
        << ((SysReg >> 7) & 15) << ":" << ((SysReg >> 3) & 15) << ":"
        << (SysReg & 7);

    llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
    llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
    llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);

    llvm::Type *RegisterType = Int64Ty;
    llvm::Type *Types[] = { RegisterType };

    if (BuiltinID == clang::AArch64::BI_ReadStatusReg) {
      llvm::Function *F = CGM.getIntrinsic(Intrinsic::read_register, Types);
      return Builder.CreateCall(F, Metadata);
    }

    llvm::Function *F = CGM.getIntrinsic(Intrinsic::write_register, Types);
    llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
    llvm::Value *Result = Builder.CreateCall(F, {Metadata, ArgValue});
    if (BuiltinID == clang::AArch64::BI__sys)
      // Return 0 for convenience.
      Result = ConstantInt::get(Builder.getInt32Ty(), 0);
    return Result;
  }
  if (BuiltinID == clang::AArch64::BI_AddressOfReturnAddress) {
    llvm::Function *F =
        CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
    return Builder.CreateCall(F);
  }

  if (BuiltinID == clang::AArch64::BI__builtin_sponentry) {
    llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
    return Builder.CreateCall(F);
  }

  if (BuiltinID == clang::AArch64::BI__mulh ||
      BuiltinID == clang::AArch64::BI__umulh) {
    llvm::Type *ResType = ConvertType(E->getType());
    llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);

    bool IsSigned = BuiltinID == clang::AArch64::BI__mulh;
    Value *LHS =
        Builder.CreateIntCast(EmitScalarExpr(E->getArg(0)), Int128Ty, IsSigned);
    Value *RHS =
        Builder.CreateIntCast(EmitScalarExpr(E->getArg(1)), Int128Ty, IsSigned);

    Value *MulResult, *HigherBits;
    if (IsSigned) {
      MulResult = Builder.CreateNSWMul(LHS, RHS);
      HigherBits = Builder.CreateAShr(MulResult, 64);
    } else {
      MulResult = Builder.CreateNUWMul(LHS, RHS);
      HigherBits = Builder.CreateLShr(MulResult, 64);
    }
    HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);

    return HigherBits;
  }
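  // __mulh/__umulh compute the high 64 bits of a 64x64 multiply: the
  // operands are widened to i128, multiplied, and shifted down by 64, so in
  // effect high = (int128_t)a * b >> 64; instruction selection then folds
  // this into a single SMULH/UMULH.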
  if (BuiltinID == AArch64::BI__writex18byte ||
      BuiltinID == AArch64::BI__writex18word ||
      BuiltinID == AArch64::BI__writex18dword ||
      BuiltinID == AArch64::BI__writex18qword) {
    // Process the args first.
    Value *OffsetArg = EmitScalarExpr(E->getArg(0));
    Value *DataArg = EmitScalarExpr(E->getArg(1));

    // Read x18 as i8*, then store the data at x18 + offset.
    llvm::Value *X18 = readX18AsPtr(*this);
    Value *Offset = Builder.CreateZExt(OffsetArg, Int64Ty);
    Value *Ptr = Builder.CreateGEP(Int8Ty, X18, Offset);
    StoreInst *Store =
        Builder.CreateAlignedStore(DataArg, Ptr, CharUnits::One());
    return Store;
  }

  if (BuiltinID == AArch64::BI__readx18byte ||
      BuiltinID == AArch64::BI__readx18word ||
      BuiltinID == AArch64::BI__readx18dword ||
      BuiltinID == AArch64::BI__readx18qword) {
    // Read x18 as i8*, then load the value at x18 + offset.
    llvm::Value *X18 = readX18AsPtr(*this);
    Value *Offset = Builder.CreateZExt(EmitScalarExpr(E->getArg(0)), Int64Ty);
    Value *Ptr = Builder.CreateGEP(Int8Ty, X18, Offset);
    llvm::Type *IntTy = ConvertType(E->getType());
    LoadInst *Load = Builder.CreateAlignedLoad(IntTy, Ptr, CharUnits::One());
    return Load;
  }

  if (BuiltinID == AArch64::BI__addx18byte ||
      BuiltinID == AArch64::BI__addx18word ||
      BuiltinID == AArch64::BI__addx18dword ||
      BuiltinID == AArch64::BI__addx18qword ||
      BuiltinID == AArch64::BI__incx18byte ||
      BuiltinID == AArch64::BI__incx18word ||
      BuiltinID == AArch64::BI__incx18dword ||
      BuiltinID == AArch64::BI__incx18qword) {
    llvm::Type *IntTy;
    bool isIncrement;
    switch (BuiltinID) {
    case AArch64::BI__incx18byte:  IntTy = Int8Ty;  isIncrement = true; break;
    case AArch64::BI__incx18word:  IntTy = Int16Ty; isIncrement = true; break;
    case AArch64::BI__incx18dword: IntTy = Int32Ty; isIncrement = true; break;
    case AArch64::BI__incx18qword: IntTy = Int64Ty; isIncrement = true; break;
    default:
      IntTy = ConvertType(E->getType());
      isIncrement = false;
      break;
    }
    // Load the value at x18 + offset, add 1 (inc forms) or the second
    // argument (add forms), and store it back.
    Value *OffsetArg = EmitScalarExpr(E->getArg(0));
    Value *ValToAdd =
        isIncrement ? ConstantInt::get(IntTy, 1) : EmitScalarExpr(E->getArg(1));

    llvm::Value *X18 = readX18AsPtr(*this);
    Value *Offset = Builder.CreateZExt(OffsetArg, Int64Ty);
    Value *Ptr = Builder.CreateGEP(Int8Ty, X18, Offset);
    LoadInst *Load = Builder.CreateAlignedLoad(IntTy, Ptr, CharUnits::One());
    Value *AddResult = Builder.CreateAdd(Load, ValToAdd);
    StoreInst *Store =
        Builder.CreateAlignedStore(AddResult, Ptr, CharUnits::One());
    return Store;
  }
  if (BuiltinID == AArch64::BI_CopyDoubleFromInt64 ||
      BuiltinID == AArch64::BI_CopyFloatFromInt32 ||
      BuiltinID == AArch64::BI_CopyInt32FromFloat ||
      BuiltinID == AArch64::BI_CopyInt64FromDouble) {
    Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *RetTy = ConvertType(E->getType());
    return Builder.CreateBitCast(Arg, RetTy);
  }
  if (BuiltinID == AArch64::BI_CountLeadingOnes ||
      BuiltinID == AArch64::BI_CountLeadingOnes64 ||
      BuiltinID == AArch64::BI_CountLeadingZeros ||
      BuiltinID == AArch64::BI_CountLeadingZeros64) {
    Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = Arg->getType();

    if (BuiltinID == AArch64::BI_CountLeadingOnes ||
        BuiltinID == AArch64::BI_CountLeadingOnes64)
      Arg = Builder.CreateXor(Arg, Constant::getAllOnesValue(ArgType));

    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
    Value *Result = Builder.CreateCall(F, {Arg, Builder.getInt1(false)});

    if (BuiltinID == AArch64::BI_CountLeadingOnes64 ||
        BuiltinID == AArch64::BI_CountLeadingZeros64)
      Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
    return Result;
  }
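  // _CountLeadingOnes is implemented as ctlz(~x): inverting the argument
  // turns leading ones into leading zeros, which the one ctlz intrinsic then
  // counts; the 64-bit variants truncate the result back to int.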
  if (BuiltinID == AArch64::BI_CountLeadingSigns ||
      BuiltinID == AArch64::BI_CountLeadingSigns64) {
    Value *Arg = EmitScalarExpr(E->getArg(0));

    Function *F = (BuiltinID == AArch64::BI_CountLeadingSigns)
                      ? CGM.getIntrinsic(Intrinsic::aarch64_cls)
                      : CGM.getIntrinsic(Intrinsic::aarch64_cls64);

    Value *Result = Builder.CreateCall(F, Arg, "cls");
    if (BuiltinID == AArch64::BI_CountLeadingSigns64)
      Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
    return Result;
  }
  if (BuiltinID == AArch64::BI_CountOneBits ||
      BuiltinID == AArch64::BI_CountOneBits64) {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    Value *Result = Builder.CreateCall(F, ArgValue);
    if (BuiltinID == AArch64::BI_CountOneBits64)
      Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
    return Result;
  }

  if (BuiltinID == AArch64::BI__prefetch) {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *RW = llvm::ConstantInt::get(Int32Ty, 0);
    Value *Locality = ConstantInt::get(Int32Ty, 3);
    Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
    Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
    return Builder.CreateCall(F, {Address, RW, Locality, Data});
  }

  if (BuiltinID == AArch64::BI__hlt) {
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hlt);
    Builder.CreateCall(F, {EmitScalarExpr(E->getArg(0))});

    // Return 0 for convenience.
    return ConstantInt::get(Builder.getInt32Ty(), 0);
  }
  if (BuiltinID == NEON::BI__builtin_neon_vcvth_bf16_f32)
    return Builder.CreateFPTrunc(
        Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)),
                              Builder.getFloatTy()),
        Builder.getBFloatTy());

  // Handle MSVC intrinsics before argument evaluation to prevent double
  // evaluation.
  if (std::optional<MSVCIntrin> MsvcIntId =
          translateAarch64ToMsvcIntrin(BuiltinID))
    return EmitMSVCBuiltinExpr(*MsvcIntId, E);

  // Some intrinsics are equivalent - if they are use the base intrinsic ID.
  auto It = llvm::find_if(NEONEquivalentIntrinsicMap, [BuiltinID](auto &P) {
    return P.first == BuiltinID;
  });
  if (It != end(NEONEquivalentIntrinsicMap))
    BuiltinID = It->second;
  // Find out if any arguments are required to be integer constant
  // expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  llvm::SmallVector<Value *, 4> Ops;
  Address PtrOp0 = Address::invalid();
  for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
    if (i == 0) {
      switch (BuiltinID) {
      case NEON::BI__builtin_neon_vld1_v:
      case NEON::BI__builtin_neon_vld1q_v:
      case NEON::BI__builtin_neon_vld1_dup_v:
      case NEON::BI__builtin_neon_vld1q_dup_v:
      case NEON::BI__builtin_neon_vld1_lane_v:
      case NEON::BI__builtin_neon_vld1q_lane_v:
      case NEON::BI__builtin_neon_vst1_v:
      case NEON::BI__builtin_neon_vst1q_v:
      case NEON::BI__builtin_neon_vst1_lane_v:
      case NEON::BI__builtin_neon_vst1q_lane_v:
      case NEON::BI__builtin_neon_vldap1_lane_s64:
      case NEON::BI__builtin_neon_vldap1q_lane_s64:
      case NEON::BI__builtin_neon_vstl1_lane_s64:
      case NEON::BI__builtin_neon_vstl1q_lane_s64:
        // Get the alignment for the argument in addition to the value;
        // we'll use it later.
        PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
        Ops.push_back(PtrOp0.emitRawPointer(*this));
        continue;
      }
    }
    Ops.push_back(EmitScalarOrConstFoldImmArg(ICEArguments, i, E));
  }

  auto SISDMap = ArrayRef(AArch64SISDIntrinsicMap);
  const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
      SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);

  if (Builtin) {
    Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
    Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
    assert(Result && "SISD intrinsic should have been handled");
    return Result;
  }
  const Expr *Arg = E->getArg(E->getNumArgs() - 1);
  NeonTypeFlags Type(0);
  if (std::optional<llvm::APSInt> Result =
          Arg->getIntegerConstantExpr(getContext()))
    // Determine the type of this overloaded NEON intrinsic.
    Type = NeonTypeFlags(Result->getZExtValue());

  bool usgn = Type.isUnsigned();
  bool quad = Type.isQuad();

  // Handle non-overloaded intrinsics first.
  switch (BuiltinID) {
  default: break;
  case NEON::BI__builtin_neon_vabsh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
  case NEON::BI__builtin_neon_vaddq_p128: {
    llvm::Type *Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Poly128));
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
    llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
    return Builder.CreateBitCast(Ops[0], Int128Ty);
  }
  case NEON::BI__builtin_neon_vldrq_p128: {
    llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    return Builder.CreateAlignedLoad(Int128Ty, Ptr,
                                     CharUnits::fromQuantity(16));
  }
  case NEON::BI__builtin_neon_vstrq_p128: {
    Value *Ptr = Ops[0];
    return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
  }
  case NEON::BI__builtin_neon_vcvts_f32_u32:
  case NEON::BI__builtin_neon_vcvtd_f64_u64:
    usgn = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vcvts_f32_s32:
  case NEON::BI__builtin_neon_vcvtd_f64_s64: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
    llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
    llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
    Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
    if (usgn)
      return Builder.CreateUIToFP(Ops[0], FTy);
    return Builder.CreateSIToFP(Ops[0], FTy);
  }
  case NEON::BI__builtin_neon_vcvth_f16_u16:
  case NEON::BI__builtin_neon_vcvth_f16_u32:
  case NEON::BI__builtin_neon_vcvth_f16_u64:
    usgn = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vcvth_f16_s16:
  case NEON::BI__builtin_neon_vcvth_f16_s32:
  case NEON::BI__builtin_neon_vcvth_f16_s64: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    llvm::Type *FTy = HalfTy;
    llvm::Type *InTy;
    if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
      InTy = Int64Ty;
    else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
      InTy = Int32Ty;
    else
      InTy = Int16Ty;
    Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
    if (usgn)
      return Builder.CreateUIToFP(Ops[0], FTy);
    return Builder.CreateSIToFP(Ops[0], FTy);
  }
  case NEON::BI__builtin_neon_vcvtah_u16_f16:
  case NEON::BI__builtin_neon_vcvtmh_u16_f16:
  case NEON::BI__builtin_neon_vcvtnh_u16_f16:
  case NEON::BI__builtin_neon_vcvtph_u16_f16:
  case NEON::BI__builtin_neon_vcvth_u16_f16:
  case NEON::BI__builtin_neon_vcvtah_s16_f16:
  case NEON::BI__builtin_neon_vcvtmh_s16_f16:
  case NEON::BI__builtin_neon_vcvtnh_s16_f16:
  case NEON::BI__builtin_neon_vcvtph_s16_f16:
  case NEON::BI__builtin_neon_vcvth_s16_f16: {
    unsigned Int;
    llvm::Type *InTy = Int32Ty;
    llvm::Type *FTy = HalfTy;
    llvm::Type *Tys[2] = {InTy, FTy};
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    switch (BuiltinID) {
    default: llvm_unreachable("missing builtin ID in switch!");
    case NEON::BI__builtin_neon_vcvtah_u16_f16:
      Int = Intrinsic::aarch64_neon_fcvtau; break;
    case NEON::BI__builtin_neon_vcvtmh_u16_f16:
      Int = Intrinsic::aarch64_neon_fcvtmu; break;
    case NEON::BI__builtin_neon_vcvtnh_u16_f16:
      Int = Intrinsic::aarch64_neon_fcvtnu; break;
    case NEON::BI__builtin_neon_vcvtph_u16_f16:
      Int = Intrinsic::aarch64_neon_fcvtpu; break;
    case NEON::BI__builtin_neon_vcvth_u16_f16:
      Int = Intrinsic::aarch64_neon_fcvtzu; break;
    case NEON::BI__builtin_neon_vcvtah_s16_f16:
      Int = Intrinsic::aarch64_neon_fcvtas; break;
    case NEON::BI__builtin_neon_vcvtmh_s16_f16:
      Int = Intrinsic::aarch64_neon_fcvtms; break;
    case NEON::BI__builtin_neon_vcvtnh_s16_f16:
      Int = Intrinsic::aarch64_neon_fcvtns; break;
    case NEON::BI__builtin_neon_vcvtph_s16_f16:
      Int = Intrinsic::aarch64_neon_fcvtps; break;
    case NEON::BI__builtin_neon_vcvth_s16_f16:
      Int = Intrinsic::aarch64_neon_fcvtzs; break;
    }
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vcaleh_f16:
  case NEON::BI__builtin_neon_vcalth_f16:
  case NEON::BI__builtin_neon_vcageh_f16:
  case NEON::BI__builtin_neon_vcagth_f16: {
    unsigned Int;
    llvm::Type *InTy = Int32Ty;
    llvm::Type *FTy = HalfTy;
    llvm::Type *Tys[2] = {InTy, FTy};
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    switch (BuiltinID) {
    default: llvm_unreachable("missing builtin ID in switch!");
    case NEON::BI__builtin_neon_vcageh_f16:
      Int = Intrinsic::aarch64_neon_facge; break;
    case NEON::BI__builtin_neon_vcagth_f16:
      Int = Intrinsic::aarch64_neon_facgt; break;
    case NEON::BI__builtin_neon_vcaleh_f16:
      Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
    case NEON::BI__builtin_neon_vcalth_f16:
      Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
    }
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vcvth_n_s16_f16:
  case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
    unsigned Int;
    llvm::Type *InTy = Int32Ty;
    llvm::Type *FTy = HalfTy;
    llvm::Type *Tys[2] = {InTy, FTy};
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    switch (BuiltinID) {
    default: llvm_unreachable("missing builtin ID in switch!");
    case NEON::BI__builtin_neon_vcvth_n_s16_f16:
      Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
    case NEON::BI__builtin_neon_vcvth_n_u16_f16:
      Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
    }
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vcvth_n_f16_s16:
  case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
    unsigned Int;
    llvm::Type *FTy = HalfTy;
    llvm::Type *InTy = Int32Ty;
    llvm::Type *Tys[2] = {FTy, InTy};
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    switch (BuiltinID) {
    default: llvm_unreachable("missing builtin ID in switch!");
    case NEON::BI__builtin_neon_vcvth_n_f16_s16:
      Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
      Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
      break;
    case NEON::BI__builtin_neon_vcvth_n_f16_u16:
      Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
      Ops[0] = Builder.CreateZExt(Ops[0], InTy);
      break;
    }
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
  }
  case NEON::BI__builtin_neon_vpaddd_s64: {
    auto *Ty = llvm::FixedVectorType::get(Int64Ty, 2);
    Value *Vec = EmitScalarExpr(E->getArg(0));
    // The vector is v2f64, so make sure it's bitcast to a vector of i64.
    Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
    llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
    llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
    Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
    Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
    // Pairwise addition of a v2i64 into a scalar i64.
    return Builder.CreateAdd(Op0, Op1, "vpaddd");
  }
  case NEON::BI__builtin_neon_vpaddd_f64: {
    auto *Ty = llvm::FixedVectorType::get(DoubleTy, 2);
    Value *Vec = EmitScalarExpr(E->getArg(0));
    // The vector is v2f64, so make sure it's bitcast to a vector of f64.
    Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
    llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
    llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
    Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
    Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
    // Pairwise addition of a v2f64 into a scalar f64.
    return Builder.CreateFAdd(Op0, Op1, "vpaddd");
  }
  case NEON::BI__builtin_neon_vpadds_f32: {
    auto *Ty = llvm::FixedVectorType::get(FloatTy, 2);
    Value *Vec = EmitScalarExpr(E->getArg(0));
    // The vector is v2f32, so make sure it's bitcast to a vector of f32.
    Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
    llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
    llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
    Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
    Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
    // Pairwise addition of a v2f32 into a scalar f32.
    return Builder.CreateFAdd(Op0, Op1, "vpaddd");
  }
  case NEON::BI__builtin_neon_vceqzd_s64:
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], ConvertType(E->getCallReturnType(getContext())),
        ICmpInst::ICMP_EQ, "vceqz");
  case NEON::BI__builtin_neon_vceqzd_f64:
  case NEON::BI__builtin_neon_vceqzs_f32:
  case NEON::BI__builtin_neon_vceqzh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], ConvertType(E->getCallReturnType(getContext())),
        ICmpInst::FCMP_OEQ, "vceqz");
  case NEON::BI__builtin_neon_vcgezd_s64:
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], ConvertType(E->getCallReturnType(getContext())),
        ICmpInst::ICMP_SGE, "vcgez");
  case NEON::BI__builtin_neon_vcgezd_f64:
  case NEON::BI__builtin_neon_vcgezs_f32:
  case NEON::BI__builtin_neon_vcgezh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], ConvertType(E->getCallReturnType(getContext())),
        ICmpInst::FCMP_OGE, "vcgez");
  case NEON::BI__builtin_neon_vclezd_s64:
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], ConvertType(E->getCallReturnType(getContext())),
        ICmpInst::ICMP_SLE, "vclez");
  case NEON::BI__builtin_neon_vclezd_f64:
  case NEON::BI__builtin_neon_vclezs_f32:
  case NEON::BI__builtin_neon_vclezh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], ConvertType(E->getCallReturnType(getContext())),
        ICmpInst::FCMP_OLE, "vclez");
  case NEON::BI__builtin_neon_vcgtzd_s64:
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], ConvertType(E->getCallReturnType(getContext())),
        ICmpInst::ICMP_SGT, "vcgtz");
  case NEON::BI__builtin_neon_vcgtzd_f64:
  case NEON::BI__builtin_neon_vcgtzs_f32:
  case NEON::BI__builtin_neon_vcgtzh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], ConvertType(E->getCallReturnType(getContext())),
        ICmpInst::FCMP_OGT, "vcgtz");
  case NEON::BI__builtin_neon_vcltzd_s64:
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], ConvertType(E->getCallReturnType(getContext())),
        ICmpInst::ICMP_SLT, "vcltz");
  case NEON::BI__builtin_neon_vcltzd_f64:
  case NEON::BI__builtin_neon_vcltzs_f32:
  case NEON::BI__builtin_neon_vcltzh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitAArch64CompareBuiltinExpr(
        Ops[0], ConvertType(E->getCallReturnType(getContext())),
        ICmpInst::FCMP_OLT, "vcltz");
  case NEON::BI__builtin_neon_vceqzd_u64: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
    Ops[0] =
        Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
    return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
  }
  case NEON::BI__builtin_neon_vceqd_f64:
  case NEON::BI__builtin_neon_vcled_f64:
  case NEON::BI__builtin_neon_vcltd_f64:
  case NEON::BI__builtin_neon_vcged_f64:
  case NEON::BI__builtin_neon_vcgtd_f64: {
    llvm::CmpInst::Predicate P;
    switch (BuiltinID) {
    default: llvm_unreachable("missing builtin ID in switch!");
    case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
    case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
    case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
    case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
    case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
    }
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
    if (P == llvm::FCmpInst::FCMP_OEQ)
      Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
    else
      Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
    return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
  }
  case NEON::BI__builtin_neon_vceqs_f32:
  case NEON::BI__builtin_neon_vcles_f32:
  case NEON::BI__builtin_neon_vclts_f32:
  case NEON::BI__builtin_neon_vcges_f32:
  case NEON::BI__builtin_neon_vcgts_f32: {
    llvm::CmpInst::Predicate P;
    switch (BuiltinID) {
    default: llvm_unreachable("missing builtin ID in switch!");
    case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
    case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
    case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
    case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
    case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
    }
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
    if (P == llvm::FCmpInst::FCMP_OEQ)
      Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
    else
      Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
    return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
  }
  case NEON::BI__builtin_neon_vceqh_f16:
  case NEON::BI__builtin_neon_vcleh_f16:
  case NEON::BI__builtin_neon_vclth_f16:
  case NEON::BI__builtin_neon_vcgeh_f16:
  case NEON::BI__builtin_neon_vcgth_f16: {
    llvm::CmpInst::Predicate P;
    switch (BuiltinID) {
    default: llvm_unreachable("missing builtin ID in switch!");
    case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
    case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
    case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
    case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
    case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
    }
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
    Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
    if (P == llvm::FCmpInst::FCMP_OEQ)
      Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
    else
      Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
    return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
  }
  case NEON::BI__builtin_neon_vceqd_s64:
  case NEON::BI__builtin_neon_vceqd_u64:
  case NEON::BI__builtin_neon_vcgtd_s64:
  case NEON::BI__builtin_neon_vcgtd_u64:
  case NEON::BI__builtin_neon_vcltd_s64:
  case NEON::BI__builtin_neon_vcltd_u64:
  case NEON::BI__builtin_neon_vcged_u64:
  case NEON::BI__builtin_neon_vcged_s64:
  case NEON::BI__builtin_neon_vcled_u64:
  case NEON::BI__builtin_neon_vcled_s64: {
    llvm::CmpInst::Predicate P;
    switch (BuiltinID) {
    default: llvm_unreachable("missing builtin ID in switch!");
    case NEON::BI__builtin_neon_vceqd_s64:
    case NEON::BI__builtin_neon_vceqd_u64: P = llvm::ICmpInst::ICMP_EQ;  break;
    case NEON::BI__builtin_neon_vcgtd_s64: P = llvm::ICmpInst::ICMP_SGT; break;
    case NEON::BI__builtin_neon_vcgtd_u64: P = llvm::ICmpInst::ICMP_UGT; break;
    case NEON::BI__builtin_neon_vcltd_s64: P = llvm::ICmpInst::ICMP_SLT; break;
    case NEON::BI__builtin_neon_vcltd_u64: P = llvm::ICmpInst::ICMP_ULT; break;
    case NEON::BI__builtin_neon_vcged_u64: P = llvm::ICmpInst::ICMP_UGE; break;
    case NEON::BI__builtin_neon_vcged_s64: P = llvm::ICmpInst::ICMP_SGE; break;
    case NEON::BI__builtin_neon_vcled_u64: P = llvm::ICmpInst::ICMP_ULE; break;
    case NEON::BI__builtin_neon_vcled_s64: P = llvm::ICmpInst::ICMP_SLE; break;
    }
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
    Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
    return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
  }
  case NEON::BI__builtin_neon_vtstd_s64:
  case NEON::BI__builtin_neon_vtstd_u64: {
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
    Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
    Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
                                llvm::Constant::getNullValue(Int64Ty));
    return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
  }
  case NEON::BI__builtin_neon_vset_lane_i8:
  case NEON::BI__builtin_neon_vset_lane_i16:
  case NEON::BI__builtin_neon_vset_lane_i32:
  case NEON::BI__builtin_neon_vset_lane_i64:
  case NEON::BI__builtin_neon_vset_lane_bf16:
  case NEON::BI__builtin_neon_vset_lane_f32:
  case NEON::BI__builtin_neon_vsetq_lane_i8:
  case NEON::BI__builtin_neon_vsetq_lane_i16:
  case NEON::BI__builtin_neon_vsetq_lane_i32:
  case NEON::BI__builtin_neon_vsetq_lane_i64:
  case NEON::BI__builtin_neon_vsetq_lane_bf16:
  case NEON::BI__builtin_neon_vsetq_lane_f32:
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  case NEON::BI__builtin_neon_vset_lane_f64:
    // The vector type needs a cast for the v1f64 variant.
    Ops[1] =
        Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1));
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  case NEON::BI__builtin_neon_vset_lane_mf8:
  case NEON::BI__builtin_neon_vsetq_lane_mf8:
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    // The input vector type needs a cast to scalar type.
    Ops[0] =
        Builder.CreateBitCast(Ops[0], llvm::Type::getInt8Ty(getLLVMContext()));
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
  case NEON::BI__builtin_neon_vsetq_lane_f64:
    // The vector type needs a cast for the v2f64 variant.
    Ops[1] =
        Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2));
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");

  case NEON::BI__builtin_neon_vget_lane_i8:
  case NEON::BI__builtin_neon_vdupb_lane_i8:
    Ops[0] =
        Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vgetq_lane_i8:
  case NEON::BI__builtin_neon_vdupb_laneq_i8:
    Ops[0] =
        Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  case NEON::BI__builtin_neon_vget_lane_mf8:
  case NEON::BI__builtin_neon_vdupb_lane_mf8:
  case NEON::BI__builtin_neon_vgetq_lane_mf8:
  case NEON::BI__builtin_neon_vdupb_laneq_mf8:
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vget_lane_i16:
  case NEON::BI__builtin_neon_vduph_lane_i16:
    Ops[0] =
        Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vgetq_lane_i16:
  case NEON::BI__builtin_neon_vduph_laneq_i16:
    Ops[0] =
        Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  case NEON::BI__builtin_neon_vget_lane_i32:
  case NEON::BI__builtin_neon_vdups_lane_i32:
    Ops[0] =
        Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vdups_lane_f32:
    Ops[0] =
        Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vdups_lane");
  case NEON::BI__builtin_neon_vgetq_lane_i32:
  case NEON::BI__builtin_neon_vdups_laneq_i32:
    Ops[0] =
        Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  case NEON::BI__builtin_neon_vget_lane_i64:
  case NEON::BI__builtin_neon_vdupd_lane_i64:
    Ops[0] =
        Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vdupd_lane_f64:
    Ops[0] =
        Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vdupd_lane");
  case NEON::BI__builtin_neon_vgetq_lane_i64:
  case NEON::BI__builtin_neon_vdupd_laneq_i64:
    Ops[0] =
        Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  case NEON::BI__builtin_neon_vget_lane_f32:
    Ops[0] =
        Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vget_lane_f64:
    Ops[0] =
        Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  case NEON::BI__builtin_neon_vgetq_lane_f32:
  case NEON::BI__builtin_neon_vdups_laneq_f32:
    Ops[0] =
        Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  case NEON::BI__builtin_neon_vgetq_lane_f64:
  case NEON::BI__builtin_neon_vdupd_laneq_f64:
    Ops[0] =
        Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2));
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  case NEON::BI__builtin_neon_vaddh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
  case NEON::BI__builtin_neon_vsubh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
  case NEON::BI__builtin_neon_vmulh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
  case NEON::BI__builtin_neon_vdivh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
  case NEON::BI__builtin_neon_vfmah_f16:
    // NEON intrinsic puts accumulator first, unlike the LLVM fma.
    return emitCallMaybeConstrainedFPBuiltin(
        *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
        {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
  case NEON::BI__builtin_neon_vfmsh_f16: {
    Value *Neg = Builder.CreateFNeg(EmitScalarExpr(E->getArg(1)), "vsubh");

    // NEON intrinsic puts accumulator first, unlike the LLVM fma.
    return emitCallMaybeConstrainedFPBuiltin(
        *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
        {Neg, EmitScalarExpr(E->getArg(2)), Ops[0]});
  }
  case NEON::BI__builtin_neon_vaddd_s64:
  case NEON::BI__builtin_neon_vaddd_u64:
    return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
  case NEON::BI__builtin_neon_vsubd_s64:
  case NEON::BI__builtin_neon_vsubd_u64:
    return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
  case NEON::BI__builtin_neon_vqdmlalh_s16:
  case NEON::BI__builtin_neon_vqdmlslh_s16: {
    SmallVector<Value *, 2> ProductOps;
    ProductOps.push_back(vectorWrapScalar16(Ops[1]));
    ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
    auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
    Ops[1] =
        EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
                     ProductOps, "vqdmlXl");
    Constant *CI = ConstantInt::get(SizeTy, 0);
    Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");

    unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
                            ? Intrinsic::aarch64_neon_sqadd
                            : Intrinsic::aarch64_neon_sqsub;
    return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
  }
  case NEON::BI__builtin_neon_vqshlud_n_s64: {
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
    return EmitNeonCall(
        CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
        Ops, "vqshlu_n");
  }
  case NEON::BI__builtin_neon_vqshld_n_u64:
  case NEON::BI__builtin_neon_vqshld_n_s64: {
    unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
                       ? Intrinsic::aarch64_neon_uqshl
                       : Intrinsic::aarch64_neon_sqshl;
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
    return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
  }
  case NEON::BI__builtin_neon_vrshrd_n_u64:
  case NEON::BI__builtin_neon_vrshrd_n_s64: {
    unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
                       ? Intrinsic::aarch64_neon_urshl
                       : Intrinsic::aarch64_neon_srshl;
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
    Ops[1] = ConstantInt::get(Int64Ty, -SV);
    return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
  }
  case NEON::BI__builtin_neon_vrsrad_n_u64:
  case NEON::BI__builtin_neon_vrsrad_n_s64: {
    unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
                       ? Intrinsic::aarch64_neon_urshl
                       : Intrinsic::aarch64_neon_srshl;
    Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
    Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
    Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
                                {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
    return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
  }
  case NEON::BI__builtin_neon_vshld_n_s64:
  case NEON::BI__builtin_neon_vshld_n_u64: {
    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
    return Builder.CreateShl(
        Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
  }
  case NEON::BI__builtin_neon_vshrd_n_s64: {
    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
    return Builder.CreateAShr(
        Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
                                                   Amt->getZExtValue())),
        "shrd_n");
  }
  case NEON::BI__builtin_neon_vshrd_n_u64: {
    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
    uint64_t ShiftAmt = Amt->getZExtValue();
    // Right-shifting an unsigned value by its size yields 0.
    if (ShiftAmt == 64)
      return ConstantInt::get(Int64Ty, 0);
    return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
                              "shrd_n");
  }
  case NEON::BI__builtin_neon_vsrad_n_s64: {
    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
    Ops[1] = Builder.CreateAShr(
        Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
                                                   Amt->getZExtValue())),
        "shrd_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  }
  case NEON::BI__builtin_neon_vsrad_n_u64: {
    llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
    uint64_t ShiftAmt = Amt->getZExtValue();
    // Right-shifting an unsigned value by its size yields 0.
    // As Op + 0 = Op, return Ops[0] directly.
    if (ShiftAmt == 64)
      return Ops[0];
    Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
                                "shrd_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  }
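  // Note the shift-amount handling above: an IR shift by >= the bit width is
  // poison, so arithmetic shifts clamp the amount to 63 (sign fill) and
  // logical shifts by 64 fold to the known result (0, or the unmodified
  // accumulator).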
  case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
  case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
  case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
  case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
    Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
                                          "lane");
    SmallVector<Value *, 2> ProductOps;
    ProductOps.push_back(vectorWrapScalar16(Ops[1]));
    ProductOps.push_back(vectorWrapScalar16(Ops[2]));
    auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
    Ops[1] =
        EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
                     ProductOps, "vqdmlXl");
    Constant *CI = ConstantInt::get(SizeTy, 0);
    Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
    Ops.pop_back();

    unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
                       BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
                          ? Intrinsic::aarch64_neon_sqadd
                          : Intrinsic::aarch64_neon_sqsub;
    return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
  }
  case NEON::BI__builtin_neon_vqdmlals_s32:
  case NEON::BI__builtin_neon_vqdmlsls_s32: {
    SmallVector<Value *, 2> ProductOps;
    ProductOps.push_back(Ops[1]);
    ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
    Ops[1] =
        EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
                     ProductOps, "vqdmlXl");

    unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
                            ? Intrinsic::aarch64_neon_sqadd
                            : Intrinsic::aarch64_neon_sqsub;
    return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
  }
  case NEON::BI__builtin_neon_vqdmlals_lane_s32:
  case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
  case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
  case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
    Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
                                          "lane");
    SmallVector<Value *, 2> ProductOps;
    ProductOps.push_back(Ops[1]);
    ProductOps.push_back(Ops[2]);
    Ops[1] =
        EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
                     ProductOps, "vqdmlXl");
    Ops.pop_back();

    unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
                       BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
                          ? Intrinsic::aarch64_neon_sqadd
                          : Intrinsic::aarch64_neon_sqsub;
    return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
  }
  case NEON::BI__builtin_neon_vget_lane_bf16:
  case NEON::BI__builtin_neon_vduph_lane_bf16:
  case NEON::BI__builtin_neon_vduph_lane_f16: {
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vget_lane");
  }
  case NEON::BI__builtin_neon_vgetq_lane_bf16:
  case NEON::BI__builtin_neon_vduph_laneq_bf16:
  case NEON::BI__builtin_neon_vduph_laneq_f16: {
    return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
                                        "vgetq_lane");
  }
  case NEON::BI__builtin_neon_vcvt_bf16_f32: {
    llvm::Type *V4F32 = FixedVectorType::get(Builder.getFloatTy(), 4);
    llvm::Type *V4BF16 = FixedVectorType::get(Builder.getBFloatTy(), 4);
    return Builder.CreateFPTrunc(Builder.CreateBitCast(Ops[0], V4F32), V4BF16);
  }
  case NEON::BI__builtin_neon_vcvtq_low_bf16_f32: {
    SmallVector<int, 16> ConcatMask(8);
    std::iota(ConcatMask.begin(), ConcatMask.end(), 0);
    llvm::Type *V4F32 = FixedVectorType::get(Builder.getFloatTy(), 4);
    llvm::Type *V4BF16 = FixedVectorType::get(Builder.getBFloatTy(), 4);
    llvm::Value *Trunc =
        Builder.CreateFPTrunc(Builder.CreateBitCast(Ops[0], V4F32), V4BF16);
    return Builder.CreateShuffleVector(
        Trunc, ConstantAggregateZero::get(V4BF16), ConcatMask);
  }
  case NEON::BI__builtin_neon_vcvtq_high_bf16_f32: {
    SmallVector<int, 16> ConcatMask(8);
    std::iota(ConcatMask.begin(), ConcatMask.end(), 0);
    SmallVector<int, 16> LoMask(4);
    std::iota(LoMask.begin(), LoMask.end(), 0);
    llvm::Type *V4F32 = FixedVectorType::get(Builder.getFloatTy(), 4);
    llvm::Type *V4BF16 = FixedVectorType::get(Builder.getBFloatTy(), 4);
    llvm::Type *V8BF16 = FixedVectorType::get(Builder.getBFloatTy(), 8);
    llvm::Value *Inactive = Builder.CreateShuffleVector(
        Builder.CreateBitCast(Ops[0], V8BF16), LoMask);
    llvm::Value *Trunc =
        Builder.CreateFPTrunc(Builder.CreateBitCast(Ops[1], V4F32), V4BF16);
    return Builder.CreateShuffleVector(Inactive, Trunc, ConcatMask);
  }
  case clang::AArch64::BI_InterlockedAdd:
  case clang::AArch64::BI_InterlockedAdd_acq:
  case clang::AArch64::BI_InterlockedAdd_rel:
  case clang::AArch64::BI_InterlockedAdd_nf:
  case clang::AArch64::BI_InterlockedAdd64:
  case clang::AArch64::BI_InterlockedAdd64_acq:
  case clang::AArch64::BI_InterlockedAdd64_rel:
  case clang::AArch64::BI_InterlockedAdd64_nf: {
    Address DestAddr = CheckAtomicAlignment(*this, E);
    Value *Val = EmitScalarExpr(E->getArg(1));
    llvm::AtomicOrdering Ordering;
    switch (BuiltinID) {
    case clang::AArch64::BI_InterlockedAdd:
    case clang::AArch64::BI_InterlockedAdd64:
      Ordering = llvm::AtomicOrdering::SequentiallyConsistent;
      break;
    case clang::AArch64::BI_InterlockedAdd_acq:
    case clang::AArch64::BI_InterlockedAdd64_acq:
      Ordering = llvm::AtomicOrdering::Acquire;
      break;
    case clang::AArch64::BI_InterlockedAdd_rel:
    case clang::AArch64::BI_InterlockedAdd64_rel:
      Ordering = llvm::AtomicOrdering::Release;
      break;
    case clang::AArch64::BI_InterlockedAdd_nf:
    case clang::AArch64::BI_InterlockedAdd64_nf:
      Ordering = llvm::AtomicOrdering::Monotonic;
      break;
    default:
      llvm_unreachable("missing builtin ID in switch!");
    }
    AtomicRMWInst *RMWI =
        Builder.CreateAtomicRMW(AtomicRMWInst::Add, DestAddr, Val, Ordering);
    return Builder.CreateAdd(RMWI, Val);
  }
  }
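  // (In the _InterlockedAdd lowering above, atomicrmw add returns the *old*
  // value while MSVC's _InterlockedAdd family returns the *new* value, hence
  // the extra CreateAdd on the result.)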
  llvm::FixedVectorType *VTy = GetNeonType(this, Type);
  llvm::Type *Ty = VTy;
  if (!Ty)
    return nullptr;

  // Not all intrinsics handled by the common case work for AArch64 yet, so
  // only defer to common code if it's been added to our special map.
  Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
                                        AArch64SIMDIntrinsicsProvenSorted);
  if (Builtin)
    return EmitCommonNeonBuiltinExpr(
        Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
        Builtin->NameHint, Builtin->TypeModifier, E, Ops,
        /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);

  if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
    return V;

  unsigned Int;
  bool ExtractLow = false;
  bool ExtendLaneArg = false;
  switch (BuiltinID) {
  default: return nullptr;
  case NEON::BI__builtin_neon_vbsl_v:
  case NEON::BI__builtin_neon_vbslq_v: {
    llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
    Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
    Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
    Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");

    Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
    Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
    Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
    return Builder.CreateBitCast(Ops[0], Ty);
  }
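  // vbsl (bitwise select) is open-coded with the classic identity
  // result = (mask & a) | (~mask & b) on the integer-typed vectors, then
  // cast back to the original element type.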
  case NEON::BI__builtin_neon_vfma_lane_v:
  case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
    // The ARM builtins (and instructions) have the addend as the first
    // operand, but the 'fma' intrinsics have it last. Swap it around here.
    Value *Addend = Ops[0];
    Value *Multiplicand = Ops[1];
    Value *LaneSource = Ops[2];
    Ops[0] = Multiplicand;
    Ops[1] = LaneSource;
    Ops[2] = Addend;

    // Now adjust things to handle the lane access.
    auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v
                         ? llvm::FixedVectorType::get(VTy->getElementType(),
                                                      VTy->getNumElements() / 2)
                         : VTy;
    llvm::Constant *cst = cast<Constant>(Ops[3]);
    Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst);
    Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
    Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");

    Ops.pop_back();
    Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma
                                       : Intrinsic::fma;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
  }
  case NEON::BI__builtin_neon_vfma_laneq_v: {
    auto *VTy = cast<llvm::FixedVectorType>(Ty);
    // v1f64 fma should be mapped to Neon scalar f64 fma
    if (VTy && VTy->getElementType() == DoubleTy) {
      Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
      Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
      llvm::FixedVectorType *VTy =
          GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
      Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
      Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
      Value *Result = emitCallMaybeConstrainedFPBuiltin(
          *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma,
          DoubleTy, {Ops[1], Ops[2], Ops[0]});
      return Builder.CreateBitCast(Result, Ty);
    }
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);

    auto *STy = llvm::FixedVectorType::get(VTy->getElementType(),
                                           VTy->getNumElements() * 2);
    Ops[2] = Builder.CreateBitCast(Ops[2], STy);
    Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(),
                                               cast<ConstantInt>(Ops[3]));
    Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");

    return emitCallMaybeConstrainedFPBuiltin(
        *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
        {Ops[2], Ops[1], Ops[0]});
  }
  case NEON::BI__builtin_neon_vfmaq_laneq_v: {
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);

    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
    return emitCallMaybeConstrainedFPBuiltin(
        *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
        {Ops[2], Ops[1], Ops[0]});
  }
  case NEON::BI__builtin_neon_vfmah_lane_f16:
  case NEON::BI__builtin_neon_vfmas_lane_f32:
  case NEON::BI__builtin_neon_vfmah_laneq_f16:
  case NEON::BI__builtin_neon_vfmas_laneq_f32:
  case NEON::BI__builtin_neon_vfmad_lane_f64:
  case NEON::BI__builtin_neon_vfmad_laneq_f64: {
    Ops.push_back(EmitScalarExpr(E->getArg(3)));
    llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
    Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
    return emitCallMaybeConstrainedFPBuiltin(
        *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
        {Ops[1], Ops[2], Ops[0]});
  }
  case NEON::BI__builtin_neon_vmull_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
    if (Type.isPoly())
      Int = Intrinsic::aarch64_neon_pmull;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
  case NEON::BI__builtin_neon_vmax_v:
  case NEON::BI__builtin_neon_vmaxq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
  case NEON::BI__builtin_neon_vmaxh_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Int = Intrinsic::aarch64_neon_fmax;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
  }
  case NEON::BI__builtin_neon_vmin_v:
  case NEON::BI__builtin_neon_vminq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
  case NEON::BI__builtin_neon_vminh_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Int = Intrinsic::aarch64_neon_fmin;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
  }
  case NEON::BI__builtin_neon_vabd_v:
  case NEON::BI__builtin_neon_vabdq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
  case NEON::BI__builtin_neon_vpadal_v:
  case NEON::BI__builtin_neon_vpadalq_v: {
    unsigned ArgElts = VTy->getNumElements();
    llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
    unsigned BitWidth = EltTy->getBitWidth();
    auto *ArgTy = llvm::FixedVectorType::get(
        llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts);
    llvm::Type *Tys[2] = { VTy, ArgTy };
    Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
    SmallVector<llvm::Value *, 1> TmpOps;
    TmpOps.push_back(Ops[1]);
    Function *F = CGM.getIntrinsic(Int, Tys);
    llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
    llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
    return Builder.CreateAdd(tmp, addend);
  }
  case NEON::BI__builtin_neon_vpmin_v:
  case NEON::BI__builtin_neon_vpminq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
  case NEON::BI__builtin_neon_vpmax_v:
  case NEON::BI__builtin_neon_vpmaxq_v:
    // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
    Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
    if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
  case NEON::BI__builtin_neon_vminnm_v:
  case NEON::BI__builtin_neon_vminnmq_v:
    Int = Intrinsic::aarch64_neon_fminnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
  case NEON::BI__builtin_neon_vminnmh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Int = Intrinsic::aarch64_neon_fminnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
  case NEON::BI__builtin_neon_vmaxnm_v:
  case NEON::BI__builtin_neon_vmaxnmq_v:
    Int = Intrinsic::aarch64_neon_fmaxnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
  case NEON::BI__builtin_neon_vmaxnmh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    Int = Intrinsic::aarch64_neon_fmaxnm;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
  case NEON::BI__builtin_neon_vrecpss_f32: {
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
                        Ops, "vrecps");
  }
  case NEON::BI__builtin_neon_vrecpsd_f64:
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
                        Ops, "vrecps");
  case NEON::BI__builtin_neon_vrecpsh_f16:
    Ops.push_back(EmitScalarExpr(E->getArg(1)));
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
                        Ops, "vrecps");
  case NEON::BI__builtin_neon_vqshrun_n_v:
    Int = Intrinsic::aarch64_neon_sqshrun;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
  case NEON::BI__builtin_neon_vqrshrun_n_v:
    Int = Intrinsic::aarch64_neon_sqrshrun;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
  case NEON::BI__builtin_neon_vqshrn_n_v:
    Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
  case NEON::BI__builtin_neon_vrshrn_n_v:
    Int = Intrinsic::aarch64_neon_rshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
  case NEON::BI__builtin_neon_vqrshrn_n_v:
    Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
  case NEON::BI__builtin_neon_vrndah_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_round
              : Intrinsic::round;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
  }
  case NEON::BI__builtin_neon_vrnda_v:
  case NEON::BI__builtin_neon_vrndaq_v: {
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_round
              : Intrinsic::round;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
  }
  case NEON::BI__builtin_neon_vrndih_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_nearbyint
              : Intrinsic::nearbyint;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
  }
  case NEON::BI__builtin_neon_vrndmh_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_floor
              : Intrinsic::floor;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
  }
  case NEON::BI__builtin_neon_vrndm_v:
  case NEON::BI__builtin_neon_vrndmq_v: {
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_floor
              : Intrinsic::floor;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
  }
  case NEON::BI__builtin_neon_vrndnh_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_roundeven
              : Intrinsic::roundeven;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
  }
  case NEON::BI__builtin_neon_vrndn_v:
  case NEON::BI__builtin_neon_vrndnq_v: {
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_roundeven
              : Intrinsic::roundeven;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
  }
  case NEON::BI__builtin_neon_vrndns_f32: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_roundeven
              : Intrinsic::roundeven;
    return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
  }
  case NEON::BI__builtin_neon_vrndph_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_ceil
              : Intrinsic::ceil;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
  }
  case NEON::BI__builtin_neon_vrndp_v:
  case NEON::BI__builtin_neon_vrndpq_v: {
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_ceil
              : Intrinsic::ceil;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
  }
  case NEON::BI__builtin_neon_vrndxh_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_rint
              : Intrinsic::rint;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
  }
  case NEON::BI__builtin_neon_vrndx_v:
  case NEON::BI__builtin_neon_vrndxq_v: {
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_rint
              : Intrinsic::rint;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
  }
  case NEON::BI__builtin_neon_vrndh_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_trunc
              : Intrinsic::trunc;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
  }
  case NEON::BI__builtin_neon_vrnd32x_f32:
  case NEON::BI__builtin_neon_vrnd32xq_f32:
  case NEON::BI__builtin_neon_vrnd32x_f64:
  case NEON::BI__builtin_neon_vrnd32xq_f64: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::aarch64_neon_frint32x;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32x");
  }
  case NEON::BI__builtin_neon_vrnd32z_f32:
  case NEON::BI__builtin_neon_vrnd32zq_f32:
  case NEON::BI__builtin_neon_vrnd32z_f64:
  case NEON::BI__builtin_neon_vrnd32zq_f64: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::aarch64_neon_frint32z;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32z");
  }
  case NEON::BI__builtin_neon_vrnd64x_f32:
  case NEON::BI__builtin_neon_vrnd64xq_f32:
  case NEON::BI__builtin_neon_vrnd64x_f64:
  case NEON::BI__builtin_neon_vrnd64xq_f64: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::aarch64_neon_frint64x;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64x");
  }
  case NEON::BI__builtin_neon_vrnd64z_f32:
  case NEON::BI__builtin_neon_vrnd64zq_f32:
  case NEON::BI__builtin_neon_vrnd64z_f64:
  case NEON::BI__builtin_neon_vrnd64zq_f64: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Intrinsic::aarch64_neon_frint64z;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z");
  }
  case NEON::BI__builtin_neon_vrnd_v:
  case NEON::BI__builtin_neon_vrndq_v: {
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_trunc
              : Intrinsic::trunc;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
  }
  case NEON::BI__builtin_neon_vcvt_f64_v:
  case NEON::BI__builtin_neon_vcvtq_f64_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
    return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
                : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
  case NEON::BI__builtin_neon_vcvt_f64_f32: {
    assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
           "unexpected vcvt_f64_f32 builtin");
    NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));

    return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
  }
  case NEON::BI__builtin_neon_vcvt_f32_f64: {
    assert(Type.getEltType() == NeonTypeFlags::Float32 &&
           "unexpected vcvt_f32_f64 builtin");
    NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
    Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));

    return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
  }
  case NEON::BI__builtin_neon_vcvt_s32_v:
  case NEON::BI__builtin_neon_vcvt_u32_v:
  case NEON::BI__builtin_neon_vcvt_s64_v:
  case NEON::BI__builtin_neon_vcvt_u64_v:
  case NEON::BI__builtin_neon_vcvt_s16_f16:
  case NEON::BI__builtin_neon_vcvt_u16_f16:
  case NEON::BI__builtin_neon_vcvtq_s32_v:
  case NEON::BI__builtin_neon_vcvtq_u32_v:
  case NEON::BI__builtin_neon_vcvtq_s64_v:
  case NEON::BI__builtin_neon_vcvtq_u64_v:
  case NEON::BI__builtin_neon_vcvtq_s16_f16:
  case NEON::BI__builtin_neon_vcvtq_u16_f16: {
    Int =
        usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs;
    llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz");
  }
  case NEON::BI__builtin_neon_vcvta_s16_f16:
  case NEON::BI__builtin_neon_vcvta_u16_f16:
  case NEON::BI__builtin_neon_vcvta_s32_v:
  case NEON::BI__builtin_neon_vcvtaq_s16_f16:
  case NEON::BI__builtin_neon_vcvtaq_s32_v:
  case NEON::BI__builtin_neon_vcvta_u32_v:
  case NEON::BI__builtin_neon_vcvtaq_u16_f16:
  case NEON::BI__builtin_neon_vcvtaq_u32_v:
  case NEON::BI__builtin_neon_vcvta_s64_v:
  case NEON::BI__builtin_neon_vcvtaq_s64_v:
  case NEON::BI__builtin_neon_vcvta_u64_v:
  case NEON::BI__builtin_neon_vcvtaq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
  }
  case NEON::BI__builtin_neon_vcvtm_s16_f16:
  case NEON::BI__builtin_neon_vcvtm_s32_v:
  case NEON::BI__builtin_neon_vcvtmq_s16_f16:
  case NEON::BI__builtin_neon_vcvtmq_s32_v:
  case NEON::BI__builtin_neon_vcvtm_u16_f16:
  case NEON::BI__builtin_neon_vcvtm_u32_v:
  case NEON::BI__builtin_neon_vcvtmq_u16_f16:
  case NEON::BI__builtin_neon_vcvtmq_u32_v:
  case NEON::BI__builtin_neon_vcvtm_s64_v:
  case NEON::BI__builtin_neon_vcvtmq_s64_v:
  case NEON::BI__builtin_neon_vcvtm_u64_v:
  case NEON::BI__builtin_neon_vcvtmq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
  }
  case NEON::BI__builtin_neon_vcvtn_s16_f16:
  case NEON::BI__builtin_neon_vcvtn_s32_v:
  case NEON::BI__builtin_neon_vcvtnq_s16_f16:
  case NEON::BI__builtin_neon_vcvtnq_s32_v:
  case NEON::BI__builtin_neon_vcvtn_u16_f16:
  case NEON::BI__builtin_neon_vcvtn_u32_v:
  case NEON::BI__builtin_neon_vcvtnq_u16_f16:
  case NEON::BI__builtin_neon_vcvtnq_u32_v:
  case NEON::BI__builtin_neon_vcvtn_s64_v:
  case NEON::BI__builtin_neon_vcvtnq_s64_v:
  case NEON::BI__builtin_neon_vcvtn_u64_v:
  case NEON::BI__builtin_neon_vcvtnq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
  }
  case NEON::BI__builtin_neon_vcvtp_s16_f16:
  case NEON::BI__builtin_neon_vcvtp_s32_v:
  case NEON::BI__builtin_neon_vcvtpq_s16_f16:
  case NEON::BI__builtin_neon_vcvtpq_s32_v:
  case NEON::BI__builtin_neon_vcvtp_u16_f16:
  case NEON::BI__builtin_neon_vcvtp_u32_v:
  case NEON::BI__builtin_neon_vcvtpq_u16_f16:
  case NEON::BI__builtin_neon_vcvtpq_u32_v:
  case NEON::BI__builtin_neon_vcvtp_s64_v:
  case NEON::BI__builtin_neon_vcvtpq_s64_v:
  case NEON::BI__builtin_neon_vcvtp_u64_v:
  case NEON::BI__builtin_neon_vcvtpq_u64_v: {
    Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
    llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
  }
  case NEON::BI__builtin_neon_vmulx_v:
  case NEON::BI__builtin_neon_vmulxq_v: {
    Int = Intrinsic::aarch64_neon_fmulx;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
  }
  case NEON::BI__builtin_neon_vmulxh_lane_f16:
  case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
    // vmulx_lane should be mapped to Neon scalar mulx after
    // extracting the scalar element
    Ops.push_back(EmitScalarExpr(E->getArg(2)));
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
    Ops.pop_back();
    Int = Intrinsic::aarch64_neon_fmulx;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
  }
  case NEON::BI__builtin_neon_vmul_lane_v:
  case NEON::BI__builtin_neon_vmul_laneq_v: {
    // v1f64 vmul_lane should be mapped to Neon scalar mul lane
    bool Quad = false;
    if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
      Quad = true;
    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
    llvm::FixedVectorType *VTy =
        GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
    Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
    return Builder.CreateBitCast(Result, Ty);
  }
  case NEON::BI__builtin_neon_vnegd_s64:
    return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
  case NEON::BI__builtin_neon_vnegh_f16:
    return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
  case NEON::BI__builtin_neon_vpmaxnm_v:
  case NEON::BI__builtin_neon_vpmaxnmq_v: {
    Int = Intrinsic::aarch64_neon_fmaxnmp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
  }
  case NEON::BI__builtin_neon_vpminnm_v:
  case NEON::BI__builtin_neon_vpminnmq_v: {
    Int = Intrinsic::aarch64_neon_fminnmp;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
  }
  case NEON::BI__builtin_neon_vsqrth_f16: {
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_sqrt
              : Intrinsic::sqrt;
    return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
  }
  case NEON::BI__builtin_neon_vsqrt_v:
  case NEON::BI__builtin_neon_vsqrtq_v: {
    Int = Builder.getIsFPConstrained()
              ? Intrinsic::experimental_constrained_sqrt
              : Intrinsic::sqrt;
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
  }
  case NEON::BI__builtin_neon_vrbit_v:
  case NEON::BI__builtin_neon_vrbitq_v: {
    Int = Intrinsic::bitreverse;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
  }
  case NEON::BI__builtin_neon_vmaxv_f16: {
    Int = Intrinsic::aarch64_neon_fmaxv;
    Ty = HalfTy;
    VTy = llvm::FixedVectorType::get(HalfTy, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
  }
  case NEON::BI__builtin_neon_vmaxvq_f16: {
    Int = Intrinsic::aarch64_neon_fmaxv;
    Ty = HalfTy;
    VTy = llvm::FixedVectorType::get(HalfTy, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
  }
  case NEON::BI__builtin_neon_vminv_f16: {
    Int = Intrinsic::aarch64_neon_fminv;
    Ty = HalfTy;
    VTy = llvm::FixedVectorType::get(HalfTy, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
  }
  case NEON::BI__builtin_neon_vminvq_f16: {
    Int = Intrinsic::aarch64_neon_fminv;
    Ty = HalfTy;
    VTy = llvm::FixedVectorType::get(HalfTy, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
  }
  case NEON::BI__builtin_neon_vmaxnmv_f16: {
    Int = Intrinsic::aarch64_neon_fmaxnmv;
    Ty = HalfTy;
    VTy = llvm::FixedVectorType::get(HalfTy, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
  }
  case NEON::BI__builtin_neon_vmaxnmvq_f16: {
    Int = Intrinsic::aarch64_neon_fmaxnmv;
    Ty = HalfTy;
    VTy = llvm::FixedVectorType::get(HalfTy, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
  }
  case NEON::BI__builtin_neon_vminnmv_f16: {
    Int = Intrinsic::aarch64_neon_fminnmv;
    Ty = HalfTy;
    VTy = llvm::FixedVectorType::get(HalfTy, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
  }
  case NEON::BI__builtin_neon_vminnmvq_f16: {
    Int = Intrinsic::aarch64_neon_fminnmv;
    Ty = HalfTy;
    VTy = llvm::FixedVectorType::get(HalfTy, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
  }
  case NEON::BI__builtin_neon_vmul_n_f64: {
    Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
    Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
    return Builder.CreateFMul(Ops[0], RHS);
  }
  case NEON::BI__builtin_neon_vaddlv_u8: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddlv_u16: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vaddlvq_u8: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddlvq_u16: {
    Int = Intrinsic::aarch64_neon_uaddlv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vaddlv_s8: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int8Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddlv_s16: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int16Ty, 4);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vaddlvq_s8: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int8Ty, 16);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
    return Builder.CreateTrunc(Ops[0], Int16Ty);
  }
  case NEON::BI__builtin_neon_vaddlvq_s16: {
    Int = Intrinsic::aarch64_neon_saddlv;
    Ty = Int32Ty;
    VTy = llvm::FixedVectorType::get(Int16Ty, 8);
    llvm::Type *Tys[2] = { Ty, VTy };
    Ops.push_back(EmitScalarExpr(E->getArg(0)));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
  }
  case NEON::BI__builtin_neon_vsri_n_v:
  case NEON::BI__builtin_neon_vsriq_n_v: {
    Int = Intrinsic::aarch64_neon_vsri;
    llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
    return EmitNeonCall(Intrin, Ops, "vsri_n");
  }
  case NEON::BI__builtin_neon_vsli_n_v:
  case NEON::BI__builtin_neon_vsliq_n_v: {
    Int = Intrinsic::aarch64_neon_vsli;
    llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
    return EmitNeonCall(Intrin, Ops, "vsli_n");
  }
  case NEON::BI__builtin_neon_vsra_n_v:
  case NEON::BI__builtin_neon_vsraq_n_v:
    Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
    Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
    return Builder.CreateAdd(Ops[0], Ops[1]);
  case NEON::BI__builtin_neon_vrsra_n_v:
  case NEON::BI__builtin_neon_vrsraq_n_v: {
    Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
    SmallVector<llvm::Value *, 2> TmpOps;
    TmpOps.push_back(Ops[1]);
    TmpOps.push_back(Ops[2]);
    Function *F = CGM.getIntrinsic(Int, Ty);
    llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
    Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
    return Builder.CreateAdd(Ops[0], tmp);
  }
  case NEON::BI__builtin_neon_vld1_v:
  case NEON::BI__builtin_neon_vld1q_v: {
    return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment());
  }
  case NEON::BI__builtin_neon_vst1_v:
  case NEON::BI__builtin_neon_vst1q_v:
    Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
  case NEON::BI__builtin_neon_vld1_lane_v:
  case NEON::BI__builtin_neon_vld1q_lane_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
                                       PtrOp0.getAlignment());
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
  }
  case NEON::BI__builtin_neon_vldap1_lane_s64:
  case NEON::BI__builtin_neon_vldap1q_lane_s64: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    llvm::LoadInst *LI = Builder.CreateAlignedLoad(
        VTy->getElementType(), Ops[0], PtrOp0.getAlignment());
    LI->setAtomic(llvm::AtomicOrdering::Acquire);
    Ops[0] = LI;
    return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vldap1_lane");
  }
  case NEON::BI__builtin_neon_vld1_dup_v:
  case NEON::BI__builtin_neon_vld1q_dup_v: {
    Value *V = PoisonValue::get(Ty);
    Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
                                       PtrOp0.getAlignment());
    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
    Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
    return EmitNeonSplat(Ops[0], CI);
  }
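  // vld1_dup loads one scalar, inserts it into lane 0 of a poison vector,
  // and splats it with a shuffle; later passes match this pattern to LD1R.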
7354 case NEON::BI__builtin_neon_vst1_lane_v:
7355 case NEON::BI__builtin_neon_vst1q_lane_v:
7356 Ops[1] =
Builder.CreateBitCast(Ops[1], Ty);
7357 Ops[1] =
Builder.CreateExtractElement(Ops[1], Ops[2]);
7359 case NEON::BI__builtin_neon_vstl1_lane_s64:
7360 case NEON::BI__builtin_neon_vstl1q_lane_s64: {
7361 Ops[1] =
Builder.CreateBitCast(Ops[1], Ty);
7362 Ops[1] =
Builder.CreateExtractElement(Ops[1], Ops[2]);
7363 llvm::StoreInst *SI =
7365 SI->setAtomic(llvm::AtomicOrdering::Release);
7368 case NEON::BI__builtin_neon_vld2_v:
7369 case NEON::BI__builtin_neon_vld2q_v: {
7371 Function *F =
CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
7372 Ops[1] =
Builder.CreateCall(F, Ops[1],
"vld2");
7373 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
7375 case NEON::BI__builtin_neon_vld3_v:
7376 case NEON::BI__builtin_neon_vld3q_v: {
7378 Function *F =
CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
7379 Ops[1] =
Builder.CreateCall(F, Ops[1],
"vld3");
7380 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
7382 case NEON::BI__builtin_neon_vld4_v:
7383 case NEON::BI__builtin_neon_vld4q_v: {
7385 Function *F =
CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
7386 Ops[1] =
Builder.CreateCall(F, Ops[1],
"vld4");
7387 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
7389 case NEON::BI__builtin_neon_vld2_dup_v:
7390 case NEON::BI__builtin_neon_vld2q_dup_v: {
7392 Function *F =
CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
7393 Ops[1] =
Builder.CreateCall(F, Ops[1],
"vld2");
7394 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  case NEON::BI__builtin_neon_vld3_dup_v:
  case NEON::BI__builtin_neon_vld3q_dup_v: {
    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld4_dup_v:
  case NEON::BI__builtin_neon_vld4q_dup_v: {
    llvm::Type *Tys[2] = {VTy, UnqualPtrTy};
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
    Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld2_lane_v:
  case NEON::BI__builtin_neon_vld2q_lane_v: {
    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
    std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
    Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld2_lane");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
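  // The rotate moves the source pointer from Ops[1] to the back so the
  // operands match the intrinsic order (vectors..., lane index, pointer);
  // the lane index is zero-extended to i64 as the intrinsic expects.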
  case NEON::BI__builtin_neon_vld3_lane_v:
  case NEON::BI__builtin_neon_vld3q_lane_v: {
    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
    std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
    Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld3_lane");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vld4_lane_v:
  case NEON::BI__builtin_neon_vld4q_lane_v: {
    llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
    Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
    std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
    Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
    Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
    Ops[1] = Builder.CreateCall(F, ArrayRef(Ops).slice(1), "vld4_lane");
    return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
  }
  case NEON::BI__builtin_neon_vst2_v:
  case NEON::BI__builtin_neon_vst2q_v: {
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
    llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
                        Ops, "");
  }
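  // For stores the rotate goes the other way: the destination pointer moves
  // from the front to the back, giving the (vectors..., pointer) order that
  // aarch64.neon.st2 expects.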
  case NEON::BI__builtin_neon_vst2_lane_v:
  case NEON::BI__builtin_neon_vst2q_lane_v: {
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
    Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
    llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst3_v:
  case NEON::BI__builtin_neon_vst3q_v: {
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
    llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst3_lane_v:
  case NEON::BI__builtin_neon_vst3q_lane_v: {
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
    Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
    llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst4_v:
  case NEON::BI__builtin_neon_vst4q_v: {
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
    llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vst4_lane_v:
  case NEON::BI__builtin_neon_vst4q_lane_v: {
    std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
    Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
    llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
                        Ops, "");
  }
  case NEON::BI__builtin_neon_vtrn_v:
  case NEON::BI__builtin_neon_vtrnq_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<int, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back(i+vi);
        Indices.push_back(i+e+vi);
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vuzp_v:
  case NEON::BI__builtin_neon_vuzpq_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<int, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
        Indices.push_back(2*i+vi);

      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vzip_v:
  case NEON::BI__builtin_neon_vzipq_v: {
    Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
    Value *SV = nullptr;

    for (unsigned vi = 0; vi != 2; ++vi) {
      SmallVector<int, 16> Indices;
      for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
        Indices.push_back((i + vi*e) >> 1);
        Indices.push_back(((i + vi*e) >> 1)+e);
      }
      Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
      SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
      SV = Builder.CreateDefaultAlignedStore(SV, Addr);
    }
    return SV;
  }
  case NEON::BI__builtin_neon_vqtbl1q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
                        Ops, "vtbl1");
  }
  case NEON::BI__builtin_neon_vqtbl2q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
                        Ops, "vtbl2");
  }
  case NEON::BI__builtin_neon_vqtbl3q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
                        Ops, "vtbl3");
  }
  case NEON::BI__builtin_neon_vqtbl4q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
                        Ops, "vtbl4");
  }
  case NEON::BI__builtin_neon_vqtbx1q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
                        Ops, "vtbx1");
  }
  case NEON::BI__builtin_neon_vqtbx2q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
                        Ops, "vtbx2");
  }
  case NEON::BI__builtin_neon_vqtbx3q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
                        Ops, "vtbx3");
  }
  case NEON::BI__builtin_neon_vqtbx4q_v: {
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
                        Ops, "vtbx4");
  }
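  // TBL zeroes result bytes whose index is out of range for the 1-4 register
  // table; TBX instead leaves the corresponding byte of the destination
  // operand unchanged, which is why the tbx intrinsics take an extra vector.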
  case NEON::BI__builtin_neon_vsqadd_v:
  case NEON::BI__builtin_neon_vsqaddq_v: {
    Int = Intrinsic::aarch64_neon_usqadd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
  }
  case NEON::BI__builtin_neon_vuqadd_v:
  case NEON::BI__builtin_neon_vuqaddq_v: {
    Int = Intrinsic::aarch64_neon_suqadd;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
  }
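  // The apparent swap is deliberate: the ACLE names these after the addend's
  // signedness (vsqadd adds a signed value to an unsigned accumulator), while
  // the USQADD/SUQADD instructions are named after the accumulator's.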
  case NEON::BI__builtin_neon_vluti2_laneq_mf8:
  case NEON::BI__builtin_neon_vluti2_laneq_bf16:
  case NEON::BI__builtin_neon_vluti2_laneq_f16:
  case NEON::BI__builtin_neon_vluti2_laneq_p16:
  case NEON::BI__builtin_neon_vluti2_laneq_p8:
  case NEON::BI__builtin_neon_vluti2_laneq_s16:
  case NEON::BI__builtin_neon_vluti2_laneq_s8:
  case NEON::BI__builtin_neon_vluti2_laneq_u16:
  case NEON::BI__builtin_neon_vluti2_laneq_u8: {
    Int = Intrinsic::aarch64_neon_vluti2_laneq;
    llvm::Type *Tys[2];
    Tys[0] = Ty;
    Tys[1] = GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
                                             /*isQuad*/ false));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vluti2_laneq");
  }
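  // The vluti2 intrinsics are overloaded on both the result type and the
  // table operand type; the non-q builtins read the table from a 64-bit
  // vector, so Tys[1] is rebuilt with isQuad = false (and true for the q
  // forms below).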
  case NEON::BI__builtin_neon_vluti2q_laneq_mf8:
  case NEON::BI__builtin_neon_vluti2q_laneq_bf16:
  case NEON::BI__builtin_neon_vluti2q_laneq_f16:
  case NEON::BI__builtin_neon_vluti2q_laneq_p16:
  case NEON::BI__builtin_neon_vluti2q_laneq_p8:
  case NEON::BI__builtin_neon_vluti2q_laneq_s16:
  case NEON::BI__builtin_neon_vluti2q_laneq_s8:
  case NEON::BI__builtin_neon_vluti2q_laneq_u16:
  case NEON::BI__builtin_neon_vluti2q_laneq_u8: {
    Int = Intrinsic::aarch64_neon_vluti2_laneq;
    llvm::Type *Tys[2];
    Tys[0] = Ty;
    Tys[1] = GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
                                             /*isQuad*/ true));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vluti2_laneq");
  }
  case NEON::BI__builtin_neon_vluti2_lane_mf8:
  case NEON::BI__builtin_neon_vluti2_lane_bf16:
  case NEON::BI__builtin_neon_vluti2_lane_f16:
  case NEON::BI__builtin_neon_vluti2_lane_p16:
  case NEON::BI__builtin_neon_vluti2_lane_p8:
  case NEON::BI__builtin_neon_vluti2_lane_s16:
  case NEON::BI__builtin_neon_vluti2_lane_s8:
  case NEON::BI__builtin_neon_vluti2_lane_u16:
  case NEON::BI__builtin_neon_vluti2_lane_u8: {
    Int = Intrinsic::aarch64_neon_vluti2_lane;
    llvm::Type *Tys[2];
    Tys[0] = Ty;
    Tys[1] = GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
                                             /*isQuad*/ false));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vluti2_lane");
  }
  case NEON::BI__builtin_neon_vluti2q_lane_mf8:
  case NEON::BI__builtin_neon_vluti2q_lane_bf16:
  case NEON::BI__builtin_neon_vluti2q_lane_f16:
  case NEON::BI__builtin_neon_vluti2q_lane_p16:
  case NEON::BI__builtin_neon_vluti2q_lane_p8:
  case NEON::BI__builtin_neon_vluti2q_lane_s16:
  case NEON::BI__builtin_neon_vluti2q_lane_s8:
  case NEON::BI__builtin_neon_vluti2q_lane_u16:
  case NEON::BI__builtin_neon_vluti2q_lane_u8: {
    Int = Intrinsic::aarch64_neon_vluti2_lane;
    llvm::Type *Tys[2];
    Tys[0] = Ty;
    Tys[1] = GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
                                             /*isQuad*/ true));
    return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vluti2_lane");
  }
  case NEON::BI__builtin_neon_vluti4q_lane_mf8:
  case NEON::BI__builtin_neon_vluti4q_lane_p8:
  case NEON::BI__builtin_neon_vluti4q_lane_s8:
  case NEON::BI__builtin_neon_vluti4q_lane_u8: {
    Int = Intrinsic::aarch64_neon_vluti4q_lane;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vluti4q_lane");
  }
  case NEON::BI__builtin_neon_vluti4q_laneq_mf8:
  case NEON::BI__builtin_neon_vluti4q_laneq_p8:
  case NEON::BI__builtin_neon_vluti4q_laneq_s8:
  case NEON::BI__builtin_neon_vluti4q_laneq_u8: {
    Int = Intrinsic::aarch64_neon_vluti4q_laneq;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vluti4q_laneq");
  }
  case NEON::BI__builtin_neon_vluti4q_lane_bf16_x2:
  case NEON::BI__builtin_neon_vluti4q_lane_f16_x2:
  case NEON::BI__builtin_neon_vluti4q_lane_p16_x2:
  case NEON::BI__builtin_neon_vluti4q_lane_s16_x2:
  case NEON::BI__builtin_neon_vluti4q_lane_u16_x2: {
    Int = Intrinsic::aarch64_neon_vluti4q_lane_x2;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vluti4q_lane_x2");
  }
  case NEON::BI__builtin_neon_vluti4q_laneq_bf16_x2:
  case NEON::BI__builtin_neon_vluti4q_laneq_f16_x2:
  case NEON::BI__builtin_neon_vluti4q_laneq_p16_x2:
  case NEON::BI__builtin_neon_vluti4q_laneq_s16_x2:
  case NEON::BI__builtin_neon_vluti4q_laneq_u16_x2: {
    Int = Intrinsic::aarch64_neon_vluti4q_laneq_x2;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vluti4q_laneq_x2");
  }
  case NEON::BI__builtin_neon_vmmlaq_f16_mf8_fpm:
    return EmitFP8NeonCall(Intrinsic::aarch64_neon_fmmla,
                           {llvm::FixedVectorType::get(HalfTy, 8),
                            llvm::FixedVectorType::get(Int8Ty, 16)},
                           Ops, E, "fmmla");
  case NEON::BI__builtin_neon_vmmlaq_f32_mf8_fpm:
    return EmitFP8NeonCall(Intrinsic::aarch64_neon_fmmla,
                           {llvm::FixedVectorType::get(FloatTy, 4),
                            llvm::FixedVectorType::get(Int8Ty, 16)},
                           Ops, E, "fmmla");
  case NEON::BI__builtin_neon_vcvt1_low_bf16_mf8_fpm:
    ExtractLow = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vcvt1_bf16_mf8_fpm:
  case NEON::BI__builtin_neon_vcvt1_high_bf16_mf8_fpm:
    return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_cvtl1,
                              llvm::FixedVectorType::get(BFloatTy, 8),
                              Ops[0]->getType(), ExtractLow, Ops, E, "vbfcvt1");
  case NEON::BI__builtin_neon_vcvt2_low_bf16_mf8_fpm:
    ExtractLow = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vcvt2_bf16_mf8_fpm:
  case NEON::BI__builtin_neon_vcvt2_high_bf16_mf8_fpm:
    return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_cvtl2,
                              llvm::FixedVectorType::get(BFloatTy, 8),
                              Ops[0]->getType(), ExtractLow, Ops, E, "vbfcvt2");
  case NEON::BI__builtin_neon_vcvt1_low_f16_mf8_fpm:
    ExtractLow = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vcvt1_f16_mf8_fpm:
  case NEON::BI__builtin_neon_vcvt1_high_f16_mf8_fpm:
    return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_cvtl1,
                              llvm::FixedVectorType::get(HalfTy, 8),
                              Ops[0]->getType(), ExtractLow, Ops, E, "vbfcvt1");
  case NEON::BI__builtin_neon_vcvt2_low_f16_mf8_fpm:
    ExtractLow = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vcvt2_f16_mf8_fpm:
  case NEON::BI__builtin_neon_vcvt2_high_f16_mf8_fpm:
    return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_cvtl2,
                              llvm::FixedVectorType::get(HalfTy, 8),
                              Ops[0]->getType(), ExtractLow, Ops, E, "vbfcvt2");
  case NEON::BI__builtin_neon_vcvt_mf8_f32_fpm:
    return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_fcvtn,
                              llvm::FixedVectorType::get(Int8Ty, 8),
                              Ops[0]->getType(), false, Ops, E, "vfcvtn");
  case NEON::BI__builtin_neon_vcvt_mf8_f16_fpm:
    return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_fcvtn,
                              llvm::FixedVectorType::get(Int8Ty, 8),
                              llvm::FixedVectorType::get(HalfTy, 4), false, Ops,
                              E, "vfcvtn");
  case NEON::BI__builtin_neon_vcvtq_mf8_f16_fpm:
    return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_fcvtn,
                              llvm::FixedVectorType::get(Int8Ty, 16),
                              llvm::FixedVectorType::get(HalfTy, 8), false, Ops,
                              E, "vfcvtn");
  case NEON::BI__builtin_neon_vcvt_high_mf8_f32_fpm: {
    llvm::Type *Ty = llvm::FixedVectorType::get(Int8Ty, 16);
    Ops[0] = Builder.CreateInsertVector(Ty, PoisonValue::get(Ty), Ops[0],
                                        Builder.getInt64(0));
    return EmitFP8NeonCvtCall(Intrinsic::aarch64_neon_fp8_fcvtn2, Ty,
                              Ops[1]->getType(), false, Ops, E, "vfcvtn2");
  }
  case NEON::BI__builtin_neon_vdot_f16_mf8_fpm:
  case NEON::BI__builtin_neon_vdotq_f16_mf8_fpm:
    return EmitFP8NeonFDOTCall(Intrinsic::aarch64_neon_fp8_fdot2, false, HalfTy,
                               Ops, E, "fdot2");
  case NEON::BI__builtin_neon_vdot_lane_f16_mf8_fpm:
  case NEON::BI__builtin_neon_vdotq_lane_f16_mf8_fpm:
    ExtendLaneArg = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vdot_laneq_f16_mf8_fpm:
  case NEON::BI__builtin_neon_vdotq_laneq_f16_mf8_fpm:
    return EmitFP8NeonFDOTCall(Intrinsic::aarch64_neon_fp8_fdot2_lane,
                               ExtendLaneArg, HalfTy, Ops, E, "fdot2_lane");
  case NEON::BI__builtin_neon_vdot_f32_mf8_fpm:
  case NEON::BI__builtin_neon_vdotq_f32_mf8_fpm:
    return EmitFP8NeonFDOTCall(Intrinsic::aarch64_neon_fp8_fdot4, false,
                               FloatTy, Ops, E, "fdot4");
  case NEON::BI__builtin_neon_vdot_lane_f32_mf8_fpm:
  case NEON::BI__builtin_neon_vdotq_lane_f32_mf8_fpm:
    ExtendLaneArg = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vdot_laneq_f32_mf8_fpm:
  case NEON::BI__builtin_neon_vdotq_laneq_f32_mf8_fpm:
    return EmitFP8NeonFDOTCall(Intrinsic::aarch64_neon_fp8_fdot4_lane,
                               ExtendLaneArg, FloatTy, Ops, E, "fdot4_lane");
  case NEON::BI__builtin_neon_vmlalbq_f16_mf8_fpm:
    return EmitFP8NeonCall(Intrinsic::aarch64_neon_fp8_fmlalb,
                           {llvm::FixedVectorType::get(HalfTy, 8)}, Ops, E,
                           "vmlal");
  case NEON::BI__builtin_neon_vmlaltq_f16_mf8_fpm:
    return EmitFP8NeonCall(Intrinsic::aarch64_neon_fp8_fmlalt,
                           {llvm::FixedVectorType::get(HalfTy, 8)}, Ops, E,
                           "vmlal");
  case NEON::BI__builtin_neon_vmlallbbq_f32_mf8_fpm:
    return EmitFP8NeonCall(Intrinsic::aarch64_neon_fp8_fmlallbb,
                           {llvm::FixedVectorType::get(FloatTy, 4)}, Ops, E,
                           "vmlall");
  case NEON::BI__builtin_neon_vmlallbtq_f32_mf8_fpm:
    return EmitFP8NeonCall(Intrinsic::aarch64_neon_fp8_fmlallbt,
                           {llvm::FixedVectorType::get(FloatTy, 4)}, Ops, E,
                           "vmlall");
  case NEON::BI__builtin_neon_vmlalltbq_f32_mf8_fpm:
    return EmitFP8NeonCall(Intrinsic::aarch64_neon_fp8_fmlalltb,
                           {llvm::FixedVectorType::get(FloatTy, 4)}, Ops, E,
                           "vmlall");
  case NEON::BI__builtin_neon_vmlallttq_f32_mf8_fpm:
    return EmitFP8NeonCall(Intrinsic::aarch64_neon_fp8_fmlalltt,
                           {llvm::FixedVectorType::get(FloatTy, 4)}, Ops, E,
                           "vmlall");
  case NEON::BI__builtin_neon_vmlalbq_lane_f16_mf8_fpm:
    ExtendLaneArg = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vmlalbq_laneq_f16_mf8_fpm:
    return EmitFP8NeonFMLACall(Intrinsic::aarch64_neon_fp8_fmlalb_lane,
                               ExtendLaneArg, HalfTy, Ops, E, "vmlal_lane");
  case NEON::BI__builtin_neon_vmlaltq_lane_f16_mf8_fpm:
    ExtendLaneArg = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vmlaltq_laneq_f16_mf8_fpm:
    return EmitFP8NeonFMLACall(Intrinsic::aarch64_neon_fp8_fmlalt_lane,
                               ExtendLaneArg, HalfTy, Ops, E, "vmlal_lane");
  case NEON::BI__builtin_neon_vmlallbbq_lane_f32_mf8_fpm:
    ExtendLaneArg = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vmlallbbq_laneq_f32_mf8_fpm:
    return EmitFP8NeonFMLACall(Intrinsic::aarch64_neon_fp8_fmlallbb_lane,
                               ExtendLaneArg, FloatTy, Ops, E, "vmlall_lane");
  case NEON::BI__builtin_neon_vmlallbtq_lane_f32_mf8_fpm:
    ExtendLaneArg = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vmlallbtq_laneq_f32_mf8_fpm:
    return EmitFP8NeonFMLACall(Intrinsic::aarch64_neon_fp8_fmlallbt_lane,
                               ExtendLaneArg, FloatTy, Ops, E, "vmlall_lane");
  case NEON::BI__builtin_neon_vmlalltbq_lane_f32_mf8_fpm:
    ExtendLaneArg = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vmlalltbq_laneq_f32_mf8_fpm:
    return EmitFP8NeonFMLACall(Intrinsic::aarch64_neon_fp8_fmlalltb_lane,
                               ExtendLaneArg, FloatTy, Ops, E, "vmlall_lane");
  case NEON::BI__builtin_neon_vmlallttq_lane_f32_mf8_fpm:
    ExtendLaneArg = true;
    [[fallthrough]];
  case NEON::BI__builtin_neon_vmlallttq_laneq_f32_mf8_fpm:
    return EmitFP8NeonFMLACall(Intrinsic::aarch64_neon_fp8_fmlalltt_lane,
                               ExtendLaneArg, FloatTy, Ops, E, "vmlall_lane");
  case NEON::BI__builtin_neon_vamin_f16:
  case NEON::BI__builtin_neon_vaminq_f16:
  case NEON::BI__builtin_neon_vamin_f32:
  case NEON::BI__builtin_neon_vaminq_f32:
  case NEON::BI__builtin_neon_vaminq_f64: {
    Int = Intrinsic::aarch64_neon_famin;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "famin");
  }
  case NEON::BI__builtin_neon_vamax_f16:
  case NEON::BI__builtin_neon_vamaxq_f16:
  case NEON::BI__builtin_neon_vamax_f32:
  case NEON::BI__builtin_neon_vamaxq_f32:
  case NEON::BI__builtin_neon_vamaxq_f64: {
    Int = Intrinsic::aarch64_neon_famax;
    return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "famax");
  }
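  // FEAT_FAMINMAX: famin/famax take the element-wise minimum/maximum of the
  // operands' absolute values.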
  case NEON::BI__builtin_neon_vscale_f16:
  case NEON::BI__builtin_neon_vscaleq_f16:
  case NEON::BI__builtin_neon_vscale_f32:
  case NEON::BI__builtin_neon_vscaleq_f32:
  case NEON::BI__builtin_neon_vscaleq_f64: {
    Int = Intrinsic::aarch64_neon_fp8_fscale;