diff options
-rw-r--r-- | clang/include/clang/Basic/BuiltinsVEVL.gen.def | 603 | ||||
-rw-r--r-- | clang/lib/Headers/velintrin.h | 5 | ||||
-rw-r--r-- | clang/test/CodeGen/VE/ve-velintrin.c | 4224 |
3 files changed, 4828 insertions, 4 deletions
diff --git a/clang/include/clang/Basic/BuiltinsVEVL.gen.def b/clang/include/clang/Basic/BuiltinsVEVL.gen.def index 9960c89b5300..7b06e5c30e93 100644 --- a/clang/include/clang/Basic/BuiltinsVEVL.gen.def +++ b/clang/include/clang/Basic/BuiltinsVEVL.gen.def @@ -31,123 +31,195 @@ BUILTIN(__builtin_ve_vl_vldl2dzx_vssvl, "V256dLUivC*V256dUi", "n") BUILTIN(__builtin_ve_vl_vldl2dzxnc_vssl, "V256dLUivC*Ui", "n") BUILTIN(__builtin_ve_vl_vldl2dzxnc_vssvl, "V256dLUivC*V256dUi", "n") BUILTIN(__builtin_ve_vl_vst_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vst_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstnc_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstnc_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstot_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstncot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstncot_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstu_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstu_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstunc_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstunc_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstuot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstuot_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstuncot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstuncot_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstl_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstl_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstlnc_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstlnc_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstlot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstlot_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstlncot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstlncot_vssml, "vV256dLUiv*V256bUi", "n") 
BUILTIN(__builtin_ve_vl_vst2d_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vst2d_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vst2dnc_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vst2dnc_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vst2dot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vst2dot_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vst2dncot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vst2dncot_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstu2d_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstu2d_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstu2dnc_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstu2dnc_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstu2dot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstu2dot_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstu2dncot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstu2dncot_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstl2d_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstl2d_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstl2dnc_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstl2dnc_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstl2dot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstl2dot_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_vstl2dncot_vssl, "vV256dLUiv*Ui", "n") +BUILTIN(__builtin_ve_vl_vstl2dncot_vssml, "vV256dLUiv*V256bUi", "n") BUILTIN(__builtin_ve_vl_pfchv_ssl, "vLivC*Ui", "n") BUILTIN(__builtin_ve_vl_pfchvnc_ssl, "vLivC*Ui", "n") BUILTIN(__builtin_ve_vl_lsv_vvss, "V256dV256dUiLUi", "n") BUILTIN(__builtin_ve_vl_lvsl_svs, "LUiV256dUi", "n") BUILTIN(__builtin_ve_vl_lvsd_svs, "dV256dUi", "n") BUILTIN(__builtin_ve_vl_lvss_svs, "fV256dUi", "n") +BUILTIN(__builtin_ve_vl_lvm_mmss, "V256bV256bLUiLUi", "n") +BUILTIN(__builtin_ve_vl_lvm_MMss, "V512bV512bLUiLUi", "n") 
+BUILTIN(__builtin_ve_vl_svm_sms, "LUiV256bLUi", "n") +BUILTIN(__builtin_ve_vl_svm_sMs, "LUiV512bLUi", "n") BUILTIN(__builtin_ve_vl_vbrdd_vsl, "V256ddUi", "n") BUILTIN(__builtin_ve_vl_vbrdd_vsvl, "V256ddV256dUi", "n") +BUILTIN(__builtin_ve_vl_vbrdd_vsmvl, "V256ddV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vbrdl_vsl, "V256dLiUi", "n") BUILTIN(__builtin_ve_vl_vbrdl_vsvl, "V256dLiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vbrdl_vsmvl, "V256dLiV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vbrds_vsl, "V256dfUi", "n") BUILTIN(__builtin_ve_vl_vbrds_vsvl, "V256dfV256dUi", "n") +BUILTIN(__builtin_ve_vl_vbrds_vsmvl, "V256dfV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vbrdw_vsl, "V256diUi", "n") BUILTIN(__builtin_ve_vl_vbrdw_vsvl, "V256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vbrdw_vsmvl, "V256diV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvbrd_vsl, "V256dLUiUi", "n") BUILTIN(__builtin_ve_vl_pvbrd_vsvl, "V256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvbrd_vsMvl, "V256dLUiV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vmv_vsvl, "V256dUiV256dUi", "n") BUILTIN(__builtin_ve_vl_vmv_vsvvl, "V256dUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmv_vsvmvl, "V256dUiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vaddul_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vaddul_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vaddul_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_vaddul_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddul_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddul_vsvmvl, "V256dLUiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vadduw_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vadduw_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vadduw_vsvl, "V256dUiV256dUi", "n") BUILTIN(__builtin_ve_vl_vadduw_vsvvl, "V256dUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vadduw_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vadduw_vsvmvl, "V256dUiV256dV256bV256dUi", "n") 
BUILTIN(__builtin_ve_vl_pvaddu_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvaddu_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvaddu_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvaddu_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvaddu_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvaddu_vsvMvl, "V256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vaddswsx_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vaddswsx_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vaddswsx_vsvl, "V256diV256dUi", "n") BUILTIN(__builtin_ve_vl_vaddswsx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddswsx_vsvmvl, "V256diV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vaddswzx_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vaddswzx_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vaddswzx_vsvl, "V256diV256dUi", "n") BUILTIN(__builtin_ve_vl_vaddswzx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddswzx_vsvmvl, "V256diV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvadds_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvadds_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvadds_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvadds_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvadds_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvadds_vsvMvl, "V256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vaddsl_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vaddsl_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vaddsl_vsvl, "V256dLiV256dUi", "n") BUILTIN(__builtin_ve_vl_vaddsl_vsvvl, "V256dLiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vaddsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_vaddsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubul_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubul_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubul_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubul_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubul_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubul_vsvmvl, "V256dLUiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubuw_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubuw_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubuw_vsvl, "V256dUiV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubuw_vsvvl, "V256dUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubuw_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubuw_vsvmvl, "V256dUiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsubu_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsubu_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsubu_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsubu_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsubu_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsubu_vsvMvl, "V256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubswsx_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubswsx_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubswsx_vsvl, "V256diV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubswsx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubswsx_vsvmvl, "V256diV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubswzx_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubswzx_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubswzx_vsvl, "V256diV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubswzx_vsvvl, "V256diV256dV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_vsubswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubswzx_vsvmvl, "V256diV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsubs_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsubs_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsubs_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsubs_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsubs_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsubs_vsvMvl, "V256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubsl_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubsl_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubsl_vsvl, "V256dLiV256dUi", "n") BUILTIN(__builtin_ve_vl_vsubsl_vsvvl, "V256dLiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsubsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vmulul_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vmulul_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vmulul_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_vmulul_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulul_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulul_vsvmvl, "V256dLUiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vmuluw_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vmuluw_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vmuluw_vsvl, "V256dUiV256dUi", "n") BUILTIN(__builtin_ve_vl_vmuluw_vsvvl, "V256dUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmuluw_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmuluw_vsvmvl, "V256dUiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vmulswsx_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vmulswsx_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vmulswsx_vsvl, "V256diV256dUi", "n") 
BUILTIN(__builtin_ve_vl_vmulswsx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulswsx_vsvmvl, "V256diV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vmulswzx_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vmulswzx_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vmulswzx_vsvl, "V256diV256dUi", "n") BUILTIN(__builtin_ve_vl_vmulswzx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulswzx_vsvmvl, "V256diV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vmulsl_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vmulsl_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vmulsl_vsvl, "V256dLiV256dUi", "n") BUILTIN(__builtin_ve_vl_vmulsl_vsvvl, "V256dLiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmulsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vmulslw_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vmulslw_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vmulslw_vsvl, "V256diV256dUi", "n") @@ -156,148 +228,221 @@ BUILTIN(__builtin_ve_vl_vdivul_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivul_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivul_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivul_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivul_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivul_vsvmvl, "V256dLUiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivuw_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivuw_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivuw_vsvl, "V256dUiV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivuw_vsvvl, "V256dUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivuw_vvvmvl, "V256dV256dV256dV256bV256dUi", 
"n") +BUILTIN(__builtin_ve_vl_vdivuw_vsvmvl, "V256dUiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivul_vvsl, "V256dV256dLUiUi", "n") BUILTIN(__builtin_ve_vl_vdivul_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivul_vvsmvl, "V256dV256dLUiV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivuw_vvsl, "V256dV256dUiUi", "n") BUILTIN(__builtin_ve_vl_vdivuw_vvsvl, "V256dV256dUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivuw_vvsmvl, "V256dV256dUiV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivswsx_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivswsx_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivswsx_vsvl, "V256diV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivswsx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswsx_vsvmvl, "V256diV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivswzx_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivswzx_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivswzx_vsvl, "V256diV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivswzx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswzx_vsvmvl, "V256diV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivswsx_vvsl, "V256dV256diUi", "n") BUILTIN(__builtin_ve_vl_vdivswsx_vvsvl, "V256dV256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswsx_vvsmvl, "V256dV256diV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivswzx_vvsl, "V256dV256diUi", "n") BUILTIN(__builtin_ve_vl_vdivswzx_vvsvl, "V256dV256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivswzx_vvsmvl, "V256dV256diV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivsl_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivsl_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivsl_vsvl, "V256dLiV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivsl_vsvvl, "V256dLiV256dV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_vdivsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vdivsl_vvsl, "V256dV256dLiUi", "n") BUILTIN(__builtin_ve_vl_vdivsl_vvsvl, "V256dV256dLiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vdivsl_vvsmvl, "V256dV256dLiV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpul_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpul_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpul_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpul_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpul_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpul_vsvmvl, "V256dLUiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpuw_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpuw_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpuw_vsvl, "V256dUiV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpuw_vsvvl, "V256dUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpuw_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpuw_vsvmvl, "V256dUiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvcmpu_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvcmpu_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvcmpu_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvcmpu_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcmpu_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcmpu_vsvMvl, "V256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpswsx_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpswsx_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpswsx_vsvl, "V256diV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpswsx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpswsx_vsvmvl, "V256diV256dV256bV256dUi", "n") 
BUILTIN(__builtin_ve_vl_vcmpswzx_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpswzx_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpswzx_vsvl, "V256diV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpswzx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpswzx_vsvmvl, "V256diV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvcmps_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvcmps_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvcmps_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvcmps_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcmps_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcmps_vsvMvl, "V256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpsl_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpsl_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpsl_vsvl, "V256dLiV256dUi", "n") BUILTIN(__builtin_ve_vl_vcmpsl_vsvvl, "V256dLiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcmpsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vmaxswsx_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vmaxswsx_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vmaxswsx_vsvl, "V256diV256dUi", "n") BUILTIN(__builtin_ve_vl_vmaxswsx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxswsx_vsvmvl, "V256diV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vmaxswzx_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vmaxswzx_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vmaxswzx_vsvl, "V256diV256dUi", "n") BUILTIN(__builtin_ve_vl_vmaxswzx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_vmaxswzx_vsvmvl, "V256diV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvmaxs_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvmaxs_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvmaxs_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvmaxs_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvmaxs_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvmaxs_vsvMvl, "V256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vminswsx_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vminswsx_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vminswsx_vsvl, "V256diV256dUi", "n") BUILTIN(__builtin_ve_vl_vminswsx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminswsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminswsx_vsvmvl, "V256diV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vminswzx_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vminswzx_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vminswzx_vsvl, "V256diV256dUi", "n") BUILTIN(__builtin_ve_vl_vminswzx_vsvvl, "V256diV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminswzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminswzx_vsvmvl, "V256diV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvmins_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvmins_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvmins_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvmins_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvmins_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvmins_vsvMvl, "V256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vmaxsl_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vmaxsl_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vmaxsl_vsvl, "V256dLiV256dUi", "n") BUILTIN(__builtin_ve_vl_vmaxsl_vsvvl, "V256dLiV256dV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_vmaxsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmaxsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vminsl_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vminsl_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vminsl_vsvl, "V256dLiV256dUi", "n") BUILTIN(__builtin_ve_vl_vminsl_vsvvl, "V256dLiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminsl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vminsl_vsvmvl, "V256dLiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vand_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vand_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vand_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_vand_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vand_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vand_vsvmvl, "V256dLUiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvand_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvand_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvand_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvand_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvand_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvand_vsvMvl, "V256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vor_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vor_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vor_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_vor_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vor_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vor_vsvmvl, "V256dLUiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvor_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvor_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvor_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvor_vsvvl, "V256dLUiV256dV256dUi", 
"n") +BUILTIN(__builtin_ve_vl_pvor_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvor_vsvMvl, "V256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vxor_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vxor_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vxor_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_vxor_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vxor_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vxor_vsvmvl, "V256dLUiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvxor_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvxor_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvxor_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvxor_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxor_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvxor_vsvMvl, "V256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_veqv_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_veqv_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_veqv_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_veqv_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_veqv_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_veqv_vsvmvl, "V256dLUiV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pveqv_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pveqv_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pveqv_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pveqv_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqv_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pveqv_vsvMvl, "V256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vldz_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vldz_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vldz_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvldzlo_vvl, "V256dV256dUi", "n") 
BUILTIN(__builtin_ve_vl_pvldzlo_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvldzlo_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvldzup_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvldzup_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvldzup_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvldz_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvldz_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvldz_vvMvl, "V256dV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vpcnt_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vpcnt_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vpcnt_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvpcntlo_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvpcntlo_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvpcntlo_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvpcntup_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvpcntup_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvpcntup_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvpcnt_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvpcnt_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvpcnt_vvMvl, "V256dV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vbrv_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vbrv_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vbrv_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvbrvlo_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvbrvlo_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvbrvlo_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvbrvup_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvbrvup_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvbrvup_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvbrv_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvbrv_vvvl, "V256dV256dV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_pvbrv_vvMvl, "V256dV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vseq_vl, "V256dUi", "n") BUILTIN(__builtin_ve_vl_vseq_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvseqlo_vl, "V256dUi", "n") @@ -310,96 +455,143 @@ BUILTIN(__builtin_ve_vl_vsll_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsll_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsll_vvsl, "V256dV256dLUiUi", "n") BUILTIN(__builtin_ve_vl_vsll_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsll_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsll_vvsmvl, "V256dV256dLUiV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsll_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsll_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsll_vvsl, "V256dV256dLUiUi", "n") BUILTIN(__builtin_ve_vl_pvsll_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsll_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsll_vvsMvl, "V256dV256dLUiV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vsrl_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsrl_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsrl_vvsl, "V256dV256dLUiUi", "n") BUILTIN(__builtin_ve_vl_vsrl_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsrl_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsrl_vvsmvl, "V256dV256dLUiV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsrl_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsrl_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsrl_vvsl, "V256dV256dLUiUi", "n") BUILTIN(__builtin_ve_vl_pvsrl_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsrl_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsrl_vvsMvl, "V256dV256dLUiV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vslawsx_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vslawsx_vvvvl, "V256dV256dV256dV256dUi", "n") 
BUILTIN(__builtin_ve_vl_vslawsx_vvsl, "V256dV256diUi", "n") BUILTIN(__builtin_ve_vl_vslawsx_vvsvl, "V256dV256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vslawsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vslawsx_vvsmvl, "V256dV256diV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vslawzx_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vslawzx_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vslawzx_vvsl, "V256dV256diUi", "n") BUILTIN(__builtin_ve_vl_vslawzx_vvsvl, "V256dV256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vslawzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vslawzx_vvsmvl, "V256dV256diV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsla_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsla_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsla_vvsl, "V256dV256dLUiUi", "n") BUILTIN(__builtin_ve_vl_pvsla_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsla_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsla_vvsMvl, "V256dV256dLUiV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vslal_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vslal_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vslal_vvsl, "V256dV256dLiUi", "n") BUILTIN(__builtin_ve_vl_vslal_vvsvl, "V256dV256dLiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vslal_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vslal_vvsmvl, "V256dV256dLiV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vsrawsx_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsrawsx_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsrawsx_vvsl, "V256dV256diUi", "n") BUILTIN(__builtin_ve_vl_vsrawsx_vvsvl, "V256dV256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsrawsx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsrawsx_vvsmvl, "V256dV256diV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vsrawzx_vvvl, "V256dV256dV256dUi", "n") 
BUILTIN(__builtin_ve_vl_vsrawzx_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsrawzx_vvsl, "V256dV256diUi", "n") BUILTIN(__builtin_ve_vl_vsrawzx_vvsvl, "V256dV256diV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsrawzx_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsrawzx_vvsmvl, "V256dV256diV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsra_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsra_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvsra_vvsl, "V256dV256dLUiUi", "n") BUILTIN(__builtin_ve_vl_pvsra_vvsvl, "V256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsra_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvsra_vvsMvl, "V256dV256dLUiV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vsral_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsral_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vsral_vvsl, "V256dV256dLiUi", "n") BUILTIN(__builtin_ve_vl_vsral_vvsvl, "V256dV256dLiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsral_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsral_vvsmvl, "V256dV256dLiV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vsfa_vvssl, "V256dV256dLUiLUiUi", "n") BUILTIN(__builtin_ve_vl_vsfa_vvssvl, "V256dV256dLUiLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsfa_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfaddd_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfaddd_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfaddd_vsvl, "V256ddV256dUi", "n") BUILTIN(__builtin_ve_vl_vfaddd_vsvvl, "V256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfaddd_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfaddd_vsvmvl, "V256ddV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfadds_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfadds_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfadds_vsvl, "V256dfV256dUi", "n") BUILTIN(__builtin_ve_vl_vfadds_vsvvl, 
"V256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfadds_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfadds_vsvmvl, "V256dfV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfadd_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfadd_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfadd_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfadd_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfadd_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfadd_vsvMvl, "V256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfsubd_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfsubd_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfsubd_vsvl, "V256ddV256dUi", "n") BUILTIN(__builtin_ve_vl_vfsubd_vsvvl, "V256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsubd_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsubd_vsvmvl, "V256ddV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfsubs_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfsubs_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfsubs_vsvl, "V256dfV256dUi", "n") BUILTIN(__builtin_ve_vl_vfsubs_vsvvl, "V256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsubs_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsubs_vsvmvl, "V256dfV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfsub_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfsub_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfsub_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfsub_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfsub_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfsub_vsvMvl, "V256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmuld_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmuld_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmuld_vsvl, "V256ddV256dUi", "n") 
BUILTIN(__builtin_ve_vl_vfmuld_vsvvl, "V256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmuld_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmuld_vsvmvl, "V256ddV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmuls_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmuls_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmuls_vsvl, "V256dfV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmuls_vsvvl, "V256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmuls_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmuls_vsvmvl, "V256dfV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmul_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmul_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmul_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmul_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmul_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmul_vsvMvl, "V256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfdivd_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfdivd_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfdivd_vsvl, "V256ddV256dUi", "n") BUILTIN(__builtin_ve_vl_vfdivd_vsvvl, "V256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfdivd_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfdivd_vsvmvl, "V256ddV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfdivs_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfdivs_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfdivs_vsvl, "V256dfV256dUi", "n") BUILTIN(__builtin_ve_vl_vfdivs_vsvvl, "V256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfdivs_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfdivs_vsvmvl, "V256dfV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfsqrtd_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfsqrtd_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfsqrts_vvl, 
"V256dV256dUi", "n") @@ -408,110 +600,164 @@ BUILTIN(__builtin_ve_vl_vfcmpd_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfcmpd_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfcmpd_vsvl, "V256ddV256dUi", "n") BUILTIN(__builtin_ve_vl_vfcmpd_vsvvl, "V256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfcmpd_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfcmpd_vsvmvl, "V256ddV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfcmps_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfcmps_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfcmps_vsvl, "V256dfV256dUi", "n") BUILTIN(__builtin_ve_vl_vfcmps_vsvvl, "V256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfcmps_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfcmps_vsvmvl, "V256dfV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfcmp_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfcmp_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfcmp_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfcmp_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfcmp_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfcmp_vsvMvl, "V256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmaxd_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmaxd_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmaxd_vsvl, "V256ddV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmaxd_vsvvl, "V256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmaxd_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmaxd_vsvmvl, "V256ddV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmaxs_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmaxs_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmaxs_vsvl, "V256dfV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmaxs_vsvvl, "V256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmaxs_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_vfmaxs_vsvmvl, "V256dfV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmax_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmax_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmax_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmax_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmax_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmax_vsvMvl, "V256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmind_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmind_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmind_vsvl, "V256ddV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmind_vsvvl, "V256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmind_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmind_vsvmvl, "V256ddV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmins_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmins_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmins_vsvl, "V256dfV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmins_vsvvl, "V256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmins_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmins_vsvmvl, "V256dfV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmin_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmin_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmin_vsvl, "V256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmin_vsvvl, "V256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmin_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmin_vsvMvl, "V256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmadd_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmadd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmadd_vsvvl, "V256ddV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmadd_vsvvvl, "V256ddV256dV256dV256dUi", "n") 
BUILTIN(__builtin_ve_vl_vfmadd_vvsvl, "V256dV256ddV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmadd_vvsvvl, "V256dV256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmadd_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmadd_vsvvmvl, "V256ddV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmadd_vvsvmvl, "V256dV256ddV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmads_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmads_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmads_vsvvl, "V256dfV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmads_vsvvvl, "V256dfV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmads_vvsvl, "V256dV256dfV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmads_vvsvvl, "V256dV256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmads_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmads_vsvvmvl, "V256dfV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmads_vvsvmvl, "V256dV256dfV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmad_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmad_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmad_vsvvl, "V256dLUiV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmad_vsvvvl, "V256dLUiV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmad_vvsvl, "V256dV256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmad_vvsvvl, "V256dV256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmad_vvvvMvl, "V256dV256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmad_vsvvMvl, "V256dLUiV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmad_vvsvMvl, "V256dV256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmsbd_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmsbd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmsbd_vsvvl, "V256ddV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmsbd_vsvvvl, "V256ddV256dV256dV256dUi", "n") 
BUILTIN(__builtin_ve_vl_vfmsbd_vvsvl, "V256dV256ddV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmsbd_vvsvvl, "V256dV256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbd_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbd_vsvvmvl, "V256ddV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbd_vvsvmvl, "V256dV256ddV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmsbs_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmsbs_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmsbs_vsvvl, "V256dfV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmsbs_vsvvvl, "V256dfV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmsbs_vvsvl, "V256dV256dfV256dUi", "n") BUILTIN(__builtin_ve_vl_vfmsbs_vvsvvl, "V256dV256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbs_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbs_vsvvmvl, "V256dfV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmsbs_vvsvmvl, "V256dV256dfV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmsb_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmsb_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmsb_vsvvl, "V256dLUiV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmsb_vsvvvl, "V256dLUiV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmsb_vvsvl, "V256dV256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfmsb_vvsvvl, "V256dV256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmsb_vvvvMvl, "V256dV256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmsb_vsvvMvl, "V256dLUiV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmsb_vvsvMvl, "V256dV256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmadd_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmadd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmadd_vsvvl, "V256ddV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmadd_vsvvvl, "V256ddV256dV256dV256dUi", "n") 
BUILTIN(__builtin_ve_vl_vfnmadd_vvsvl, "V256dV256ddV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmadd_vvsvvl, "V256dV256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmadd_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmadd_vsvvmvl, "V256ddV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmadd_vvsvmvl, "V256dV256ddV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmads_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmads_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmads_vsvvl, "V256dfV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmads_vsvvvl, "V256dfV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmads_vvsvl, "V256dV256dfV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmads_vvsvvl, "V256dV256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmads_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmads_vsvvmvl, "V256dfV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmads_vvsvmvl, "V256dV256dfV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfnmad_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfnmad_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfnmad_vsvvl, "V256dLUiV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfnmad_vsvvvl, "V256dLUiV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfnmad_vvsvl, "V256dV256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfnmad_vvsvvl, "V256dV256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmad_vvvvMvl, "V256dV256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmad_vsvvMvl, "V256dLUiV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmad_vvsvMvl, "V256dV256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmsbd_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmsbd_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmsbd_vsvvl, "V256ddV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmsbd_vsvvvl, "V256ddV256dV256dV256dUi", 
"n") BUILTIN(__builtin_ve_vl_vfnmsbd_vvsvl, "V256dV256ddV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmsbd_vvsvvl, "V256dV256ddV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbd_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbd_vsvvmvl, "V256ddV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbd_vvsvmvl, "V256dV256ddV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmsbs_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmsbs_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmsbs_vsvvl, "V256dfV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmsbs_vsvvvl, "V256dfV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmsbs_vvsvl, "V256dV256dfV256dUi", "n") BUILTIN(__builtin_ve_vl_vfnmsbs_vvsvvl, "V256dV256dfV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbs_vvvvmvl, "V256dV256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbs_vsvvmvl, "V256dfV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfnmsbs_vvsvmvl, "V256dV256dfV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfnmsb_vvvvl, "V256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfnmsb_vvvvvl, "V256dV256dV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfnmsb_vsvvl, "V256dLUiV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfnmsb_vsvvvl, "V256dLUiV256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfnmsb_vvsvl, "V256dV256dLUiV256dUi", "n") BUILTIN(__builtin_ve_vl_pvfnmsb_vvsvvl, "V256dV256dLUiV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmsb_vvvvMvl, "V256dV256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmsb_vsvvMvl, "V256dLUiV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfnmsb_vvsvMvl, "V256dV256dLUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vrcpd_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vrcpd_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vrcps_vvl, "V256dV256dUi", "n") @@ -532,28 +778,40 @@ BUILTIN(__builtin_ve_vl_pvrsqrtnex_vvl, "V256dV256dUi", "n") 
BUILTIN(__builtin_ve_vl_pvrsqrtnex_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtwdsx_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtwdsx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwdsx_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtwdsxrz_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtwdsxrz_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwdsxrz_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtwdzx_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtwdzx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwdzx_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtwdzxrz_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtwdzxrz_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwdzxrz_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtwssx_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtwssx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwssx_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtwssxrz_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtwssxrz_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwssxrz_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtwszx_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtwszx_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwszx_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtwszxrz_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtwszxrz_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtwszxrz_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvcvtws_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvcvtws_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvcvtws_vvMvl, "V256dV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_pvcvtwsrz_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_pvcvtwsrz_vvvl, "V256dV256dV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_pvcvtwsrz_vvMvl, "V256dV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtld_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtld_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtld_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtldrz_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtldrz_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcvtldrz_vvmvl, "V256dV256dV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtdw_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtdw_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtsw_vvl, "V256dV256dUi", "n") @@ -566,13 +824,312 @@ BUILTIN(__builtin_ve_vl_vcvtds_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtds_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtsd_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vcvtsd_vvvl, "V256dV256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmrg_vvvml, "V256dV256dV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vmrg_vvvmvl, "V256dV256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmrg_vsvml, "V256dLUiV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vmrg_vsvmvl, "V256dLUiV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmrgw_vvvMl, "V256dV256dV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_vmrgw_vvvMvl, "V256dV256dV256dV512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vmrgw_vsvMl, "V256dUiV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_vmrgw_vsvMvl, "V256dUiV256dV512bV256dUi", "n") BUILTIN(__builtin_ve_vl_vshf_vvvsl, "V256dV256dV256dLUiUi", "n") BUILTIN(__builtin_ve_vl_vshf_vvvsvl, "V256dV256dV256dLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vcp_vvmvl, "V256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vex_vvmvl, "V256dV256dV256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklat_ml, "V256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmklaf_ml, "V256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkat_Ml, "V512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkaf_Ml, "V512bUi", "n") +BUILTIN(__builtin_ve_vl_vfmklgt_mvl, "V256bV256dUi", 
"n") +BUILTIN(__builtin_ve_vl_vfmklgt_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkllt_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkllt_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmklne_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklne_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkleq_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkleq_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmklge_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklge_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmklle_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklle_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmklnum_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklnum_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmklnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmklgtnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklgtnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmklltnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklltnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmklnenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklnenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkleqnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkleqnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmklgenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmklgenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkllenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkllenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwgt_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwgt_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwlt_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwlt_mvml, "V256bV256dV256bUi", "n") 
+BUILTIN(__builtin_ve_vl_vfmkwne_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwne_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkweq_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkweq_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwge_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwge_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwle_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwle_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwnum_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwnum_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwgtnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwgtnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwltnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwltnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwnenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwnenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkweqnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkweqnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwgenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwgenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwlenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkwlenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlogt_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupgt_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlogt_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupgt_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlolt_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwuplt_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlolt_mvml, "V256bV256dV256bUi", "n") 
+BUILTIN(__builtin_ve_vl_pvfmkwuplt_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlone_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupne_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlone_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupne_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwloeq_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupeq_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwloeq_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupeq_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwloge_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupge_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwloge_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupge_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlole_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwuple_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlole_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwuple_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlonum_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupnum_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlonum_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupnum_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlonan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlonan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlogtnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupgtnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlogtnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupgtnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwloltnan_mvl, "V256bV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_pvfmkwupltnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwloltnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupltnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlonenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupnenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlonenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupnenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwloeqnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupeqnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwloeqnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupeqnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlogenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupgenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlogenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwupgenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlolenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwuplenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlolenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwuplenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwgt_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwgt_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlt_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlt_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwne_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwne_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkweq_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkweq_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwge_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwge_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwle_Mvl, "V512bV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_pvfmkwle_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwnum_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwnum_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwnan_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwnan_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwgtnan_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwgtnan_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwltnan_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwltnan_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwnenan_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwnenan_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkweqnan_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkweqnan_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwgenan_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwgenan_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlenan_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkwlenan_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdgt_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdgt_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdlt_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdlt_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdne_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdne_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdeq_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdeq_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdge_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdge_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdle_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdle_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdnum_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdnum_mvml, "V256bV256dV256bUi", "n") 
+BUILTIN(__builtin_ve_vl_vfmkdnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdgtnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdgtnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdltnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdltnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdnenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdnenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdeqnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdeqnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdgenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdgenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdlenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkdlenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmksgt_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksgt_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkslt_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkslt_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmksne_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksne_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkseq_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkseq_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmksge_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksge_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmksle_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksle_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmksnum_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksnum_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmksnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmksgtnan_mvl, "V256bV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_vfmksgtnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmksltnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksltnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmksnenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksnenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkseqnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkseqnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmksgenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmksgenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_vfmkslenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfmkslenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslogt_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupgt_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslogt_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupgt_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslolt_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksuplt_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslolt_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksuplt_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslone_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupne_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslone_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupne_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksloeq_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupeq_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksloeq_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupeq_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksloge_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupge_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksloge_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupge_mvml, 
"V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslole_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksuple_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslole_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksuple_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslonum_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupnum_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslonum_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupnum_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslonan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslonan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslogtnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupgtnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslogtnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupgtnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksloltnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupltnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksloltnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupltnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslonenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupnenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslonenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupnenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksloeqnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupeqnan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksloeqnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupeqnan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslogenan_mvl, "V256bV256dUi", "n") 
+BUILTIN(__builtin_ve_vl_pvfmksupgenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslogenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksupgenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslolenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksuplenan_mvl, "V256bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslolenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksuplenan_mvml, "V256bV256dV256bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksgt_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksgt_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslt_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslt_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksne_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksne_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkseq_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkseq_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksge_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksge_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksle_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksle_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksnum_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksnum_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksnan_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksnan_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksgtnan_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksgtnan_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksltnan_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksltnan_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksnenan_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksnenan_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkseqnan_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkseqnan_MvMl, 
"V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksgenan_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmksgenan_MvMl, "V512bV256dV512bUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslenan_Mvl, "V512bV256dUi", "n") +BUILTIN(__builtin_ve_vl_pvfmkslenan_MvMl, "V512bV256dV512bUi", "n") BUILTIN(__builtin_ve_vl_vsumwsx_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsumwsx_vvml, "V256dV256dV256bUi", "n") BUILTIN(__builtin_ve_vl_vsumwzx_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsumwzx_vvml, "V256dV256dV256bUi", "n") BUILTIN(__builtin_ve_vl_vsuml_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vsuml_vvml, "V256dV256dV256bUi", "n") BUILTIN(__builtin_ve_vl_vfsumd_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsumd_vvml, "V256dV256dV256bUi", "n") BUILTIN(__builtin_ve_vl_vfsums_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vfsums_vvml, "V256dV256dV256bUi", "n") BUILTIN(__builtin_ve_vl_vrmaxswfstsx_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vrmaxswfstsx_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vrmaxswlstsx_vvl, "V256dV256dUi", "n") @@ -614,36 +1171,82 @@ BUILTIN(__builtin_ve_vl_vfrminsfst_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfrminslst_vvl, "V256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vfrminslst_vvvl, "V256dV256dV256dUi", "n") BUILTIN(__builtin_ve_vl_vrand_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrand_vvml, "V256dV256dV256bUi", "n") BUILTIN(__builtin_ve_vl_vror_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vror_vvml, "V256dV256dV256bUi", "n") BUILTIN(__builtin_ve_vl_vrxor_vvl, "V256dV256dUi", "n") +BUILTIN(__builtin_ve_vl_vrxor_vvml, "V256dV256dV256bUi", "n") BUILTIN(__builtin_ve_vl_vgt_vvssl, "V256dV256dLUiLUiUi", "n") BUILTIN(__builtin_ve_vl_vgt_vvssvl, "V256dV256dLUiLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgt_vvssml, "V256dV256dLUiLUiV256bUi", "n") +BUILTIN(__builtin_ve_vl_vgt_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vgtnc_vvssl, "V256dV256dLUiLUiUi", 
"n") BUILTIN(__builtin_ve_vl_vgtnc_vvssvl, "V256dV256dLUiLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtnc_vvssml, "V256dV256dLUiLUiV256bUi", "n") +BUILTIN(__builtin_ve_vl_vgtnc_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vgtu_vvssl, "V256dV256dLUiLUiUi", "n") BUILTIN(__builtin_ve_vl_vgtu_vvssvl, "V256dV256dLUiLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtu_vvssml, "V256dV256dLUiLUiV256bUi", "n") +BUILTIN(__builtin_ve_vl_vgtu_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vgtunc_vvssl, "V256dV256dLUiLUiUi", "n") BUILTIN(__builtin_ve_vl_vgtunc_vvssvl, "V256dV256dLUiLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtunc_vvssml, "V256dV256dLUiLUiV256bUi", "n") +BUILTIN(__builtin_ve_vl_vgtunc_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vgtlsx_vvssl, "V256dV256dLUiLUiUi", "n") BUILTIN(__builtin_ve_vl_vgtlsx_vvssvl, "V256dV256dLUiLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtlsx_vvssml, "V256dV256dLUiLUiV256bUi", "n") +BUILTIN(__builtin_ve_vl_vgtlsx_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vgtlsxnc_vvssl, "V256dV256dLUiLUiUi", "n") BUILTIN(__builtin_ve_vl_vgtlsxnc_vvssvl, "V256dV256dLUiLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtlsxnc_vvssml, "V256dV256dLUiLUiV256bUi", "n") +BUILTIN(__builtin_ve_vl_vgtlsxnc_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vgtlzx_vvssl, "V256dV256dLUiLUiUi", "n") BUILTIN(__builtin_ve_vl_vgtlzx_vvssvl, "V256dV256dLUiLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtlzx_vvssml, "V256dV256dLUiLUiV256bUi", "n") +BUILTIN(__builtin_ve_vl_vgtlzx_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vgtlzxnc_vvssl, "V256dV256dLUiLUiUi", "n") BUILTIN(__builtin_ve_vl_vgtlzxnc_vvssvl, "V256dV256dLUiLUiV256dUi", "n") +BUILTIN(__builtin_ve_vl_vgtlzxnc_vvssml, "V256dV256dLUiLUiV256bUi", "n") +BUILTIN(__builtin_ve_vl_vgtlzxnc_vvssmvl, "V256dV256dLUiLUiV256bV256dUi", "n") BUILTIN(__builtin_ve_vl_vsc_vvssl, 
"vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vsc_vvssml, "vV256dV256dLUiLUiV256bUi", "n") BUILTIN(__builtin_ve_vl_vscnc_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vscnc_vvssml, "vV256dV256dLUiLUiV256bUi", "n") BUILTIN(__builtin_ve_vl_vscot_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vscot_vvssml, "vV256dV256dLUiLUiV256bUi", "n") BUILTIN(__builtin_ve_vl_vscncot_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vscncot_vvssml, "vV256dV256dLUiLUiV256bUi", "n") BUILTIN(__builtin_ve_vl_vscu_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vscu_vvssml, "vV256dV256dLUiLUiV256bUi", "n") BUILTIN(__builtin_ve_vl_vscunc_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vscunc_vvssml, "vV256dV256dLUiLUiV256bUi", "n") BUILTIN(__builtin_ve_vl_vscuot_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vscuot_vvssml, "vV256dV256dLUiLUiV256bUi", "n") BUILTIN(__builtin_ve_vl_vscuncot_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vscuncot_vvssml, "vV256dV256dLUiLUiV256bUi", "n") BUILTIN(__builtin_ve_vl_vscl_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vscl_vvssml, "vV256dV256dLUiLUiV256bUi", "n") BUILTIN(__builtin_ve_vl_vsclnc_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vsclnc_vvssml, "vV256dV256dLUiLUiV256bUi", "n") BUILTIN(__builtin_ve_vl_vsclot_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vsclot_vvssml, "vV256dV256dLUiLUiV256bUi", "n") BUILTIN(__builtin_ve_vl_vsclncot_vvssl, "vV256dV256dLUiLUiUi", "n") +BUILTIN(__builtin_ve_vl_vsclncot_vvssml, "vV256dV256dLUiLUiV256bUi", "n") +BUILTIN(__builtin_ve_vl_andm_mmm, "V256bV256bV256b", "n") +BUILTIN(__builtin_ve_vl_andm_MMM, "V512bV512bV512b", "n") +BUILTIN(__builtin_ve_vl_orm_mmm, "V256bV256bV256b", "n") +BUILTIN(__builtin_ve_vl_orm_MMM, "V512bV512bV512b", "n") +BUILTIN(__builtin_ve_vl_xorm_mmm, "V256bV256bV256b", "n") +BUILTIN(__builtin_ve_vl_xorm_MMM, "V512bV512bV512b", "n") 
+BUILTIN(__builtin_ve_vl_eqvm_mmm, "V256bV256bV256b", "n") +BUILTIN(__builtin_ve_vl_eqvm_MMM, "V512bV512bV512b", "n") +BUILTIN(__builtin_ve_vl_nndm_mmm, "V256bV256bV256b", "n") +BUILTIN(__builtin_ve_vl_nndm_MMM, "V512bV512bV512b", "n") +BUILTIN(__builtin_ve_vl_negm_mm, "V256bV256b", "n") +BUILTIN(__builtin_ve_vl_negm_MM, "V512bV512b", "n") +BUILTIN(__builtin_ve_vl_pcvm_sml, "LUiV256bUi", "n") +BUILTIN(__builtin_ve_vl_lzvm_sml, "LUiV256bUi", "n") +BUILTIN(__builtin_ve_vl_tovm_sml, "LUiV256bUi", "n") BUILTIN(__builtin_ve_vl_lcr_sss, "LUiLUiLUi", "n") BUILTIN(__builtin_ve_vl_scr_sss, "vLUiLUiLUi", "n") BUILTIN(__builtin_ve_vl_tscr_ssss, "LUiLUiLUiLUi", "n") diff --git a/clang/lib/Headers/velintrin.h b/clang/lib/Headers/velintrin.h index c12054a9e965..69b1fba296d4 100644 --- a/clang/lib/Headers/velintrin.h +++ b/clang/lib/Headers/velintrin.h @@ -12,9 +12,7 @@ // Vector registers typedef double __vr __attribute__((__vector_size__(2048))); -// TODO: Vector mask registers -// Depend on https://reviews.llvm.org/D88905 -#if 0 +// Vector mask registers #if __STDC_VERSION__ >= 199901L // For C99 typedef _Bool __vm __attribute__((ext_vector_type(256))); @@ -30,7 +28,6 @@ typedef bool __vm512 __attribute__((ext_vector_type(512))); #error need C++ or C99 to use vector intrinsics for VE #endif #endif -#endif enum VShuffleCodes { VE_VSHUFFLE_YUYU = 0, diff --git a/clang/test/CodeGen/VE/ve-velintrin.c b/clang/test/CodeGen/VE/ve-velintrin.c index 44c3309086e8..bea291746226 100644 --- a/clang/test/CodeGen/VE/ve-velintrin.c +++ b/clang/test/CodeGen/VE/ve-velintrin.c @@ -9,6 +9,8 @@ long v1, v2, v3; double vd1; float vf1; __vr vr1, vr2, vr3, vr4; +__vm256 vm1, vm2, vm3; +__vm512 vm1_512, vm2_512, vm3_512; void __attribute__((noinline)) test_vld_vssl(char* p, long idx) { @@ -242,6 +244,13 @@ test_vst_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vst_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vst_vssml + // CHECK: call void @llvm.ve.vl.vst.vssml(<256 x double> 
%{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vst_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vstnc_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstnc_vssl // CHECK: call void @llvm.ve.vl.vstnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -249,6 +258,13 @@ test_vstnc_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vstnc_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstnc_vssml + // CHECK: call void @llvm.ve.vl.vstnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstnc_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vstot_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstot_vssl // CHECK: call void @llvm.ve.vl.vstot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -256,6 +272,13 @@ test_vstot_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vstot_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstot_vssml + // CHECK: call void @llvm.ve.vl.vstot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstot_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vstncot_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstncot_vssl // CHECK: call void @llvm.ve.vl.vstncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -263,6 +286,13 @@ test_vstncot_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vstncot_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstncot_vssml + // CHECK: call void @llvm.ve.vl.vstncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstncot_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vstu_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstu_vssl // CHECK: call void @llvm.ve.vl.vstu.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -270,6 +300,13 @@ test_vstu_vssl(char* 
p, long idx) { } void __attribute__((noinline)) +test_vstu_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstu_vssml + // CHECK: call void @llvm.ve.vl.vstu.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstu_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vstunc_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstunc_vssl // CHECK: call void @llvm.ve.vl.vstunc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -277,6 +314,13 @@ test_vstunc_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vstunc_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstunc_vssml + // CHECK: call void @llvm.ve.vl.vstunc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstunc_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vstuot_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstuot_vssl // CHECK: call void @llvm.ve.vl.vstuot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -284,6 +328,13 @@ test_vstuot_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vstuot_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstuot_vssml + // CHECK: call void @llvm.ve.vl.vstuot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstuot_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vstuncot_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstuncot_vssl // CHECK: call void @llvm.ve.vl.vstuncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -291,6 +342,13 @@ test_vstuncot_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vstuncot_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstuncot_vssml + // CHECK: call void @llvm.ve.vl.vstuncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstuncot_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) 
test_vstl_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstl_vssl // CHECK: call void @llvm.ve.vl.vstl.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -298,6 +356,13 @@ test_vstl_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vstl_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstl_vssml + // CHECK: call void @llvm.ve.vl.vstl.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstl_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vstlnc_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstlnc_vssl // CHECK: call void @llvm.ve.vl.vstlnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -305,6 +370,13 @@ test_vstlnc_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vstlnc_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstlnc_vssml + // CHECK: call void @llvm.ve.vl.vstlnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstlnc_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vstlot_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstlot_vssl // CHECK: call void @llvm.ve.vl.vstlot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -312,6 +384,13 @@ test_vstlot_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vstlot_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstlot_vssml + // CHECK: call void @llvm.ve.vl.vstlot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstlot_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vstlncot_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstlncot_vssl // CHECK: call void @llvm.ve.vl.vstlncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -319,6 +398,13 @@ test_vstlncot_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vstlncot_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstlncot_vssml + // 
CHECK: call void @llvm.ve.vl.vstlncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstlncot_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vst2d_vssl(char* p, long idx) { // CHECK-LABEL: @test_vst2d_vssl // CHECK: call void @llvm.ve.vl.vst2d.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -326,6 +412,13 @@ test_vst2d_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vst2d_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vst2d_vssml + // CHECK: call void @llvm.ve.vl.vst2d.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vst2d_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vst2dnc_vssl(char* p, long idx) { // CHECK-LABEL: @test_vst2dnc_vssl // CHECK: call void @llvm.ve.vl.vst2dnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -333,6 +426,13 @@ test_vst2dnc_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vst2dnc_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vst2dnc_vssml + // CHECK: call void @llvm.ve.vl.vst2dnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vst2dnc_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vst2dot_vssl(char* p, long idx) { // CHECK-LABEL: @test_vst2dot_vssl // CHECK: call void @llvm.ve.vl.vst2dot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -340,6 +440,13 @@ test_vst2dot_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vst2dot_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vst2dot_vssml + // CHECK: call void @llvm.ve.vl.vst2dot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vst2dot_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vst2dncot_vssl(char* p, long idx) { // CHECK-LABEL: @test_vst2dncot_vssl // CHECK: call void @llvm.ve.vl.vst2dncot.vssl(<256 x 
double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -347,6 +454,13 @@ test_vst2dncot_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vst2dncot_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vst2dncot_vssml + // CHECK: call void @llvm.ve.vl.vst2dncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vst2dncot_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vstu2d_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstu2d_vssl // CHECK: call void @llvm.ve.vl.vstu2d.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -354,6 +468,13 @@ test_vstu2d_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vstu2d_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstu2d_vssml + // CHECK: call void @llvm.ve.vl.vstu2d.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstu2d_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vstu2dnc_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstu2dnc_vssl // CHECK: call void @llvm.ve.vl.vstu2dnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -361,6 +482,13 @@ test_vstu2dnc_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vstu2dnc_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstu2dnc_vssml + // CHECK: call void @llvm.ve.vl.vstu2dnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstu2dnc_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vstu2dot_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstu2dot_vssl // CHECK: call void @llvm.ve.vl.vstu2dot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -368,6 +496,13 @@ test_vstu2dot_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vstu2dot_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstu2dot_vssml + // CHECK: call void @llvm.ve.vl.vstu2dot.vssml(<256 x double> %{{.*}}, i64 
%{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstu2dot_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vstu2dncot_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstu2dncot_vssl // CHECK: call void @llvm.ve.vl.vstu2dncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -375,6 +510,13 @@ test_vstu2dncot_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vstu2dncot_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstu2dncot_vssml + // CHECK: call void @llvm.ve.vl.vstu2dncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstu2dncot_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vstl2d_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstl2d_vssl // CHECK: call void @llvm.ve.vl.vstl2d.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -382,6 +524,13 @@ test_vstl2d_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vstl2d_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstl2d_vssml + // CHECK: call void @llvm.ve.vl.vstl2d.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstl2d_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vstl2dnc_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstl2dnc_vssl // CHECK: call void @llvm.ve.vl.vstl2dnc.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -389,6 +538,13 @@ test_vstl2dnc_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vstl2dnc_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstl2dnc_vssml + // CHECK: call void @llvm.ve.vl.vstl2dnc.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstl2dnc_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vstl2dot_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstl2dot_vssl // CHECK: call void @llvm.ve.vl.vstl2dot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* 
%{{.*}}, i32 256) @@ -396,6 +552,13 @@ test_vstl2dot_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vstl2dot_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstl2dot_vssml + // CHECK: call void @llvm.ve.vl.vstl2dot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstl2dot_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_vstl2dncot_vssl(char* p, long idx) { // CHECK-LABEL: @test_vstl2dncot_vssl // CHECK: call void @llvm.ve.vl.vstl2dncot.vssl(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -403,6 +566,13 @@ test_vstl2dncot_vssl(char* p, long idx) { } void __attribute__((noinline)) +test_vstl2dncot_vssml(char* p, long idx) { + // CHECK-LABEL: @test_vstl2dncot_vssml + // CHECK: call void @llvm.ve.vl.vstl2dncot.vssml(<256 x double> %{{.*}}, i64 %{{.*}}, i8* %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vstl2dncot_vssml(vr1, idx, p, vm1, 256); +} + +void __attribute__((noinline)) test_pfchv_ssl(char* p, long idx) { // CHECK-LABEL: @test_pfchv_ssl // CHECK: call void @llvm.ve.vl.pfchv.ssl(i64 %{{.*}}, i8* %{{.*}}, i32 256) @@ -445,6 +615,34 @@ test_lvss_svs(int idx) { } void __attribute__((noinline)) +test_lvm_mmss(unsigned long sy, unsigned long sz) { + // CHECK-LABEL: @test_lvm_mmss + // CHECK: call <256 x i1> @llvm.ve.vl.lvm.mmss(<256 x i1> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) + vm1 = _vel_lvm_mmss(vm2, sy, sz); +} + +void __attribute__((noinline)) +test_lvm_MMss(unsigned long sy, unsigned long sz) { + // CHECK-LABEL: @test_lvm_MMss + // CHECK: call <512 x i1> @llvm.ve.vl.lvm.MMss(<512 x i1> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}) + vm1_512 = _vel_lvm_MMss(vm2_512, sy, sz); +} + +void __attribute__((noinline)) +test_svm_sms(unsigned long sy) { + // CHECK-LABEL: @test_svm_sms + // CHECK: call i64 @llvm.ve.vl.svm.sms(<256 x i1> %{{.*}}, i64 %{{.*}}) + v1 = _vel_svm_sms(vm2, sy); +} + +void __attribute__((noinline)) +test_svm_sMs(unsigned long sy) { + // CHECK-LABEL: @test_svm_sMs 
+ // CHECK: call i64 @llvm.ve.vl.svm.sMs(<512 x i1> %{{.*}}, i64 %{{.*}}) + v1 = _vel_svm_sMs(vm2_512, sy); +} + +void __attribute__((noinline)) test_vbrdd_vsl() { // CHECK-LABEL: @test_vbrdd_vsl // CHECK: call <256 x double> @llvm.ve.vl.vbrdd.vsl(double %{{.*}}, i32 256) @@ -459,6 +657,13 @@ test_vbrdd_vsvl() { } void __attribute__((noinline)) +test_vbrdd_vsmvl() { + // CHECK-LABEL: @test_vbrdd_vsmvl + // CHECK: call <256 x double> @llvm.ve.vl.vbrdd.vsmvl(double %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr1 = _vel_vbrdd_vsmvl(vd1, vm1, vr1, 256); +} + +void __attribute__((noinline)) test_vbrdl_vsl() { // CHECK-LABEL: @test_vbrdl_vsl // CHECK: call <256 x double> @llvm.ve.vl.vbrdl.vsl(i64 %{{.*}}, i32 256) @@ -473,6 +678,13 @@ test_vbrdl_vsvl() { } void __attribute__((noinline)) +test_vbrdl_vsmvl() { + // CHECK-LABEL: @test_vbrdl_vsmvl + // CHECK: call <256 x double> @llvm.ve.vl.vbrdl.vsmvl(i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr1 = _vel_vbrdl_vsmvl(v1, vm1, vr1, 256); +} + +void __attribute__((noinline)) test_vbrds_vsl() { // CHECK-LABEL: @test_vbrds_vsl // CHECK: call <256 x double> @llvm.ve.vl.vbrds.vsl(float %{{.*}}, i32 256) @@ -487,6 +699,13 @@ test_vbrds_vsvl() { } void __attribute__((noinline)) +test_vbrds_vsmvl() { + // CHECK-LABEL: @test_vbrds_vsmvl + // CHECK: call <256 x double> @llvm.ve.vl.vbrds.vsmvl(float %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr1 = _vel_vbrds_vsmvl(vf1, vm1, vr1, 256); +} + +void __attribute__((noinline)) test_vbrdw_vsl() { // CHECK-LABEL: @test_vbrdw_vsl // CHECK: call <256 x double> @llvm.ve.vl.vbrdw.vsl(i32 %{{.*}}, i32 256) @@ -501,6 +720,13 @@ test_vbrdw_vsvl() { } void __attribute__((noinline)) +test_vbrdw_vsmvl() { + // CHECK-LABEL: @test_vbrdw_vsmvl + // CHECK: call <256 x double> @llvm.ve.vl.vbrdw.vsmvl(i32 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr1 = _vel_vbrdw_vsmvl(v1, vm1, vr1, 256); +} + +void 
__attribute__((noinline)) test_pvbrd_vsl() { // CHECK-LABEL: @test_pvbrd_vsl // CHECK: call <256 x double> @llvm.ve.vl.pvbrd.vsl(i64 %{{.*}}, i32 256) @@ -515,6 +741,13 @@ test_pvbrd_vsvl() { } void __attribute__((noinline)) +test_pvbrd_vsmvl() { + // CHECK-LABEL: @test_pvbrd_vsmvl + // CHECK: call <256 x double> @llvm.ve.vl.pvbrd.vsMvl(i64 %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr1 = _vel_pvbrd_vsMvl(v1, vm1_512, vr1, 256); +} + +void __attribute__((noinline)) test_vmv_vsvl() { // CHECK-LABEL: @test_vmv_vsvl // CHECK: call <256 x double> @llvm.ve.vl.vmv.vsvl(i32 %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -529,6 +762,13 @@ test_vmv_vsvvl() { } void __attribute__((noinline)) +test_vmv_vsvmvl() { + // CHECK-LABEL: @test_vmv_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vmv.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr1 = _vel_vmv_vsvmvl(v1, vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_vaddul_vvvl() { // CHECK-LABEL: @test_vaddul_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vaddul.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -557,6 +797,20 @@ test_vaddul_vsvvl() { } void __attribute__((noinline)) +test_vaddul_vvvmvl() { + // CHECK-LABEL: @test_vaddul_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vaddul.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vaddul_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vaddul_vsvmvl() { + // CHECK-LABEL: @test_vaddul_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vaddul.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vaddul_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vadduw_vvvl() { // CHECK-LABEL: @test_vadduw_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vadduw.vvvl(<256 x double> %{{.*}}, <256 x double> 
%{{.*}}, i32 256) @@ -585,6 +839,20 @@ test_vadduw_vsvvl() { } void __attribute__((noinline)) +test_vadduw_vvvmvl() { + // CHECK-LABEL: @test_vadduw_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vadduw.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vadduw_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vadduw_vsvmvl() { + // CHECK-LABEL: @test_vadduw_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vadduw.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vadduw_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvaddu_vvvl() { // CHECK-LABEL: @test_pvaddu_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvaddu.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -613,6 +881,20 @@ test_pvaddu_vsvvl() { } void __attribute__((noinline)) +test_pvaddu_vvvMvl() { + // CHECK-LABEL: @test_pvaddu_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvaddu.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvaddu_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvaddu_vsvMvl() { + // CHECK-LABEL: @test_pvaddu_vsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvaddu.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvaddu_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vaddswsx_vvvl() { // CHECK-LABEL: @test_vaddswsx_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vaddswsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -641,6 +923,20 @@ test_vaddswsx_vsvvl() { } void __attribute__((noinline)) +test_vaddswsx_vvvmvl() { + // CHECK-LABEL: @test_vaddswsx_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vaddswsx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> 
%{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vaddswsx_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vaddswsx_vsvmvl() { + // CHECK-LABEL: @test_vaddswsx_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vaddswsx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vaddswsx_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vaddswzx_vvvl() { // CHECK-LABEL: @test_vaddswzx_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vaddswzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -669,6 +965,20 @@ test_vaddswzx_vsvvl() { } void __attribute__((noinline)) +test_vaddswzx_vvvmvl() { + // CHECK-LABEL: @test_vaddswzx_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vaddswzx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vaddswzx_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vaddswzx_vsvmvl() { + // CHECK-LABEL: @test_vaddswzx_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vaddswzx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vaddswzx_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvadds_vvvl() { // CHECK-LABEL: @test_pvadds_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvadds.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -697,6 +1007,20 @@ test_pvadds_vsvvl() { } void __attribute__((noinline)) +test_pvadds_vvvMvl() { + // CHECK-LABEL: @test_pvadds_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvadds.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvadds_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvadds_vsvMvl() { + // CHECK-LABEL: @test_pvadds_vsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvadds.vsvMvl(i64 
%{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvadds_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vaddsl_vvvl() { // CHECK-LABEL: @test_vaddsl_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vaddsl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -725,6 +1049,20 @@ test_vaddsl_vsvvl() { } void __attribute__((noinline)) +test_vaddsl_vvvmvl() { + // CHECK-LABEL: @test_vaddsl_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vaddsl.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vaddsl_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vaddsl_vsvmvl() { + // CHECK-LABEL: @test_vaddsl_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vaddsl.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vaddsl_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vsubul_vvvl() { // CHECK-LABEL: @test_vsubul_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vsubul.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -753,6 +1091,20 @@ test_vsubul_vsvvl() { } void __attribute__((noinline)) +test_vsubul_vvvmvl() { + // CHECK-LABEL: @test_vsubul_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsubul.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsubul_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vsubul_vsvmvl() { + // CHECK-LABEL: @test_vsubul_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsubul.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsubul_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vsubuw_vvvl() { // CHECK-LABEL: @test_vsubuw_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vsubuw.vvvl(<256 
x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -781,6 +1133,20 @@ test_vsubuw_vsvvl() { } void __attribute__((noinline)) +test_vsubuw_vvvmvl() { + // CHECK-LABEL: @test_vsubuw_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsubuw.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsubuw_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vsubuw_vsvmvl() { + // CHECK-LABEL: @test_vsubuw_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsubuw.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsubuw_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvsubu_vvvl() { // CHECK-LABEL: @test_pvsubu_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvsubu.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -809,6 +1175,20 @@ test_pvsubu_vsvvl() { } void __attribute__((noinline)) +test_pvsubu_vvvMvl() { + // CHECK-LABEL: @test_pvsubu_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvsubu.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvsubu_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvsubu_vsvMvl() { + // CHECK-LABEL: @test_pvsubu_vsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvsubu.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvsubu_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vsubswsx_vvvl() { // CHECK-LABEL: @test_vsubswsx_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vsubswsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -837,6 +1217,20 @@ test_vsubswsx_vsvvl() { } void __attribute__((noinline)) +test_vsubswsx_vvvmvl() { + // CHECK-LABEL: @test_vsubswsx_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsubswsx.vvvmvl(<256 x double> 
%{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsubswsx_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vsubswsx_vsvmvl() { + // CHECK-LABEL: @test_vsubswsx_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsubswsx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsubswsx_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vsubswzx_vvvl() { // CHECK-LABEL: @test_vsubswzx_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vsubswzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -865,6 +1259,20 @@ test_vsubswzx_vsvvl() { } void __attribute__((noinline)) +test_vsubswzx_vvvmvl() { + // CHECK-LABEL: @test_vsubswzx_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsubswzx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsubswzx_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vsubswzx_vsvmvl() { + // CHECK-LABEL: @test_vsubswzx_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsubswzx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsubswzx_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvsubs_vvvl() { // CHECK-LABEL: @test_pvsubs_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvsubs.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -893,6 +1301,20 @@ test_pvsubs_vsvvl() { } void __attribute__((noinline)) +test_pvsubs_vvvMvl() { + // CHECK-LABEL: @test_pvsubs_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvsubs.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvsubs_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvsubs_vsvMvl() { + // CHECK-LABEL: @test_pvsubs_vsvMvl + // CHECK: call 
<256 x double> @llvm.ve.vl.pvsubs.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvsubs_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vsubsl_vvvl() { // CHECK-LABEL: @test_vsubsl_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vsubsl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -921,6 +1343,20 @@ test_vsubsl_vsvvl() { } void __attribute__((noinline)) +test_vsubsl_vvvmvl() { + // CHECK-LABEL: @test_vsubsl_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsubsl.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsubsl_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vsubsl_vsvmvl() { + // CHECK-LABEL: @test_vsubsl_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsubsl.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsubsl_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vmulul_vvvl() { // CHECK-LABEL: @test_vmulul_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vmulul.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -949,6 +1385,20 @@ test_vmulul_vsvvl() { } void __attribute__((noinline)) +test_vmulul_vvvmvl() { + // CHECK-LABEL: @test_vmulul_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vmulul.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmulul_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vmulul_vsvmvl() { + // CHECK-LABEL: @test_vmulul_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vmulul.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmulul_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vmuluw_vvvl() { // CHECK-LABEL: @test_vmuluw_vvvl // CHECK: call 
<256 x double> @llvm.ve.vl.vmuluw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -977,6 +1427,20 @@ test_vmuluw_vsvvl() { } void __attribute__((noinline)) +test_vmuluw_vvvmvl() { + // CHECK-LABEL: @test_vmuluw_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vmuluw.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmuluw_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vmuluw_vsvmvl() { + // CHECK-LABEL: @test_vmuluw_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vmuluw.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmuluw_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vmulswsx_vvvl() { // CHECK-LABEL: @test_vmulswsx_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vmulswsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1005,6 +1469,20 @@ test_vmulswsx_vsvvl() { } void __attribute__((noinline)) +test_vmulswsx_vvvmvl() { + // CHECK-LABEL: @test_vmulswsx_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vmulswsx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmulswsx_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vmulswsx_vsvmvl() { + // CHECK-LABEL: @test_vmulswsx_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vmulswsx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmulswsx_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vmulswzx_vvvl() { // CHECK-LABEL: @test_vmulswzx_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vmulswzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1033,6 +1511,20 @@ test_vmulswzx_vsvvl() { } void __attribute__((noinline)) +test_vmulswzx_vvvmvl() { + // CHECK-LABEL: @test_vmulswzx_vvvmvl + // CHECK: call 
<256 x double> @llvm.ve.vl.vmulswzx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmulswzx_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vmulswzx_vsvmvl() { + // CHECK-LABEL: @test_vmulswzx_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vmulswzx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmulswzx_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vmulsl_vvvl() { // CHECK-LABEL: @test_vmulsl_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vmulsl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1061,6 +1553,20 @@ test_vmulsl_vsvvl() { } void __attribute__((noinline)) +test_vmulsl_vvvmvl() { + // CHECK-LABEL: @test_vmulsl_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vmulsl.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmulsl_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vmulsl_vsvmvl() { + // CHECK-LABEL: @test_vmulsl_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vmulsl.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmulsl_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vmulslw_vvvl() { // CHECK-LABEL: @test_vmulslw_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vmulslw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1117,6 +1623,20 @@ test_vdivul_vsvvl() { } void __attribute__((noinline)) +test_vdivul_vvvmvl() { + // CHECK-LABEL: @test_vdivul_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vdivul.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vdivul_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vdivul_vsvmvl() { + // CHECK-LABEL: 
@test_vdivul_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vdivul.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vdivul_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vdivuw_vvvl() { // CHECK-LABEL: @test_vdivuw_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vdivuw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1145,6 +1665,20 @@ test_vdivuw_vsvvl() { } void __attribute__((noinline)) +test_vdivuw_vvvmvl() { + // CHECK-LABEL: @test_vdivuw_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vdivuw.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vdivuw_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vdivuw_vsvmvl() { + // CHECK-LABEL: @test_vdivuw_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vdivuw.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vdivuw_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vdivul_vvsl() { // CHECK-LABEL: @test_vdivul_vvsl // CHECK: call <256 x double> @llvm.ve.vl.vdivul.vvsl(<256 x double> %{{.*}}, i64 %{{.*}}, i32 256) @@ -1159,6 +1693,13 @@ test_vdivul_vvsvl() { } void __attribute__((noinline)) +test_vdivul_vvsmvl() { + // CHECK-LABEL: @test_vdivul_vvsmvl + // CHECK: call <256 x double> @llvm.ve.vl.vdivul.vvsmvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vdivul_vvsmvl(vr1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vdivuw_vvsl() { // CHECK-LABEL: @test_vdivuw_vvsl // CHECK: call <256 x double> @llvm.ve.vl.vdivuw.vvsl(<256 x double> %{{.*}}, i32 %{{.*}}, i32 256) @@ -1173,6 +1714,13 @@ test_vdivuw_vvsvl() { } void __attribute__((noinline)) +test_vdivuw_vvsmvl() { + // CHECK-LABEL: @test_vdivuw_vvsmvl + // CHECK: call <256 x double> 
@llvm.ve.vl.vdivuw.vvsmvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vdivuw_vvsmvl(vr1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vdivswsx_vvvl() { // CHECK-LABEL: @test_vdivswsx_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vdivswsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1201,6 +1749,20 @@ test_vdivswsx_vsvvl() { } void __attribute__((noinline)) +test_vdivswsx_vvvmvl() { + // CHECK-LABEL: @test_vdivswsx_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vdivswsx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vdivswsx_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vdivswsx_vsvmvl() { + // CHECK-LABEL: @test_vdivswsx_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vdivswsx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vdivswsx_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vdivswzx_vvvl() { // CHECK-LABEL: @test_vdivswzx_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vdivswzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1229,6 +1791,20 @@ test_vdivswzx_vsvvl() { } void __attribute__((noinline)) +test_vdivswzx_vvvmvl() { + // CHECK-LABEL: @test_vdivswzx_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vdivswzx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vdivswzx_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vdivswzx_vsvmvl() { + // CHECK-LABEL: @test_vdivswzx_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vdivswzx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vdivswzx_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vdivswsx_vvsl() { // CHECK-LABEL: 
@test_vdivswsx_vvsl // CHECK: call <256 x double> @llvm.ve.vl.vdivswsx.vvsl(<256 x double> %{{.*}}, i32 %{{.*}}, i32 256) @@ -1243,6 +1819,13 @@ test_vdivswsx_vvsvl() { } void __attribute__((noinline)) +test_vdivswsx_vvsmvl() { + // CHECK-LABEL: @test_vdivswsx_vvsmvl + // CHECK: call <256 x double> @llvm.ve.vl.vdivswsx.vvsmvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vdivswsx_vvsmvl(vr1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vdivswzx_vvsl() { // CHECK-LABEL: @test_vdivswzx_vvsl // CHECK: call <256 x double> @llvm.ve.vl.vdivswzx.vvsl(<256 x double> %{{.*}}, i32 %{{.*}}, i32 256) @@ -1257,6 +1840,13 @@ test_vdivswzx_vvsvl() { } void __attribute__((noinline)) +test_vdivswzx_vvsmvl() { + // CHECK-LABEL: @test_vdivswzx_vvsmvl + // CHECK: call <256 x double> @llvm.ve.vl.vdivswzx.vvsmvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vdivswzx_vvsmvl(vr1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vdivsl_vvvl() { // CHECK-LABEL: @test_vdivsl_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vdivsl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1285,6 +1875,20 @@ test_vdivsl_vsvvl() { } void __attribute__((noinline)) +test_vdivsl_vvvmvl() { + // CHECK-LABEL: @test_vdivsl_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vdivsl.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vdivsl_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vdivsl_vsvmvl() { + // CHECK-LABEL: @test_vdivsl_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vdivsl.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vdivsl_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vdivsl_vvsl() { // CHECK-LABEL: @test_vdivsl_vvsl // CHECK: call <256 x double> 
@llvm.ve.vl.vdivsl.vvsl(<256 x double> %{{.*}}, i64 %{{.*}}, i32 256) @@ -1299,6 +1903,13 @@ test_vdivsl_vvsvl() { } void __attribute__((noinline)) +test_vdivsl_vvsmvl() { + // CHECK-LABEL: @test_vdivsl_vvsmvl + // CHECK: call <256 x double> @llvm.ve.vl.vdivsl.vvsmvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vdivsl_vvsmvl(vr1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vcmpul_vvvl() { // CHECK-LABEL: @test_vcmpul_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vcmpul.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1327,6 +1938,20 @@ test_vcmpul_vsvvl() { } void __attribute__((noinline)) +test_vcmpul_vvvmvl() { + // CHECK-LABEL: @test_vcmpul_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcmpul.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcmpul_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vcmpul_vsvmvl() { + // CHECK-LABEL: @test_vcmpul_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcmpul.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcmpul_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vcmpuw_vvvl() { // CHECK-LABEL: @test_vcmpuw_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vcmpuw.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1355,6 +1980,20 @@ test_vcmpuw_vsvvl() { } void __attribute__((noinline)) +test_vcmpuw_vvvmvl() { + // CHECK-LABEL: @test_vcmpuw_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcmpuw.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcmpuw_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vcmpuw_vsvmvl() { + // CHECK-LABEL: @test_vcmpuw_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcmpuw.vsvmvl(i32 %{{.*}}, <256 x 
double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcmpuw_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvcmpu_vvvl() { // CHECK-LABEL: @test_pvcmpu_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvcmpu.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1383,6 +2022,20 @@ test_pvcmpu_vsvvl() { } void __attribute__((noinline)) +test_pvcmpu_vvvMvl() { + // CHECK-LABEL: @test_pvcmpu_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvcmpu.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvcmpu_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvcmpu_vsvMvl() { + // CHECK-LABEL: @test_pvcmpu_vsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvcmpu.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvcmpu_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vcmpswsx_vvvl() { // CHECK-LABEL: @test_vcmpswsx_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vcmpswsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1411,6 +2064,20 @@ test_vcmpswsx_vsvvl() { } void __attribute__((noinline)) +test_vcmpswsx_vvvmvl() { + // CHECK-LABEL: @test_vcmpswsx_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcmpswsx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcmpswsx_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vcmpswsx_vsvmvl() { + // CHECK-LABEL: @test_vcmpswsx_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcmpswsx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcmpswsx_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vcmpswzx_vvvl() { // CHECK-LABEL: @test_vcmpswzx_vvvl // CHECK: call <256 x double> 
@llvm.ve.vl.vcmpswzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1439,6 +2106,20 @@ test_vcmpswzx_vsvvl() { } void __attribute__((noinline)) +test_vcmpswzx_vvvmvl() { + // CHECK-LABEL: @test_vcmpswzx_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcmpswzx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcmpswzx_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vcmpswzx_vsvmvl() { + // CHECK-LABEL: @test_vcmpswzx_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcmpswzx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcmpswzx_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvcmps_vvvl() { // CHECK-LABEL: @test_pvcmps_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvcmps.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1467,6 +2148,20 @@ test_pvcmps_vsvvl() { } void __attribute__((noinline)) +test_pvcmps_vvvMvl() { + // CHECK-LABEL: @test_pvcmps_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvcmps.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvcmps_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvcmps_vsvMvl() { + // CHECK-LABEL: @test_pvcmps_vsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvcmps.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvcmps_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vcmpsl_vvvl() { // CHECK-LABEL: @test_vcmpsl_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vcmpsl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1495,6 +2190,20 @@ test_vcmpsl_vsvvl() { } void __attribute__((noinline)) +test_vcmpsl_vvvmvl() { + // CHECK-LABEL: @test_vcmpsl_vvvmvl + // CHECK: call <256 x double> 
@llvm.ve.vl.vcmpsl.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcmpsl_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vcmpsl_vsvmvl() { + // CHECK-LABEL: @test_vcmpsl_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcmpsl.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcmpsl_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vmaxswsx_vvvl() { // CHECK-LABEL: @test_vmaxswsx_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vmaxswsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1523,6 +2232,20 @@ test_vmaxswsx_vsvvl() { } void __attribute__((noinline)) +test_vmaxswsx_vvvmvl() { + // CHECK-LABEL: @test_vmaxswsx_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vmaxswsx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmaxswsx_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vmaxswsx_vsvmvl() { + // CHECK-LABEL: @test_vmaxswsx_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vmaxswsx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmaxswsx_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vmaxswzx_vvvl() { // CHECK-LABEL: @test_vmaxswzx_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vmaxswzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1551,6 +2274,20 @@ test_vmaxswzx_vsvvl() { } void __attribute__((noinline)) +test_vmaxswzx_vvvmvl() { + // CHECK-LABEL: @test_vmaxswzx_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vmaxswzx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmaxswzx_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vmaxswzx_vsvmvl() { + // 
CHECK-LABEL: @test_vmaxswzx_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vmaxswzx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmaxswzx_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvmaxs_vvvl() { // CHECK-LABEL: @test_pvmaxs_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvmaxs.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1579,6 +2316,20 @@ test_pvmaxs_vsvvl() { } void __attribute__((noinline)) +test_pvmaxs_vvvMvl() { + // CHECK-LABEL: @test_pvmaxs_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvmaxs.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvmaxs_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvmaxs_vsvMvl() { + // CHECK-LABEL: @test_pvmaxs_vsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvmaxs.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvmaxs_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vminswsx_vvvl() { // CHECK-LABEL: @test_vminswsx_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vminswsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1607,6 +2358,20 @@ test_vminswsx_vsvvl() { } void __attribute__((noinline)) +test_vminswsx_vvvmvl() { + // CHECK-LABEL: @test_vminswsx_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vminswsx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vminswsx_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vminswsx_vsvmvl() { + // CHECK-LABEL: @test_vminswsx_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vminswsx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vminswsx_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void 
__attribute__((noinline)) test_vminswzx_vvvl() { // CHECK-LABEL: @test_vminswzx_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vminswzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1635,6 +2400,20 @@ test_vminswzx_vsvvl() { } void __attribute__((noinline)) +test_vminswzx_vvvmvl() { + // CHECK-LABEL: @test_vminswzx_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vminswzx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vminswzx_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vminswzx_vsvmvl() { + // CHECK-LABEL: @test_vminswzx_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vminswzx.vsvmvl(i32 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vminswzx_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvmins_vvvl() { // CHECK-LABEL: @test_pvmins_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvmins.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1663,6 +2442,20 @@ test_pvmins_vsvvl() { } void __attribute__((noinline)) +test_pvmins_vvvMvl() { + // CHECK-LABEL: @test_pvmins_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvmins.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvmins_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvmins_vsvMvl() { + // CHECK-LABEL: @test_pvmins_vsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvmins.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvmins_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vmaxsl_vvvl() { // CHECK-LABEL: @test_vmaxsl_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vmaxsl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1691,6 +2484,20 @@ test_vmaxsl_vsvvl() { } void 
__attribute__((noinline)) +test_vmaxsl_vvvmvl() { + // CHECK-LABEL: @test_vmaxsl_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vmaxsl.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmaxsl_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vmaxsl_vsvmvl() { + // CHECK-LABEL: @test_vmaxsl_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vmaxsl.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmaxsl_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vminsl_vvvl() { // CHECK-LABEL: @test_vminsl_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vminsl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1719,6 +2526,20 @@ test_vminsl_vsvvl() { } void __attribute__((noinline)) +test_vminsl_vvvmvl() { + // CHECK-LABEL: @test_vminsl_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vminsl.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vminsl_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vminsl_vsvmvl() { + // CHECK-LABEL: @test_vminsl_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vminsl.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vminsl_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vand_vvvl() { // CHECK-LABEL: @test_vand_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vand.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1747,6 +2568,20 @@ test_vand_vsvvl() { } void __attribute__((noinline)) +test_vand_vvvmvl() { + // CHECK-LABEL: @test_vand_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vand.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vand_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + 
+void __attribute__((noinline)) +test_vand_vsvmvl() { + // CHECK-LABEL: @test_vand_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vand.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vand_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvand_vvvl() { // CHECK-LABEL: @test_pvand_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvand.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1775,6 +2610,20 @@ test_pvand_vsvvl() { } void __attribute__((noinline)) +test_pvand_vvvMvl() { + // CHECK-LABEL: @test_pvand_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvand.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvand_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvand_vsvMvl() { + // CHECK-LABEL: @test_pvand_vsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvand.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvand_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vor_vvvl() { // CHECK-LABEL: @test_vor_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vor.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1803,6 +2652,20 @@ test_vor_vsvvl() { } void __attribute__((noinline)) +test_vor_vvvmvl() { + // CHECK-LABEL: @test_vor_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vor.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vor_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vor_vsvmvl() { + // CHECK-LABEL: @test_vor_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vor.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vor_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) 
test_pvor_vvvl() { // CHECK-LABEL: @test_pvor_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvor.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1831,6 +2694,20 @@ test_pvor_vsvvl() { } void __attribute__((noinline)) +test_pvor_vvvMvl() { + // CHECK-LABEL: @test_pvor_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvor.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvor_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvor_vsvMvl() { + // CHECK-LABEL: @test_pvor_vsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvor.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvor_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vxor_vvvl() { // CHECK-LABEL: @test_vxor_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vxor.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1859,6 +2736,20 @@ test_vxor_vsvvl() { } void __attribute__((noinline)) +test_vxor_vvvmvl() { + // CHECK-LABEL: @test_vxor_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vxor.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vxor_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vxor_vsvmvl() { + // CHECK-LABEL: @test_vxor_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vxor.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vxor_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvxor_vvvl() { // CHECK-LABEL: @test_pvxor_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvxor.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1887,6 +2778,20 @@ test_pvxor_vsvvl() { } void __attribute__((noinline)) +test_pvxor_vvvMvl() { + // CHECK-LABEL: @test_pvxor_vvvMvl + // CHECK: call <256 x double> 
@llvm.ve.vl.pvxor.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvxor_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvxor_vsvMvl() { + // CHECK-LABEL: @test_pvxor_vsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvxor.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvxor_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_veqv_vvvl() { // CHECK-LABEL: @test_veqv_vvvl // CHECK: call <256 x double> @llvm.ve.vl.veqv.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1915,6 +2820,20 @@ test_veqv_vsvvl() { } void __attribute__((noinline)) +test_veqv_vvvmvl() { + // CHECK-LABEL: @test_veqv_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.veqv.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_veqv_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_veqv_vsvmvl() { + // CHECK-LABEL: @test_veqv_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.veqv.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_veqv_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pveqv_vvvl() { // CHECK-LABEL: @test_pveqv_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pveqv.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -1943,6 +2862,20 @@ test_pveqv_vsvvl() { } void __attribute__((noinline)) +test_pveqv_vvvMvl() { + // CHECK-LABEL: @test_pveqv_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pveqv.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pveqv_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pveqv_vsvMvl() { + // CHECK-LABEL: @test_pveqv_vsvMvl + // CHECK: call <256 x double> 
@llvm.ve.vl.pveqv.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pveqv_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vldz_vvl() { // CHECK-LABEL: @test_vldz_vvl // CHECK: call <256 x double> @llvm.ve.vl.vldz.vvl(<256 x double> %{{.*}}, i32 256) @@ -1957,6 +2890,13 @@ test_vldz_vvvl() { } void __attribute__((noinline)) +test_vldz_vvmvl() { + // CHECK-LABEL: @test_vldz_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vldz.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vldz_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_pvldzlo_vvl() { // CHECK-LABEL: @test_pvldzlo_vvl // CHECK: call <256 x double> @llvm.ve.vl.pvldzlo.vvl(<256 x double> %{{.*}}, i32 256) @@ -1971,6 +2911,13 @@ test_pvldzlo_vvvl() { } void __attribute__((noinline)) +test_pvldzlo_vvmvl() { + // CHECK-LABEL: @test_pvldzlo_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.pvldzlo.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvldzlo_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_pvldzup_vvl() { // CHECK-LABEL: @test_pvldzup_vvl // CHECK: call <256 x double> @llvm.ve.vl.pvldzup.vvl(<256 x double> %{{.*}}, i32 256) @@ -1985,6 +2932,13 @@ test_pvldzup_vvvl() { } void __attribute__((noinline)) +test_pvldzup_vvmvl() { + // CHECK-LABEL: @test_pvldzup_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.pvldzup.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvldzup_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_pvldz_vvl() { // CHECK-LABEL: @test_pvldz_vvl // CHECK: call <256 x double> @llvm.ve.vl.pvldz.vvl(<256 x double> %{{.*}}, i32 256) @@ -1999,6 +2953,13 @@ test_pvldz_vvvl() { } void __attribute__((noinline)) +test_pvldz_vvMvl() { + // CHECK-LABEL: @test_pvldz_vvMvl + // CHECK: call <256 x double> 
@llvm.ve.vl.pvldz.vvMvl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvldz_vvMvl(vr1, vm1_512, vr2, 256); +} + +void __attribute__((noinline)) test_vpcnt_vvl() { // CHECK-LABEL: @test_vpcnt_vvl // CHECK: call <256 x double> @llvm.ve.vl.vpcnt.vvl(<256 x double> %{{.*}}, i32 256) @@ -2013,6 +2974,13 @@ test_vpcnt_vvvl() { } void __attribute__((noinline)) +test_vpcnt_vvmvl() { + // CHECK-LABEL: @test_vpcnt_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vpcnt.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vpcnt_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_pvpcntlo_vvl() { // CHECK-LABEL: @test_pvpcntlo_vvl // CHECK: call <256 x double> @llvm.ve.vl.pvpcntlo.vvl(<256 x double> %{{.*}}, i32 256) @@ -2027,6 +2995,13 @@ test_pvpcntlo_vvvl() { } void __attribute__((noinline)) +test_pvpcntlo_vvmvl() { + // CHECK-LABEL: @test_pvpcntlo_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.pvpcntlo.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvpcntlo_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_pvpcntup_vvl() { // CHECK-LABEL: @test_pvpcntup_vvl // CHECK: call <256 x double> @llvm.ve.vl.pvpcntup.vvl(<256 x double> %{{.*}}, i32 256) @@ -2041,6 +3016,13 @@ test_pvpcntup_vvvl() { } void __attribute__((noinline)) +test_pvpcntup_vvmvl() { + // CHECK-LABEL: @test_pvpcntup_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.pvpcntup.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvpcntup_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_pvpcnt_vvl() { // CHECK-LABEL: @test_pvpcnt_vvl // CHECK: call <256 x double> @llvm.ve.vl.pvpcnt.vvl(<256 x double> %{{.*}}, i32 256) @@ -2055,6 +3037,13 @@ test_pvpcnt_vvvl() { } void __attribute__((noinline)) +test_pvpcnt_vvMvl() { + // CHECK-LABEL: @test_pvpcnt_vvMvl + // CHECK: call <256 x 
double> @llvm.ve.vl.pvpcnt.vvMvl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvpcnt_vvMvl(vr1, vm1_512, vr2, 256); +} + +void __attribute__((noinline)) test_vbrv_vvl() { // CHECK-LABEL: @test_vbrv_vvl // CHECK: call <256 x double> @llvm.ve.vl.vbrv.vvl(<256 x double> %{{.*}}, i32 256) @@ -2069,6 +3058,13 @@ test_vbrv_vvvl() { } void __attribute__((noinline)) +test_vbrv_vvmvl() { + // CHECK-LABEL: @test_vbrv_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vbrv.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vbrv_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_pvbrvlo_vvl() { // CHECK-LABEL: @test_pvbrvlo_vvl // CHECK: call <256 x double> @llvm.ve.vl.pvbrvlo.vvl(<256 x double> %{{.*}}, i32 256) @@ -2083,6 +3079,13 @@ test_pvbrvlo_vvvl() { } void __attribute__((noinline)) +test_pvbrvlo_vvmvl() { + // CHECK-LABEL: @test_pvbrvlo_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.pvbrvlo.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvbrvlo_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_pvbrvup_vvl() { // CHECK-LABEL: @test_pvbrvup_vvl // CHECK: call <256 x double> @llvm.ve.vl.pvbrvup.vvl(<256 x double> %{{.*}}, i32 256) @@ -2097,6 +3100,13 @@ test_pvbrvup_vvvl() { } void __attribute__((noinline)) +test_pvbrvup_vvmvl() { + // CHECK-LABEL: @test_pvbrvup_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.pvbrvup.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvbrvup_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_pvbrv_vvl() { // CHECK-LABEL: @test_pvbrv_vvl // CHECK: call <256 x double> @llvm.ve.vl.pvbrv.vvl(<256 x double> %{{.*}}, i32 256) @@ -2111,6 +3121,13 @@ test_pvbrv_vvvl() { } void __attribute__((noinline)) +test_pvbrv_vvMvl() { + // CHECK-LABEL: @test_pvbrv_vvMvl + // CHECK: call <256 x double> 
@llvm.ve.vl.pvbrv.vvMvl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvbrv_vvMvl(vr1, vm1_512, vr2, 256); +} + +void __attribute__((noinline)) test_vseq_vl() { // CHECK-LABEL: @test_vseq_vl // CHECK: call <256 x double> @llvm.ve.vl.vseq.vl(i32 256) @@ -2195,6 +3212,20 @@ test_vsll_vvsvl() { } void __attribute__((noinline)) +test_vsll_vvvmvl() { + // CHECK-LABEL: @test_vsll_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsll.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsll_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vsll_vvsmvl() { + // CHECK-LABEL: @test_vsll_vvsmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsll.vvsmvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsll_vvsmvl(vr1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvsll_vvvl() { // CHECK-LABEL: @test_pvsll_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvsll.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2223,6 +3254,20 @@ test_pvsll_vvsvl() { } void __attribute__((noinline)) +test_pvsll_vvvMvl() { + // CHECK-LABEL: @test_pvsll_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvsll.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvsll_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvsll_vvsMvl() { + // CHECK-LABEL: @test_pvsll_vvsMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvsll.vvsMvl(<256 x double> %{{.*}}, i64 %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvsll_vvsMvl(vr1, v2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vsrl_vvvl() { // CHECK-LABEL: @test_vsrl_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vsrl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2251,6 +3296,20 @@ 
test_vsrl_vvsvl() { } void __attribute__((noinline)) +test_vsrl_vvvmvl() { + // CHECK-LABEL: @test_vsrl_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsrl.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsrl_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vsrl_vvsmvl() { + // CHECK-LABEL: @test_vsrl_vvsmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsrl.vvsmvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsrl_vvsmvl(vr1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvsrl_vvvl() { // CHECK-LABEL: @test_pvsrl_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvsrl.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2279,6 +3338,20 @@ test_pvsrl_vvsvl() { } void __attribute__((noinline)) +test_pvsrl_vvvMvl() { + // CHECK-LABEL: @test_pvsrl_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvsrl.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvsrl_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvsrl_vvsMvl() { + // CHECK-LABEL: @test_pvsrl_vvsMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvsrl.vvsMvl(<256 x double> %{{.*}}, i64 %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvsrl_vvsMvl(vr1, v2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vslawsx_vvvl() { // CHECK-LABEL: @test_vslawsx_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vslawsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2307,6 +3380,20 @@ test_vslawsx_vvsvl() { } void __attribute__((noinline)) +test_vslawsx_vvvmvl() { + // CHECK-LABEL: @test_vslawsx_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vslawsx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = 
_vel_vslawsx_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vslawsx_vvsmvl() { + // CHECK-LABEL: @test_vslawsx_vvsmvl + // CHECK: call <256 x double> @llvm.ve.vl.vslawsx.vvsmvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vslawsx_vvsmvl(vr1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vslawzx_vvvl() { // CHECK-LABEL: @test_vslawzx_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vslawzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2335,6 +3422,20 @@ test_vslawzx_vvsvl() { } void __attribute__((noinline)) +test_vslawzx_vvvmvl() { + // CHECK-LABEL: @test_vslawzx_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vslawzx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vslawzx_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vslawzx_vvsmvl() { + // CHECK-LABEL: @test_vslawzx_vvsmvl + // CHECK: call <256 x double> @llvm.ve.vl.vslawzx.vvsmvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vslawzx_vvsmvl(vr1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvsla_vvvl() { // CHECK-LABEL: @test_pvsla_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvsla.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2363,6 +3464,20 @@ test_pvsla_vvsvl() { } void __attribute__((noinline)) +test_pvsla_vvvMvl() { + // CHECK-LABEL: @test_pvsla_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvsla.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvsla_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvsla_vvsMvl() { + // CHECK-LABEL: @test_pvsla_vvsMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvsla.vvsMvl(<256 x double> %{{.*}}, i64 %{{.*}}, <512 x i1> %{{.*}}, <256 x double> 
%{{.*}}, i32 256) + vr3 = _vel_pvsla_vvsMvl(vr1, v2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vslal_vvvl() { // CHECK-LABEL: @test_vslal_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vslal.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2391,6 +3506,20 @@ test_vslal_vvsvl() { } void __attribute__((noinline)) +test_vslal_vvvmvl() { + // CHECK-LABEL: @test_vslal_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vslal.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vslal_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vslal_vvsmvl() { + // CHECK-LABEL: @test_vslal_vvsmvl + // CHECK: call <256 x double> @llvm.ve.vl.vslal.vvsmvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vslal_vvsmvl(vr1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vsrawsx_vvvl() { // CHECK-LABEL: @test_vsrawsx_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vsrawsx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2419,6 +3548,20 @@ test_vsrawsx_vvsvl() { } void __attribute__((noinline)) +test_vsrawsx_vvvmvl() { + // CHECK-LABEL: @test_vsrawsx_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsrawsx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsrawsx_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vsrawsx_vvsmvl() { + // CHECK-LABEL: @test_vsrawsx_vvsmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsrawsx.vvsmvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsrawsx_vvsmvl(vr1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vsrawzx_vvvl() { // CHECK-LABEL: @test_vsrawzx_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vsrawzx.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2447,6 
+3590,20 @@ test_vsrawzx_vvsvl() { } void __attribute__((noinline)) +test_vsrawzx_vvvmvl() { + // CHECK-LABEL: @test_vsrawzx_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsrawzx.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsrawzx_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vsrawzx_vvsmvl() { + // CHECK-LABEL: @test_vsrawzx_vvsmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsrawzx.vvsmvl(<256 x double> %{{.*}}, i32 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsrawzx_vvsmvl(vr1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvsra_vvvl() { // CHECK-LABEL: @test_pvsra_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvsra.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2475,6 +3632,20 @@ test_pvsra_vvsvl() { } void __attribute__((noinline)) +test_pvsra_vvvMvl() { + // CHECK-LABEL: @test_pvsra_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvsra.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvsra_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvsra_vvsMvl() { + // CHECK-LABEL: @test_pvsra_vvsMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvsra.vvsMvl(<256 x double> %{{.*}}, i64 %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvsra_vvsMvl(vr1, v2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vsral_vvvl() { // CHECK-LABEL: @test_vsral_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vsral.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2503,6 +3674,20 @@ test_vsral_vvsvl() { } void __attribute__((noinline)) +test_vsral_vvvmvl() { + // CHECK-LABEL: @test_vsral_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsral.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 
= _vel_vsral_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vsral_vvsmvl() { + // CHECK-LABEL: @test_vsral_vvsmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsral.vvsmvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsral_vvsmvl(vr1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vsfa_vvssl() { // CHECK-LABEL: @test_vsfa_vvssl // CHECK: call <256 x double> @llvm.ve.vl.vsfa.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -2517,6 +3702,13 @@ test_vsfa_vvssvl() { } void __attribute__((noinline)) +test_vsfa_vvssmvl() { + // CHECK-LABEL: @test_vsfa_vvssmvl + // CHECK: call <256 x double> @llvm.ve.vl.vsfa.vvssmvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vsfa_vvssmvl(vr1, v1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vfaddd_vvvl() { // CHECK-LABEL: @test_vfaddd_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vfaddd.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2545,6 +3737,20 @@ test_vfaddd_vsvvl() { } void __attribute__((noinline)) +test_vfaddd_vvvmvl() { + // CHECK-LABEL: @test_vfaddd_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfaddd.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfaddd_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vfaddd_vsvmvl() { + // CHECK-LABEL: @test_vfaddd_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfaddd.vsvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfaddd_vsvmvl(vd1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vfadds_vvvl() { // CHECK-LABEL: @test_vfadds_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vfadds.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2573,6 +3779,20 @@ test_vfadds_vsvvl() 
{ } void __attribute__((noinline)) +test_vfadds_vvvmvl() { + // CHECK-LABEL: @test_vfadds_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfadds.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfadds_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vfadds_vsvmvl() { + // CHECK-LABEL: @test_vfadds_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfadds.vsvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfadds_vsvmvl(vf1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvfadd_vvvl() { // CHECK-LABEL: @test_pvfadd_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvfadd.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2601,6 +3821,20 @@ test_pvfadd_vsvvl() { } void __attribute__((noinline)) +test_pvfadd_vvvMvl() { + // CHECK-LABEL: @test_pvfadd_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfadd.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvfadd_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvfadd_vsvMvl() { + // CHECK-LABEL: @test_pvfadd_vsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfadd.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvfadd_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vfsubd_vvvl() { // CHECK-LABEL: @test_vfsubd_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vfsubd.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2629,6 +3863,20 @@ test_vfsubd_vsvvl() { } void __attribute__((noinline)) +test_vfsubd_vvvmvl() { + // CHECK-LABEL: @test_vfsubd_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfsubd.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = 
_vel_vfsubd_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vfsubd_vsvmvl() { + // CHECK-LABEL: @test_vfsubd_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfsubd.vsvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfsubd_vsvmvl(vd1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vfsubs_vvvl() { // CHECK-LABEL: @test_vfsubs_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vfsubs.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2657,6 +3905,20 @@ test_vfsubs_vsvvl() { } void __attribute__((noinline)) +test_vfsubs_vvvmvl() { + // CHECK-LABEL: @test_vfsubs_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfsubs.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfsubs_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vfsubs_vsvmvl() { + // CHECK-LABEL: @test_vfsubs_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfsubs.vsvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfsubs_vsvmvl(vf1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvfsub_vvvl() { // CHECK-LABEL: @test_pvfsub_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvfsub.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2685,6 +3947,20 @@ test_pvfsub_vsvvl() { } void __attribute__((noinline)) +test_pvfsub_vvvMvl() { + // CHECK-LABEL: @test_pvfsub_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfsub.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvfsub_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvfsub_vsvMvl() { + // CHECK-LABEL: @test_pvfsub_vsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfsub.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> 
%{{.*}}, i32 256) + vr3 = _vel_pvfsub_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vfmuld_vvvl() { // CHECK-LABEL: @test_vfmuld_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vfmuld.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2713,6 +3989,20 @@ test_vfmuld_vsvvl() { } void __attribute__((noinline)) +test_vfmuld_vvvmvl() { + // CHECK-LABEL: @test_vfmuld_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmuld.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfmuld_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vfmuld_vsvmvl() { + // CHECK-LABEL: @test_vfmuld_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmuld.vsvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfmuld_vsvmvl(vd1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vfmuls_vvvl() { // CHECK-LABEL: @test_vfmuls_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vfmuls.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2741,6 +4031,20 @@ test_vfmuls_vsvvl() { } void __attribute__((noinline)) +test_vfmuls_vvvmvl() { + // CHECK-LABEL: @test_vfmuls_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmuls.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfmuls_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vfmuls_vsvmvl() { + // CHECK-LABEL: @test_vfmuls_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmuls.vsvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfmuls_vsvmvl(vf1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvfmul_vvvl() { // CHECK-LABEL: @test_pvfmul_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvfmul.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ 
-2769,6 +4073,20 @@ test_pvfmul_vsvvl() { } void __attribute__((noinline)) +test_pvfmul_vvvMvl() { + // CHECK-LABEL: @test_pvfmul_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfmul.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvfmul_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvfmul_vsvMvl() { + // CHECK-LABEL: @test_pvfmul_vsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfmul.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvfmul_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vfdivd_vvvl() { // CHECK-LABEL: @test_vfdivd_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vfdivd.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2797,6 +4115,20 @@ test_vfdivd_vsvvl() { } void __attribute__((noinline)) +test_vfdivd_vvvmvl() { + // CHECK-LABEL: @test_vfdivd_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfdivd.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfdivd_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vfdivd_vsvmvl() { + // CHECK-LABEL: @test_vfdivd_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfdivd.vsvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfdivd_vsvmvl(vd1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vfdivs_vvvl() { // CHECK-LABEL: @test_vfdivs_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vfdivs.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2825,6 +4157,20 @@ test_vfdivs_vsvvl() { } void __attribute__((noinline)) +test_vfdivs_vvvmvl() { + // CHECK-LABEL: @test_vfdivs_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfdivs.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> 
%{{.*}}, i32 256) + vr3 = _vel_vfdivs_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vfdivs_vsvmvl() { + // CHECK-LABEL: @test_vfdivs_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfdivs.vsvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfdivs_vsvmvl(vf1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vfsqrtd_vvl() { // CHECK-LABEL: @test_vfsqrtd_vvl // CHECK: call <256 x double> @llvm.ve.vl.vfsqrtd.vvl(<256 x double> %{{.*}}, i32 256) @@ -2838,6 +4184,7 @@ test_vfsqrtd_vvvl() { vr3 = _vel_vfsqrtd_vvvl(vr1, vr2, 256); } + void __attribute__((noinline)) test_vfsqrts_vvl() { // CHECK-LABEL: @test_vfsqrts_vvl @@ -2881,6 +4228,20 @@ test_vfcmpd_vsvvl() { } void __attribute__((noinline)) +test_vfcmpd_vvvmvl() { + // CHECK-LABEL: @test_vfcmpd_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfcmpd.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfcmpd_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vfcmpd_vsvmvl() { + // CHECK-LABEL: @test_vfcmpd_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfcmpd.vsvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfcmpd_vsvmvl(vd1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vfcmps_vvvl() { // CHECK-LABEL: @test_vfcmps_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vfcmps.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2909,6 +4270,20 @@ test_vfcmps_vsvvl() { } void __attribute__((noinline)) +test_vfcmps_vvvmvl() { + // CHECK-LABEL: @test_vfcmps_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfcmps.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfcmps_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vfcmps_vsvmvl() { + 
// CHECK-LABEL: @test_vfcmps_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfcmps.vsvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfcmps_vsvmvl(vf1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvfcmp_vvvl() { // CHECK-LABEL: @test_pvfcmp_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvfcmp.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2937,6 +4312,20 @@ test_pvfcmp_vsvvl() { } void __attribute__((noinline)) +test_pvfcmp_vvvMvl() { + // CHECK-LABEL: @test_pvfcmp_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfcmp.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvfcmp_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvfcmp_vsvMvl() { + // CHECK-LABEL: @test_pvfcmp_vsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfcmp.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvfcmp_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vfmaxd_vvvl() { // CHECK-LABEL: @test_vfmaxd_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vfmaxd.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2965,6 +4354,20 @@ test_vfmaxd_vsvvl() { } void __attribute__((noinline)) +test_vfmaxd_vvvmvl() { + // CHECK-LABEL: @test_vfmaxd_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmaxd.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfmaxd_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vfmaxd_vsvmvl() { + // CHECK-LABEL: @test_vfmaxd_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmaxd.vsvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfmaxd_vsvmvl(vd1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) 
test_vfmaxs_vvvl() { // CHECK-LABEL: @test_vfmaxs_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vfmaxs.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -2993,6 +4396,20 @@ test_vfmaxs_vsvvl() { } void __attribute__((noinline)) +test_vfmaxs_vvvmvl() { + // CHECK-LABEL: @test_vfmaxs_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmaxs.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfmaxs_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vfmaxs_vsvmvl() { + // CHECK-LABEL: @test_vfmaxs_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmaxs.vsvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfmaxs_vsvmvl(vf1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvfmax_vvvl() { // CHECK-LABEL: @test_pvfmax_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvfmax.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -3021,6 +4438,20 @@ test_pvfmax_vsvvl() { } void __attribute__((noinline)) +test_pvfmax_vvvMvl() { + // CHECK-LABEL: @test_pvfmax_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfmax.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvfmax_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_pvfmax_vsvMvl() { + // CHECK-LABEL: @test_pvfmax_vsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfmax.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvfmax_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vfmind_vvvl() { // CHECK-LABEL: @test_vfmind_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vfmind.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -3049,6 +4480,20 @@ test_vfmind_vsvvl() { } void __attribute__((noinline)) +test_vfmind_vvvmvl() { + // 
CHECK-LABEL: @test_vfmind_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmind.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfmind_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vfmind_vsvmvl() { + // CHECK-LABEL: @test_vfmind_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmind.vsvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfmind_vsvmvl(vd1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vfmins_vvvl() { // CHECK-LABEL: @test_vfmins_vvvl // CHECK: call <256 x double> @llvm.ve.vl.vfmins.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -3077,6 +4522,20 @@ test_vfmins_vsvvl() { } void __attribute__((noinline)) +test_vfmins_vvvmvl() { + // CHECK-LABEL: @test_vfmins_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmins.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfmins_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vfmins_vsvmvl() { + // CHECK-LABEL: @test_vfmins_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmins.vsvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vfmins_vsvmvl(vf1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_pvfmin_vvvl() { // CHECK-LABEL: @test_pvfmin_vvvl // CHECK: call <256 x double> @llvm.ve.vl.pvfmin.vvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -3105,6 +4564,20 @@ test_pvfmin_vsvvl() { } void __attribute__((noinline)) +test_pvfmin_vvvMvl() { + // CHECK-LABEL: @test_pvfmin_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfmin.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvfmin_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void 
__attribute__((noinline)) +test_pvfmin_vsvMvl() { + // CHECK-LABEL: @test_pvfmin_vsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfmin.vsvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvfmin_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vfmadd_vvvvl() { // CHECK-LABEL: @test_vfmadd_vvvvl // CHECK: call <256 x double> @llvm.ve.vl.vfmadd.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -3147,6 +4620,27 @@ test_vfmadd_vvsvvl() { } void __attribute__((noinline)) +test_vfmadd_vvvvmvl() { + // CHECK-LABEL: @test_vfmadd_vvvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmadd.vvvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfmadd_vvvvmvl(vr1, vr2, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) +test_vfmadd_vsvvmvl() { + // CHECK-LABEL: @test_vfmadd_vsvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmadd.vsvvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfmadd_vsvvmvl(vd1, vr2, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) +test_vfmadd_vvsvmvl() { + // CHECK-LABEL: @test_vfmadd_vvsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmadd.vvsvmvl(<256 x double> %{{.*}}, double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfmadd_vvsvmvl(vr1, vd1, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) test_vfmads_vvvvl() { // CHECK-LABEL: @test_vfmads_vvvvl // CHECK: call <256 x double> @llvm.ve.vl.vfmads.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -3189,6 +4683,27 @@ test_vfmads_vvsvvl() { } void __attribute__((noinline)) +test_vfmads_vvvvmvl() { + // CHECK-LABEL: @test_vfmads_vvvvmvl + // CHECK: call <256 x double> 
@llvm.ve.vl.vfmads.vvvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfmads_vvvvmvl(vr1, vr2, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) +test_vfmads_vsvvmvl() { + // CHECK-LABEL: @test_vfmads_vsvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmads.vsvvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfmads_vsvvmvl(vf1, vr2, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) +test_vfmads_vvsvmvl() { + // CHECK-LABEL: @test_vfmads_vvsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmads.vvsvmvl(<256 x double> %{{.*}}, float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfmads_vvsvmvl(vr1, vf1, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) test_pvfmad_vvvvl() { // CHECK-LABEL: @test_pvfmad_vvvvl // CHECK: call <256 x double> @llvm.ve.vl.pvfmad.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -3231,6 +4746,27 @@ test_pvfmad_vvsvvl() { } void __attribute__((noinline)) +test_pvfmad_vvvvMvl() { + // CHECK-LABEL: @test_pvfmad_vvvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfmad.vvvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_pvfmad_vvvvMvl(vr1, vr2, vr3, vm1_512, vr4, 256); +} + +void __attribute__((noinline)) +test_pvfmad_vsvvMvl() { + // CHECK-LABEL: @test_pvfmad_vsvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfmad.vsvvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_pvfmad_vsvvMvl(v1, vr2, vr3, vm1_512, vr4, 256); +} + +void __attribute__((noinline)) +test_pvfmad_vvsvMvl() { + // CHECK-LABEL: @test_pvfmad_vvsvMvl + // CHECK: call <256 x double> 
@llvm.ve.vl.pvfmad.vvsvMvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_pvfmad_vvsvMvl(vr1, v1, vr3, vm1_512, vr4, 256); +} + +void __attribute__((noinline)) test_vfmsbd_vvvvl() { // CHECK-LABEL: @test_vfmsbd_vvvvl // CHECK: call <256 x double> @llvm.ve.vl.vfmsbd.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -3273,6 +4809,27 @@ test_vfmsbd_vvsvvl() { } void __attribute__((noinline)) +test_vfmsbd_vvvvmvl() { + // CHECK-LABEL: @test_vfmsbd_vvvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmsbd.vvvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfmsbd_vvvvmvl(vr1, vr2, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) +test_vfmsbd_vsvvmvl() { + // CHECK-LABEL: @test_vfmsbd_vsvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmsbd.vsvvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfmsbd_vsvvmvl(vd1, vr2, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) +test_vfmsbd_vvsvmvl() { + // CHECK-LABEL: @test_vfmsbd_vvsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmsbd.vvsvmvl(<256 x double> %{{.*}}, double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfmsbd_vvsvmvl(vr1, vd1, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) test_vfmsbs_vvvvl() { // CHECK-LABEL: @test_vfmsbs_vvvvl // CHECK: call <256 x double> @llvm.ve.vl.vfmsbs.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -3315,6 +4872,27 @@ test_vfmsbs_vvsvvl() { } void __attribute__((noinline)) +test_vfmsbs_vvvvmvl() { + // CHECK-LABEL: @test_vfmsbs_vvvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmsbs.vvvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 
x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfmsbs_vvvvmvl(vr1, vr2, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) +test_vfmsbs_vsvvmvl() { + // CHECK-LABEL: @test_vfmsbs_vsvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmsbs.vsvvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfmsbs_vsvvmvl(vf1, vr2, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) +test_vfmsbs_vvsvmvl() { + // CHECK-LABEL: @test_vfmsbs_vvsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfmsbs.vvsvmvl(<256 x double> %{{.*}}, float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfmsbs_vvsvmvl(vr1, vf1, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) test_pvfmsb_vvvvl() { // CHECK-LABEL: @test_pvfmsb_vvvvl // CHECK: call <256 x double> @llvm.ve.vl.pvfmsb.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -3357,6 +4935,27 @@ test_pvfmsb_vvsvvl() { } void __attribute__((noinline)) +test_pvfmsb_vvvvMvl() { + // CHECK-LABEL: @test_pvfmsb_vvvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfmsb.vvvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_pvfmsb_vvvvMvl(vr1, vr2, vr3, vm1_512, vr4, 256); +} + +void __attribute__((noinline)) +test_pvfmsb_vsvvMvl() { + // CHECK-LABEL: @test_pvfmsb_vsvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfmsb.vsvvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_pvfmsb_vsvvMvl(v1, vr2, vr3, vm1_512, vr4, 256); +} + +void __attribute__((noinline)) +test_pvfmsb_vvsvMvl() { + // CHECK-LABEL: @test_pvfmsb_vvsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfmsb.vvsvMvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, 
i32 256) + vr4 = _vel_pvfmsb_vvsvMvl(vr1, v1, vr3, vm1_512, vr4, 256); +} + +void __attribute__((noinline)) test_vfnmadd_vvvvl() { // CHECK-LABEL: @test_vfnmadd_vvvvl // CHECK: call <256 x double> @llvm.ve.vl.vfnmadd.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -3399,6 +4998,27 @@ test_vfnmadd_vvsvvl() { } void __attribute__((noinline)) +test_vfnmadd_vvvvmvl() { + // CHECK-LABEL: @test_vfnmadd_vvvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfnmadd.vvvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfnmadd_vvvvmvl(vr1, vr2, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) +test_vfnmadd_vsvvmvl() { + // CHECK-LABEL: @test_vfnmadd_vsvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfnmadd.vsvvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfnmadd_vsvvmvl(vd1, vr2, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) +test_vfnmadd_vvsvmvl() { + // CHECK-LABEL: @test_vfnmadd_vvsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfnmadd.vvsvmvl(<256 x double> %{{.*}}, double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfnmadd_vvsvmvl(vr1, vd1, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) test_vfnmads_vvvvl() { // CHECK-LABEL: @test_vfnmads_vvvvl // CHECK: call <256 x double> @llvm.ve.vl.vfnmads.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -3441,6 +5061,27 @@ test_vfnmads_vvsvvl() { } void __attribute__((noinline)) +test_vfnmads_vvvvmvl() { + // CHECK-LABEL: @test_vfnmads_vvvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfnmads.vvvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfnmads_vvvvmvl(vr1, vr2, vr3, vm1, vr4, 256); 
+} + +void __attribute__((noinline)) +test_vfnmads_vsvvmvl() { + // CHECK-LABEL: @test_vfnmads_vsvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfnmads.vsvvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfnmads_vsvvmvl(vf1, vr2, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) +test_vfnmads_vvsvmvl() { + // CHECK-LABEL: @test_vfnmads_vvsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfnmads.vvsvmvl(<256 x double> %{{.*}}, float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfnmads_vvsvmvl(vr1, vf1, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) test_pvfnmad_vvvvl() { // CHECK-LABEL: @test_pvfnmad_vvvvl // CHECK: call <256 x double> @llvm.ve.vl.pvfnmad.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -3483,6 +5124,27 @@ test_pvfnmad_vvsvvl() { } void __attribute__((noinline)) +test_pvfnmad_vvvvMvl() { + // CHECK-LABEL: @test_pvfnmad_vvvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfnmad.vvvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_pvfnmad_vvvvMvl(vr1, vr2, vr3, vm1_512, vr4, 256); +} + +void __attribute__((noinline)) +test_pvfnmad_vsvvMvl() { + // CHECK-LABEL: @test_pvfnmad_vsvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfnmad.vsvvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_pvfnmad_vsvvMvl(v1, vr2, vr3, vm1_512, vr4, 256); +} + +void __attribute__((noinline)) +test_pvfnmad_vvsvMvl() { + // CHECK-LABEL: @test_pvfnmad_vvsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfnmad.vvsvMvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_pvfnmad_vvsvMvl(vr1, v1, vr3, vm1_512, vr4, 256); +} + +void 
__attribute__((noinline)) test_vfnmsbd_vvvvl() { // CHECK-LABEL: @test_vfnmsbd_vvvvl // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbd.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -3525,6 +5187,27 @@ test_vfnmsbd_vvsvvl() { } void __attribute__((noinline)) +test_vfnmsbd_vvvvmvl() { + // CHECK-LABEL: @test_vfnmsbd_vvvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbd.vvvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfnmsbd_vvvvmvl(vr1, vr2, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) +test_vfnmsbd_vsvvmvl() { + // CHECK-LABEL: @test_vfnmsbd_vsvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbd.vsvvmvl(double %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfnmsbd_vsvvmvl(vd1, vr2, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) +test_vfnmsbd_vvsvmvl() { + // CHECK-LABEL: @test_vfnmsbd_vvsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbd.vvsvmvl(<256 x double> %{{.*}}, double %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfnmsbd_vvsvmvl(vr1, vd1, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) test_vfnmsbs_vvvvl() { // CHECK-LABEL: @test_vfnmsbs_vvvvl // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbs.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -3567,6 +5250,27 @@ test_vfnmsbs_vvsvvl() { } void __attribute__((noinline)) +test_vfnmsbs_vvvvmvl() { + // CHECK-LABEL: @test_vfnmsbs_vvvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbs.vvvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfnmsbs_vvvvmvl(vr1, vr2, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) +test_vfnmsbs_vsvvmvl() { + // CHECK-LABEL: 
@test_vfnmsbs_vsvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbs.vsvvmvl(float %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfnmsbs_vsvvmvl(vf1, vr2, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) +test_vfnmsbs_vvsvmvl() { + // CHECK-LABEL: @test_vfnmsbs_vvsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vfnmsbs.vvsvmvl(<256 x double> %{{.*}}, float %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_vfnmsbs_vvsvmvl(vr1, vf1, vr3, vm1, vr4, 256); +} + +void __attribute__((noinline)) test_pvfnmsb_vvvvl() { // CHECK-LABEL: @test_pvfnmsb_vvvvl // CHECK: call <256 x double> @llvm.ve.vl.pvfnmsb.vvvvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, i32 256) @@ -3609,6 +5313,27 @@ test_pvfnmsb_vvsvvl() { } void __attribute__((noinline)) +test_pvfnmsb_vvvvMvl() { + // CHECK-LABEL: @test_pvfnmsb_vvvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfnmsb.vvvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_pvfnmsb_vvvvMvl(vr1, vr2, vr3, vm1_512, vr4, 256); +} + +void __attribute__((noinline)) +test_pvfnmsb_vsvvMvl() { + // CHECK-LABEL: @test_pvfnmsb_vsvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfnmsb.vsvvMvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_pvfnmsb_vsvvMvl(v1, vr2, vr3, vm1_512, vr4, 256); +} + +void __attribute__((noinline)) +test_pvfnmsb_vvsvMvl() { + // CHECK-LABEL: @test_pvfnmsb_vvsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvfnmsb.vvsvMvl(<256 x double> %{{.*}}, i64 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr4 = _vel_pvfnmsb_vvsvMvl(vr1, v1, vr3, vm1_512, vr4, 256); +} + +void __attribute__((noinline)) test_vrcpd_vvl() { // CHECK-LABEL: @test_vrcpd_vvl // 
CHECK: call <256 x double> @llvm.ve.vl.vrcpd.vvl(<256 x double> %{{.*}}, i32 256) @@ -3749,6 +5474,13 @@ test_vcvtwdsx_vvvl() { } void __attribute__((noinline)) +test_vcvtwdsx_vvmvl() { + // CHECK-LABEL: @test_vcvtwdsx_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdsx.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcvtwdsx_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_vcvtwdsxrz_vvl() { // CHECK-LABEL: @test_vcvtwdsxrz_vvl // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdsxrz.vvl(<256 x double> %{{.*}}, i32 256) @@ -3763,6 +5495,13 @@ test_vcvtwdsxrz_vvvl() { } void __attribute__((noinline)) +test_vcvtwdsxrz_vvmvl() { + // CHECK-LABEL: @test_vcvtwdsxrz_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdsxrz.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcvtwdsxrz_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_vcvtwdzx_vvl() { // CHECK-LABEL: @test_vcvtwdzx_vvl // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdzx.vvl(<256 x double> %{{.*}}, i32 256) @@ -3777,6 +5516,13 @@ test_vcvtwdzx_vvvl() { } void __attribute__((noinline)) +test_vcvtwdzx_vvmvl() { + // CHECK-LABEL: @test_vcvtwdzx_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdzx.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcvtwdzx_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_vcvtwdzxrz_vvl() { // CHECK-LABEL: @test_vcvtwdzxrz_vvl // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdzxrz.vvl(<256 x double> %{{.*}}, i32 256) @@ -3791,6 +5537,13 @@ test_vcvtwdzxrz_vvvl() { } void __attribute__((noinline)) +test_vcvtwdzxrz_vvmvl() { + // CHECK-LABEL: @test_vcvtwdzxrz_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcvtwdzxrz.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcvtwdzxrz_vvmvl(vr1, vm1, vr2, 256); +} + +void 
__attribute__((noinline)) test_vcvtwssx_vvl() { // CHECK-LABEL: @test_vcvtwssx_vvl // CHECK: call <256 x double> @llvm.ve.vl.vcvtwssx.vvl(<256 x double> %{{.*}}, i32 256) @@ -3805,6 +5558,13 @@ test_vcvtwssx_vvvl() { } void __attribute__((noinline)) +test_vcvtwssx_vvmvl() { + // CHECK-LABEL: @test_vcvtwssx_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcvtwssx.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcvtwssx_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_vcvtwssxrz_vvl() { // CHECK-LABEL: @test_vcvtwssxrz_vvl // CHECK: call <256 x double> @llvm.ve.vl.vcvtwssxrz.vvl(<256 x double> %{{.*}}, i32 256) @@ -3819,6 +5579,13 @@ test_vcvtwssxrz_vvvl() { } void __attribute__((noinline)) +test_vcvtwssxrz_vvmvl() { + // CHECK-LABEL: @test_vcvtwssxrz_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcvtwssxrz.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcvtwssxrz_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_vcvtwszx_vvl() { // CHECK-LABEL: @test_vcvtwszx_vvl // CHECK: call <256 x double> @llvm.ve.vl.vcvtwszx.vvl(<256 x double> %{{.*}}, i32 256) @@ -3833,6 +5600,13 @@ test_vcvtwszx_vvvl() { } void __attribute__((noinline)) +test_vcvtwszx_vvmvl() { + // CHECK-LABEL: @test_vcvtwszx_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcvtwszx.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcvtwszx_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_vcvtwszxrz_vvl() { // CHECK-LABEL: @test_vcvtwszxrz_vvl // CHECK: call <256 x double> @llvm.ve.vl.vcvtwszxrz.vvl(<256 x double> %{{.*}}, i32 256) @@ -3847,6 +5621,13 @@ test_vcvtwszxrz_vvvl() { } void __attribute__((noinline)) +test_vcvtwszxrz_vvmvl() { + // CHECK-LABEL: @test_vcvtwszxrz_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcvtwszxrz.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> 
%{{.*}}, i32 256) + vr3 = _vel_vcvtwszxrz_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_pvcvtws_vvl() { // CHECK-LABEL: @test_pvcvtws_vvl // CHECK: call <256 x double> @llvm.ve.vl.pvcvtws.vvl(<256 x double> %{{.*}}, i32 256) @@ -3861,6 +5642,13 @@ test_pvcvtws_vvvl() { } void __attribute__((noinline)) +test_pvcvtws_vvMvl() { + // CHECK-LABEL: @test_pvcvtws_vvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvcvtws.vvMvl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvcvtws_vvMvl(vr1, vm1_512, vr2, 256); +} + +void __attribute__((noinline)) test_pvcvtwsrz_vvl() { // CHECK-LABEL: @test_pvcvtwsrz_vvl // CHECK: call <256 x double> @llvm.ve.vl.pvcvtwsrz.vvl(<256 x double> %{{.*}}, i32 256) @@ -3875,6 +5663,13 @@ test_pvcvtwsrz_vvvl() { } void __attribute__((noinline)) +test_pvcvtwsrz_vvMvl() { + // CHECK-LABEL: @test_pvcvtwsrz_vvMvl + // CHECK: call <256 x double> @llvm.ve.vl.pvcvtwsrz.vvMvl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_pvcvtwsrz_vvMvl(vr1, vm1_512, vr2, 256); +} + +void __attribute__((noinline)) test_vcvtld_vvl() { // CHECK-LABEL: @test_vcvtld_vvl // CHECK: call <256 x double> @llvm.ve.vl.vcvtld.vvl(<256 x double> %{{.*}}, i32 256) @@ -3889,6 +5684,13 @@ test_vcvtld_vvvl() { } void __attribute__((noinline)) +test_vcvtld_vvmvl() { + // CHECK-LABEL: @test_vcvtld_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcvtld.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcvtld_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_vcvtldrz_vvl() { // CHECK-LABEL: @test_vcvtldrz_vvl // CHECK: call <256 x double> @llvm.ve.vl.vcvtldrz.vvl(<256 x double> %{{.*}}, i32 256) @@ -3903,6 +5705,13 @@ test_vcvtldrz_vvvl() { } void __attribute__((noinline)) +test_vcvtldrz_vvmvl() { + // CHECK-LABEL: @test_vcvtldrz_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcvtldrz.vvmvl(<256 x double> %{{.*}}, 
<256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcvtldrz_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) test_vcvtdw_vvl() { // CHECK-LABEL: @test_vcvtdw_vvl // CHECK: call <256 x double> @llvm.ve.vl.vcvtdw.vvl(<256 x double> %{{.*}}, i32 256) @@ -3987,6 +5796,62 @@ test_vcvtsd_vvvl() { } void __attribute__((noinline)) +test_vmrg_vvvml() { + // CHECK-LABEL: @test_vmrg_vvvml + // CHECK: call <256 x double> @llvm.ve.vl.vmrg.vvvml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256) + vr3 = _vel_vmrg_vvvml(vr1, vr2, vm1, 256); +} + +void __attribute__((noinline)) +test_vmrg_vvvmvl() { + // CHECK-LABEL: @test_vmrg_vvvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vmrg.vvvmvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmrg_vvvmvl(vr1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vmrg_vsvml() { + // CHECK-LABEL: @test_vmrg_vsvml + // CHECK: call <256 x double> @llvm.ve.vl.vmrg.vsvml(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256) + vr3 = _vel_vmrg_vsvml(v1, vr2, vm1, 256); +} + +void __attribute__((noinline)) +test_vmrg_vsvmvl() { + // CHECK-LABEL: @test_vmrg_vsvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vmrg.vsvmvl(i64 %{{.*}}, <256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmrg_vsvmvl(v1, vr2, vm1, vr3, 256); +} + +void __attribute__((noinline)) +test_vmrgw_vvvMl() { + // CHECK-LABEL: @test_vmrgw_vvvMl + // CHECK: call <256 x double> @llvm.ve.vl.vmrgw.vvvMl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 256) + vr3 = _vel_vmrgw_vvvMl(vr1, vr2, vm1_512, 256); +} + +void __attribute__((noinline)) +test_vmrgw_vvvMvl() { + // CHECK-LABEL: @test_vmrgw_vvvMvl + // CHECK: call <256 x double> @llvm.ve.vl.vmrgw.vvvMvl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = 
_vel_vmrgw_vvvMvl(vr1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) +test_vmrgw_vsvMl() { + // CHECK-LABEL: @test_vmrgw_vsvMl + // CHECK: call <256 x double> @llvm.ve.vl.vmrgw.vsvMl(i32 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 256) + vr3 = _vel_vmrgw_vsvMl(v1, vr2, vm1_512, 256); +} + +void __attribute__((noinline)) +test_vmrgw_vsvMvl() { + // CHECK-LABEL: @test_vmrgw_vsvMvl + // CHECK: call <256 x double> @llvm.ve.vl.vmrgw.vsvMvl(i32 %{{.*}}, <256 x double> %{{.*}}, <512 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vmrgw_vsvMvl(v1, vr2, vm1_512, vr3, 256); +} + +void __attribute__((noinline)) test_vshf_vvvsl() { // CHECK-LABEL: @test_vshf_vvvsl // CHECK: call <256 x double> @llvm.ve.vl.vshf.vvvsl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i32 256) @@ -4001,6 +5866,2008 @@ test_vshf_vvvsvl() { } void __attribute__((noinline)) +test_vcp_vvmvl() { + // CHECK-LABEL: @test_vcp_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vcp.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vcp_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) +test_vex_vvmvl() { + // CHECK-LABEL: @test_vex_vvmvl + // CHECK: call <256 x double> @llvm.ve.vl.vex.vvmvl(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vex_vvmvl(vr1, vm1, vr2, 256); +} + +void __attribute__((noinline)) +test_vfmklat_ml(int vl) { + // CHECK-LABEL: @test_vfmklat_ml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklat.ml(i32 %{{.*}}) + vm1 = _vel_vfmklat_ml(vl); +} + +void __attribute__((noinline)) +test_vfmklaf_ml(int vl) { + // CHECK-LABEL: @test_vfmklaf_ml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklaf.ml(i32 %{{.*}}) + vm1 = _vel_vfmklaf_ml(vl); +} + +void __attribute__((noinline)) +test_pvfmkat_Ml(int vl) { + // CHECK-LABEL: @test_pvfmkat_Ml + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkat.Ml(i32 %{{.*}}) + vm1_512 = _vel_pvfmkat_Ml(vl); +} + +void 
__attribute__((noinline)) +test_pvfmkaf_Ml(int vl) { + // CHECK-LABEL: @test_pvfmkaf_Ml + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkaf.Ml(i32 %{{.*}}) + vm1_512 = _vel_pvfmkaf_Ml(vl); +} + +void __attribute__((noinline)) +test_vfmklgt_mvl(int vl) { + // CHECK-LABEL: @test_vfmklgt_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklgt.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklgt_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmklgt_mvml(int vl) { + // CHECK-LABEL: @test_vfmklgt_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklgt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklgt_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmkllt_mvl(int vl) { + // CHECK-LABEL: @test_vfmkllt_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkllt.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkllt_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmkllt_mvml(int vl) { + // CHECK-LABEL: @test_vfmkllt_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkllt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkllt_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmklne_mvl(int vl) { + // CHECK-LABEL: @test_vfmklne_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklne.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklne_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmklne_mvml(int vl) { + // CHECK-LABEL: @test_vfmklne_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklne.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklne_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmkleq_mvl(int vl) { + // CHECK-LABEL: @test_vfmkleq_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkleq.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkleq_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmkleq_mvml(int vl) { + // CHECK-LABEL: @test_vfmkleq_mvml + // CHECK: call <256 x i1> 
@llvm.ve.vl.vfmkleq.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkleq_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmklge_mvl(int vl) { + // CHECK-LABEL: @test_vfmklge_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklge.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklge_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmklge_mvml(int vl) { + // CHECK-LABEL: @test_vfmklge_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklge.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklge_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmklle_mvl(int vl) { + // CHECK-LABEL: @test_vfmklle_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklle.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklle_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmklle_mvml(int vl) { + // CHECK-LABEL: @test_vfmklle_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklle.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklle_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmklnum_mvl(int vl) { + // CHECK-LABEL: @test_vfmklnum_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklnum.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklnum_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmklnum_mvml(int vl) { + // CHECK-LABEL: @test_vfmklnum_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklnum.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklnum_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmklnan_mvl(int vl) { + // CHECK-LABEL: @test_vfmklnan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklnan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmklnan_mvml(int vl) { + // CHECK-LABEL: @test_vfmklnan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, 
i32 %{{.*}}) + vm1 = _vel_vfmklnan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmklgtnan_mvl(int vl) { + // CHECK-LABEL: @test_vfmklgtnan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklgtnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklgtnan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmklgtnan_mvml(int vl) { + // CHECK-LABEL: @test_vfmklgtnan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklgtnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklgtnan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmklltnan_mvl(int vl) { + // CHECK-LABEL: @test_vfmklltnan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklltnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklltnan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmklltnan_mvml(int vl) { + // CHECK-LABEL: @test_vfmklltnan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklltnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklltnan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmklnenan_mvl(int vl) { + // CHECK-LABEL: @test_vfmklnenan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklnenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklnenan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmklnenan_mvml(int vl) { + // CHECK-LABEL: @test_vfmklnenan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklnenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklnenan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmkleqnan_mvl(int vl) { + // CHECK-LABEL: @test_vfmkleqnan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkleqnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkleqnan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmkleqnan_mvml(int vl) { + // CHECK-LABEL: @test_vfmkleqnan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkleqnan.mvml(<256 x double> %{{.*}}, <256 x 
i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkleqnan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmklgenan_mvl(int vl) { + // CHECK-LABEL: @test_vfmklgenan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklgenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklgenan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmklgenan_mvml(int vl) { + // CHECK-LABEL: @test_vfmklgenan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmklgenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmklgenan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmkllenan_mvl(int vl) { + // CHECK-LABEL: @test_vfmkllenan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkllenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkllenan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmkllenan_mvml(int vl) { + // CHECK-LABEL: @test_vfmkllenan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkllenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkllenan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmkwgt_mvl(int vl) { + // CHECK-LABEL: @test_vfmkwgt_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwgt.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwgt_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmkwgt_mvml(int vl) { + // CHECK-LABEL: @test_vfmkwgt_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwgt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwgt_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmkwlt_mvl(int vl) { + // CHECK-LABEL: @test_vfmkwlt_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwlt.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwlt_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmkwlt_mvml(int vl) { + // CHECK-LABEL: @test_vfmkwlt_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwlt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 
= _vel_vfmkwlt_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmkwne_mvl(int vl) { + // CHECK-LABEL: @test_vfmkwne_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwne.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwne_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmkwne_mvml(int vl) { + // CHECK-LABEL: @test_vfmkwne_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwne.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwne_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmkweq_mvl(int vl) { + // CHECK-LABEL: @test_vfmkweq_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkweq.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkweq_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmkweq_mvml(int vl) { + // CHECK-LABEL: @test_vfmkweq_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkweq.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkweq_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmkwge_mvl(int vl) { + // CHECK-LABEL: @test_vfmkwge_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwge.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwge_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmkwge_mvml(int vl) { + // CHECK-LABEL: @test_vfmkwge_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwge.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwge_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmkwle_mvl(int vl) { + // CHECK-LABEL: @test_vfmkwle_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwle.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwle_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmkwle_mvml(int vl) { + // CHECK-LABEL: @test_vfmkwle_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwle.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwle_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) 
+test_vfmkwnum_mvl(int vl) { + // CHECK-LABEL: @test_vfmkwnum_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwnum.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwnum_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmkwnum_mvml(int vl) { + // CHECK-LABEL: @test_vfmkwnum_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwnum.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwnum_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmkwnan_mvl(int vl) { + // CHECK-LABEL: @test_vfmkwnan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwnan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmkwnan_mvml(int vl) { + // CHECK-LABEL: @test_vfmkwnan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwnan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmkwgtnan_mvl(int vl) { + // CHECK-LABEL: @test_vfmkwgtnan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwgtnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwgtnan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmkwgtnan_mvml(int vl) { + // CHECK-LABEL: @test_vfmkwgtnan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwgtnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwgtnan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmkwltnan_mvl(int vl) { + // CHECK-LABEL: @test_vfmkwltnan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwltnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwltnan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmkwltnan_mvml(int vl) { + // CHECK-LABEL: @test_vfmkwltnan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwltnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwltnan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) 
+test_vfmkwnenan_mvl(int vl) { + // CHECK-LABEL: @test_vfmkwnenan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwnenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwnenan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmkwnenan_mvml(int vl) { + // CHECK-LABEL: @test_vfmkwnenan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwnenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwnenan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmkweqnan_mvl(int vl) { + // CHECK-LABEL: @test_vfmkweqnan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkweqnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkweqnan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmkweqnan_mvml(int vl) { + // CHECK-LABEL: @test_vfmkweqnan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkweqnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkweqnan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmkwgenan_mvl(int vl) { + // CHECK-LABEL: @test_vfmkwgenan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwgenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwgenan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmkwgenan_mvml(int vl) { + // CHECK-LABEL: @test_vfmkwgenan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwgenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwgenan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_vfmkwlenan_mvl(int vl) { + // CHECK-LABEL: @test_vfmkwlenan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwlenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwlenan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_vfmkwlenan_mvml(int vl) { + // CHECK-LABEL: @test_vfmkwlenan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.vfmkwlenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_vfmkwlenan_mvml(vr1, vm2, vl); +} + +void 
__attribute__((noinline)) +test_pvfmkwlogt_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwlogt_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlogt.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlogt_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupgt_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwupgt_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupgt.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupgt_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlogt_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwlogt_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlogt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlogt_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupgt_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwupgt_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupgt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupgt_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlolt_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwlolt_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlolt.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlolt_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwuplt_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwuplt_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwuplt.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwuplt_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlolt_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwlolt_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlolt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlolt_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwuplt_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwuplt_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwuplt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwuplt_mvml(vr1, vm2, 
vl); +} + +void __attribute__((noinline)) +test_pvfmkwlone_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwlone_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlone.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlone_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupne_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwupne_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupne.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupne_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlone_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwlone_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlone.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlone_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupne_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwupne_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupne.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupne_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwloeq_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwloeq_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwloeq.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwloeq_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupeq_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwupeq_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupeq.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupeq_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwloeq_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwloeq_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwloeq.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwloeq_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupeq_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwupeq_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupeq.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = 
_vel_pvfmkwupeq_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwloge_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwloge_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwloge.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwloge_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupge_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwupge_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupge.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupge_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwloge_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwloge_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwloge.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwloge_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupge_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwupge_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupge.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupge_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlole_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwlole_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlole.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlole_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwuple_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwuple_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwuple.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwuple_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlole_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwlole_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlole.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlole_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwuple_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwuple_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwuple.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 
%{{.*}}) + vm1 = _vel_pvfmkwuple_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlonum_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwlonum_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlonum.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlonum_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupnum_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwupnum_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupnum.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupnum_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlonum_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwlonum_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlonum.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlonum_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupnum_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwupnum_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupnum.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupnum_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlonan_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwlonan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlonan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlonan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupnan_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwupnan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupnan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlonan_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwlonan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlonan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlonan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupnan_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwupnan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupnan.mvml(<256 
x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupnan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlogtnan_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwlogtnan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlogtnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlogtnan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupgtnan_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwupgtnan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupgtnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupgtnan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlogtnan_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwlogtnan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlogtnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlogtnan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupgtnan_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwupgtnan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupgtnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupgtnan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwloltnan_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwloltnan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwloltnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwloltnan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupltnan_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwupltnan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupltnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupltnan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwloltnan_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwloltnan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwloltnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwloltnan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupltnan_mvml(int vl) { 
+ // CHECK-LABEL: @test_pvfmkwupltnan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupltnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupltnan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlonenan_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwlonenan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlonenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlonenan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupnenan_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwupnenan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupnenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupnenan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlonenan_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwlonenan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlonenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlonenan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupnenan_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwupnenan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupnenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupnenan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwloeqnan_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwloeqnan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwloeqnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwloeqnan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupeqnan_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwupeqnan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupeqnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupeqnan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwloeqnan_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwloeqnan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwloeqnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = 
_vel_pvfmkwloeqnan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupeqnan_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwupeqnan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupeqnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupeqnan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlogenan_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwlogenan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlogenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlogenan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupgenan_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwupgenan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupgenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupgenan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlogenan_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwlogenan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlogenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlogenan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwupgenan_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwupgenan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwupgenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwupgenan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlolenan_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwlolenan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwlolenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlolenan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwuplenan_mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwuplenan_mvl + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwuplenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwuplenan_mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlolenan_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwlolenan_mvml + // CHECK: 
call <256 x i1> @llvm.ve.vl.pvfmkwlolenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwlolenan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwuplenan_mvml(int vl) { + // CHECK-LABEL: @test_pvfmkwuplenan_mvml + // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkwuplenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}}) + vm1 = _vel_pvfmkwuplenan_mvml(vr1, vm2, vl); +} + +void __attribute__((noinline)) +test_pvfmkwgt_Mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwgt_Mvl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwgt.Mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwgt_Mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwgt_MvMl(int vl) { + // CHECK-LABEL: @test_pvfmkwgt_MvMl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwgt.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwgt_MvMl(vr1, vm2_512, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlt_Mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwlt_Mvl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwlt.Mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwlt_Mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwlt_MvMl(int vl) { + // CHECK-LABEL: @test_pvfmkwlt_MvMl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwlt.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwlt_MvMl(vr1, vm2_512, vl); +} + +void __attribute__((noinline)) +test_pvfmkwne_Mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwne_Mvl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwne.Mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwne_Mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwne_MvMl(int vl) { + // CHECK-LABEL: @test_pvfmkwne_MvMl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwne.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwne_MvMl(vr1, vm2_512, vl); +} + +void __attribute__((noinline)) +test_pvfmkweq_Mvl(int vl) { + // 
CHECK-LABEL: @test_pvfmkweq_Mvl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkweq.Mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkweq_Mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkweq_MvMl(int vl) { + // CHECK-LABEL: @test_pvfmkweq_MvMl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkweq.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkweq_MvMl(vr1, vm2_512, vl); +} + +void __attribute__((noinline)) +test_pvfmkwge_Mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwge_Mvl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwge.Mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwge_Mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwge_MvMl(int vl) { + // CHECK-LABEL: @test_pvfmkwge_MvMl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwge.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwge_MvMl(vr1, vm2_512, vl); +} + +void __attribute__((noinline)) +test_pvfmkwle_Mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwle_Mvl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwle.Mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwle_Mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwle_MvMl(int vl) { + // CHECK-LABEL: @test_pvfmkwle_MvMl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwle.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwle_MvMl(vr1, vm2_512, vl); +} + +void __attribute__((noinline)) +test_pvfmkwnum_Mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwnum_Mvl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwnum.Mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwnum_Mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwnum_MvMl(int vl) { + // CHECK-LABEL: @test_pvfmkwnum_MvMl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwnum.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwnum_MvMl(vr1, vm2_512, vl); +} + +void __attribute__((noinline)) +test_pvfmkwnan_Mvl(int vl) { 
+ // CHECK-LABEL: @test_pvfmkwnan_Mvl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwnan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwnan_Mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwnan_MvMl(int vl) { + // CHECK-LABEL: @test_pvfmkwnan_MvMl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwnan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwnan_MvMl(vr1, vm2_512, vl); +} + +void __attribute__((noinline)) +test_pvfmkwgtnan_Mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwgtnan_Mvl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwgtnan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwgtnan_Mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwgtnan_MvMl(int vl) { + // CHECK-LABEL: @test_pvfmkwgtnan_MvMl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwgtnan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwgtnan_MvMl(vr1, vm2_512, vl); +} + +void __attribute__((noinline)) +test_pvfmkwltnan_Mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwltnan_Mvl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwltnan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwltnan_Mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwltnan_MvMl(int vl) { + // CHECK-LABEL: @test_pvfmkwltnan_MvMl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwltnan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwltnan_MvMl(vr1, vm2_512, vl); +} + +void __attribute__((noinline)) +test_pvfmkwnenan_Mvl(int vl) { + // CHECK-LABEL: @test_pvfmkwnenan_Mvl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwnenan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwnenan_Mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkwnenan_MvMl(int vl) { + // CHECK-LABEL: @test_pvfmkwnenan_MvMl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwnenan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkwnenan_MvMl(vr1, 
vm2_512, vl);
}

// NOTE(review): auto-generated regression tests for the VE vfmk/pvfmk
// mask-creation intrinsics.  The "// CHECK" lines are FileCheck directives
// and are load-bearing test content -- do not edit them by hand.  Each test
// stores into the global vm1 (<256 x i1> mask) or vm1_512 (<512 x i1> mask
// pair), matching the mask width asserted in its CHECK line.

void __attribute__((noinline))
test_pvfmkweqnan_Mvl(int vl) {
  // CHECK-LABEL: @test_pvfmkweqnan_Mvl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkweqnan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmkweqnan_Mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmkweqnan_MvMl(int vl) {
  // CHECK-LABEL: @test_pvfmkweqnan_MvMl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkweqnan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmkweqnan_MvMl(vr1, vm2_512, vl);
}

void __attribute__((noinline))
test_pvfmkwgenan_Mvl(int vl) {
  // CHECK-LABEL: @test_pvfmkwgenan_Mvl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwgenan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmkwgenan_Mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmkwgenan_MvMl(int vl) {
  // CHECK-LABEL: @test_pvfmkwgenan_MvMl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwgenan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmkwgenan_MvMl(vr1, vm2_512, vl);
}

void __attribute__((noinline))
test_pvfmkwlenan_Mvl(int vl) {
  // CHECK-LABEL: @test_pvfmkwlenan_Mvl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwlenan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmkwlenan_Mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmkwlenan_MvMl(int vl) {
  // CHECK-LABEL: @test_pvfmkwlenan_MvMl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkwlenan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmkwlenan_MvMl(vr1, vm2_512, vl);
}

// vfmkd* -- compare-mask creation, one <256 x i1> result; the 'd' family
// presumably operates on double-precision elements (verify against the VE
// ISA manual / velintrin.h).

void __attribute__((noinline))
test_vfmkdgt_mvl(int vl) {
  // CHECK-LABEL: @test_vfmkdgt_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdgt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdgt_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmkdgt_mvml(int vl) {
  // CHECK-LABEL: @test_vfmkdgt_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdgt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdgt_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmkdlt_mvl(int vl) {
  // CHECK-LABEL: @test_vfmkdlt_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdlt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdlt_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmkdlt_mvml(int vl) {
  // CHECK-LABEL: @test_vfmkdlt_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdlt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdlt_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmkdne_mvl(int vl) {
  // CHECK-LABEL: @test_vfmkdne_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdne.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdne_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmkdne_mvml(int vl) {
  // CHECK-LABEL: @test_vfmkdne_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdne.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdne_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmkdeq_mvl(int vl) {
  // CHECK-LABEL: @test_vfmkdeq_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdeq.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdeq_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmkdeq_mvml(int vl) {
  // CHECK-LABEL: @test_vfmkdeq_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdeq.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdeq_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmkdge_mvl(int vl) {
  // CHECK-LABEL: @test_vfmkdge_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdge.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdge_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmkdge_mvml(int vl) {
  // CHECK-LABEL: @test_vfmkdge_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdge.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdge_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmkdle_mvl(int vl) {
  // CHECK-LABEL: @test_vfmkdle_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdle.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdle_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmkdle_mvml(int vl) {
  // CHECK-LABEL: @test_vfmkdle_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdle.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdle_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmkdnum_mvl(int vl) {
  // CHECK-LABEL: @test_vfmkdnum_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdnum.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdnum_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmkdnum_mvml(int vl) {
  // CHECK-LABEL: @test_vfmkdnum_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdnum.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdnum_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmkdnan_mvl(int vl) {
  // CHECK-LABEL: @test_vfmkdnan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdnan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmkdnan_mvml(int vl) {
  // CHECK-LABEL: @test_vfmkdnan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdnan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmkdgtnan_mvl(int vl) {
  // CHECK-LABEL: @test_vfmkdgtnan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdgtnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdgtnan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmkdgtnan_mvml(int vl) {
  // CHECK-LABEL: @test_vfmkdgtnan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdgtnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdgtnan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmkdltnan_mvl(int vl) {
  // CHECK-LABEL: @test_vfmkdltnan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdltnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdltnan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmkdltnan_mvml(int vl) {
  // CHECK-LABEL: @test_vfmkdltnan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdltnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdltnan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmkdnenan_mvl(int vl) {
  // CHECK-LABEL: @test_vfmkdnenan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdnenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdnenan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmkdnenan_mvml(int vl) {
  // CHECK-LABEL: @test_vfmkdnenan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdnenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdnenan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmkdeqnan_mvl(int vl) {
  // CHECK-LABEL: @test_vfmkdeqnan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdeqnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdeqnan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmkdeqnan_mvml(int vl) {
  // CHECK-LABEL: @test_vfmkdeqnan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdeqnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdeqnan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmkdgenan_mvl(int vl) {
  // CHECK-LABEL: @test_vfmkdgenan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdgenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdgenan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmkdgenan_mvml(int vl) {
  // CHECK-LABEL: @test_vfmkdgenan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdgenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdgenan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmkdlenan_mvl(int vl) {
  // CHECK-LABEL: @test_vfmkdlenan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdlenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdlenan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmkdlenan_mvml(int vl) {
  // CHECK-LABEL: @test_vfmkdlenan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkdlenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkdlenan_mvml(vr1, vm2, vl);
}

// vfmks* -- same compare-condition set as vfmkd*, one <256 x i1> result;
// the 's' family presumably operates on single-precision elements (verify
// against the VE ISA manual / velintrin.h).

void __attribute__((noinline))
test_vfmksgt_mvl(int vl) {
  // CHECK-LABEL: @test_vfmksgt_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksgt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksgt_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmksgt_mvml(int vl) {
  // CHECK-LABEL: @test_vfmksgt_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksgt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksgt_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmkslt_mvl(int vl) {
  // CHECK-LABEL: @test_vfmkslt_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkslt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkslt_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmkslt_mvml(int vl) {
  // CHECK-LABEL: @test_vfmkslt_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkslt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkslt_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmksne_mvl(int vl) {
  // CHECK-LABEL: @test_vfmksne_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksne.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksne_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmksne_mvml(int vl) {
  // CHECK-LABEL: @test_vfmksne_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksne.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksne_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmkseq_mvl(int vl) {
  // CHECK-LABEL: @test_vfmkseq_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkseq.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkseq_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmkseq_mvml(int vl) {
  // CHECK-LABEL: @test_vfmkseq_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkseq.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkseq_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmksge_mvl(int vl) {
  // CHECK-LABEL: @test_vfmksge_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksge.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksge_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmksge_mvml(int vl) {
  // CHECK-LABEL: @test_vfmksge_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksge.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksge_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmksle_mvl(int vl) {
  // CHECK-LABEL: @test_vfmksle_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksle.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksle_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmksle_mvml(int vl) {
  // CHECK-LABEL: @test_vfmksle_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksle.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksle_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmksnum_mvl(int vl) {
  // CHECK-LABEL: @test_vfmksnum_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksnum.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksnum_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmksnum_mvml(int vl) {
  // CHECK-LABEL: @test_vfmksnum_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksnum.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksnum_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmksnan_mvl(int vl) {
  // CHECK-LABEL: @test_vfmksnan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksnan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmksnan_mvml(int vl) {
  // CHECK-LABEL: @test_vfmksnan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksnan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmksgtnan_mvl(int vl) {
  // CHECK-LABEL: @test_vfmksgtnan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksgtnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksgtnan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmksgtnan_mvml(int vl) {
  // CHECK-LABEL: @test_vfmksgtnan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksgtnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksgtnan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmksltnan_mvl(int vl) {
  // CHECK-LABEL: @test_vfmksltnan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksltnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksltnan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmksltnan_mvml(int vl) {
  // CHECK-LABEL: @test_vfmksltnan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksltnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksltnan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmksnenan_mvl(int vl) {
  // CHECK-LABEL: @test_vfmksnenan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksnenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksnenan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmksnenan_mvml(int vl) {
  // CHECK-LABEL: @test_vfmksnenan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksnenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksnenan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmkseqnan_mvl(int vl) {
  // CHECK-LABEL: @test_vfmkseqnan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkseqnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkseqnan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmkseqnan_mvml(int vl) {
  // CHECK-LABEL: @test_vfmkseqnan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkseqnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkseqnan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmksgenan_mvl(int vl) {
  // CHECK-LABEL: @test_vfmksgenan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksgenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksgenan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmksgenan_mvml(int vl) {
  // CHECK-LABEL: @test_vfmksgenan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmksgenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmksgenan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_vfmkslenan_mvl(int vl) {
  // CHECK-LABEL: @test_vfmkslenan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkslenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkslenan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_vfmkslenan_mvml(int vl) {
  // CHECK-LABEL: @test_vfmkslenan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.vfmkslenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_vfmkslenan_mvml(vr1, vm2, vl);
}

// pvfmks lo/up -- packed variants producing one <256 x i1> mask per call;
// 'lo'/'up' presumably select the lower/upper half of each packed element
// pair (verify against the VE ISA manual).

void __attribute__((noinline))
test_pvfmkslogt_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmkslogt_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslogt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslogt_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksupgt_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksupgt_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupgt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupgt_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmkslogt_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmkslogt_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslogt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslogt_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmksupgt_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmksupgt_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupgt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupgt_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmkslolt_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmkslolt_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslolt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslolt_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksuplt_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksuplt_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksuplt.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksuplt_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmkslolt_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmkslolt_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslolt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslolt_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmksuplt_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmksuplt_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksuplt.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksuplt_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmkslone_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmkslone_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslone.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslone_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksupne_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksupne_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupne.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupne_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmkslone_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmkslone_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslone.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslone_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmksupne_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmksupne_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupne.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupne_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmksloeq_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksloeq_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksloeq.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksloeq_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksupeq_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksupeq_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupeq.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupeq_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksloeq_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmksloeq_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksloeq.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksloeq_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmksupeq_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmksupeq_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupeq.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupeq_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmksloge_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksloge_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksloge.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksloge_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksupge_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksupge_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupge.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupge_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksloge_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmksloge_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksloge.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksloge_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmksupge_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmksupge_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupge.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupge_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmkslole_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmkslole_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslole.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslole_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksuple_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksuple_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksuple.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksuple_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmkslole_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmkslole_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslole.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslole_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmksuple_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmksuple_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksuple.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksuple_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmkslonum_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmkslonum_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslonum.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslonum_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksupnum_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksupnum_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupnum.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupnum_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmkslonum_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmkslonum_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslonum.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslonum_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmksupnum_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmksupnum_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupnum.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupnum_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmkslonan_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmkslonan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslonan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslonan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksupnan_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksupnan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupnan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmkslonan_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmkslonan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslonan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslonan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmksupnan_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmksupnan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupnan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmkslogtnan_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmkslogtnan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslogtnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslogtnan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksupgtnan_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksupgtnan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupgtnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupgtnan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmkslogtnan_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmkslogtnan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslogtnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslogtnan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmksupgtnan_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmksupgtnan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupgtnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupgtnan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmksloltnan_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksloltnan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksloltnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksloltnan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksupltnan_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksupltnan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupltnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupltnan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksloltnan_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmksloltnan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksloltnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksloltnan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmksupltnan_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmksupltnan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupltnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupltnan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmkslonenan_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmkslonenan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslonenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslonenan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksupnenan_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksupnenan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupnenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupnenan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmkslonenan_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmkslonenan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslonenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslonenan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmksupnenan_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmksupnenan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupnenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupnenan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmksloeqnan_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksloeqnan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksloeqnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksloeqnan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksupeqnan_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksupeqnan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupeqnan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupeqnan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksloeqnan_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmksloeqnan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksloeqnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksloeqnan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmksupeqnan_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmksupeqnan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupeqnan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupeqnan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmkslogenan_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmkslogenan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslogenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslogenan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksupgenan_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksupgenan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupgenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupgenan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmkslogenan_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmkslogenan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslogenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslogenan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmksupgenan_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmksupgenan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksupgenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksupgenan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmkslolenan_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmkslolenan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslolenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslolenan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksuplenan_mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksuplenan_mvl
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksuplenan.mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksuplenan_mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmkslolenan_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmkslolenan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmkslolenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmkslolenan_mvml(vr1, vm2, vl);
}

void __attribute__((noinline))
test_pvfmksuplenan_mvml(int vl) {
  // CHECK-LABEL: @test_pvfmksuplenan_mvml
  // CHECK: call <256 x i1> @llvm.ve.vl.pvfmksuplenan.mvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 %{{.*}})
  vm1 = _vel_pvfmksuplenan_mvml(vr1, vm2, vl);
}

// pvfmks *_Mvl / *_MvMl -- packed variants producing a full <512 x i1>
// mask pair (stored in vm1_512; the masked form also consumes vm2_512).

void __attribute__((noinline))
test_pvfmksgt_Mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksgt_Mvl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksgt.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmksgt_Mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksgt_MvMl(int vl) {
  // CHECK-LABEL: @test_pvfmksgt_MvMl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksgt.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmksgt_MvMl(vr1, vm2_512, vl);
}

void __attribute__((noinline))
test_pvfmkslt_Mvl(int vl) {
  // CHECK-LABEL: @test_pvfmkslt_Mvl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkslt.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmkslt_Mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmkslt_MvMl(int vl) {
  // CHECK-LABEL: @test_pvfmkslt_MvMl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkslt.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmkslt_MvMl(vr1, vm2_512, vl);
}

void __attribute__((noinline))
test_pvfmksne_Mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksne_Mvl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksne.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmksne_Mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksne_MvMl(int vl) {
  // CHECK-LABEL: @test_pvfmksne_MvMl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksne.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmksne_MvMl(vr1, vm2_512, vl);
}

void __attribute__((noinline))
test_pvfmkseq_Mvl(int vl) {
  // CHECK-LABEL: @test_pvfmkseq_Mvl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkseq.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmkseq_Mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmkseq_MvMl(int vl) {
  // CHECK-LABEL: @test_pvfmkseq_MvMl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkseq.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmkseq_MvMl(vr1, vm2_512, vl);
}

void __attribute__((noinline))
test_pvfmksge_Mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksge_Mvl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksge.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmksge_Mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksge_MvMl(int vl) {
  // CHECK-LABEL: @test_pvfmksge_MvMl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksge.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmksge_MvMl(vr1, vm2_512, vl);
}

void __attribute__((noinline))
test_pvfmksle_Mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksle_Mvl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksle.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmksle_Mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksle_MvMl(int vl) {
  // CHECK-LABEL: @test_pvfmksle_MvMl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksle.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmksle_MvMl(vr1, vm2_512, vl);
}

void __attribute__((noinline))
test_pvfmksnum_Mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksnum_Mvl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksnum.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmksnum_Mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksnum_MvMl(int vl) {
  // CHECK-LABEL: @test_pvfmksnum_MvMl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksnum.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmksnum_MvMl(vr1, vm2_512, vl);
}

void __attribute__((noinline))
test_pvfmksnan_Mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksnan_Mvl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksnan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmksnan_Mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksnan_MvMl(int vl) {
  // CHECK-LABEL: @test_pvfmksnan_MvMl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksnan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmksnan_MvMl(vr1, vm2_512, vl);
}

void __attribute__((noinline))
test_pvfmksgtnan_Mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksgtnan_Mvl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksgtnan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmksgtnan_Mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksgtnan_MvMl(int vl) {
  // CHECK-LABEL: @test_pvfmksgtnan_MvMl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksgtnan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmksgtnan_MvMl(vr1, vm2_512, vl);
}

void __attribute__((noinline))
test_pvfmksltnan_Mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksltnan_Mvl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksltnan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmksltnan_Mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksltnan_MvMl(int vl) {
  // CHECK-LABEL: @test_pvfmksltnan_MvMl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksltnan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmksltnan_MvMl(vr1, vm2_512, vl);
}

void __attribute__((noinline))
test_pvfmksnenan_Mvl(int vl) {
  // CHECK-LABEL: @test_pvfmksnenan_Mvl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksnenan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmksnenan_Mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmksnenan_MvMl(int vl) {
  // CHECK-LABEL: @test_pvfmksnenan_MvMl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksnenan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmksnenan_MvMl(vr1, vm2_512, vl);
}

void __attribute__((noinline))
test_pvfmkseqnan_Mvl(int vl) {
  // CHECK-LABEL: @test_pvfmkseqnan_Mvl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkseqnan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmkseqnan_Mvl(vr1, vl);
}

void __attribute__((noinline))
test_pvfmkseqnan_MvMl(int vl) {
  // CHECK-LABEL: @test_pvfmkseqnan_MvMl
  // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkseqnan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}})
  vm1_512 = _vel_pvfmkseqnan_MvMl(vr1, vm2_512, vl);
}

void
__attribute__((noinline)) +test_pvfmksgenan_Mvl(int vl) { + // CHECK-LABEL: @test_pvfmksgenan_Mvl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksgenan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmksgenan_Mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmksgenan_MvMl(int vl) { + // CHECK-LABEL: @test_pvfmksgenan_MvMl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmksgenan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmksgenan_MvMl(vr1, vm2_512, vl); +} + +void __attribute__((noinline)) +test_pvfmkslenan_Mvl(int vl) { + // CHECK-LABEL: @test_pvfmkslenan_Mvl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkslenan.Mvl(<256 x double> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkslenan_Mvl(vr1, vl); +} + +void __attribute__((noinline)) +test_pvfmkslenan_MvMl(int vl) { + // CHECK-LABEL: @test_pvfmkslenan_MvMl + // CHECK: call <512 x i1> @llvm.ve.vl.pvfmkslenan.MvMl(<256 x double> %{{.*}}, <512 x i1> %{{.*}}, i32 %{{.*}}) + vm1_512 = _vel_pvfmkslenan_MvMl(vr1, vm2_512, vl); +} + +void __attribute__((noinline)) test_vsumwsx_vvl() { // CHECK-LABEL: @test_vsumwsx_vvl // CHECK: call <256 x double> @llvm.ve.vl.vsumwsx.vvl(<256 x double> %{{.*}}, i32 256) @@ -4008,6 +7875,13 @@ test_vsumwsx_vvl() { } void __attribute__((noinline)) +test_vsumwsx_vvml() { + // CHECK-LABEL: @test_vsumwsx_vvml + // CHECK: call <256 x double> @llvm.ve.vl.vsumwsx.vvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256) + vr2 = _vel_vsumwsx_vvml(vr1, vm1, 256); +} + +void __attribute__((noinline)) test_vsumwzx_vvl() { // CHECK-LABEL: @test_vsumwzx_vvl // CHECK: call <256 x double> @llvm.ve.vl.vsumwzx.vvl(<256 x double> %{{.*}}, i32 256) @@ -4015,6 +7889,13 @@ test_vsumwzx_vvl() { } void __attribute__((noinline)) +test_vsumwzx_vvml() { + // CHECK-LABEL: @test_vsumwzx_vvml + // CHECK: call <256 x double> @llvm.ve.vl.vsumwzx.vvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256) + vr2 = _vel_vsumwzx_vvml(vr1, vm1, 256); +} + +void 
__attribute__((noinline)) test_vsuml_vvl() { // CHECK-LABEL: @test_vsuml_vvl // CHECK: call <256 x double> @llvm.ve.vl.vsuml.vvl(<256 x double> %{{.*}}, i32 256) @@ -4022,6 +7903,13 @@ test_vsuml_vvl() { } void __attribute__((noinline)) +test_vsuml_vvml() { + // CHECK-LABEL: @test_vsuml_vvml + // CHECK: call <256 x double> @llvm.ve.vl.vsuml.vvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256) + vr2 = _vel_vsuml_vvml(vr1, vm1, 256); +} + +void __attribute__((noinline)) test_vfsumd_vvl() { // CHECK-LABEL: @test_vfsumd_vvl // CHECK: call <256 x double> @llvm.ve.vl.vfsumd.vvl(<256 x double> %{{.*}}, i32 256) @@ -4029,6 +7917,13 @@ test_vfsumd_vvl() { } void __attribute__((noinline)) +test_vfsumd_vvml() { + // CHECK-LABEL: @test_vfsumd_vvml + // CHECK: call <256 x double> @llvm.ve.vl.vfsumd.vvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256) + vr2 = _vel_vfsumd_vvml(vr1, vm1, 256); +} + +void __attribute__((noinline)) test_vfsums_vvl() { // CHECK-LABEL: @test_vfsums_vvl // CHECK: call <256 x double> @llvm.ve.vl.vfsums.vvl(<256 x double> %{{.*}}, i32 256) @@ -4036,6 +7931,13 @@ test_vfsums_vvl() { } void __attribute__((noinline)) +test_vfsums_vvml() { + // CHECK-LABEL: @test_vfsums_vvml + // CHECK: call <256 x double> @llvm.ve.vl.vfsums.vvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256) + vr2 = _vel_vfsums_vvml(vr1, vm1, 256); +} + +void __attribute__((noinline)) test_vrmaxswfstsx_vvl() { // CHECK-LABEL: @test_vrmaxswfstsx_vvl // CHECK: call <256 x double> @llvm.ve.vl.vrmaxswfstsx.vvl(<256 x double> %{{.*}}, i32 256) @@ -4323,6 +8225,13 @@ test_vrand_vvl() { } void __attribute__((noinline)) +test_vrand_vvml() { + // CHECK-LABEL: @test_vrand_vvml + // CHECK: call <256 x double> @llvm.ve.vl.vrand.vvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256) + vr3 = _vel_vrand_vvml(vr1, vm1, 256); +} + +void __attribute__((noinline)) test_vror_vvl() { // CHECK-LABEL: @test_vror_vvl // CHECK: call <256 x double> @llvm.ve.vl.vror.vvl(<256 x double> %{{.*}}, i32 
256) @@ -4330,6 +8239,13 @@ test_vror_vvl() { } void __attribute__((noinline)) +test_vror_vvml() { + // CHECK-LABEL: @test_vror_vvml + // CHECK: call <256 x double> @llvm.ve.vl.vror.vvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256) + vr3 = _vel_vror_vvml(vr1, vm1, 256); +} + +void __attribute__((noinline)) test_vrxor_vvl() { // CHECK-LABEL: @test_vrxor_vvl // CHECK: call <256 x double> @llvm.ve.vl.vrxor.vvl(<256 x double> %{{.*}}, i32 256) @@ -4337,6 +8253,13 @@ test_vrxor_vvl() { } void __attribute__((noinline)) +test_vrxor_vvml() { + // CHECK-LABEL: @test_vrxor_vvml + // CHECK: call <256 x double> @llvm.ve.vl.vrxor.vvml(<256 x double> %{{.*}}, <256 x i1> %{{.*}}, i32 256) + vr3 = _vel_vrxor_vvml(vr1, vm1, 256); +} + +void __attribute__((noinline)) test_vgt_vvssl() { // CHECK-LABEL: @test_vgt_vvssl // CHECK: call <256 x double> @llvm.ve.vl.vgt.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4351,6 +8274,20 @@ test_vgt_vvssvl() { } void __attribute__((noinline)) +test_vgt_vvssml() { + // CHECK-LABEL: @test_vgt_vvssml + // CHECK: call <256 x double> @llvm.ve.vl.vgt.vvssml(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256) + vr3 = _vel_vgt_vvssml(vr1, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) +test_vgt_vvssmvl() { + // CHECK-LABEL: @test_vgt_vvssmvl + // CHECK: call <256 x double> @llvm.ve.vl.vgt.vvssmvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vgt_vvssmvl(vr1, v1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vgtnc_vvssl() { // CHECK-LABEL: @test_vgtnc_vvssl // CHECK: call <256 x double> @llvm.ve.vl.vgtnc.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4365,6 +8302,20 @@ test_vgtnc_vvssvl() { } void __attribute__((noinline)) +test_vgtnc_vvssml() { + // CHECK-LABEL: @test_vgtnc_vvssml + // CHECK: call <256 x double> @llvm.ve.vl.vgtnc.vvssml(<256 x double> %{{.*}}, i64 %{{.*}}, i64 
%{{.*}}, <256 x i1> %{{.*}}, i32 256) + vr3 = _vel_vgtnc_vvssml(vr1, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) +test_vgtnc_vvssmvl() { + // CHECK-LABEL: @test_vgtnc_vvssmvl + // CHECK: call <256 x double> @llvm.ve.vl.vgtnc.vvssmvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vgtnc_vvssmvl(vr1, v1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vgtu_vvssl() { // CHECK-LABEL: @test_vgtu_vvssl // CHECK: call <256 x double> @llvm.ve.vl.vgtu.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4379,6 +8330,20 @@ test_vgtu_vvssvl() { } void __attribute__((noinline)) +test_vgtu_vvssml() { + // CHECK-LABEL: @test_vgtu_vvssml + // CHECK: call <256 x double> @llvm.ve.vl.vgtu.vvssml(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256) + vr3 = _vel_vgtu_vvssml(vr1, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) +test_vgtu_vvssmvl() { + // CHECK-LABEL: @test_vgtu_vvssmvl + // CHECK: call <256 x double> @llvm.ve.vl.vgtu.vvssmvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vgtu_vvssmvl(vr1, v1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vgtunc_vvssl() { // CHECK-LABEL: @test_vgtunc_vvssl // CHECK: call <256 x double> @llvm.ve.vl.vgtunc.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4393,6 +8358,20 @@ test_vgtunc_vvssvl() { } void __attribute__((noinline)) +test_vgtunc_vvssml() { + // CHECK-LABEL: @test_vgtunc_vvssml + // CHECK: call <256 x double> @llvm.ve.vl.vgtunc.vvssml(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256) + vr3 = _vel_vgtunc_vvssml(vr1, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) +test_vgtunc_vvssmvl() { + // CHECK-LABEL: @test_vgtunc_vvssmvl + // CHECK: call <256 x double> @llvm.ve.vl.vgtunc.vvssmvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> 
%{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vgtunc_vvssmvl(vr1, v1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vgtlsx_vvssl() { // CHECK-LABEL: @test_vgtlsx_vvssl // CHECK: call <256 x double> @llvm.ve.vl.vgtlsx.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4407,6 +8386,20 @@ test_vgtlsx_vvssvl() { } void __attribute__((noinline)) +test_vgtlsx_vvssml() { + // CHECK-LABEL: @test_vgtlsx_vvssml + // CHECK: call <256 x double> @llvm.ve.vl.vgtlsx.vvssml(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256) + vr3 = _vel_vgtlsx_vvssml(vr1, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) +test_vgtlsx_vvssmvl() { + // CHECK-LABEL: @test_vgtlsx_vvssmvl + // CHECK: call <256 x double> @llvm.ve.vl.vgtlsx.vvssmvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vgtlsx_vvssmvl(vr1, v1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vgtlsxnc_vvssl() { // CHECK-LABEL: @test_vgtlsxnc_vvssl // CHECK: call <256 x double> @llvm.ve.vl.vgtlsxnc.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4421,6 +8414,20 @@ test_vgtlsxnc_vvssvl() { } void __attribute__((noinline)) +test_vgtlsxnc_vvssml() { + // CHECK-LABEL: @test_vgtlsxnc_vvssml + // CHECK: call <256 x double> @llvm.ve.vl.vgtlsxnc.vvssml(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256) + vr3 = _vel_vgtlsxnc_vvssml(vr1, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) +test_vgtlsxnc_vvssmvl() { + // CHECK-LABEL: @test_vgtlsxnc_vvssmvl + // CHECK: call <256 x double> @llvm.ve.vl.vgtlsxnc.vvssmvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vgtlsxnc_vvssmvl(vr1, v1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vgtlzx_vvssl() { // CHECK-LABEL: @test_vgtlzx_vvssl // CHECK: call <256 x double> @llvm.ve.vl.vgtlzx.vvssl(<256 x 
double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4435,6 +8442,20 @@ test_vgtlzx_vvssvl() { } void __attribute__((noinline)) +test_vgtlzx_vvssml() { + // CHECK-LABEL: @test_vgtlzx_vvssml + // CHECK: call <256 x double> @llvm.ve.vl.vgtlzx.vvssml(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256) + vr3 = _vel_vgtlzx_vvssml(vr1, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) +test_vgtlzx_vvssmvl() { + // CHECK-LABEL: @test_vgtlzx_vvssmvl + // CHECK: call <256 x double> @llvm.ve.vl.vgtlzx.vvssmvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vgtlzx_vvssmvl(vr1, v1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vgtlzxnc_vvssl() { // CHECK-LABEL: @test_vgtlzxnc_vvssl // CHECK: call <256 x double> @llvm.ve.vl.vgtlzxnc.vvssl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4449,6 +8470,20 @@ test_vgtlzxnc_vvssvl() { } void __attribute__((noinline)) +test_vgtlzxnc_vvssml() { + // CHECK-LABEL: @test_vgtlzxnc_vvssml + // CHECK: call <256 x double> @llvm.ve.vl.vgtlzxnc.vvssml(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256) + vr3 = _vel_vgtlzxnc_vvssml(vr1, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) +test_vgtlzxnc_vvssmvl() { + // CHECK-LABEL: @test_vgtlzxnc_vvssmvl + // CHECK: call <256 x double> @llvm.ve.vl.vgtlzxnc.vvssmvl(<256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, <256 x double> %{{.*}}, i32 256) + vr3 = _vel_vgtlzxnc_vvssmvl(vr1, v1, v2, vm1, vr3, 256); +} + +void __attribute__((noinline)) test_vsc_vvssl() { // CHECK-LABEL: @test_vsc_vvssl // CHECK: call void @llvm.ve.vl.vsc.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4456,6 +8491,13 @@ test_vsc_vvssl() { } void __attribute__((noinline)) +test_vsc_vvssml() { + // CHECK-LABEL: @test_vsc_vvssml + // CHECK: call void @llvm.ve.vl.vsc.vvssml(<256 x double> %{{.*}}, <256 
x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vsc_vvssml(vr1, vr2, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) test_vscnc_vvssl() { // CHECK-LABEL: @test_vscnc_vvssl // CHECK: call void @llvm.ve.vl.vscnc.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4463,6 +8505,13 @@ test_vscnc_vvssl() { } void __attribute__((noinline)) +test_vscnc_vvssml() { + // CHECK-LABEL: @test_vscnc_vvssml + // CHECK: call void @llvm.ve.vl.vscnc.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vscnc_vvssml(vr1, vr2, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) test_vscot_vvssl() { // CHECK-LABEL: @test_vscot_vvssl // CHECK: call void @llvm.ve.vl.vscot.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4470,6 +8519,13 @@ test_vscot_vvssl() { } void __attribute__((noinline)) +test_vscot_vvssml() { + // CHECK-LABEL: @test_vscot_vvssml + // CHECK: call void @llvm.ve.vl.vscot.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vscot_vvssml(vr1, vr2, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) test_vscncot_vvssl() { // CHECK-LABEL: @test_vscncot_vvssl // CHECK: call void @llvm.ve.vl.vscncot.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4477,6 +8533,13 @@ test_vscncot_vvssl() { } void __attribute__((noinline)) +test_vscncot_vvssml() { + // CHECK-LABEL: @test_vscncot_vvssml + // CHECK: call void @llvm.ve.vl.vscncot.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vscncot_vvssml(vr1, vr2, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) test_vscu_vvssl() { // CHECK-LABEL: @test_vscu_vvssl // CHECK: call void @llvm.ve.vl.vscu.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 
%{{.*}}, i32 256) @@ -4484,6 +8547,13 @@ test_vscu_vvssl() { } void __attribute__((noinline)) +test_vscu_vvssml() { + // CHECK-LABEL: @test_vscu_vvssml + // CHECK: call void @llvm.ve.vl.vscu.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vscu_vvssml(vr1, vr2, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) test_vscunc_vvssl() { // CHECK-LABEL: @test_vscunc_vvssl // CHECK: call void @llvm.ve.vl.vscunc.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4491,6 +8561,13 @@ test_vscunc_vvssl() { } void __attribute__((noinline)) +test_vscunc_vvssml() { + // CHECK-LABEL: @test_vscunc_vvssml + // CHECK: call void @llvm.ve.vl.vscunc.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vscunc_vvssml(vr1, vr2, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) test_vscuot_vvssl() { // CHECK-LABEL: @test_vscuot_vvssl // CHECK: call void @llvm.ve.vl.vscuot.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4498,6 +8575,13 @@ test_vscuot_vvssl() { } void __attribute__((noinline)) +test_vscuot_vvssml() { + // CHECK-LABEL: @test_vscuot_vvssml + // CHECK: call void @llvm.ve.vl.vscuot.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vscuot_vvssml(vr1, vr2, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) test_vscuncot_vvssl() { // CHECK-LABEL: @test_vscuncot_vvssl // CHECK: call void @llvm.ve.vl.vscuncot.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4505,6 +8589,13 @@ test_vscuncot_vvssl() { } void __attribute__((noinline)) +test_vscuncot_vvssml() { + // CHECK-LABEL: @test_vscuncot_vvssml + // CHECK: call void @llvm.ve.vl.vscuncot.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 
256) + _vel_vscuncot_vvssml(vr1, vr2, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) test_vscl_vvssl() { // CHECK-LABEL: @test_vscl_vvssl // CHECK: call void @llvm.ve.vl.vscl.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4512,6 +8603,13 @@ test_vscl_vvssl() { } void __attribute__((noinline)) +test_vscl_vvssml() { + // CHECK-LABEL: @test_vscl_vvssml + // CHECK: call void @llvm.ve.vl.vscl.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vscl_vvssml(vr1, vr2, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) test_vsclnc_vvssl() { // CHECK-LABEL: @test_vsclnc_vvssl // CHECK: call void @llvm.ve.vl.vsclnc.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4519,6 +8617,13 @@ test_vsclnc_vvssl() { } void __attribute__((noinline)) +test_vsclnc_vvssml() { + // CHECK-LABEL: @test_vsclnc_vvssml + // CHECK: call void @llvm.ve.vl.vsclnc.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vsclnc_vvssml(vr1, vr2, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) test_vsclot_vvssl() { // CHECK-LABEL: @test_vsclot_vvssl // CHECK: call void @llvm.ve.vl.vsclot.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4526,6 +8631,13 @@ test_vsclot_vvssl() { } void __attribute__((noinline)) +test_vsclot_vvssml() { + // CHECK-LABEL: @test_vsclot_vvssml + // CHECK: call void @llvm.ve.vl.vsclot.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vsclot_vvssml(vr1, vr2, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) test_vsclncot_vvssl() { // CHECK-LABEL: @test_vsclncot_vvssl // CHECK: call void @llvm.ve.vl.vsclncot.vvssl(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, i32 256) @@ -4533,6 +8645,118 @@ 
test_vsclncot_vvssl() { } void __attribute__((noinline)) +test_vsclncot_vvssml() { + // CHECK-LABEL: @test_vsclncot_vvssml + // CHECK: call void @llvm.ve.vl.vsclncot.vvssml(<256 x double> %{{.*}}, <256 x double> %{{.*}}, i64 %{{.*}}, i64 %{{.*}}, <256 x i1> %{{.*}}, i32 256) + _vel_vsclncot_vvssml(vr1, vr2, v1, v2, vm1, 256); +} + +void __attribute__((noinline)) +test_andm_mmm() { + // CHECK-LABEL: @test_andm_mmm + // CHECK: call <256 x i1> @llvm.ve.vl.andm.mmm(<256 x i1> %{{.*}}, <256 x i1> %{{.*}}) + vm3 = _vel_andm_mmm(vm1, vm2); +} + +void __attribute__((noinline)) +test_andm_MMM() { + // CHECK-LABEL: @test_andm_MMM + // CHECK: call <512 x i1> @llvm.ve.vl.andm.MMM(<512 x i1> %{{.*}}, <512 x i1> %{{.*}}) + vm3_512 = _vel_andm_MMM(vm1_512, vm2_512); +} + +void __attribute__((noinline)) +test_orm_mmm() { + // CHECK-LABEL: @test_orm_mmm + // CHECK: call <256 x i1> @llvm.ve.vl.orm.mmm(<256 x i1> %{{.*}}, <256 x i1> %{{.*}}) + vm3 = _vel_orm_mmm(vm1, vm2); +} + +void __attribute__((noinline)) +test_orm_MMM() { + // CHECK-LABEL: @test_orm_MMM + // CHECK: call <512 x i1> @llvm.ve.vl.orm.MMM(<512 x i1> %{{.*}}, <512 x i1> %{{.*}}) + vm3_512 = _vel_orm_MMM(vm1_512, vm2_512); +} + +void __attribute__((noinline)) +test_xorm_mmm() { + // CHECK-LABEL: @test_xorm_mmm + // CHECK: call <256 x i1> @llvm.ve.vl.xorm.mmm(<256 x i1> %{{.*}}, <256 x i1> %{{.*}}) + vm3 = _vel_xorm_mmm(vm1, vm2); +} + +void __attribute__((noinline)) +test_xorm_MMM() { + // CHECK-LABEL: @test_xorm_MMM + // CHECK: call <512 x i1> @llvm.ve.vl.xorm.MMM(<512 x i1> %{{.*}}, <512 x i1> %{{.*}}) + vm3_512 = _vel_xorm_MMM(vm1_512, vm2_512); +} + +void __attribute__((noinline)) +test_eqvm_mmm() { + // CHECK-LABEL: @test_eqvm_mmm + // CHECK: call <256 x i1> @llvm.ve.vl.eqvm.mmm(<256 x i1> %{{.*}}, <256 x i1> %{{.*}}) + vm3 = _vel_eqvm_mmm(vm1, vm2); +} + +void __attribute__((noinline)) +test_eqvm_MMM() { + // CHECK-LABEL: @test_eqvm_MMM + // CHECK: call <512 x i1> @llvm.ve.vl.eqvm.MMM(<512 x i1> %{{.*}}, <512 x 
i1> %{{.*}}) + vm3_512 = _vel_eqvm_MMM(vm1_512, vm2_512); +} + +void __attribute__((noinline)) +test_nndm_mmm() { + // CHECK-LABEL: @test_nndm_mmm + // CHECK: call <256 x i1> @llvm.ve.vl.nndm.mmm(<256 x i1> %{{.*}}, <256 x i1> %{{.*}}) + vm3 = _vel_nndm_mmm(vm1, vm2); +} + +void __attribute__((noinline)) +test_nndm_MMM() { + // CHECK-LABEL: @test_nndm_MMM + // CHECK: call <512 x i1> @llvm.ve.vl.nndm.MMM(<512 x i1> %{{.*}}, <512 x i1> %{{.*}}) + vm3_512 = _vel_nndm_MMM(vm1_512, vm2_512); +} + +void __attribute__((noinline)) +test_negm_mm() { + // CHECK-LABEL: @test_negm_mm + // CHECK: call <256 x i1> @llvm.ve.vl.negm.mm(<256 x i1> %{{.*}}) + vm2 = _vel_negm_mm(vm1); +} + +void __attribute__((noinline)) +test_negm_MM() { + // CHECK-LABEL: @test_negm_MM + // CHECK: call <512 x i1> @llvm.ve.vl.negm.MM(<512 x i1> %{{.*}}) + vm2_512 = _vel_negm_MM(vm1_512); +} + +void __attribute__((noinline)) +test_pcvm_sml() { + // CHECK-LABEL: @test_pcvm_sml + // CHECK: call i64 @llvm.ve.vl.pcvm.sml(<256 x i1> %{{.*}}, i32 256) + v1 = _vel_pcvm_sml(vm1, 256); +} + +void __attribute__((noinline)) +test_lzvm_sml() { + // CHECK-LABEL: @test_lzvm_sml + // CHECK: call i64 @llvm.ve.vl.lzvm.sml(<256 x i1> %{{.*}}, i32 256) + v1 = _vel_lzvm_sml(vm1, 256); +} + +void __attribute__((noinline)) +test_tovm_sml() { + // CHECK-LABEL: @test_tovm_sml + // CHECK: call i64 @llvm.ve.vl.tovm.sml(<256 x i1> %{{.*}}, i32 256) + v1 = _vel_tovm_sml(vm1, 256); +} + +void __attribute__((noinline)) test_lcr_sss() { // CHECK-LABEL: @test_lcr_sss // CHECK: call i64 @llvm.ve.vl.lcr.sss(i64 %{{.*}}, i64 %{{.*}}) |