Lines Matching refs:VsrD

1600 VSX_ADD_SUB(XSADDDP, add, 1, float64, VsrD(0), 1, 0)
1601 VSX_ADD_SUB(XSADDSP, add, 1, float64, VsrD(0), 1, 1)
1602 VSX_ADD_SUB(XVADDDP, add, 2, float64, VsrD(i), 0, 0)
1604 VSX_ADD_SUB(XSSUBDP, sub, 1, float64, VsrD(0), 1, 0)
1605 VSX_ADD_SUB(XSSUBSP, sub, 1, float64, VsrD(0), 1, 1)
1606 VSX_ADD_SUB(XVSUBDP, sub, 2, float64, VsrD(i), 0, 0)
1677 VSX_MUL(XSMULDP, 1, float64, VsrD(0), 1, 0)
1678 VSX_MUL(XSMULSP, 1, float64, VsrD(0), 1, 1)
1679 VSX_MUL(XVMULDP, 2, float64, VsrD(i), 0, 0)
1751 VSX_DIV(XSDIVDP, 1, float64, VsrD(0), 1, 0)
1752 VSX_DIV(XSDIVSP, 1, float64, VsrD(0), 1, 1)
1753 VSX_DIV(XVDIVDP, 2, float64, VsrD(i), 0, 0)
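
The VSX_ADD_SUB, VSX_MUL, and VSX_DIV hits above (these references appear to come from QEMU's target/ppc/fpu_helper.c) all follow one pattern: a single macro generates both the scalar helper (nels = 1, field VsrD(0)) and the vector helper (nels = 2, field VsrD(i), where i is the loop variable inside the macro body). A self-contained analogue of that trick, using plain doubles instead of QEMU's softfloat calls and ppc_vsr_t; the names below are illustrative, not QEMU's:

    #include <stdio.h>

    typedef struct { double d[2]; } vsr_t;   /* stand-in for ppc_vsr_t */

    /* One macro stamps out scalar and vector helpers alike: 'nels' is
     * the element count and 'fld' the accessor, so d[0] pins the scalar
     * form to doubleword 0 while d[i] walks every lane. */
    #define GEN_OP(name, op, nels, fld)                          \
        static void helper_##name(vsr_t *xt, const vsr_t *xa,    \
                                  const vsr_t *xb)               \
        {                                                        \
            for (int i = 0; i < (nels); i++) {                   \
                xt->fld = xa->fld op xb->fld;                    \
            }                                                    \
        }

    GEN_OP(XSADDDP, +, 1, d[0])   /* scalar add, cf. line 1600 */
    GEN_OP(XVADDDP, +, 2, d[i])   /* vector add, cf. line 1602 */

    int main(void)
    {
        vsr_t a = {{1.5, 2.5}}, b = {{0.5, 0.25}}, s, v;
        helper_XSADDDP(&s, &a, &b);
        helper_XVADDDP(&v, &a, &b);
        printf("%g | %g %g\n", s.d[0], v.d[0], v.d[1]);  /* 2 | 2 2.75 */
        return 0;
    }

The real helpers route each element through softfloat (float64_add and friends) and accumulate exception flags in env->fp_status; the raw '+' here is only to show the macro parameterization.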
1819 VSX_RE(xsredp, 1, float64, VsrD(0), 1, 0)
1820 VSX_RE(xsresp, 1, float64, VsrD(0), 1, 1)
1821 VSX_RE(xvredp, 2, float64, VsrD(i), 0, 0)
1864 VSX_SQRT(xssqrtdp, 1, float64, VsrD(0), 1, 0)
1865 VSX_SQRT(xssqrtsp, 1, float64, VsrD(0), 1, 1)
1866 VSX_SQRT(xvsqrtdp, 2, float64, VsrD(i), 0, 0)
1908 VSX_RSQRTE(xsrsqrtedp, 1, float64, VsrD(0), 1, 0)
1909 VSX_RSQRTE(xsrsqrtesp, 1, float64, VsrD(0), 1, 1)
1910 VSX_RSQRTE(xvrsqrtedp, 2, float64, VsrD(i), 0, 0)
1966 VSX_TDIV(xstdivdp, 1, float64, VsrD(0), -1022, 1023, 52)
1967 VSX_TDIV(xvtdivdp, 2, float64, VsrD(i), -1022, 1023, 52)
2019 VSX_TSQRT(xstsqrtdp, 1, float64, VsrD(0), -1022, 52)
2020 VSX_TSQRT(xvtsqrtdp, 2, float64, VsrD(i), -1022, 52)
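
The extra VSX_TDIV/VSX_TSQRT arguments (-1022, 1023, 52) are float64's minimum and maximum unbiased exponents and its fraction width, which the test-divide/test-square-root helpers use to judge whether the operands sit in a range the hardware path can handle. A minimal sketch of such an exponent-range check, assuming only standard C (exp_in_range is an illustrative name, not QEMU's):

    #include <math.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool exp_in_range(double x, int emin, int emax)
    {
        int e = ilogb(x);                /* unbiased exponent */
        return e >= emin && e <= emax;
    }

    int main(void)
    {
        printf("%d\n", exp_in_range(1.0, -1022, 1023));     /* 1 */
        printf("%d\n", exp_in_range(5e-324, -1022, 1023));  /* 0: subnormal */
        return 0;
    }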
2061 VSX_MADD(XSMADDDP, 1, float64, VsrD(0), MADD_FLGS, 1)
2062 VSX_MADD(XSMSUBDP, 1, float64, VsrD(0), MSUB_FLGS, 1)
2063 VSX_MADD(XSNMADDDP, 1, float64, VsrD(0), NMADD_FLGS, 1)
2064 VSX_MADD(XSNMSUBDP, 1, float64, VsrD(0), NMSUB_FLGS, 1)
2065 VSX_MADD(XSMADDSP, 1, float64r32, VsrD(0), MADD_FLGS, 1)
2066 VSX_MADD(XSMSUBSP, 1, float64r32, VsrD(0), MSUB_FLGS, 1)
2067 VSX_MADD(XSNMADDSP, 1, float64r32, VsrD(0), NMADD_FLGS, 1)
2068 VSX_MADD(XSNMSUBSP, 1, float64r32, VsrD(0), NMSUB_FLGS, 1)
2070 VSX_MADD(xvmadddp, 2, float64, VsrD(i), MADD_FLGS, 0)
2071 VSX_MADD(xvmsubdp, 2, float64, VsrD(i), MSUB_FLGS, 0)
2072 VSX_MADD(xvnmadddp, 2, float64, VsrD(i), NMADD_FLGS, 0)
2073 VSX_MADD(xvnmsubdp, 2, float64, VsrD(i), NMSUB_FLGS, 0)
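
The MADD_FLGS/MSUB_FLGS/NMADD_FLGS/NMSUB_FLGS arguments select how a single fused multiply-add primitive is negated: msub negates the addend, nmadd negates the result, and nmsub negates both (in QEMU they are composed from softfloat's float_muladd_negate_* flags). A plain-C99 sketch of the four variants via fma(), not the QEMU macro:

    #include <math.h>
    #include <stdio.h>

    static double madd (double a, double b, double c) { return  fma(a, b,  c); }
    static double msub (double a, double b, double c) { return  fma(a, b, -c); }
    static double nmadd(double a, double b, double c) { return -fma(a, b,  c); }
    static double nmsub(double a, double b, double c) { return -fma(a, b, -c); }

    int main(void)
    {
        printf("%g %g %g %g\n", madd(2, 3, 1), msub(2, 3, 1),
               nmadd(2, 3, 1), nmsub(2, 3, 1));   /* 7 5 -7 -5 */
        return 0;
    }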
2162 VSX_SCALAR_CMP(XSCMPEQDP, float64, eq, VsrD(0), 0)
2163 VSX_SCALAR_CMP(XSCMPGEDP, float64, le, VsrD(0), 1)
2164 VSX_SCALAR_CMP(XSCMPGTDP, float64, lt, VsrD(0), 1)
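
Note how XSCMPGEDP and XSCMPGTDP reuse the le and lt primitives; presumably the macro applies them with the operands reversed (a >= b is tested as b <= a), so both come out false whenever an input is NaN, and the trailing 1 looks like the flag requesting VXVC on quiet NaNs for the ordered compares. A tiny plain-C check of the operand-swap identity (ge_via_le is illustrative):

    #include <math.h>
    #include <stdio.h>

    static int ge_via_le(double a, double b) { return b <= a; }

    int main(void)
    {
        printf("%d %d %d\n", ge_via_le(2, 1), ge_via_le(1, 2),
               ge_via_le(NAN, 1));        /* 1 0 0 */
        return 0;
    }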
2175 exp_a = extract64(xa->VsrD(0), 52, 11); in helper_xscmpexpdp()
2176 exp_b = extract64(xb->VsrD(0), 52, 11); in helper_xscmpexpdp()
2178 if (unlikely(float64_is_any_nan(xa->VsrD(0)) || in helper_xscmpexpdp()
2179 float64_is_any_nan(xb->VsrD(0)))) { in helper_xscmpexpdp()
2204 exp_a = extract64(xa->VsrD(0), 48, 15); in helper_xscmpexpqp()
2205 exp_b = extract64(xb->VsrD(0), 48, 15); in helper_xscmpexpqp()
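
extract64(x, 52, 11) pulls the 11-bit biased exponent out of a float64 bit pattern; for float128 the exponent is 15 bits wide and, within the high doubleword VsrD(0), starts at bit 48, hence extract64(..., 48, 15) in helper_xscmpexpqp. A self-contained equivalent of the float64 case (extract_bits mirrors what QEMU's extract64 computes for these arguments):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint64_t extract_bits(uint64_t val, int start, int len)
    {
        return (val >> start) & ((1ULL << len) - 1);
    }

    int main(void)
    {
        double d = 1.0;
        uint64_t bits;
        memcpy(&bits, &d, sizeof bits);
        printf("%llu\n",
               (unsigned long long)extract_bits(bits, 52, 11)); /* 1023 */
        return 0;
    }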
2235 switch (float64_compare(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) { in do_scalar_cmp()
2248 if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status) || in do_scalar_cmp()
2249 float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { in do_scalar_cmp()
2254 } else if (float64_is_quiet_nan(xa->VsrD(0), &env->fp_status) || in do_scalar_cmp()
2255 float64_is_quiet_nan(xb->VsrD(0), &env->fp_status)) { in do_scalar_cmp()
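
do_scalar_cmp distinguishes signaling from quiet NaNs after an unordered compare. For binary64 under the IEEE 754-2008 convention PowerPC uses, the fraction MSB (bit 51) is the quiet bit: clear, with a nonzero remaining fraction, means SNaN. A standalone sketch of that classification (not QEMU's float64_is_signaling_nan, which also consults the fp_status NaN rules):

    #include <stdint.h>
    #include <stdio.h>

    static int is_nan(uint64_t b)
    {
        return ((b >> 52) & 0x7FF) == 0x7FF &&
               (b & 0xFFFFFFFFFFFFFULL) != 0;
    }

    static int is_snan(uint64_t b)
    {
        return is_nan(b) && !((b >> 51) & 1);   /* quiet bit clear */
    }

    int main(void)
    {
        printf("%d %d\n", is_snan(0x7FF0000000000001ULL),   /* 1 */
                          is_snan(0x7FF8000000000000ULL));  /* 0 */
        return 0;
    }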
2384 VSX_MAX_MIN(XSMAXDP, maxnum, 1, float64, VsrD(0))
2385 VSX_MAX_MIN(XVMAXDP, maxnum, 2, float64, VsrD(i))
2387 VSX_MAX_MIN(XSMINDP, minnum, 1, float64, VsrD(0))
2388 VSX_MAX_MIN(XVMINDP, minnum, 2, float64, VsrD(i))
2418 VSX_MAX_MINC(XSMAXCDP, true, float64, VsrD(0));
2419 VSX_MAX_MINC(XSMINCDP, false, float64, VsrD(0));
2430 if (unlikely(float64_is_any_nan(xa->VsrD(0)))) { \
2431 if (float64_is_signaling_nan(xa->VsrD(0), &env->fp_status)) { \
2434 t.VsrD(0) = xa->VsrD(0); \
2435 } else if (unlikely(float64_is_any_nan(xb->VsrD(0)))) { \
2436 if (float64_is_signaling_nan(xb->VsrD(0), &env->fp_status)) { \
2439 t.VsrD(0) = xb->VsrD(0); \
2440 } else if (float64_is_zero(xa->VsrD(0)) && \
2441 float64_is_zero(xb->VsrD(0))) { \
2443 if (!float64_is_neg(xa->VsrD(0)) || \
2444 !float64_is_neg(xb->VsrD(0))) { \
2445 t.VsrD(0) = 0ULL; \
2447 t.VsrD(0) = 0x8000000000000000ULL; \
2450 if (float64_is_neg(xa->VsrD(0)) || \
2451 float64_is_neg(xb->VsrD(0))) { \
2452 t.VsrD(0) = 0x8000000000000000ULL; \
2454 t.VsrD(0) = 0ULL; \
2458 !float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status)) || \
2460 float64_lt(xa->VsrD(0), xb->VsrD(0), &env->fp_status))) { \
2461 t.VsrD(0) = xa->VsrD(0); \
2463 t.VsrD(0) = xb->VsrD(0); \
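
The macro body above (the xsmaxjdp/xsminjdp style helpers, lines 2430-2463) spends most of its length on the cases an ordinary '<' cannot see: NaN propagation and signed zeros, where max(+0, -0) must be +0 and min(+0, -0) must be -0 even though +0 == -0 compares equal. A minimal sketch of the signed-zero rule alone, with the NaN cases deliberately omitted (maxj is an illustrative name):

    #include <math.h>
    #include <stdio.h>

    static double maxj(double a, double b)
    {
        if (a == 0.0 && b == 0.0) {
            /* +0 wins a max if either operand is +0 (cf. lines 2443-2447) */
            return (!signbit(a) || !signbit(b)) ? 0.0 : -0.0;
        }
        return a > b ? a : b;     /* NaN handling omitted in this sketch */
    }

    int main(void)
    {
        printf("%g\n", maxj(0.0, -0.0));                    /* 0, sign clear */
        printf("%d\n", signbit(maxj(-0.0, -0.0)) != 0);     /* 1 */
        return 0;
    }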
2528 VSX_CMP(XVCMPEQDP, 2, float64, VsrD(i), eq, 0, 1)
2529 VSX_CMP(XVCMPGEDP, 2, float64, VsrD(i), le, 1, 1)
2530 VSX_CMP(XVCMPGTDP, 2, float64, VsrD(i), lt, 1, 1)
2531 VSX_CMP(XVCMPNEDP, 2, float64, VsrD(i), eq, 0, 0)
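
Judging by the argument pattern, XVCMPNEDP reuses the eq primitive with the final argument flipped from 1 to 0, i.e. not-equal is "eq compared against 0". In plain C (cmp_lane is illustrative, not the QEMU macro):

    #include <stdio.h>

    static int cmp_lane(double a, double b, int expect_eq)
    {
        return (a == b) == expect_eq;   /* 1: EQ semantics; 0: NE semantics */
    }

    int main(void)
    {
        printf("%d %d\n", cmp_lane(1, 1, 1), cmp_lane(1, 2, 0));  /* 1 1 */
        return 0;
    }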
2571 VSX_CVT_FP_TO_FP(xscvspdp, 1, float32, float64, VsrW(0), VsrD(0), 1)
2572 VSX_CVT_FP_TO_FP(xvcvspdp, 2, float32, float64, VsrW(2 * i), VsrD(i), 0)
2583 t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \
2584 if (unlikely(stp##_is_signaling_nan(xb->VsrD(i), \
2637 VSX_CVT_FP_TO_FP_VECTOR(xscvdpqp, 1, float64, float128, VsrD(0), f128, 1)
2674 VSX_CVT_FP_TO_FP_HP(xscvdphp, 1, float64, float16, VsrD(0), VsrH(3), 1)
2675 VSX_CVT_FP_TO_FP_HP(xscvhpdp, 1, float16, float64, VsrH(3), VsrD(0), 1)
2712 t.VsrD(0) = float128_to_float64(xb->f128, &tstat); in helper_XSCVQPDP()
2716 t.VsrD(0) = float64_snan_to_qnan(t.VsrD(0)); in helper_XSCVQPDP()
2718 helper_compute_fprf_float64(env, t.VsrD(0)); in helper_XSCVQPDP()
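
float64_snan_to_qnan at line 2716 quiets a signaling NaN produced by the narrowing conversion; for binary64 that amounts to ORing in the quiet bit, bit 51. Self-contained sketch:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t snan_to_qnan(uint64_t f64bits)
    {
        return f64bits | (1ULL << 51);     /* set the quiet bit */
    }

    int main(void)
    {
        uint64_t snan = 0x7FF0000000000001ULL;  /* a signaling NaN pattern */
        printf("0x%016llX\n",
               (unsigned long long)snan_to_qnan(snan));
        /* 0x7FF8000000000001 */
        return 0;
    }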
2803 VSX_CVT_FP_TO_INT(xscvdpsxds, 1, float64, int64, VsrD(0), VsrD(0), true, \
2805 VSX_CVT_FP_TO_INT(xscvdpuxds, 1, float64, uint64, VsrD(0), VsrD(0), true, 0ULL)
2806 VSX_CVT_FP_TO_INT(xvcvdpsxds, 2, float64, int64, VsrD(i), VsrD(i), false, \
2808 VSX_CVT_FP_TO_INT(xvcvdpuxds, 2, float64, uint64, VsrD(i), VsrD(i), false, \
2810 VSX_CVT_FP_TO_INT(xvcvspsxds, 2, float32, int64, VsrW(2 * i), VsrD(i), false, \
2814 VSX_CVT_FP_TO_INT(xvcvspuxds, 2, float32, uint64, VsrW(2 * i), VsrD(i), \
2828 t.VsrD(0) = float_invalid_cvt(env, flags, t.VsrD(0), rnan, 0, GETPC());\
2829 t.VsrD(1) = -(t.VsrD(0) & 1); \
2857 t.VsrW(2 * i) = stp##_to_##ttp##_round_to_zero(xb->VsrD(i), \
2906 VSX_CVT_FP_TO_INT_VECTOR(xscvqpsdz, float128, int64, f128, VsrD(0), \
2908 VSX_CVT_FP_TO_INT_VECTOR(xscvqpswz, float128, int32, f128, VsrD(0), \
2910 VSX_CVT_FP_TO_INT_VECTOR(xscvqpudz, float128, uint64, f128, VsrD(0), 0x0ULL)
2911 VSX_CVT_FP_TO_INT_VECTOR(xscvqpuwz, float128, uint32, f128, VsrD(0), 0x0ULL)
2946 VSX_CVT_INT_TO_FP(xscvsxddp, 1, int64, float64, VsrD(0), VsrD(0), 1, 0)
2947 VSX_CVT_INT_TO_FP(xscvuxddp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 0)
2948 VSX_CVT_INT_TO_FP(xscvsxdsp, 1, int64, float64, VsrD(0), VsrD(0), 1, 1)
2949 VSX_CVT_INT_TO_FP(xscvuxdsp, 1, uint64, float64, VsrD(0), VsrD(0), 1, 1)
2950 VSX_CVT_INT_TO_FP(xvcvsxddp, 2, int64, float64, VsrD(i), VsrD(i), 0, 0)
2951 VSX_CVT_INT_TO_FP(xvcvuxddp, 2, uint64, float64, VsrD(i), VsrD(i), 0, 0)
2952 VSX_CVT_INT_TO_FP(xvcvsxwdp, 2, int32, float64, VsrW(2 * i), VsrD(i), 0, 0)
2953 VSX_CVT_INT_TO_FP(xvcvuxwdp, 2, uint64, float64, VsrW(2 * i), VsrD(i), 0, 0)
2964 t.VsrW(2 * i) = stp##_to_##ttp(xb->VsrD(i), &env->fp_status); \
3009 VSX_CVT_INT_TO_FP_VECTOR(xscvsdqp, int64, float128, VsrD(0), f128)
3010 VSX_CVT_INT_TO_FP_VECTOR(xscvudqp, uint64, float128, VsrD(0), f128)
3069 VSX_ROUND(xsrdpi, 1, float64, VsrD(0), float_round_ties_away, 1)
3070 VSX_ROUND(xsrdpic, 1, float64, VsrD(0), FLOAT_ROUND_CURRENT, 1)
3071 VSX_ROUND(xsrdpim, 1, float64, VsrD(0), float_round_down, 1)
3072 VSX_ROUND(xsrdpip, 1, float64, VsrD(0), float_round_up, 1)
3073 VSX_ROUND(xsrdpiz, 1, float64, VsrD(0), float_round_to_zero, 1)
3075 VSX_ROUND(xvrdpi, 2, float64, VsrD(i), float_round_ties_away, 0)
3076 VSX_ROUND(xvrdpic, 2, float64, VsrD(i), FLOAT_ROUND_CURRENT, 0)
3077 VSX_ROUND(xvrdpim, 2, float64, VsrD(i), float_round_down, 0)
3078 VSX_ROUND(xvrdpip, 2, float64, VsrD(i), float_round_up, 0)
3079 VSX_ROUND(xvrdpiz, 2, float64, VsrD(i), float_round_to_zero, 0)
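
The VSX_ROUND variants differ only in the rounding-mode argument: ties-away for xsrdpi, floor/ceiling/truncate for im/ip/iz, and FLOAT_ROUND_CURRENT for xsrdpic, which appears to mean "keep whatever mode FPSCR[RN] already installed in env->fp_status". The C99 <fenv.h> analogues, as a runnable sketch:

    #include <fenv.h>
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        printf("%g\n", round(2.5));       /* 3: ties away, like xsrdpi */
        fesetround(FE_DOWNWARD);          /* like xsrdpim */
        printf("%g\n", nearbyint(2.5));   /* 2 */
        fesetround(FE_TOWARDZERO);        /* like xsrdpiz */
        printf("%g\n", nearbyint(-2.5));  /* -2 */
        return 0;
    }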
3176 VSX_XS_TSTDC(XSTSTDCDP, VsrD(0), float64)
3183 uint32_t cc, match, sign = float64_is_neg(b->VsrD(0)); in VSX_XS_TSTDC()
3184 uint32_t exp = (b->VsrD(0) >> 52) & 0x7FF; in VSX_XS_TSTDC()
3185 int not_sp = (int)not_SP_value(b->VsrD(0)); in VSX_XS_TSTDC()
3186 match = float64_tstdc(b->VsrD(0), dcmx) || (exp > 0 && exp < 0x381); in VSX_XS_TSTDC()
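
The test-data-class lines split the raw float64 into sign, 11-bit exponent, and 52-bit fraction. The 0x381 bound at line 3186 is the biased double exponent 897 = 1023 - 126, below which a nonzero value cannot be a normal float32; presumably that is why the single-precision variant also counts exp in (0, 0x381) as a match. A standalone version of the field split:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        double d = -0.0;
        uint64_t b;
        memcpy(&b, &d, sizeof b);
        unsigned sign = (unsigned)(b >> 63);
        unsigned exp  = (unsigned)((b >> 52) & 0x7FF);  /* as at line 3184 */
        uint64_t frac = b & ((1ULL << 52) - 1);
        printf("sign=%u exp=%u frac=%llu\n",
               sign, exp, (unsigned long long)frac);  /* sign=1 exp=0 frac=0 */
        return 0;
    }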