target/ppc: rewrite f[n]m[add,sub] using float64_muladd

Use the softfloat API for fused multiply-add.
Introduce a routine to set the FPSCR flags VXSNAN, VXIMZ and VXISI.

Signed-off-by: Nikunj A Dadhania <nikunj@linux.vnet.ibm.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Author: Nikunj A Dadhania, 2017-03-02 19:40:29 +05:30 (committed by David Gibson)
parent ec975e839c
commit 992d7e976c

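For readers skimming the diff: all four PowerPC fused operations collapse into a single softfloat primitive, float64_muladd(), distinguished only by two negation flags. The standalone sketch below mirrors that flag algebra with C99 fma() on host doubles; the harness and values are purely illustrative and not part of this commit.

#include <math.h>
#include <stdio.h>

int main(void)
{
    double a = 2.0, b = 3.0, c = 5.0;

    /* fmadd:  (a * b) + c     -> madd_flags = 0 */
    printf("fmadd  = %g\n",  fma(a, b, c));
    /* fmsub:  (a * b) - c     -> float_muladd_negate_c */
    printf("fmsub  = %g\n",  fma(a, b, -c));
    /* fnmadd: -((a * b) + c)  -> float_muladd_negate_result */
    printf("fnmadd = %g\n", -fma(a, b, c));
    /* fnmsub: -((a * b) - c)  -> negate_c | negate_result */
    printf("fnmsub = %g\n", -fma(a, b, -c));
    return 0;
}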

@@ -743,178 +743,62 @@ uint64_t helper_frim(CPUPPCState *env, uint64_t arg)
     return do_fri(env, arg, float_round_down);
 }
 
-/* fmadd - fmadd. */
-uint64_t helper_fmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
-                      uint64_t arg3)
+static void float64_maddsub_update_excp(CPUPPCState *env, float64 arg1,
+                                        float64 arg2, float64 arg3,
+                                        unsigned int madd_flags)
 {
-    CPU_DoubleU farg1, farg2, farg3;
-
-    farg1.ll = arg1;
-    farg2.ll = arg2;
-    farg3.ll = arg3;
-
-    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
-                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
+    if (unlikely((float64_is_infinity(arg1) && float64_is_zero(arg2)) ||
+                 (float64_is_zero(arg1) && float64_is_infinity(arg2)))) {
         /* Multiplication of zero by infinity */
-        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
-    } else {
-        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
-                     float64_is_signaling_nan(farg2.d, &env->fp_status) ||
-                     float64_is_signaling_nan(farg3.d, &env->fp_status))) {
-            /* sNaN operation */
-            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
-        }
-        /* This is the way the PowerPC specification defines it */
-        float128 ft0_128, ft1_128;
-
-        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
-        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
-        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
-        if (unlikely(float128_is_infinity(ft0_128) &&
-                     float64_is_infinity(farg3.d) &&
-                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
-            /* Magnitude subtraction of infinities */
-            farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
-        } else {
-            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
-            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
-            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
+        arg1 = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
+    } else if (unlikely(float64_is_signaling_nan(arg1, &env->fp_status) ||
+                        float64_is_signaling_nan(arg2, &env->fp_status) ||
+                        float64_is_signaling_nan(arg3, &env->fp_status))) {
+        /* sNaN operation */
+        float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
+    } else if ((float64_is_infinity(arg1) || float64_is_infinity(arg2)) &&
+               float64_is_infinity(arg3)) {
+        uint8_t aSign, bSign, cSign;
+
+        aSign = float64_is_neg(arg1);
+        bSign = float64_is_neg(arg2);
+        cSign = float64_is_neg(arg3);
+        if (madd_flags & float_muladd_negate_c) {
+            cSign ^= 1;
+        }
+        if (aSign ^ bSign ^ cSign) {
+            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
         }
     }
-
-    return farg1.ll;
 }
 
-/* fmsub - fmsub. */
-uint64_t helper_fmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
-                      uint64_t arg3)
-{
-    CPU_DoubleU farg1, farg2, farg3;
-
-    farg1.ll = arg1;
-    farg2.ll = arg2;
-    farg3.ll = arg3;
-
-    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
-                 (float64_is_zero(farg1.d) &&
-                  float64_is_infinity(farg2.d)))) {
-        /* Multiplication of zero by infinity */
-        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
-    } else {
-        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
-                     float64_is_signaling_nan(farg2.d, &env->fp_status) ||
-                     float64_is_signaling_nan(farg3.d, &env->fp_status))) {
-            /* sNaN operation */
-            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
-        }
-        /* This is the way the PowerPC specification defines it */
-        float128 ft0_128, ft1_128;
-
-        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
-        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
-        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
-        if (unlikely(float128_is_infinity(ft0_128) &&
-                     float64_is_infinity(farg3.d) &&
-                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
-            /* Magnitude subtraction of infinities */
-            farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
-        } else {
-            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
-            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
-            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
-        }
-    }
-
-    return farg1.ll;
+#define FPU_FMADD(op, madd_flags)                                       \
+uint64_t helper_##op(CPUPPCState *env, uint64_t arg1,                   \
+                     uint64_t arg2, uint64_t arg3)                      \
+{                                                                       \
+    uint32_t flags;                                                     \
+    float64 ret = float64_muladd(arg1, arg2, arg3, madd_flags,          \
+                                 &env->fp_status);                      \
+    flags = get_float_exception_flags(&env->fp_status);                 \
+    if (flags) {                                                        \
+        if (flags & float_flag_invalid) {                               \
+            float64_maddsub_update_excp(env, arg1, arg2, arg3,          \
+                                        madd_flags);                    \
+        }                                                               \
+        float_check_status(env);                                        \
+    }                                                                   \
+    return ret;                                                         \
 }
 
-/* fnmadd - fnmadd. */
-uint64_t helper_fnmadd(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
-                       uint64_t arg3)
-{
-    CPU_DoubleU farg1, farg2, farg3;
+#define MADD_FLGS 0
+#define MSUB_FLGS float_muladd_negate_c
+#define NMADD_FLGS float_muladd_negate_result
+#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)
 
-    farg1.ll = arg1;
-    farg2.ll = arg2;
-    farg3.ll = arg3;
-
-    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
-                 (float64_is_zero(farg1.d) && float64_is_infinity(farg2.d)))) {
-        /* Multiplication of zero by infinity */
-        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
-    } else {
-        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
-                     float64_is_signaling_nan(farg2.d, &env->fp_status) ||
-                     float64_is_signaling_nan(farg3.d, &env->fp_status))) {
-            /* sNaN operation */
-            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
-        }
-        /* This is the way the PowerPC specification defines it */
-        float128 ft0_128, ft1_128;
-
-        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
-        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
-        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
-        if (unlikely(float128_is_infinity(ft0_128) &&
-                     float64_is_infinity(farg3.d) &&
-                     float128_is_neg(ft0_128) != float64_is_neg(farg3.d))) {
-            /* Magnitude subtraction of infinities */
-            farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
-        } else {
-            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
-            ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
-            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
-        }
-        if (likely(!float64_is_any_nan(farg1.d))) {
-            farg1.d = float64_chs(farg1.d);
-        }
-    }
-
-    return farg1.ll;
-}
-
-/* fnmsub - fnmsub. */
-uint64_t helper_fnmsub(CPUPPCState *env, uint64_t arg1, uint64_t arg2,
-                       uint64_t arg3)
-{
-    CPU_DoubleU farg1, farg2, farg3;
-
-    farg1.ll = arg1;
-    farg2.ll = arg2;
-    farg3.ll = arg3;
-
-    if (unlikely((float64_is_infinity(farg1.d) && float64_is_zero(farg2.d)) ||
-                 (float64_is_zero(farg1.d) &&
-                  float64_is_infinity(farg2.d)))) {
-        /* Multiplication of zero by infinity */
-        farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXIMZ, 1);
-    } else {
-        if (unlikely(float64_is_signaling_nan(farg1.d, &env->fp_status) ||
-                     float64_is_signaling_nan(farg2.d, &env->fp_status) ||
-                     float64_is_signaling_nan(farg3.d, &env->fp_status))) {
-            /* sNaN operation */
-            float_invalid_op_excp(env, POWERPC_EXCP_FP_VXSNAN, 1);
-        }
-        /* This is the way the PowerPC specification defines it */
-        float128 ft0_128, ft1_128;
-
-        ft0_128 = float64_to_float128(farg1.d, &env->fp_status);
-        ft1_128 = float64_to_float128(farg2.d, &env->fp_status);
-        ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
-        if (unlikely(float128_is_infinity(ft0_128) &&
-                     float64_is_infinity(farg3.d) &&
-                     float128_is_neg(ft0_128) == float64_is_neg(farg3.d))) {
-            /* Magnitude subtraction of infinities */
-            farg1.ll = float_invalid_op_excp(env, POWERPC_EXCP_FP_VXISI, 1);
-        } else {
-            ft1_128 = float64_to_float128(farg3.d, &env->fp_status);
-            ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
-            farg1.d = float128_to_float64(ft0_128, &env->fp_status);
-        }
-        if (likely(!float64_is_any_nan(farg1.d))) {
-            farg1.d = float64_chs(farg1.d);
-        }
-    }
-
-    return farg1.ll;
-}
+FPU_FMADD(fmadd, MADD_FLGS)
+FPU_FMADD(fnmadd, NMADD_FLGS)
+FPU_FMADD(fmsub, MSUB_FLGS)
+FPU_FMADD(fnmsub, NMSUB_FLGS)
 
 /* frsp - frsp. */
 uint64_t helper_frsp(CPUPPCState *env, uint64_t arg)
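The helpers generated by FPU_FMADD follow a compute-first pattern: perform the fused operation, then inspect the accumulated softfloat exception flags, and only re-examine the operands (via float64_maddsub_update_excp) when an invalid operation was raised. A hedged host-FPU sketch of the same shape, using fenv.h purely for illustration (strict C would also want FENV_ACCESS, glossed over here):

#include <fenv.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
    feclearexcept(FE_ALL_EXCEPT);
    double r = fma(INFINITY, 1.0, -INFINITY);   /* inf - inf */

    /* Compute first, classify afterwards: QEMU's helper would call
     * float64_maddsub_update_excp() here to pick VXISI, VXIMZ or
     * VXSNAN; on the host we just report the invalid operation. */
    if (fetestexcept(FE_INVALID)) {
        printf("invalid operation, result = %g\n", r);
    }
    return 0;
}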
@@ -2384,11 +2268,6 @@ void helper_##op(CPUPPCState *env, uint32_t opcode) \
     float_check_status(env);                                             \
 }
 
-#define MADD_FLGS 0
-#define MSUB_FLGS float_muladd_negate_c
-#define NMADD_FLGS float_muladd_negate_result
-#define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result)
-
 VSX_MADD(xsmaddadp, 1, float64, VsrD(0), MADD_FLGS, 1, 1, 0)
 VSX_MADD(xsmaddmdp, 1, float64, VsrD(0), MADD_FLGS, 0, 1, 0)
 VSX_MADD(xsmsubadp, 1, float64, VsrD(0), MSUB_FLGS, 1, 1, 0)
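One subtlety behind the new VXISI test: once the addend and the product are both infinite, the sign of the infinite product is simply aSign ^ bSign, so magnitude subtraction of infinities is detected by sign parity alone. float_muladd_negate_c flips the addend's sign first, while float_muladd_negate_result is deliberately ignored, since negating a well-defined result can never manufacture inf - inf. A hypothetical standalone check mirroring that logic (function name and harness invented for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the VXISI test in float64_maddsub_update_excp: with a*b and c
 * both infinite, the product's sign is aSign ^ bSign, so the operation
 * is a magnitude subtraction of infinities exactly when the parity of
 * the three signs is odd. */
static bool is_inf_minus_inf(bool aSign, bool bSign, bool cSign, bool negate_c)
{
    if (negate_c) {
        cSign = !cSign;   /* msub/nmsub effectively add -c */
    }
    return aSign ^ bSign ^ cSign;
}

int main(void)
{
    /* (+inf * +1) + (-inf): parity 0^0^1 -> invalid (VXISI) */
    printf("%d\n", is_inf_minus_inf(false, false, true, false));
    /* (+inf * +1) - (-inf) == +inf + +inf: well defined, not invalid */
    printf("%d\n", is_inf_minus_inf(false, false, true, true));
    return 0;
}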