fpu/softfloat.c | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-)
This will allow us to share code between FloatParts and FloatParts128.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
Cc: Alex Bennée <alex.bennee@linaro.org>
What do you think of this instead of inlining pick_nan_muladd
into the two muladd implementations?
r~
---
fpu/softfloat.c | 40 ++++++++++++++++++++++++----------------
1 file changed, 24 insertions(+), 16 deletions(-)
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index 3e625c47cd..60fdddd163 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -929,16 +929,23 @@ static FloatParts pick_nan(FloatParts a, FloatParts b, float_status *s)
return a;
}
-static FloatParts pick_nan_muladd(FloatParts a, FloatParts b, FloatParts c,
- bool inf_zero, float_status *s)
+/*
+ * Given pointers to A, B, C, and the respective classes, return the
+ * pointer to the structure that is the NaN result, or NULL to signal
+ * that the result is the default NaN.
+ */
+static inline void *
+pick_nan_muladd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
+ void *a, void *b, void *c,
+ bool inf_zero, int abc_mask, float_status *s)
{
int which;
- if (is_snan(a.cls) || is_snan(b.cls) || is_snan(c.cls)) {
+ if (unlikely(abc_mask & float_cmask_snan)) {
s->float_exception_flags |= float_flag_invalid;
}
- which = pickNaNMulAdd(a.cls, b.cls, c.cls, inf_zero, s);
+ which = pickNaNMulAdd(a_cls, b_cls, c_cls, inf_zero, s);
if (s->default_nan_mode) {
/* Note that this check is after pickNaNMulAdd so that function
@@ -949,23 +956,16 @@ static FloatParts pick_nan_muladd(FloatParts a, FloatParts b, FloatParts c,
switch (which) {
case 0:
- break;
+ return a;
case 1:
- a = b;
- break;
+ return b;
case 2:
- a = c;
- break;
+ return c;
case 3:
- return parts_default_nan(s);
+ return NULL;
default:
g_assert_not_reached();
}
-
- if (is_snan(a.cls)) {
- return parts_silence_nan(a, s);
- }
- return a;
}
/*
@@ -1366,7 +1366,15 @@ static FloatParts muladd_floats(FloatParts a, FloatParts b, FloatParts c,
* off to the target-specific pick-a-NaN routine.
*/
if (unlikely(abc_mask & float_cmask_anynan)) {
- return pick_nan_muladd(a, b, c, inf_zero, s);
+ FloatParts *r = pick_nan_muladd(a.cls, b.cls, c.cls, &a, &b, &c,
+ inf_zero, abc_mask, s);
+ if (r == NULL) {
+ return parts_default_nan(s);
+ }
+ if (is_snan(r->cls)) {
+ return parts_silence_nan(*r, s);
+ }
+ return *r;
}
if (unlikely(inf_zero)) {
--
2.25.1
Richard Henderson <richard.henderson@linaro.org> writes:
> This will allow us to share code between FloatParts and FloatParts128.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> Cc: Alex Bennée <alex.bennee@linaro.org>
>
> What do you think of this instead of inlining pick_nan_muladd
> into the two muladd implementations?
I think that can work. I was noodling about with float_addsub128 over
the weekend so I'll post what that looks like once I've tested it.
Anyway:
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
>
>
> r~
>
> ---
> fpu/softfloat.c | 40 ++++++++++++++++++++++++----------------
> 1 file changed, 24 insertions(+), 16 deletions(-)
>
> diff --git a/fpu/softfloat.c b/fpu/softfloat.c
> index 3e625c47cd..60fdddd163 100644
> --- a/fpu/softfloat.c
> +++ b/fpu/softfloat.c
> @@ -929,16 +929,23 @@ static FloatParts pick_nan(FloatParts a, FloatParts b, float_status *s)
> return a;
> }
>
> -static FloatParts pick_nan_muladd(FloatParts a, FloatParts b, FloatParts c,
> - bool inf_zero, float_status *s)
> +/*
> + * Given pointers to A, B, C, and the respective classes, return the
> + * pointer to the structure that is the NaN result, or NULL to signal
> + * that the result is the default NaN.
> + */
> +static inline void *
> +pick_nan_muladd(FloatClass a_cls, FloatClass b_cls, FloatClass c_cls,
> + void *a, void *b, void *c,
> + bool inf_zero, int abc_mask, float_status *s)
> {
> int which;
>
> - if (is_snan(a.cls) || is_snan(b.cls) || is_snan(c.cls)) {
> + if (unlikely(abc_mask & float_cmask_snan)) {
> s->float_exception_flags |= float_flag_invalid;
> }
>
> - which = pickNaNMulAdd(a.cls, b.cls, c.cls, inf_zero, s);
> + which = pickNaNMulAdd(a_cls, b_cls, c_cls, inf_zero, s);
>
> if (s->default_nan_mode) {
> /* Note that this check is after pickNaNMulAdd so that function
> @@ -949,23 +956,16 @@ static FloatParts pick_nan_muladd(FloatParts a, FloatParts b, FloatParts c,
>
> switch (which) {
> case 0:
> - break;
> + return a;
> case 1:
> - a = b;
> - break;
> + return b;
> case 2:
> - a = c;
> - break;
> + return c;
> case 3:
> - return parts_default_nan(s);
> + return NULL;
> default:
> g_assert_not_reached();
> }
> -
> - if (is_snan(a.cls)) {
> - return parts_silence_nan(a, s);
> - }
> - return a;
> }
>
> /*
> @@ -1366,7 +1366,15 @@ static FloatParts muladd_floats(FloatParts a, FloatParts b, FloatParts c,
> * off to the target-specific pick-a-NaN routine.
> */
> if (unlikely(abc_mask & float_cmask_anynan)) {
> - return pick_nan_muladd(a, b, c, inf_zero, s);
> + FloatParts *r = pick_nan_muladd(a.cls, b.cls, c.cls, &a, &b, &c,
> + inf_zero, abc_mask, s);
> + if (r == NULL) {
> + return parts_default_nan(s);
> + }
> + if (is_snan(r->cls)) {
> + return parts_silence_nan(*r, s);
> + }
> + return *r;
> }
>
> if (unlikely(inf_zero)) {
--
Alex Bennée
© 2016 - 2026 Red Hat, Inc.