Performance results (single and double precision) for fp-bench:
1. Intel(R) Core(TM) i7-6700K CPU @ 4.00GHz
- before:
add-single: 135.07 MFlops
add-double: 131.60 MFlops
sub-single: 130.04 MFlops
sub-double: 133.01 MFlops
- after:
add-single: 443.04 MFlops
add-double: 301.95 MFlops
sub-single: 411.36 MFlops
sub-double: 293.15 MFlops
2. ARM AArch64 A57 @ 2.4GHz
- before:
add-single: 44.79 MFlops
add-double: 49.20 MFlops
sub-single: 44.55 MFlops
sub-double: 49.06 MFlops
- after:
add-single: 93.28 MFlops
add-double: 88.27 MFlops
sub-single: 91.47 MFlops
sub-double: 88.27 MFlops
3. IBM POWER8E @ 2.1 GHz
- before:
add-single: 72.59 MFlops
add-double: 72.27 MFlops
sub-single: 75.33 MFlops
sub-double: 70.54 MFlops
- after:
add-single: 112.95 MFlops
add-double: 201.11 MFlops
sub-single: 116.80 MFlops
sub-double: 188.72 MFlops
Note that the IBM and ARM machines benefit from having
HARDFLOAT_2F{32,64}_USE_FP set to 0. Otherwise their performance
can suffer significantly:
- IBM Power8:
add-single: [1] 54.94 vs [0] 116.37 MFlops
add-double: [1] 58.92 vs [0] 201.44 MFlops
- AArch64 A57:
add-single: [1] 80.72 vs [0] 93.24 MFlops
add-double: [1] 82.10 vs [0] 88.18 MFlops
On the Intel machine, having 2F64 set to 1 pays off, but it
doesn't for 2F32:
- Intel i7-6700K:
add-single: [1] 285.79 vs [0] 426.70 MFlops
add-double: [1] 302.15 vs [0] 278.82 MFlops
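To make the trade-off concrete: the flag selects how the zero check on
the inputs is performed, either by classifying the host float or by a
mask test on the raw bits. A minimal standalone sketch of the two
variants (the union and function names here are illustrative stand-ins;
the patch's actual helpers are f32_addsub_post/f64_addsub_post in the
diff below):

#include <math.h>      /* fpclassify, FP_ZERO */
#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for union_float32: the soft-float bit
 * pattern (.s) overlaid with the host representation (.h). */
typedef union {
    uint32_t s;
    float h;
} union_f32_sketch;

/* Variant selected by HARDFLOAT_2F32_USE_FP=1: classify the host
 * float, which typically requires the value in an FP/SIMD register. */
static bool both_zero_fp(union_f32_sketch a, union_f32_sketch b)
{
    return fpclassify(a.h) == FP_ZERO && fpclassify(b.h) == FP_ZERO;
}

/* Variant selected by HARDFLOAT_2F32_USE_FP=0: a pure integer
 * mask-and-compare that stays in general-purpose registers. */
static bool both_zero_bits(union_f32_sketch a, union_f32_sketch b)
{
    return (a.s & 0x7fffffffu) == 0 && (b.s & 0x7fffffffu) == 0;
}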
Signed-off-by: Emilio G. Cota <cota@braap.org>
---
fpu/softfloat.c | 117 ++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 98 insertions(+), 19 deletions(-)
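Note for readers new to the hardfloat helpers: float32_gen2 and
f32_is_zon2 are defined earlier in fpu/softfloat.c and are used by the
float32_addsub/float64_addsub wrappers added in the diff below.
Roughly, the dispatch works as in this simplified reconstruction
(illustrative only; the real helper also checks can_use_fpu(), flushes
denormal inputs, and raises the overflow flag when the hard result is
infinite):

#include <float.h>     /* FLT_MIN */
#include <math.h>      /* fabsf */
#include <stdbool.h>
#include <stdint.h>

typedef union { uint32_t s; float h; } u32_sketch;
typedef float (*hard_op2)(float, float);
typedef uint32_t (*soft_op2)(uint32_t, uint32_t);
typedef bool (*check2)(u32_sketch, u32_sketch);

static uint32_t gen2_sketch(uint32_t xa, uint32_t xb,
                            hard_op2 hard, soft_op2 soft,
                            check2 pre, check2 post)
{
    u32_sketch ua = { .s = xa }, ub = { .s = xb }, ur;

    if (!pre(ua, ub)) {        /* inputs not all zero-or-normal: punt */
        return soft(xa, xb);
    }
    ur.h = hard(ua.h, ub.h);   /* fast path: one host-FPU instruction */
    if (fabsf(ur.h) <= FLT_MIN && post(ua, ub)) {
        return soft(xa, xb);   /* possible underflow: redo in softfloat */
    }
    return ur.s;               /* the hard result can be trusted */
}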
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index 306a12fa8d..cc500b1618 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -1050,49 +1050,128 @@ float16 QEMU_FLATTEN float16_add(float16 a, float16 b, float_status *status)
return float16_round_pack_canonical(pr, status);
}
-float32 QEMU_FLATTEN float32_add(float32 a, float32 b, float_status *status)
+float16 QEMU_FLATTEN float16_sub(float16 a, float16 b, float_status *status)
+{
+ FloatParts pa = float16_unpack_canonical(a, status);
+ FloatParts pb = float16_unpack_canonical(b, status);
+ FloatParts pr = addsub_floats(pa, pb, true, status);
+
+ return float16_round_pack_canonical(pr, status);
+}
+
+static float32 QEMU_SOFTFLOAT_ATTR
+soft_f32_addsub(float32 a, float32 b, bool subtract, float_status *status)
{
FloatParts pa = float32_unpack_canonical(a, status);
FloatParts pb = float32_unpack_canonical(b, status);
- FloatParts pr = addsub_floats(pa, pb, false, status);
+ FloatParts pr = addsub_floats(pa, pb, subtract, status);
return float32_round_pack_canonical(pr, status);
}
-float64 QEMU_FLATTEN float64_add(float64 a, float64 b, float_status *status)
+static inline float32 soft_f32_add(float32 a, float32 b, float_status *status)
+{
+ return soft_f32_addsub(a, b, false, status);
+}
+
+static inline float32 soft_f32_sub(float32 a, float32 b, float_status *status)
+{
+ return soft_f32_addsub(a, b, true, status);
+}
+
+static float64 QEMU_SOFTFLOAT_ATTR
+soft_f64_addsub(float64 a, float64 b, bool subtract, float_status *status)
{
FloatParts pa = float64_unpack_canonical(a, status);
FloatParts pb = float64_unpack_canonical(b, status);
- FloatParts pr = addsub_floats(pa, pb, false, status);
+ FloatParts pr = addsub_floats(pa, pb, subtract, status);
return float64_round_pack_canonical(pr, status);
}
-float16 QEMU_FLATTEN float16_sub(float16 a, float16 b, float_status *status)
+static inline float64 soft_f64_add(float64 a, float64 b, float_status *status)
{
- FloatParts pa = float16_unpack_canonical(a, status);
- FloatParts pb = float16_unpack_canonical(b, status);
- FloatParts pr = addsub_floats(pa, pb, true, status);
+ return soft_f64_addsub(a, b, false, status);
+}
- return float16_round_pack_canonical(pr, status);
+static inline float64 soft_f64_sub(float64 a, float64 b, float_status *status)
+{
+ return soft_f64_addsub(a, b, true, status);
}
-float32 QEMU_FLATTEN float32_sub(float32 a, float32 b, float_status *status)
+static float hard_f32_add(float a, float b)
{
- FloatParts pa = float32_unpack_canonical(a, status);
- FloatParts pb = float32_unpack_canonical(b, status);
- FloatParts pr = addsub_floats(pa, pb, true, status);
+ return a + b;
+}
- return float32_round_pack_canonical(pr, status);
+static float hard_f32_sub(float a, float b)
+{
+ return a - b;
}
-float64 QEMU_FLATTEN float64_sub(float64 a, float64 b, float_status *status)
+static double hard_f64_add(double a, double b)
{
- FloatParts pa = float64_unpack_canonical(a, status);
- FloatParts pb = float64_unpack_canonical(b, status);
- FloatParts pr = addsub_floats(pa, pb, true, status);
+ return a + b;
+}
- return float64_round_pack_canonical(pr, status);
+static double hard_f64_sub(double a, double b)
+{
+ return a - b;
+}
+
+static bool f32_addsub_post(union_float32 a, union_float32 b)
+{
+ if (QEMU_HARDFLOAT_2F32_USE_FP) {
+ return !(fpclassify(a.h) == FP_ZERO && fpclassify(b.h) == FP_ZERO);
+ }
+ return !(float32_is_zero(a.s) && float32_is_zero(b.s));
+}
+
+static bool f64_addsub_post(union_float64 a, union_float64 b)
+{
+ if (QEMU_HARDFLOAT_2F64_USE_FP) {
+ return !(fpclassify(a.h) == FP_ZERO && fpclassify(b.h) == FP_ZERO);
+ } else {
+ return !(float64_is_zero(a.s) && float64_is_zero(b.s));
+ }
+}
+
+static float32 float32_addsub(float32 a, float32 b, float_status *s,
+ hard_f32_op2_fn hard, soft_f32_op2_fn soft)
+{
+ return float32_gen2(a, b, s, hard, soft,
+ f32_is_zon2, f32_addsub_post, NULL, NULL);
+}
+
+static float64 float64_addsub(float64 a, float64 b, float_status *s,
+ hard_f64_op2_fn hard, soft_f64_op2_fn soft)
+{
+ return float64_gen2(a, b, s, hard, soft,
+ f64_is_zon2, f64_addsub_post, NULL, NULL);
+}
+
+float32 QEMU_FLATTEN
+float32_add(float32 a, float32 b, float_status *s)
+{
+ return float32_addsub(a, b, s, hard_f32_add, soft_f32_add);
+}
+
+float32 QEMU_FLATTEN
+float32_sub(float32 a, float32 b, float_status *s)
+{
+ return float32_addsub(a, b, s, hard_f32_sub, soft_f32_sub);
+}
+
+float64 QEMU_FLATTEN
+float64_add(float64 a, float64 b, float_status *s)
+{
+ return float64_addsub(a, b, s, hard_f64_add, soft_f64_add);
+}
+
+float64 QEMU_FLATTEN
+float64_sub(float64 a, float64 b, float_status *s)
+{
+ return float64_addsub(a, b, s, hard_f64_sub, soft_f64_sub);
}
/*
--
2.17.1
Emilio G. Cota <cota@braap.org> writes:
(snip)
> Note that the IBM and ARM machines benefit from having
> HARDFLOAT_2F{32,64}_USE_FP set to 0. Otherwise their performance
> can suffer significantly:
Is this just the latency of pushing the number into a SIMD register and
checking the flags compared to a bitmask check?
(snip)
> diff --git a/fpu/softfloat.c b/fpu/softfloat.c
> index 306a12fa8d..cc500b1618 100644
> --- a/fpu/softfloat.c
> +++ b/fpu/softfloat.c
> @@ -1050,49 +1050,128 @@ float16 QEMU_FLATTEN float16_add(float16 a, float16 b, float_status *status)
> return float16_round_pack_canonical(pr, status);
> }
>
> -float32 QEMU_FLATTEN float32_add(float32 a, float32 b, float_status *status)
> +float16 QEMU_FLATTEN float16_sub(float16 a, float16 b, float_status *status)
> +{
> + FloatParts pa = float16_unpack_canonical(a, status);
> + FloatParts pb = float16_unpack_canonical(b, status);
> + FloatParts pr = addsub_floats(pa, pb, true, status);
> +
> + return float16_round_pack_canonical(pr, status);
> +}
Hmm, the diff is confusing, but the changes look fine in the final code:
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
--
Alex Bennée
On Tue, Dec 04, 2018 at 18:34:18 +0000, Alex Bennée wrote:
>
> Emilio G. Cota <cota@braap.org> writes:
(snip)
> > Note that the IBM and ARM machines benefit from having
> > HARDFLOAT_2F{32,64}_USE_FP set to 0. Otherwise their performance
> > can suffer significantly:
>
> Is this just the latency of pushing the number into a SIMD register and
> checking the flags compared to a bitmask check?
That's the case in the generated x86 assembly, so I presume the same
is happening on the other ISAs (I didn't check the assembly there).
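For illustration, the two checks the flag selects between compile very
differently (instruction sequences are compiler- and ISA-dependent; the
x86-64 mnemonics in the comments are typical, not exact output):

#include <math.h>      /* fpclassify, FP_ZERO */
#include <stdbool.h>
#include <stdint.h>

/* Bitmask check: the float32 bit pattern is already in a
 * general-purpose register, so this is a single integer test
 * (on x86-64, something like "test edi, 0x7fffffff"). */
static bool is_zero_bits(uint32_t f)
{
    return (f & 0x7fffffffu) == 0;
}

/* Host-FP check: the value must first cross into the FP/SIMD
 * register file (e.g. "movd xmm0, edi") before classification,
 * which is the extra latency discussed above. */
static bool is_zero_fp(float f)
{
    return fpclassify(f) == FP_ZERO;
}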
(snip)
>
> Hmm, the diff is confusing, but the changes look fine in the final code:
>
> Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Thanks!
E.