Performance results for fp-bench run under aarch64-linux-user
on an Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz host:
- before:
sqrt-single: 13.23 MFlops
sqrt-double: 13.24 MFlops
- after:
sqrt-single: 15.02 MFlops
sqrt-double: 15.07 MFlops
Note that sqrt in soft-ft is relatively fast, which means
that fp-bench is not very sensitive to changes to sqrt's
emulation speed.
Signed-off-by: Emilio G. Cota <cota@braap.org>
---
include/fpu/hostfloat.h | 2 ++
include/fpu/softfloat.h | 4 ++--
fpu/hostfloat.c | 20 ++++++++++++++++++++
fpu/softfloat.c | 6 ++++--
4 files changed, 28 insertions(+), 4 deletions(-)
diff --git a/include/fpu/hostfloat.h b/include/fpu/hostfloat.h
index c006576..b1e0689 100644
--- a/include/fpu/hostfloat.h
+++ b/include/fpu/hostfloat.h
@@ -16,11 +16,13 @@ float32 float32_sub(float32 a, float32 b, float_status *status);
float32 float32_mul(float32 a, float32 b, float_status *status);
float32 float32_div(float32 a, float32 b, float_status *status);
float32 float32_muladd(float32 a, float32 b, float32 c, int f, float_status *s);
+float32 float32_sqrt(float32 a, float_status *status);
float64 float64_add(float64 a, float64 b, float_status *status);
float64 float64_sub(float64 a, float64 b, float_status *status);
float64 float64_mul(float64 a, float64 b, float_status *status);
float64 float64_div(float64 a, float64 b, float_status *status);
float64 float64_muladd(float64 a, float64 b, float64 c, int f, float_status *s);
+float64 float64_sqrt(float64 a, float_status *status);
#endif /* HOSTFLOAT_H */
diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
index 866bd3b..8d5a50a 100644
--- a/include/fpu/softfloat.h
+++ b/include/fpu/softfloat.h
@@ -348,7 +348,7 @@ float32 soft_float32_mul(float32, float32, float_status *status);
float32 soft_float32_div(float32, float32, float_status *status);
float32 float32_rem(float32, float32, float_status *status);
float32 soft_float32_muladd(float32, float32, float32, int, float_status *s);
-float32 float32_sqrt(float32, float_status *status);
+float32 soft_float32_sqrt(float32, float_status *status);
float32 float32_exp2(float32, float_status *status);
float32 float32_log2(float32, float_status *status);
int float32_eq(float32, float32, float_status *status);
@@ -488,7 +488,7 @@ float64 soft_float64_mul(float64, float64, float_status *status);
float64 soft_float64_div(float64, float64, float_status *status);
float64 float64_rem(float64, float64, float_status *status);
float64 soft_float64_muladd(float64, float64, float64, int, float_status *s);
-float64 float64_sqrt(float64, float_status *status);
+float64 soft_float64_sqrt(float64, float_status *status);
float64 float64_log2(float64, float_status *status);
int float64_eq(float64, float64, float_status *status);
int float64_le(float64, float64, float_status *status);
diff --git a/fpu/hostfloat.c b/fpu/hostfloat.c
index a56b70a..974bd57 100644
--- a/fpu/hostfloat.c
+++ b/fpu/hostfloat.c
@@ -270,3 +270,23 @@ GEN_FPU_DIV(float64_div, float64, double, fabs, DBL_MIN)
GEN_FPU_FMA(float32_muladd, float32, float, fmaf, fabsf, FLT_MIN)
GEN_FPU_FMA(float64_muladd, float64, double, fma, fabs, DBL_MIN)
#undef GEN_FPU_FMA
+
+#define GEN_FPU_SQRT(name, soft_t, host_t, host_sqrt_func) \
+ soft_t name(soft_t a, float_status *s) \
+ { \
+ soft_t ## _input_flush1(&a, s); \
+ if (likely((soft_t ## _is_normal(a) || soft_t ## _is_zero(a)) && \
+ !soft_t ## _is_neg(a) && \
+ s->float_exception_flags & float_flag_inexact && \
+ s->float_rounding_mode == float_round_nearest_even)) { \
+ host_t ha = soft_t ## _to_ ## host_t(a); \
+ host_t hr = host_sqrt_func(ha); \
+ \
+ return host_t ## _to_ ## soft_t(hr); \
+ } \
+ return soft_ ## soft_t ## _sqrt(a, s); \
+ }
+
+GEN_FPU_SQRT(float32_sqrt, float32, float, sqrtf)
+GEN_FPU_SQRT(float64_sqrt, float64, double, sqrt)
+#undef GEN_FPU_SQRT
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index da81ec9..096b658 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -1980,14 +1980,16 @@ float16 __attribute__((flatten)) float16_sqrt(float16 a, float_status *status)
return float16_round_pack_canonical(pr, status);
}
-float32 __attribute__((flatten)) float32_sqrt(float32 a, float_status *status)
+float32 __attribute__((flatten))
+soft_float32_sqrt(float32 a, float_status *status)
{
FloatParts pa = float32_unpack_canonical(a, status);
FloatParts pr = sqrt_float(pa, status, &float32_params);
return float32_round_pack_canonical(pr, status);
}
-float64 __attribute__((flatten)) float64_sqrt(float64 a, float_status *status)
+float64 __attribute__((flatten))
+soft_float64_sqrt(float64 a, float_status *status)
{
FloatParts pa = float64_unpack_canonical(a, status);
FloatParts pr = sqrt_float(pa, status, &float64_params);
--
2.7.4
Emilio G. Cota <cota@braap.org> writes:
> Performance results for fp-bench run under aarch64-linux-user
> on an Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz host:
>
> - before:
> sqrt-single: 13.23 MFlops
> sqrt-double: 13.24 MFlops
>
> - after:
> sqrt-single: 15.02 MFlops
> sqrt-double: 15.07 MFlops
>
> Note that sqrt in soft-ft is relatively fast, which means
> that fp-bench is not very sensitive to changes to sqrt's
> emulation speed.
Weird, I thought we had slowed it down quite a bit in the re-factor as
we eschewed the estimate step for an easier to read but slower iterative
process. That's why I chose sqrt for my hostfp hack experiment.
>
> Signed-off-by: Emilio G. Cota <cota@braap.org>
> ---
> include/fpu/hostfloat.h | 2 ++
> include/fpu/softfloat.h | 4 ++--
> fpu/hostfloat.c | 20 ++++++++++++++++++++
> fpu/softfloat.c | 6 ++++--
> 4 files changed, 28 insertions(+), 4 deletions(-)
>
> diff --git a/include/fpu/hostfloat.h b/include/fpu/hostfloat.h
> index c006576..b1e0689 100644
> --- a/include/fpu/hostfloat.h
> +++ b/include/fpu/hostfloat.h
> @@ -16,11 +16,13 @@ float32 float32_sub(float32 a, float32 b, float_status *status);
> float32 float32_mul(float32 a, float32 b, float_status *status);
> float32 float32_div(float32 a, float32 b, float_status *status);
> float32 float32_muladd(float32 a, float32 b, float32 c, int f, float_status *s);
> +float32 float32_sqrt(float32 a, float_status *status);
>
> float64 float64_add(float64 a, float64 b, float_status *status);
> float64 float64_sub(float64 a, float64 b, float_status *status);
> float64 float64_mul(float64 a, float64 b, float_status *status);
> float64 float64_div(float64 a, float64 b, float_status *status);
> float64 float64_muladd(float64 a, float64 b, float64 c, int f, float_status *s);
> +float64 float64_sqrt(float64 a, float_status *status);
>
> #endif /* HOSTFLOAT_H */
> diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
> index 866bd3b..8d5a50a 100644
> --- a/include/fpu/softfloat.h
> +++ b/include/fpu/softfloat.h
> @@ -348,7 +348,7 @@ float32 soft_float32_mul(float32, float32, float_status *status);
> float32 soft_float32_div(float32, float32, float_status *status);
> float32 float32_rem(float32, float32, float_status *status);
> float32 soft_float32_muladd(float32, float32, float32, int, float_status *s);
> -float32 float32_sqrt(float32, float_status *status);
> +float32 soft_float32_sqrt(float32, float_status *status);
> float32 float32_exp2(float32, float_status *status);
> float32 float32_log2(float32, float_status *status);
> int float32_eq(float32, float32, float_status *status);
> @@ -488,7 +488,7 @@ float64 soft_float64_mul(float64, float64, float_status *status);
> float64 soft_float64_div(float64, float64, float_status *status);
> float64 float64_rem(float64, float64, float_status *status);
> float64 soft_float64_muladd(float64, float64, float64, int, float_status *s);
> -float64 float64_sqrt(float64, float_status *status);
> +float64 soft_float64_sqrt(float64, float_status *status);
> float64 float64_log2(float64, float_status *status);
> int float64_eq(float64, float64, float_status *status);
> int float64_le(float64, float64, float_status *status);
> diff --git a/fpu/hostfloat.c b/fpu/hostfloat.c
> index a56b70a..974bd57 100644
> --- a/fpu/hostfloat.c
> +++ b/fpu/hostfloat.c
> @@ -270,3 +270,23 @@ GEN_FPU_DIV(float64_div, float64, double, fabs, DBL_MIN)
> GEN_FPU_FMA(float32_muladd, float32, float, fmaf, fabsf, FLT_MIN)
> GEN_FPU_FMA(float64_muladd, float64, double, fma, fabs, DBL_MIN)
> #undef GEN_FPU_FMA
> +
> +#define GEN_FPU_SQRT(name, soft_t, host_t, host_sqrt_func) \
> + soft_t name(soft_t a, float_status *s) \
> + { \
> + soft_t ## _input_flush1(&a, s); \
> + if (likely((soft_t ## _is_normal(a) || soft_t ## _is_zero(a)) && \
> + !soft_t ## _is_neg(a) && \
> + s->float_exception_flags & float_flag_inexact && \
> + s->float_rounding_mode == float_round_nearest_even)) { \
> + host_t ha = soft_t ## _to_ ## host_t(a); \
> + host_t hr = host_sqrt_func(ha); \
> + \
> + return host_t ## _to_ ## soft_t(hr); \
> + } \
> + return soft_ ## soft_t ## _sqrt(a, s); \
> + }
> +
> +GEN_FPU_SQRT(float32_sqrt, float32, float, sqrtf)
> +GEN_FPU_SQRT(float64_sqrt, float64, double, sqrt)
> +#undef GEN_FPU_SQRT
> diff --git a/fpu/softfloat.c b/fpu/softfloat.c
> index da81ec9..096b658 100644
> --- a/fpu/softfloat.c
> +++ b/fpu/softfloat.c
> @@ -1980,14 +1980,16 @@ float16 __attribute__((flatten)) float16_sqrt(float16 a, float_status *status)
> return float16_round_pack_canonical(pr, status);
> }
>
> -float32 __attribute__((flatten)) float32_sqrt(float32 a, float_status *status)
> +float32 __attribute__((flatten))
> +soft_float32_sqrt(float32 a, float_status *status)
> {
> FloatParts pa = float32_unpack_canonical(a, status);
> FloatParts pr = sqrt_float(pa, status, &float32_params);
> return float32_round_pack_canonical(pr, status);
> }
>
> -float64 __attribute__((flatten)) float64_sqrt(float64 a, float_status *status)
> +float64 __attribute__((flatten))
> +soft_float64_sqrt(float64 a, float_status *status)
> {
> FloatParts pa = float64_unpack_canonical(a, status);
> FloatParts pr = sqrt_float(pa, status, &float64_params);
--
Alex Bennée
On Thu, Mar 22, 2018 at 01:29:23 +0000, Alex Bennée wrote:
> Emilio G. Cota <cota@braap.org> writes:
>
> > Performance results for fp-bench run under aarch64-linux-user
> > on an Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz host:
> >
> > - before:
> > sqrt-single: 13.23 MFlops
> > sqrt-double: 13.24 MFlops
> >
> > - after:
> > sqrt-single: 15.02 MFlops
> > sqrt-double: 15.07 MFlops
> >
> > Note that sqrt in soft-ft is relatively fast, which means
> > that fp-bench is not very sensitive to changes to sqrt's
> > emulation speed.
>
> Weird, I thought we had slowed it down quite a bit in the re-factor as
> we eschewed the estimate step for an easier to read but slower iterative
> process. That's why I chose sqrt for my hostfp hack experiment.
Yes, my first statement ("soft-ft is relatively fast") is
wrong. Sorry about that, I thought I had deleted it but it
slipped through.
What I should have said (but decided against to keep the commit log
short) is that fp-bench doesn't do a good job in being sensitive
to the performance of the sqrt instruction, so even if we got it
to take 0 time we'd still get a small speedup.
Just realised that this happens because ~50% of the inputs are
negative, which will go through some very slow paths. This ends
up showing in perf like this:
# Overhead Command Shared Object Symbol
# ........ ........ ................. ...........................
#
61.74% fp-bench fp-bench [.] main
22.58% fp-bench libm-2.23.so [.] __kernel_standard
6.22% fp-bench libm-2.23.so [.] __kernel_standard_f
5.21% fp-bench libm-2.23.so [.] __sqrtf
2.17% fp-bench fp-bench [.] _init
1.91% fp-bench [kernel.kallsyms] [k] __call_rcu.constprop.70
0.18% fp-bench [kernel.kallsyms] [k] cpumask_any_but
0.01% perf [kernel.kallsyms] [k] native_iret
0.00% perf [kernel.kallsyms] [k] native_write_msr_safe
__sqrtf (which does 'sqrtss %xmm0,%xmm0') only takes 5% of the time!
I just fixed fp-bench to discard negative inputs. This looks
much better: (Note that this is fp-bench-x86_64 instead of -aarch64,
which explains why the "before" throughput is different than
the one reported above)
[...]
+fma: (patch 11, i.e. sqrt still in soft-fp)
sqrt-single: 27.11 MFlops
sqrt-double: 27.17 MFlops
+sqrt: (12)
sqrt-single: 66.67 MFlops
sqrt-double: 66.79 MFlops
+cmp: (13)
sqrt-single: 126.46 MFlops
sqrt-double: 126.06 MFlops
+f32f64: (patch 14)
sqrt-single: 122.75 MFlops
sqrt-double: 126.57 MFlops
We get a >2x speedup, which is consistent with the fact
that now perf shows that sqrt takes ~60% of execution time.
Compare does matter here as well because libm is checking
sqrt's result against NaN.
I'll include this fix to fp-bench in v2.
Thanks,
E.
© 2016 - 2025 Red Hat, Inc.