Move the 64x64 => 128 multiply into a static inline helper function
for code clarity.
No need for the a/b_hi/lo variables; the implicit casts on the function
calls do the work for us.
Should have minimal effect on the generated code.

Signed-off-by: David Laight <david.laight.linux@gmail.com>
---

new patch for v3.

 lib/math/div64.c | 54 +++++++++++++++++++++++++++---------------------
 1 file changed, 30 insertions(+), 24 deletions(-)

diff --git a/lib/math/div64.c b/lib/math/div64.c
index 2ac7e25039a1..fb77fd9d999d 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -193,42 +193,48 @@ static u64 mul_add(u32 a, u32 b, u32 c)
 	return add_u64_u32(mul_u32_u32(a, b), c);
 }
 
-u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
-{
-	if (WARN_ONCE(!d, "%s: division of (%#llx * %#llx + %#llx) by zero, returning 0",
-		      __func__, a, b, c)) {
-		/*
-		 * Return 0 (rather than ~(u64)0) because it is less likely to
-		 * have unexpected side effects.
-		 */
-		return 0;
-	}
-
 #if defined(__SIZEOF_INT128__) && !defined(test_mul_u64_add_u64_div_u64)
-
+static inline u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)
+{
 	/* native 64x64=128 bits multiplication */
 	u128 prod = (u128)a * b + c;
-	u64 n_lo = prod, n_hi = prod >> 64;
 
-#else
+	*p_lo = prod;
+	return prod >> 64;
+}
 
-	/* perform a 64x64=128 bits multiplication manually */
-	u32 a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32;
+#else
+static inline u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)
+{
+	/* perform a 64x64=128 bits multiplication in 32bit chunks */
 	u64 x, y, z;
 
 	/* Since (x-1)(x-1) + 2(x-1) == x.x - 1 two u32 can be added to a u64 */
-	x = mul_add(a_lo, b_lo, c);
-	y = mul_add(a_lo, b_hi, c >> 32);
+	x = mul_add(a, b, c);
+	y = mul_add(a, b >> 32, c >> 32);
 	y = add_u64_u32(y, x >> 32);
-	z = mul_add(a_hi, b_hi, y >> 32);
-	y = mul_add(a_hi, b_lo, y);
-	z = add_u64_u32(z, y >> 32);
-	x = (y << 32) + (u32)x;
-
-	u64 n_lo = x, n_hi = z;
+	z = mul_add(a >> 32, b >> 32, y >> 32);
+	y = mul_add(a >> 32, b, y);
+	*p_lo = (y << 32) + (u32)x;
+	return add_u64_u32(z, y >> 32);
+}
 
 #endif
 
+u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
+{
+	u64 n_lo, n_hi;
+
+	if (WARN_ONCE(!d, "%s: division of (%llx * %llx + %llx) by zero, returning 0",
+		      __func__, a, b, c)) {
+		/*
+		 * Return 0 (rather than ~(u64)0) because it is less likely to
+		 * have unexpected side effects.
+		 */
+		return 0;
+	}
+
+	n_hi = mul_u64_u64_add_u64(&n_lo, a, b, c);
 	if (!n_hi)
 		return div64_u64(n_lo, d);
 
--
2.39.5
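
To see concretely why the a/b_hi/lo temporaries can go away, here is a
stand-alone user-space sketch of the 32-bit-chunk path (plain-C
stand-ins are assumed for the kernel's u32/u64 types and the mul_add()
and add_u64_u32() helpers; the reference check assumes a compiler with
__int128 support; this is an illustration, not the kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Stand-in for the kernel's mul_add(): u32 x u32 -> full 64-bit
 * product, plus a u32 addend.  Because the parameters are u32,
 * passing a u64 argument implicitly truncates it to its low 32 bits -
 * this is the "implicit cast" that replaces the old a/b_lo variables.
 */
static u64 mul_add(u32 a, u32 b, u32 c)
{
	return (u64)a * b + c;
}

static u64 add_u64_u32(u64 a, u32 b)
{
	return a + b;
}

/* The 32-bit-chunk body from the patch, copied verbatim.
 * With X = 2^32: (X-1)*(X-1) + 2*(X-1) == X*X - 1, so a u32*u32
 * product plus two u32 addends still fits in a u64 and no carry
 * can be lost below.
 */
static u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)
{
	u64 x, y, z;

	x = mul_add(a, b, c);
	y = mul_add(a, b >> 32, c >> 32);
	y = add_u64_u32(y, x >> 32);
	z = mul_add(a >> 32, b >> 32, y >> 32);
	y = mul_add(a >> 32, b, y);
	*p_lo = (y << 32) + (u32)x;
	return add_u64_u32(z, y >> 32);
}

int main(void)
{
	u64 a = 0xdeadbeefcafef00dULL, b = 0xfeedfacedeadd00dULL, c = ~(u64)0;
	u64 lo, hi;
	unsigned __int128 ref = (unsigned __int128)a * b + c;

	hi = mul_u64_u64_add_u64(&lo, a, b, c);
	assert(hi == (u64)(ref >> 64) && lo == (u64)ref);
	printf("ok: hi=%016llx lo=%016llx\n",
	       (unsigned long long)hi, (unsigned long long)lo);
	return 0;
}

Since mul_add() already takes u32 arguments, the truncation happens at
the call boundary rather than in named temporaries, which is why the
commit message expects minimal effect on the generated code.
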
On Sat, 14 Jun 2025, David Laight wrote:
> [...]
> #if defined(__SIZEOF_INT128__) && !defined(test_mul_u64_add_u64_div_u64)
> -
> +static inline u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)

Why not move the #if inside the function body and have only one function
definition?
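
For reference, the single-definition form being suggested might look
like this (a sketch assembled from the patch's own two bodies, not
code from the series):

static inline u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)
{
#if defined(__SIZEOF_INT128__) && !defined(test_mul_u64_add_u64_div_u64)
	/* native 64x64=128 bits multiplication */
	u128 prod = (u128)a * b + c;

	*p_lo = prod;
	return prod >> 64;
#else
	/* perform a 64x64=128 bits multiplication in 32bit chunks */
	u64 x, y, z;

	/* Since (x-1)(x-1) + 2(x-1) == x.x - 1 two u32 can be added to a u64 */
	x = mul_add(a, b, c);
	y = mul_add(a, b >> 32, c >> 32);
	y = add_u64_u32(y, x >> 32);
	z = mul_add(a >> 32, b >> 32, y >> 32);
	y = mul_add(a >> 32, b, y);
	*p_lo = (y << 32) + (u32)x;
	return add_u64_u32(z, y >> 32);
#endif
}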
On Sat, 14 Jun 2025 11:37:54 -0400 (EDT)
Nicolas Pitre <npitre@baylibre.com> wrote:
> On Sat, 14 Jun 2025, David Laight wrote:
>
> > [...]
> > +static inline u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)
>
> Why not move the #if inside the function body and have only one function
> definition?

Because I think it is easier to read with two definitions,
especially when the bodies are entirely different.

	David
On Sat, 14 Jun 2025, David Laight wrote:
> On Sat, 14 Jun 2025 11:37:54 -0400 (EDT)
> Nicolas Pitre <npitre@baylibre.com> wrote:
>
> > On Sat, 14 Jun 2025, David Laight wrote:
> > > [...]
> > > +static inline u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)
> >
> > Why not move the #if inside the function body and have only one function
> > definition?
>
> Because I think it is easier to read with two definitions,
> especially when the bodies are entirely different.

We have differing opinions here, but I don't care that strongly in this
case.

Reviewed-by: Nicolas Pitre <npitre@baylibre.com>