The existing mul_u64_u64_div_u64() rounds down; a 'rounding up'
variant needs 'divisor - 1' added in between the multiply and
divide, so it cannot easily be done by a caller (see the sketch below).
Add mul_u64_add_u64_div_u64(a, b, c, d) that calculates (a * b + c)/d
and implement the 'round down' and 'round up' variants using it.
Update the x86-64 asm to optimise for 'c' being a constant zero.
Add kernel-doc definitions for all three functions.
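For illustration only (an editorial sketch, not kernel code; it assumes a
compiler with __int128 support), the identity the round-up variant relies on:

/* Sketch: why 'divisor - 1' must be added before the divide.  The
 * product a * b can exceed 64 bits, so a caller holding only the
 * rounded-down 64bit quotient cannot recover the rounded-up result;
 * the addend has to go into the 128-bit intermediate.
 */
static unsigned long long mul_div_roundup_sketch(unsigned long long a,
						 unsigned long long b,
						 unsigned long long d)
{
	unsigned __int128 prod = (unsigned __int128)a * b;

	return (prod + d - 1) / d;	/* assumes the quotient fits in 64 bits */
}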
Signed-off-by: David Laight <david.laight.linux@gmail.com>
---
Changes for v2 (formerly patch 1/3):
- Reinstate the early call to div64_u64() on 32bit when 'c' is zero.
Although I'm not convinced the path is common enough to be worth
the two ilog2() calls.
Changes for v3 (formerly patch 3/4):
- The early call to div64_u64() has been removed by patch 3.
Pretty much guaranteed to be a pessimisation.
Changes for v4:
- For x86-64 split the multiply, add and divide into three asm blocks.
  (gcc makes a pig's breakfast of (u128)a * b + c)
- Change the kernel-doc since divide by zero will (probably) fault.
arch/x86/include/asm/div64.h | 20 +++++++++------
include/linux/math64.h | 48 +++++++++++++++++++++++++++++++++++-
lib/math/div64.c | 14 ++++++-----
3 files changed, 67 insertions(+), 15 deletions(-)
diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
index 9931e4c7d73f..cabdc2d5a68f 100644
--- a/arch/x86/include/asm/div64.h
+++ b/arch/x86/include/asm/div64.h
@@ -84,21 +84,25 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
* Will generate an #DE when the result doesn't fit u64, could fix with an
* __ex_table[] entry when it becomes an issue.
*/
-static inline u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div)
+static inline u64 mul_u64_add_u64_div_u64(u64 rax, u64 mul, u64 add, u64 div)
{
-	u64 q;
+	u64 rdx;

-	asm ("mulq %2; divq %3" : "=a" (q)
-	     : "a" (a), "rm" (mul), "rm" (div)
-	     : "rdx");
+	asm ("mulq %[mul]" : "+a" (rax), "=d" (rdx) : [mul] "rm" (mul));

-	return q;
+	if (statically_true(add))
+		asm ("addq %[add], %[lo]; adcq $0, %[hi]" :
+		     [lo] "+r" (rax), [hi] "+r" (rdx) : [add] "irm" (add));
+
+	asm ("divq %[div]" : "+a" (rax), "+d" (rdx) : [div] "rm" (div));
+
+	return rax;
}
-#define mul_u64_u64_div_u64 mul_u64_u64_div_u64
+#define mul_u64_add_u64_div_u64 mul_u64_add_u64_div_u64

static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 div)
{
-	return mul_u64_u64_div_u64(a, mul, div);
+	return mul_u64_add_u64_div_u64(a, mul, 0, div);
}
#define mul_u64_u32_div mul_u64_u32_div
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 6aaccc1626ab..e889d850b7f1 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -282,7 +282,53 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
}
#endif /* mul_u64_u32_div */
-u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
+/**
+ * mul_u64_add_u64_div_u64 - unsigned 64bit multiply, add, and divide
+ * @a: first unsigned 64bit multiplicand
+ * @b: second unsigned 64bit multiplicand
+ * @c: unsigned 64bit addend
+ * @d: unsigned 64bit divisor
+ *
+ * Multiply two 64bit values together to generate a 128bit product
+ * add a third value and then divide by a fourth.
+ * The Generic code divides by 0 if @d is zero and returns ~0 on overflow.
+ * Architecture specific code may trap on zero or overflow.
+ *
+ * Return: (@a * @b + @c) / @d
+ */
+u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d);
+
+/**
+ * mul_u64_u64_div_u64 - unsigned 64bit multiply and divide
+ * @a: first unsigned 64bit multiplicand
+ * @b: second unsigned 64bit multiplicand
+ * @d: unsigned 64bit divisor
+ *
+ * Multiply two 64bit values together to generate a 128bit product
+ * and then divide by a third value.
+ * The Generic code divides by 0 if @d is zero and returns ~0 on overflow.
+ * Architecture specific code may trap on zero or overflow.
+ *
+ * Return: @a * @b / @d
+ */
+#define mul_u64_u64_div_u64(a, b, d) mul_u64_add_u64_div_u64(a, b, 0, d)
+
+/**
+ * mul_u64_u64_div_u64_roundup - unsigned 64bit multiply and divide rounded up
+ * @a: first unsigned 64bit multiplicand
+ * @b: second unsigned 64bit multiplicand
+ * @d: unsigned 64bit divisor
+ *
+ * Multiply two 64bit values together to generate a 128bit product
+ * and then divide and round up.
+ * The Generic code divides by 0 if @d is zero and returns ~0 on overflow.
+ * Architecture specific code may trap on zero or overflow.
+ *
+ * Return: (@a * @b + @d - 1) / @d
+ */
+#define mul_u64_u64_div_u64_roundup(a, b, d) \
+	({ u64 _tmp = (d); mul_u64_add_u64_div_u64(a, b, _tmp - 1, _tmp); })
+
/**
* DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 7158d141b6e9..25295daebde9 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -183,13 +183,13 @@ u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
}
EXPORT_SYMBOL(iter_div_u64_rem);
-#ifndef mul_u64_u64_div_u64
-u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 d)
+#ifndef mul_u64_add_u64_div_u64
+u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
{
#if defined(__SIZEOF_INT128__)
	/* native 64x64=128 bits multiplication */
-	u128 prod = (u128)a * b;
+	u128 prod = (u128)a * b + c;
	u64 n_lo = prod, n_hi = prod >> 64;
#else
@@ -198,8 +198,10 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 d)
	u32 a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32;
	u64 x, y, z;

-	x = (u64)a_lo * b_lo;
-	y = (u64)a_lo * b_hi + (u32)(x >> 32);
+	/* Since (x-1)*(x-1) + 2*(x-1) == x*x - 1, two u32 can be added to a u64 */
+	x = (u64)a_lo * b_lo + (u32)c;
+	y = (u64)a_lo * b_hi + (u32)(c >> 32);
+	y += (u32)(x >> 32);
	z = (u64)a_hi * b_hi + (u32)(y >> 32);
	y = (u64)a_hi * b_lo + (u32)y;
	z += (u32)(y >> 32);
@@ -265,5 +267,5 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 d)
return res;
}
-EXPORT_SYMBOL(mul_u64_u64_div_u64);
+EXPORT_SYMBOL(mul_u64_add_u64_div_u64);
#endif
--
2.39.5
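A usage sketch (editorial, not from the patch; the helper name and the
conversion it performs are hypothetical) showing the intended caller pattern
for the new round-up variant:

/* Hypothetical example: convert a duration in nanoseconds to timer
 * cycles, rounding up so a programmed delay is never shorter than
 * requested.  ns * rate_hz can exceed 64 bits, hence the 128-bit
 * multiply-divide helper rather than plain u64 arithmetic.
 */
static u64 ns_to_cycles_roundup(u64 ns, u64 rate_hz)
{
	return mul_u64_u64_div_u64_roundup(ns, rate_hz, NSEC_PER_SEC);
}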
On Wed, 29 Oct 2025 17:38:23 +0000
David Laight <david.laight.linux@gmail.com> wrote:
There is a slight bug ...
> +	if (statically_true(add))
This needs to be:
if (!statically_true(!add))
Do you need me to resend the full series?
David
> + asm ("addq %[add], %[lo]; adcq $0, %[hi]" :
> + [lo] "+r" (rax), [hi] "+r" (rdx) : [add] "irm" (add));
> +
> + asm ("divq %[div]" : "+a" (rax), "+d" (rdx) : [div] "rm" (div));
> +
> + return rax;
> }
> -#define mul_u64_u64_div_u64 mul_u64_u64_div_u64
> +#define mul_u64_add_u64_div_u64 mul_u64_add_u64_div_u64
>
> static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 div)
> {
> - return mul_u64_u64_div_u64(a, mul, div);
> + return mul_u64_add_u64_div_u64(a, mul, 0, div);
> }
> #define mul_u64_u32_div mul_u64_u32_div
>
> diff --git a/include/linux/math64.h b/include/linux/math64.h
> index 6aaccc1626ab..e889d850b7f1 100644
> --- a/include/linux/math64.h
> +++ b/include/linux/math64.h
> @@ -282,7 +282,53 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
> }
> #endif /* mul_u64_u32_div */
>
> -u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
> +/**
> + * mul_u64_add_u64_div_u64 - unsigned 64bit multiply, add, and divide
> + * @a: first unsigned 64bit multiplicand
> + * @b: second unsigned 64bit multiplicand
> + * @c: unsigned 64bit addend
> + * @d: unsigned 64bit divisor
> + *
> + * Multiply two 64bit values together to generate a 128bit product
> + * add a third value and then divide by a fourth.
> + * The Generic code divides by 0 if @d is zero and returns ~0 on overflow.
> + * Architecture specific code may trap on zero or overflow.
> + *
> + * Return: (@a * @b + @c) / @d
> + */
> +u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d);
> +
> +/**
> + * mul_u64_u64_div_u64 - unsigned 64bit multiply and divide
> + * @a: first unsigned 64bit multiplicand
> + * @b: second unsigned 64bit multiplicand
> + * @d: unsigned 64bit divisor
> + *
> + * Multiply two 64bit values together to generate a 128bit product
> + * and then divide by a third value.
> + * The Generic code divides by 0 if @d is zero and returns ~0 on overflow.
> + * Architecture specific code may trap on zero or overflow.
> + *
> + * Return: @a * @b / @d
> + */
> +#define mul_u64_u64_div_u64(a, b, d) mul_u64_add_u64_div_u64(a, b, 0, d)
> +
> +/**
> + * mul_u64_u64_div_u64_roundup - unsigned 64bit multiply and divide rounded up
> + * @a: first unsigned 64bit multiplicand
> + * @b: second unsigned 64bit multiplicand
> + * @d: unsigned 64bit divisor
> + *
> + * Multiply two 64bit values together to generate a 128bit product
> + * and then divide and round up.
> + * The Generic code divides by 0 if @d is zero and returns ~0 on overflow.
> + * Architecture specific code may trap on zero or overflow.
> + *
> + * Return: (@a * @b + @d - 1) / @d
> + */
> +#define mul_u64_u64_div_u64_roundup(a, b, d) \
> + ({ u64 _tmp = (d); mul_u64_add_u64_div_u64(a, b, _tmp - 1, _tmp); })
> +
>
> /**
> * DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
> diff --git a/lib/math/div64.c b/lib/math/div64.c
> index 7158d141b6e9..25295daebde9 100644
> --- a/lib/math/div64.c
> +++ b/lib/math/div64.c
> @@ -183,13 +183,13 @@ u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
> }
> EXPORT_SYMBOL(iter_div_u64_rem);
>
> -#ifndef mul_u64_u64_div_u64
> -u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 d)
> +#ifndef mul_u64_add_u64_div_u64
> +u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
> {
> #if defined(__SIZEOF_INT128__)
>
> /* native 64x64=128 bits multiplication */
> - u128 prod = (u128)a * b;
> + u128 prod = (u128)a * b + c;
> u64 n_lo = prod, n_hi = prod >> 64;
>
> #else
> @@ -198,8 +198,10 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 d)
> u32 a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32;
> u64 x, y, z;
>
> - x = (u64)a_lo * b_lo;
> - y = (u64)a_lo * b_hi + (u32)(x >> 32);
> + /* Since (x-1)(x-1) + 2(x-1) == x.x - 1 two u32 can be added to a u64 */
> + x = (u64)a_lo * b_lo + (u32)c;
> + y = (u64)a_lo * b_hi + (u32)(c >> 32);
> + y += (u32)(x >> 32);
> z = (u64)a_hi * b_hi + (u32)(y >> 32);
> y = (u64)a_hi * b_lo + (u32)y;
> z += (u32)(y >> 32);
> @@ -265,5 +267,5 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 d)
>
> return res;
> }
> -EXPORT_SYMBOL(mul_u64_u64_div_u64);
> +EXPORT_SYMBOL(mul_u64_add_u64_div_u64);
> #endif
On Fri, 31 Oct 2025 20:59:17 +0000 David Laight <david.laight.linux@gmail.com> wrote:
> > + if (statically_true(add))
>
> This needs to be:
> if (!statically_true(!add))
>
> Do you need me to resend the full series?
I queued a fix, thanks.
--- a/arch/x86/include/asm/div64.h~lib-add-mul_u64_add_u64_div_u64-and-mul_u64_u64_div_u64_roundup-fix
+++ a/arch/x86/include/asm/div64.h
@@ -90,7 +90,7 @@ static inline u64 mul_u64_add_u64_div_u6
asm ("mulq %[mul]" : "+a" (rax), "=d" (rdx) : [mul] "rm" (mul));
- if (statically_true(add))
+ if (!statically_true(add))
asm ("addq %[add], %[lo]; adcq $0, %[hi]" :
[lo] "+r" (rax), [hi] "+r" (rdx) : [add] "irm" (add));
_
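To see why the corrected guard works: statically_true(x) in
include/linux/compiler.h expands (in current kernels) to
(__builtin_constant_p(x) && (x)), so the three cases can be checked with a
small userspace model (editorial sketch, not kernel code):

/* Userspace model of the kernel's statically_true(). */
#define statically_true(x) (__builtin_constant_p(x) && (x))

/* Constant-zero addend: guard is false, the addq/adcq pair is omitted
 * and mul_u64_u64_div_u64() still compiles to just mulq; divq.
 */
_Static_assert(!statically_true(!0) == 0, "add == 0: add skipped");

/* Constant non-zero addend: guard is true, the add is emitted. */
_Static_assert(!statically_true(!5) == 1, "add != 0: add emitted");

/* A runtime addend makes __builtin_constant_p(!add) zero, so the guard
 * is also true and the add is emitted - the safe default.  The v4
 * guard statically_true(add) was false for runtime values, silently
 * dropping the addend.
 */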
On Wed, 29 Oct 2025, David Laight wrote:
> Signed-off-by: David Laight <david.laight.linux@gmail.com>
Reviewed-by: Nicolas Pitre <npitre@baylibre.com>