The existing mul_u64_u64_div_u64() rounds down; a 'rounding up'
variant needs 'divisor - 1' added in between the multiply and the
divide, so it cannot easily be done by a caller.

Add mul_u64_add_u64_div_u64(a, b, c, d) that calculates (a * b + c)/d
and implement the 'round down' and 'round up' variants using it.
Update the x86-64 asm to optimise for 'c' being a constant zero.
Add kernel-doc definitions for all three functions.
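
Not part of the patch itself, but for readers of the log: a minimal
user-space sketch of how the three helpers relate, assuming a compiler
with unsigned __int128 and GNU statement expressions (a demonstration,
not the kernel implementation):

    #include <stdint.h>

    /*
     * The rounding adjustment has to be applied to the full 128-bit
     * product, which a caller of the old rounding-down helper cannot
     * reach - hence the new addend parameter.
     */
    static uint64_t mul_u64_add_u64_div_u64(uint64_t a, uint64_t b,
                                            uint64_t c, uint64_t d)
    {
        return (uint64_t)(((unsigned __int128)a * b + c) / d);
    }

    /* Round down: the addend is a constant zero. */
    #define mul_u64_u64_div_u64(a, b, d) mul_u64_add_u64_div_u64(a, b, 0, d)

    /* Round up: fold 'divisor - 1' into the 128-bit intermediate. */
    #define mul_u64_u64_div_u64_roundup(a, b, d) \
        ({ uint64_t _d = (d); mul_u64_add_u64_div_u64(a, b, _d - 1, _d); })
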
Signed-off-by: David Laight <david.laight.linux@gmail.com>
Changes for v2 (formerly patch 1/3):
- Reinstate the early call to div64_u64() on 32bit when 'c' is zero.
  Although I'm not convinced the path is common enough to be worth
  the two ilog2() calls.
Changes for v3 (formerly patch 3/4):
- The early call to div64_u64() has been removed by patch 3.
  Pretty much guaranteed to be a pessimisation.
Signed-off-by: David Laight <david.laight.linux@gmail.com>
---
arch/x86/include/asm/div64.h | 19 ++++++++++-----
include/linux/math64.h | 45 +++++++++++++++++++++++++++++++++++-
lib/math/div64.c | 22 ++++++++++--------
3 files changed, 69 insertions(+), 17 deletions(-)
diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
index 9931e4c7d73f..7a0a916a2d7d 100644
--- a/arch/x86/include/asm/div64.h
+++ b/arch/x86/include/asm/div64.h
@@ -84,21 +84,28 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
* Will generate an #DE when the result doesn't fit u64, could fix with an
* __ex_table[] entry when it becomes an issue.
*/
-static inline u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div)
+static inline u64 mul_u64_add_u64_div_u64(u64 a, u64 mul, u64 add, u64 div)
{
u64 q;
- asm ("mulq %2; divq %3" : "=a" (q)
- : "a" (a), "rm" (mul), "rm" (div)
- : "rdx");
+ if (statically_true(!add)) {
+ asm ("mulq %2; divq %3" : "=a" (q)
+ : "a" (a), "rm" (mul), "rm" (div)
+ : "rdx");
+ } else {
+ asm ("mulq %2; addq %3, %%rax; adcq $0, %%rdx; divq %4"
+ : "=a" (q)
+ : "a" (a), "rm" (mul), "rm" (add), "rm" (div)
+ : "rdx");
+ }
return q;
}
-#define mul_u64_u64_div_u64 mul_u64_u64_div_u64
+#define mul_u64_add_u64_div_u64 mul_u64_add_u64_div_u64
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 div)
{
- return mul_u64_u64_div_u64(a, mul, div);
+ return mul_u64_add_u64_div_u64(a, mul, 0, div);
}
#define mul_u64_u32_div mul_u64_u32_div
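
Side note, not part of the diff: the constant-zero fast path above
relies on statically_true(), which only evaluates true when the
compiler can prove the expression non-zero at compile time. A sketch of
its shape (the real macro lives in include/linux/compiler.h, so treat
this definition as an assumption):

    /*
     * True only for a provably non-zero compile-time constant, so
     * mul_u64_u64_div_u64(a, b, d) - which expands to
     * mul_u64_add_u64_div_u64(a, b, 0, d) - statically selects the
     * shorter mulq/divq sequence with no run-time test or add/adc pair.
     */
    #define statically_true(x) (__builtin_constant_p(x) && (x))
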
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 6aaccc1626ab..e1c2e3642cec 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -282,7 +282,53 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
}
#endif /* mul_u64_u32_div */
-u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
+/**
+ * mul_u64_add_u64_div_u64 - unsigned 64bit multiply, add, and divide
+ * @a: first unsigned 64bit multiplicand
+ * @b: second unsigned 64bit multiplicand
+ * @c: unsigned 64bit addend
+ * @d: unsigned 64bit divisor
+ *
+ * Multiply two 64bit values together to generate a 128bit product
+ * add a third value and then divide by a fourth.
+ * Generic code returns 0 if @d is zero and ~0 if the quotient exceeds 64 bits.
+ * Architecture specific code may trap on zero or overflow.
+ *
+ * Return: (@a * @b + @c) / @d
+ */
+u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d);
+
+/**
+ * mul_u64_u64_div_u64 - unsigned 64bit multiply and divide
+ * @a: first unsigned 64bit multiplicand
+ * @b: second unsigned 64bit multiplicand
+ * @d: unsigned 64bit divisor
+ *
+ * Multiply two 64bit values together to generate a 128bit product
+ * and then divide by a third value.
+ * Generic code returns 0 if @d is zero and ~0 if the quotient exceeds 64 bits.
+ * Architecture specific code may trap on zero or overflow.
+ *
+ * Return: @a * @b / @d
+ */
+#define mul_u64_u64_div_u64(a, b, d) mul_u64_add_u64_div_u64(a, b, 0, d)
+
+/**
+ * mul_u64_u64_div_u64_roundup - unsigned 64bit multiply and divide rounded up
+ * @a: first unsigned 64bit multiplicand
+ * @b: second unsigned 64bit multiplicand
+ * @d: unsigned 64bit divisor
+ *
+ * Multiply two 64bit values together to generate a 128bit product
+ * and then divide and round up.
+ * Generic code returns 0 if @d is zero and ~0 if the quotient exceeds 64 bits.
+ * Architecture specific code may trap on zero or overflow.
+ *
+ * Return: (@a * @b + @d - 1) / @d
+ */
+#define mul_u64_u64_div_u64_roundup(a, b, d) \
+ ({ u64 _tmp = (d); mul_u64_add_u64_div_u64(a, b, _tmp - 1, _tmp); })
+
/**
* DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
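
To show the intended use, a hypothetical caller (ns_to_ticks and its
parameters are invented for illustration; NSEC_PER_SEC is the usual
kernel constant): converting a duration to clock ticks wants the
round-up form so a timeout is never shortened.

    /* Hypothetical example: ticks = ns * rate / 1e9, rounded up. */
    static u64 ns_to_ticks(u64 ns, u64 rate_hz)
    {
        return mul_u64_u64_div_u64_roundup(ns, rate_hz, NSEC_PER_SEC);
    }
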
diff --git a/lib/math/div64.c b/lib/math/div64.c
index ed9475b9e1ef..7850cc0a7596 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -184,11 +184,11 @@ u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
}
EXPORT_SYMBOL(iter_div_u64_rem);
-#ifndef mul_u64_u64_div_u64
-u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 d)
+#ifndef mul_u64_add_u64_div_u64
+u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
{
- if (WARN_ONCE(!d, "%s: division of (%#llx * %#llx) by zero, returning 0",
- __func__, a, b)) {
+ if (WARN_ONCE(!d, "%s: division of (%#llx * %#llx + %#llx) by zero, returning 0",
+ __func__, a, b, c)) {
/*
* Return 0 (rather than ~(u64)0) because it is less likely to
* have unexpected side effects.
@@ -199,7 +199,7 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 d)
#if defined(__SIZEOF_INT128__)
/* native 64x64=128 bits multiplication */
- u128 prod = (u128)a * b;
+ u128 prod = (u128)a * b + c;
u64 n_lo = prod, n_hi = prod >> 64;
#else
@@ -208,8 +208,10 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 d)
u32 a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32;
u64 x, y, z;
- x = (u64)a_lo * b_lo;
- y = (u64)a_lo * b_hi + (u32)(x >> 32);
+ /* Since (x-1)(x-1) + 2(x-1) == x.x - 1 two u32 can be added to a u64 */
+ x = (u64)a_lo * b_lo + (u32)c;
+ y = (u64)a_lo * b_hi + (u32)(c >> 32);
+ y += (u32)(x >> 32);
z = (u64)a_hi * b_hi + (u32)(y >> 32);
y = (u64)a_hi * b_lo + (u32)y;
z += (u32)(y >> 32);
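
Spelling out the comment's overflow argument: with B = 2^32, each
partial product is at most (B-1)^2, and (B-1)^2 + 2*(B-1) = B^2 - 1 =
U64_MAX, so a u64 holding a 32x32 product can absorb two u32 addends
without wrapping. A self-contained user-space check (demonstration
only):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t m = UINT32_MAX; /* B - 1 for B = 2^32 */

        /* (B-1)^2 + 2*(B-1) == B^2 - 1 == UINT64_MAX */
        assert(m * m + 2 * m == UINT64_MAX);
        return 0;
    }
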
@@ -223,8 +225,8 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 d)
return div64_u64(n_lo, d);
if (WARN_ONCE(n_hi >= d,
- "%s: division of (%#llx * %#llx = %#llx%016llx) by %#llx overflows, returning ~0",
- __func__, a, b, n_hi, n_lo, d))
+ "%s: division of (%#llx * %#llx + %#llx = %#llx%016llx) by %#llx overflows, returning ~0",
+ __func__, a, b, c, n_hi, n_lo, d))
return ~(u64)0;
int shift = __builtin_ctzll(d);
@@ -268,5 +270,5 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 d)
return res;
}
-EXPORT_SYMBOL(mul_u64_u64_div_u64);
+EXPORT_SYMBOL(mul_u64_add_u64_div_u64);
#endif
--
2.39.5
On Sat, 14 Jun 2025, David Laight wrote:

> The existing mul_u64_u64_div_u64() rounds down; a 'rounding up'
> variant needs 'divisor - 1' added in between the multiply and the
> divide, so it cannot easily be done by a caller.
>
> Add mul_u64_add_u64_div_u64(a, b, c, d) that calculates (a * b + c)/d
> and implement the 'round down' and 'round up' variants using it.
>
> Update the x86-64 asm to optimise for 'c' being a constant zero.
>
> Add kernel-doc definitions for all three functions.
>
> Signed-off-by: David Laight <david.laight.linux@gmail.com>

Reviewed-by: Nicolas Pitre <npitre@baylibre.com>

> Changes for v2 (formerly patch 1/3):
> - Reinstate the early call to div64_u64() on 32bit when 'c' is zero.
>   Although I'm not convinced the path is common enough to be worth
>   the two ilog2() calls.
>
> Changes for v3 (formerly patch 3/4):
> - The early call to div64_u64() has been removed by patch 3.
>   Pretty much guaranteed to be a pessimisation.

Might get rid of the above in the log. Justification is in the previous
patch.

> Signed-off-by: David Laight <david.laight.linux@gmail.com>

Double signoff.

[...]