[PATCH v5 next 4/9] lib: Add mul_u64_add_u64_div_u64() and mul_u64_u64_div_u64_roundup()

Posted by David Laight 1 month, 1 week ago
The existing mul_u64_u64_div_u64() rounds down; a 'round up'
variant needs 'divisor - 1' added in between the multiply and
the divide, so it cannot easily be done by a caller.

Add mul_u64_add_u64_div_u64(a, b, c, d), which calculates (a * b + c) / d,
and implement the 'round down' and 'round up' variants using it.

Update the x86-64 asm to optimise for 'c' being a constant zero.

Add kernel-doc comments for all three functions.

Signed-off-by: David Laight <david.laight.linux@gmail.com>
Reviewed-by: Nicolas Pitre <npitre@baylibre.com>
---

Changes for v2 (formerly patch 1/3):
- Reinstate the early call to div64_u64() on 32bit when 'c' is zero.
  Although I'm not convinced the path is common enough to be worth
  the two ilog2() calls.
 
Changes for v3 (formerly patch 3/4):
- The early call to div64_u64() has been removed by patch 3.
  Pretty much guaranteed to be a pessimisation.

Changes for v4: 
- For x86-64 split the multiply, add and divide into three asm blocks.
  (gcc makes a pig's breakfast of (u128)a * b + c)
- Change the kernel-doc since divide by zero will (probably) fault.

Changes for v5:
- Fix test that excludes the add/adc asm block for constant zero 'add'.

 arch/x86/include/asm/div64.h | 20 +++++++++------
 include/linux/math64.h       | 48 +++++++++++++++++++++++++++++++++++-
 lib/math/div64.c             | 14 ++++++-----
 3 files changed, 67 insertions(+), 15 deletions(-)

diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
index 9931e4c7d73f..6d8a3de3f43a 100644
--- a/arch/x86/include/asm/div64.h
+++ b/arch/x86/include/asm/div64.h
@@ -84,21 +84,25 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
  * Will generate an #DE when the result doesn't fit u64, could fix with an
  * __ex_table[] entry when it becomes an issue.
  */
-static inline u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div)
+static inline u64 mul_u64_add_u64_div_u64(u64 rax, u64 mul, u64 add, u64 div)
 {
-	u64 q;
+	u64 rdx;
 
-	asm ("mulq %2; divq %3" : "=a" (q)
-				: "a" (a), "rm" (mul), "rm" (div)
-				: "rdx");
+	asm ("mulq %[mul]" : "+a" (rax), "=d" (rdx) : [mul] "rm" (mul));
 
-	return q;
+	if (!statically_true(!add))
+		asm ("addq %[add], %[lo]; adcq $0, %[hi]" :
+			[lo] "+r" (rax), [hi] "+r" (rdx) : [add] "irm" (add));
+
+	asm ("divq %[div]" : "+a" (rax), "+d" (rdx) : [div] "rm" (div));
+
+	return rax;
 }
-#define mul_u64_u64_div_u64 mul_u64_u64_div_u64
+#define mul_u64_add_u64_div_u64 mul_u64_add_u64_div_u64
 
 static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 div)
 {
-	return mul_u64_u64_div_u64(a, mul, div);
+	return mul_u64_add_u64_div_u64(a, mul, 0, div);
 }
 #define mul_u64_u32_div	mul_u64_u32_div
 
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 6aaccc1626ab..e889d850b7f1 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -282,7 +282,53 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
 }
 #endif /* mul_u64_u32_div */
 
-u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div);
+/**
+ * mul_u64_add_u64_div_u64 - unsigned 64bit multiply, add, and divide
+ * @a: first unsigned 64bit multiplicand
+ * @b: second unsigned 64bit multiplicand
+ * @c: unsigned 64bit addend
+ * @d: unsigned 64bit divisor
+ *
+ * Multiply two 64bit values together to generate a 128bit product
+ * add a third value and then divide by a fourth.
+ * The Generic code divides by 0 if @d is zero and returns ~0 on overflow.
+ * Architecture specific code may trap on zero or overflow.
+ *
+ * Return: (@a * @b + @c) / @d
+ */
+u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d);
+
+/**
+ * mul_u64_u64_div_u64 - unsigned 64bit multiply and divide
+ * @a: first unsigned 64bit multiplicand
+ * @b: second unsigned 64bit multiplicand
+ * @d: unsigned 64bit divisor
+ *
+ * Multiply two 64bit values together to generate a 128bit product
+ * and then divide by a third value.
+ * The Generic code divides by 0 if @d is zero and returns ~0 on overflow.
+ * Architecture specific code may trap on zero or overflow.
+ *
+ * Return: @a * @b / @d
+ */
+#define mul_u64_u64_div_u64(a, b, d) mul_u64_add_u64_div_u64(a, b, 0, d)
+
+/**
+ * mul_u64_u64_div_u64_roundup - unsigned 64bit multiply and divide rounded up
+ * @a: first unsigned 64bit multiplicand
+ * @b: second unsigned 64bit multiplicand
+ * @d: unsigned 64bit divisor
+ *
+ * Multiply two 64bit values together to generate a 128bit product
+ * and then divide and round up.
+ * The Generic code divides by 0 if @d is zero and returns ~0 on overflow.
+ * Architecture specific code may trap on zero or overflow.
+ *
+ * Return: (@a * @b + @d - 1) / @d
+ */
+#define mul_u64_u64_div_u64_roundup(a, b, d) \
+	({ u64 _tmp = (d); mul_u64_add_u64_div_u64(a, b, _tmp - 1, _tmp); })
+
 
 /**
  * DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 4a4b1aa9e6e1..a88391b8ada0 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -183,13 +183,13 @@ u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 }
 EXPORT_SYMBOL(iter_div_u64_rem);
 
-#ifndef mul_u64_u64_div_u64
-u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 d)
+#ifndef mul_u64_add_u64_div_u64
+u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
 {
 #if defined(__SIZEOF_INT128__)
 
 	/* native 64x64=128 bits multiplication */
-	u128 prod = (u128)a * b;
+	u128 prod = (u128)a * b + c;
 	u64 n_lo = prod, n_hi = prod >> 64;
 
 #else
@@ -198,8 +198,10 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 d)
 	u32 a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32;
 	u64 x, y, z;
 
-	x = (u64)a_lo * b_lo;
-	y = (u64)a_lo * b_hi + (u32)(x >> 32);
+	/* Since (x-1)(x-1) + 2(x-1) == x.x - 1 two u32 can be added to a u64 */
+	x = (u64)a_lo * b_lo + (u32)c;
+	y = (u64)a_lo * b_hi + (u32)(c >> 32);
+	y += (u32)(x >> 32);
 	z = (u64)a_hi * b_hi + (u32)(y >> 32);
 	y = (u64)a_hi * b_lo + (u32)y;
 	z += (u32)(y >> 32);
@@ -265,5 +267,5 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 d)
 
 	return res;
 }
-EXPORT_SYMBOL(mul_u64_u64_div_u64);
+EXPORT_SYMBOL(mul_u64_add_u64_div_u64);
 #endif
-- 
2.39.5
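
The comment added to the 32-bit fallback above is fairly terse. With
x = 2^32 it relies on (x - 1)(x - 1) + 2(x - 1) == x*x - 1: a 32x32
partial product plus two further u32 terms still fits in a u64, which
is why the two halves of 'c' can be folded straight into the partial
products without any extra overflow handling. A standalone sketch of
that accumulation (hypothetical helper name, not the kernel function
itself):

#include <linux/types.h>

/* Compute hi:lo = a * b + c using only 64-bit arithmetic. */
static void mul_u64_add_u64_128(u64 a, u64 b, u64 c, u64 *hi, u64 *lo)
{
	u32 a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32;
	u64 x, y, z;

	/*
	 * Each partial product is at most (2^32 - 1)^2; adding two more
	 * u32 terms (at most 2 * (2^32 - 1)) gives at most 2^64 - 1,
	 * so none of the u64 additions below can overflow.
	 */
	x = (u64)a_lo * b_lo + (u32)c;			/* + low half of c */
	y = (u64)a_lo * b_hi + (u32)(c >> 32);		/* + high half of c */
	y += (u32)(x >> 32);				/* carry out of x */
	z = (u64)a_hi * b_hi + (u32)(y >> 32);
	y = (u64)a_hi * b_lo + (u32)y;
	z += (u32)(y >> 32);

	*lo = (y << 32) | (u32)x;
	*hi = z;
}
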
Re: [PATCH v5 next 4/9] lib: Add mul_u64_add_u64_div_u64() and mul_u64_u64_div_u64_roundup()
Posted by H. Peter Anvin 1 month, 1 week ago
On November 5, 2025 12:10:30 PM PST, David Laight <david.laight.linux@gmail.com> wrote:
>The existing mul_u64_u64_div_u64() rounds down; a 'round up'
>variant needs 'divisor - 1' added in between the multiply and
>the divide, so it cannot easily be done by a caller.
...
>+	if (!statically_true(!add))
>+		asm ("addq %[add], %[lo]; adcq $0, %[hi]" :
>+			[lo] "+r" (rax), [hi] "+r" (rdx) : [add] "irm" (add));
>+
>+	asm ("divq %[div]" : "+a" (rax), "+d" (rdx) : [div] "rm" (div));
...
>+#define mul_u64_u64_div_u64_roundup(a, b, d) \
>+	({ u64 _tmp = (d); mul_u64_add_u64_div_u64(a, b, _tmp - 1, _tmp); })
...

For the roundup case, I'm somewhat curious how this compares with doing:

   cmp $1, %rdx
   sbb $-1, %rax

... after the division. At least it means not needing to compute d - 1, saving an instruction as well as a register. Unfortunately using an lea instruction to compute %rax (which otherwise would incorporate both) isn't possible since it doesn't set the flags.

The cmp; sbb sequence should be no slower than add; adc – I'm saying "no slower" because %rdx is never written to, so I think this is provably a better sequence; whether or not it is measurable is another thing (but if we are tweaking this stuff...)
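
For reference, what that post-division fixup computes (a sketch of the
suggestion above, not code from the patch; 'q' and 'r' stand for the
quotient in %rax and the remainder in %rdx after the divq, and the
helper name is made up):

#include <linux/types.h>

static inline u64 div_round_up_fixup(u64 q, u64 r)
{
	/*
	 * "cmpq $1, %rdx" computes r - 1, so CF = 1 iff r == 0;
	 * "sbbq $-1, %rax" then computes q + 1 - CF, i.e. the quotient
	 * is only bumped when the division was inexact.
	 */
	return q + (r != 0);
}
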
Re: [PATCH v5 next 4/9] lib: Add mul_u64_add_u64_div_u64() and mul_u64_u64_div_u64_roundup()
Posted by David Laight 1 month, 1 week ago
On Wed, 05 Nov 2025 16:26:05 -0800
"H. Peter Anvin" <hpa@zytor.com> wrote:

> On November 5, 2025 12:10:30 PM PST, David Laight <david.laight.linux@gmail.com> wrote:
> >The existing mul_u64_u64_div_u64() rounds down; a 'round up'
> >variant needs 'divisor - 1' added in between the multiply and
> >the divide, so it cannot easily be done by a caller.
...
> >diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
> >index 9931e4c7d73f..6d8a3de3f43a 100644
> >--- a/arch/x86/include/asm/div64.h
> >+++ b/arch/x86/include/asm/div64.h
> >@@ -84,21 +84,25 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
> >  * Will generate an #DE when the result doesn't fit u64, could fix with an
> >  * __ex_table[] entry when it becomes an issue.
> >  */
> >-static inline u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div)
> >+static inline u64 mul_u64_add_u64_div_u64(u64 rax, u64 mul, u64 add, u64 div)
> > {
> >-	u64 q;
> >+	u64 rdx;
> > 
> >-	asm ("mulq %2; divq %3" : "=a" (q)
> >-				: "a" (a), "rm" (mul), "rm" (div)
> >-				: "rdx");
> >+	asm ("mulq %[mul]" : "+a" (rax), "=d" (rdx) : [mul] "rm" (mul));
> > 
> >-	return q;
> >+	if (!statically_true(!add))
> >+		asm ("addq %[add], %[lo]; adcq $0, %[hi]" :
> >+			[lo] "+r" (rax), [hi] "+r" (rdx) : [add] "irm" (add));
> >+
> >+	asm ("divq %[div]" : "+a" (rax), "+d" (rdx) : [div] "rm" (div));
> >+
> >+	return rax;
> > }
> >-#define mul_u64_u64_div_u64 mul_u64_u64_div_u64
> >+#define mul_u64_add_u64_div_u64 mul_u64_add_u64_div_u64
...
> 
> For the roundup case, I'm somewhat curious how this compares with doing:

I guess you are referring to the x86-64 asm version (quoted above).

>    cmp $1, %rdx
>    sbb $-1, %rax
> 
> ... after the division. At least it means not needing to compute d - 1,
> saving an instruction as well as a register.

> Unfortunately using an lea instruction to compute %rax (which otherwise
>  would incorporate both) isn't possible since it doesn't set the flags.
> 
> The cmp; sbb sequence should be no slower than add;
> adc – I'm saying "no slower" because %rdx is never written to,
> so I think this is provably a better sequence; whether or not it is
> measurable is another thing (but if we are tweaking this stuff...)

I wanted the same function as the non-x86-64 version, and 'multiply and add'
possibly has other uses.

The instruction that calculates 'd - 1' (if it is not a constant) will usually
execute in parallel with an earlier instruction (e.g. the multiply),
so it will be pretty much 'zero cost'.
The add/adc pair is on the 'register dependency chain', so each adds a clock.
The same is true for your cmp/sbb pair.

(Except on pre-Broadwell Intel CPUs, where adc/sbb take two clocks.
I've lost the full reference; the initial change fixed 'adc $0,x' so that
the carry flag was generated immediately and only the result was delayed.
The doc said 'adc $0,reg' rather than 'adc $const,reg', so maybe the
'sbb $-1' stayed at two clocks for longer.)

	David
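
Putting the two alternatives side by side (hypothetical helper names; a
sketch of the sequences being compared, not the patch itself): either
way one extra instruction pair sits on the %rax dependency chain, while
the 'd - 1' of the first variant can be computed off that chain, in
parallel with the multiply.

#include <linux/types.h>

/* Round up by pre-adding d - 1 to the 128-bit product (as the patch does). */
static inline u64 roundup_pre_add(u64 a, u64 b, u64 d)
{
	u64 lo = a, hi, add = d - 1;	/* off the rax dependency chain */

	asm ("mulq %[b]" : "+a" (lo), "=d" (hi) : [b] "rm" (b));
	asm ("addq %[add], %[lo]; adcq $0, %[hi]"
		: [lo] "+a" (lo), [hi] "+d" (hi) : [add] "irm" (add));
	asm ("divq %[d]" : "+a" (lo), "+d" (hi) : [d] "rm" (d));
	return lo;
}

/* Round up by fixing up the quotient after the division (as suggested). */
static inline u64 roundup_post_sbb(u64 a, u64 b, u64 d)
{
	u64 lo = a, hi;

	asm ("mulq %[b]" : "+a" (lo), "=d" (hi) : [b] "rm" (b));
	asm ("divq %[d]" : "+a" (lo), "+d" (hi) : [d] "rm" (d));
	/* CF = 1 iff the remainder is zero; add 1 - CF to the quotient */
	asm ("cmpq $1, %[rem]; sbbq $-1, %[q]"
		: [q] "+a" (lo) : [rem] "d" (hi) : "cc");
	return lo;
}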