[PATCH v5 next 7/9] lib: mul_u64_u64_div_u64() optimise multiply on 32bit x86
Posted by David Laight 1 month, 1 week ago
gcc generates horrid code for both ((u64)u32_a * u32_b) and (u64_a + u32_b).
As well as the extra instructions, it can generate a lot of spills to stack
(including spills of constant zeros and even multiplies by constant zero).

mul_u32_u32() already exists to optimise the multiply.
Add a similar add_u64_u32() for the addition.
Disable both for clang - it generates better code without them.

Move the 64x64 => 128 multiply into a static inline helper function
for code clarity.
No need for the a/b_hi/lo variables, the implicit casts on the function
calls do the work for us.
Should have minimal effect on the generated code.

Use mul_u32_u32() and add_u64_u32() in the 64x64 => 128 multiply
in mul_u64_add_u64_div_u64().

Signed-off-by: David Laight <david.laight.linux@gmail.com>
Reviewed-by: Nicolas Pitre <npitre@baylibre.com>
---

Changes for v4:
- merge in patch 8.
- Add comments about gcc being 'broken' for mixed 32/64 bit maths.
  clang doesn't have the same issues.
- Use a #define for mul_add() to avoid 'defined but not used'
  errors.
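
As a side note (not part of the patch): the overflow argument behind the
"Since (x-1)(x-1) + 2(x-1) == x.x - 1" comment in the chunked multiply
below, written out with x = 2^32. Each u32 operand is at most x - 1, so

	(x - 1)*(x - 1)             = x^2 - 2*x + 1
	(x - 1)*(x - 1) + 2*(x - 1) = x^2 - 1

i.e. a u32 * u32 product plus two further u32 values is at most 2^64 - 1,
so the add_u64_u32() steps cannot overflow the u64 partial sums.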

 arch/x86/include/asm/div64.h | 19 +++++++++++++++++
 include/linux/math64.h       | 11 ++++++++++
 lib/math/div64.c             | 40 +++++++++++++++++++++++-------------
 3 files changed, 56 insertions(+), 14 deletions(-)

diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
index 6d8a3de3f43a..30fd06ede751 100644
--- a/arch/x86/include/asm/div64.h
+++ b/arch/x86/include/asm/div64.h
@@ -60,6 +60,12 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 }
 #define div_u64_rem	div_u64_rem
 
+/*
+ * gcc tends to zero extend 32bit values and do full 64bit maths.
+ * Define asm functions that avoid this.
+ * (clang generates better code for the C versions.)
+ */
+#ifndef __clang__
 static inline u64 mul_u32_u32(u32 a, u32 b)
 {
 	u32 high, low;
@@ -71,6 +77,19 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
 }
 #define mul_u32_u32 mul_u32_u32
 
+static inline u64 add_u64_u32(u64 a, u32 b)
+{
+	u32 high = a >> 32, low = a;
+
+	asm ("addl %[b], %[low]; adcl $0, %[high]"
+		: [low] "+r" (low), [high] "+r" (high)
+		: [b] "rm" (b) );
+
+	return low | (u64)high << 32;
+}
+#define add_u64_u32 add_u64_u32
+#endif
+
 /*
  * __div64_32() is never called on x86, so prevent the
  * generic definition from getting built.
diff --git a/include/linux/math64.h b/include/linux/math64.h
index e889d850b7f1..cc305206d89f 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -158,6 +158,17 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
 }
 #endif
 
+#ifndef add_u64_u32
+/*
+ * Many a GCC version also messes this up.
+ * Zero extending b and then spilling everything to stack.
+ */
+static inline u64 add_u64_u32(u64 a, u32 b)
+{
+	return a + b;
+}
+#endif
+
 #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
 
 #ifndef mul_u64_u32_shr
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 18a9ba26c418..bb57a48ce36a 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -186,33 +186,45 @@ EXPORT_SYMBOL(iter_div_u64_rem);
 #endif
 
 #if !defined(mul_u64_add_u64_div_u64) || defined(test_mul_u64_add_u64_div_u64)
-u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
-{
+
+#define mul_add(a, b, c) add_u64_u32(mul_u32_u32(a, b), c)
+
 #if defined(__SIZEOF_INT128__) && !defined(test_mul_u64_add_u64_div_u64)
 
+static inline u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)
+{
 	/* native 64x64=128 bits multiplication */
 	u128 prod = (u128)a * b + c;
-	u64 n_lo = prod, n_hi = prod >> 64;
+
+	*p_lo = prod;
+	return prod >> 64;
+}
 
 #else
 
-	/* perform a 64x64=128 bits multiplication manually */
-	u32 a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32;
+static inline u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)
+{
+	/* perform a 64x64=128 bits multiplication in 32bit chunks */
 	u64 x, y, z;
 
 	/* Since (x-1)(x-1) + 2(x-1) == x.x - 1 two u32 can be added to a u64 */
-	x = (u64)a_lo * b_lo + (u32)c;
-	y = (u64)a_lo * b_hi + (u32)(c >> 32);
-	y += (u32)(x >> 32);
-	z = (u64)a_hi * b_hi + (u32)(y >> 32);
-	y = (u64)a_hi * b_lo + (u32)y;
-	z += (u32)(y >> 32);
-	x = (y << 32) + (u32)x;
-
-	u64 n_lo = x, n_hi = z;
+	x = mul_add(a, b, c);
+	y = mul_add(a, b >> 32, c >> 32);
+	y = add_u64_u32(y, x >> 32);
+	z = mul_add(a >> 32, b >> 32, y >> 32);
+	y = mul_add(a >> 32, b, y);
+	*p_lo = (y << 32) + (u32)x;
+	return add_u64_u32(z, y >> 32);
+}
 
 #endif
 
+u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
+{
+	u64 n_lo, n_hi;
+
+	n_hi = mul_u64_u64_add_u64(&n_lo, a, b, c);
+
 	if (!n_hi)
 		return div64_u64(n_lo, d);
 
-- 
2.39.5
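
For anyone who wants to sanity-check the 32bit-chunk decomposition in user
space, here is a minimal sketch. The helpers are standalone copies mirroring
the patch; the names, the test values and the __int128 reference are only
for this illustration and are not part of the patch.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* user-space stand-ins for the kernel helpers */
static u64 mul_u32_u32(u32 a, u32 b) { return (u64)a * b; }
static u64 add_u64_u32(u64 a, u32 b) { return a + b; }
#define mul_add(a, b, c) add_u64_u32(mul_u32_u32(a, b), c)

/* 64x64 => 128 multiply plus 64bit addend, done in 32bit chunks */
static u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)
{
        u64 x, y, z;

        x = mul_add(a, b, c);
        y = mul_add(a, b >> 32, c >> 32);
        y = add_u64_u32(y, x >> 32);
        z = mul_add(a >> 32, b >> 32, y >> 32);
        y = mul_add(a >> 32, b, y);
        *p_lo = (y << 32) + (u32)x;
        return add_u64_u32(z, y >> 32);
}

int main(void)
{
        u64 a = 0xdeadbeefcafef00dULL, b = 0x123456789abcdef1ULL, c = ~0ULL;
        unsigned __int128 ref = (unsigned __int128)a * b + c;
        u64 lo;
        u64 hi = mul_u64_u64_add_u64(&lo, a, b, c);

        /* compare against the compiler's native 128bit result */
        printf("%s\n", (hi == (u64)(ref >> 64) && lo == (u64)ref) ?
               "ok" : "MISMATCH");
        return 0;
}
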
Re: [PATCH v5 next 7/9] lib: mul_u64_u64_div_u64() optimise multiply on 32bit x86
Posted by H. Peter Anvin 1 month, 1 week ago
On November 5, 2025 12:10:33 PM PST, David Laight <david.laight.linux@gmail.com> wrote:
>gcc generates horrid code for both ((u64)u32_a * u32_b) and (u64_a + u32_b).
>As well as the extra instructions, it can generate a lot of spills to stack
>(including spills of constant zeros and even multiplies by constant zero).
>
>mul_u32_u32() already exists to optimise the multiply.
>Add a similar add_u64_u32() for the addition.
>Disable both for clang - it generates better code without them.
>
>Move the 64x64 => 128 multiply into a static inline helper function
>for code clarity.
>No need for the a/b_hi/lo variables, the implicit casts on the function
>calls do the work for us.
>Should have minimal effect on the generated code.
>
>Use mul_u32_u32() and add_u64_u32() in the 64x64 => 128 multiply
>in mul_u64_add_u64_div_u64().
>
>Signed-off-by: David Laight <david.laight.linux@gmail.com>
>Reviewed-by: Nicolas Pitre <npitre@baylibre.com>
>---
>
>Changes for v4:
>- merge in patch 8.
>- Add comments about gcc being 'broken' for mixed 32/64 bit maths.
>  clang doesn't have the same issues.
>- Use a #define for mul_add() to avoid 'defined but not used'
>  errors.
>
> arch/x86/include/asm/div64.h | 19 +++++++++++++++++
> include/linux/math64.h       | 11 ++++++++++
> lib/math/div64.c             | 40 +++++++++++++++++++++++-------------
> 3 files changed, 56 insertions(+), 14 deletions(-)
>
>diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
>index 6d8a3de3f43a..30fd06ede751 100644
>--- a/arch/x86/include/asm/div64.h
>+++ b/arch/x86/include/asm/div64.h
>@@ -60,6 +60,12 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
> }
> #define div_u64_rem	div_u64_rem
> 
>+/*
>+ * gcc tends to zero extend 32bit values and do full 64bit maths.
>+ * Define asm functions that avoid this.
>+ * (clang generates better code for the C versions.)
>+ */
>+#ifndef __clang__
> static inline u64 mul_u32_u32(u32 a, u32 b)
> {
> 	u32 high, low;
>@@ -71,6 +77,19 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
> }
> #define mul_u32_u32 mul_u32_u32
> 
>+static inline u64 add_u64_u32(u64 a, u32 b)
>+{
>+	u32 high = a >> 32, low = a;
>+
>+	asm ("addl %[b], %[low]; adcl $0, %[high]"
>+		: [low] "+r" (low), [high] "+r" (high)
>+		: [b] "rm" (b) );
>+
>+	return low | (u64)high << 32;
>+}
>+#define add_u64_u32 add_u64_u32
>+#endif
>+
> /*
>  * __div64_32() is never called on x86, so prevent the
>  * generic definition from getting built.
>diff --git a/include/linux/math64.h b/include/linux/math64.h
>index e889d850b7f1..cc305206d89f 100644
>--- a/include/linux/math64.h
>+++ b/include/linux/math64.h
>@@ -158,6 +158,17 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
> }
> #endif
> 
>+#ifndef add_u64_u32
>+/*
>+ * Many a GCC version also messes this up.
>+ * Zero extending b and then spilling everything to stack.
>+ */
>+static inline u64 add_u64_u32(u64 a, u32 b)
>+{
>+	return a + b;
>+}
>+#endif
>+
> #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
> 
> #ifndef mul_u64_u32_shr
>diff --git a/lib/math/div64.c b/lib/math/div64.c
>index 18a9ba26c418..bb57a48ce36a 100644
>--- a/lib/math/div64.c
>+++ b/lib/math/div64.c
>@@ -186,33 +186,45 @@ EXPORT_SYMBOL(iter_div_u64_rem);
> #endif
> 
> #if !defined(mul_u64_add_u64_div_u64) || defined(test_mul_u64_add_u64_div_u64)
>-u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
>-{
>+
>+#define mul_add(a, b, c) add_u64_u32(mul_u32_u32(a, b), c)
>+
> #if defined(__SIZEOF_INT128__) && !defined(test_mul_u64_add_u64_div_u64)
> 
>+static inline u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)
>+{
> 	/* native 64x64=128 bits multiplication */
> 	u128 prod = (u128)a * b + c;
>-	u64 n_lo = prod, n_hi = prod >> 64;
>+
>+	*p_lo = prod;
>+	return prod >> 64;
>+}
> 
> #else
> 
>-	/* perform a 64x64=128 bits multiplication manually */
>-	u32 a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32;
>+static inline u64 mul_u64_u64_add_u64(u64 *p_lo, u64 a, u64 b, u64 c)
>+{
>+	/* perform a 64x64=128 bits multiplication in 32bit chunks */
> 	u64 x, y, z;
> 
> 	/* Since (x-1)(x-1) + 2(x-1) == x.x - 1 two u32 can be added to a u64 */
>-	x = (u64)a_lo * b_lo + (u32)c;
>-	y = (u64)a_lo * b_hi + (u32)(c >> 32);
>-	y += (u32)(x >> 32);
>-	z = (u64)a_hi * b_hi + (u32)(y >> 32);
>-	y = (u64)a_hi * b_lo + (u32)y;
>-	z += (u32)(y >> 32);
>-	x = (y << 32) + (u32)x;
>-
>-	u64 n_lo = x, n_hi = z;
>+	x = mul_add(a, b, c);
>+	y = mul_add(a, b >> 32, c >> 32);
>+	y = add_u64_u32(y, x >> 32);
>+	z = mul_add(a >> 32, b >> 32, y >> 32);
>+	y = mul_add(a >> 32, b, y);
>+	*p_lo = (y << 32) + (u32)x;
>+	return add_u64_u32(z, y >> 32);
>+}
> 
> #endif
> 
>+u64 mul_u64_add_u64_div_u64(u64 a, u64 b, u64 c, u64 d)
>+{
>+	u64 n_lo, n_hi;
>+
>+	n_hi = mul_u64_u64_add_u64(&n_lo, a, b, c);
>+
> 	if (!n_hi)
> 		return div64_u64(n_lo, d);
> 

By the way, have you filed gcc bug reports for this?
Re: [PATCH v5 next 7/9] lib: mul_u64_u64_div_u64() optimise multiply on 32bit x86
Posted by David Laight 1 month, 1 week ago
On Wed, 05 Nov 2025 15:45:29 -0800
"H. Peter Anvin" <hpa@zytor.com> wrote:

> On November 5, 2025 12:10:33 PM PST, David Laight <david.laight.linux@gmail.com> wrote:
> >gcc generates horrid code for both ((u64)u32_a * u32_b) and (u64_a + u32_b).
> >As well as the extra instructions, it can generate a lot of spills to stack
> >(including spills of constant zeros and even multiplies by constant zero).
> >
> >mul_u32_u32() already exists to optimise the multiply.
> >Add a similar add_u64_u32() for the addition.
> >Disable both for clang - it generates better code without them.
> >
> >Move the 64x64 => 128 multiply into a static inline helper function
> >for code clarity.
> >No need for the a/b_hi/lo variables, the implicit casts on the function
> >calls do the work for us.
> >Should have minimal effect on the generated code.
> >
> >Use mul_u32_u32() and add_u64_u32() in the 64x64 => 128 multiply
> >in mul_u64_add_u64_div_u64().
> >
> >Signed-off-by: David Laight <david.laight.linux@gmail.com>
> >Reviewed-by: Nicolas Pitre <npitre@baylibre.com>
> >---
> >
> >Changes for v4:
> >- merge in patch 8.
> >- Add comments about gcc being 'broken' for mixed 32/64 bit maths.
> >  clang doesn't have the same issues.
> >- Use a #define for mul_add() to avoid 'defined but not used'
> >  errors.
> >
> > arch/x86/include/asm/div64.h | 19 +++++++++++++++++
> > include/linux/math64.h       | 11 ++++++++++
> > lib/math/div64.c             | 40 +++++++++++++++++++++++-------------
> > 3 files changed, 56 insertions(+), 14 deletions(-)
> >
> >diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
> >index 6d8a3de3f43a..30fd06ede751 100644
> >--- a/arch/x86/include/asm/div64.h
> >+++ b/arch/x86/include/asm/div64.h
> >@@ -60,6 +60,12 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
> > }
> > #define div_u64_rem	div_u64_rem
> > 
> >+/*
> >+ * gcc tends to zero extend 32bit values and do full 64bit maths.
> >+ * Define asm functions that avoid this.
> >+ * (clang generates better code for the C versions.)
> >+ */
> >+#ifndef __clang__
> > static inline u64 mul_u32_u32(u32 a, u32 b)
> > {
> > 	u32 high, low;
> >@@ -71,6 +77,19 @@ static inline u64 mul_u32_u32(u32 a, u32 b)
> > }
> > #define mul_u32_u32 mul_u32_u32
> > 
> >+static inline u64 add_u64_u32(u64 a, u32 b)
> >+{
> >+	u32 high = a >> 32, low = a;
> >+
> >+	asm ("addl %[b], %[low]; adcl $0, %[high]"
> >+		: [low] "+r" (low), [high] "+r" (high)
> >+		: [b] "rm" (b) );
> >+
> >+	return low | (u64)high << 32;
> >+}
> >+#define add_u64_u32 add_u64_u32
> >+#endif
...
> 
> By the way, have you filed gcc bug reports for this?

As in the need for the asm() above?
No...
I doubt one was filed when the mul version was added either.
ISTR that some very recent gcc versions were a bit better, but it depends
on minor code changes and compiler options.

I suspect that internally gcc sometimes keeps a 64bit value as two 32bit
ones, but at other times it is assigned to a 64bit internal register.
If the latter happens it always promotes a 32bit value to 64 bits and
assigns to another 64bit register.
At that point it won't split the 64bit registers - so a lot of spills to
stack happen when it tries to assign real registers.
So breathe on an 'A' (dx:ax) constraint and the generated code is horrid.

Even the lo | (u64)hi << 32 can generate 'or' instructions.

The same happens for int128 on 64bit.

	David
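
To make those last two observations concrete, a tiny illustrative fragment
(not from the patch; u32/u64 as in <linux/types.h>, __int128 support assumed):

static inline u64 combine(u32 lo, u32 hi)
{
        /*
         * Ideally this is just a register pair on 32bit x86, but gcc can
         * emit real shift/or instructions for it.
         */
        return lo | (u64)hi << 32;
}

static inline unsigned __int128 combine128(u64 lo, u64 hi)
{
        /* the analogous 64bit pattern that can misbehave with int128 */
        return lo | (unsigned __int128)hi << 64;
}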