x86/csum: Remove unnecessary odd handling

x86/csum: Remove unnecessary odd handling
Posted by Noah Goldstein 2 years, 3 months ago
The special case for odd-aligned buffers is unnecessary and mostly
just adds overhead. Aligned buffers are the expectation, and even for
unaligned buffers, the only case that was helped is a buffer 1 byte
off from word alignment, which is only ~1/7 of the misaligned cases.
Overall it seems highly unlikely to be worth the extra branch.

It was left in the previous perf improvement patch because I was
erroneously comparing the exact output of `csum_partial(...)`, but
really we only need `csum_fold(csum_partial(...))` to match, so it's
safe to remove.

All csum kunit tests pass.

Signed-off-by: Noah Goldstein <goldstein.w.n@gmail.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
---
 arch/x86/lib/csum-partial_64.c | 37 ++--------------------------------
 1 file changed, 2 insertions(+), 35 deletions(-)

diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
index cea25ca8b8cf..d06112e98893 100644
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -11,28 +11,6 @@
 #include <asm/checksum.h>
 #include <asm/word-at-a-time.h>
 
-static inline unsigned short from32to16(unsigned a)
-{
-	unsigned short b = a >> 16;
-	asm("addw %w2,%w0\n\t"
-	    "adcw $0,%w0\n"
-	    : "=r" (b)
-	    : "0" (b), "r" (a));
-	return b;
-}
-
-static inline __wsum csum_tail(u64 temp64, int odd)
-{
-	unsigned int result;
-
-	result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
-	if (unlikely(odd)) {
-		result = from32to16(result);
-		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
-	}
-	return (__force __wsum)result;
-}
-
 /*
  * Do a checksum on an arbitrary memory area.
  * Returns a 32bit checksum.
@@ -47,17 +25,6 @@ static inline __wsum csum_tail(u64 temp64, int odd)
 __wsum csum_partial(const void *buff, int len, __wsum sum)
 {
 	u64 temp64 = (__force u64)sum;
-	unsigned odd;
-
-	odd = 1 & (unsigned long) buff;
-	if (unlikely(odd)) {
-		if (unlikely(len == 0))
-			return sum;
-		temp64 = ror32((__force u32)sum, 8);
-		temp64 += (*(unsigned char *)buff << 8);
-		len--;
-		buff++;
-	}
 
 	/*
 	 * len == 40 is the hot case due to IPv6 headers, but annotating it likely()
@@ -73,7 +40,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
 		    "adcq $0,%[res]"
 		    : [res] "+r"(temp64)
 		    : [src] "r"(buff), "m"(*(const char(*)[40])buff));
-		return csum_tail(temp64, odd);
+		return add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
 	}
 	if (unlikely(len >= 64)) {
 		/*
@@ -143,7 +110,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
 		    : [res] "+r"(temp64)
 		    : [trail] "r"(trail));
 	}
-	return csum_tail(temp64, odd);
+	return add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
 }
 EXPORT_SYMBOL(csum_partial);
 
-- 
2.34.1
RE: x86/csum: Remove unnecessary odd handling
Posted by David Laight 2 years, 3 months ago
From: Noah Goldstein
> Sent: 01 September 2023 23:21
...
> +	return add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);

The generic C alternative:
	return (temp64 + ror64(temp64, 32)) >> 32;
is the same number of instructions but might get
better scheduling.
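
As a sanity check of the equivalence, here is a minimal user-space
sketch (the helpers are open-coded stand-ins for the kernel's
add32_with_carry() asm and the <linux/bitops.h> ror64(), so treat it
as a sketch rather than the real thing):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* What the x86 add32_with_carry() asm computes: a 32-bit
	 * end-around-carry add of the two halves of t. */
	static uint32_t fold_add32(uint64_t t)
	{
		uint64_t s = (t >> 32) + (t & 0xffffffff);

		return (uint32_t)s + (uint32_t)(s >> 32); /* add carry back */
	}

	static uint64_t my_ror64(uint64_t x, unsigned int n)
	{
		return (x >> n) | (x << (64 - n));
	}

	/* The suggested generic C alternative. */
	static uint32_t fold_ror(uint64_t t)
	{
		return (uint32_t)((t + my_ror64(t, 32)) >> 32);
	}

	int main(void)
	{
		static const uint64_t samples[] = {
			0, 1, 0xffffffffULL, 0x100000000ULL,
			0x1ffffffffULL, 0xffffffff00000001ULL,
			0xffffffffffffffffULL,
		};
		size_t i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			assert(fold_add32(samples[i]) == fold_ror(samples[i]));
		puts("folds agree");
		return 0;
	}

The carry that adcl would add back is produced by the carry out of
the low half of the 64-bit add instead, so the two folds agree for
all inputs; the ror64() form also avoids the adcl flags dependency,
which is presumably where the better scheduling would come from.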

The C version of csum_fold() from arch/arc/include/asm/checksum.h
is also better than the x86 asm version.
(And also pretty much all the other architecture-dependent
copies.)
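
For reference, a user-space sketch of that generic C fold, written
from memory of the ARC version (the kernel's __wsum/__sum16 types and
__force casts dropped), checked against a straightforward
end-around-carry fold:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Generic-C fold in the style of the ARC csum_fold(). */
	static uint16_t csum_fold_c(uint32_t s)
	{
		uint32_t r = (s << 16) | (s >> 16);	/* swap the halves */

		s = ~s;
		s -= r;		/* high half now holds ~(lo + hi + carry) */
		return (uint16_t)(s >> 16);
	}

	/* Straightforward reference fold for comparison. */
	static uint16_t csum_fold_ref(uint32_t s)
	{
		uint32_t t = (s & 0xffff) + (s >> 16);

		t = (t & 0xffff) + (t >> 16);	/* add the carry back */
		return (uint16_t)~t;
	}

	int main(void)
	{
		uint32_t s = 0;

		do {	/* exhaustive over all 2^32 inputs */
			assert(csum_fold_c(s) == csum_fold_ref(s));
		} while (s++ != 0xffffffffu);
		puts("csum_fold variants agree");
		return 0;
	}

Rotate, not, subtract: three plain ALU ops with no addl/adcl flags
chain, which is presumably why the C versions come out ahead of the
asm ones.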

	David

Re: x86/csum: Remove unnecessary odd handling
Posted by Noah Goldstein 2 years, 2 months ago
On Wed, Sep 6, 2023 at 9:38 AM David Laight <David.Laight@aculab.com> wrote:
>
> From: Noah Goldstein
> > Sent: 01 September 2023 23:21
> ...
> > +     return add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
>
> The generic C alternative:
>         return (temp64 + ror64(temp64, 32)) >> 32;
> is the same number of instructions but might get
> better scheduling.
>
Sorry, I missed this.
Bright idea :)
Adding it in the new version + your Reviewed-by tag. Then hopefully this can
get in...

> The C version of csum_fold() from arch/arc/include/asm/checksum.h
> is also better than the x86 asm version.
> (And also pretty much all the other architecture-dependent
> copies.)
>
>         David
>
RE: x86/csum: Remove unnecessary odd handling
Posted by David Laight 2 years, 3 months ago
From: Noah Goldstein
> Sent: 01 September 2023 23:21
> 
> The special case for odd-aligned buffers is unnecessary and mostly
> just adds overhead. Aligned buffers are the expectation, and even for
> unaligned buffers, the only case that was helped is a buffer 1 byte
> off from word alignment, which is only ~1/7 of the misaligned cases.
> Overall it seems highly unlikely to be worth the extra branch.
> 
> It was left in the previous perf improvement patch because I was
> erroneously comparing the exact output of `csum_partial(...)`, but
> really we only need `csum_fold(csum_partial(...))` to match, so it's
> safe to remove.

This is pretty much the same patch I sent in Dec 2021...

Reviewed-by: David Laight <david.laight@aculab.com>

...
