The LoongArch platform is a 64-bit system which supports 8-byte memory
accesses, but the generic checksum functions use only 4-byte accesses.
This patch adds an 8-byte memory access optimization for the checksum
functions on LoongArch; the code is ported from arm64.

When network hardware checksumming is disabled, iperf performance
improves by about 10% with this patch.
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
arch/loongarch/include/asm/checksum.h | 50 +++++++++
arch/loongarch/lib/Makefile | 2 +-
arch/loongarch/lib/csum.c | 142 ++++++++++++++++++++++++++
3 files changed, 193 insertions(+), 1 deletion(-)
create mode 100644 arch/loongarch/include/asm/checksum.h
create mode 100644 arch/loongarch/lib/csum.c
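
Background note, not part of the patch: the diff below leans on an
end-around-carry idiom, where a 128-bit intermediate catches the carry
out of a 64-bit add and folds it back into the low bits, preserving the
ones'-complement sum. A minimal user-space sketch of that idiom follows;
the main() demo is illustrative only and assumes a compiler with the
__uint128_t extension (GCC/Clang):

#include <stdint.h>
#include <stdio.h>

/* Same shape as the patch's accumulate(): add two 64-bit values and
 * fold the carry-out (bit 64 of the 128-bit intermediate) back in. */
static uint64_t accumulate(uint64_t sum, uint64_t data)
{
        __uint128_t tmp = (__uint128_t)sum + data;

        return (uint64_t)(tmp + (tmp >> 64));
}

int main(void)
{
        /* 0xffff...ffff + 2 wraps to 1 with a carry of 1; folding the
         * carry back in yields 2, as ones'-complement addition requires. */
        printf("%llx\n", (unsigned long long)accumulate(~0ULL, 2));
        return 0;
}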
diff --git a/arch/loongarch/include/asm/checksum.h b/arch/loongarch/include/asm/checksum.h
new file mode 100644
index 000000000000..c9d5af206604
--- /dev/null
+++ b/arch/loongarch/include/asm/checksum.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 ARM Ltd.
+ */
+#ifndef __ASM_CHECKSUM_H
+#define __ASM_CHECKSUM_H
+
+#include <linux/in6.h>
+
+#define _HAVE_ARCH_IPV6_CSUM
+__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+                        const struct in6_addr *daddr,
+                        __u32 len, __u8 proto, __wsum sum);
+
+static inline __sum16 csum_fold(__wsum csum)
+{
+        u32 sum = (__force u32)csum;
+
+        sum += (sum >> 16) | (sum << 16);
+        return ~(__force __sum16)(sum >> 16);
+}
+#define csum_fold csum_fold
+
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+        __uint128_t tmp;
+        u64 sum;
+        int n = ihl; /* we want it signed */
+
+        tmp = *(const __uint128_t *)iph;
+        iph += 16;
+        n -= 4;
+        tmp += ((tmp >> 64) | (tmp << 64));
+        sum = tmp >> 64;
+        do {
+                sum += *(const u32 *)iph;
+                iph += 4;
+        } while (--n > 0);
+
+        sum += ((sum >> 32) | (sum << 32));
+        return csum_fold((__force __wsum)(sum >> 32));
+}
+#define ip_fast_csum ip_fast_csum
+
+extern unsigned int do_csum(const unsigned char *buff, int len);
+#define do_csum do_csum
+
+#include <asm-generic/checksum.h>
+
+#endif /* __ASM_CHECKSUM_H */
diff --git a/arch/loongarch/lib/Makefile b/arch/loongarch/lib/Makefile
index 40bde632900f..6ba6df411f90 100644
--- a/arch/loongarch/lib/Makefile
+++ b/arch/loongarch/lib/Makefile
@@ -4,4 +4,4 @@
 #
 
 lib-y += delay.o memset.o memcpy.o memmove.o \
-        clear_user.o copy_user.o dump_tlb.o unaligned.o
+        clear_user.o copy_user.o dump_tlb.o unaligned.o csum.o
diff --git a/arch/loongarch/lib/csum.c b/arch/loongarch/lib/csum.c
new file mode 100644
index 000000000000..0f7e3a5ce96a
--- /dev/null
+++ b/arch/loongarch/lib/csum.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019-2020 Arm Ltd.
+
+#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
+#include <linux/kernel.h>
+
+#include <net/checksum.h>
+
+/* Looks dumb, but generates nice-ish code */
+static u64 accumulate(u64 sum, u64 data)
+{
+        __uint128_t tmp;
+
+        tmp = (__uint128_t)sum + data;
+        return tmp + (tmp >> 64);
+}
+
+/*
+ * We over-read the buffer and this makes KASAN unhappy. Instead, disable
+ * instrumentation and call kasan explicitly.
+ */
+unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
+{
+        unsigned int offset, shift, sum;
+        const u64 *ptr;
+        u64 data, sum64 = 0;
+
+        if (unlikely(len == 0))
+                return 0;
+
+        offset = (unsigned long)buff & 7;
+        /*
+         * This is to all intents and purposes safe, since rounding down cannot
+         * result in a different page or cache line being accessed, and @buff
+         * should absolutely not be pointing to anything read-sensitive. We do,
+         * however, have to be careful not to piss off KASAN, which means using
+         * unchecked reads to accommodate the head and tail, for which we'll
+         * compensate with an explicit check up-front.
+         */
+        kasan_check_read(buff, len);
+        ptr = (u64 *)(buff - offset);
+        len = len + offset - 8;
+
+        /*
+         * Head: zero out any excess leading bytes. Shifting back by the same
+         * amount should be at least as fast as any other way of handling the
+         * odd/even alignment, and means we can ignore it until the very end.
+         */
+        shift = offset * 8;
+        data = *ptr++;
+        data = (data >> shift) << shift;
+
+        /*
+         * Body: straightforward aligned loads from here on (the paired loads
+         * underlying the quadword type still only need dword alignment). The
+         * main loop strictly excludes the tail, so the second loop will always
+         * run at least once.
+         */
+        while (unlikely(len > 64)) {
+                __uint128_t tmp1, tmp2, tmp3, tmp4;
+
+                tmp1 = *(__uint128_t *)ptr;
+                tmp2 = *(__uint128_t *)(ptr + 2);
+                tmp3 = *(__uint128_t *)(ptr + 4);
+                tmp4 = *(__uint128_t *)(ptr + 6);
+
+                len -= 64;
+                ptr += 8;
+
+                /* This is the "don't dump the carry flag into a GPR" idiom */
+                tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+                tmp2 += (tmp2 >> 64) | (tmp2 << 64);
+                tmp3 += (tmp3 >> 64) | (tmp3 << 64);
+                tmp4 += (tmp4 >> 64) | (tmp4 << 64);
+                tmp1 = ((tmp1 >> 64) << 64) | (tmp2 >> 64);
+                tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+                tmp3 = ((tmp3 >> 64) << 64) | (tmp4 >> 64);
+                tmp3 += (tmp3 >> 64) | (tmp3 << 64);
+                tmp1 = ((tmp1 >> 64) << 64) | (tmp3 >> 64);
+                tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+                tmp1 = ((tmp1 >> 64) << 64) | sum64;
+                tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+                sum64 = tmp1 >> 64;
+        }
+        while (len > 8) {
+                __uint128_t tmp;
+
+                sum64 = accumulate(sum64, data);
+                tmp = *(__uint128_t *)ptr;
+
+                len -= 16;
+                ptr += 2;
+
+                data = tmp >> 64;
+                sum64 = accumulate(sum64, tmp);
+        }
+        if (len > 0) {
+                sum64 = accumulate(sum64, data);
+                data = *ptr;
+                len -= 8;
+        }
+        /*
+         * Tail: zero any over-read bytes similarly to the head, again
+         * preserving odd/even alignment.
+         */
+        shift = len * -8;
+        data = (data << shift) >> shift;
+        sum64 = accumulate(sum64, data);
+
+        /* Finally, folding */
+        sum64 += (sum64 >> 32) | (sum64 << 32);
+        sum = sum64 >> 32;
+        sum += (sum >> 16) | (sum << 16);
+        if (offset & 1)
+                return (u16)swab32(sum);
+
+        return sum >> 16;
+}
+
+__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+                        const struct in6_addr *daddr,
+                        __u32 len, __u8 proto, __wsum csum)
+{
+        __uint128_t src, dst;
+        u64 sum = (__force u64)csum;
+
+        src = *(const __uint128_t *)saddr->s6_addr;
+        dst = *(const __uint128_t *)daddr->s6_addr;
+
+        sum += (__force u32)htonl(len);
+        sum += (u32)proto << 24;
+        src += (src >> 64) | (src << 64);
+        dst += (dst >> 64) | (dst << 64);
+
+        sum = accumulate(sum, src >> 64);
+        sum = accumulate(sum, dst >> 64);
+
+        sum += ((sum >> 32) | (sum << 32));
+        return csum_fold((__force __wsum)(sum >> 32));
+}
+EXPORT_SYMBOL(csum_ipv6_magic);
--
2.27.0
From: Bibo Mao
> Sent: 07 February 2023 04:02
>
> The LoongArch platform is a 64-bit system which supports 8-byte memory
> accesses, but the generic checksum functions use only 4-byte accesses.
> This patch adds an 8-byte memory access optimization for the checksum
> functions on LoongArch; the code is ported from arm64.
>
> When network hardware checksumming is disabled, iperf performance
> improves by about 10% with this patch.
>
...
> +static inline __sum16 csum_fold(__wsum csum)
> +{
> +        u32 sum = (__force u32)csum;
> +
> +        sum += (sum >> 16) | (sum << 16);
> +        return ~(__force __sum16)(sum >> 16);
> +}

Does LoongArch have a rotate instruction?
But for everything except arm (which has a rotate+add instruction)
the best is (probably):
        (~sum - rol32(sum, 16)) >> 16

To the point where it is worth killing all the asm
versions and just using that one.

        David

-
Registered Address Lakeside, Bramley Road, Mount Farm, Milton Keynes, MK1 1PT, UK
Registration No: 1397386 (Wales)
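
In C, the fold David is suggesting looks roughly like the sketch below
(a user-space rendition; csum_fold2 is an illustrative name, and kernel
code would use the rol32() helper from <linux/bitops.h> rather than
open-coding it):

#include <stdint.h>

/* Open-coded rol32(); the kernel provides this in <linux/bitops.h>. */
static inline uint32_t rol32(uint32_t x, unsigned int amt)
{
        return (x << amt) | (x >> (32 - amt));
}

static inline uint16_t csum_fold2(uint32_t sum)
{
        /*
         * ~sum - rol32(sum, 16) equals ~(sum + rol32(sum, 16)) because
         * ~x == -x - 1, so the top 16 bits are the inverted folded sum.
         * The NOT and the rotate have no data dependency on each other,
         * unlike the add-then-invert sequence in the original version.
         */
        return (~sum - rol32(sum, 16)) >> 16;
}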
On 2023/2/8 21:12, David Laight wrote:
> From: Bibo Mao
>> Sent: 07 February 2023 04:02
>>
>> The LoongArch platform is a 64-bit system which supports 8-byte memory
>> accesses, but the generic checksum functions use only 4-byte accesses.
>> This patch adds an 8-byte memory access optimization for the checksum
>> functions on LoongArch; the code is ported from arm64.
>>
>> When network hardware checksumming is disabled, iperf performance
>> improves by about 10% with this patch.
>>
> ...
>> +static inline __sum16 csum_fold(__wsum csum)
>> +{
>> +        u32 sum = (__force u32)csum;
>> +
>> +        sum += (sum >> 16) | (sum << 16);
>> +        return ~(__force __sum16)(sum >> 16);
>> +}
>
> Does LoongArch have a rotate instruction?
> But for everything except arm (which has a rotate+add instruction)
> the best is (probably):
>         (~sum - rol32(sum, 16)) >> 16
>
> To the point where it is worth killing all the asm
> versions and just using that one.

Yeah LoongArch can do rotates, and your suggestion can indeed reduce one
insn from every invocation of csum_fold.

From this:

000000000000096c <csum_fold>:
        sum += (sum >> 16) | (sum << 16);
 96c:   004cc08c        rotri.w    $t0, $a0, 0x10
 970:   00101184        add.w      $a0, $t0, $a0
        return ~(__force __sum16)(sum >> 16);
 974:   0044c084        srli.w     $a0, $a0, 0x10
 978:   00141004        nor        $a0, $zero, $a0
}
 97c:   006f8084        bstrpick.w $a0, $a0, 0xf, 0x0
 980:   4c000020        jirl       $zero, $ra, 0

To:

0000000000000984 <csum_fold2>:
        return (~sum - rol32(sum, 16)) >> 16;
 984:   0014100c        nor        $t0, $zero, $a0
        return (x << amt) | (x >> (32 - amt));
 988:   004cc084        rotri.w    $a0, $a0, 0x10
        return (~sum - rol32(sum, 16)) >> 16;
 98c:   00111184        sub.w      $a0, $t0, $a0
}
 990:   00df4084        bstrpick.d $a0, $a0, 0x1f, 0x10
 994:   4c000020        jirl       $zero, $ra, 0

I guess Bibo would take this suggestion and check the other arches
afterwards, okay? ;-)

-- 
WANG "xen0n" Xuerui

Linux/LoongArch mailing list: https://lore.kernel.org/loongarch/
From: WANG Xuerui
> Sent: 08 February 2023 13:48
...
> Yeah LoongArch can do rotates, and your suggestion can indeed reduce one
> insn from every invocation of csum_fold.
>
> From this:
>
> 000000000000096c <csum_fold>:
>         sum += (sum >> 16) | (sum << 16);
>  96c:   004cc08c        rotri.w    $t0, $a0, 0x10
>  970:   00101184        add.w      $a0, $t0, $a0
>         return ~(__force __sum16)(sum >> 16);
>  974:   0044c084        srli.w     $a0, $a0, 0x10
>  978:   00141004        nor        $a0, $zero, $a0
> }
>  97c:   006f8084        bstrpick.w $a0, $a0, 0xf, 0x0
>  980:   4c000020        jirl       $zero, $ra, 0
>
> To:
>
> 0000000000000984 <csum_fold2>:
>         return (~sum - rol32(sum, 16)) >> 16;
>  984:   0014100c        nor        $t0, $zero, $a0
>         return (x << amt) | (x >> (32 - amt));
>  988:   004cc084        rotri.w    $a0, $a0, 0x10
>         return (~sum - rol32(sum, 16)) >> 16;
>  98c:   00111184        sub.w      $a0, $t0, $a0
> }
>  990:   00df4084        bstrpick.d $a0, $a0, 0x1f, 0x10
>  994:   4c000020        jirl       $zero, $ra, 0

It is actually slightly better than that.
In the csum_fold2 version the first two instructions
are independent - so can execute in parallel on some cpu.

        David

-
Registered Address Lakeside, Bramley Road, Mount Farm, Milton Keynes, MK1 1PT, UK
Registration No: 1397386 (Wales)
On 2023/2/8 22:19, David Laight wrote:
> From: WANG Xuerui
>> Sent: 08 February 2023 13:48
> ...
>> Yeah LoongArch can do rotates, and your suggestion can indeed reduce one
>> insn from every invocation of csum_fold.
>>
>> From this:
>>
>> 000000000000096c <csum_fold>:
>>         sum += (sum >> 16) | (sum << 16);
>>  96c:   004cc08c        rotri.w    $t0, $a0, 0x10
>>  970:   00101184        add.w      $a0, $t0, $a0
>>         return ~(__force __sum16)(sum >> 16);
>>  974:   0044c084        srli.w     $a0, $a0, 0x10
>>  978:   00141004        nor        $a0, $zero, $a0
>> }
>>  97c:   006f8084        bstrpick.w $a0, $a0, 0xf, 0x0
>>  980:   4c000020        jirl       $zero, $ra, 0
>>
>> To:
>>
>> 0000000000000984 <csum_fold2>:
>>         return (~sum - rol32(sum, 16)) >> 16;
>>  984:   0014100c        nor        $t0, $zero, $a0
>>         return (x << amt) | (x >> (32 - amt));
>>  988:   004cc084        rotri.w    $a0, $a0, 0x10
>>         return (~sum - rol32(sum, 16)) >> 16;
>>  98c:   00111184        sub.w      $a0, $t0, $a0
>> }
>>  990:   00df4084        bstrpick.d $a0, $a0, 0x1f, 0x10
>>  994:   4c000020        jirl       $zero, $ra, 0
>
> It is actually slightly better than that.
> In the csum_fold2 version the first two instructions
> are independent - so can execute in parallel on some cpu.
>
>         David

Thanks for the good suggestion. Will send the second version soon.

regards
bibo, mao