From nobody Sun Feb 8 03:58:13 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 627F538F24F for ; Tue, 13 Jan 2026 12:43:16 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1768308196; cv=none; b=q25mcI1H7bHyOTDfXMNgxzgtjoN14fbqEYqMFJB3tN0ULDSesm8SODq6ATQIeXXs/DsVcp2CESCKXlKCJAIRUxnBxuGihw65Q0CF90P1ERVneArpWEcm0q6ka6CQJLxUqeza8t/Q9GA3FVS0Li5+GtEYcRpQrSZnmbUqHEr1VPk= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1768308196; c=relaxed/simple; bh=QvvHlpNsMHZYg0o05xxg11E/+PQv6nRN4oyH5aP5cos=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=jqvDK+wi3/c6eFtgwhKPB5KhPWIEpzRlhEi/yf2FTDJ5SpBr0O+trQDjYh3uqEJlAM0P4L1ietyIkEAEryDp03ux6uOjUaH8hCsv0oSq0jjvKyDjCN2oXRpsFzs45ktmz8Ri1A9BYjpX/1RSO26RcX20dTkw5eEzf9Ufzk52uR4= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=KUCIo3TI; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="KUCIo3TI" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 59AEAC19422; Tue, 13 Jan 2026 12:43:13 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1768308195; bh=QvvHlpNsMHZYg0o05xxg11E/+PQv6nRN4oyH5aP5cos=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=KUCIo3TIElojfEs8jVB2YUPd6g5It9vKFoc0A9t0opz/APZbJkyMCorodh5fH/gdn jOvxHRlC9qM7sQvOPuQJmuLLuA8PlMt2UmEvvUAOEWc00NkWr/hBjxnABuWmHJZDEe ZcMSsJcEoQ7fhc20F1oT+H+xinFykt0JI9CRs/Tzx39v7yJd08ATZskU1hkbHHqENg 
ifB5omUCHdeQFCjZTS4KPtOgskFhzbdRnCRMNOXyxwakDpc5Ej9iOWT1cj0I3S57tK ulJkoGjBGRlk4Yi3vmFXMNJZQzN9fmxpoVvx7WYhEw8sbSEmQ2fcJ4khpJ2y5/SLwg iKo/gGKT5rNsA== From: Jisheng Zhang To: Paul Walmsley , Palmer Dabbelt , Albert Ou , Alexandre Ghiti Cc: linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org Subject: [PATCH 1/3] riscv: word-at-a-time: improve find_zero() for !RISCV_ISA_ZBB Date: Tue, 13 Jan 2026 20:24:55 +0800 Message-ID: <20260113122457.27507-2-jszhang@kernel.org> X-Mailer: git-send-email 2.51.0 In-Reply-To: <20260113122457.27507-1-jszhang@kernel.org> References: <20260113122457.27507-1-jszhang@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Current find_zero() heavily depends on fls64() for calculation. This brings non-optimal code when !RISCV_ISA_ZBB. But in word-at-a-time case, we don't have to go with fls64() code path, instead, we can fall back to the generic word-at-a-time implementation. What's more, the fls64() brings unnecessary zero-bit counting for RV32. In fact, fls() is enough. 
Before the patch: 0000000000000000 : 0: c529 beqz a0,4a <.L1> 2: 577d li a4,-1 4: 9301 srli a4,a4,0x20 6: 03f00793 li a5,63 a: 00a76463 bltu a4,a0,12 <.L3> e: 1502 slli a0,a0,0x20 10: 47fd li a5,31 0000000000000012 <.L3>: 12: 577d li a4,-1 14: 8341 srli a4,a4,0x10 16: 00a76463 bltu a4,a0,1e <.L4> 1a: 37c1 addiw a5,a5,-16 1c: 0542 slli a0,a0,0x10 000000000000001e <.L4>: 1e: 577d li a4,-1 20: 8321 srli a4,a4,0x8 22: 00a76463 bltu a4,a0,2a <.L5> 26: 37e1 addiw a5,a5,-8 28: 0522 slli a0,a0,0x8 000000000000002a <.L5>: 2a: 577d li a4,-1 2c: 8311 srli a4,a4,0x4 2e: 00a76463 bltu a4,a0,36 <.L6> 32: 37f1 addiw a5,a5,-4 34: 0512 slli a0,a0,0x4 0000000000000036 <.L6>: 36: 577d li a4,-1 38: 8309 srli a4,a4,0x2 3a: 00a76463 bltu a4,a0,42 <.L7> 3e: 37f9 addiw a5,a5,-2 40: 050a slli a0,a0,0x2 0000000000000042 <.L7>: 42: 00054563 bltz a0,4c <.L12> 46: 4037d51b sraiw a0,a5,0x3 000000000000004a <.L1>: 4a: 8082 ret 000000000000004c <.L12>: 4c: 2785 addiw a5,a5,1 4e: 4037d51b sraiw a0,a5,0x3 52: 8082 ret After the patch: 0000000000000000 : 0: 102037b7 lui a5,0x10203 4: 0792 slli a5,a5,0x4 6: 40578793 addi a5,a5,1029 # 10203405 <.L4+0x102033c5> a: 07c2 slli a5,a5,0x10 c: 60878793 addi a5,a5,1544 10: 02f50533 mul a0,a0,a5 14: 9161 srli a0,a0,0x38 16: 8082 ret 33 instructions vs 8 instructions! And this kind of instruction reduction dramatically improves the performance of the below micro-benchmark: $ cat tt.c #include #include "word-at-a-time.h" // copy and modify, e.g. remove other headers int main() { int i; unsigned long ret =3D 0; for (i =3D 0; i < 100000000; i++) ret |=3D find_zero(0xabcd123 + i); printf("%ld\n", ret); } $ gcc -O tt.c $ time ./a.out Per my test, the above micro-benchmark is improved by about 1150%! 
Signed-off-by: Jisheng Zhang --- arch/riscv/include/asm/word-at-a-time.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/riscv/include/asm/word-at-a-time.h b/arch/riscv/include/a= sm/word-at-a-time.h index 3802cda71ab7..0c8a9b337f93 100644 --- a/arch/riscv/include/asm/word-at-a-time.h +++ b/arch/riscv/include/asm/word-at-a-time.h @@ -13,6 +13,9 @@ #include #include =20 +#if !(defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_ZBB)) +#include +#else struct word_at_a_time { const unsigned long one_bits, high_bits; }; @@ -47,6 +50,8 @@ static inline unsigned long find_zero(unsigned long mask) /* The mask we created is directly usable as a bytemask */ #define zero_bytemask(mask) (mask) =20 +#endif /* !(defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_= ZBB)) */ + #ifdef CONFIG_DCACHE_WORD_ACCESS =20 /* --=20 2.51.0 From nobody Sun Feb 8 03:58:13 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 84D7638FEF5 for ; Tue, 13 Jan 2026 12:43:18 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1768308198; cv=none; b=kSm+wMe0DTmrm73d9bEkRcWfPNbjE82nU/ojafc+i4JMv0X7YzFDoWfGGDItx8KqRT4qoy/nwp9G0lc8Kb9w20L1LmqDqh6vErpyY89SoZYexjtaKFOVZQ/XSNedacu6b2bxyYFR7I2Y424IhFIMwP32a/vj8mJMHhrOzRyCXGs= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1768308198; c=relaxed/simple; bh=jy33/Tc52BzbW3MkRv/+6XiOGSKTPkOy55r39xq3KKA=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=BgqTQhAcq87iWaDMI+Y1LEVCJsHJaug9ZrMvxy1rCWhVlnB1LErzFsCjw+2IpmEOvMb+Zzs763BXxGHSjzR6Ipu5Sbo2y0tp/CP4DPWwoyDkHNaaiY5BxnVOQi26m52oX4p7ot+1i6USstsznJVYVg4eMfoE2VF/28oFQbjseUo= ARC-Authentication-Results: 
i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=uG9oa/Hx; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="uG9oa/Hx" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 68582C16AAE; Tue, 13 Jan 2026 12:43:16 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1768308198; bh=jy33/Tc52BzbW3MkRv/+6XiOGSKTPkOy55r39xq3KKA=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=uG9oa/Hxj5wlTi9C1TTXNuar1ADxBHwTZ9/+NKGpAPP0iWkeGAQC0MD0s7ueD369n rmxQny8MbrK8k202M8b+53cwRA9XBgV+14xrR6C5n6Ucrr+8KHSHyYQy+IIT260BTI 1mR6Em0EXndaYD8d52tQ5uzCkCzOwCipUYiQdIXs5+dw1EiBZULKL5TTJ0wBv+muc3 SX2Nl1h/Zc+DR4XhEczZhsbsINYwsA/YBWNRbyYWUV7PpwXEDMQQ7mbwy4MK7oefAn wR4EWBYHiTqspb4vIpDxlYu4yOxgwqFwbVmPIU9ekDXPSIEKFl0m14pDbDUOumyLXp F8sh41y6Erkxg== From: Jisheng Zhang To: Paul Walmsley , Palmer Dabbelt , Albert Ou , Alexandre Ghiti Cc: linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org Subject: [PATCH 2/3] riscv: word-at-a-time: improve find_zero() without Zbb Date: Tue, 13 Jan 2026 20:24:56 +0800 Message-ID: <20260113122457.27507-3-jszhang@kernel.org> X-Mailer: git-send-email 2.51.0 In-Reply-To: <20260113122457.27507-1-jszhang@kernel.org> References: <20260113122457.27507-1-jszhang@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Previous commit improved the find_zero() performance for !RISCV_ISA_ZBB. What about RISCV_ISA_ZBB=3Dy but the HW doesn't support Zbb? We have the same heavy generic fls64() issue. Let's improve this situation by checking Zbb extension and fall back to generic count_masked_bytes() if Zbb isn't supported. 
To remove unnecessary zero-bit counting on RV32, we also replace the 'fls64(mask) >> 3' with '!mask ? 0 : ((__fls(mask) + 1) >> 3);' We will get a similar performance improvement as the previous commit for RISCV_ISA_ZBB=3Dy but HW doesn't support Zbb. Signed-off-by: Jisheng Zhang --- arch/riscv/include/asm/word-at-a-time.h | 29 ++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/arch/riscv/include/asm/word-at-a-time.h b/arch/riscv/include/a= sm/word-at-a-time.h index 0c8a9b337f93..ca3d30741ed1 100644 --- a/arch/riscv/include/asm/word-at-a-time.h +++ b/arch/riscv/include/asm/word-at-a-time.h @@ -42,9 +42,36 @@ static inline unsigned long create_zero_mask(unsigned lo= ng bits) return bits >> 7; } =20 +#ifdef CONFIG_64BIT +/* + * Jan Achrenius on G+: microoptimized version of + * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56" + * that works for the bytemasks without having to + * mask them first. + */ +static inline long count_masked_bytes(unsigned long mask) +{ + return mask*0x0001020304050608ul >> 56; +} + +#else /* 32-bit case */ + +/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ +static inline long count_masked_bytes(long mask) +{ + /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ + long a =3D (0x0ff0001+mask) >> 23; + /* Fix the 1 for 00 case */ + return a & mask; +} +#endif + static inline unsigned long find_zero(unsigned long mask) { - return fls64(mask) >> 3; + if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBB)) + return !mask ? 
0 : ((__fls(mask) + 1) >> 3); + + return count_masked_bytes(mask); } =20 /* The mask we created is directly usable as a bytemask */ --=20 2.51.0 From nobody Sun Feb 8 03:58:13 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id CBF943921FD for ; Tue, 13 Jan 2026 12:43:20 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1768308200; cv=none; b=m7HJuS6zY+LYu2pPW1eGNN2qzF4elSDIgDzHKFmBn0bWd+pKkGCSHP40Frb7iByRBrxpv1V30N4anPk0dI/rXVXBziNxvhOxcimRVUQB4ddAoWhH7l9nHbsdMIdTD8Be8/ve9SwCcFQdKboJ8S1d5FkPp4oWYJT1yeuaWBHK3oI= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1768308200; c=relaxed/simple; bh=uVtGUyIWHyjoUK8L0H3DB3EX/vfhDouF0fbHo6dDlXk=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=bRaLKc/Sodgsh78ww17cWsEuRpnya5PKPzshHlfxI8/a4gFnLjjJsDW+9LWKBk7lGM9CP/JpEcw6DhWRu5N3d9uv7RV2o/vYOco1cHgMgkGtIAkQv0eBc1L8/kifj4wK6f12mW6jtruFoDJkVTEtYo7YzCXZVOZuH0Eg0eVnHKI= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=lEefDc4Z; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="lEefDc4Z" Received: by smtp.kernel.org (Postfix) with ESMTPSA id D97B0C2BC86; Tue, 13 Jan 2026 12:43:18 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1768308200; bh=uVtGUyIWHyjoUK8L0H3DB3EX/vfhDouF0fbHo6dDlXk=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=lEefDc4ZMT1K6SasPRUSeFTJQq1vgaHpTZQxxl61W2ga8c/33ual9q6qDBFuBXtJH 
6JtbR3fTxvP6zAEzzC3J/qXE7SRbE5kHbTba/g7Hyu5omNjM1F9mvOc6ISNysLP2fr FwmgVjaWiKZ4RUm6l6GnUFvzLIa+kLI3+KVO6geE9TLx9LjKFcqNX4ddQLS3f70M+O x8xYQ9ChMzNTl4vO/RA+az/2Xg0zgygRp13k9oAOgyC+tlkXMZaw+SPj0+PYjl00uD Ar9o4vZtRCOvjQZJ+LHA6jY9Oigu95NFwiIEmjkpw8UsqJ8dcb6pZY8GxuCI+WbliG WefgteHpR+7jw== From: Jisheng Zhang To: Paul Walmsley , Palmer Dabbelt , Albert Ou , Alexandre Ghiti Cc: linux-riscv@lists.infradead.org, linux-kernel@vger.kernel.org Subject: [PATCH 3/3] riscv: word-at-a-time: improve find_zero() for Zbb Date: Tue, 13 Jan 2026 20:24:57 +0800 Message-ID: <20260113122457.27507-4-jszhang@kernel.org> X-Mailer: git-send-email 2.51.0 In-Reply-To: <20260113122457.27507-1-jszhang@kernel.org> References: <20260113122457.27507-1-jszhang@kernel.org> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" In commit f915a3e5b018 ("arm64: word-at-a-time: improve byte count calculations for LE"), Linus improved the find_zero() for arm64 LE. Do the same optimization as he did: "do __ffs() on the intermediate value that found whether there is a zero byte, before we've actually computed the final byte mask.", so that we share the similar improvements: "The difference between the old and the new implementation is that "count_zero()" ends up scheduling better because it is being done on a value that is available earlier (before the final mask). But more importantly, it can be implemented without the insane semantics of the standard bit finding helpers that have the off-by-one issue and have to special-case the zero mask situation." 
Before the patch: 0000000000000000 : 0: c909 beqz a0,12 <.L1> 2: 60051793 clz a5,a0 6: 03f00513 li a0,63 a: 8d1d sub a0,a0,a5 c: 2505 addiw a0,a0,1 e: 4035551b sraiw a0,a0,0x3 0000000000000012 <.L1>: 12: 8082 ret After the patch: 0000000000000000 : 0: 60151513 ctz a0,a0 4: 810d srli a0,a0,0x3 6: 8082 ret 7 instructions vs 3 instructions! As can be seen, on RV64 w/ Zbb, the new "find_zero()" ends up just "ctz" plus the shift right that then ends up being subsumed by the "add to final length". But I have no HW platform which supports Zbb, so I can't get the performance improvement numbers by the last patch, only built and tested the patch on QEMU. Signed-off-by: Jisheng Zhang --- arch/riscv/include/asm/word-at-a-time.h | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/riscv/include/asm/word-at-a-time.h b/arch/riscv/include/a= sm/word-at-a-time.h index ca3d30741ed1..8c5ac6a72f7f 100644 --- a/arch/riscv/include/asm/word-at-a-time.h +++ b/arch/riscv/include/asm/word-at-a-time.h @@ -38,6 +38,9 @@ static inline unsigned long prep_zero_mask(unsigned long = val, =20 static inline unsigned long create_zero_mask(unsigned long bits) { + if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBB)) + return bits; + bits =3D (bits - 1) & ~bits; return bits >> 7; } @@ -69,13 +72,19 @@ static inline long count_masked_bytes(long mask) static inline unsigned long find_zero(unsigned long mask) { if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBB)) - return !mask ? 0 : ((__fls(mask) + 1) >> 3); + return __ffs(mask) >> 3; =20 return count_masked_bytes(mask); } =20 -/* The mask we created is directly usable as a bytemask */ -#define zero_bytemask(mask) (mask) +static inline unsigned long zero_bytemask(unsigned long bits) +{ + if (!riscv_has_extension_likely(RISCV_ISA_EXT_ZBB)) + return bits; + + bits =3D (bits - 1) & ~bits; + return bits >> 7; +} =20 #endif /* !(defined(CONFIG_RISCV_ISA_ZBB) && defined(CONFIG_TOOLCHAIN_HAS_= ZBB)) */ =20 --=20 2.51.0