Since commit aefbab8e77eb ("arm64: fpsimd: Preserve/restore kernel mode
NEON at context switch"), kernel-mode NEON sections have been
preemptible on arm64. And since commit 7dadeaa6e851 ("sched: Further
restrict the preemption modes"), voluntary preemption is no longer
supported on arm64. Therefore, there's no longer any need to
limit the length of kernel-mode NEON sections on arm64.
Simplify the ChaCha code accordingly.
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
---
lib/crypto/arm64/chacha.h | 16 ++++------------
1 file changed, 4 insertions(+), 12 deletions(-)
diff --git a/lib/crypto/arm64/chacha.h b/lib/crypto/arm64/chacha.h
index ca8c6a8b0578..c6f8ddf98e2d 100644
--- a/lib/crypto/arm64/chacha.h
+++ b/lib/crypto/arm64/chacha.h
@@ -34,13 +34,13 @@ asmlinkage void hchacha_block_neon(const struct chacha_state *state,
u32 out[HCHACHA_OUT_WORDS], int nrounds);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
static void chacha_doneon(struct chacha_state *state, u8 *dst, const u8 *src,
- int bytes, int nrounds)
+ unsigned int bytes, int nrounds)
{
- while (bytes > 0) {
+ while (bytes) {
int l = min(bytes, CHACHA_BLOCK_SIZE * 5);
if (l <= CHACHA_BLOCK_SIZE) {
u8 buf[CHACHA_BLOCK_SIZE];
@@ -74,20 +74,12 @@ static void chacha_crypt_arch(struct chacha_state *state, u8 *dst,
{
if (!static_branch_likely(&have_neon) || bytes <= CHACHA_BLOCK_SIZE ||
!crypto_simd_usable())
return chacha_crypt_generic(state, dst, src, bytes, nrounds);
- do {
- unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
-
- scoped_ksimd()
- chacha_doneon(state, dst, src, todo, nrounds);
-
- bytes -= todo;
- src += todo;
- dst += todo;
- } while (bytes);
+ scoped_ksimd()
+ chacha_doneon(state, dst, src, bytes, nrounds);
}
#define chacha_mod_init_arch chacha_mod_init_arch
static void chacha_mod_init_arch(void)
{
--
2.53.0