The byte copy for the unaligned head covers at most SZREG-1 bytes, so
it's better to take the word_copy path whenever size >= (SZREG-1 +
word_copy stride size), which equals 9*SZREG-1.
Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
---
arch/riscv/lib/uaccess.S | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index bc22c078aba8..2e665f8f8fcc 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -44,7 +44,7 @@ SYM_FUNC_START(fallback_scalar_usercopy)
* Use byte copy only if too small.
* SZREG holds 4 for RV32 and 8 for RV64
*/
- li a3, 9*SZREG /* size must be larger than size in word_copy */
+ li a3, 9*SZREG-1 /* size must >= (word_copy stride + SZREG-1) */
bltu a2, a3, .Lbyte_copy_tail
/*
--
2.25.1
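
As a sanity check of the arithmetic in the commit message: a minimal C
sketch of the threshold logic, assuming word_copy moves 8*SZREG bytes per
iteration (the stride implied by the 9*SZREG constant). takes_word_copy
and WORD_COPY_STRIDE are illustrative names for this sketch, not kernel
code.

#include <stdio.h>
#include <stddef.h>

#define SZREG 8                       /* 8 on RV64; 4 on RV32 */
#define WORD_COPY_STRIDE (8 * SZREG)  /* assumed bytes per word_copy loop */

/* Mirror of the patched check: take word_copy iff size >= 9*SZREG-1. */
static int takes_word_copy(size_t size)
{
	/* Aligning the head consumes at most SZREG-1 bytes, so this
	 * bound leaves at least one full stride after alignment. */
	return size >= (SZREG - 1) + WORD_COPY_STRIDE; /* == 9*SZREG-1 */
}

int main(void)
{
	size_t threshold = 9 * SZREG - 1; /* 71 on RV64, 35 on RV32 */

	printf("size %zu -> %s\n", threshold - 1,
	       takes_word_copy(threshold - 1) ? "word_copy" : "byte_copy");
	printf("size %zu -> %s\n", threshold,
	       takes_word_copy(threshold) ? "word_copy" : "byte_copy");
	return 0;
}

At the boundary this prints "size 70 -> byte_copy" and "size 71 ->
word_copy" on RV64, matching the patched bltu: sizes below 9*SZREG-1 fall
through to .Lbyte_copy_tail, anything at or above it is guaranteed one
full word_copy stride even in the worst-case unaligned head.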
Hi Xiao,

On 13/03/2024 10:19, Xiao Wang wrote:
> The byte copy for the unaligned head covers at most SZREG-1 bytes, so
> it's better to take the word_copy path whenever size >= (SZREG-1 +
> word_copy stride size), which equals 9*SZREG-1.
>
> Signed-off-by: Xiao Wang <xiao.w.wang@intel.com>
> ---
>  arch/riscv/lib/uaccess.S | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
> index bc22c078aba8..2e665f8f8fcc 100644
> --- a/arch/riscv/lib/uaccess.S
> +++ b/arch/riscv/lib/uaccess.S
> @@ -44,7 +44,7 @@ SYM_FUNC_START(fallback_scalar_usercopy)
>  * Use byte copy only if too small.
>  * SZREG holds 4 for RV32 and 8 for RV64
>  */
> - li a3, 9*SZREG /* size must be larger than size in word_copy */
> + li a3, 9*SZREG-1 /* size must >= (word_copy stride + SZREG-1) */
> bltu a2, a3, .Lbyte_copy_tail
>
> /*

This looks good to me:

Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>

Thanks,

Alex