From: Jisheng Zhang <jszhang@kernel.org>
'asm goto' generates noticeably better code since we no longer need to
test an error value after each access; on a fault, the exception
handler jumps directly to the error-handling label.
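For illustration, a minimal self-contained sketch of the two patterns the
diff below switches between. The names (put_word_err, put_word_label,
store_old, store_new) are hypothetical and not the kernel macros; the
exception-table fixup the real macros emit is reduced to a comment, so
only the control-flow difference is visible. Assumes a riscv64 compiler
with 'asm goto' support.

#include <errno.h>

/*
 * Old pattern: the faulting store reports failure through an error
 * variable that every caller has to test afterwards.  The destination
 * is listed as an "m" input to mirror the kernel macros.
 */
#define put_word_err(val, ptr, err)				\
do {								\
	asm volatile("1:	sw %z1, %2\n"			\
		     "2:\n"					\
		     /* exception table entry: 1b -> 2b, sets %0 */ \
		     : "+r" (err)				\
		     : "rJ" (val), "m" (*(ptr)));		\
} while (0)

/*
 * New pattern: 'asm goto' takes a C label; on a fault the exception
 * handler resumes at that label, so no error flag and no test are
 * needed on the success path.
 */
#define put_word_label(val, ptr, label)				\
do {								\
	asm goto("1:	sw %z0, %1\n"				\
		 /* exception table entry: 1b -> %l2 */		\
		 : : "rJ" (val), "m" (*(ptr)) : : label);	\
} while (0)

int store_old(unsigned int val, unsigned int *dst)
{
	long err = 0;

	put_word_err(val, dst, err);
	if (err)		/* extra test and branch on every call */
		return -EFAULT;
	return 0;
}

int store_new(unsigned int val, unsigned int *dst)
{
	put_word_label(val, dst, fault);
	return 0;
fault:				/* reached only via the exception table */
	return -EFAULT;
}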
Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
Signed-off-by: Cyril Bur <cyrilbur@tenstorrent.com>
---
arch/riscv/include/asm/uaccess.h | 70 +++++++++++++++-----------------
1 file changed, 33 insertions(+), 37 deletions(-)
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 84b084e388a7..916df594ef3c 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -181,61 +181,66 @@ do { \
((x) = (__force __typeof__(x))0, -EFAULT); \
})
-#define __put_user_asm(insn, x, ptr, err) \
+#define __put_user_asm(insn, x, ptr, label) \
do { \
__typeof__(*(ptr)) __x = x; \
- __asm__ __volatile__ ( \
+ asm goto( \
"1:\n" \
- " " insn " %z1, %2\n" \
- "2:\n" \
- _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %0) \
- : "+r" (err) \
- : "rJ" (__x), "m"(*(ptr))); \
+ " " insn " %z0, %1\n" \
+ _ASM_EXTABLE(1b, %l2) \
+ : : "rJ" (__x), "m"(*(ptr)) : : label); \
} while (0)
#ifdef CONFIG_64BIT
-#define __put_user_8(x, ptr, err) \
- __put_user_asm("sd", x, ptr, err)
+#define __put_user_8(x, ptr, label) \
+ __put_user_asm("sd", x, ptr, label)
#else /* !CONFIG_64BIT */
-#define __put_user_8(x, ptr, err) \
+#define __put_user_8(x, ptr, label) \
do { \
u32 __user *__ptr = (u32 __user *)(ptr); \
u64 __x = (__typeof__((x)-(x)))(x); \
- __asm__ __volatile__ ( \
+ asm goto( \
"1:\n" \
- " sw %z1, %3\n" \
+ " sw %z0, %2\n" \
"2:\n" \
- " sw %z2, %4\n" \
- "3:\n" \
- _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) \
- _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) \
- : "+r" (err) \
- : "rJ" (__x), "rJ" (__x >> 32), \
+ " sw %z1, %3\n" \
+ _ASM_EXTABLE(1b, %l4) \
+ _ASM_EXTABLE(2b, %l4) \
+ : : "rJ" (__x), "rJ" (__x >> 32), \
"m" (__ptr[__LSW]), \
- "m" (__ptr[__MSW])); \
+ "m" (__ptr[__MSW]) : : label); \
} while (0)
#endif /* CONFIG_64BIT */
-#define __put_user_nocheck(x, __gu_ptr, __pu_err) \
+#define __put_user_nocheck(x, __gu_ptr, label) \
do { \
switch (sizeof(*__gu_ptr)) { \
case 1: \
- __put_user_asm("sb", (x), __gu_ptr, __pu_err); \
+ __put_user_asm("sb", (x), __gu_ptr, label); \
break; \
case 2: \
- __put_user_asm("sh", (x), __gu_ptr, __pu_err); \
+ __put_user_asm("sh", (x), __gu_ptr, label); \
break; \
case 4: \
- __put_user_asm("sw", (x), __gu_ptr, __pu_err); \
+ __put_user_asm("sw", (x), __gu_ptr, label); \
break; \
case 8: \
- __put_user_8((x), __gu_ptr, __pu_err); \
+ __put_user_8((x), __gu_ptr, label); \
break; \
default: \
BUILD_BUG(); \
} \
} while (0)
+#define __put_user_error(x, ptr, err) \
+do { \
+ __label__ err_label; \
+ __put_user_nocheck(x, ptr, err_label); \
+ break; \
+err_label: \
+ (err) = -EFAULT; \
+} while (0)
+
/**
* __put_user: - Write a simple value into user space, with less checking.
* @x: Value to copy to user space.
@@ -266,7 +271,7 @@ do { \
__chk_user_ptr(__gu_ptr); \
\
__enable_user_access(); \
- __put_user_nocheck(__val, __gu_ptr, __pu_err); \
+ __put_user_error(__val, __gu_ptr, __pu_err); \
__disable_user_access(); \
\
__pu_err; \
@@ -340,13 +345,7 @@ do { \
} while (0)
#define __put_kernel_nofault(dst, src, type, err_label) \
-do { \
- long __kr_err = 0; \
- \
- __put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err); \
- if (unlikely(__kr_err)) \
- goto err_label; \
-} while (0)
+ __put_user_nocheck(*((type *)(src)), (type *)(dst), err_label)
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
@@ -361,11 +360,8 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long enabled) { }
-#define unsafe_put_user(x, ptr, label) do { \
- long __kr_err = 0; \
- __put_user_nocheck(x, (ptr), __kr_err); \
- if (__kr_err) goto label; \
-} while (0)
+#define unsafe_put_user(x, ptr, label) \
+ __put_user_nocheck(x, (ptr), label)
#define unsafe_get_user(x, ptr, label) do { \
long __kr_err = 0; \
--
2.34.1