Currently, when a function like strncpy_from_user() is called,
the userspace access protection is disabled and re-enabled
for every word read.

By implementing user_access_begin() and friends, the protection
is disabled once at the beginning of the copy and re-enabled at
the end.
The __inttype macro is borrowed from the x86 implementation.
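
For reference, this is roughly what the macro resolves to for a few
common types (an illustrative sketch only, not part of the patch; the
declarations are made up):

	char c; short s; int i; long l;
	__inttype(c) a;	/* unsigned char */
	__inttype(s) b;	/* unsigned short */
	__inttype(i) d;	/* unsigned int */
	__inttype(l) e;	/* unsigned long on RV64; 0ULL is only the
			   final fallback */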
Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
---
arch/riscv/include/asm/uaccess.h | 63 ++++++++++++++++++++++++++++++++
1 file changed, 63 insertions(+)
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 72ec1d9bd3f3..09d4ca37522c 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -28,6 +28,19 @@
#define __disable_user_access() \
__asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")
+/*
+ * This is the smallest unsigned integer type that can fit a value
+ * (up to 'long long')
+ */
+#define __inttype(x) __typeof__( \
+ __typefits(x,char, \
+ __typefits(x,short, \
+ __typefits(x,int, \
+ __typefits(x,long,0ULL)))))
+
+#define __typefits(x,type,not) \
+ __builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)
+
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
@@ -335,6 +348,56 @@ do { \
goto err_label; \
} while (0)
+static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
+{
+ if (unlikely(!access_ok(ptr, len)))
+ return false;
+ __enable_user_access();
+ return true;
+}
+#define user_access_begin(a, b) user_access_begin(a, b)
+#define user_access_end() __disable_user_access()
+
+static inline unsigned long user_access_save(void) { return 0UL; }
+static inline void user_access_restore(unsigned long enabled) { }
+
+#define unsafe_put_user(x, ptr, label) do { \
+ long __kr_err = 0; \
+ __put_user_nocheck(x, (ptr), __kr_err); \
+ if (__kr_err) goto label; \
+} while (0)
+
+#define unsafe_get_user(x, ptr, label) do { \
+ long __kr_err = 0; \
+ __inttype(*(ptr)) __gu_val; \
+ __get_user_nocheck(__gu_val, (ptr), __kr_err); \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ if (__kr_err) goto label; \
+} while (0)
+
+/*
+ * We want the unsafe accessors to always be inlined and use
+ * the error labels - thus the macro games.
+ */
+#define unsafe_copy_loop(dst, src, len, type, label) \
+ while (len >= sizeof(type)) { \
+ unsafe_put_user(*(type *)(src),(type __user *)(dst),label); \
+ dst += sizeof(type); \
+ src += sizeof(type); \
+ len -= sizeof(type); \
+ }
+
+#define unsafe_copy_to_user(_dst,_src,_len,label) \
+do { \
+ char __user *__ucu_dst = (_dst); \
+ const char *__ucu_src = (_src); \
+ size_t __ucu_len = (_len); \
+ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label); \
+ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label); \
+ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label); \
+ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \
+} while (0)
+
#else /* CONFIG_MMU */
#include <asm-generic/uaccess.h>
#endif /* CONFIG_MMU */
--
2.43.0
On 25/6/2024 2:04 pm, Jisheng Zhang wrote:
> Currently, when a function like strncpy_from_user() is called,
> the userspace access protection is disabled and re-enabled
> for every word read.
>
> By implementing user_access_begin() and friends, the protection
> is disabled once at the beginning of the copy and re-enabled at
> the end.
>
> The __inttype macro is borrowed from the x86 implementation.
>
Beat me to it; I've written an almost identical patch. The performance
improvement where the unsafe_ variants are used is very good, even
without the rest of the series.
Reviewed-by: Cyril Bur <cyrilbur@tenstorrent.com>
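
To spell out where the win comes from: with these helpers a
word-at-a-time copy toggles SUM once per call instead of once per
word. A rough sketch of the calling pattern (a made-up example, not
code from this series):

	static long copy_words_example(unsigned long *dst,
				       const unsigned long __user *src,
				       size_t n)
	{
		size_t i;

		if (!user_access_begin(src, n * sizeof(*src)))
			return -EFAULT;	/* one access_ok() check */
		for (i = 0; i < n; i++)	/* SUM stays set across the loop */
			unsafe_get_user(dst[i], &src[i], Efault);
		user_access_end();	/* one csrc of SR_SUM */
		return 0;
	Efault:
		user_access_end();
		return -EFAULT;
	}

With plain get_user() in the loop, every iteration would execute the
csrs/csrc pair on SR_SUM.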