User space access regions are tedious and require similar code patterns all
over the place:
	if (!user_read_access_begin(from, sizeof(*from)))
		return -EFAULT;
	unsafe_get_user(val, from, Efault);
	user_read_access_end();
	return 0;
Efault:
	user_read_access_end();
	return -EFAULT;
This got worse with the recent addition of masked user access, which
optimizes the speculation prevention:
	if (can_do_masked_user_access())
		from = masked_user_read_access_begin((from));
	else if (!user_read_access_begin(from, sizeof(*from)))
		return -EFAULT;
	unsafe_get_user(val, from, Efault);
	user_read_access_end();
	return 0;
Efault:
	user_read_access_end();
	return -EFAULT;
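For context: on x86 the masked variant replaces the speculation barrier in
access_ok()/user_access_begin() with an address mask, which clamps the
pointer into the user address range. A simplified C sketch of that idea
(the real x86 implementation uses a CMOV sequence and a runtime patched
constant; the helper name below is made up for illustration):

	/* Sketch only: clamp a user pointer to the maximum user address,
	 * so that a mispredicted access check cannot speculatively reach
	 * kernel memory. Not the actual implementation. */
	static inline void __user *mask_user_address_sketch(void __user *ptr)
	{
		if ((unsigned long)ptr > USER_PTR_MAX)
			ptr = (void __user *)USER_PTR_MAX;
		return ptr;
	}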
There have been issues with using the wrong user_*_access_end() variant in
the error path and other typical Copy&Pasta problems, e.g. using the wrong
fault label in the user accessor, which ends up invoking the wrong access
end variant.
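A hypothetical example of that bug class (not taken from an actual user):
pairing a read begin with the write end variant compiles everywhere, but
is broken on architectures which track the access direction, e.g. powerpc
with KUAP:

	if (!user_read_access_begin(from, sizeof(*from)))
		return -EFAULT;
	unsafe_get_user(val, from, Efault);
	user_read_access_end();
	return 0;
Efault:
	user_write_access_end();	/* Wrong end variant for a read region */
	return -EFAULT;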
These patterns beg for scopes with automatic cleanup. The resulting outcome
is:
	scoped_masked_user_read_access(from, return -EFAULT,
		scoped_get_user(val, from); );
	return 0;
The scope guarantees that the proper cleanup for the access mode is invoked
in both the success and the failure (fault) path.
The fault label is scope local and always 'scope_fault:', which prevents
mixing up fault labels between scopes. It is marked __maybe_unused so that
user access code which does not use a fault label and instead modifies a
result variable in the exception handler does not cause a 'defined but not
used' warning.
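As an illustration (hypothetical helper, not part of this patch), two
independent scopes can live in the same function, each with its own scope
local fault label:

	static int copy_val(u32 __user *usrc, u32 __user *udst, u32 *val)
	{
		scoped_masked_user_read_access(usrc, return -EFAULT,
			scoped_get_user(*val, usrc); );
		scoped_masked_user_write_access(udst, return -EFAULT,
			scoped_put_user(*val, udst); );
		return 0;
	}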
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
---
V2: Remove the shady wrappers around the opening and use scopes with automatic cleanup
---
include/linux/uaccess.h | 151 ++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 151 insertions(+)
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,6 +2,7 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__
+#include <linux/cleanup.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
@@ -35,9 +36,17 @@
#ifdef masked_user_access_begin
#define can_do_masked_user_access() 1
+# ifndef masked_user_write_access_begin
+# define masked_user_write_access_begin masked_user_access_begin
+# endif
+# ifndef masked_user_read_access_begin
+# define masked_user_read_access_begin masked_user_access_begin
+# endif
#else
#define can_do_masked_user_access() 0
#define masked_user_access_begin(src) NULL
+#define masked_user_read_access_begin(src) NULL
+#define masked_user_write_access_begin(src) NULL
#define mask_user_address(src) (src)
#endif
@@ -569,6 +578,148 @@ static inline void user_access_restore(u
#define user_read_access_end user_access_end
#endif
+/* Define RW variant so the below _mode macro expansion works */
+#define masked_user_rw_access_begin(u) masked_user_access_begin(u)
+#define user_rw_access_begin(u, s) user_access_begin(u, s)
+#define user_rw_access_end() user_access_end()
+
+/* Scoped user access */
+#define USER_ACCESS_GUARD(_mode) \
+static __always_inline void __user *					\
+class_masked_user_##_mode##_begin(void __user *ptr)			\
+{									\
+	return ptr;							\
+}									\
+									\
+static __always_inline void						\
+class_masked_user_##_mode##_end(void __user *ptr)			\
+{									\
+	user_##_mode##_access_end();					\
+}									\
+									\
+DEFINE_CLASS(masked_user_##_mode##_access, void __user *,		\
+	     class_masked_user_##_mode##_end(_T),			\
+	     class_masked_user_##_mode##_begin(ptr), void __user *ptr)	\
+									\
+static __always_inline class_masked_user_##_mode##_access_t		\
+class_masked_user_##_mode##_access_ptr(void __user *scope)		\
+{									\
+	return scope;							\
+}
+
+USER_ACCESS_GUARD(read)
+USER_ACCESS_GUARD(write)
+USER_ACCESS_GUARD(rw)
+#undef USER_ACCESS_GUARD
+
+/**
+ * __scoped_masked_user_access - Open a scope for masked user access
+ * @_mode: The mode of the access class (read, write, rw)
+ * @_uptr: The pointer to access user space memory
+ * @_ecode: Code to inject for the failure case
+ * @_code: The code to inject inside the scope
+ *
+ * When the scope is left, user_##@_mode##_access_end() is invoked, if
+ * the corresponding masked or regular access_begin() variant succeeded.
+ *
+ * The user access function inside the scope must use a fault label, which
+ * is inside the scope. __scoped_masked_user_access() provides a default
+ * label scope_fault, which is placed right before @_ecode. This label is
+ * scope local, so multiple masked user scopes can be in one function.
+ *
+ * The user access helpers scoped_put_user() and scoped_get_user() use
+ * scope_fault automatically.
+ *
+ * A single line statement in the scope::
+ *
+ *	scoped_masked_user_read_access(ptr, return false,
+ *		scoped_get_user(val, ptr););
+ *
+ * Multi-line statement::
+ *
+ *	scoped_masked_user_rw_access(ptr, return false, {
+ *		scoped_get_user(rval, &ptr->rval);
+ *		scoped_put_user(wval, &ptr->wval);
+ *	});
+ */
+#define __scoped_masked_user_access(_mode, _uptr, _ecode, _code)	\
+do {									\
+	unsigned long size = sizeof(*(_uptr));				\
+	typeof((_uptr)) _tmpptr = (_uptr);				\
+	bool proceed = true;						\
+									\
+	/*								\
+	 * Must be outside the CLASS scope below to handle the fail	\
+	 * of the non-masked access_begin() case correctly.		\
+	 */								\
+	if (can_do_masked_user_access())				\
+		_tmpptr = masked_user_##_mode##_access_begin((_uptr));	\
+	else								\
+		proceed = user_##_mode##_access_begin((_uptr), size);	\
+									\
+	if (!proceed) {							\
+		_ecode;							\
+	} else {							\
+		__label__ scope_fault;					\
+		CLASS(masked_user_##_mode##_access, scope) (_tmpptr);	\
+		/* Force modified pointer usage in @_code */		\
+		const typeof((_uptr)) _uptr = _tmpptr;			\
+									\
+		_code;							\
+		if (0) {						\
+scope_fault:								\
+			__maybe_unused;					\
+			_ecode;						\
+		}							\
+	}								\
+} while (0)
+
+/**
+ * scoped_masked_user_read_access - Open a scope for masked user read access
+ * @_usrc: Pointer to the user space address to read from
+ * @_ecode: Code to inject for the failure case
+ *
+ * For further information see __scoped_masked_user_access() above.
+ */
+#define scoped_masked_user_read_access(_usrc, _ecode, _code) \
+	__scoped_masked_user_access(read, (_usrc), _ecode, ({_code}))
+
+/**
+ * scoped_masked_user_write_access - Open a scope for masked user write access
+ * @_udst: Pointer to the user space address to write to
+ * @_ecode: Code to inject for the failure case
+ *
+ * For further information see __scoped_masked_user_access() above.
+ */
+#define scoped_masked_user_write_access(_udst, _ecode, _code) \
+	__scoped_masked_user_access(write, (_udst), _ecode, ({_code}))
+
+/**
+ * scoped_masked_user_rw_access - Open a scope for masked user read/write access
+ * @_uptr: Pointer to the user space address to read from and write to
+ * @_ecode: Code to inject for the failure case
+ *
+ * For further information see __scoped_masked_user_access() above.
+ */
+#define scoped_masked_user_rw_access(_uptr, _ecode, _code) \
+	__scoped_masked_user_access(rw, (_uptr), _ecode, ({_code}))
+
+/**
+ * scoped_get_user - Read user memory from within a scoped masked access section
+ * @_dst: The destination variable to store the read value
+ * @_usrc: Pointer to the user space address to read from
+ */
+#define scoped_get_user(_dst, _usrc)					\
+	unsafe_get_user((_dst), (_usrc), scope_fault)
+
+/**
+ * scoped_put_user - Write user memory from within a scoped masked access section
+ * @_val: Value to write
+ * @_udst: Pointer to the user space address to write to
+ */
+#define scoped_put_user(_val, _udst)					\
+	unsafe_put_user((_val), (_udst), scope_fault)
+
#ifdef CONFIG_HARDENED_USERCOPY
void __noreturn usercopy_abort(const char *name, const char *detail,
bool to_user, unsigned long offset,
On 16-Sep-2025 06:33:13 PM, Thomas Gleixner wrote:
> User space access regions are tedious and require similar code patterns all
> over the place:
>
>	if (!user_read_access_begin(from, sizeof(*from)))
>		return -EFAULT;
>	unsafe_get_user(val, from, Efault);
>	user_read_access_end();
>	return 0;
> Efault:
>	user_read_access_end();
>	return -EFAULT;
>
> This got worse with the recent addition of masked user access, which
> optimizes the speculation prevention:
>
>	if (can_do_masked_user_access())
>		from = masked_user_read_access_begin((from));
>	else if (!user_read_access_begin(from, sizeof(*from)))
>		return -EFAULT;
>	unsafe_get_user(val, from, Efault);
>	user_read_access_end();
>	return 0;
> Efault:
>	user_read_access_end();
>	return -EFAULT;
>
> There have been issues with using the wrong user_*_access_end() variant in
> the error path and other typical Copy&Pasta problems, e.g. using the wrong
> fault label in the user accessor, which ends up invoking the wrong access
> end variant.
>
> These patterns beg for scopes with automatic cleanup. The resulting outcome
> is:
>
>	scoped_masked_user_read_access(from, return -EFAULT,
>		scoped_get_user(val, from); );
>	return 0;

I find a few aspects of the proposed API odd:

- Explicitly implementing the error label within a macro parameter,

- Having the scoped code within another macro parameter.

I would rather expect something like this to mimic our expectations
in C:

int func(void __user *ptr, size_t len, char *val1, char *val2)
{
	int ret;

	scoped_masked_user_read_access(ptr, len, ret) {
		scoped_get_user(val1, ptr[0]);
		scoped_get_user(val2, ptr[1]);
	}
	return ret;
}

Where:

- ptr is the pointer at the beginning of the range where the userspace
  access will be done.

- len is the length of the range.

- ret is a variable used as output (set to -EFAULT on error, 0 on
  success). If the user needs to do something cleverer than get a
  -EFAULT on error, they can open-code it rather than use the scoped
  helper.

- The scope is presented similarly to a "for ()" loop scope.

Now I have no clue whether preprocessor limitations prevent achieving
this somehow, or if it would end up generating poor assembler.

Thoughts?

Thanks,

Mathieu

-- 
Mathieu Desnoyers
EfficiOS Inc.
http://www.efficios.com
On Thu, Sep 18 2025 at 09:20, Mathieu Desnoyers wrote:
> On 16-Sep-2025 06:33:13 PM, Thomas Gleixner wrote:
>> These patterns beg for scopes with automatic cleanup. The resulting outcome
>> is:
>>
>>	scoped_masked_user_read_access(from, return -EFAULT,
>>		scoped_get_user(val, from); );
>>	return 0;
>
> I find a few aspects of the proposed API odd:
>
> - Explicitly implementing the error label within a macro parameter,

Which error label are you talking about?

> - Having the scoped code within another macro parameter.

Macros are limited and the whole construct requires a closed scope to
work and to keep the local variables and the jump label local.

> I would rather expect something like this to mimic our expectations
> in C:

I obviously would love to do it more C style as everyone else. If you
can come up with a way to do that in full C I'm all ears :)

> int func(void __user *ptr, size_t len, char *val1, char *val2)
> {
>	int ret;
>
>	scoped_masked_user_read_access(ptr, len, ret) {
>		scoped_get_user(val1, ptr[0]);
>		scoped_get_user(val2, ptr[1]);
>	}
>	return ret;
> }
>
> Where:
>
> - ptr is the pointer at the beginning of the range where the userspace
>   access will be done.

That's the case with the proposed interface already, no?

> - len is the length of the range.

The majority of use cases does not need an explicit size because the
size is determined by the data type. So not forcing everyone to write
scope(ptr, sizeof(*ptr), ..) is a good thing, no?

Adding a sized interface on top for the others is straight forward
enough.

> - ret is a variable used as output (set to -EFAULT on error, 0 on
>   success). If the user needs to do something cleverer than get a
>   -EFAULT on error, they can open-code it rather than use the scoped
>   helper.

Just write "ret = WHATEVER" instead of "return WHATEVER".

> - The scope is presented similarly to a "for ()" loop scope.

It is a loop scope and you can exit it with either return or break.

> Now I have no clue whether preprocessor limitations prevent achieving
> this somehow, or if it would end up generating poor assembler.

There is no real good way to implement it so that the scope local
requirements are fulfilled.

The only alternative would be to close the scope with a bracket after
the code:

	scope(....)
		foo();
	}

or for multiline:

	scope(....) {
		....
	))

The lonely extra bracket screws up reading even more than the code
within the macro parameter because it's imbalanced. It also makes
tooling from editors to code checkers mightily unhappy.

Thanks,

	tglx