The barrier_nospec() in 32-bit copy_from_user() is slow. Instead, use
pointer masking to force the user pointer to all 1's if a previous
access_ok() mispredicted true for an invalid address. A mispredicted
access then goes to the fixed all-1's address rather than to an
attacker-controlled one, so the speculation barrier is no longer
needed.
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
---
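
For reviewers: a minimal stand-alone user-space sketch of the cmp/sbb
trick used by mask_user_address() below. The asm body mirrors the
patch; DEMO_USER_PTR_MAX, mask_demo() and the values in main() are
made-up demo names, not kernel code (the kernel uses TASK_SIZE_MAX - 1
on 32-bit and a runtime constant on 64-bit). Builds with gcc or clang
on x86:

#include <stdio.h>

/* Demo stand-in for the maximum valid user address. */
#define DEMO_USER_PTR_MAX 0xbfffffffUL

/*
 * Preload a register with the limit, compare the pointer against it,
 * and turn the resulting carry into a mask:
 *
 *   cmp %1,%0   - computes limit - ptr, sets CF iff ptr > limit
 *   sbb %0,%0   - mask = mask - mask - CF, i.e. CF ? ~0UL : 0
 *
 * OR-ing the mask into the pointer forces any out-of-range pointer to
 * all 1's without a conditional branch, so there is nothing for the
 * CPU to mispredict.
 */
static inline unsigned long mask_demo(unsigned long ptr)
{
	unsigned long mask;

	asm("cmp %1,%0\n\t"
	    "sbb %0,%0"
	    :"=r" (mask)
	    :"r" (ptr),
	     "0" (DEMO_USER_PTR_MAX));
	return mask | ptr;
}

int main(void)
{
	/* In range: the pointer passes through unchanged. */
	printf("%#lx -> %#lx\n", 0x1000UL, mask_demo(0x1000UL));
	/* Out of range: the pointer is forced to all 1's. */
	printf("%#lx -> %#lx\n", 0xc0000000UL, mask_demo(0xc0000000UL));
	return 0;
}
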
arch/x86/include/asm/uaccess.h | 34 +++++++++++++++++++++++++++++++
arch/x86/include/asm/uaccess_32.h | 2 +-
arch/x86/include/asm/uaccess_64.h | 29 +-------------------------
3 files changed, 36 insertions(+), 29 deletions(-)
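
With the helper available on 32-bit as well, callers can use the usual
masked-access pattern. A sketch, assuming kernel context: the
can_do_masked_user_access(), user_access_begin(), unsafe_get_user()
and user_access_end() APIs are the existing kernel ones, while
fetch_word() is a hypothetical caller written only for illustration:

#include <linux/uaccess.h>
#include <linux/errno.h>

static int fetch_word(unsigned long __user *uaddr, unsigned long *val)
{
	if (can_do_masked_user_access())
		uaddr = masked_user_access_begin(uaddr);
	else if (!user_access_begin(uaddr, sizeof(*uaddr)))
		return -EFAULT;

	/*
	 * If uaddr was out of range it is now all 1's: the
	 * architectural access faults, and a mispredicted one can only
	 * touch that fixed address, not attacker-controlled memory.
	 */
	unsafe_get_user(*val, uaddr, Efault);
	user_access_end();
	return 0;

Efault:
	user_access_end();
	return -EFAULT;
}
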
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 3a7755c1a441..e7ac97d42bc2 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -15,6 +15,40 @@
#include <asm/smap.h>
#include <asm/extable.h>
#include <asm/tlbflush.h>
+#include <asm/runtime-const.h>
+
+#ifdef CONFIG_X86_64
+/*
+ * Virtual variable: there's no actual backing store for this,
+ * it can purely be used as 'runtime_const_ptr(USER_PTR_MAX)'
+ */
+extern unsigned long USER_PTR_MAX;
+# define USER_PTR_MAX_CONST runtime_const_ptr(USER_PTR_MAX)
+#else
+# define USER_PTR_MAX_CONST (TASK_SIZE_MAX - 1)
+#endif
+
+/*
+ * Masking the user address is an alternative to a conditional
+ * user_access_begin that can avoid the fencing. This only works
+ * for dense accesses starting at the address.
+ */
+static inline void __user *mask_user_address(const void __user *ptr)
+{
+ unsigned long mask;
+ asm("cmp %1,%0\n\t"
+ "sbb %0,%0"
+ :"=r" (mask)
+ :"r" (ptr),
+ "0" (USER_PTR_MAX_CONST));
+ return (__force void __user *)(mask | (__force unsigned long)ptr);
+}
+
+#define masked_user_access_begin(x) ({ \
+ __auto_type __masked_ptr = (x); \
+ __masked_ptr = mask_user_address(__masked_ptr); \
+ __uaccess_begin(); __masked_ptr; })
+
 
#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 8393ba104b2c..6ec2d73f8bba 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -23,7 +23,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
static __always_inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- barrier_nospec();
+ from = mask_user_address(from);
return __copy_user_ll(to, (__force const void *)from, n);
}
 
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 7ce84090f0ec..dfb78154ac26 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -12,13 +12,6 @@
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/percpu.h>
-#include <asm/runtime-const.h>
-
-/*
- * Virtual variable: there's no actual backing store for this,
- * it can purely be used as 'runtime_const_ptr(USER_PTR_MAX)'
- */
-extern unsigned long USER_PTR_MAX;
 
#ifdef CONFIG_ADDRESS_MASKING
/*
@@ -54,27 +47,7 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
#endif
 
#define valid_user_address(x) \
- ((__force unsigned long)(x) <= runtime_const_ptr(USER_PTR_MAX))
-
-/*
- * Masking the user address is an alternative to a conditional
- * user_access_begin that can avoid the fencing. This only works
- * for dense accesses starting at the address.
- */
-static inline void __user *mask_user_address(const void __user *ptr)
-{
- unsigned long mask;
- asm("cmp %1,%0\n\t"
- "sbb %0,%0"
- :"=r" (mask)
- :"r" (ptr),
- "0" (runtime_const_ptr(USER_PTR_MAX)));
- return (__force void __user *)(mask | (__force unsigned long)ptr);
-}
-#define masked_user_access_begin(x) ({ \
- __auto_type __masked_ptr = (x); \
- __masked_ptr = mask_user_address(__masked_ptr); \
- __uaccess_begin(); __masked_ptr; })
+ ((__force unsigned long)(x) <= USER_PTR_MAX_CONST)
 
/*
* User pointers can have tag bits on x86-64. This scheme tolerates
--
2.47.0