The kernel allows arches to select between inline and outline
implementations of copy_{from,to}_user() by defining the individual
INLINE_COPY_FROM_USER and INLINE_COPY_TO_USER macros, respectively.
However, all arches enable or disable them always together.
Without a real use case for one helper being inlined while the other
is outlined, having independent controls is excessive and error-prone.
Switch the codebase to the single unified INLINE_COPY_USER control.
Reported-by: "Christophe Leroy (CS GROUP)" <chleroy@kernel.org>
Closes: https://lore.kernel.org/all/746c9c50-20c4-4dc9-a539-bf1310ff9414@kernel.org/
Fixes: 1f9a8286bc0c ("uaccess: always export _copy_[from|to]_user with CONFIG_RUST")
Signed-off-by: Yury Norov <ynorov@nvidia.com>
---
arch/arc/include/asm/uaccess.h | 3 +--
arch/arm/include/asm/uaccess.h | 3 +--
arch/arm64/include/asm/uaccess.h | 3 +--
arch/hexagon/include/asm/uaccess.h | 3 +--
arch/loongarch/include/asm/uaccess.h | 3 +--
arch/m68k/include/asm/uaccess.h | 3 +--
arch/microblaze/include/asm/uaccess.h | 3 +--
arch/mips/include/asm/uaccess.h | 3 +--
arch/nios2/include/asm/uaccess.h | 3 +--
arch/openrisc/include/asm/uaccess.h | 3 +--
arch/parisc/include/asm/uaccess.h | 3 +--
arch/s390/include/asm/uaccess.h | 3 +--
arch/sh/include/asm/uaccess.h | 3 +--
arch/sparc/include/asm/uaccess_32.h | 3 +--
arch/sparc/include/asm/uaccess_64.h | 3 +--
arch/um/include/asm/uaccess.h | 3 +--
arch/xtensa/include/asm/uaccess.h | 3 +--
include/asm-generic/uaccess.h | 3 +--
include/linux/uaccess.h | 12 ++++++------
lib/usercopy.c | 4 +---
rust/helpers/uaccess.c | 2 +-
21 files changed, 26 insertions(+), 46 deletions(-)
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 1e8809ea000a..6df2209541ac 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -628,8 +628,7 @@ static inline unsigned long __clear_user(void __user *to, unsigned long n)
return res;
}
-#define INLINE_COPY_TO_USER
-#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_USER
#define __clear_user __clear_user
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index d6ae80b5df36..1593cf3b9800 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -616,8 +616,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
}
#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
#endif
-#define INLINE_COPY_TO_USER
-#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_USER
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 9810106a3f66..d8be8cb45050 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -456,8 +456,7 @@ do { \
unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \
} while (0)
-#define INLINE_COPY_TO_USER
-#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_USER
extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h
index bff77efc0d9a..1aecf60ec4f5 100644
--- a/arch/hexagon/include/asm/uaccess.h
+++ b/arch/hexagon/include/asm/uaccess.h
@@ -26,8 +26,7 @@ unsigned long raw_copy_from_user(void *to, const void __user *from,
unsigned long n);
unsigned long raw_copy_to_user(void __user *to, const void *from,
unsigned long n);
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
+#define INLINE_COPY_USER
__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count);
#define __clear_user(a, s) __clear_user_hexagon((a), (s))
diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/asm/uaccess.h
index 438269313e78..428f373feabf 100644
--- a/arch/loongarch/include/asm/uaccess.h
+++ b/arch/loongarch/include/asm/uaccess.h
@@ -292,8 +292,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
return __copy_user((__force void *)to, from, n);
}
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
+#define INLINE_COPY_USER
/*
* __clear_user: - Zero a block of memory in user space, with less checking.
diff --git a/arch/m68k/include/asm/uaccess.h b/arch/m68k/include/asm/uaccess.h
index 64914872a5c9..31d133faa45e 100644
--- a/arch/m68k/include/asm/uaccess.h
+++ b/arch/m68k/include/asm/uaccess.h
@@ -377,8 +377,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
return __constant_copy_to_user(to, from, n);
return __generic_copy_to_user(to, from, n);
}
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
+#define INLINE_COPY_USER
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 3aab2f17e046..afa0dd8d013f 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -250,8 +250,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
+#define INLINE_COPY_USER
/*
* Copy a null terminated string from userspace.
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index c0cede273c7c..f00c36676b73 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -433,8 +433,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
return __cu_len_r;
}
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
+#define INLINE_COPY_USER
extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h
index 6ccc9a232c23..5e6e05cc6efc 100644
--- a/arch/nios2/include/asm/uaccess.h
+++ b/arch/nios2/include/asm/uaccess.h
@@ -57,8 +57,7 @@ extern unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n);
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
+#define INLINE_COPY_USER
extern long strncpy_from_user(char *__to, const char __user *__from,
long __len);
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index d6500a374e18..db934ebc0069 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -218,8 +218,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long size)
{
return __copy_tofrom_user((__force void *)to, from, size);
}
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
+#define INLINE_COPY_USER
extern unsigned long __clear_user(void __user *addr, unsigned long size);
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 6c531d2c847e..0d17f81c8b27 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -197,7 +197,6 @@ unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src,
unsigned long len);
unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
unsigned long len);
-#define INLINE_COPY_TO_USER
-#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_USER
#endif /* __PARISC_UACCESS_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index dff035372601..a9f32c53f699 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -30,8 +30,7 @@ void debug_user_asce(int exit);
#define uaccess_kmsan_or_inline __always_inline
#endif
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
+#define INLINE_COPY_USER
static uaccess_kmsan_or_inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long size)
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
index a79609eb14be..02e7a066538e 100644
--- a/arch/sh/include/asm/uaccess.h
+++ b/arch/sh/include/asm/uaccess.h
@@ -95,8 +95,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
return __copy_user((__force void *)to, from, n);
}
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
+#define INLINE_COPY_USER
/*
* Clear the area and return remaining number of bytes
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 43284b6ec46a..5542d5b32994 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -190,8 +190,7 @@ static inline unsigned long raw_copy_from_user(void *to, const void __user *from
return __copy_user((__force void __user *) to, from, n);
}
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
+#define INLINE_COPY_USER
static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index b825a5dd0210..e2989cfba626 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -231,8 +231,7 @@ unsigned long __must_check raw_copy_from_user(void *to,
unsigned long __must_check raw_copy_to_user(void __user *to,
const void *from,
unsigned long size);
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
+#define INLINE_COPY_USER
unsigned long __must_check raw_copy_in_user(void __user *to,
const void __user *from,
diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h
index 0df9ea4abda8..4417c8b1d37a 100644
--- a/arch/um/include/asm/uaccess.h
+++ b/arch/um/include/asm/uaccess.h
@@ -27,8 +27,7 @@ static inline int __access_ok(const void __user *ptr, unsigned long size);
#define __access_ok __access_ok
#define __clear_user __clear_user
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
+#define INLINE_COPY_USER
#include <asm-generic/uaccess.h>
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 56aec6d504fe..6538a29a2bbd 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -237,8 +237,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
prefetch(from);
return __xtensa_copy_user((__force void *)to, from, n);
}
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
+#define INLINE_COPY_USER
/*
* We need to return the number of bytes not cleared. Our memset()
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index b276f783494c..4569045e7139 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -91,8 +91,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
memcpy((void __force *)to, from, n);
return 0;
}
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
+#define INLINE_COPY_USER
#endif /* CONFIG_UACCESS_MEMCPY */
/*
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 4fe63169d5a2..0ddd2806d7f5 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -84,7 +84,7 @@
* the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user_inatomic())
* that are used instead. Out of those, __... ones are inlined. Plain
* copy_{to,from}_user() might or might not be inlined. If you want them
- * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
+ * inlined, have asm/uaccess.h define INLINE_COPY_USER.
*
* NOTE: only copy_from_user() zero-pads the destination in case of short copy.
* Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
@@ -157,7 +157,7 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
}
/*
- * Architectures that #define INLINE_COPY_TO_USER use this function
+ * Architectures that #define INLINE_COPY_USER use this function
* directly in the normal copy_to/from_user(), the other ones go
* through an extern _copy_to/from_user(), which expands the same code
* here.
@@ -190,7 +190,7 @@ _inline_copy_from_user(void *to, const void __user *from, unsigned long n)
memset(to + (n - res), 0, res);
return res;
}
-#ifndef INLINE_COPY_FROM_USER
+#ifndef INLINE_COPY_USER
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif
@@ -207,7 +207,7 @@ _inline_copy_to_user(void __user *to, const void *from, unsigned long n)
}
return n;
}
-#ifndef INLINE_COPY_TO_USER
+#ifndef INLINE_COPY_USER
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif
@@ -217,7 +217,7 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (!check_copy_size(to, n, false))
return n;
-#ifdef INLINE_COPY_FROM_USER
+#ifdef INLINE_COPY_USER
return _inline_copy_from_user(to, from, n);
#else
return _copy_from_user(to, from, n);
@@ -230,7 +230,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
if (!check_copy_size(from, n, true))
return n;
-#ifdef INLINE_COPY_TO_USER
+#ifdef INLINE_COPY_USER
return _inline_copy_to_user(to, from, n);
#else
return _copy_to_user(to, from, n);
diff --git a/lib/usercopy.c b/lib/usercopy.c
index b00a3a957de6..e2f0bf104a59 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -12,15 +12,13 @@
/* out-of-line parts */
-#if !defined(INLINE_COPY_FROM_USER)
+#if !defined(INLINE_COPY_USER)
unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
{
return _inline_copy_from_user(to, from, n);
}
EXPORT_SYMBOL(_copy_from_user);
-#endif
-#if !defined(INLINE_COPY_TO_USER)
unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
return _inline_copy_to_user(to, from, n);
diff --git a/rust/helpers/uaccess.c b/rust/helpers/uaccess.c
index d9625b9ee046..6e59cc9c665c 100644
--- a/rust/helpers/uaccess.c
+++ b/rust/helpers/uaccess.c
@@ -14,7 +14,7 @@ rust_helper_copy_to_user(void __user *to, const void *from, unsigned long n)
return copy_to_user(to, from, n);
}
-#ifdef INLINE_COPY_FROM_USER
+#ifdef INLINE_COPY_USER
__rust_helper
unsigned long rust_helper__copy_from_user(void *to, const void __user *from, unsigned long n)
{
--
2.43.0
Le 25/03/2026 à 17:33, Yury Norov a écrit :
> The kernel allows arches to select between inline and outline
> implementations of the copy_{from,to}_user() by defining individual
> INLINE_COPY_FROM_USER and INLINE_COPY_TO_USER, correspondingly.
> However, all arches enable or disable them always together.
>
> Without the real use-case for one helper being inlined while the other
> outlined, having independent controls is excessive and error prone.
>
> Switch the codebase to the single unified INLINE_COPY_USER control.
Could we use a (non-user-selectable) Kconfig item instead, e.g.
CONFIG_ARCH_WANT_OUTLINE_USER_COPY?
Also, looks like only powerpc doesn't select INLINE_COPY. Would it be
cleaner to change the logic to a flag for OUTLINE_COPY ?
Something like (untested):
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ad7a2fe63a2a..58743cb0e305 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -186,6 +186,7 @@ config PPC
select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANT_OPTIMIZE_DAX_VMEMMAP if PPC_RADIX_MMU
+ select ARCH_WANT_OUTLINE_USER_COPY
select ARCH_WANTS_MODULES_DATA_IN_VMALLOC if PPC_BOOK3S_32 || PPC_8xx
select ARCH_WEAK_RELEASE_ACQUIRE
select BINFMT_ELF
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index b276f783494c..fb33a71fd24e 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -91,8 +91,6 @@ raw_copy_to_user(void __user *to, const void *from,
unsigned long n)
memcpy((void __force *)to, from, n);
return 0;
}
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
#endif /* CONFIG_UACCESS_MEMCPY */
/*
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 1f3804245c06..bcfa8c11e49f 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -83,8 +83,8 @@
* with that. They should not be used directly; they are used to
implement
* the 6 functions (copy_{to,from}_user(),
__copy_{to,from}_user_inatomic())
* that are used instead. Out of those, __... ones are inlined. Plain
- * copy_{to,from}_user() might or might not be inlined. If you want them
- * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
+ * copy_{to,from}_user() might or might not be inlined. If you don't
want them
+ * inlined, select CONFIG_ARCH_WANT_OUTLINE_USER_COPY.
*
* NOTE: only copy_from_user() zero-pads the destination in case of
short copy.
* Neither __copy_from_user() nor __copy_from_user_inatomic() zero
anything
@@ -157,8 +157,8 @@ __copy_to_user(void __user *to, const void *from,
unsigned long n)
}
/*
- * Architectures that #define INLINE_COPY_TO_USER use this function
- * directly in the normal copy_to/from_user(), the other ones go
+ * Architectures that don't select CONFIG_ARCH_WANT_OUTLINE_USER_COPY
use this
+ * function directly in the normal copy_to/from_user(), the other ones go
* through an extern _copy_to/from_user(), which expands the same code
* here.
*/
@@ -190,7 +190,7 @@ _inline_copy_from_user(void *to, const void __user
*from, unsigned long n)
memset(to + (n - res), 0, res);
return res;
}
-#ifndef INLINE_COPY_FROM_USER
+#ifndef CONFIG_ARCH_WANT_OUTLINE_USER_COPY
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif
@@ -207,7 +207,7 @@ _inline_copy_to_user(void __user *to, const void
*from, unsigned long n)
}
return n;
}
-#ifndef INLINE_COPY_TO_USER
+#ifndef CONFIG_ARCH_WANT_OUTLINE_USER_COPY
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif
@@ -217,7 +217,7 @@ copy_from_user(void *to, const void __user *from,
unsigned long n)
{
if (!check_copy_size(to, n, false))
return n;
-#ifdef INLINE_COPY_FROM_USER
+#ifndef CONFIG_ARCH_WANT_OUTLINE_USER_COPY
return _inline_copy_from_user(to, from, n);
#else
return _copy_from_user(to, from, n);
@@ -230,7 +230,7 @@ copy_to_user(void __user *to, const void *from,
unsigned long n)
if (!check_copy_size(from, n, true))
return n;
-#ifdef INLINE_COPY_TO_USER
+#ifndef CONFIG_ARCH_WANT_OUTLINE_USER_COPY
return _inline_copy_to_user(to, from, n);
#else
return _copy_to_user(to, from, n);
diff --git a/lib/Kconfig b/lib/Kconfig
index 0f2fb9610647..0106c1facfa4 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -550,6 +550,9 @@ config ARCH_HAS_UACCESS_FLUSHCACHE
config ARCH_HAS_COPY_MC
bool
+config ARCH_WANT_OUTLINE_USER_COPY
+ bool
+
# Temporary. Goes away when all archs are cleaned up
config ARCH_STACKWALK
bool
diff --git a/lib/usercopy.c b/lib/usercopy.c
index b00a3a957de6..aa52ac3f5d6b 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -12,15 +12,13 @@
/* out-of-line parts */
-#if !defined(INLINE_COPY_FROM_USER)
+#ifdef CONFIG_ARCH_WANT_OUTLINE_USER_COPY
unsigned long _copy_from_user(void *to, const void __user *from,
unsigned long n)
{
return _inline_copy_from_user(to, from, n);
}
EXPORT_SYMBOL(_copy_from_user);
-#endif
-#if !defined(INLINE_COPY_TO_USER)
unsigned long _copy_to_user(void __user *to, const void *from,
unsigned long n)
{
return _inline_copy_to_user(to, from, n);
diff --git a/rust/helpers/uaccess.c b/rust/helpers/uaccess.c
index d9625b9ee046..79d950425dd9 100644
--- a/rust/helpers/uaccess.c
+++ b/rust/helpers/uaccess.c
@@ -14,7 +14,7 @@ rust_helper_copy_to_user(void __user *to, const void
*from, unsigned long n)
return copy_to_user(to, from, n);
}
-#ifdef INLINE_COPY_FROM_USER
+#ifndef CONFIG_ARCH_WANT_OUTLINE_USER_COPY
__rust_helper
unsigned long rust_helper__copy_from_user(void *to, const void __user
*from, unsigned long n)
{
>
> Reported-by: "Christophe Leroy (CS GROUP)" <chleroy@kernel.org>
> Closes: https://lore.kernel.org/all/746c9c50-20c4-4dc9-a539-bf1310ff9414@kernel.org/
> Fixes: 1f9a8286bc0c ("uaccess: always export _copy_[from|to]_user with CONFIG_RUST")
> Signed-off-by: Yury Norov <ynorov@nvidia.com>
Christophe
On Thu, Mar 26, 2026 at 02:44:40PM +0100, Christophe Leroy (CS GROUP) wrote:
>
>
> Le 25/03/2026 à 17:33, Yury Norov a écrit :
> > The kernel allows arches to select between inline and outline
> > implementations of the copy_{from,to}_user() by defining individual
> > INLINE_COPY_FROM_USER and INLINE_COPY_TO_USER, correspondingly.
> > However, all arches enable or disable them always together.
> >
> > Without the real use-case for one helper being inlined while the other
> > outlined, having independent controls is excessive and error prone.
> >
> > Switch the codebase to the single unified INLINE_COPY_USER control.
>
> Could we use a (non user selectable) Kconfig item instead, e.g.
> CONFIG_ARCH_WANT_OUTLINE_USER_COPY ?
This sounds interesting. I need to wrap my head around it for a while.
Right now, I believe, the best solution is to isolate this setting
from the sources as much as we can, and your suggestion looks like a
step forward.
Overall, I'm puzzled why some arches enable this while the others
don't. The next question is why copy_{from,to}_user is so special.
If this function benefits from being inlined for that particular
arch or compiler, which functions would also benefit and why?
Reasoning logically, if this WANT_INLINE thing makes sense, it would
make much more sense to create machinery for something like:
unsigned long __arch_inline copy_to_user();
> Also, looks like only powerpc doesn't select INLINE_COPY. Would it be
> cleaner to change the logic to a flag for OUTLINE_COPY ?
How that? x86_64 outlines it. Check it yourself:
@@ -206,7 +206,9 @@ _inline_copy_to_user(void __user *to, const void *from, unsigned long n)
#ifdef INLINE_COPY_USER
# define _copy_to_user _inline_copy_to_user
# define _copy_from_user _inline_copy_from_user
+#error INLINE_COPY_USER
#else
+#error OUTLINE_COPY_USER
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
If it were really a single arch, it would be worth discussing why we
need this customization at all.
Le 26/03/2026 à 18:29, Yury Norov a écrit :
> On Thu, Mar 26, 2026 at 02:44:40PM +0100, Christophe Leroy (CS GROUP) wrote:
>>
>>
>> Le 25/03/2026 à 17:33, Yury Norov a écrit :
>>> The kernel allows arches to select between inline and outline
>>> implementations of the copy_{from,to}_user() by defining individual
>>> INLINE_COPY_FROM_USER and INLINE_COPY_TO_USER, correspondingly.
>>> However, all arches enable or disable them always together.
>>>
>>> Without the real use-case for one helper being inlined while the other
>>> outlined, having independent controls is excessive and error prone.
>>>
>>> Switch the codebase to the single unified INLINE_COPY_USER control.
>>
>> Could we use a (non user selectable) Kconfig item instead, e.g.
>> CONFIG_ARCH_WANT_OUTLINE_USER_COPY ?
>
> This sounds interesting. I need to wrap it around my head for a while.
> Right now, I believe, the best solution is to isolate this setting
> from the sources as much as we can, and your suggestion looks like a
> step forward.
>
> Overall, I'm puzzled why some arches enable this while the others
> don't. The next question is why copy_{from,to}_user is so special.
> If this function benefits from being inlined for that particular
> arch or compiler, which functions would also benefit and why?
>
> Reasoning logically, if this WANT_INLINE thing makes sense, it would
> make much more sense if we create a machinery for something like:
>
> unsigned long __arch_inline copy_to_user();
>
>> Also, looks like only powerpc doesn't select INLINE_COPY. Would it be
>> cleaner to change the logic to a flag for OUTLINE_COPY ?
>
> How that? x86_64 outlines it. Check it yourself:
>
> @@ -206,7 +206,9 @@ _inline_copy_to_user(void __user *to, const void *from, unsigned long n)
> #ifdef INLINE_COPY_USER
> # define _copy_to_user _inline_copy_to_user
> # define _copy_from_user _inline_copy_from_user
> +#error INLINE_COPY_USER
> #else
> +#error OUTLINE_COPY_USER
> extern __must_check unsigned long
> _copy_from_user(void *, const void __user *, unsigned long);
>
> If it was really a single arch, it would be worth to discuss what for do
> we need this customization at all.
Hum ... You are right. I don't know why, I thought powerpc was the only one.
Nevertheless, among the 21 architectures in arch/, 17 select
INLINE_COPY_FROM_USER, which means only 4 don't define it.
So it might still be interesting to reverse the logic; then only 4 arches
would have to select it, similar to the example below with powerpc:
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index ad7a2fe63a2a..58743cb0e305 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -186,6 +186,7 @@ config PPC
select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANT_OPTIMIZE_DAX_VMEMMAP if PPC_RADIX_MMU
+ select ARCH_WANT_OUTLINE_USER_COPY
select ARCH_WANTS_MODULES_DATA_IN_VMALLOC if PPC_BOOK3S_32 || PPC_8xx
select ARCH_WEAK_RELEASE_ACQUIRE
select BINFMT_ELF
diff --git a/lib/Kconfig b/lib/Kconfig
index 0f2fb9610647..0106c1facfa4 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -550,6 +550,9 @@ config ARCH_HAS_UACCESS_FLUSHCACHE
config ARCH_HAS_COPY_MC
bool
+config ARCH_WANT_OUTLINE_USER_COPY
+ bool
+
# Temporary. Goes away when all archs are cleaned up
config ARCH_STACKWALK
bool
Christophe
© 2016 - 2026 Red Hat, Inc.