From nobody Tue May 5 10:13:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 40BBD31159C; Mon, 27 Apr 2026 17:17:13 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1777310233; cv=none; b=Fcv4IoDZLX3sKWsgNOQ8O4PaaewmbZwoxkKixz636T8pqy4uWOZFrewhtFadeg+pU6ZgZfIdqfxHlVcJWrQTwl5HcoJPVtz8Am3a5jJRG3t9E3/oM/ZPsAbQIja/uXZT9aI/MAXKbnuhZx6l4P+M76jj9PsNCslk1cwz/y7GQKM= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1777310233; c=relaxed/simple; bh=745VKRe61Lf8afs4o89V2mPmcfGcqMzEwAyCkwUFzR4=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=b1sRQB/PoXn0uxh+TitWaBvdnAW19gbxBnm3Dzp0XHxW99/oz6sU6VR3f4pX9xsvdA6BU1sAwtZWkIiSjUipNvJgXRqPa20U5FMMAoy4fahOKIW7n1Fmw5KAEo0EonS8TO3wKRhiClcV+NL66QBcQt5E+J2PQ6d6aEYjXXAkv/A= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=ccnzw+ca; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="ccnzw+ca" Received: by smtp.kernel.org (Postfix) with ESMTPSA id AC974C2BCB5; Mon, 27 Apr 2026 17:16:59 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1777310232; bh=745VKRe61Lf8afs4o89V2mPmcfGcqMzEwAyCkwUFzR4=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=ccnzw+capg6i/qeCJEEZZnJkaeh2P8NEABjGdQ84B8/8ElHQuhaIfwHK4KyIZjuzW fShA2udW2NQ0Ct7kJfcn/Kb043ljqkq7hTjPsRqwiNPGH9FmgvMogRNjEATWGrG26g 6zW5qtX0OAMj+t+S60HkjDsyGYMrisyc3m/7o9VPvVvQnsaPOrT5y+ZTzyh8Xto7fx AWFGU9d3c0Mo3tNoddRUR02dQmZ5LMWSNNkmTWtszo39IueJ4zsAp4+3T5NcIhGntz 
oVxgjUbjxhRTuJF8KweLMZaWvhi+rfu1/RR3XrUIQOPEazAae4t8SpoXJ0czDjJbhT 7qOjNG8l7cqnA== From: "Christophe Leroy (CS GROUP)" To: Yury Norov , Andrew Morton , Linus Torvalds , David Laight , Thomas Gleixner Cc: "Christophe Leroy (CS GROUP)" , linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org, linux-snps-arc@lists.infradead.org, linux-arm-kernel@lists.infradead.org, linux-mips@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, kvm@vger.kernel.org, linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org, sparclinux@vger.kernel.org, linux-um@lists.infradead.org, dmaengine@vger.kernel.org, linux-efi@vger.kernel.org, linux-fsi@lists.ozlabs.org, amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org, intel-gfx@lists.freedesktop.org, linux-wpan@vger.kernel.org, netdev@vger.kernel.org, linux-wireless@vger.kernel.org, linux-spi@vger.kernel.org, linux-media@vger.kernel.org, linux-staging@lists.linux.dev, linux-serial@vger.kernel.org, linux-usb@vger.kernel.org, xen-devel@lists.xenproject.org, linux-fsdevel@vger.kernel.org, ocfs2-devel@lists.linux.dev, bpf@vger.kernel.org, kasan-dev@googlegroups.com, linux-mm@kvack.org, linux-x25@vger.kernel.org, rust-for-linux@vger.kernel.org, linux-sound@vger.kernel.org, sound-open-firmware@alsa-project.org, linux-csky@vger.kernel.org, linux-hexagon@vger.kernel.org, loongarch@lists.linux.dev, linux-m68k@lists.linux-m68k.org, linux-openrisc@vger.kernel.org, linux-parisc@vger.kernel.org, linux-sh@vger.kernel.org, linux-arch@vger.kernel.org Subject: [RFC PATCH v1 1/9] uaccess: Split check_zeroed_user() out of usercopy.c Date: Mon, 27 Apr 2026 19:13:42 +0200 Message-ID: X-Mailer: git-send-email 2.49.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Developer-Signature: v=1; a=openpgp-sha256; l=4389; i=chleroy@kernel.org; h=from:subject:message-id; bh=745VKRe61Lf8afs4o89V2mPmcfGcqMzEwAyCkwUFzR4=; 
b=owGbwMvMwCV2d0KB2p7V54MZT6slMWS+nxmtvkzq83WmyICs092/GNmylHyYwx1uy/R0rjqso Tc19NiJjlIWBjEuBlkxRZbj/7l3zej6kpo/dZc+zBxWJpAhDFycAjCRrBxGhr1T21lnr9ow8bLw mlcX9LlDfR53LmTbs/fCv2dGAh23RTkY/gpGWFpU+sbMuNm2suHwI9OT2xZsfrLc1dbD4+idxHd 72DkA X-Developer-Key: i=chleroy@kernel.org; a=openpgp; fpr=10FFE6F8B390DE17ACC2632368A92FEB01B8DD78 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Until commit f5a1a536fa14 ("lib: introduce copy_struct_from_user() helper"), lib/usercopy.c contained only the out-of-line version of user copy functions. That commit added function check_zeroed_user() into the same file. Move that function into a new file named usercheck.c, so that next patch can change usercopy.c build to a conditional build. Signed-off-by: Christophe Leroy (CS GROUP) --- lib/Makefile | 1 + lib/{usercopy.c =3D> usercheck.c} | 22 ------------ lib/usercopy.c | 62 --------------------------------- 3 files changed, 1 insertion(+), 84 deletions(-) copy lib/{usercopy.c =3D> usercheck.c} (73%) diff --git a/lib/Makefile b/lib/Makefile index f33a24bf1c19..7c0334d7675b 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -59,6 +59,7 @@ obj-y +=3D bcd.o sort.o parser.o debug_locks.o random32.o= \ percpu-refcount.o rhashtable.o base64.o \ once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \ generic-radix-tree.o bitmap-str.o +obj-y +=3D usercheck.o obj-y +=3D string_helpers.o obj-y +=3D hexdump.o obj-$(CONFIG_TEST_HEXDUMP) +=3D test_hexdump.o diff --git a/lib/usercopy.c b/lib/usercheck.c similarity index 73% copy from lib/usercopy.c copy to lib/usercheck.c index b00a3a957de6..15b0d9a18435 100644 --- a/lib/usercopy.c +++ b/lib/usercheck.c @@ -2,32 +2,10 @@ #include #include #include -#include -#include #include -#include -#include #include #include =20 -/* out-of-line parts */ - -#if !defined(INLINE_COPY_FROM_USER) -unsigned long _copy_from_user(void *to, const void __user *from, unsigned = long n) -{ - return _inline_copy_from_user(to, from, 
n); -} -EXPORT_SYMBOL(_copy_from_user); -#endif - -#if !defined(INLINE_COPY_TO_USER) -unsigned long _copy_to_user(void __user *to, const void *from, unsigned lo= ng n) -{ - return _inline_copy_to_user(to, from, n); -} -EXPORT_SYMBOL(_copy_to_user); -#endif - /** * check_zeroed_user: check if a userspace buffer only contains zero bytes * @from: Source address, in userspace. diff --git a/lib/usercopy.c b/lib/usercopy.c index b00a3a957de6..7a93f56d81dd 100644 --- a/lib/usercopy.c +++ b/lib/usercopy.c @@ -1,14 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 -#include -#include #include -#include -#include -#include -#include -#include #include -#include =20 /* out-of-line parts */ =20 @@ -27,57 +19,3 @@ unsigned long _copy_to_user(void __user *to, const void = *from, unsigned long n) } EXPORT_SYMBOL(_copy_to_user); #endif - -/** - * check_zeroed_user: check if a userspace buffer only contains zero bytes - * @from: Source address, in userspace. - * @size: Size of buffer. - * - * This is effectively shorthand for "memchr_inv(from, 0, size) =3D=3D NUL= L" for - * userspace addresses (and is more efficient because we don't care where = the - * first non-zero byte is). - * - * Returns: - * * 0: There were non-zero bytes present in the buffer. - * * 1: The buffer was full of zero bytes. - * * -EFAULT: access to userspace failed. 
- */ -int check_zeroed_user(const void __user *from, size_t size) -{ - unsigned long val; - uintptr_t align =3D (uintptr_t) from % sizeof(unsigned long); - - if (unlikely(size =3D=3D 0)) - return 1; - - from -=3D align; - size +=3D align; - - if (!user_read_access_begin(from, size)) - return -EFAULT; - - unsafe_get_user(val, (unsigned long __user *) from, err_fault); - if (align) - val &=3D ~aligned_byte_mask(align); - - while (size > sizeof(unsigned long)) { - if (unlikely(val)) - goto done; - - from +=3D sizeof(unsigned long); - size -=3D sizeof(unsigned long); - - unsafe_get_user(val, (unsigned long __user *) from, err_fault); - } - - if (size < sizeof(unsigned long)) - val &=3D aligned_byte_mask(size); - -done: - user_read_access_end(); - return (val =3D=3D 0); -err_fault: - user_read_access_end(); - return -EFAULT; -} -EXPORT_SYMBOL(check_zeroed_user); --=20 2.49.0 From nobody Tue May 5 10:13:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id B0DC33770B; Mon, 27 Apr 2026 17:17:32 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1777310252; cv=none; b=j7cBcKTYNlba7cJV2AV0a9gLsn3bCWWV1FXKptQDehLbBF+nudT5ldad3Q2KGFLE7APlRzXlyH8rz0x9DaldbToDuE3q/yOYC7z+3va78v5M8+8cJZ+EKZRacNHqNCfCRpE2XPMTJxPp3fkPG3Hc3q1Hlpv9OIRwZJ3NqpBL/ZU= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1777310252; c=relaxed/simple; bh=2A3pp34TaOBrBLCc7n3D/0VUADnJLMBU+ywYlmILDq0=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=VvJPG62fa9uCUAI3nGeWO54Fc3zZ/V6rMDrvh1CjjmzgOcsLMw7gfi3/FLE02k4w/SqlRkSxPP3ij8T0U3EbY7oHN+v6y2Em8iPVjKXetQUJrvqT/gNBhVwUrmDbg3yRraukPIGK3KUUrAUKAU+n8Ep/ecN7TY3q6oV+vYKw4Ns= 
ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=iZa57++f; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="iZa57++f" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 9AEAAC2BCB5; Mon, 27 Apr 2026 17:17:13 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1777310252; bh=2A3pp34TaOBrBLCc7n3D/0VUADnJLMBU+ywYlmILDq0=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=iZa57++f4J+GqwvmN6cOlz8Y1H7GNDryi4ZRD+CJk+hBQU7Iobf+tE7/GXYinO2zm iEth1up0MDkLOAxzQiz5mqcfHR3P06TXmSl3XzIFaDPQuFHo3UkJmapo/kW88jy3aQ 3aznlfwGIxcYhdaW4V5tVjatVhcyAHWEKfRp+CGH8zAHmw+dzciaz6iCaNPprV57bn 8osxBss4ioepFyLm+2kR+UIAIIrgH8FbX9Dl39gcSvCHEwtnH4JUk+pOBX2jvh7BG3 7M8Bd9CW5MVAHSPSRYqg08B9eeC3kSJm80fJ6QSe91ed2+rsgDjp3uvvAKjOOY8v3a 6k1/LUiGGTPUw== From: "Christophe Leroy (CS GROUP)" To: Yury Norov , Andrew Morton , Linus Torvalds , David Laight , Thomas Gleixner Cc: "Christophe Leroy (CS GROUP)" , linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org, linux-snps-arc@lists.infradead.org, linux-arm-kernel@lists.infradead.org, linux-mips@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, kvm@vger.kernel.org, linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org, sparclinux@vger.kernel.org, linux-um@lists.infradead.org, dmaengine@vger.kernel.org, linux-efi@vger.kernel.org, linux-fsi@lists.ozlabs.org, amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org, intel-gfx@lists.freedesktop.org, linux-wpan@vger.kernel.org, netdev@vger.kernel.org, linux-wireless@vger.kernel.org, linux-spi@vger.kernel.org, linux-media@vger.kernel.org, linux-staging@lists.linux.dev, linux-serial@vger.kernel.org, linux-usb@vger.kernel.org, xen-devel@lists.xenproject.org, linux-fsdevel@vger.kernel.org, ocfs2-devel@lists.linux.dev, bpf@vger.kernel.org, 
kasan-dev@googlegroups.com, linux-mm@kvack.org, linux-x25@vger.kernel.org, rust-for-linux@vger.kernel.org, linux-sound@vger.kernel.org, sound-open-firmware@alsa-project.org, linux-csky@vger.kernel.org, linux-hexagon@vger.kernel.org, loongarch@lists.linux.dev, linux-m68k@lists.linux-m68k.org, linux-openrisc@vger.kernel.org, linux-parisc@vger.kernel.org, linux-sh@vger.kernel.org, linux-arch@vger.kernel.org Subject: [RFC PATCH v1 2/9] uaccess: Convert INLINE_COPY_{TO/FROM}_USER to kconfig and reduce ifdefery Date: Mon, 27 Apr 2026 19:13:43 +0200 Message-ID: <9fe875d2f55af59c12708336c571a46038528678.1777306795.git.chleroy@kernel.org> X-Mailer: git-send-email 2.49.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Developer-Signature: v=1; a=openpgp-sha256; l=18999; i=chleroy@kernel.org; h=from:subject:message-id; bh=2A3pp34TaOBrBLCc7n3D/0VUADnJLMBU+ywYlmILDq0=; b=owGbwMvMwCV2d0KB2p7V54MZT6slMWS+nxl942rk7qV/z0wpsopc/EjgrOKra6+/qq3zDzp1o 3Cx5MQvcR2lLAxiXAyyYoosx/9z75rR9SU1f+oufZg5rEwgQxi4OAVgIuKajAx/ndKvMf5+vP6+ 0DUOzaI/8u977T8/tn8jYyzy8FHU4dqzDP9dp5oyP1vg/Kml4FbUFMnSvxpN1/NNNGPF159+JNd bVcIDAA== X-Developer-Key: i=chleroy@kernel.org; a=openpgp; fpr=10FFE6F8B390DE17ACC2632368A92FEB01B8DD78 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Among the 21 architectures supported by the kernel, 16 define both INLINE_COPY_TO_USER and INLINE_COPY_FROM_USER while the 5 other ones don't define any of the two. To simplify and reduce risk of mistakes, convert them to a single kconfig item named CONFIG_ARCH_WANTS_NOINLINE_COPY which will be selected by the 5 architectures that don't want inlined copy. To minimise complication in a later patch, also remove ifdefery and replace it with IS_ENABLED(). 
Signed-off-by: Christophe Leroy (CS GROUP) --- arch/alpha/Kconfig | 1 + arch/arc/include/asm/uaccess.h | 3 --- arch/arm/include/asm/uaccess.h | 2 -- arch/arm64/include/asm/uaccess.h | 3 --- arch/csky/Kconfig | 1 + arch/hexagon/include/asm/uaccess.h | 3 --- arch/loongarch/include/asm/uaccess.h | 3 --- arch/m68k/include/asm/uaccess.h | 3 --- arch/microblaze/include/asm/uaccess.h | 2 -- arch/mips/include/asm/uaccess.h | 3 --- arch/nios2/include/asm/uaccess.h | 2 -- arch/openrisc/include/asm/uaccess.h | 2 -- arch/parisc/include/asm/uaccess.h | 3 --- arch/powerpc/Kconfig | 1 + arch/riscv/Kconfig | 1 + arch/s390/include/asm/uaccess.h | 3 --- arch/sh/include/asm/uaccess.h | 2 -- arch/sparc/include/asm/uaccess_32.h | 3 --- arch/sparc/include/asm/uaccess_64.h | 2 -- arch/um/include/asm/uaccess.h | 3 --- arch/x86/Kconfig | 1 + arch/xtensa/include/asm/uaccess.h | 2 -- include/asm-generic/uaccess.h | 2 -- include/linux/uaccess.h | 32 ++++++++++++--------------- lib/Kconfig | 3 +++ lib/Makefile | 3 ++- lib/usercopy.c | 4 ---- rust/helpers/uaccess.c | 2 +- 28 files changed, 25 insertions(+), 70 deletions(-) diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 7b7dafe7d9df..65e533cead6b 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -11,6 +11,7 @@ config ALPHA select ARCH_NO_PREEMPT select ARCH_NO_SG_CHAIN select ARCH_USE_CMPXCHG_LOCKREF + select ARCH_WANTS_NOINLINE_COPY_USER select FORCE_PCI select PCI_DOMAINS if PCI select PCI_SYSCALL if PCI diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index 1e8809ea000a..e8b161b37a03 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -628,9 +628,6 @@ static inline unsigned long __clear_user(void __user *t= o, unsigned long n) return res; } =20 -#define INLINE_COPY_TO_USER -#define INLINE_COPY_FROM_USER - #define __clear_user __clear_user =20 #include diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index d6ae80b5df36..7280c162bb71 
100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -616,8 +616,6 @@ raw_copy_to_user(void __user *to, const void *from, uns= igned long n) } #define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0) #endif -#define INLINE_COPY_TO_USER -#define INLINE_COPY_FROM_USER =20 static inline unsigned long __must_check clear_user(void __user *to, unsig= ned long n) { diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uacc= ess.h index b0c83a08dda9..1e20ec91b56f 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -456,9 +456,6 @@ do { \ unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \ } while (0) =20 -#define INLINE_COPY_TO_USER -#define INLINE_COPY_FROM_USER - extern unsigned long __must_check __arch_clear_user(void __user *to, unsig= ned long n); static inline unsigned long __must_check __clear_user(void __user *to, uns= igned long n) { diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index 4331313a42ff..d010d7eb47bf 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -40,6 +40,7 @@ config CSKY select ARCH_NEED_CMPXCHG_1_EMU select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace) select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT + select ARCH_WANTS_NOINLINE_COPY_USER select COMMON_CLK select CLKSRC_MMIO select CSKY_MPINTC if CPU_CK860 diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/= uaccess.h index bff77efc0d9a..4bf863217636 100644 --- a/arch/hexagon/include/asm/uaccess.h +++ b/arch/hexagon/include/asm/uaccess.h @@ -26,9 +26,6 @@ unsigned long raw_copy_from_user(void *to, const void __u= ser *from, unsigned long n); unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n); -#define INLINE_COPY_FROM_USER -#define INLINE_COPY_TO_USER - __kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long coun= t); #define __clear_user(a, s) __clear_user_hexagon((a), (s)) =20 diff --git 
a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/= asm/uaccess.h index 438269313e78..72a04ac88549 100644 --- a/arch/loongarch/include/asm/uaccess.h +++ b/arch/loongarch/include/asm/uaccess.h @@ -292,9 +292,6 @@ raw_copy_to_user(void __user *to, const void *from, uns= igned long n) return __copy_user((__force void *)to, from, n); } =20 -#define INLINE_COPY_FROM_USER -#define INLINE_COPY_TO_USER - /* * __clear_user: - Zero a block of memory in user space, with less checkin= g. * @addr: Destination address, in user space. diff --git a/arch/m68k/include/asm/uaccess.h b/arch/m68k/include/asm/uacces= s.h index 64914872a5c9..20e249a6ad07 100644 --- a/arch/m68k/include/asm/uaccess.h +++ b/arch/m68k/include/asm/uaccess.h @@ -377,9 +377,6 @@ raw_copy_to_user(void __user *to, const void *from, uns= igned long n) return __constant_copy_to_user(to, from, n); return __generic_copy_to_user(to, from, n); } -#define INLINE_COPY_FROM_USER -#define INLINE_COPY_TO_USER - #define __get_kernel_nofault(dst, src, type, err_label) \ do { \ type *__gk_dst =3D (type *)(dst); \ diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/includ= e/asm/uaccess.h index 3aab2f17e046..3355f541e12a 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -250,8 +250,6 @@ raw_copy_to_user(void __user *to, const void *from, uns= igned long n) { return __copy_tofrom_user(to, (__force const void __user *)from, n); } -#define INLINE_COPY_FROM_USER -#define INLINE_COPY_TO_USER =20 /* * Copy a null terminated string from userspace. 
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uacces= s.h index c0cede273c7c..8714caefbac8 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -433,9 +433,6 @@ raw_copy_to_user(void __user *to, const void *from, uns= igned long n) return __cu_len_r; } =20 -#define INLINE_COPY_FROM_USER -#define INLINE_COPY_TO_USER - extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size); =20 /* diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uacc= ess.h index 6ccc9a232c23..46d7312a1c96 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -57,8 +57,6 @@ extern unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n); extern unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n); -#define INLINE_COPY_FROM_USER -#define INLINE_COPY_TO_USER =20 extern long strncpy_from_user(char *__to, const char __user *__from, long __len); diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/as= m/uaccess.h index d6500a374e18..c84effde867a 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -218,8 +218,6 @@ raw_copy_to_user(void __user *to, const void *from, uns= igned long size) { return __copy_tofrom_user((__force void *)to, from, size); } -#define INLINE_COPY_FROM_USER -#define INLINE_COPY_TO_USER =20 extern unsigned long __clear_user(void __user *addr, unsigned long size); =20 diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/ua= ccess.h index 6c531d2c847e..1dd6a1dd653f 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -197,7 +197,4 @@ unsigned long __must_check raw_copy_to_user(void __user= *dst, const void *src, unsigned long len); unsigned long __must_check raw_copy_from_user(void *dst, const void __user= *src, unsigned long len); -#define INLINE_COPY_TO_USER -#define 
INLINE_COPY_FROM_USER - #endif /* __PARISC_UACCESS_H */ diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index e93df95b79e7..6816f402fe3d 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -187,6 +187,7 @@ config PPC select ARCH_WANT_LD_ORPHAN_WARN select ARCH_WANT_OPTIMIZE_DAX_VMEMMAP if PPC_RADIX_MMU select ARCH_WANTS_MODULES_DATA_IN_VMALLOC if PPC_BOOK3S_32 || PPC_8xx + select ARCH_WANTS_NOINLINE_COPY_USER select ARCH_WEAK_RELEASE_ACQUIRE select AUDIT_ARCH_COMPAT_GENERIC select BINFMT_ELF diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index d235396c4514..492b920c1a51 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -88,6 +88,7 @@ config RISCV select ARCH_WANT_OPTIMIZE_DAX_VMEMMAP select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP select ARCH_WANTS_NO_INSTR + select ARCH_WANTS_NOINLINE_COPY_USER if MMU select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE select ARCH_WEAK_RELEASE_ACQUIRE if ARCH_USE_QUEUED_SPINLOCKS select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uacces= s.h index dff035372601..2e0472c20da0 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -30,9 +30,6 @@ void debug_user_asce(int exit); #define uaccess_kmsan_or_inline __always_inline #endif =20 -#define INLINE_COPY_FROM_USER -#define INLINE_COPY_TO_USER - static uaccess_kmsan_or_inline __must_check unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long size) { diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index a79609eb14be..0cd75308e6d3 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h @@ -95,8 +95,6 @@ raw_copy_to_user(void __user *to, const void *from, unsig= ned long n) { return __copy_user((__force void *)to, from, n); } -#define INLINE_COPY_FROM_USER -#define INLINE_COPY_TO_USER =20 /* * Clear the area and return remaining number of bytes diff --git 
a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/u= access_32.h index 43284b6ec46a..e01f43c6421c 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -190,9 +190,6 @@ static inline unsigned long raw_copy_from_user(void *to= , const void __user *from return __copy_user((__force void __user *) to, from, n); } =20 -#define INLINE_COPY_FROM_USER -#define INLINE_COPY_TO_USER - static inline unsigned long __clear_user(void __user *addr, unsigned long = size) { unsigned long ret; diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/u= access_64.h index b825a5dd0210..62ee0b074fec 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -231,8 +231,6 @@ unsigned long __must_check raw_copy_from_user(void *to, unsigned long __must_check raw_copy_to_user(void __user *to, const void *from, unsigned long size); -#define INLINE_COPY_FROM_USER -#define INLINE_COPY_TO_USER =20 unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h index 0df9ea4abda8..1e14260c7f0f 100644 --- a/arch/um/include/asm/uaccess.h +++ b/arch/um/include/asm/uaccess.h @@ -27,9 +27,6 @@ static inline int __access_ok(const void __user *ptr, uns= igned long size); #define __access_ok __access_ok #define __clear_user __clear_user =20 -#define INLINE_COPY_FROM_USER -#define INLINE_COPY_TO_USER - #include =20 static inline int __access_ok(const void __user *ptr, unsigned long size) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index f3f7cb01d69d..c1e58d8c6864 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -143,6 +143,7 @@ config X86 select ARCH_WANTS_CLOCKSOURCE_READ_INLINE if X86_64 select ARCH_WANTS_DYNAMIC_TASK_STRUCT select ARCH_WANTS_NO_INSTR + select ARCH_WANTS_NOINLINE_COPY_USER select ARCH_WANT_GENERAL_HUGETLB select ARCH_WANT_HUGE_PMD_SHARE if X86_64 select 
ARCH_WANT_LD_ORPHAN_WARN diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/ua= ccess.h index 56aec6d504fe..f9e1623a7be9 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -237,8 +237,6 @@ raw_copy_to_user(void __user *to, const void *from, uns= igned long n) prefetch(from); return __xtensa_copy_user((__force void *)to, from, n); } -#define INLINE_COPY_FROM_USER -#define INLINE_COPY_TO_USER =20 /* * We need to return the number of bytes not cleared. Our memset() diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index b276f783494c..fb33a71fd24e 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h @@ -91,8 +91,6 @@ raw_copy_to_user(void __user *to, const void *from, unsig= ned long n) memcpy((void __force *)to, from, n); return 0; } -#define INLINE_COPY_FROM_USER -#define INLINE_COPY_TO_USER #endif /* CONFIG_UACCESS_MEMCPY */ =20 /* diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 56328601218c..bd1201c81d94 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -83,8 +83,8 @@ * with that. They should not be used directly; they are used to implement * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user_inatomic(= )) * that are used instead. Out of those, __... ones are inlined. Plain - * copy_{to,from}_user() might or might not be inlined. If you want them - * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER. + * copy_{to,from}_user() might or might not be inlined. If you don't want= them + * inlined, select CONFIG_ARCH_WANTS_NOINLINE_COPY_USER. * * NOTE: only copy_from_user() zero-pads the destination in case of short = copy. 
* Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything @@ -157,8 +157,8 @@ __copy_to_user(void __user *to, const void *from, unsig= ned long n) } =20 /* - * Architectures that #define INLINE_COPY_TO_USER use this function - * directly in the normal copy_to/from_user(), the other ones go + * Architectures that don't select CONFIG_ARCH_WANTS_NOINLINE_COPY_USER use + * this function directly in the normal copy_to/from_user(), the other one= s go * through an extern _copy_to/from_user(), which expands the same code * here. */ @@ -190,10 +190,9 @@ _inline_copy_from_user(void *to, const void __user *fr= om, unsigned long n) memset(to + (n - res), 0, res); return res; } -#ifndef INLINE_COPY_FROM_USER + extern __must_check unsigned long _copy_from_user(void *, const void __user *, unsigned long); -#endif =20 static inline __must_check unsigned long _inline_copy_to_user(void __user *to, const void *from, unsigned long n) @@ -207,21 +206,19 @@ _inline_copy_to_user(void __user *to, const void *fro= m, unsigned long n) } return n; } -#ifndef INLINE_COPY_TO_USER + extern __must_check unsigned long _copy_to_user(void __user *, const void *, unsigned long); -#endif =20 static __always_inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { if (!check_copy_size(to, n, false)) return n; -#ifdef INLINE_COPY_FROM_USER - return _inline_copy_from_user(to, from, n); -#else - return _copy_from_user(to, from, n); -#endif + if (IS_ENABLED(CONFIG_ARCH_WANTS_NOINLINE_COPY_USER)) + return _copy_from_user(to, from, n); + else + return _inline_copy_from_user(to, from, n); } =20 static __always_inline unsigned long __must_check @@ -230,11 +227,10 @@ copy_to_user(void __user *to, const void *from, unsig= ned long n) if (!check_copy_size(from, n, true)) return n; =20 -#ifdef INLINE_COPY_TO_USER - return _inline_copy_to_user(to, from, n); -#else - return _copy_to_user(to, from, n); -#endif + if (IS_ENABLED(CONFIG_ARCH_WANTS_NOINLINE_COPY_USER)) + 
return _copy_to_user(to, from, n); + else + return _inline_copy_to_user(to, from, n); } =20 #ifndef copy_mc_to_kernel diff --git a/lib/Kconfig b/lib/Kconfig index 00a9509636c1..a2e07d4dd2bf 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -68,6 +68,9 @@ config ARCH_HAS_STRNCPY_FROM_USER config ARCH_HAS_STRNLEN_USER bool =20 +config ARCH_WANTS_NOINLINE_COPY_USER + bool + config GENERIC_STRNCPY_FROM_USER def_bool !ARCH_HAS_STRNCPY_FROM_USER =20 diff --git a/lib/Makefile b/lib/Makefile index 7c0334d7675b..f4d577910671 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -57,9 +57,10 @@ obj-y +=3D bcd.o sort.o parser.o debug_locks.o random32.= o \ list_sort.o uuid.o iov_iter.o clz_ctz.o \ bsearch.o find_bit.o llist.o lwq.o memweight.o kfifo.o \ percpu-refcount.o rhashtable.o base64.o \ - once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \ + once.o refcount.o rcuref.o errseq.o bucket_locks.o \ generic-radix-tree.o bitmap-str.o obj-y +=3D usercheck.o +obj-$(CONFIG_ARCH_WANTS_NOINLINE_COPY_USER) +=3D usercopy.o obj-y +=3D string_helpers.o obj-y +=3D hexdump.o obj-$(CONFIG_TEST_HEXDUMP) +=3D test_hexdump.o diff --git a/lib/usercopy.c b/lib/usercopy.c index 7a93f56d81dd..d2deb4b0a3c5 100644 --- a/lib/usercopy.c +++ b/lib/usercopy.c @@ -4,18 +4,14 @@ =20 /* out-of-line parts */ =20 -#if !defined(INLINE_COPY_FROM_USER) unsigned long _copy_from_user(void *to, const void __user *from, unsigned = long n) { return _inline_copy_from_user(to, from, n); } EXPORT_SYMBOL(_copy_from_user); -#endif =20 -#if !defined(INLINE_COPY_TO_USER) unsigned long _copy_to_user(void __user *to, const void *from, unsigned lo= ng n) { return _inline_copy_to_user(to, from, n); } EXPORT_SYMBOL(_copy_to_user); -#endif diff --git a/rust/helpers/uaccess.c b/rust/helpers/uaccess.c index d9625b9ee046..01de4fbbcc84 100644 --- a/rust/helpers/uaccess.c +++ b/rust/helpers/uaccess.c @@ -14,7 +14,7 @@ rust_helper_copy_to_user(void __user *to, const void *fro= m, unsigned long n) return copy_to_user(to, from, 
n); } =20 -#ifdef INLINE_COPY_FROM_USER +#ifndef CONFIG_ARCH_WANTS_NOINLINE_COPY_USER __rust_helper unsigned long rust_helper__copy_from_user(void *to, const void __user *fro= m, unsigned long n) { --=20 2.49.0 From nobody Tue May 5 10:13:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id C2A0631B82B; Mon, 27 Apr 2026 17:17:55 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1777310275; cv=none; b=XaoOrR2Ja+PAA1m+bZeyUPtkPeBkOMbXCEXQer/8AA8hG6bU3ZuGKS2Q+VX2UPgo7/9/hF0DYjXKpaYNViS+HAeAaiNne0xl7PM3SsYiVivTqiM4/KfFGpxoCKqwo5UPeiZbhbqnytFaiqru7LCl9ng2+3VYrdY/ROXAR4sQFwo= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1777310275; c=relaxed/simple; bh=W5kkD2qOaf9p6y+JEmiY6YQ2keZoelr0Bif5djAA9uo=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=CTohxNhSmoZ211/BZ98jHlhAM9hP1Qs33KIcX24lIwwbWzyxD3kQdMtCC2DycZcZ00Yd8Nqq3ZYL83DlXGUvGLqOWBC25EmwovXaDta9IxHSTOOJM8sNUlimGj/hyBlD/oxNuDzpPYn7x/hD14pLY0WGyfb6suCYPjJIYfeMkAc= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=dWIdgH61; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="dWIdgH61" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 5B91DC2BCB9; Mon, 27 Apr 2026 17:17:32 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1777310275; bh=W5kkD2qOaf9p6y+JEmiY6YQ2keZoelr0Bif5djAA9uo=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; 
b=dWIdgH61f3FZjJK5/4M8qA2Z3PdTv2PxzuZWcauBhk9DdVxbBkauIdrH8T66hguCp RKByLYUodinjcBlbYhttuH2LGK1CACi5Chox46drqrLCValqYMs6iS6hvWfa8xjP86 SRX8mzMn7HjzzZ5PMWVnIWvrJFLR/iyEIDn4Oi2BFSrDeboqSnIrX7WfhL6Ub+tY2g I0hH+wNzy6c//f0TkDzcuax+fX8WXY8rTh4FJ51XM25PFd/tzjClOdpIOdXk4NjGqA oBIlgUIOAdKFgg5RvdKRR4Lqk1Q+lbng1I1sXXDqtORIX/zFE6g7Yv8vrz97KbzGDV XrdmpG3tF9W3Q== From: "Christophe Leroy (CS GROUP)" To: Yury Norov , Andrew Morton , Linus Torvalds , David Laight , Thomas Gleixner Cc: "Christophe Leroy (CS GROUP)" , linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org, linux-snps-arc@lists.infradead.org, linux-arm-kernel@lists.infradead.org, linux-mips@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, kvm@vger.kernel.org, linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org, sparclinux@vger.kernel.org, linux-um@lists.infradead.org, dmaengine@vger.kernel.org, linux-efi@vger.kernel.org, linux-fsi@lists.ozlabs.org, amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org, intel-gfx@lists.freedesktop.org, linux-wpan@vger.kernel.org, netdev@vger.kernel.org, linux-wireless@vger.kernel.org, linux-spi@vger.kernel.org, linux-media@vger.kernel.org, linux-staging@lists.linux.dev, linux-serial@vger.kernel.org, linux-usb@vger.kernel.org, xen-devel@lists.xenproject.org, linux-fsdevel@vger.kernel.org, ocfs2-devel@lists.linux.dev, bpf@vger.kernel.org, kasan-dev@googlegroups.com, linux-mm@kvack.org, linux-x25@vger.kernel.org, rust-for-linux@vger.kernel.org, linux-sound@vger.kernel.org, sound-open-firmware@alsa-project.org, linux-csky@vger.kernel.org, linux-hexagon@vger.kernel.org, loongarch@lists.linux.dev, linux-m68k@lists.linux-m68k.org, linux-openrisc@vger.kernel.org, linux-parisc@vger.kernel.org, linux-sh@vger.kernel.org, linux-arch@vger.kernel.org Subject: [RFC PATCH v1 3/9] x86/umip: Be stricter in fixup_umip_exception() Date: Mon, 27 Apr 2026 19:13:44 +0200 Message-ID: <9e8e43d4f81d8f8b6f68311f1c6f859d718d36e4.1777306795.git.chleroy@kernel.org> X-Mailer: git-send-email 2.49.0 
In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Developer-Signature: v=1; a=openpgp-sha256; l=1000; i=chleroy@kernel.org; h=from:subject:message-id; bh=W5kkD2qOaf9p6y+JEmiY6YQ2keZoelr0Bif5djAA9uo=; b=owGbwMvMwCV2d0KB2p7V54MZT6slMWS+nxnda1Jdur5TX+RgO1P4hHufdruHR358Lz3BYPORx G8pRRuZOkpZGMS4GGTFFFmO/+feNaPrS2r+1F36MHNYmUCGMHBxCsBE9kkxMjTtX+zW2RH44ELN UZ/TjtH3Kj+Uzdh01e2y7qEZTgrchx8y/E9kjcpJ3dj/tNZwXUT0XHGmyS9k3dX75mj427u7JaY VMwMA X-Developer-Key: i=chleroy@kernel.org; a=openpgp; fpr=10FFE6F8B390DE17ACC2632368A92FEB01B8DD78 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" fixup_umip_exception() calls copy_to_user() and checks whether the returned value is strictly positive. A subsequent patch will change the return of copy_to_user() to return -EFAULT in case of error. Change the test to checking that the result is not 0. At the time being copy_to_user() return an unsigned value so 'strictly positive' is the same as 'not 0'. Signed-off-by: Christophe Leroy (CS GROUP) --- arch/x86/kernel/umip.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c index 3ce99cbcf187..dfff28ea1dea 100644 --- a/arch/x86/kernel/umip.c +++ b/arch/x86/kernel/umip.c @@ -409,7 +409,7 @@ bool fixup_umip_exception(struct pt_regs *regs) return false; =20 nr_copied =3D copy_to_user(uaddr, dummy_data, dummy_data_size); - if (nr_copied > 0) { + if (nr_copied) { /* * If copy fails, send a signal and tell caller that * fault was fixed up. 
--=20 2.49.0 From nobody Tue May 5 10:13:01 2026 Delivered-To: importer@patchew.org Received-SPF: pass (zohomail.com: domain of lists.xenproject.org designates 192.237.175.120 as permitted sender) client-ip=192.237.175.120; envelope-from=xen-devel-bounces@lists.xenproject.org; helo=lists.xenproject.org; Authentication-Results: mx.zohomail.com; dkim=pass; spf=pass (zohomail.com: domain of lists.xenproject.org designates 192.237.175.120 as permitted sender) smtp.mailfrom=xen-devel-bounces@lists.xenproject.org; dmarc=pass(p=quarantine dis=none) header.from=kernel.org ARC-Seal: i=1; a=rsa-sha256; t=1777310325; cv=none; d=zohomail.com; s=zohoarc; b=a/glJMXSf6iuOBVMCvZgSK8+ylk7NVBM7wrwdiXRX9975yxbklRAC9hPRWu8GyQm6Lr5+m7/cjWhh4M7U7nODk3PPpM9rexaBa1SBqtjM3kEkQ6GcDR2AtPiMkxTDzTKtsSq0mkZSjueFs8m0ycffdbr8qDSVZNSLEwf9MJ+Ils= ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=zohomail.com; s=zohoarc; t=1777310325; h=Content-Transfer-Encoding:Cc:Cc:Date:Date:From:From:In-Reply-To:List-Subscribe:List-Post:List-Id:List-Help:List-Unsubscribe:MIME-Version:Message-ID:References:Sender:Subject:Subject:To:To:Message-Id:Reply-To; bh=sOGbn9OoG8rBGqzI2kLVy+02fyWRJuxPwbq3dZ0vyfI=; b=PVl9IVLxzgDuqSc0IsBHLiid9FrxXLW+5ug2IK9MgMFYmdCfyGKLW3agA415W2YXMozoww9hR9NkmIcHYo4Ix66KEmI/IQDOHC+xjmRoutPY6NL4XFo6SlgiSKgNJW9RmQIbXWiZNAGVrGaaHTgj9VWBRL0fEbcITFtC8YOsuYw= ARC-Authentication-Results: i=1; mx.zohomail.com; dkim=pass; spf=pass (zohomail.com: domain of lists.xenproject.org designates 192.237.175.120 as permitted sender) smtp.mailfrom=xen-devel-bounces@lists.xenproject.org; dmarc=pass header.from= (p=quarantine dis=none) Return-Path: Received: from lists.xenproject.org (lists.xenproject.org [192.237.175.120]) by mx.zohomail.com with SMTPS id 1777310325413865.6576143571805; Mon, 27 Apr 2026 10:18:45 -0700 (PDT) Received: from list by lists.xenproject.org with outflank-mailman.1295154.1571845 (Exim 4.92) (envelope-from ) id 1wHPbB-0000Tl-Nv; Mon, 27 Apr 2026 17:18:21 +0000 Received: by 
outflank-mailman (output) from mailman id 1295154.1571845; Mon, 27 Apr 2026 17:18:21 +0000 Received: from localhost ([127.0.0.1] helo=lists.xenproject.org) by lists.xenproject.org with esmtp (Exim 4.92) (envelope-from ) id 1wHPbB-0000Te-LA; Mon, 27 Apr 2026 17:18:21 +0000 Received: by outflank-mailman (input) for mailman id 1295154; Mon, 27 Apr 2026 17:18:20 +0000 Received: from mx.expurgate.net ([195.190.135.10]) by lists.xenproject.org with esmtp (Exim 4.92) (envelope-from ) id 1wHPbA-0000QZ-7S for xen-devel@lists.xenproject.org; Mon, 27 Apr 2026 17:18:20 +0000 Received: from mx.expurgate.net (helo=localhost) by mx.expurgate.net with esmtp id 1wHPb8-00Dj0u-2c for xen-devel@lists.xenproject.org; Mon, 27 Apr 2026 19:18:19 +0200 Received: from [10.42.69.9] (helo=localhost) by localhost with ESMTP (eXpurgate MTA 0.9.1) (envelope-from ) id 69ef9a53-e002-0a2a0a5209dd-0a2a45099bd4-10 for ; Mon, 27 Apr 2026 19:18:19 +0200 Received: from [172.234.252.31] (helo=sea.source.kernel.org) by tlsNG-bad1c0.mxtls.expurgate.net with ESMTPS (eXpurgate 4.56.1) (envelope-from ) id 69ef9a59-2497-0a2a45090019-aceafc1fe118-3 for ; Mon, 27 Apr 2026 19:18:19 +0200 Received: from smtp.kernel.org (transwarp.subspace.kernel.org [100.75.92.58]) by sea.source.kernel.org (Postfix) with ESMTP id 8B93C440C2; Mon, 27 Apr 2026 17:18:17 +0000 (UTC) Received: by smtp.kernel.org (Postfix) with ESMTPSA id 08C06C2BCF4; Mon, 27 Apr 2026 17:17:56 +0000 (UTC) X-Outflank-Mailman: Message body and most headers restored to incoming version X-BeenThere: xen-devel@lists.xenproject.org List-Id: Xen developer discussion List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Errors-To: xen-devel-bounces@lists.xenproject.org Precedence: list Sender: "Xen-devel" Authentication-Results: eu.smtp.expurgate.cloud; dkim=pass header.s=k20201202 header.d=kernel.org header.i="@kernel.org" header.h="From:To:Cc:Subject:Date:In-Reply-To:References" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; 
s=k20201202; t=1777310297; bh=G/2eQ9TI5TAejE5IgsfdvVcfACCawPpecEdM1VXM32k=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=JDwlcHynPBKJgZEQOCWtilxNz5OJTbEC9ImCjJuQT3RnOoZjD4+90hMORF2q1smV0 ZZwStmTz2ZWfCFTglmd/E7DTmKQiqUkyq6DoW4QkOcmP8LtfpYhN9V4ZMaLV1xjKUX qf/gxqJdT3gAPMXuYRhHcRDH0KM3YGUgvPXpdKncm5DePPHjb0VOG6buq4s8MreRM+ EZo8pRxSQAzQ5kujjD1AkNCv3dgeUm7ZrfC1oOr8cnbPHFYMiYdejiO8B5iigWhrca 1OgKQbcauQ7QAB8qeF6Zf8CM71cpuIXWBUm7GSS+m87r+rFbtJTWOdD0Mn8qIs0Vfn 3JDYXNLvVgu3g== From: "Christophe Leroy (CS GROUP)" To: Yury Norov , Andrew Morton , Linus Torvalds , David Laight , Thomas Gleixner Cc: "Christophe Leroy (CS GROUP)" , linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org, linux-snps-arc@lists.infradead.org, linux-arm-kernel@lists.infradead.org, linux-mips@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, kvm@vger.kernel.org, linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org, sparclinux@vger.kernel.org, linux-um@lists.infradead.org, dmaengine@vger.kernel.org, linux-efi@vger.kernel.org, linux-fsi@lists.ozlabs.org, amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org, intel-gfx@lists.freedesktop.org, linux-wpan@vger.kernel.org, netdev@vger.kernel.org, linux-wireless@vger.kernel.org, linux-spi@vger.kernel.org, linux-media@vger.kernel.org, linux-staging@lists.linux.dev, linux-serial@vger.kernel.org, linux-usb@vger.kernel.org, xen-devel@lists.xenproject.org, linux-fsdevel@vger.kernel.org, ocfs2-devel@lists.linux.dev, bpf@vger.kernel.org, kasan-dev@googlegroups.com, linux-mm@kvack.org, linux-x25@vger.kernel.org, rust-for-linux@vger.kernel.org, linux-sound@vger.kernel.org, sound-open-firmware@alsa-project.org, linux-csky@vger.kernel.org, linux-hexagon@vger.kernel.org, loongarch@lists.linux.dev, linux-m68k@lists.linux-m68k.org, linux-openrisc@vger.kernel.org, linux-parisc@vger.kernel.org, linux-sh@vger.kernel.org, linux-arch@vger.kernel.org Subject: [RFC PATCH v1 4/9] uaccess: Introduce copy_{to/from}_user_partial() Date: Mon, 27 Apr 2026 
19:13:45 +0200 Message-ID: X-Mailer: git-send-email 2.49.0 In-Reply-To: References: MIME-Version: 1.0 X-Developer-Signature: v=1; a=openpgp-sha256; l=1585; i=chleroy@kernel.org; h=from:subject:message-id; bh=G/2eQ9TI5TAejE5IgsfdvVcfACCawPpecEdM1VXM32k=; b=owGbwMvMwCV2d0KB2p7V54MZT6slMWS+nxmjMKmwhcOl/KmCyQK3t3e2vTXauaOc++38fue5J 1/fnLd3d0cpC4MYF4OsmCLL8f/cu2Z0fUnNn7pLH2YOKxPIEAYuTgGYyD5HRoZnIqUtqi/uzJzx /elrrqTduXsuSM05kFXcMu1CQyFnzPFOhv/RJewtZeu89EsObX+ieXizf2Xo8RNCJwMOumbxCC5 Mr+EBAA== X-Developer-Key: i=chleroy@kernel.org; a=openpgp; fpr=10FFE6F8B390DE17ACC2632368A92FEB01B8DD78 Content-Transfer-Encoding: quoted-printable X-purgate-ID: tlsNG-bad1c0/1777310299-490B3A53-41D4CE8E/0/0 X-purgate-type: clean X-purgate-size: 1587 X-ZohoMail-DKIM: pass (identity @kernel.org) X-ZM-MESSAGEID: 1777310327073158500 Content-Type: text/plain; charset="utf-8" Today there are approximately 3000 calls for copy_to_user() and 3000 calls to copy_from_user(). The majority of callers of copy_{to/from}_user() don't care about the return value, they only check whether it is 0 or not, and when it is not 0 they handle it as a -EACCES. In order to allow better optimisation of copy_{to/from}_user() when the size of the copy is known at build time, create new fonctions named copy_{to/from}_user_partial() to be used by the few callers that are interested in partial copies and need to now how many bytes remain at the end of the copy. For the time being it is just the same as copy_{to/from}_user(). 
Signed-off-by: Christophe Leroy (CS GROUP) --- include/linux/uaccess.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index bd1201c81d94..2d37173782b3 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -221,6 +221,8 @@ copy_from_user(void *to, const void __user *from, unsig= ned long n) return _inline_copy_from_user(to, from, n); } =20 +#define copy_from_user_partial copy_from_user + static __always_inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) { @@ -233,6 +235,8 @@ copy_to_user(void __user *to, const void *from, unsigne= d long n) return _inline_copy_to_user(to, from, n); } =20 +#define copy_to_user_partial copy_to_user + #ifndef copy_mc_to_kernel /* * Without arch opt-in this generic copy_mc_to_kernel() will not handle --=20 2.49.0 From nobody Tue May 5 10:13:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 13A0C339714; Mon, 27 Apr 2026 17:18:32 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1777310313; cv=none; b=kz1l/ngozsI96xXmZj6JdhkNwseDzgqerxTXCHsNjAg9NGRCe6Mu7W0EKXK67K2FdRvUXEULmOGYX4325KFDzMUKJ0521x6D1/g1RzTBJi6tqyFzjZ483MbRx7eMXfAUA7eSdWOsyiw6BpsKwWFvXyXEFv0O3WL/xcnlU7oU8U4= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1777310313; c=relaxed/simple; bh=ykb2AFsFlD4OWf31v2pDwxXHhaD6RIAaFdGo66PLJJQ=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=iSBj76P4hslpjmEe+0DTh7B7VJy6FPKr2F3CwXFTNv7FEgiJq6BvShoVgJeKdk1v7jcLGkpJYZoVd7+kl32Abi2MSKzQ1TBnSfnvxI2paqsXnCMvdUcmd0u7U7cv2p7iTjVrgb0aI/0pg9b+BmbiXEzHL+oHUqvrnzjWtgnri2o= 
ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=VVYqWJ9y; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="VVYqWJ9y" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 31802C2BCF4; Mon, 27 Apr 2026 17:18:17 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1777310312; bh=ykb2AFsFlD4OWf31v2pDwxXHhaD6RIAaFdGo66PLJJQ=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=VVYqWJ9y3YbZh7SD1t9FZfVCwwasAh9H5TbUiJGW0WBLo8QeGfk22g4AfRfUPPWqU Aoc8LlWA/aSMgy9dc9PmA3AGYLZUU5W7rwnOfj07vTIJFBwTSF9yiiM4DvkX1/MBQu db67uJWBEaWIGjEVq27JeF6TjgL+v+lGhLnh5fqlRmMpg7nUlTADncH5wD5lENeOzx AnSeuD99A4n/GOBDGHS6YE5DsSz3FUwz6A+06zvkQMMGJou23G/4wgzSuudbgGF0+O Dmr1a73NapDMfoHC1BZSTwINTaDSDCQgyzzeSi4wB6M2kWQzSVBg/2nN/ihP2WWDMt aSuTg8ZLqpxJg== From: "Christophe Leroy (CS GROUP)" To: Yury Norov , Andrew Morton , Linus Torvalds , David Laight , Thomas Gleixner Cc: "Christophe Leroy (CS GROUP)" , linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org, linux-snps-arc@lists.infradead.org, linux-arm-kernel@lists.infradead.org, linux-mips@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, kvm@vger.kernel.org, linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org, sparclinux@vger.kernel.org, linux-um@lists.infradead.org, dmaengine@vger.kernel.org, linux-efi@vger.kernel.org, linux-fsi@lists.ozlabs.org, amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org, intel-gfx@lists.freedesktop.org, linux-wpan@vger.kernel.org, netdev@vger.kernel.org, linux-wireless@vger.kernel.org, linux-spi@vger.kernel.org, linux-media@vger.kernel.org, linux-staging@lists.linux.dev, linux-serial@vger.kernel.org, linux-usb@vger.kernel.org, xen-devel@lists.xenproject.org, linux-fsdevel@vger.kernel.org, ocfs2-devel@lists.linux.dev, bpf@vger.kernel.org, 
kasan-dev@googlegroups.com, linux-mm@kvack.org, linux-x25@vger.kernel.org, rust-for-linux@vger.kernel.org, linux-sound@vger.kernel.org, sound-open-firmware@alsa-project.org, linux-csky@vger.kernel.org, linux-hexagon@vger.kernel.org, loongarch@lists.linux.dev, linux-m68k@lists.linux-m68k.org, linux-openrisc@vger.kernel.org, linux-parisc@vger.kernel.org, linux-sh@vger.kernel.org, linux-arch@vger.kernel.org Subject: [RFC PATCH v1 5/9] uaccess: Switch to copy_{to/from}_user_partial() when relevant Date: Mon, 27 Apr 2026 19:13:46 +0200 Message-ID: <289b424e243ba2c4139ea04009cf8b9c448a87ff.1777306795.git.chleroy@kernel.org> X-Mailer: git-send-email 2.49.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Developer-Signature: v=1; a=openpgp-sha256; l=54038; i=chleroy@kernel.org; h=from:subject:message-id; bh=ykb2AFsFlD4OWf31v2pDwxXHhaD6RIAaFdGo66PLJJQ=; b=owGbwMvMwCV2d0KB2p7V54MZT6slMWS+nxnT8I410Uhy0qp/RVcua12akXn97Jr7B907GK1ex fMIJG+93FHKwiDGxSArpshy/D/3rhldX1Lzp+7Sh5nDygQyhIGLUwAmYtrO8D+j/Q7HhgvHVxxp 95Tc/nBGsdAKdauVyxVmaezQe2149M4LRoa79udnSurd+iisF/Rta6/v3KKjtnV8bRpT1XZWfDZ UYGEHAA== X-Developer-Key: i=chleroy@kernel.org; a=openpgp; fpr=10FFE6F8B390DE17ACC2632368A92FEB01B8DD78 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" In a subsequent patch, copy_{to/from}_user() will be modified to return -EFAULT when copy fails. Among the 6000 calls to copy_{to/from}_user(), around 2% rely on copy_{to/from}_user() doing partial copies and returning amount of not copied bytes. Change those users to copy_{to/from}_user_partial(). This change was done based on whether callers assign the returned value to a variable or just check whether the return value is 0 or not. Several of them only use it for debug to print the amount of bytes not copied. Those could maybe be changed to stop reporting that amount and not be converted to partial copy. 
Some not trivial handling might have been unecessarily converted. This is not a problem and they can be converted back later for better performance. The callers where located with following commands then reviewed one by one: sed -i s/"return copy_to_user("/"return copy_to_user_partial("/g `git grep= -l "return copy_to_user("` sed -i s/" =3D copy_to_user("/" =3D copy_to_user_partial("/g `git grep -l = " =3D copy_to_user("` sed -i s/" +=3D copy_to_user("/" +=3D copy_to_user_partial("/g `git grep -= l " +=3D copy_to_user("` sed -i s/" -=3D copy_to_user("/" -=3D copy_to_user_partial("/g `git grep -= l " -=3D copy_to_user("` Then the same was done with copy_from_user(). During the review, patterns like the following were rejected and kept as is: - return copy_to_user(osf_stat, &tmp, sizeof(tmp)) ? -EFAULT : 0; + return copy_to_user_partial(osf_stat, &tmp, sizeof(tmp)) ? -EFAULT : 0; Signed-off-by: Christophe Leroy (CS GROUP) --- arch/alpha/kernel/osf_sys.c | 4 ++-- arch/alpha/kernel/termios.c | 2 +- arch/arc/kernel/disasm.c | 2 +- arch/arm64/include/asm/gcs.h | 2 +- arch/arm64/kernel/signal32.c | 2 +- arch/mips/kernel/rtlx.c | 8 ++++---- arch/mips/kernel/vpe.c | 2 +- arch/powerpc/kvm/book3s_64_mmu_hv.c | 4 ++-- arch/powerpc/kvm/book3s_64_mmu_radix.c | 4 ++-- arch/powerpc/kvm/book3s_hv.c | 2 +- arch/riscv/kernel/signal.c | 2 +- arch/s390/include/asm/idals.h | 8 ++++---- arch/sparc/kernel/termios.c | 2 +- arch/um/kernel/process.c | 2 +- arch/x86/lib/insn-eval.c | 2 +- arch/x86/um/signal.c | 2 +- drivers/android/binder_alloc.c | 2 +- drivers/comedi/comedi_fops.c | 4 ++-- drivers/dma/idxd/cdev.c | 2 +- drivers/firmware/efi/test/efi_test.c | 2 +- drivers/fsi/fsi-scom.c | 2 +- .../amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 2 +- drivers/gpu/drm/i915/gt/intel_sseu.c | 4 ++-- drivers/gpu/drm/i915/i915_gem.c | 4 ++-- drivers/hwtracing/intel_th/msu.c | 2 +- drivers/misc/ibmvmc.c | 2 +- drivers/misc/vmw_vmci/vmci_host.c | 2 +- drivers/most/most_cdev.c | 2 +- 
drivers/net/ieee802154/ca8210.c | 4 ++-- drivers/net/wireless/ath/wil6210/debugfs.c | 2 +- .../wireless/intel/iwlwifi/pcie/gen1_2/trans.c | 2 +- drivers/net/wireless/ti/wlcore/debugfs.c | 2 +- drivers/ps3/ps3-lpm.c | 2 +- drivers/s390/crypto/zcrypt_api.h | 4 ++-- drivers/spi/spidev.c | 2 +- .../staging/media/atomisp/pci/atomisp_cmd.c | 8 ++++---- drivers/tty/tty_ioctl.c | 14 +++++++------- drivers/tty/vt/vc_screen.c | 4 ++-- drivers/usb/gadget/function/f_hid.c | 4 ++-- drivers/usb/gadget/function/f_printer.c | 2 +- drivers/vfio/vfio_iommu_type1.c | 4 ++-- drivers/xen/xenbus/xenbus_dev_frontend.c | 2 +- fs/namespace.c | 2 +- fs/ocfs2/dlmfs/dlmfs.c | 2 +- fs/proc/base.c | 4 ++-- include/linux/bpfptr.h | 2 +- include/linux/sockptr.h | 4 ++-- ipc/msg.c | 8 ++++---- ipc/sem.c | 8 ++++---- ipc/shm.c | 18 +++++++++--------- kernel/regset.c | 2 +- kernel/sys.c | 4 ++-- lib/kfifo.c | 8 ++++---- mm/kasan/kasan_test_c.c | 4 ++-- mm/memory.c | 2 +- net/x25/af_x25.c | 2 +- rust/helpers/uaccess.c | 4 ++-- sound/pci/emu10k1/emufx.c | 4 ++-- sound/pci/rme9652/hdsp.c | 6 +++--- sound/soc/intel/avs/probes.c | 6 +++--- sound/soc/sof/compress.c | 12 ++++++------ sound/soc/sof/sof-client-probes.c | 6 +++--- 62 files changed, 122 insertions(+), 122 deletions(-) diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index 7b6543d2cca3..c8ea39fdbb9f 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -944,7 +944,7 @@ get_tv32(struct timespec64 *o, struct timeval32 __user = *i) static inline long put_tv32(struct timeval32 __user *o, struct timespec64 *i) { - return copy_to_user(o, &(struct timeval32){ + return copy_to_user_partial(o, &(struct timeval32){ .tv_sec =3D i->tv_sec, .tv_usec =3D i->tv_nsec / NSEC_PER_USEC}, sizeof(struct timeval32)); @@ -953,7 +953,7 @@ put_tv32(struct timeval32 __user *o, struct timespec64 = *i) static inline long put_tv_to_tv32(struct timeval32 __user *o, struct __kernel_old_timeval *i) { - return copy_to_user(o, 
&(struct timeval32){ + return copy_to_user_partial(o, &(struct timeval32){ .tv_sec =3D i->tv_sec, .tv_usec =3D i->tv_usec}, sizeof(struct timeval32)); diff --git a/arch/alpha/kernel/termios.c b/arch/alpha/kernel/termios.c index a4c29a22edf7..a3693c29a0fd 100644 --- a/arch/alpha/kernel/termios.c +++ b/arch/alpha/kernel/termios.c @@ -52,5 +52,5 @@ int kernel_termios_to_user_termio(struct termio __user *t= ermio, v.c_cc[_VEOL2] =3D termios->c_cc[VEOL2]; v.c_cc[_VSWTC] =3D termios->c_cc[VSWTC]; =20 - return copy_to_user(termio, &v, sizeof(struct termio)); + return copy_to_user_partial(termio, &v, sizeof(struct termio)); } diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c index ccc7e8c39eb3..a3ef9d079e7f 100644 --- a/arch/arc/kernel/disasm.c +++ b/arch/arc/kernel/disasm.c @@ -34,7 +34,7 @@ void __kprobes disasm_instr(unsigned long addr, struct di= sasm_state *state, /* This fetches the upper part of the 32 bit instruction * in both the cases of Little Endian or Big Endian configurations. 
*/ if (userspace) { - bytes_not_copied =3D copy_from_user(ins_buf, + bytes_not_copied =3D copy_from_user_partial(ins_buf, (const void __user *) addr, 8); if (bytes_not_copied > 6) goto fault; diff --git a/arch/arm64/include/asm/gcs.h b/arch/arm64/include/asm/gcs.h index 8fa0707069e8..7ee23a8130b0 100644 --- a/arch/arm64/include/asm/gcs.h +++ b/arch/arm64/include/asm/gcs.h @@ -139,7 +139,7 @@ static inline u64 get_user_gcs(unsigned long __user *ad= dr, int *err) =20 /* Ensure previous GCS operation are visible before we read the page */ gcsb_dsync(); - ret =3D copy_from_user(&load, addr, sizeof(load)); + ret =3D copy_from_user_partial(&load, addr, sizeof(load)); if (ret !=3D 0) *err =3D ret; return load; diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index bb3b526ff43f..7016d2a3bb76 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -53,7 +53,7 @@ static inline int put_sigset_t(compat_sigset_t __user *us= et, sigset_t *set) cset.sig[0] =3D set->sig[0] & 0xffffffffull; cset.sig[1] =3D set->sig[0] >> 32; =20 - return copy_to_user(uset, &cset, sizeof(*uset)); + return copy_to_user_partial(uset, &cset, sizeof(*uset)); } =20 static inline int get_sigset_t(sigset_t *set, diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index 18c509c59f33..bc468064194d 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c @@ -262,13 +262,13 @@ ssize_t rtlx_read(int index, void __user *buff, size_= t count) /* then how much from the read pointer onwards */ fl =3D min(count, (size_t)lx->buffer_size - lx->lx_read); =20 - failed =3D copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl); + failed =3D copy_to_user_partial(buff, lx->lx_buffer + lx->lx_read, fl); if (failed) goto out; =20 /* and if there is anything left at the beginning of the buffer */ if (count - fl) - failed =3D copy_to_user(buff + fl, lx->lx_buffer, count - fl); + failed =3D copy_to_user_partial(buff + fl, lx->lx_buffer, count - fl); =20 out: 
count -=3D failed; @@ -304,13 +304,13 @@ ssize_t rtlx_write(int index, const void __user *buff= er, size_t count) /* first bit from write pointer to the end of the buffer, or count */ fl =3D min(count, (size_t) rt->buffer_size - rt->rt_write); =20 - failed =3D copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl); + failed =3D copy_from_user_partial(rt->rt_buffer + rt->rt_write, buffer, f= l); if (failed) goto out; =20 /* if there's any left copy to the beginning of the buffer */ if (count - fl) - failed =3D copy_from_user(rt->rt_buffer, buffer + fl, count - fl); + failed =3D copy_from_user_partial(rt->rt_buffer, buffer + fl, count - fl= ); =20 out: count -=3D failed; diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index b05ee21a1d67..5a8d72d6c80c 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -854,7 +854,7 @@ static ssize_t vpe_write(struct file *file, const char = __user *buffer, return -ENOMEM; } =20 - count -=3D copy_from_user(v->pbuffer + v->len, buffer, count); + count -=3D copy_from_user_partial(v->pbuffer + v->len, buffer, count); if (!count) return -EFAULT; =20 diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_= 64_mmu_hv.c index 2ccb3d138f46..1c43c7b8e801 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -2028,7 +2028,7 @@ static ssize_t debugfs_htab_read(struct file *file, c= har __user *buf, n =3D p->chars_left; if (n > len) n =3D len; - r =3D copy_to_user(buf, p->buf + p->buf_index, n); + r =3D copy_to_user_partial(buf, p->buf + p->buf_index, n); n -=3D r; p->chars_left -=3D n; p->buf_index +=3D n; @@ -2068,7 +2068,7 @@ static ssize_t debugfs_htab_read(struct file *file, c= har __user *buf, p->chars_left =3D n; if (n > len) n =3D len; - r =3D copy_to_user(buf, p->buf, n); + r =3D copy_to_user_partial(buf, p->buf, n); n -=3D r; p->chars_left -=3D n; p->buf_index =3D n; diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book= 
3s_64_mmu_radix.c index 933fc7cb9afc..0a27e018d27b 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c @@ -1307,7 +1307,7 @@ static ssize_t debugfs_radix_read(struct file *file, = char __user *buf, n =3D p->chars_left; if (n > len) n =3D len; - r =3D copy_to_user(buf, p->buf + p->buf_index, n); + r =3D copy_to_user_partial(buf, p->buf + p->buf_index, n); n -=3D r; p->chars_left -=3D n; p->buf_index +=3D n; @@ -1407,7 +1407,7 @@ static ssize_t debugfs_radix_read(struct file *file, = char __user *buf, p->chars_left =3D n; if (n > len) n =3D len; - r =3D copy_to_user(buf, p->buf, n); + r =3D copy_to_user_partial(buf, p->buf, n); n -=3D r; p->chars_left -=3D n; p->buf_index =3D n; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 61dbeea317f3..4c7a8f687c99 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -2916,7 +2916,7 @@ static ssize_t debugfs_timings_read(struct file *file= , char __user *buf, return 0; if (len > p->buflen - pos) len =3D p->buflen - pos; - n =3D copy_to_user(buf, p->buf + pos, len); + n =3D copy_to_user_partial(buf, p->buf + pos, len); if (n) { if (n =3D=3D len) return -EFAULT; diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index 59784dc117e4..4630dbad7428 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -139,7 +139,7 @@ static long __restore_v_state(struct pt_regs *regs, voi= d __user *sc_vec) * Copy the whole vector content from user space datap. Use * copy_from_user to prevent information leak. 
*/ - return copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize); + return copy_from_user_partial(current->thread.vstate.datap, datap, riscv_= v_vsize); } =20 static long save_cfiss_state(struct pt_regs *regs, void __user *sc_cfi) diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h index 06e1ec2afd5a..d86f4eb1ce42 100644 --- a/arch/s390/include/asm/idals.h +++ b/arch/s390/include/asm/idals.h @@ -301,14 +301,14 @@ static inline size_t idal_buffer_to_user(struct idal_= buffer *ib, void __user *to BUG_ON(count > ib->size); for (i =3D 0; count > IDA_BLOCK_SIZE; i++) { vaddr =3D dma64_to_virt(ib->data[i]); - left =3D copy_to_user(to, vaddr, IDA_BLOCK_SIZE); + left =3D copy_to_user_partial(to, vaddr, IDA_BLOCK_SIZE); if (left) return left + count - IDA_BLOCK_SIZE; to =3D (void __user *)to + IDA_BLOCK_SIZE; count -=3D IDA_BLOCK_SIZE; } vaddr =3D dma64_to_virt(ib->data[i]); - return copy_to_user(to, vaddr, count); + return copy_to_user_partial(to, vaddr, count); } =20 /* @@ -323,14 +323,14 @@ static inline size_t idal_buffer_from_user(struct ida= l_buffer *ib, const void __ BUG_ON(count > ib->size); for (i =3D 0; count > IDA_BLOCK_SIZE; i++) { vaddr =3D dma64_to_virt(ib->data[i]); - left =3D copy_from_user(vaddr, from, IDA_BLOCK_SIZE); + left =3D copy_from_user_partial(vaddr, from, IDA_BLOCK_SIZE); if (left) return left + count - IDA_BLOCK_SIZE; from =3D (void __user *)from + IDA_BLOCK_SIZE; count -=3D IDA_BLOCK_SIZE; } vaddr =3D dma64_to_virt(ib->data[i]); - return copy_from_user(vaddr, from, count); + return copy_from_user_partial(vaddr, from, count); } =20 #endif diff --git a/arch/sparc/kernel/termios.c b/arch/sparc/kernel/termios.c index ee64965c27cd..db9c07b7d5ee 100644 --- a/arch/sparc/kernel/termios.c +++ b/arch/sparc/kernel/termios.c @@ -27,7 +27,7 @@ int kernel_termios_to_user_termio(struct termio __user *t= ermio, v.c_cc[_VMIN] =3D termios->c_cc[VMIN]; v.c_cc[_VTIME] =3D termios->c_cc[VTIME]; } - return copy_to_user(termio, 
&v, sizeof(struct termio)); + return copy_to_user_partial(termio, &v, sizeof(struct termio)); } =20 int user_termios_to_kernel_termios(struct ktermios *k, diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index 63b38a3f73f7..d41625dfa00b 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -252,7 +252,7 @@ EXPORT_SYMBOL(uml_strdup); =20 int copy_from_user_proc(void *to, void __user *from, int size) { - return copy_from_user(to, from, size); + return copy_from_user_partial(to, from, size); } =20 int singlestepping(void) diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c index e03eeec55cfe..e7cb03ab26f1 100644 --- a/arch/x86/lib/insn-eval.c +++ b/arch/x86/lib/insn-eval.c @@ -1512,7 +1512,7 @@ int insn_fetch_from_user(struct pt_regs *regs, unsign= ed char buf[MAX_INSN_SIZE]) if (insn_get_effective_ip(regs, &ip)) return -EINVAL; =20 - not_copied =3D copy_from_user(buf, (void __user *)ip, MAX_INSN_SIZE); + not_copied =3D copy_from_user_partial(buf, (void __user *)ip, MAX_INSN_SI= ZE); =20 return MAX_INSN_SIZE - not_copied; } diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c index 2934e170b0fe..e0fab7c1625b 100644 --- a/arch/x86/um/signal.c +++ b/arch/x86/um/signal.c @@ -40,7 +40,7 @@ static int copy_sc_from_user(struct pt_regs *regs, /* Always make any pending restarted system calls return -EINTR */ current->restart_block.fn =3D do_no_restart_syscall; =20 - err =3D copy_from_user(&sc, from, sizeof(sc)); + err =3D copy_from_user_partial(&sc, from, sizeof(sc)); if (err) return err; =20 diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index e4488ad86a65..8ba9c57b489c 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -1346,7 +1346,7 @@ binder_alloc_copy_user_to_buffer(struct binder_alloc = *alloc, buffer_offset, &pgoff); size =3D min_t(size_t, bytes, PAGE_SIZE - pgoff); kptr =3D kmap_local_page(page) + pgoff; - ret =3D copy_from_user(kptr, from, size); + ret 
=3D copy_from_user_partial(kptr, from, size); kunmap_local(kptr); if (ret) return bytes - size + ret; diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c index c09bbe04be6c..272fdc54fb81 100644 --- a/drivers/comedi/comedi_fops.c +++ b/drivers/comedi/comedi_fops.c @@ -2659,7 +2659,7 @@ static unsigned int comedi_buf_copy_to_user(struct co= medi_subdevice *s, unsigned int copy_amount =3D min(n, PAGE_SIZE - offset); unsigned int uncopied; =20 - uncopied =3D copy_to_user(dest, buf_page_list[page].virt_addr + + uncopied =3D copy_to_user_partial(dest, buf_page_list[page].virt_addr + offset, copy_amount); copy_amount -=3D uncopied; n -=3D copy_amount; @@ -2687,7 +2687,7 @@ static unsigned int comedi_buf_copy_from_user(struct = comedi_subdevice *s, unsigned int copy_amount =3D min(n, PAGE_SIZE - offset); unsigned int uncopied; =20 - uncopied =3D copy_from_user(buf_page_list[page].virt_addr + + uncopied =3D copy_from_user_partial(buf_page_list[page].virt_addr + offset, src, copy_amount); copy_amount -=3D uncopied; n -=3D copy_amount; diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index 0366c7cf3502..ac79bab6d6c3 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -751,7 +751,7 @@ int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, un= signed long addr, * to addr in the mm. 
*/ kthread_use_mm(mm); - left =3D copy_to_user((void __user *)addr + status_size, cr + status_size, + left =3D copy_to_user_partial((void __user *)addr + status_size, cr + sta= tus_size, len - status_size); /* * Copy status only after the rest of completion record is copied diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/te= st/efi_test.c index d54d6a671326..43b280ceb955 100644 --- a/drivers/firmware/efi/test/efi_test.c +++ b/drivers/firmware/efi/test/efi_test.c @@ -133,7 +133,7 @@ copy_ucs2_to_user_len(efi_char16_t __user *dst, efi_cha= r16_t *src, size_t len) if (!src) return 0; =20 - return copy_to_user(dst, src, len); + return copy_to_user_partial(dst, src, len); } =20 static long efi_runtime_get_variable(unsigned long arg) diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c index bb4d3700c934..370ec75b20e6 100644 --- a/drivers/fsi/fsi-scom.c +++ b/drivers/fsi/fsi-scom.c @@ -332,7 +332,7 @@ static ssize_t scom_read(struct file *filep, char __use= r *buf, size_t len, return rc; } =20 - rc =3D copy_to_user(buf, &val, len); + rc =3D copy_to_user_partial(buf, &val, len); if (rc) dev_dbg(dev, "copy to user failed:%d\n", rc); =20 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/dr= ivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 2409ac72b166..712605ec7ecc 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -1346,7 +1346,7 @@ static ssize_t dp_sdp_message_debugfs_write(struct fi= le *f, const char __user *b =20 acrtc_state =3D to_dm_crtc_state(connector->base.state->crtc->state); =20 - r =3D copy_from_user(data, buf, write_size); + r =3D copy_from_user_partial(data, buf, write_size); =20 write_size -=3D r; =20 diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt= /intel_sseu.c index 656a499b2706..3f5b450a914a 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ 
b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -114,7 +114,7 @@ int intel_sseu_copy_eumask_to_user(void __user *to, } } =20 - return copy_to_user(to, eu_mask, len); + return copy_to_user_partial(to, eu_mask, len); } =20 /** @@ -146,7 +146,7 @@ int intel_sseu_copy_ssmask_to_user(void __user *to, } } =20 - return copy_to_user(to, ss_mask, len); + return copy_to_user_partial(to, ss_mask, len); } =20 static void gen11_compute_sseu_info(struct sseu_dev_info *sseu, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_ge= m.c index a432daf8038a..c1c2e762498f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -291,7 +291,7 @@ gtt_user_read(struct io_mapping *mapping, io_mapping_unmap_atomic(vaddr); if (unwritten) { vaddr =3D io_mapping_map_wc(mapping, base, PAGE_SIZE); - unwritten =3D copy_to_user(user_data, + unwritten =3D copy_to_user_partial(user_data, (void __force *)vaddr + offset, length); io_mapping_unmap(vaddr); @@ -525,7 +525,7 @@ ggtt_write(struct io_mapping *mapping, io_mapping_unmap_atomic(vaddr); if (unwritten) { vaddr =3D io_mapping_map_wc(mapping, base, PAGE_SIZE); - unwritten =3D copy_from_user((void __force *)vaddr + offset, + unwritten =3D copy_from_user_partial((void __force *)vaddr + offset, user_data, length); io_mapping_unmap(vaddr); } diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/= msu.c index a82cf74f39ad..9b97b71b44f1 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -1457,7 +1457,7 @@ static unsigned long msc_win_to_user(void *data, void= *src, size_t len) struct msc_win_to_user_struct *u =3D data; unsigned long ret; =20 - ret =3D copy_to_user(u->buf + u->offset, src, len); + ret =3D copy_to_user_partial(u->buf + u->offset, src, len); u->offset +=3D len - ret; =20 return ret; diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c index beb18c34f20d..e1d99354dd29 100644 --- a/drivers/misc/ibmvmc.c +++ b/drivers/misc/ibmvmc.c @@ 
-1112,7 +1112,7 @@ static ssize_t ibmvmc_write(struct file *file, const = char *buffer, while (c > 0) { bytes =3D min_t(size_t, c, vmc_buffer->size); =20 - bytes -=3D copy_from_user(buf, p, bytes); + bytes -=3D copy_from_user_partial(buf, p, bytes); if (!bytes) { ret =3D -EFAULT; goto out; diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci= _host.c index b71ca1bf0a20..bd502edbc173 100644 --- a/drivers/misc/vmw_vmci/vmci_host.c +++ b/drivers/misc/vmw_vmci/vmci_host.c @@ -213,7 +213,7 @@ static int drv_cp_harray_to_user(void __user *user_buf_= uva, =20 *user_buf_size =3D array_size * sizeof(*handles); if (*user_buf_size) - *retval =3D copy_to_user(user_buf_uva, + *retval =3D copy_to_user_partial(user_buf_uva, vmci_handle_arr_get_handles (handle_array), *user_buf_size); =20 diff --git a/drivers/most/most_cdev.c b/drivers/most/most_cdev.c index 5df508d8d60a..969c865ccbef 100644 --- a/drivers/most/most_cdev.c +++ b/drivers/most/most_cdev.c @@ -265,7 +265,7 @@ comp_read(struct file *filp, char __user *buf, size_t c= ount, loff_t *offset) count, mbo->processed_length - c->mbo_offs); =20 - not_copied =3D copy_to_user(buf, + not_copied =3D copy_to_user_partial(buf, mbo->virt_address + c->mbo_offs, to_copy); =20 diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca821= 0.c index ed4178155a5d..d474a008c73e 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -2460,7 +2460,7 @@ static ssize_t ca8210_test_int_user_write( return -EBADE; } =20 - ret =3D copy_from_user(command, in_buf, len); + ret =3D copy_from_user_partial(command, in_buf, len); if (ret) { dev_err( &priv->spi->dev, @@ -2548,7 +2548,7 @@ static ssize_t ca8210_test_int_user_read( cmdlen =3D fifo_buffer[1]; bytes_not_copied =3D cmdlen + 2; =20 - bytes_not_copied =3D copy_to_user(buf, fifo_buffer, bytes_not_copied); + bytes_not_copied =3D copy_to_user_partial(buf, fifo_buffer, bytes_not_cop= ied); if (bytes_not_copied > 0) { dev_err( 
&priv->spi->dev, diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wirel= ess/ath/wil6210/debugfs.c index b8cb736a7185..f2130248fb7f 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -659,7 +659,7 @@ static ssize_t wil_read_file_ioblob(struct file *file, = char __user *user_buf, wil_memcpy_fromio_32(buf, (const void __iomem *) wil_blob->blob.data + aligned_pos, aligned_count); =20 - ret =3D copy_to_user(user_buf, buf + unaligned_bytes, count); + ret =3D copy_to_user_partial(user_buf, buf + unaligned_bytes, count); =20 wil_mem_access_unlock(wil); wil_pm_runtime_put(wil); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c b/drive= rs/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c index a05f60f9224b..66ddaa0d8e36 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c @@ -3060,7 +3060,7 @@ static bool iwl_write_to_user_buf(char __user *user_b= uf, ssize_t count, if (*size > buf_size_left) *size =3D buf_size_left; =20 - *size -=3D copy_to_user(user_buf, buf, *size); + *size -=3D copy_to_user_partial(user_buf, buf, *size); *bytes_copied +=3D *size; =20 if (buf_size_left =3D=3D *size) diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireles= s/ti/wlcore/debugfs.c index bbfd2725215b..d359baea5100 100644 --- a/drivers/net/wireless/ti/wlcore/debugfs.c +++ b/drivers/net/wireless/ti/wlcore/debugfs.c @@ -1088,7 +1088,7 @@ static ssize_t dev_mem_read(struct file *file, mutex_unlock(&wl->mutex); =20 if (ret =3D=3D 0) { - ret =3D copy_to_user(user_buf, buf, bytes); + ret =3D copy_to_user_partial(user_buf, buf, bytes); if (ret < bytes) { bytes -=3D ret; *ppos +=3D bytes; diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c index f8d8f607134a..5a2b150cda49 100644 --- a/drivers/ps3/ps3-lpm.c +++ b/drivers/ps3/ps3-lpm.c @@ -999,7 +999,7 @@ int ps3_lpm_copy_tb_to_user(unsigned long offset, void 
= __user *buf, return result =3D=3D LV1_WRONG_STATE ? -EBUSY : -EINVAL; } =20 - result =3D copy_to_user(buf, lpm_priv->tb_cache, tmp); + result =3D copy_to_user_partial(buf, lpm_priv->tb_cache, tmp); =20 if (result) { dev_dbg(sbd_core(), "%s:%u: 0x%llx bytes at 0x%p\n", diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_= api.h index 6ef8850a42df..61a5de90c354 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h @@ -185,7 +185,7 @@ static inline unsigned long z_copy_from_user(bool users= pace, unsigned long n) { if (likely(userspace)) - return copy_from_user(to, from, n); + return copy_from_user_partial(to, from, n); memcpy(to, (void __force *)from, n); return 0; } @@ -196,7 +196,7 @@ static inline unsigned long z_copy_to_user(bool userspa= ce, unsigned long n) { if (likely(userspace)) - return copy_to_user(to, from, n); + return copy_to_user_partial(to, from, n); memcpy((void __force *)to, from, n); return 0; } diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 638221178384..5b42fabcf4c4 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -157,7 +157,7 @@ spidev_read(struct file *filp, char __user *buf, size_t= count, loff_t *f_pos) if (status > 0) { unsigned long missing; =20 - missing =3D copy_to_user(buf, spidev->rx_buffer, status); + missing =3D copy_to_user_partial(buf, spidev->rx_buffer, status); if (missing =3D=3D status) status =3D -EFAULT; else diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/stag= ing/media/atomisp/pci/atomisp_cmd.c index fec369575d88..10a7aff375a9 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c +++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c @@ -1491,7 +1491,7 @@ int atomisp_gdc_cac_table(struct atomisp_sub_device *= asd, int flag, } =20 for (i =3D 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) { - ret =3D copy_from_user(tab->coordinates_x[i], + ret =3D copy_from_user_partial(tab->coordinates_x[i], 
config->coordinates_x[i], config->height * config->width * sizeof(*config->coordinates_x[i])); @@ -1502,7 +1502,7 @@ int atomisp_gdc_cac_table(struct atomisp_sub_device *= asd, int flag, atomisp_css_morph_table_free(tab); return -EFAULT; } - ret =3D copy_from_user(tab->coordinates_y[i], + ret =3D copy_from_user_partial(tab->coordinates_y[i], config->coordinates_y[i], config->height * config->width * sizeof(*config->coordinates_y[i])); @@ -1709,7 +1709,7 @@ int atomisp_3a_stat(struct atomisp_sub_device *asd, i= nt flag, config->exp_id =3D s3a_buf->s3a_data->exp_id; config->isp_config_id =3D s3a_buf->s3a_data->isp_config_id; =20 - ret =3D copy_to_user(config->data, asd->params.s3a_user_stat->data, + ret =3D copy_to_user_partial(config->data, asd->params.s3a_user_stat->dat= a, asd->params.s3a_output_bytes); if (ret) { dev_err(isp->dev, "copy to user failed: copied %lu bytes\n", @@ -2031,7 +2031,7 @@ static unsigned int long copy_from_compatible(void *t= o, const void *from, unsigned long n, bool from_user) { if (from_user) - return copy_from_user(to, (void __user *)from, n); + return copy_from_user_partial(to, (void __user *)from, n); else memcpy(to, from, n); return 0; diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index 90c70d8d14e3..cdc274c0ff81 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c @@ -388,29 +388,29 @@ __weak int kernel_termios_to_user_termio(struct termi= o __user *termio, v.c_lflag =3D termios->c_lflag; v.c_line =3D termios->c_line; memcpy(v.c_cc, termios->c_cc, NCC); - return copy_to_user(termio, &v, sizeof(struct termio)); + return copy_to_user_partial(termio, &v, sizeof(struct termio)); } =20 #ifdef TCGETS2 __weak int user_termios_to_kernel_termios(struct ktermios *k, struct termios2 __user *u) { - return copy_from_user(k, u, sizeof(struct termios2)); + return copy_from_user_partial(k, u, sizeof(struct termios2)); } __weak int kernel_termios_to_user_termios(struct termios2 __user *u, struct ktermios *k) { - return 
copy_to_user(u, k, sizeof(struct termios2)); + return copy_to_user_partial(u, k, sizeof(struct termios2)); } __weak int user_termios_to_kernel_termios_1(struct ktermios *k, struct termios __user *u) { - return copy_from_user(k, u, sizeof(struct termios)); + return copy_from_user_partial(k, u, sizeof(struct termios)); } __weak int kernel_termios_to_user_termios_1(struct termios __user *u, struct ktermios *k) { - return copy_to_user(u, k, sizeof(struct termios)); + return copy_to_user_partial(u, k, sizeof(struct termios)); } =20 #else @@ -418,12 +418,12 @@ __weak int kernel_termios_to_user_termios_1(struct te= rmios __user *u, __weak int user_termios_to_kernel_termios(struct ktermios *k, struct termios __user *u) { - return copy_from_user(k, u, sizeof(struct termios)); + return copy_from_user_partial(k, u, sizeof(struct termios)); } __weak int kernel_termios_to_user_termios(struct termios __user *u, struct ktermios *k) { - return copy_to_user(u, k, sizeof(struct termios)); + return copy_to_user_partial(u, k, sizeof(struct termios)); } #endif /* TCGETS2 */ =20 diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c index 4d2d46c95fef..e54c708149c3 100644 --- a/drivers/tty/vt/vc_screen.c +++ b/drivers/tty/vt/vc_screen.c @@ -450,7 +450,7 @@ vcs_read(struct file *file, char __user *buf, size_t co= unt, loff_t *ppos) */ =20 console_unlock(); - ret =3D copy_to_user(buf, con_buf + skip, this_round); + ret =3D copy_to_user_partial(buf, con_buf + skip, this_round); console_lock(); =20 if (ret) { @@ -630,7 +630,7 @@ vcs_write(struct file *file, const char __user *buf, si= ze_t count, loff_t *ppos) * in the write data from userspace safely. 
*/ console_unlock(); - ret =3D copy_from_user(con_buf, buf, this_round); + ret =3D copy_from_user_partial(con_buf, buf, this_round); console_lock(); =20 if (ret) { diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/funct= ion/f_hid.c index c5a12a6760ea..f22dd3697a46 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -349,7 +349,7 @@ static ssize_t f_hidg_intout_read(struct file *file, ch= ar __user *buffer, spin_unlock_irqrestore(&hidg->read_spinlock, flags); =20 /* copy to user outside spinlock */ - count -=3D copy_to_user(buffer, req->buf + list->pos, count); + count -=3D copy_to_user_partial(buffer, req->buf + list->pos, count); list->pos +=3D count; =20 /* @@ -410,7 +410,7 @@ static ssize_t f_hidg_ssreport_read(struct file *file, = char __user *buffer, spin_unlock_irqrestore(&hidg->read_spinlock, flags); =20 if (tmp_buf !=3D NULL) { - count -=3D copy_to_user(buffer, tmp_buf, count); + count -=3D copy_to_user_partial(buffer, tmp_buf, count); kfree(tmp_buf); } else { count =3D -ENOMEM; diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/f= unction/f_printer.c index e4f7828ae75d..4fbed987b639 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -525,7 +525,7 @@ printer_read(struct file *fd, char __user *buf, size_t = len, loff_t *ptr) else size =3D len; =20 - size -=3D copy_to_user(buf, current_rx_buf, size); + size -=3D copy_to_user_partial(buf, current_rx_buf, size); bytes_copied +=3D size; len -=3D size; buf +=3D size; diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type= 1.c index c8151ba54de3..ad74a891aa80 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -3173,7 +3173,7 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_= iommu *iommu, vaddr =3D dma->vaddr + offset; =20 if (write) { - *copied =3D copy_to_user((void __user *)vaddr, data, + *copied =3D 
copy_to_user_partial((void __user *)vaddr, data, count) ? 0 : count; if (*copied && iommu->dirty_page_tracking) { unsigned long pgshift =3D __ffs(iommu->pgsize_bitmap); @@ -3186,7 +3186,7 @@ static int vfio_iommu_type1_dma_rw_chunk(struct vfio_= iommu *iommu, (offset >> pgshift) + 1); } } else - *copied =3D copy_from_user(data, (void __user *)vaddr, + *copied =3D copy_from_user_partial(data, (void __user *)vaddr, count) ? 0 : count; if (kthread) kthread_unuse_mm(mm); diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/= xenbus_dev_frontend.c index 61db6932a9d2..b1db90dac1d1 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -150,7 +150,7 @@ static ssize_t xenbus_file_read(struct file *filp, while (i < len) { size_t sz =3D min_t(size_t, len - i, rb->len - rb->cons); =20 - ret =3D copy_to_user(ubuf + i, &rb->msg[rb->cons], sz); + ret =3D copy_to_user_partial(ubuf + i, &rb->msg[rb->cons], sz); =20 i +=3D sz - ret; rb->cons +=3D sz - ret; diff --git a/fs/namespace.c b/fs/namespace.c index fe919abd2f01..27afb73fef20 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -4033,7 +4033,7 @@ static void *copy_mount_options(const void __user * d= ata) if (!copy) return ERR_PTR(-ENOMEM); =20 - left =3D copy_from_user(copy, data, PAGE_SIZE); + left =3D copy_from_user_partial(copy, data, PAGE_SIZE); =20 /* * Not all architectures have an exact copy_from_user(). 
Resort to diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index 5821e33df78f..97c0b391b98e 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -255,7 +255,7 @@ static ssize_t dlmfs_file_write(struct file *filp, if (!count) return 0; =20 - bytes_left =3D copy_from_user(lvb_buf, buf, count); + bytes_left =3D copy_from_user_partial(lvb_buf, buf, count); count -=3D bytes_left; if (count) user_dlm_write_lvb(inode, lvb_buf, count); diff --git a/fs/proc/base.c b/fs/proc/base.c index d9acfa89c894..49577662ae70 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -278,7 +278,7 @@ static ssize_t get_mm_proctitle(struct mm_struct *mm, c= har __user *buf, len -=3D pos; if (len > count) len =3D count; - len -=3D copy_to_user(buf, page+pos, len); + len -=3D copy_to_user_partial(buf, page+pos, len); if (!len) len =3D -EFAULT; ret =3D len; @@ -359,7 +359,7 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, cha= r __user *buf, got =3D access_remote_vm(mm, pos, page, size, FOLL_ANON); if (got <=3D 0) break; - got -=3D copy_to_user(buf, page, got); + got -=3D copy_to_user_partial(buf, page, got); if (unlikely(!got)) { if (!len) len =3D -EFAULT; diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h index f6e0795db484..e4444d0f0cfe 100644 --- a/include/linux/bpfptr.h +++ b/include/linux/bpfptr.h @@ -50,7 +50,7 @@ static inline int copy_from_bpfptr_offset(void *dst, bpfp= tr_t src, size_t offset, size_t size) { if (!bpfptr_is_kernel(src)) - return copy_from_user(dst, src.user + offset, size); + return copy_from_user_partial(dst, src.user + offset, size); return copy_from_kernel_nofault(dst, src.kernel + offset, size); } =20 diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index 3e6c8e9d67ae..52ddddfe728d 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -45,7 +45,7 @@ static inline int copy_from_sockptr_offset(void *dst, soc= kptr_t src, size_t offset, size_t size) { if (!sockptr_is_kernel(src)) - return 
copy_from_user(dst, src.user + offset, size); + return copy_from_user_partial(dst, src.user + offset, size); memcpy(dst, src.kernel + offset, size); return 0; } @@ -111,7 +111,7 @@ static inline int copy_to_sockptr_offset(sockptr_t dst,= size_t offset, const void *src, size_t size) { if (!sockptr_is_kernel(dst)) - return copy_to_user(dst.user + offset, src, size); + return copy_to_user_partial(dst.user + offset, src, size); memcpy(dst.kernel + offset, src, size); return 0; } diff --git a/ipc/msg.c b/ipc/msg.c index 62996b97f0ac..39848238219d 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -322,7 +322,7 @@ copy_msqid_to_user(void __user *buf, struct msqid64_ds = *in, int version) { switch (version) { case IPC_64: - return copy_to_user(buf, in, sizeof(*in)); + return copy_to_user_partial(buf, in, sizeof(*in)); case IPC_OLD: { struct msqid_ds out; @@ -355,7 +355,7 @@ copy_msqid_to_user(void __user *buf, struct msqid64_ds = *in, int version) out.msg_lspid =3D in->msg_lspid; out.msg_lrpid =3D in->msg_lrpid; =20 - return copy_to_user(buf, &out, sizeof(out)); + return copy_to_user_partial(buf, &out, sizeof(out)); } default: return -EINVAL; @@ -712,7 +712,7 @@ static int copy_compat_msqid_to_user(void __user *buf, = struct msqid64_ds *in, v.msg_qbytes =3D in->msg_qbytes; v.msg_lspid =3D in->msg_lspid; v.msg_lrpid =3D in->msg_lrpid; - return copy_to_user(buf, &v, sizeof(v)); + return copy_to_user_partial(buf, &v, sizeof(v)); } else { struct compat_msqid_ds v; memset(&v, 0, sizeof(v)); @@ -725,7 +725,7 @@ static int copy_compat_msqid_to_user(void __user *buf, = struct msqid64_ds *in, v.msg_qbytes =3D in->msg_qbytes; v.msg_lspid =3D in->msg_lspid; v.msg_lrpid =3D in->msg_lrpid; - return copy_to_user(buf, &v, sizeof(v)); + return copy_to_user_partial(buf, &v, sizeof(v)); } } =20 diff --git a/ipc/sem.c b/ipc/sem.c index 6cdf862b1f5c..3b56086ba07d 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -1196,7 +1196,7 @@ static unsigned long copy_semid_to_user(void __user *= buf, struct semid64_ds 
*in, { switch (version) { case IPC_64: - return copy_to_user(buf, in, sizeof(*in)); + return copy_to_user_partial(buf, in, sizeof(*in)); case IPC_OLD: { struct semid_ds out; @@ -1209,7 +1209,7 @@ static unsigned long copy_semid_to_user(void __user *= buf, struct semid64_ds *in, out.sem_ctime =3D in->sem_ctime; out.sem_nsems =3D in->sem_nsems; =20 - return copy_to_user(buf, &out, sizeof(out)); + return copy_to_user_partial(buf, &out, sizeof(out)); } default: return -EINVAL; @@ -1759,7 +1759,7 @@ static int copy_compat_semid_to_user(void __user *buf= , struct semid64_ds *in, v.sem_ctime =3D lower_32_bits(in->sem_ctime); v.sem_ctime_high =3D upper_32_bits(in->sem_ctime); v.sem_nsems =3D in->sem_nsems; - return copy_to_user(buf, &v, sizeof(v)); + return copy_to_user_partial(buf, &v, sizeof(v)); } else { struct compat_semid_ds v; memset(&v, 0, sizeof(v)); @@ -1767,7 +1767,7 @@ static int copy_compat_semid_to_user(void __user *buf= , struct semid64_ds *in, v.sem_otime =3D in->sem_otime; v.sem_ctime =3D in->sem_ctime; v.sem_nsems =3D in->sem_nsems; - return copy_to_user(buf, &v, sizeof(v)); + return copy_to_user_partial(buf, &v, sizeof(v)); } } =20 diff --git a/ipc/shm.c b/ipc/shm.c index a95dae447707..1eb53c3df3b9 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -853,7 +853,7 @@ static inline unsigned long copy_shmid_to_user(void __u= ser *buf, struct shmid64_ { switch (version) { case IPC_64: - return copy_to_user(buf, in, sizeof(*in)); + return copy_to_user_partial(buf, in, sizeof(*in)); case IPC_OLD: { struct shmid_ds out; @@ -868,7 +868,7 @@ static inline unsigned long copy_shmid_to_user(void __u= ser *buf, struct shmid64_ out.shm_lpid =3D in->shm_lpid; out.shm_nattch =3D in->shm_nattch; =20 - return copy_to_user(buf, &out, sizeof(out)); + return copy_to_user_partial(buf, &out, sizeof(out)); } default: return -EINVAL; @@ -905,7 +905,7 @@ static inline unsigned long copy_shminfo_to_user(void _= _user *buf, struct shminf { switch (version) { case IPC_64: - return 
copy_to_user(buf, in, sizeof(*in)); + return copy_to_user_partial(buf, in, sizeof(*in)); case IPC_OLD: { struct shminfo out; @@ -920,7 +920,7 @@ static inline unsigned long copy_shminfo_to_user(void _= _user *buf, struct shminf out.shmseg =3D in->shmseg; out.shmall =3D in->shmall; =20 - return copy_to_user(buf, &out, sizeof(out)); + return copy_to_user_partial(buf, &out, sizeof(out)); } default: return -EINVAL; @@ -1359,7 +1359,7 @@ static int copy_compat_shminfo_to_user(void __user *b= uf, struct shminfo64 *in, info.shmmni =3D in->shmmni; info.shmseg =3D in->shmseg; info.shmall =3D in->shmall; - return copy_to_user(buf, &info, sizeof(info)); + return copy_to_user_partial(buf, &info, sizeof(info)); } else { struct shminfo info; memset(&info, 0, sizeof(info)); @@ -1368,7 +1368,7 @@ static int copy_compat_shminfo_to_user(void __user *b= uf, struct shminfo64 *in, info.shmmni =3D in->shmmni; info.shmseg =3D in->shmseg; info.shmall =3D in->shmall; - return copy_to_user(buf, &info, sizeof(info)); + return copy_to_user_partial(buf, &info, sizeof(info)); } } =20 @@ -1384,7 +1384,7 @@ static int put_compat_shm_info(struct shm_info *ip, info.shm_swp =3D ip->shm_swp; info.swap_attempts =3D ip->swap_attempts; info.swap_successes =3D ip->swap_successes; - return copy_to_user(uip, &info, sizeof(info)); + return copy_to_user_partial(uip, &info, sizeof(info)); } =20 static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *= in, @@ -1404,7 +1404,7 @@ static int copy_compat_shmid_to_user(void __user *buf= , struct shmid64_ds *in, v.shm_nattch =3D in->shm_nattch; v.shm_cpid =3D in->shm_cpid; v.shm_lpid =3D in->shm_lpid; - return copy_to_user(buf, &v, sizeof(v)); + return copy_to_user_partial(buf, &v, sizeof(v)); } else { struct compat_shmid_ds v; memset(&v, 0, sizeof(v)); @@ -1417,7 +1417,7 @@ static int copy_compat_shmid_to_user(void __user *buf= , struct shmid64_ds *in, v.shm_nattch =3D in->shm_nattch; v.shm_cpid =3D in->shm_cpid; v.shm_lpid =3D in->shm_lpid; - 
return copy_to_user(buf, &v, sizeof(v)); + return copy_to_user_partial(buf, &v, sizeof(v)); } } =20 diff --git a/kernel/regset.c b/kernel/regset.c index b2871fa68b2a..29c6d19c3465 100644 --- a/kernel/regset.c +++ b/kernel/regset.c @@ -70,7 +70,7 @@ int copy_regset_to_user(struct task_struct *target, =20 ret =3D regset_get_alloc(target, regset, size, &buf); if (ret > 0) - ret =3D copy_to_user(data, buf, ret) ? -EFAULT : 0; + ret =3D copy_to_user_partial(data, buf, ret) ? -EFAULT : 0; kvfree(buf); return ret; } diff --git a/kernel/sys.c b/kernel/sys.c index 62e842055cc9..8e1ce8c26884 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1343,7 +1343,7 @@ static int override_release(char __user *release, siz= e_t len) v =3D LINUX_VERSION_PATCHLEVEL + 60; copy =3D clamp_t(size_t, len, 1, sizeof(buf)); copy =3D scnprintf(buf, copy, "2.6.%u%s", v, rest); - ret =3D copy_to_user(release, buf, copy + 1); + ret =3D copy_to_user_partial(release, buf, copy + 1); } return ret; } @@ -1567,7 +1567,7 @@ SYSCALL_DEFINE2(getrlimit, unsigned int, resource, st= ruct rlimit __user *, rlim) =20 ret =3D do_prlimit(current, resource, NULL, &value); if (!ret) - ret =3D copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0; + ret =3D copy_to_user_partial(rlim, &value, sizeof(*rlim)) ? 
-EFAULT : 0; =20 return ret; } diff --git a/lib/kfifo.c b/lib/kfifo.c index 2633f9cc336c..00c19a321aae 100644 --- a/lib/kfifo.c +++ b/lib/kfifo.c @@ -203,11 +203,11 @@ static unsigned long kfifo_copy_from_user(struct __kf= ifo *fifo, } l =3D min(len, size - off); =20 - ret =3D copy_from_user(fifo->data + off, from, l); + ret =3D copy_from_user_partial(fifo->data + off, from, l); if (unlikely(ret)) ret =3D DIV_ROUND_UP(ret + len - l, esize); else { - ret =3D copy_from_user(fifo->data, from + l, len - l); + ret =3D copy_from_user_partial(fifo->data, from + l, len - l); if (unlikely(ret)) ret =3D DIV_ROUND_UP(ret, esize); } @@ -263,11 +263,11 @@ static unsigned long kfifo_copy_to_user(struct __kfif= o *fifo, void __user *to, } l =3D min(len, size - off); =20 - ret =3D copy_to_user(to, fifo->data + off, l); + ret =3D copy_to_user_partial(to, fifo->data + off, l); if (unlikely(ret)) ret =3D DIV_ROUND_UP(ret + len - l, esize); else { - ret =3D copy_to_user(to + l, fifo->data, len - l); + ret =3D copy_to_user_partial(to + l, fifo->data, len - l); if (unlikely(ret)) ret =3D DIV_ROUND_UP(ret, esize); } diff --git a/mm/kasan/kasan_test_c.c b/mm/kasan/kasan_test_c.c index 32d06cbf6a31..a4d19fc1068a 100644 --- a/mm/kasan/kasan_test_c.c +++ b/mm/kasan/kasan_test_c.c @@ -2169,9 +2169,9 @@ static void copy_user_test_oob(struct kunit *test) usermem =3D (char __user *)useraddr; =20 KUNIT_EXPECT_KASAN_FAIL(test, - unused =3D copy_from_user(kmem, usermem, size + 1)); + unused =3D copy_from_user_partial(kmem, usermem, size + 1)); KUNIT_EXPECT_KASAN_FAIL_READ(test, - unused =3D copy_to_user(usermem, kmem, size + 1)); + unused =3D copy_to_user_partial(usermem, kmem, size + 1)); KUNIT_EXPECT_KASAN_FAIL(test, unused =3D __copy_from_user(kmem, usermem, size + 1)); KUNIT_EXPECT_KASAN_FAIL_READ(test, diff --git a/mm/memory.c b/mm/memory.c index ea6568571131..5a2f7543a2da 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -7529,7 +7529,7 @@ long copy_folio_from_user(struct folio *dst_folio, kaddr 
=3D kmap_local_page(subpage); if (!allow_pagefault) pagefault_disable(); - rc =3D copy_from_user(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE); + rc =3D copy_from_user_partial(kaddr, usr_src + i * PAGE_SIZE, PAGE_SIZE); if (!allow_pagefault) pagefault_enable(); kunmap_local(kaddr); diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index af8762b24039..7327c98b206a 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -471,7 +471,7 @@ static int x25_getsockopt(struct socket *sock, int leve= l, int optname, goto out; =20 val =3D test_bit(X25_Q_BIT_FLAG, &x25_sk(sk)->flags); - rc =3D copy_to_user(optval, &val, len) ? -EFAULT : 0; + rc =3D copy_to_user_partial(optval, &val, len) ? -EFAULT : 0; out: return rc; } diff --git a/rust/helpers/uaccess.c b/rust/helpers/uaccess.c index 01de4fbbcc84..710e07cd60ae 100644 --- a/rust/helpers/uaccess.c +++ b/rust/helpers/uaccess.c @@ -5,13 +5,13 @@ __rust_helper unsigned long rust_helper_copy_from_user(void *to, const void __user *from, unsigned lon= g n) { - return copy_from_user(to, from, n); + return copy_from_user_partial(to, from, n); } =20 __rust_helper unsigned long rust_helper_copy_to_user(void __user *to, const void *from, unsigned long = n) { - return copy_to_user(to, from, n); + return copy_to_user_partial(to, from, n); } =20 #ifndef CONFIG_ARCH_WANTS_NOINLINE_COPY_USER diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c index 08e0556bf161..3941bf9666a9 100644 --- a/sound/pci/emu10k1/emufx.c +++ b/sound/pci/emu10k1/emufx.c @@ -739,10 +739,10 @@ static int copy_gctl_to_user(struct snd_emu10k1 *emu, =20 _dst =3D (struct snd_emu10k1_fx8010_control_gpr __user *)dst; if (emu->support_tlv) - return copy_to_user(&_dst[idx], src, sizeof(*src)); + return copy_to_user_partial(&_dst[idx], src, sizeof(*src)); =09 octl =3D (struct snd_emu10k1_fx8010_control_old_gpr __user *)dst; - return copy_to_user(&octl[idx], src, sizeof(*octl)); + return copy_to_user_partial(&octl[idx], src, sizeof(*octl)); } =20 static int 
copy_ctl_elem_id(const struct emu10k1_ctl_elem_id *list, int i, diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c index 31cc2d91c8d2..d5842d8a8509 100644 --- a/sound/pci/rme9652/hdsp.c +++ b/sound/pci/rme9652/hdsp.c @@ -4541,7 +4541,7 @@ static int snd_hdsp_capture_release(struct snd_pcm_su= bstream *substream) static inline int copy_u32_le(void __user *dest, void __iomem *src) { u32 val =3D readl(src); - return copy_to_user(dest, &val, 4); + return copy_to_user_partial(dest, &val, 4); } =20 static inline int copy_u64_le(void __user *dest, void __iomem *src_low, vo= id __iomem *src_high) @@ -4551,7 +4551,7 @@ static inline int copy_u64_le(void __user *dest, void= __iomem *src_low, void __i rms_low =3D readl(src_low); rms_high =3D readl(src_high); rms =3D ((u64)rms_high << 32) | rms_low; - return copy_to_user(dest, &rms, 8); + return copy_to_user_partial(dest, &rms, 8); } =20 static inline int copy_u48_le(void __user *dest, void __iomem *src_low, vo= id __iomem *src_high) @@ -4561,7 +4561,7 @@ static inline int copy_u48_le(void __user *dest, void= __iomem *src_low, void __i rms_low =3D readl(src_low) & 0xffffff00; rms_high =3D readl(src_high) & 0xffffff00; rms =3D ((u64)rms_high << 32) | rms_low; - return copy_to_user(dest, &rms, 8); + return copy_to_user_partial(dest, &rms, 8); } =20 static int hdsp_9652_get_peak(struct hdsp *hdsp, struct hdsp_peak_rms __us= er *peak_rms) diff --git a/sound/soc/intel/avs/probes.c b/sound/soc/intel/avs/probes.c index 099119ad28b3..bc2871d3e18c 100644 --- a/sound/soc/intel/avs/probes.c +++ b/sound/soc/intel/avs/probes.c @@ -244,10 +244,10 @@ static int avs_probe_compr_copy(struct snd_soc_compon= ent *comp, struct snd_compr n =3D rtd->buffer_size - offset; =20 if (count < n) { - ret =3D copy_to_user(buf, ptr, count); + ret =3D copy_to_user_partial(buf, ptr, count); } else { - ret =3D copy_to_user(buf, ptr, n); - ret +=3D copy_to_user(buf + n, rtd->dma_area, count - n); + ret =3D copy_to_user_partial(buf, ptr, n); + ret 
+=3D copy_to_user_partial(buf + n, rtd->dma_area, count - n); } =20 if (ret) diff --git a/sound/soc/sof/compress.c b/sound/soc/sof/compress.c index 93f2376585db..d54be8a188ec 100644 --- a/sound/soc/sof/compress.c +++ b/sound/soc/sof/compress.c @@ -324,10 +324,10 @@ static int sof_compr_copy_playback(struct snd_compr_r= untime *rtd, n =3D rtd->buffer_size - offset; =20 if (count < n) { - ret =3D copy_from_user(ptr, buf, count); + ret =3D copy_from_user_partial(ptr, buf, count); } else { - ret =3D copy_from_user(ptr, buf, n); - ret +=3D copy_from_user(rtd->dma_area, buf + n, count - n); + ret =3D copy_from_user_partial(ptr, buf, n); + ret +=3D copy_from_user_partial(rtd->dma_area, buf + n, count - n); } =20 return count - ret; @@ -345,10 +345,10 @@ static int sof_compr_copy_capture(struct snd_compr_ru= ntime *rtd, n =3D rtd->buffer_size - offset; =20 if (count < n) { - ret =3D copy_to_user(buf, ptr, count); + ret =3D copy_to_user_partial(buf, ptr, count); } else { - ret =3D copy_to_user(buf, ptr, n); - ret +=3D copy_to_user(buf + n, rtd->dma_area, count - n); + ret =3D copy_to_user_partial(buf, ptr, n); + ret +=3D copy_to_user_partial(buf + n, rtd->dma_area, count - n); } =20 return count - ret; diff --git a/sound/soc/sof/sof-client-probes.c b/sound/soc/sof/sof-client-p= robes.c index 124f55508159..4c5f4f016ff8 100644 --- a/sound/soc/sof/sof-client-probes.c +++ b/sound/soc/sof/sof-client-probes.c @@ -184,10 +184,10 @@ static int sof_probes_compr_copy(struct snd_soc_compo= nent *component, n =3D rtd->buffer_size - offset; =20 if (count < n) { - ret =3D copy_to_user(buf, ptr, count); + ret =3D copy_to_user_partial(buf, ptr, count); } else { - ret =3D copy_to_user(buf, ptr, n); - ret +=3D copy_to_user(buf + n, rtd->dma_area, count - n); + ret =3D copy_to_user_partial(buf, ptr, n); + ret +=3D copy_to_user_partial(buf + n, rtd->dma_area, count - n); } =20 if (ret) --=20 2.49.0 From nobody Tue May 5 10:13:01 2026 Received: from smtp.kernel.org 
(aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id DFE553E3C7A; Mon, 27 Apr 2026 17:18:45 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1777310326; cv=none; b=uo8EpjUguetClx/AO0r+aYVychqPgkJMJ7myAT357bTBk3/bOK2L7RG7txT912rcvIavtfF3i4XPPuOKqtSY71QvhTo0GqOqhIvzTS11ltEehFba3sfdIj62ryKV/p8gCERJY/1oBmT5l0wHm4kQKyxBxlsBR57CaKZYMBuln5Y= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1777310326; c=relaxed/simple; bh=x+yz+Ptlr8yUKihbRCrYsj2OzN4+HDHdWj3mTkeeywo=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=srV2r8dm8FRntRFChPpNwjqvn6mBvn5nwWMGaLYH9bmqtGW6DGvHGHs5Plx3ztW6yMkMFgaZ/reaava1o2osheYw67W9zfExqhONQ0StIrVVcNu7POCynZ6S7oDTnBtsjEHVcWQ9q4P9cgso13FDR6G7GxDmmIFkE4bjrkfdnyg= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=YS9v5ort; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="YS9v5ort" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 5390FC2BCB4; Mon, 27 Apr 2026 17:18:33 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1777310325; bh=x+yz+Ptlr8yUKihbRCrYsj2OzN4+HDHdWj3mTkeeywo=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=YS9v5orti9jIm2coPKy7cH4iQCLjTYOH2T4kGrmAKw1lk2UpD8UcWDut4XHtMo5CX 290KHU3PNOVzMmnTKEL1o/OfzgOnzBVsJnbuGicRbc3HqjVsrjLo1DZ4Ncwz6tafWZ SG8/hG1Ct8pUgSHrDrW4chwaJ34uCldA2tlOQo4bI8M4kMxsW5bP0OLNOAC7BeTwPk Mil/pgBJA2/iCGWwmbEh5WNZZrQ4Md/gtqmnREocwFdhobkWjiqhIB30lRO/GV15k3 DZOePxOSt4vdcYmGqKSVWX7R1FegGyxqEmqkjMUEcFWkKzHsBpXNo8/6bbP3EGANbq 
KZwGUUWEURiLg== From: "Christophe Leroy (CS GROUP)" To: Yury Norov , Andrew Morton , Linus Torvalds , David Laight , Thomas Gleixner Cc: "Christophe Leroy (CS GROUP)" , linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org, linux-snps-arc@lists.infradead.org, linux-arm-kernel@lists.infradead.org, linux-mips@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, kvm@vger.kernel.org, linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org, sparclinux@vger.kernel.org, linux-um@lists.infradead.org, dmaengine@vger.kernel.org, linux-efi@vger.kernel.org, linux-fsi@lists.ozlabs.org, amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org, intel-gfx@lists.freedesktop.org, linux-wpan@vger.kernel.org, netdev@vger.kernel.org, linux-wireless@vger.kernel.org, linux-spi@vger.kernel.org, linux-media@vger.kernel.org, linux-staging@lists.linux.dev, linux-serial@vger.kernel.org, linux-usb@vger.kernel.org, xen-devel@lists.xenproject.org, linux-fsdevel@vger.kernel.org, ocfs2-devel@lists.linux.dev, bpf@vger.kernel.org, kasan-dev@googlegroups.com, linux-mm@kvack.org, linux-x25@vger.kernel.org, rust-for-linux@vger.kernel.org, linux-sound@vger.kernel.org, sound-open-firmware@alsa-project.org, linux-csky@vger.kernel.org, linux-hexagon@vger.kernel.org, loongarch@lists.linux.dev, linux-m68k@lists.linux-m68k.org, linux-openrisc@vger.kernel.org, linux-parisc@vger.kernel.org, linux-sh@vger.kernel.org, linux-arch@vger.kernel.org Subject: [RFC PATCH v1 6/9] uaccess: Change copy_{to/from}_user to return -EFAULT Date: Mon, 27 Apr 2026 19:13:47 +0200 Message-ID: <1a55107abe15dd78450888e2b5327c3a56af29b7.1777306795.git.chleroy@kernel.org> X-Mailer: git-send-email 2.49.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Developer-Signature: v=1; a=openpgp-sha256; l=2416; i=chleroy@kernel.org; h=from:subject:message-id; bh=x+yz+Ptlr8yUKihbRCrYsj2OzN4+HDHdWj3mTkeeywo=; 
b=owGbwMvMwCV2d0KB2p7V54MZT6slMWS+nxkjryH88LzkjscmK202zhcwuOPhcJTlidTVNdtCP 9vs/VPG2lHKwiDGxSArpshy/D/3rhldX1Lzp+7Sh5nDygQyhIGLUwAm0nCN4b9/yT75hpDtr4zk 9XPfORmtVO2p/XH+8bFg730PPNfMvn6dkWHDjsJSC09Vr9bDobdrxTgfMm1ckveKR+Rkud3skLM HLXgA X-Developer-Key: i=chleroy@kernel.org; a=openpgp; fpr=10FFE6F8B390DE17ACC2632368A92FEB01B8DD78 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Now that copy_{to/from}_user_partial() are used by callers which expect partial copy with number of not copied bytes as return value, change copy_{to/from}_user() to return an int, and return -EFAULT when the copy is not complete. Signed-off-by: Christophe Leroy (CS GROUP) --- include/linux/uaccess.h | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 2d37173782b3..33b7d0f5f808 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -211,7 +211,7 @@ extern __must_check unsigned long _copy_to_user(void __user *, const void *, unsigned long); =20 static __always_inline unsigned long __must_check -copy_from_user(void *to, const void __user *from, unsigned long n) +copy_from_user_common(void *to, const void __user *from, unsigned long n, = bool partial) { if (!check_copy_size(to, n, false)) return n; @@ -221,10 +221,20 @@ copy_from_user(void *to, const void __user *from, uns= igned long n) return _inline_copy_from_user(to, from, n); } =20 -#define copy_from_user_partial copy_from_user +static __always_inline unsigned long __must_check +copy_from_user_partial(void *to, const void __user *from, unsigned long n) +{ + return copy_from_user_common(to, from, n, true); +} + +static __always_inline int __must_check +copy_from_user(void *to, const void __user *from, unsigned long n) +{ + return copy_from_user_common(to, from, n, false) ? 
-EFAULT : 0; +} =20 static __always_inline unsigned long __must_check -copy_to_user(void __user *to, const void *from, unsigned long n) +copy_to_user_common(void __user *to, const void *from, unsigned long n, bo= ol partial) { if (!check_copy_size(from, n, true)) return n; @@ -235,7 +245,17 @@ copy_to_user(void __user *to, const void *from, unsign= ed long n) return _inline_copy_to_user(to, from, n); } =20 -#define copy_to_user_partial copy_to_user +static __always_inline unsigned long __must_check +copy_to_user_partial(void __user *to, const void *from, unsigned long n) +{ + return copy_to_user_common(to, from, n, true); +} + +static __always_inline int __must_check +copy_to_user(void __user *to, const void *from, unsigned long n) +{ + return copy_to_user_common(to, from, n, false) ? -EFAULT : 0; +} =20 #ifndef copy_mc_to_kernel /* --=20 2.49.0 From nobody Tue May 5 10:13:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 9FA1329405; Mon, 27 Apr 2026 17:18:59 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1777310339; cv=none; b=V9gMzGfX3K/aQXkNsD/oVjsyD+cQw8S09dynWZgeTgP6xCWHYE79CWBm1WCOn0TwlZ3iyVKH+FU3/Cad6Xi2kquq6PnIRjvPl8pSGxfiDp8jMwESlnphM/SzU/ELLGgpyS5sONKhsTY9spLNDU3unUTS+qgLsO2cHnhWMMHZVkw= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1777310339; c=relaxed/simple; bh=VVuQn2YnZFi2JH9uh9hmEqXb1BpygsXabSyFGnhBvmI=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=SKWUVqk+wJPhskYIH7RUeMKAxd/Isym2+6X45CWG2s1WBtpAbctD9t0gy+k9Fz2ebHkIxb3q5jR5xxNRIE8dQWKMEGWjpo3JqNbdIDiYMhg/Bn7c0h21r+zZbj33Q+WMhEipG09K//Kp4+Cwt3wu+NrIm0wMqXJ5M+/TJeECG5s= ARC-Authentication-Results: i=1; 
smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=kdO2kRaq; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="kdO2kRaq" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 59C90C2BCB4; Mon, 27 Apr 2026 17:18:46 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1777310339; bh=VVuQn2YnZFi2JH9uh9hmEqXb1BpygsXabSyFGnhBvmI=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=kdO2kRaqrKnrRzE/uOUoSc+QuIvBLF9bEcfUYb9jTJLWJk0AT883/0gLUkpYyvhxW JwKLSaa6O8znPG+ygzo4nQLM8J0f65SbAuU342P7t8CFNHOEuyufyC86uU6TKB6BuB gSew3oyOMPJ8ALphDrpzU/JPmVzf88NlkSTCpSsq0dGpC1oXnkppqU6so9I0ayHunG vmD6szQq9NnauXryTPXCD0GLKF95gnwZ47owOPgIM468cdafyYqaGorVn/T0ZUPF41 utCUjd396YBehKkYMEmrRy4GP6QkmxuYFD7L2LBro+x9SzGKxh5BvzwrXlX1jpWNJG /RZq8/R9DjgcA== From: "Christophe Leroy (CS GROUP)" To: Yury Norov , Andrew Morton , Linus Torvalds , David Laight , Thomas Gleixner Cc: "Christophe Leroy (CS GROUP)" , linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org, linux-snps-arc@lists.infradead.org, linux-arm-kernel@lists.infradead.org, linux-mips@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, kvm@vger.kernel.org, linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org, sparclinux@vger.kernel.org, linux-um@lists.infradead.org, dmaengine@vger.kernel.org, linux-efi@vger.kernel.org, linux-fsi@lists.ozlabs.org, amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org, intel-gfx@lists.freedesktop.org, linux-wpan@vger.kernel.org, netdev@vger.kernel.org, linux-wireless@vger.kernel.org, linux-spi@vger.kernel.org, linux-media@vger.kernel.org, linux-staging@lists.linux.dev, linux-serial@vger.kernel.org, linux-usb@vger.kernel.org, xen-devel@lists.xenproject.org, linux-fsdevel@vger.kernel.org, ocfs2-devel@lists.linux.dev, bpf@vger.kernel.org, kasan-dev@googlegroups.com, linux-mm@kvack.org, 
linux-x25@vger.kernel.org, rust-for-linux@vger.kernel.org, linux-sound@vger.kernel.org, sound-open-firmware@alsa-project.org, linux-csky@vger.kernel.org, linux-hexagon@vger.kernel.org, loongarch@lists.linux.dev, linux-m68k@lists.linux-m68k.org, linux-openrisc@vger.kernel.org, linux-parisc@vger.kernel.org, linux-sh@vger.kernel.org, linux-arch@vger.kernel.org Subject: [RFC PATCH v1 7/9] x86: Add unsafe_copy_from_user() Date: Mon, 27 Apr 2026 19:13:48 +0200 Message-ID: <0ee46bb228d97163fbdc14f2a7c52b93d8bc34ce.1777306795.git.chleroy@kernel.org> X-Mailer: git-send-email 2.49.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Developer-Signature: v=1; a=openpgp-sha256; l=2390; i=chleroy@kernel.org; h=from:subject:message-id; bh=VVuQn2YnZFi2JH9uh9hmEqXb1BpygsXabSyFGnhBvmI=; b=owGbwMvMwCV2d0KB2p7V54MZT6slMWS+nxljeGSBfXj99HCNy2WTVFXXC9z/Hmi+//aE1/MbC +50nw1Q6ChlYRDjYpAVU2Q5/p9714yuL6n5U3fpw8xhZQIZwsDFKQAT2djH8D+e3yYgNP/F6xtd z9QNXjIccVycwR574E38S3tWp2uq95Yy/M9xKJ2hYJCUJV9391rVwmlfl7x6GzD19bqwZeyVdgF fZ3MAAA== X-Developer-Key: i=chleroy@kernel.org; a=openpgp; fpr=10FFE6F8B390DE17ACC2632368A92FEB01B8DD78 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" At the time being, x86 and arm64 are missing unsafe_copy_from_user(). Add it. Signed-off-by: Christophe Leroy (CS GROUP) --- arch/x86/include/asm/uaccess.h | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 3a0dd3c2b233..10c458ffa399 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -598,7 +598,7 @@ _label: \ * We want the unsafe accessors to always be inlined and use * the error labels - thus the macro games. 
*/ -#define unsafe_copy_loop(dst, src, len, type, label) \ +#define unsafe_put_loop(dst, src, len, type, label) \ while (len >=3D sizeof(type)) { \ unsafe_put_user(*(type *)(src),(type __user *)(dst),label); \ dst +=3D sizeof(type); \ @@ -611,10 +611,29 @@ do { \ char __user *__ucu_dst =3D (_dst); \ const char *__ucu_src =3D (_src); \ size_t __ucu_len =3D (_len); \ - unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label); \ - unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label); \ - unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label); \ - unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \ + unsafe_put_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label); \ + unsafe_put_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label); \ + unsafe_put_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label); \ + unsafe_put_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \ +} while (0) + +#define unsafe_get_loop(dst, src, len, type, label) \ + while (len >=3D sizeof(type)) { \ + unsafe_get_user(*(type __user *)(src),(type *)(dst),label); \ + dst +=3D sizeof(type); \ + src +=3D sizeof(type); \ + len -=3D sizeof(type); \ + } + +#define unsafe_copy_from_user(_dst,_src,_len,label) \ +do { \ + char *__ucu_dst =3D (_dst); \ + const char __user *__ucu_src =3D (_src); \ + size_t __ucu_len =3D (_len); \ + unsafe_get_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label); \ + unsafe_get_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label); \ + unsafe_get_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label); \ + unsafe_get_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \ } while (0) =20 #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT --=20 2.49.0 From nobody Tue May 5 10:13:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 7A9BF3E3C6C; Mon, 27 Apr 2026 17:19:12 +0000 (UTC) 
Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1777310352; cv=none; b=Qlb3CEq/LdfRy5UYlAwHoWtFfNk3yja6fqZDB+NVBo98KSMGLINCzdyWA3vbX/pAw7gcIYXrEACb1urTAKK93S7j100rpCUmeX+sScCw0kNPFJM3D8wcAIDKto4fcps3TWTwExkVrYVs4hVNTzuK0NVzwk8eQl2ws8loSfDYBTQ= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1777310352; c=relaxed/simple; bh=vbZIj5I8a11ig9c375SeIMkNInKGe+jckYb8HPag1tw=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=UvM6B3zveu4BEzyEt88B2ZJQuUwnFpsKVQOonk0Dxp0uVrezORIajtQyJXY9A0tfFCoQ4oN5JnU18UcGxdJLd60z4SBOj4uoo3LwotFqHe/+G1py0t9omHk48/FzQl+S9urjdoND8HKXv4PAoSAme77FE+scfBOjQ1qWOML+oCI= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=lp9wELlr; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="lp9wELlr" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 1FAFFC2BCB4; Mon, 27 Apr 2026 17:18:59 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1777310352; bh=vbZIj5I8a11ig9c375SeIMkNInKGe+jckYb8HPag1tw=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=lp9wELlrvJvoeuCK0/zMI5UANnrsQ+T4se/gBOunN8iQDrEi1RstW5G65/9vUEPmI U9UXqoktuU7EViGdT+8yzDZHjl2N0E5vd9veY8uQePuAD9x0oGHf4CotQwSmAD24go gSXTECesL9+kQmgNOk7myjSgr8UboLxcnh/OrEADTLSIqRqZRx4YwXDl6QWc3wVNsO sB/2ZwfKCyhos7Vj9aXXCgNggtmoXoIYLpWXmI9TjsLctEcOG8w3vDT/K6+fVh16nS /UcGgkCV5vp5qC1VRJeMqVPGsaV7+wscOAZaxlqKS8j4DsLjsIu1iQmtuFIX3tL+zm kvFBNXQR3j5iw== From: "Christophe Leroy (CS GROUP)" To: Yury Norov , Andrew Morton , Linus Torvalds , David Laight , Thomas Gleixner Cc: "Christophe Leroy (CS GROUP)" , linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org, linux-snps-arc@lists.infradead.org, 
linux-arm-kernel@lists.infradead.org, linux-mips@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, kvm@vger.kernel.org, linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org, sparclinux@vger.kernel.org, linux-um@lists.infradead.org, dmaengine@vger.kernel.org, linux-efi@vger.kernel.org, linux-fsi@lists.ozlabs.org, amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org, intel-gfx@lists.freedesktop.org, linux-wpan@vger.kernel.org, netdev@vger.kernel.org, linux-wireless@vger.kernel.org, linux-spi@vger.kernel.org, linux-media@vger.kernel.org, linux-staging@lists.linux.dev, linux-serial@vger.kernel.org, linux-usb@vger.kernel.org, xen-devel@lists.xenproject.org, linux-fsdevel@vger.kernel.org, ocfs2-devel@lists.linux.dev, bpf@vger.kernel.org, kasan-dev@googlegroups.com, linux-mm@kvack.org, linux-x25@vger.kernel.org, rust-for-linux@vger.kernel.org, linux-sound@vger.kernel.org, sound-open-firmware@alsa-project.org, linux-csky@vger.kernel.org, linux-hexagon@vger.kernel.org, loongarch@lists.linux.dev, linux-m68k@lists.linux-m68k.org, linux-openrisc@vger.kernel.org, linux-parisc@vger.kernel.org, linux-sh@vger.kernel.org, linux-arch@vger.kernel.org Subject: [RFC PATCH v1 8/9] arm64: Add unsafe_copy_from_user() Date: Mon, 27 Apr 2026 19:13:49 +0200 Message-ID: <5b09e58a84c9edcfe5724db5cd57e45d96a96bfa.1777306795.git.chleroy@kernel.org> X-Mailer: git-send-email 2.49.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Developer-Signature: v=1; a=openpgp-sha256; l=2498; i=chleroy@kernel.org; h=from:subject:message-id; bh=vbZIj5I8a11ig9c375SeIMkNInKGe+jckYb8HPag1tw=; b=owGbwMvMwCV2d0KB2p7V54MZT6slMWS+nxmbKDOhb/vkF5+62Iv+nzi0lmWmkWf+tSO3HK9yP D89hV+LoaOUhUGMi0FWTJHl+H/uXTO6vqTmT92lDzOHlQlkCAMXpwBMJCKWkeGFnFP5dUbGE1cO pXr18eg3ndoc9ubDcl2tYxbdt9o1nmkyMvQuDCp+FV4TvfDP3c6+r/YFCyvUNTZlcrea/2SZ/OM dBwcA X-Developer-Key: i=chleroy@kernel.org; a=openpgp; 
fpr=10FFE6F8B390DE17ACC2632368A92FEB01B8DD78 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" At the time being, x86 and arm64 are missing unsafe_copy_from_user(). Add it. Signed-off-by: Christophe Leroy (CS GROUP) --- arch/arm64/include/asm/uaccess.h | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uacc= ess.h index 1e20ec91b56f..adfdb52cd82b 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -437,7 +437,7 @@ static inline void user_access_restore(unsigned long en= abled) { } * We want the unsafe accessors to always be inlined and use * the error labels - thus the macro games. */ -#define unsafe_copy_loop(dst, src, len, type, label) \ +#define unsafe_put_loop(dst, src, len, type, label) \ while (len >=3D sizeof(type)) { \ unsafe_put_user(*(type *)(src),(type __user *)(dst),label); \ dst +=3D sizeof(type); \ @@ -450,10 +450,29 @@ do { \ char __user *__ucu_dst =3D (_dst); \ const char *__ucu_src =3D (_src); \ size_t __ucu_len =3D (_len); \ - unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label); \ - unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label); \ - unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label); \ - unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \ + unsafe_put_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label); \ + unsafe_put_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label); \ + unsafe_put_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label); \ + unsafe_put_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \ +} while (0) + +#define unsafe_get_loop(dst, src, len, type, label) \ + while (len >=3D sizeof(type)) { \ + unsafe_get_user(*(type __user *)(src),(type *)(dst),label); \ + dst +=3D sizeof(type); \ + src +=3D sizeof(type); \ + len -=3D sizeof(type); \ + } + +#define unsafe_copy_from_user(_dst,_src,_len,label) \ +do { \ + char 
*__ucu_dst =3D (_dst); \ + const char __user *__ucu_src =3D (_src); \ + size_t __ucu_len =3D (_len); \ + unsafe_get_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label); \ + unsafe_get_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label); \ + unsafe_get_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label); \ + unsafe_get_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label); \ } while (0) =20 extern unsigned long __must_check __arch_clear_user(void __user *to, unsig= ned long n); --=20 2.49.0 From nobody Tue May 5 10:13:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 2BC59339714; Mon, 27 Apr 2026 17:19:24 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1777310365; cv=none; b=FMMCJWBZRrjjYSiA3eaR054EBybd+MBN+lSN3BN5CocSCcm8tIENJh1D+pCEesMt/NXdLvmDrgdKbTCry0Ws+gXSliqYGgAikUbOn+Q+F0KGjeL6CPBJrSP/6A4kIKVX2k8i7JaCo/d+VNC51IcRSJYDEqTpxQo1Wefcb3LD8UI= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1777310365; c=relaxed/simple; bh=xblTv37x9V4gU3cvmA9ILfz6xXNOv0sNe2zU9C6a5nw=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=XhQrdf7UOghUHj346nNBuNK4t8QKwv42weWTHzLB9J9H1Pg4KEcSn0eVYUQ5ec4lrY6ahBwiH6ofUxBY24/Bicz662twTEYvS8xMs3PQ8bMO23fNazIbxyRHabz7++ADrmyPHUUZtHt6Ieg+neofEfeiXp9GpsZ8FZ+KHzEUFP0= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=k8kV5Zhk; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="k8kV5Zhk" Received: by smtp.kernel.org (Postfix) with ESMTPSA id A25EEC2BCB7; Mon, 27 Apr 2026 
17:19:12 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1777310364; bh=xblTv37x9V4gU3cvmA9ILfz6xXNOv0sNe2zU9C6a5nw=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=k8kV5Zhk7HZbxy8X7mxqUL5UgTTJPPuuLtSKHKOe5PwbIiNodL9/7ESxRbcOMv4dB NN/pd897F5mMQZv6MW+RM/3mikWvqmJGQCtupbd6YHiobStaCQu79RRkwt3Bs1rL/A WmXtcy3vIzgejB2MGSu6MrKJ33itreKKdFlemNypyCUExoUHGXA1bHbqFqaNX4ujdd iXZiXWTV/w69yfgs2V8e+zdWAilhhtsvDDxzPIT8CbbievjXPmrPwVloE5aoqLKmYP ca2+sf92YJQm2mLHcpyNlGd6mMQP6QhUed6Hx8vk/CloBjO7tSgz4fayv+CeXka+Fz NwGM6tvALNVmQ== From: "Christophe Leroy (CS GROUP)" To: Yury Norov , Andrew Morton , Linus Torvalds , David Laight , Thomas Gleixner Cc: "Christophe Leroy (CS GROUP)" , linux-alpha@vger.kernel.org, linux-kernel@vger.kernel.org, linux-snps-arc@lists.infradead.org, linux-arm-kernel@lists.infradead.org, linux-mips@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, kvm@vger.kernel.org, linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org, sparclinux@vger.kernel.org, linux-um@lists.infradead.org, dmaengine@vger.kernel.org, linux-efi@vger.kernel.org, linux-fsi@lists.ozlabs.org, amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org, intel-gfx@lists.freedesktop.org, linux-wpan@vger.kernel.org, netdev@vger.kernel.org, linux-wireless@vger.kernel.org, linux-spi@vger.kernel.org, linux-media@vger.kernel.org, linux-staging@lists.linux.dev, linux-serial@vger.kernel.org, linux-usb@vger.kernel.org, xen-devel@lists.xenproject.org, linux-fsdevel@vger.kernel.org, ocfs2-devel@lists.linux.dev, bpf@vger.kernel.org, kasan-dev@googlegroups.com, linux-mm@kvack.org, linux-x25@vger.kernel.org, rust-for-linux@vger.kernel.org, linux-sound@vger.kernel.org, sound-open-firmware@alsa-project.org, linux-csky@vger.kernel.org, linux-hexagon@vger.kernel.org, loongarch@lists.linux.dev, linux-m68k@lists.linux-m68k.org, linux-openrisc@vger.kernel.org, linux-parisc@vger.kernel.org, linux-sh@vger.kernel.org, linux-arch@vger.kernel.org Subject: [RFC 
PATCH v1 9/9] uaccess: Convert small fixed size copy_{to/from}_user() to scoped user access Date: Mon, 27 Apr 2026 19:13:50 +0200 Message-ID: <8780eb2ef80575931a339e5225bc80eb13e9be6c.1777306795.git.chleroy@kernel.org> X-Mailer: git-send-email 2.49.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Developer-Signature: v=1; a=openpgp-sha256; l=3638; i=chleroy@kernel.org; h=from:subject:message-id; bh=xblTv37x9V4gU3cvmA9ILfz6xXNOv0sNe2zU9C6a5nw=; b=owGbwMvMwCV2d0KB2p7V54MZT6slMWS+nxlr/n9yu2DE7uwkCZGbl0uCp6Y1JLvZTPpz+P/PY xNqPMoaOkpZGMS4GGTFFFmO/+feNaPrS2r+1F36MHNYmUCGMHBxCsBE9HwZ/ns82Ho9PV2Q+7Pp 5fqvs6O/v3YN+3EgL+Wk4b/co1rlkZ8YGVZxmDl7XFiUcmPSTPW14WGSHT6/1u4pcTY2cjD0U7P bzAkA X-Developer-Key: i=chleroy@kernel.org; a=openpgp; fpr=10FFE6F8B390DE17ACC2632368A92FEB01B8DD78 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" copy_{to/from}_user() is a heavy function optimised for copy of large blocs of memory between user and kernel space. When the number of bytes to be copied is known at build time and small, using scoped user access removes the burden of that optimisation. 
Signed-off-by: Christophe Leroy (CS GROUP) --- include/linux/uaccess.h | 47 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 33b7d0f5f808..3ac544527af2 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -50,6 +50,8 @@ #define mask_user_address(src) (src) #endif =20 +#define SMALL_COPY_USER 64 + /* * Architectures should provide two primitives (raw_copy_{to,from}_user()) * and get rid of their private instances of copy_{to,from}_user() and @@ -191,6 +193,9 @@ _inline_copy_from_user(void *to, const void __user *fro= m, unsigned long n) return res; } =20 +static __always_inline __must_check unsigned long +_small_copy_from_user(void *to, const void __user *from, unsigned long n); + extern __must_check unsigned long _copy_from_user(void *, const void __user *, unsigned long); =20 @@ -207,6 +212,9 @@ _inline_copy_to_user(void __user *to, const void *from,= unsigned long n) return n; } =20 +static __always_inline __must_check unsigned long +_small_copy_to_user(void __user *to, const void *from, unsigned long n); + extern __must_check unsigned long _copy_to_user(void __user *, const void *, unsigned long); =20 @@ -215,6 +223,8 @@ copy_from_user_common(void *to, const void __user *from= , unsigned long n, bool p { if (!check_copy_size(to, n, false)) return n; + if (!partial && __builtin_constant_p(n) && n <=3D SMALL_COPY_USER) + return _small_copy_from_user(to, from, n); if (IS_ENABLED(ARCH_WANTS_NOINLINE_COPY_USER)) return _copy_from_user(to, from, n); else @@ -239,6 +249,8 @@ copy_to_user_common(void __user *to, const void *from, = unsigned long n, bool par if (!check_copy_size(from, n, true)) return n; =20 + if (!partial && __builtin_constant_p(n) && n <=3D SMALL_COPY_USER) + return _small_copy_to_user(to, from, n); if (IS_ENABLED(ARCH_WANTS_NOINLINE_COPY_USER)) return _copy_to_user(to, from, n); else @@ -838,6 +850,41 @@ for (bool done =3D false; !done; done 
=3D true) \ #define scoped_user_rw_access(uptr, elbl) \ scoped_user_rw_access_size(uptr, sizeof(*(uptr)), elbl) =20 +static __always_inline __must_check unsigned long +_small_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + might_fault(); + instrument_copy_from_user_before(to, from, n); + scoped_user_read_access_size(from, n, failed) { + /* + * Ensure that bad access_ok() speculation will not lead + * to nasty side effects *after* the copy is finished: + */ + if (!can_do_masked_user_access()) + barrier_nospec(); + unsafe_copy_from_user(to, from, n, failed); + } + instrument_copy_from_user_after(to, from, n, 0); + return 0; +failed: + instrument_copy_from_user_after(to, from, n, n); + return n; +} + +static __always_inline __must_check unsigned long +_small_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + might_fault(); + if (should_fail_usercopy()) + return n; + instrument_copy_to_user(to, from, n); + scoped_user_write_access_size(to, n, failed) + unsafe_copy_to_user(to, from, n, failed); + return 0; +failed: + return n; +} + /** * get_user_inline - Read user data inlined * @val: The variable to store the value read from user memory --=20 2.49.0