From nobody Wed Feb 11 05:03:15 2026
From: "Christophe Leroy (CS GROUP)"
To: Michael Ellerman, Nicholas Piggin, Madhavan Srinivasan
Cc: Christophe Leroy, linux-kernel@vger.kernel.org, linuxppc-dev@lists.ozlabs.org
Subject: [PATCH v5 1/7] powerpc/uaccess: Move barrier_nospec() out of allow_read_{from/write}_user()
Date: Wed, 24 Dec 2025 12:20:49 +0100

From: Christophe Leroy

Commit 74e19ef0ff80 ("uaccess: Add speculation barrier to
copy_from_user()") added a redundant barrier_nospec() in
copy_from_user(), because powerpc is already calling barrier_nospec()
in allow_read_from_user() and allow_read_write_user(). But on other
architectures that call to barrier_nospec() was missing.
So change powerpc instead of reverting the above commit and having to
fix other architectures one by one. This is now possible because
barrier_nospec() has also been added in copy_from_user_iter().

Move barrier_nospec() out of allow_read_from_user() and
allow_read_write_user(). This also allows those functions to be reused
when implementing masked user access, which doesn't require
barrier_nospec().

Don't add it back in raw_copy_from_user() as it is already called by
copy_from_user() and copy_from_user_iter().

Fixes: 74e19ef0ff80 ("uaccess: Add speculation barrier to copy_from_user()")
Signed-off-by: Christophe Leroy
---
 arch/powerpc/include/asm/kup.h     | 2 --
 arch/powerpc/include/asm/uaccess.h | 4 ++++
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index dab63b82a8d4..f2009d7c8cfa 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -134,7 +134,6 @@ static __always_inline void kuap_assert_locked(void)
 
 static __always_inline void allow_read_from_user(const void __user *from, unsigned long size)
 {
-	barrier_nospec();
 	allow_user_access(NULL, from, size, KUAP_READ);
 }
 
@@ -146,7 +145,6 @@ static __always_inline void allow_write_to_user(void __user *to, unsigned long s
 static __always_inline void allow_read_write_user(void __user *to, const void __user *from,
 						  unsigned long size)
 {
-	barrier_nospec();
 	allow_user_access(to, from, size, KUAP_READ_WRITE);
 }
 
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 784a00e681fa..3e622e647d62 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -301,6 +301,7 @@ do {							\
 	__typeof__(sizeof(*(ptr))) __gu_size = sizeof(*(ptr));	\
 							\
 	might_fault();					\
+	barrier_nospec();				\
 	allow_read_from_user(__gu_addr, __gu_size);	\
 	__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err);	\
 	prevent_read_from_user(__gu_addr, __gu_size);	\
@@ -329,6 +330,7 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
 	unsigned long ret;
 
+	barrier_nospec();
 	allow_read_write_user(to, from, n);
 	ret = __copy_tofrom_user(to, from, n);
 	prevent_read_write_user(to, from, n);
@@ -415,6 +417,7 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
 
 	might_fault();
 
+	barrier_nospec();
 	allow_read_write_user((void __user *)ptr, ptr, len);
 	return true;
 }
@@ -431,6 +434,7 @@ user_read_access_begin(const void __user *ptr, size_t len)
 
 	might_fault();
 
+	barrier_nospec();
 	allow_read_from_user(ptr, len);
 	return true;
 }
-- 
2.49.0
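To illustrate the ordering the patch above establishes (the speculation barrier is issued by the caller, before the KUAP window is opened), here is a minimal stand-alone sketch. Every helper below is a stand-in invented for the example, not the kernel implementation:

/* Toy model only -- all helpers are stand-ins, not kernel code. */
#include <stdio.h>

static void barrier_nospec(void)     { printf("speculation barrier\n"); }
static void allow_user_access(void)  { printf("open KUAP window\n"); }
static void prevent_user_access(void) { printf("close KUAP window\n"); }

/*
 * Before the patch the barrier lived inside allow_read_from_user(), so
 * every caller paid for it.  After the patch, callers that need it
 * (get_user(), raw_copy_in_user(), user_read_access_begin(), ...) issue
 * it themselves, and callers that do not need it, such as the masked
 * user access introduced later in the series, can skip it.
 */
static int get_user_model(void)
{
	barrier_nospec();	/* now issued by the caller */
	allow_user_access();
	/* ... perform the user read ... */
	prevent_user_access();
	return 0;
}

int main(void) { return get_user_model(); }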
From nobody Wed Feb 11 05:03:15 2026
From: "Christophe Leroy (CS GROUP)"
To: Michael Ellerman, Nicholas Piggin, Madhavan Srinivasan
Cc: Christophe Leroy, linux-kernel@vger.kernel.org, linuxppc-dev@lists.ozlabs.org
Subject: [PATCH v5 2/7] powerpc/uaccess: Remove unused size and from parameters from allow_user_access()
Date: Wed, 24 Dec 2025 12:20:50 +0100
Message-ID: <4552b00707923b71150ee47b925d6eaae1b03261.1766574657.git.chleroy@kernel.org>

From: Christophe Leroy

Since commit 16132529cee5 ("powerpc/32s: Rework Kernel Userspace Access
Protection") the size parameter is unused on all platforms, and the
'from' parameter has never been used. Remove them.

Signed-off-by: Christophe Leroy
---
v2: Also remove 'from' param.
---
 arch/powerpc/include/asm/book3s/32/kup.h     | 3 +--
 arch/powerpc/include/asm/book3s/64/kup.h     | 6 ++----
 arch/powerpc/include/asm/kup.h               | 9 ++++-----
 arch/powerpc/include/asm/nohash/32/kup-8xx.h | 3 +--
 arch/powerpc/include/asm/nohash/kup-booke.h  | 3 +--
 5 files changed, 9 insertions(+), 15 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 873c5146e326..a3558419c41b 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -97,8 +97,7 @@ static __always_inline unsigned long __kuap_get_and_assert_locked(void)
 }
 #define __kuap_get_and_assert_locked __kuap_get_and_assert_locked
 
-static __always_inline void allow_user_access(void __user *to, const void __user *from,
-					      u32 size, unsigned long dir)
+static __always_inline void allow_user_access(void __user *to, unsigned long dir)
 {
 	BUILD_BUG_ON(!__builtin_constant_p(dir));
 
diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
index 03aec3c6c851..9ccf8a5e0926 100644
--- a/arch/powerpc/include/asm/book3s/64/kup.h
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -353,8 +353,7 @@ __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 	return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ;
 }
 
-static __always_inline void allow_user_access(void __user *to, const void __user *from,
-					      unsigned long size, unsigned long dir)
+static __always_inline void allow_user_access(void __user *to, unsigned long dir)
 {
 	unsigned long thread_amr = 0;
 
@@ -383,8 +382,7 @@ static __always_inline unsigned long get_kuap(void)
 
 static __always_inline void set_kuap(unsigned long value) { }
 
-static __always_inline void allow_user_access(void __user *to, const void __user *from,
-					      unsigned long size, unsigned long dir)
+static __always_inline void allow_user_access(void __user *to, unsigned long dir)
 {
 }
 
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index f2009d7c8cfa..3963584ac1cf 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -72,8 +72,7 @@ static __always_inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned
  * platforms.
  */
 #ifndef CONFIG_PPC_BOOK3S_64
-static __always_inline void allow_user_access(void __user *to, const void __user *from,
-					      unsigned long size, unsigned long dir) { }
+static __always_inline void allow_user_access(void __user *to, unsigned long dir) { }
 static __always_inline void prevent_user_access(unsigned long dir) { }
 static __always_inline unsigned long prevent_user_access_return(void) { return 0UL; }
 static __always_inline void restore_user_access(unsigned long flags) { }
@@ -134,18 +133,18 @@ static __always_inline void kuap_assert_locked(void)
 
 static __always_inline void allow_read_from_user(const void __user *from, unsigned long size)
 {
-	allow_user_access(NULL, from, size, KUAP_READ);
+	allow_user_access(NULL, KUAP_READ);
 }
 
 static __always_inline void allow_write_to_user(void __user *to, unsigned long size)
 {
-	allow_user_access(to, NULL, size, KUAP_WRITE);
+	allow_user_access(to, KUAP_WRITE);
 }
 
 static __always_inline void allow_read_write_user(void __user *to, const void __user *from,
 						  unsigned long size)
 {
-	allow_user_access(to, from, size, KUAP_READ_WRITE);
+	allow_user_access(to, KUAP_READ_WRITE);
 }
 
 static __always_inline void prevent_read_from_user(const void __user *from, unsigned long size)
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
index 08486b15b207..efffb5006d19 100644
--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -49,8 +49,7 @@ static __always_inline void uaccess_end_8xx(void)
 		     "i"(SPRN_MD_AP), "r"(MD_APG_KUAP), "i"(MMU_FTR_KUAP) : "memory");
 }
 
-static __always_inline void allow_user_access(void __user *to, const void __user *from,
-					      unsigned long size, unsigned long dir)
+static __always_inline void allow_user_access(void __user *to, unsigned long dir)
 {
 	uaccess_begin_8xx(MD_APG_INIT);
 }
diff --git a/arch/powerpc/include/asm/nohash/kup-booke.h b/arch/powerpc/include/asm/nohash/kup-booke.h
index d6bbb6d78bbe..cb2d5a96c3df 100644
--- a/arch/powerpc/include/asm/nohash/kup-booke.h
+++ b/arch/powerpc/include/asm/nohash/kup-booke.h
@@ -73,8 +73,7 @@ static __always_inline void uaccess_end_booke(void)
 		     "i"(SPRN_PID), "r"(0), "i"(MMU_FTR_KUAP) : "memory");
 }
 
-static __always_inline void allow_user_access(void __user *to, const void __user *from,
-					      unsigned long size, unsigned long dir)
+static __always_inline void allow_user_access(void __user *to, unsigned long dir)
 {
 	uaccess_begin_booke(current->thread.pid);
 }
-- 
2.49.0
From nobody Wed Feb 11 05:03:15 2026
From: "Christophe Leroy (CS GROUP)"
To: Michael Ellerman, Nicholas Piggin, Madhavan Srinivasan
Cc: Christophe Leroy, linux-kernel@vger.kernel.org, linuxppc-dev@lists.ozlabs.org
Subject: [PATCH v5 3/7] powerpc/uaccess: Remove {allow/prevent}_{read/write/read_write}_{from/to/}_user()
Date: Wed, 24 Dec 2025 12:20:51 +0100
Message-ID: <70971f0ba81eab742a120e5bfdeff6b42d08fd98.1766574657.git.chleroy@kernel.org>

From: Christophe Leroy

The six following functions have become simple single-line functions
that do not add much value anymore:

- allow_read_from_user()
- allow_write_to_user()
- allow_read_write_user()
- prevent_read_from_user()
- prevent_write_to_user()
- prevent_read_write_user()

Directly call allow_user_access() and prevent_user_access() instead: it
doesn't reduce readability and it removes unnecessary middle functions.
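As a rough sketch of the resulting calling pattern, here is a toy model; the helpers and flag values below are stand-ins invented for the example, not the kernel code (in the kernel they program the KUAP hardware):

/* Toy model only -- stand-in helpers, not kernel code. */
#include <stdio.h>
#include <string.h>

#define KUAP_READ	1UL
#define KUAP_WRITE	2UL
#define KUAP_READ_WRITE	(KUAP_READ | KUAP_WRITE)

static void allow_user_access(void *to, unsigned long dir)
{
	(void)to;
	printf("open user-access window, dir=%lu\n", dir);
}

static void prevent_user_access(unsigned long dir)
{
	printf("close user-access window, dir=%lu\n", dir);
}

/* Shape of a copy routine once the per-direction wrappers are gone:
 * the caller opens and closes the window directly with the direction flag. */
static unsigned long copy_from_user_model(void *to, const void *from, unsigned long n)
{
	allow_user_access(NULL, KUAP_READ);
	memcpy(to, from, n);		/* stands in for __copy_tofrom_user() */
	prevent_user_access(KUAP_READ);
	return 0;
}

int main(void)
{
	char src[8] = "hello", dst[8];
	return (int)copy_from_user_model(dst, src, sizeof(src));
}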
Signed-off-by: Christophe Leroy --- v2: New --- arch/powerpc/include/asm/kup.h | 47 ------------------------------ arch/powerpc/include/asm/uaccess.h | 30 +++++++++---------- 2 files changed, 15 insertions(+), 62 deletions(-) diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h index 3963584ac1cf..4a4145a244f2 100644 --- a/arch/powerpc/include/asm/kup.h +++ b/arch/powerpc/include/asm/kup.h @@ -131,53 +131,6 @@ static __always_inline void kuap_assert_locked(void) kuap_get_and_assert_locked(); } =20 -static __always_inline void allow_read_from_user(const void __user *from, = unsigned long size) -{ - allow_user_access(NULL, KUAP_READ); -} - -static __always_inline void allow_write_to_user(void __user *to, unsigned = long size) -{ - allow_user_access(to, KUAP_WRITE); -} - -static __always_inline void allow_read_write_user(void __user *to, const v= oid __user *from, - unsigned long size) -{ - allow_user_access(to, KUAP_READ_WRITE); -} - -static __always_inline void prevent_read_from_user(const void __user *from= , unsigned long size) -{ - prevent_user_access(KUAP_READ); -} - -static __always_inline void prevent_write_to_user(void __user *to, unsigne= d long size) -{ - prevent_user_access(KUAP_WRITE); -} - -static __always_inline void prevent_read_write_user(void __user *to, const= void __user *from, - unsigned long size) -{ - prevent_user_access(KUAP_READ_WRITE); -} - -static __always_inline void prevent_current_access_user(void) -{ - prevent_user_access(KUAP_READ_WRITE); -} - -static __always_inline void prevent_current_read_from_user(void) -{ - prevent_user_access(KUAP_READ); -} - -static __always_inline void prevent_current_write_to_user(void) -{ - prevent_user_access(KUAP_WRITE); -} - #endif /* !__ASSEMBLER__ */ =20 #endif /* _ASM_POWERPC_KUAP_H_ */ diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/= uaccess.h index 3e622e647d62..7846ee59e374 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -45,14 +45,14 @@ do { \ __label__ __pu_failed; \ \ - allow_write_to_user(__pu_addr, __pu_size); \ + allow_user_access(__pu_addr, KUAP_WRITE); \ __put_user_size_goto(__pu_val, __pu_addr, __pu_size, __pu_failed); \ - prevent_write_to_user(__pu_addr, __pu_size); \ + prevent_user_access(KUAP_WRITE); \ __pu_err =3D 0; \ break; \ \ __pu_failed: \ - prevent_write_to_user(__pu_addr, __pu_size); \ + prevent_user_access(KUAP_WRITE); \ __pu_err =3D -EFAULT; \ } while (0); \ \ @@ -302,9 +302,9 @@ do { \ \ might_fault(); \ barrier_nospec(); \ - allow_read_from_user(__gu_addr, __gu_size); \ + allow_user_access(NULL, KUAP_READ); \ __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \ - prevent_read_from_user(__gu_addr, __gu_size); \ + prevent_user_access(KUAP_READ); \ (x) =3D (__typeof__(*(ptr)))__gu_val; \ \ __gu_err; \ @@ -331,9 +331,9 @@ raw_copy_in_user(void __user *to, const void __user *fr= om, unsigned long n) unsigned long ret; =20 barrier_nospec(); - allow_read_write_user(to, from, n); + allow_user_access(to, KUAP_READ_WRITE); ret =3D __copy_tofrom_user(to, from, n); - prevent_read_write_user(to, from, n); + prevent_user_access(KUAP_READ_WRITE); return ret; } #endif /* __powerpc64__ */ @@ -343,9 +343,9 @@ static inline unsigned long raw_copy_from_user(void *to, { unsigned long ret; =20 - allow_read_from_user(from, n); + allow_user_access(NULL, KUAP_READ); ret =3D __copy_tofrom_user((__force void __user *)to, from, n); - prevent_read_from_user(from, n); + prevent_user_access(KUAP_READ); return ret; } =20 
@@ -354,9 +354,9 @@ raw_copy_to_user(void __user *to, const void *from, uns= igned long n) { unsigned long ret; =20 - allow_write_to_user(to, n); + allow_user_access(to, KUAP_WRITE); ret =3D __copy_tofrom_user(to, (__force const void __user *)from, n); - prevent_write_to_user(to, n); + prevent_user_access(KUAP_WRITE); return ret; } =20 @@ -367,9 +367,9 @@ static inline unsigned long __clear_user(void __user *a= ddr, unsigned long size) unsigned long ret; =20 might_fault(); - allow_write_to_user(addr, size); + allow_user_access(addr, KUAP_WRITE); ret =3D __arch_clear_user(addr, size); - prevent_write_to_user(addr, size); + prevent_user_access(KUAP_WRITE); return ret; } =20 @@ -397,9 +397,9 @@ copy_mc_to_user(void __user *to, const void *from, unsi= gned long n) { if (check_copy_size(from, n, true)) { if (access_ok(to, n)) { - allow_write_to_user(to, n); + allow_user_access(to, KUAP_WRITE); n =3D copy_mc_generic((void __force *)to, from, n); - prevent_write_to_user(to, n); + prevent_user_access(KUAP_WRITE); } } =20 --=20 2.49.0 From nobody Wed Feb 11 05:03:15 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 8B02032D0CE for ; Wed, 24 Dec 2025 11:21:24 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1766575284; cv=none; b=cB5COY5r67RKpyLY7Zqho/cno9NsnnFBjsXW6eOLszE9tV20Mr5JMYTioaUJrOuXjVxV6MLwyYM4egJc0rtMnQ4cB5S32HQoPdDpS4vDqjWlXjcyQZwtqu9v6ajaBqkt+1xlKA0qmob40crxD/JhGCGMsl7z6nxJ8RxoHRESIio= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1766575284; c=relaxed/simple; bh=uML6mmNL9b32c/J5n2U0d68VLsHlLeW3trcOev0qgcc=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=kbCYw4K3NcCgnBJLcAzosR2viXJXNqmDJ1niWeLozjPCky0sMm8SMuSkpugdSRm8C3rcLRxsJ5LQl57XEHLPNV7kJR7g2zRdhYMN2qUeWMU+HqCWjWDLHiac9CKwART878hRFIzcYvkxqGpqFagienJNw6egLGcor8wlRFYji6c= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=W0ltaU6W; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="W0ltaU6W" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 37DCEC116D0; Wed, 24 Dec 2025 11:21:21 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1766575284; bh=uML6mmNL9b32c/J5n2U0d68VLsHlLeW3trcOev0qgcc=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=W0ltaU6WOVXR9KTWeXHCk5FsTxNsCgJ5bYP8Pwe2JbSkGGNvpCSAeBl015Pcu3Gnm yv42gybIbXYrOIc/NKnhrdUjbJPr96Hv4nUKAWiWeV9JN87H/ZHpRPOLvozCgWSJTP dTv5m0tWTA2/5E4WnJ5j8zL8OCqw2BfsmDssJAei4A8743BrPQ74A4268B4V3KVIQk rLuIX/CDeoHCtp5xUIHBL2hsdhaO5ZfWp8MCXkiU4O6RlLSljqilwCfj/sfd72N/UC +D5/b66NtqfoXpCJmmPLVjaeMio8eKVOTdFZYLsRRyJ257HEgEqZX9vKyg3e0UnnZe UeIR9OYdk7Beg== From: "Christophe Leroy (CS GROUP)" To: Michael Ellerman , Nicholas Piggin , Madhavan Srinivasan Cc: Christophe Leroy , linux-kernel@vger.kernel.org, linuxppc-dev@lists.ozlabs.org Subject: [PATCH v5 4/7] powerpc/uaccess: Refactor user_{read/write/}_access_begin() Date: Wed, 24 Dec 2025 12:20:52 +0100 Message-ID: <2b4f9d4e521e0b56bf5cb239916b4a178c4d2007.1766574657.git.chleroy@kernel.org> X-Mailer: git-send-email 2.49.0 
In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Developer-Signature: v=1; a=openpgp-sha256; l=3583; i=chleroy@kernel.org; h=from:subject:message-id; bh=Hs0S56epi8xUTo2XJTHx0BIIvWB8/BwLXBAnBe9oLow=; b=owGbwMvMwCV2d0KB2p7V54MZT6slMWR6n5nz8IZah4hzWoyy18eWZRp8T0OtJuunGyxSEFyUm dmUr7ugo5SFQYyLQVZMkeX4f+5dM7q+pOZP3aUPM4eVCWQIAxenAEykX5vhf+7EzZbnPP4992nm +vTRu7Z9TdvcVxNM3BMX/OlsDGeq9WBkeH3bLTvuwKR9Cwv3ef3TYno4S7bVMPNdWvea7SuWXu/ X5gMA X-Developer-Key: i=chleroy@kernel.org; a=openpgp; fpr=10FFE6F8B390DE17ACC2632368A92FEB01B8DD78 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Christophe Leroy user_read_access_begin() and user_write_access_begin() and user_access_begin() are now very similar. Create a common __user_access_begin() that takes direction as parameter. In order to avoid a warning with the conditional call of barrier_nospec() which is sometimes an empty macro, change it to a do {} while (0). Signed-off-by: Christophe Leroy --- v4: Rebase on top of core-scoped-uaccess tag v2: New --- arch/powerpc/include/asm/barrier.h | 2 +- arch/powerpc/include/asm/uaccess.h | 46 +++++++++--------------------- 2 files changed, 14 insertions(+), 34 deletions(-) diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/= barrier.h index 9e9833faa4af..9d2f612cfb1d 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -102,7 +102,7 @@ do { \ =20 #else /* !CONFIG_PPC_BARRIER_NOSPEC */ #define barrier_nospec_asm -#define barrier_nospec() +#define barrier_nospec() do {} while (0) #endif /* CONFIG_PPC_BARRIER_NOSPEC */ =20 /* diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/= uaccess.h index 7846ee59e374..721d65dbbb2e 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -410,50 +410,30 @@ copy_mc_to_user(void __user *to, const void *from, un= signed long n) extern long __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size); =20 -static __must_check __always_inline bool user_access_begin(const void __us= er *ptr, size_t len) +static __must_check __always_inline bool __user_access_begin(const void __= user *ptr, size_t len, + unsigned long dir) { if (unlikely(!access_ok(ptr, len))) return false; =20 might_fault(); =20 - barrier_nospec(); - allow_read_write_user((void __user *)ptr, ptr, len); + if (dir & KUAP_READ) + barrier_nospec(); + allow_user_access((void __user *)ptr, dir); return true; } -#define user_access_begin user_access_begin -#define user_access_end prevent_current_access_user -#define user_access_save prevent_user_access_return -#define user_access_restore restore_user_access =20 -static __must_check __always_inline bool -user_read_access_begin(const void __user *ptr, size_t len) -{ - if (unlikely(!access_ok(ptr, len))) - return false; +#define user_access_begin(p, l) __user_access_begin(p, l, KUAP_READ_WRITE) +#define user_read_access_begin(p, l) __user_access_begin(p, l, KUAP_READ) +#define user_write_access_begin(p, l) __user_access_begin(p, l, KUAP_WRITE) =20 - might_fault(); - - barrier_nospec(); - allow_read_from_user(ptr, len); - return true; -} -#define user_read_access_begin user_read_access_begin -#define user_read_access_end prevent_current_read_from_user +#define user_access_end() prevent_user_access(KUAP_READ_WRITE) +#define user_read_access_end() prevent_user_access(KUAP_READ) +#define 
user_write_access_end() prevent_user_access(KUAP_WRITE) =20 -static __must_check __always_inline bool -user_write_access_begin(const void __user *ptr, size_t len) -{ - if (unlikely(!access_ok(ptr, len))) - return false; - - might_fault(); - - allow_write_to_user((void __user *)ptr, len); - return true; -} -#define user_write_access_begin user_write_access_begin -#define user_write_access_end prevent_current_write_to_user +#define user_access_save prevent_user_access_return +#define user_access_restore restore_user_access =20 #define arch_unsafe_get_user(x, p, e) do { \ __long_type(*(p)) __gu_val; \ --=20 2.49.0 From nobody Wed Feb 11 05:03:15 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id DEF9C32D426 for ; Wed, 24 Dec 2025 11:21:26 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1766575287; cv=none; b=VrvB9p0HTE/yYdDpyChgdv7JcgRpvBKAoPem+tvXcNQI8zL267QAFgkayT3M6h3kkdD40IzK62T76UhR0phrmcmtR/FStYtwEaQL8VMCQBqJtxAaG31okMFNhE7jRnaXQgTCCMsN2Py6MVjkh3gQ0QEMiLaqFS3xh1mpmU7PdH8= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1766575287; c=relaxed/simple; bh=sMpjUe/hhb/piJnpNO9t/nbJ3zmkkCHnDq8UFDbXN3E=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=UsiOtk9HLPS8/zDaFEfCIyRv/NP1KWyA1VjC9QGt3wM6s8tlrKMN6RPoBGB3/hOB9GRpppqbpH9a8lJSOomdJpKctAvBJZLR0xyv+mEJeOLfcSZA2PGLk7xr8qzsxEPuHdwXJ3sXWDyufs42uc9rWBWaxkjItBBkWZcuXlvIMn8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=ofh7zz7X; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="ofh7zz7X" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 96ABFC4CEFB; Wed, 24 Dec 2025 11:21:24 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1766575286; bh=sMpjUe/hhb/piJnpNO9t/nbJ3zmkkCHnDq8UFDbXN3E=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=ofh7zz7X+ylwXIgwjRZjioTZIAvCjNRStNAv+l8zG4cpSGHrEsKYZLcFdlxdwj+Ej BzPTMUJ2UB5SlXTIZyizOH0b20iMiaWr9BDSfMdOn6x0K6CWHXEBD03ctI/CjhsO9m 77F0EKSD2MdeJ5GBvURJzc6ctC3rwG8N9zHC0ZpvOnWVvip9+N8uCremlx+9pq8Gwq nv1WFfSWqthHeqIXWFpM35MVkY1W1wb3wJF+z705AaH3t/RoansN+h9/0PbLD9NLh0 4XhMSOQcpHcqjczp7fCgfF+h+Si4jbJa9IaDRPBC/E9K5rllDqRY452AodsSbSOca8 9CnghKOS5D65A== From: "Christophe Leroy (CS GROUP)" To: Michael Ellerman , Nicholas Piggin , Madhavan Srinivasan Cc: Christophe Leroy , linux-kernel@vger.kernel.org, linuxppc-dev@lists.ozlabs.org Subject: [PATCH v5 5/7] powerpc/32s: Fix segments setup when TASK_SIZE is not a multiple of 256M Date: Wed, 24 Dec 2025 12:20:53 +0100 Message-ID: <8928d906079e156c59794c41e826a684eaaaebb4.1766574657.git.chleroy@kernel.org> X-Mailer: git-send-email 2.49.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Developer-Signature: v=1; a=openpgp-sha256; l=4811; i=chleroy@kernel.org; h=from:subject:message-id; bh=UMeT/KUp8+woidCjPqCoL1mGuuOWLtvzg8TYGXLU1xw=; b=owGbwMvMwCV2d0KB2p7V54MZT6slMWR6n5kj6+CyYFaBjOmtuNx3ex1//NgmrKataLVn/Qv36 
saZrd+rO0pZGMS4GGTFFFmO/+feNaPrS2r+1F36MHNYmUCGMHBxCsBEeMoY/qcVCQc+EGWr/PLc 6ubp4FuTAoWOf7EJZ/q539416NWH9lJGhrls2oKeu+wbhN/YZs6rP5O2MHWn41kh5sPbP/Jb1ba XcAEA X-Developer-Key: i=chleroy@kernel.org; a=openpgp; fpr=10FFE6F8B390DE17ACC2632368A92FEB01B8DD78 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Christophe Leroy For book3s/32 it is assumed that TASK_SIZE is a multiple of 256 Mbytes, but Kconfig allows any value for TASK_SIZE. In all relevant calculations, align TASK_SIZE to the upper 256 Mbytes boundary. Also use ASM_CONST() in the definition of TASK_SIZE to ensure it is seen as an unsigned constant. Signed-off-by: Christophe Leroy --- arch/powerpc/include/asm/book3s/32/mmu-hash.h | 5 ++++- arch/powerpc/include/asm/task_size_32.h | 2 +- arch/powerpc/kernel/asm-offsets.c | 2 +- arch/powerpc/kernel/head_book3s_32.S | 6 +++--- arch/powerpc/mm/book3s32/mmu.c | 2 +- arch/powerpc/mm/ptdump/segment_regs.c | 2 +- 6 files changed, 11 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/i= nclude/asm/book3s/32/mmu-hash.h index 8435bf3cdabf..387d370c8a35 100644 --- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h @@ -192,12 +192,15 @@ extern s32 patch__hash_page_B, patch__hash_page_C; extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A= 2; extern s32 patch__flush_hash_B; =20 +#include +#include + #include #include =20 static __always_inline void update_user_segment(u32 n, u32 val) { - if (n << 28 < TASK_SIZE) + if (n << 28 < ALIGN(TASK_SIZE, SZ_256M)) mtsr(val + n * 0x111, n << 28); } =20 diff --git a/arch/powerpc/include/asm/task_size_32.h b/arch/powerpc/include= /asm/task_size_32.h index de7290ee770f..30edc21f71fb 100644 --- a/arch/powerpc/include/asm/task_size_32.h +++ b/arch/powerpc/include/asm/task_size_32.h @@ -6,7 +6,7 @@ #error User TASK_SIZE overlaps with KERNEL_START address #endif =20 -#define TASK_SIZE (CONFIG_TASK_SIZE) +#define TASK_SIZE ASM_CONST(CONFIG_TASK_SIZE) =20 /* * This decides where the kernel will search for a free chunk of vm space = during diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-of= fsets.c index a4bc80b30410..46149f326fd4 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -331,7 +331,7 @@ int main(void) =20 #ifndef CONFIG_PPC64 DEFINE(TASK_SIZE, TASK_SIZE); - DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); + DEFINE(NUM_USER_SEGMENTS, ALIGN(TASK_SIZE, SZ_256M) >> 28); #endif /* ! CONFIG_PPC64 */ =20 /* datapage offsets for use by vdso */ diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/hea= d_book3s_32.S index cb2bca76be53..c1779455ea32 100644 --- a/arch/powerpc/kernel/head_book3s_32.S +++ b/arch/powerpc/kernel/head_book3s_32.S @@ -420,7 +420,7 @@ InstructionTLBMiss: lwz r2,0(r2) /* get pmd entry */ #ifdef CONFIG_EXECMEM rlwinm r3, r0, 4, 0xf - subi r3, r3, (TASK_SIZE >> 28) & 0xf + subi r3, r3, NUM_USER_SEGMENTS #endif rlwinm. r2,r2,0,0,19 /* extract address of pte page */ beq- InstructionAddressInvalid /* return if no mapping */ @@ -475,7 +475,7 @@ DataLoadTLBMiss: lwz r2,0(r1) /* get pmd entry */ rlwinm r3, r0, 4, 0xf rlwinm. 
r2,r2,0,0,19 /* extract address of pte page */ - subi r3, r3, (TASK_SIZE >> 28) & 0xf + subi r3, r3, NUM_USER_SEGMENTS beq- 2f /* bail if no mapping */ 1: rlwimi r2,r0,22,20,29 /* insert next 10 bits of address */ lwz r2,0(r2) /* get linux-style pte */ @@ -554,7 +554,7 @@ DataStoreTLBMiss: lwz r2,0(r1) /* get pmd entry */ rlwinm r3, r0, 4, 0xf rlwinm. r2,r2,0,0,19 /* extract address of pte page */ - subi r3, r3, (TASK_SIZE >> 28) & 0xf + subi r3, r3, NUM_USER_SEGMENTS beq- 2f /* bail if no mapping */ 1: rlwimi r2,r0,22,20,29 /* insert next 10 bits of address */ diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index c42ecdf94e48..37eefc6786a7 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -225,7 +225,7 @@ int mmu_mark_initmem_nx(void) =20 BUILD_BUG_ON(ALIGN_DOWN(MODULES_VADDR, SZ_256M) < TASK_SIZE); =20 - for (i =3D TASK_SIZE >> 28; i < 16; i++) { + for (i =3D ALIGN(TASK_SIZE, SZ_256M) >> 28; i < 16; i++) { /* Do not set NX on VM space for modules */ if (is_module_segment(i << 28)) continue; diff --git a/arch/powerpc/mm/ptdump/segment_regs.c b/arch/powerpc/mm/ptdump= /segment_regs.c index 9df3af8d481f..c06704b18a2c 100644 --- a/arch/powerpc/mm/ptdump/segment_regs.c +++ b/arch/powerpc/mm/ptdump/segment_regs.c @@ -31,7 +31,7 @@ static int sr_show(struct seq_file *m, void *v) int i; =20 seq_puts(m, "---[ User Segments ]---\n"); - for (i =3D 0; i < TASK_SIZE >> 28; i++) + for (i =3D 0; i < ALIGN(TASK_SIZE, SZ_256M) >> 28; i++) seg_show(m, i); =20 seq_puts(m, "\n---[ Kernel Segments ]---\n"); --=20 2.49.0 From nobody Wed Feb 11 05:03:15 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id EEDBC325716 for ; Wed, 24 Dec 2025 11:21:28 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1766575289; cv=none; b=cpZERew/Q4q32mM4G7hc/dn9CFxUElwtAVPbnlwQc3MQ4XP56lkfTkNWsytd5x2sIdbY/J+Vz7CK06znbOlDh0QD8p52taTSs1UyRJYUDNa11cmPMidm9Rh9UtSTueL+9Np6qmO2AeyJJ1CytpRUTHIDn2k9tvzCfdqJqM9Z+ac= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1766575289; c=relaxed/simple; bh=wKTECL+uhV2GewMTUnLeAs+N2tpCOWuQ3qmRO31AzZU=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=hzAKqwP76mlDODflh3YABvCyimLwjvutik2eTbpiMWPExtWegTcf9vZv7vN2lKXydmltajhNU3raJDHvCmDtuLdfkHXYs6DXLqzeTd5TgOjB5BRHahh0j7VWBmWI7h2dSvx/JIpvC7mUM3WP5is/u04mMCwDPucDMrMgcjuvis4= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=iWPeDBA8; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="iWPeDBA8" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 04B32C16AAE; Wed, 24 Dec 2025 11:21:26 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1766575288; bh=wKTECL+uhV2GewMTUnLeAs+N2tpCOWuQ3qmRO31AzZU=; h=From:To:Cc:Subject:Date:In-Reply-To:References:From; b=iWPeDBA8oLNW9eDp4PsxIG5ToDSE+K5B19Dz2tJYX9VEH8JjPH+TI2pohxIIc5kbm AqF9Vu/NaSMUZXvsMUUqzNM8HRHvocup8CUVTaYJ0cgtY8cz/s6/B0+67wDcRFE5HM 6wpguaQzAWUd1CMY6S43aBsW7lsFFtEc5KwFpuTvMEug/OnNVHQS7YGXhC+dI65pr5 
jC6Mhrx7zRi93oWBKyLxFQ492l1i0RrmuAOurXYUgXn5+5pig4gUHQBRnaIykNjOtA u64SNV+s0cPd1Hx+hCMIm/4s5NgYBhbcs3u+qKhXWq5HwrWPMRvGxrDwrYvUWHUmkw dPN81g3JcMNOg== From: "Christophe Leroy (CS GROUP)" To: Michael Ellerman , Nicholas Piggin , Madhavan Srinivasan Cc: Christophe Leroy , linux-kernel@vger.kernel.org, linuxppc-dev@lists.ozlabs.org Subject: [PATCH v5 6/7] powerpc/32: Automatically adapt TASK_SIZE based on constraints Date: Wed, 24 Dec 2025 12:20:54 +0100 Message-ID: <6a2575420770d075cd090b5a316730a2ffafdee4.1766574657.git.chleroy@kernel.org> X-Mailer: git-send-email 2.49.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 X-Developer-Signature: v=1; a=openpgp-sha256; l=6156; i=chleroy@kernel.org; h=from:subject:message-id; bh=9s+oMFBAX9h3xrMQNyahO4dDCw3Exg61uapF1kwcX3o=; b=owGbwMvMwCV2d0KB2p7V54MZT6slMWR6n5mj+b055mGMtUj/A/4S78f7Be3XHpMsCbjFHD3lK MuflRNXdJSyMIhxMciKKbIc/8+9a0bXl9T8qbv0YeawMoEMYeDiFICJKExl+GedeoltpqxiZg8D x/fJDt+tUnaxb+udzcm0dpP8f54b2y4yMrTrdOkvfvfGN8YkapY7j/px1hqHVy6sP555tx/ninp bww4A X-Developer-Key: i=chleroy@kernel.org; a=openpgp; fpr=10FFE6F8B390DE17ACC2632368A92FEB01B8DD78 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Christophe Leroy At the time being, TASK_SIZE can be customized by the user via Kconfig but it is not possible to check all constraints in Kconfig. Impossible setups are detected at compile time with BUILD_BUG() but that leads to build failure when setting crazy values. It is not a problem on its own because the user will usually either use the default value or set a well thought value. However build robots generate crazy random configs that lead to build failures, and build robots see it as a regression every time a patch adds such a constraint. So instead of failing the build when the custom TASK_SIZE is too big, just adjust it to the maximum possible value matching the setup. Several architectures already calculate TASK_SIZE based on other parameters and options. In order to do so, move MODULES_VADDR calculation into task_size_32.h and ensure that: - On book3s/32, userspace and module area have their own segments (256M) - On 8xx, userspace has its own full PGDIR entries (4M) Then TASK_SIZE is guaranteed to be correct so remove related BUILD_BUG()s. Signed-off-by: Christophe Leroy --- arch/powerpc/Kconfig | 3 +-- arch/powerpc/include/asm/book3s/32/pgtable.h | 4 --- arch/powerpc/include/asm/nohash/32/mmu-8xx.h | 4 --- arch/powerpc/include/asm/task_size_32.h | 26 ++++++++++++++++++++ arch/powerpc/mm/book3s32/mmu.c | 2 -- arch/powerpc/mm/mem.c | 2 -- arch/powerpc/mm/nohash/8xx.c | 2 -- 7 files changed, 27 insertions(+), 16 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 9537a61ebae0..b8d36a261009 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -1293,9 +1293,8 @@ config TASK_SIZE_BOOL Say N here unless you know what you are doing. 
=20 config TASK_SIZE - hex "Size of user task space" if TASK_SIZE_BOOL + hex "Size of maximum user task space" if TASK_SIZE_BOOL default "0x80000000" if PPC_8xx - default "0xb0000000" if PPC_BOOK3S_32 && EXECMEM default "0xc0000000" =20 config MODULES_SIZE_BOOL diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/in= clude/asm/book3s/32/pgtable.h index 87dcca962be7..41ae404d0b7a 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -195,10 +195,6 @@ void unmap_kernel_page(unsigned long va); #define VMALLOC_END ioremap_bot #endif =20 -#define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M) -#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M) -#define MODULES_VADDR (MODULES_END - MODULES_SIZE) - #ifndef __ASSEMBLER__ #include #include diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/in= clude/asm/nohash/32/mmu-8xx.h index f19115db8072..74ad32e1588c 100644 --- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h @@ -170,10 +170,6 @@ =20 #define mmu_linear_psize MMU_PAGE_8M =20 -#define MODULES_END PAGE_OFFSET -#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M) -#define MODULES_VADDR (MODULES_END - MODULES_SIZE) - #ifndef __ASSEMBLER__ =20 #include diff --git a/arch/powerpc/include/asm/task_size_32.h b/arch/powerpc/include= /asm/task_size_32.h index 30edc21f71fb..42a64bbd1964 100644 --- a/arch/powerpc/include/asm/task_size_32.h +++ b/arch/powerpc/include/asm/task_size_32.h @@ -2,11 +2,37 @@ #ifndef _ASM_POWERPC_TASK_SIZE_32_H #define _ASM_POWERPC_TASK_SIZE_32_H =20 +#include + #if CONFIG_TASK_SIZE > CONFIG_KERNEL_START #error User TASK_SIZE overlaps with KERNEL_START address #endif =20 +#ifdef CONFIG_PPC_8xx +#define MODULES_END ASM_CONST(CONFIG_PAGE_OFFSET) +#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M) +#define MODULES_VADDR (MODULES_END - MODULES_SIZE) +#define MODULES_BASE (MODULES_VADDR & ~(UL(SZ_4M) - 1)) +#define USER_TOP MODULES_BASE +#endif + +#ifdef CONFIG_PPC_BOOK3S_32 +#define MODULES_END (ASM_CONST(CONFIG_PAGE_OFFSET) & ~(UL(SZ_256M) - 1)) +#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M) +#define MODULES_VADDR (MODULES_END - MODULES_SIZE) +#define MODULES_BASE (MODULES_VADDR & ~(UL(SZ_256M) - 1)) +#define USER_TOP MODULES_BASE +#endif + +#ifndef USER_TOP +#define USER_TOP ASM_CONST(CONFIG_PAGE_OFFSET) +#endif + +#if CONFIG_TASK_SIZE < USER_TOP #define TASK_SIZE ASM_CONST(CONFIG_TASK_SIZE) +#else +#define TASK_SIZE USER_TOP +#endif =20 /* * This decides where the kernel will search for a free chunk of vm space = during diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index 37eefc6786a7..07660e8badbd 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -223,8 +223,6 @@ int mmu_mark_initmem_nx(void) =20 update_bats(); =20 - BUILD_BUG_ON(ALIGN_DOWN(MODULES_VADDR, SZ_256M) < TASK_SIZE); - for (i =3D ALIGN(TASK_SIZE, SZ_256M) >> 28; i < 16; i++) { /* Do not set NX on VM space for modules */ if (is_module_segment(i << 28)) diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 3ddbfdbfa941..bc0f1a9eb0bc 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -401,8 +401,6 @@ struct execmem_info __init *execmem_arch_setup(void) #ifdef MODULES_VADDR unsigned long limit =3D (unsigned long)_etext - SZ_32M; =20 - BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); - /* First try within 32M limit from _etext to avoid branch trampolines */ if (MODULES_VADDR < PAGE_OFFSET && 
    MODULES_END > limit) {
 		start = limit;
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index ab1505cf42bf..a9d3f4729ead 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -209,8 +209,6 @@ void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
 
 	/* 8xx can only access 32MB at the moment */
 	memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
-
-	BUILD_BUG_ON(ALIGN_DOWN(MODULES_VADDR, PGDIR_SIZE) < TASK_SIZE);
 }
 
 int pud_clear_huge(pud_t *pud)
-- 
2.49.0
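To make the TASK_SIZE clamping introduced in patch 6 concrete, here is a small stand-alone computation for one plausible book3s/32 configuration. The CONFIG_* values are assumptions chosen for illustration (PAGE_OFFSET 0xc0000000, a 64 MB module area, CONFIG_TASK_SIZE 0xc0000000); the arithmetic mirrors the patch but this is plain C, not the kernel headers. An oversized CONFIG_TASK_SIZE gets clamped to USER_TOP = 0xb0000000, which matches the old explicit Kconfig default for book3s/32 with EXECMEM:

/* Stand-alone arithmetic check, assumed CONFIG_* values, not kernel code. */
#include <stdio.h>

#define SZ_1M	0x00100000UL
#define SZ_256M	0x10000000UL

int main(void)
{
	unsigned long page_offset   = 0xc0000000UL;	/* assumed CONFIG_PAGE_OFFSET */
	unsigned long modules_size  = 64 * SZ_1M;	/* assumed CONFIG_MODULES_SIZE=64 */
	unsigned long cfg_task_size = 0xc0000000UL;	/* assumed CONFIG_TASK_SIZE */

	unsigned long modules_end   = page_offset & ~(SZ_256M - 1);
	unsigned long modules_vaddr = modules_end - modules_size;
	unsigned long modules_base  = modules_vaddr & ~(SZ_256M - 1);
	unsigned long user_top      = modules_base;	/* book3s/32 case in patch 6 */
	unsigned long task_size     = cfg_task_size < user_top ? cfg_task_size : user_top;

	printf("MODULES_END   = 0x%08lx\n", modules_end);	/* 0xc0000000 */
	printf("MODULES_VADDR = 0x%08lx\n", modules_vaddr);	/* 0xbc000000 */
	printf("MODULES_BASE  = 0x%08lx\n", modules_base);	/* 0xb0000000 */
	printf("TASK_SIZE     = 0x%08lx\n", task_size);	/* clamped to 0xb0000000 */
	return 0;
}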
Nmz7etTi7Y6N7JtZ/SqqT6+Pn8u/ITyR4a8st8ueS0vaGExy5nOdt4r6893X2dF8rWCTRP2sxC0 cnQwA X-Developer-Key: i=chleroy@kernel.org; a=openpgp; fpr=10FFE6F8B390DE17ACC2632368A92FEB01B8DD78 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Christophe Leroy Masked user access avoids the address/size verification by access_ok(). Allthough its main purpose is to skip the speculation in the verification of user address and size hence avoid the need of spec mitigation, it also has the advantage of reducing the amount of instructions required so it even benefits to platforms that don't need speculation mitigation, especially when the size of the copy is not know at build time. So implement masked user access on powerpc. The only requirement is to have memory gap that faults between the top user space and the real start of kernel area. On 64 bits platforms the address space is divided that way: 0xffffffffffffffff +------------------+ | | | kernel space | | | 0xc000000000000000 +------------------+ <=3D=3D PAGE_OFFSET |//////////////////| |//////////////////| 0x8000000000000000 |//////////////////| |//////////////////| |//////////////////| 0x0010000000000000 +------------------+ <=3D=3D TASK_SIZE_MAX | | | user space | | | 0x0000000000000000 +------------------+ Kernel is always above 0x8000000000000000 and user always below, with a gap in-between. It leads to a 3 instructions sequence: 150: 7c 69 fe 76 sradi r9,r3,63 154: 79 29 00 40 clrldi r9,r9,1 158: 7c 63 48 78 andc r3,r3,r9 This sequence leaves r3 unmodified when it is below 0x8000000000000000 and clamps it to 0x8000000000000000 if it is above. On 32 bits it is more tricky. In theory user space can go up to 0xbfffffff while kernel will usually start at 0xc0000000. So a gap needs to be added in-between. Allthough in theory a single 4k page would suffice, it is easier and more efficient to enforce a 128k gap below kernel, as it simplifies the masking. e500 has the isel instruction which allows selecting one value or the other without branch and that instruction is not speculative, so use it. Allthough GCC usually generates code using that instruction, it is safer to use inline assembly to be sure. The result is: 14: 3d 20 bf fe lis r9,-16386 18: 7c 03 48 40 cmplw r3,r9 1c: 7c 69 18 5e iselgt r3,r9,r3 On other ones, when kernel space is over 0x80000000 and user space is below, the logic in mask_user_address_simple() leads to a 3 instruction sequence: 64: 7c 69 fe 70 srawi r9,r3,31 68: 55 29 00 7e clrlwi r9,r9,1 6c: 7c 63 48 78 andc r3,r3,r9 This is the default on powerpc 8xx. When the limit between user space and kernel space is not 0x80000000, mask_user_address_32() is used and a 6 instructions sequence is generated: 24: 54 69 7c 7e srwi r9,r3,17 28: 21 29 57 ff subfic r9,r9,22527 2c: 7d 29 fe 70 srawi r9,r9,31 30: 75 2a b0 00 andis. r10,r9,45056 34: 7c 63 48 78 andc r3,r3,r9 38: 7c 63 53 78 or r3,r3,r10 The constraint is that TASK_SIZE be aligned to 128K in order to get the most optimal number of instructions. When CONFIG_PPC_BARRIER_NOSPEC is not defined, fallback on the test-based masking as it is quicker than the 6 instructions sequence but not quicker than the 3 instructions sequences above. As an exemple, allthough barrier_nospec() voids on the 8xx, this change has the following impact on strncpy_from_user(): the length of the function is reduced from 488 to 340 bytes: Start of the function with the patch: 00000000 : 0: 7c ab 2b 79 mr. 
00000000 <strncpy_from_user>:
   0:	7c ab 2b 79 	mr.     r11,r5
   4:	40 81 01 40 	ble     144
   8:	7c 89 fe 70 	srawi   r9,r4,31
   c:	55 29 00 7e 	clrlwi  r9,r9,1
  10:	7c 84 48 78 	andc    r4,r4,r9
  14:	3d 20 dc 00 	lis     r9,-9216
  18:	7d 3a c3 a6 	mtspr   794,r9
  1c:	2f 8b 00 03 	cmpwi   cr7,r11,3
  20:	40 9d 00 b4 	ble     cr7,d4
	...

Start of the function without the patch:

00000000 <strncpy_from_user>:
   0:	7c a0 2b 79 	mr.     r0,r5
   4:	40 81 01 10 	ble     114
   8:	2f 84 00 00 	cmpwi   cr7,r4,0
   c:	41 9c 01 30 	blt     cr7,13c
  10:	3d 20 80 00 	lis     r9,-32768
  14:	7d 24 48 50 	subf    r9,r4,r9
  18:	7f 80 48 40 	cmplw   cr7,r0,r9
  1c:	7c 05 03 78 	mr      r5,r0
  20:	41 9d 01 00 	bgt     cr7,120
  24:	3d 20 80 00 	lis     r9,-32768
  28:	7d 25 48 50 	subf    r9,r5,r9
  2c:	7f 84 48 40 	cmplw   cr7,r4,r9
  30:	38 e0 ff f2 	li      r7,-14
  34:	41 9d 00 e4 	bgt     cr7,118
  38:	94 21 ff e0 	stwu    r1,-32(r1)
  3c:	3d 20 dc 00 	lis     r9,-9216
  40:	7d 3a c3 a6 	mtspr   794,r9
  44:	2b 85 00 03 	cmplwi  cr7,r5,3
  48:	40 9d 01 6c 	ble     cr7,1b4
	...
 118:	7c e3 3b 78 	mr      r3,r7
 11c:	4e 80 00 20 	blr
 120:	7d 25 4b 78 	mr      r5,r9
 124:	3d 20 80 00 	lis     r9,-32768
 128:	7d 25 48 50 	subf    r9,r5,r9
 12c:	7f 84 48 40 	cmplw   cr7,r4,r9
 130:	38 e0 ff f2 	li      r7,-14
 134:	41 bd ff e4 	bgt     cr7,118
 138:	4b ff ff 00 	b       38
 13c:	38 e0 ff f2 	li      r7,-14
 140:	4b ff ff d8 	b       118
	...

Signed-off-by: Christophe Leroy
---
v4: Rebase on top of core-scoped-uaccess tag and simplified as suggested by Gabriel
v3: Rewrite mask_user_address_simple() for a smaller result on powerpc64, suggested by Gabriel
v2: Added 'likely()' to the test in mask_user_address_fallback()
---
 arch/powerpc/include/asm/task_size_32.h |  6 +-
 arch/powerpc/include/asm/uaccess.h      | 76 +++++++++++++++++++++++++
 2 files changed, 79 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/include/asm/task_size_32.h b/arch/powerpc/include/asm/task_size_32.h
index 42a64bbd1964..725ddbf06217 100644
--- a/arch/powerpc/include/asm/task_size_32.h
+++ b/arch/powerpc/include/asm/task_size_32.h
@@ -13,7 +13,7 @@
 #define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M)
 #define MODULES_VADDR (MODULES_END - MODULES_SIZE)
 #define MODULES_BASE (MODULES_VADDR & ~(UL(SZ_4M) - 1))
-#define USER_TOP MODULES_BASE
+#define USER_TOP (MODULES_BASE - SZ_4M)
 #endif
 
 #ifdef CONFIG_PPC_BOOK3S_32
@@ -21,11 +21,11 @@
 #define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M)
 #define MODULES_VADDR (MODULES_END - MODULES_SIZE)
 #define MODULES_BASE (MODULES_VADDR & ~(UL(SZ_256M) - 1))
-#define USER_TOP MODULES_BASE
+#define USER_TOP (MODULES_BASE - SZ_4M)
 #endif
 
 #ifndef USER_TOP
-#define USER_TOP ASM_CONST(CONFIG_PAGE_OFFSET)
+#define USER_TOP ((ASM_CONST(CONFIG_PAGE_OFFSET) - SZ_128K) & ~(UL(SZ_128K) - 1))
 #endif
 
 #if CONFIG_TASK_SIZE < USER_TOP
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 721d65dbbb2e..ba1d878c3f40 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -2,6 +2,8 @@
 #ifndef _ARCH_POWERPC_UACCESS_H
 #define _ARCH_POWERPC_UACCESS_H
 
+#include
+
 #include
 #include
 #include
@@ -435,6 +437,80 @@ static __must_check __always_inline bool __user_access_begin(const void __user *
 #define user_access_save	prevent_user_access_return
 #define user_access_restore	restore_user_access
 
+/*
+ * Masking the user address is an alternative to a conditional
+ * user_access_begin that can avoid the fencing. This only works
+ * for dense accesses starting at the address.
+ */
+static inline void __user *mask_user_address_simple(const void __user *ptr)
+{
+	unsigned long addr = (unsigned long)ptr;
+	unsigned long mask = (unsigned long)(((long)addr >> (BITS_PER_LONG - 1)) & LONG_MAX);
+
+	return (void __user *)(addr & ~mask);
+}
+
+static inline void __user *mask_user_address_isel(const void __user *ptr)
+{
+	unsigned long addr;
+
+	asm("cmplw %1, %2; iselgt %0, %2, %1" : "=r"(addr) : "r"(ptr), "r"(TASK_SIZE) : "cr0");
+
+	return (void __user *)addr;
+}
+
+/* TASK_SIZE is a multiple of 128K for shifting by 17 to the right */
+static inline void __user *mask_user_address_32(const void __user *ptr)
+{
+	unsigned long addr = (unsigned long)ptr;
+	unsigned long mask = (unsigned long)((long)((TASK_SIZE >> 17) - 1 - (addr >> 17)) >> 31);
+
+	addr = (addr & ~mask) | (TASK_SIZE & mask);
+
+	return (void __user *)addr;
+}
+
+static inline void __user *mask_user_address_fallback(const void __user *ptr)
+{
+	unsigned long addr = (unsigned long)ptr;
+
+	return (void __user *)(likely(addr < TASK_SIZE) ? addr : TASK_SIZE);
+}
+
+static inline void __user *mask_user_address(const void __user *ptr)
+{
+#ifdef MODULES_VADDR
+	const unsigned long border = MODULES_VADDR;
+#else
+	const unsigned long border = PAGE_OFFSET;
+#endif
+
+	if (IS_ENABLED(CONFIG_PPC64))
+		return mask_user_address_simple(ptr);
+	if (IS_ENABLED(CONFIG_E500))
+		return mask_user_address_isel(ptr);
+	if (TASK_SIZE <= UL(SZ_2G) && border >= UL(SZ_2G))
+		return mask_user_address_simple(ptr);
+	if (IS_ENABLED(CONFIG_PPC_BARRIER_NOSPEC))
+		return mask_user_address_32(ptr);
+	return mask_user_address_fallback(ptr);
+}
+
+static __always_inline void __user *__masked_user_access_begin(const void __user *p,
+								unsigned long dir)
+{
+	void __user *ptr = mask_user_address(p);
+
+	might_fault();
+	allow_user_access(ptr, dir);
+
+	return ptr;
+}
+
+#define masked_user_access_begin(p) __masked_user_access_begin(p, KUAP_READ_WRITE)
+#define masked_user_read_access_begin(p) __masked_user_access_begin(p, KUAP_READ)
+#define masked_user_write_access_begin(p) __masked_user_access_begin(p, KUAP_WRITE)
+
 #define arch_unsafe_get_user(x, p, e) do { \
 	__long_type(*(p)) __gu_val;		\
 	__typeof__(*(p)) __user *__gu_addr = (p);	\
-- 
2.49.0
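To see the clamping behaviour of mask_user_address_simple() described in the commit message, here is a small stand-alone demonstration of the same arithmetic on 64-bit values; it is plain C, independent of the kernel headers, and the sample addresses are arbitrary:

/* Stand-alone check of the sign-propagation masking: addresses below
 * 0x8000000000000000 pass through unchanged, anything above is clamped
 * to 0x8000000000000000, which lands in the guaranteed-faulting gap. */
#include <stdio.h>
#include <stdint.h>

static uint64_t mask_simple(uint64_t addr)
{
	/* mirrors: mask = ((long)addr >> (BITS_PER_LONG - 1)) & LONG_MAX */
	uint64_t mask = (uint64_t)(((int64_t)addr >> 63) & INT64_MAX);

	return addr & ~mask;
}

int main(void)
{
	uint64_t samples[] = {
		0x0000000010000000ULL,	/* ordinary user pointer: unchanged */
		0x000fffffffffffffULL,	/* just below TASK_SIZE_MAX: unchanged */
		0xc000000000000000ULL,	/* kernel pointer: clamped */
		0xffffffffffffffffULL,	/* -1 from user space: clamped */
	};

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%016llx -> 0x%016llx\n",
		       (unsigned long long)samples[i],
		       (unsigned long long)mask_simple(samples[i]));
	return 0;
}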