From: Josh Poimboeuf
To: x86@kernel.org
Cc: linux-kernel@vger.kernel.org, Thomas Gleixner, Borislav Petkov,
    Peter Zijlstra, Pawan Gupta, Waiman Long, Dave Hansen, Ingo Molnar,
    Linus Torvalds, Michael Ellerman, linuxppc-dev@lists.ozlabs.org,
    Andrew Cooper, Mark Rutland, Kirill A. Shutemov
Subject: [PATCH v3 1/6] x86/uaccess: Avoid barrier_nospec() in 64-bit copy_from_user()
Date: Mon, 28 Oct 2024 18:56:14 -0700
Message-ID: <5b887fe4c580214900e21f6c61095adf9a142735.1730166635.git.jpoimboe@kernel.org>

The barrier_nospec() in 64-bit copy_from_user() is slow.  Instead use
pointer masking to force the user pointer to all 1's if the access_ok()
mispredicted true for an invalid address.

The kernel test robot reports a 2.6% improvement in the per_thread_ops
benchmark (see link below).

To avoid regressing powerpc and 32-bit x86, move their barrier_nospec()
calls to their respective raw_copy_from_user() implementations so
there's no functional change there.

Note that for safety on some AMD CPUs, this relies on recent commit
86e6b1547b3d ("x86: fix user address masking non-canonical speculation
issue").
Link: https://lore.kernel.org/202410281344.d02c72a2-oliver.sang@intel.com
Signed-off-by: Josh Poimboeuf
Acked-by: Kirill A. Shutemov
---
 arch/powerpc/include/asm/uaccess.h | 2 ++
 arch/x86/include/asm/uaccess_32.h  | 1 +
 arch/x86/include/asm/uaccess_64.h  | 1 +
 include/linux/uaccess.h            | 6 ------
 4 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 4f5a46a77fa2..12abb8bf5eda 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -7,6 +7,7 @@
 #include
 #include
 #include
+#include
 
 #ifdef __powerpc64__ /* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
@@ -341,6 +342,7 @@ static inline unsigned long raw_copy_from_user(void *to,
 {
	unsigned long ret;
 
+	barrier_nospec();
	allow_read_from_user(from, n);
	ret = __copy_tofrom_user((__force void __user *)to, from, n);
	prevent_read_from_user(from, n);
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 40379a1adbb8..8393ba104b2c 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -23,6 +23,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 static __always_inline unsigned long
 raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	barrier_nospec();
	return __copy_user_ll(to, (__force const void *)from, n);
 }
 
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index b0a887209400..7ce84090f0ec 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -138,6 +138,7 @@ copy_user_generic(void *to, const void *from, unsigned long len)
 static __always_inline __must_check unsigned long
 raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
 {
+	src = mask_user_address(src);
	return copy_user_generic(dst, (__force void *)src, size);
 }
 
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 39c7cf82b0c2..dda9725a9559 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -160,12 +160,6 @@ _inline_copy_from_user(void *to, const void __user *from, unsigned long n)
	unsigned long res = n;
	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
-		/*
-		 * Ensure that bad access_ok() speculation will not
-		 * lead to nasty side effects *after* the copy is
-		 * finished:
-		 */
-		barrier_nospec();
		instrument_copy_from_user_before(to, from, n);
		res = raw_copy_from_user(to, from, n);
		instrument_copy_from_user_after(to, from, n, res);
-- 
2.47.0
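A note between the patches: the core trick in this series is to replace the
lfence with a data dependency on an address-limit comparison.  The following
is a minimal user-space C model of mask_user_address(), for illustration
only; the names mask_user_address_model and USER_PTR_MAX_DEMO are made up
here, and the real kernel code computes the mask branchlessly with a cmp/sbb
sequence against the runtime-patched USER_PTR_MAX constant.

  #include <stdint.h>

  /* Stand-in for the runtime-patched USER_PTR_MAX; the value is only an example. */
  #define USER_PTR_MAX_DEMO	0x00007ffffffff000UL

  static inline uintptr_t mask_user_address_model(uintptr_t ptr)
  {
	/* all 1's if ptr is above the user address limit, 0 otherwise */
	uintptr_t mask = (ptr > USER_PTR_MAX_DEMO) ? ~0UL : 0UL;

	/*
	 * A bad pointer becomes all 1's, so even when access_ok() was
	 * mispredicted, the speculative load cannot hit an
	 * attacker-chosen kernel address.
	 */
	return ptr | mask;
  }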
From: Josh Poimboeuf
To: x86@kernel.org
Cc: linux-kernel@vger.kernel.org, Thomas Gleixner, Borislav Petkov,
    Peter Zijlstra, Pawan Gupta, Waiman Long, Dave Hansen, Ingo Molnar,
    Linus Torvalds, Michael Ellerman, linuxppc-dev@lists.ozlabs.org,
    Andrew Cooper, Mark Rutland, Kirill A. Shutemov
Subject: [PATCH v3 2/6] x86/uaccess: Avoid barrier_nospec() in 64-bit __get_user()
Date: Mon, 28 Oct 2024 18:56:15 -0700
Message-ID: <82cbb9983fef5ecf6f1cb33661e977172d40a7e6.1730166635.git.jpoimboe@kernel.org>

The barrier_nospec() in 64-bit __get_user() is slow.  Instead use
pointer masking to force the user pointer to all 1's if a previous
access_ok() mispredicted true for an invalid address.

Note that for safety on some AMD CPUs, this relies on recent commit
86e6b1547b3d ("x86: fix user address masking non-canonical speculation
issue").

Signed-off-by: Josh Poimboeuf
---
 arch/x86/lib/getuser.S | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 4357ec2a0bfc..998d5be6b794 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -112,8 +112,12 @@ EXPORT_SYMBOL(__get_user_8)
 
 /* .. and the same for __get_user, just without the range checks */
 SYM_FUNC_START(__get_user_nocheck_1)
-	ASM_STAC
+#ifdef CONFIG_X86_64
+	check_range size=1
+#else
	ASM_BARRIER_NOSPEC
+#endif
+	ASM_STAC
	UACCESS movzbl (%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
@@ -122,8 +126,12 @@ SYM_FUNC_END(__get_user_nocheck_1)
 EXPORT_SYMBOL(__get_user_nocheck_1)
 
 SYM_FUNC_START(__get_user_nocheck_2)
-	ASM_STAC
+#ifdef CONFIG_X86_64
+	check_range size=2
+#else
	ASM_BARRIER_NOSPEC
+#endif
+	ASM_STAC
	UACCESS movzwl (%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
@@ -132,8 +140,12 @@ SYM_FUNC_END(__get_user_nocheck_2)
 EXPORT_SYMBOL(__get_user_nocheck_2)
 
 SYM_FUNC_START(__get_user_nocheck_4)
-	ASM_STAC
+#ifdef CONFIG_X86_64
+	check_range size=4
+#else
	ASM_BARRIER_NOSPEC
+#endif
+	ASM_STAC
	UACCESS movl (%_ASM_AX),%edx
	xor %eax,%eax
	ASM_CLAC
@@ -142,8 +154,12 @@ SYM_FUNC_END(__get_user_nocheck_4)
 EXPORT_SYMBOL(__get_user_nocheck_4)
 
 SYM_FUNC_START(__get_user_nocheck_8)
-	ASM_STAC
+#ifdef CONFIG_X86_64
+	check_range size=8
+#else
	ASM_BARRIER_NOSPEC
+#endif
+	ASM_STAC
 #ifdef CONFIG_X86_64
	UACCESS movq (%_ASM_AX),%rdx
 #else
-- 
2.47.0
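For reference, the __get_user_nocheck_*() stubs changed above back the
C-level __get_user() macro, while get_user() goes through the checking
__get_user_*() entry points.  A hedged sketch of a typical get_user() caller
follows; the function and variable names are made up for illustration.

  #include <linux/types.h>
  #include <linux/uaccess.h>

  /* Hypothetical example; names are made up. */
  static int demo_read_u32(const u32 __user *uptr, u32 *out)
  {
	u32 val;

	/* on x86 this expands to a call to the __get_user_4() stub */
	if (get_user(val, uptr))
		return -EFAULT;

	*out = val;
	return 0;
  }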
Shutemov" Subject: [PATCH v3 3/6] x86/uaccess: Avoid barrier_nospec() in 32-bit copy_from_user() Date: Mon, 28 Oct 2024 18:56:16 -0700 Message-ID: <5abde43491039b577294b0b94877f3a1db1235d7.1730166635.git.jpoimboe@kernel.org> X-Mailer: git-send-email 2.47.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" The barrier_nospec() in 32-bit copy_from_user() is slow. Instead use pointer masking to force the user pointer to all 1's if a previous access_ok() mispredicted true for an invalid address. Signed-off-by: Josh Poimboeuf --- arch/x86/include/asm/uaccess.h | 34 +++++++++++++++++++++++++++++++ arch/x86/include/asm/uaccess_32.h | 2 +- arch/x86/include/asm/uaccess_64.h | 29 +------------------------- 3 files changed, 36 insertions(+), 29 deletions(-) diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 3a7755c1a441..e7ac97d42bc2 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -15,6 +15,40 @@ #include #include #include +#include + +#ifdef CONFIG_X86_64 +/* + * Virtual variable: there's no actual backing store for this, + * it can purely be used as 'runtime_const_ptr(USER_PTR_MAX)' + */ +extern unsigned long USER_PTR_MAX; +# define USER_PTR_MAX_CONST runtime_const_ptr(USER_PTR_MAX) +#else +# define USER_PTR_MAX_CONST TASK_SIZE_MAX-1 +#endif + +/* + * Masking the user address is an alternative to a conditional + * user_access_begin that can avoid the fencing. This only works + * for dense accesses starting at the address. + */ +static inline void __user *mask_user_address(const void __user *ptr) +{ + unsigned long mask; + asm("cmp %1,%0\n\t" + "sbb %0,%0" + :"=3Dr" (mask) + :"r" (ptr), + "0" (USER_PTR_MAX_CONST)); + return (__force void __user *)(mask | (__force unsigned long)ptr); +} + +#define masked_user_access_begin(x) ({ \ + __auto_type __masked_ptr =3D (x); \ + __masked_ptr =3D mask_user_address(__masked_ptr); \ + __uaccess_begin(); __masked_ptr; }) + =20 #ifdef CONFIG_X86_32 # include diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uacce= ss_32.h index 8393ba104b2c..6ec2d73f8bba 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -23,7 +23,7 @@ raw_copy_to_user(void __user *to, const void *from, unsig= ned long n) static __always_inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - barrier_nospec(); + from =3D mask_user_address(from); return __copy_user_ll(to, (__force const void *)from, n); } =20 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uacce= ss_64.h index 7ce84090f0ec..dfb78154ac26 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -12,13 +12,6 @@ #include #include #include -#include - -/* - * Virtual variable: there's no actual backing store for this, - * it can purely be used as 'runtime_const_ptr(USER_PTR_MAX)' - */ -extern unsigned long USER_PTR_MAX; =20 #ifdef CONFIG_ADDRESS_MASKING /* @@ -54,27 +47,7 @@ static inline unsigned long __untagged_addr_remote(struc= t mm_struct *mm, #endif =20 #define valid_user_address(x) \ - ((__force unsigned long)(x) <=3D runtime_const_ptr(USER_PTR_MAX)) - -/* - * Masking the user address is an alternative to a conditional - * user_access_begin that can avoid the fencing. This only works - * for dense accesses starting at the address. 
- */
-static inline void __user *mask_user_address(const void __user *ptr)
-{
-	unsigned long mask;
-	asm("cmp %1,%0\n\t"
-	    "sbb %0,%0"
-		:"=r" (mask)
-		:"r" (ptr),
-		 "0" (runtime_const_ptr(USER_PTR_MAX)));
-	return (__force void __user *)(mask | (__force unsigned long)ptr);
-}
-#define masked_user_access_begin(x) ({				\
-	__auto_type __masked_ptr = (x);				\
-	__masked_ptr = mask_user_address(__masked_ptr);	\
-	__uaccess_begin(); __masked_ptr; })
+	((__force unsigned long)(x) <= USER_PTR_MAX_CONST)
 
 /*
  * User pointers can have tag bits on x86-64. This scheme tolerates
-- 
2.47.0
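With mask_user_address() and masked_user_access_begin() now in the common
x86 uaccess.h, the masking pattern becomes usable on 32-bit as well.
Roughly, callers elsewhere in the kernel use it as in the sketch below
(simplified; the function name and error label are illustrative).

  #include <linux/uaccess.h>

  /* Illustrative sketch of the masked_user_access_begin() usage pattern. */
  static int demo_get_word(unsigned long __user *from, unsigned long *dst)
  {
	if (can_do_masked_user_access())
		from = masked_user_access_begin(from);
	else if (!user_read_access_begin(from, sizeof(*from)))
		return -EFAULT;

	unsafe_get_user(*dst, from, Efault);
	user_read_access_end();
	return 0;

  Efault:
	user_read_access_end();
	return -EFAULT;
  }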
Shutemov" Subject: [PATCH v3 4/6] x86/uaccess: Convert 32-bit get_user() to unconditional pointer masking Date: Mon, 28 Oct 2024 18:56:17 -0700 Message-ID: <088fc8f27c278791f3af29f94e09278b5fa07bea.1730166635.git.jpoimboe@kernel.org> X-Mailer: git-send-email 2.47.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Convert the 32-bit get_user() implementations to use the new unconditional masking scheme for consistency with 64-bit. Signed-off-by: Josh Poimboeuf --- arch/x86/lib/getuser.S | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index 998d5be6b794..5bce27670baa 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -37,22 +37,19 @@ =20 #define ASM_BARRIER_NOSPEC ALTERNATIVE "", "lfence", X86_FEATURE_LFENCE_RD= TSC =20 -.macro check_range size:req +.macro mask_user_address size:req .if IS_ENABLED(CONFIG_X86_64) movq $0x0123456789abcdef,%rdx 1: .pushsection runtime_ptr_USER_PTR_MAX,"a" .long 1b - 8 - . .popsection - cmp %rax, %rdx - sbb %rdx, %rdx - or %rdx, %rax .else - cmp $TASK_SIZE_MAX-\size+1, %eax - jae .Lbad_get_user - sbb %edx, %edx /* array_index_mask_nospec() */ - and %edx, %eax + mov $TASK_SIZE_MAX-\size, %edx .endif + cmp %_ASM_AX, %_ASM_DX + sbb %_ASM_DX, %_ASM_DX + or %_ASM_DX, %_ASM_AX .endm =20 .macro UACCESS op src dst @@ -63,7 +60,7 @@ =20 .text SYM_FUNC_START(__get_user_1) - check_range size=3D1 + mask_user_address size=3D1 ASM_STAC UACCESS movzbl (%_ASM_AX),%edx xor %eax,%eax @@ -73,7 +70,7 @@ SYM_FUNC_END(__get_user_1) EXPORT_SYMBOL(__get_user_1) =20 SYM_FUNC_START(__get_user_2) - check_range size=3D2 + mask_user_address size=3D2 ASM_STAC UACCESS movzwl (%_ASM_AX),%edx xor %eax,%eax @@ -83,7 +80,7 @@ SYM_FUNC_END(__get_user_2) EXPORT_SYMBOL(__get_user_2) =20 SYM_FUNC_START(__get_user_4) - check_range size=3D4 + mask_user_address size=3D4 ASM_STAC UACCESS movl (%_ASM_AX),%edx xor %eax,%eax @@ -93,14 +90,12 @@ SYM_FUNC_END(__get_user_4) EXPORT_SYMBOL(__get_user_4) =20 SYM_FUNC_START(__get_user_8) -#ifndef CONFIG_X86_64 - xor %ecx,%ecx -#endif - check_range size=3D8 + mask_user_address size=3D8 ASM_STAC #ifdef CONFIG_X86_64 UACCESS movq (%_ASM_AX),%rdx #else + xor %ecx,%ecx UACCESS movl (%_ASM_AX),%edx UACCESS movl 4(%_ASM_AX),%ecx #endif @@ -113,7 +108,7 @@ EXPORT_SYMBOL(__get_user_8) /* .. 
 /* .. and the same for __get_user, just without the range checks */
 SYM_FUNC_START(__get_user_nocheck_1)
 #ifdef CONFIG_X86_64
-	check_range size=1
+	mask_user_address size=1
 #else
	ASM_BARRIER_NOSPEC
 #endif
@@ -127,7 +122,7 @@ EXPORT_SYMBOL(__get_user_nocheck_1)
 
 SYM_FUNC_START(__get_user_nocheck_2)
 #ifdef CONFIG_X86_64
-	check_range size=2
+	mask_user_address size=2
 #else
	ASM_BARRIER_NOSPEC
 #endif
@@ -141,7 +136,7 @@ EXPORT_SYMBOL(__get_user_nocheck_2)
 
 SYM_FUNC_START(__get_user_nocheck_4)
 #ifdef CONFIG_X86_64
-	check_range size=4
+	mask_user_address size=4
 #else
	ASM_BARRIER_NOSPEC
 #endif
@@ -155,7 +150,7 @@ EXPORT_SYMBOL(__get_user_nocheck_4)
 
 SYM_FUNC_START(__get_user_nocheck_8)
 #ifdef CONFIG_X86_64
-	check_range size=8
+	mask_user_address size=8
 #else
	ASM_BARRIER_NOSPEC
 #endif
-- 
2.47.0
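A note on the 32-bit bound used by the mask_user_address macro above: %edx
is loaded with TASK_SIZE_MAX-\size, so the pointer is left intact only when
the entire size-byte access stays below TASK_SIZE_MAX; otherwise the
cmp/sbb/or sequence turns it into all 1's.  A small C model follows; the
names and the TASK_SIZE_MAX value are only examples.

  #include <stdint.h>

  #define TASK_SIZE_MAX_DEMO	0xc0000000UL	/* example 3G/1G split */

  /* Model of the 32-bit branch of the mask_user_address asm macro. */
  static inline uint32_t mask_user_address32_model(uint32_t ptr, uint32_t size)
  {
	uint32_t limit = TASK_SIZE_MAX_DEMO - size;

	/* all 1's if the last byte of the access would reach TASK_SIZE_MAX */
	uint32_t mask = (ptr > limit) ? ~0U : 0U;

	return ptr | mask;
  }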
Shutemov" Subject: [PATCH v3 5/6] x86/uaccess: Avoid barrier_nospec() in 32-bit __get_user() Date: Mon, 28 Oct 2024 18:56:18 -0700 Message-ID: <967768924bd5295c231b6bfd37e3cbf85dad3cb2.1730166635.git.jpoimboe@kernel.org> X-Mailer: git-send-email 2.47.0 In-Reply-To: References: Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" The barrier_nospec() in 34-bit __get_user() is slow. Instead use pointer masking to force the user pointer to all 1's if the access_ok() mispredicted true for an invalid address. Signed-off-by: Josh Poimboeuf --- arch/x86/lib/getuser.S | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S index 5bce27670baa..7da4fc75eba9 100644 --- a/arch/x86/lib/getuser.S +++ b/arch/x86/lib/getuser.S @@ -35,8 +35,6 @@ #include #include =20 -#define ASM_BARRIER_NOSPEC ALTERNATIVE "", "lfence", X86_FEATURE_LFENCE_RD= TSC - .macro mask_user_address size:req .if IS_ENABLED(CONFIG_X86_64) movq $0x0123456789abcdef,%rdx @@ -107,11 +105,7 @@ EXPORT_SYMBOL(__get_user_8) =20 /* .. and the same for __get_user, just without the range checks */ SYM_FUNC_START(__get_user_nocheck_1) -#ifdef CONFIG_X86_64 mask_user_address size=3D1 -#else - ASM_BARRIER_NOSPEC -#endif ASM_STAC UACCESS movzbl (%_ASM_AX),%edx xor %eax,%eax @@ -121,11 +115,7 @@ SYM_FUNC_END(__get_user_nocheck_1) EXPORT_SYMBOL(__get_user_nocheck_1) =20 SYM_FUNC_START(__get_user_nocheck_2) -#ifdef CONFIG_X86_64 mask_user_address size=3D2 -#else - ASM_BARRIER_NOSPEC -#endif ASM_STAC UACCESS movzwl (%_ASM_AX),%edx xor %eax,%eax @@ -135,11 +125,7 @@ SYM_FUNC_END(__get_user_nocheck_2) EXPORT_SYMBOL(__get_user_nocheck_2) =20 SYM_FUNC_START(__get_user_nocheck_4) -#ifdef CONFIG_X86_64 mask_user_address size=3D4 -#else - ASM_BARRIER_NOSPEC -#endif ASM_STAC UACCESS movl (%_ASM_AX),%edx xor %eax,%eax @@ -149,11 +135,7 @@ SYM_FUNC_END(__get_user_nocheck_4) EXPORT_SYMBOL(__get_user_nocheck_4) =20 SYM_FUNC_START(__get_user_nocheck_8) -#ifdef CONFIG_X86_64 mask_user_address size=3D8 -#else - ASM_BARRIER_NOSPEC -#endif ASM_STAC #ifdef CONFIG_X86_64 UACCESS movq (%_ASM_AX),%rdx --=20 2.47.0 From nobody Mon Nov 25 07:59:05 2024 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 0C0D3192597 for ; Tue, 29 Oct 2024 01:56:41 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730167002; cv=none; b=WTPaq1mqJWCivewwv+JAK4LPUWGMGSLaLRn7GauocjVVKANGxvB9aSg4A3QlfxBd+ZgqQzIPx3bN1o2tKE8JEDktNNYl5gnMk173AtcobcTUHMieQFD+nJhZaKrZ3jW80K7/hxfLKIk+yhNst5E6hDbtyeUAM0hephfXKRImpPs= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1730167002; c=relaxed/simple; bh=IH1dsBAzC1/NpJlrtMMREhzqss6Zpig4JOkpaJnGywY=; h=From:To:Cc:Subject:Date:Message-ID:In-Reply-To:References: MIME-Version; b=SxpsGN3uRcyiGjPP3/D+a2/YQIyZTp76W0uyWm7PNTAtckQyQVH9pvsLDdlKI4w01dza0HMXtZrghc0JokjW1cf3zwWjA4FnViw/8ecM5KYhX7UigIRzg6Lu+ROUkwqVJlemPlG+qUOzLCH9BxHqwsdYEsLLYrAMAmTqKJx6xKw= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=X2peZm0O; arc=none 
From: Josh Poimboeuf
To: x86@kernel.org
Cc: linux-kernel@vger.kernel.org, Thomas Gleixner, Borislav Petkov,
    Peter Zijlstra, Pawan Gupta, Waiman Long, Dave Hansen, Ingo Molnar,
    Linus Torvalds, Michael Ellerman, linuxppc-dev@lists.ozlabs.org,
    Andrew Cooper, Mark Rutland, Kirill A. Shutemov
Subject: [PATCH v3 6/6] x86/uaccess: Converge [__]get_user() implementations
Date: Mon, 28 Oct 2024 18:56:19 -0700

The x86 implementations of get_user() and __get_user() are now
identical.  Merge them by making the __get_user() entry points aliases
of their get_user() counterparts.

Signed-off-by: Josh Poimboeuf
---
 arch/x86/lib/getuser.S | 58 +++++++++---------------------------------
 1 file changed, 12 insertions(+), 46 deletions(-)

diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 7da4fc75eba9..6f4dcb80dd46 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -103,53 +103,19 @@ SYM_FUNC_START(__get_user_8)
 SYM_FUNC_END(__get_user_8)
 EXPORT_SYMBOL(__get_user_8)
 
-/* .. and the same for __get_user, just without the range checks */
-SYM_FUNC_START(__get_user_nocheck_1)
-	mask_user_address size=1
-	ASM_STAC
-	UACCESS movzbl (%_ASM_AX),%edx
-	xor %eax,%eax
-	ASM_CLAC
-	RET
-SYM_FUNC_END(__get_user_nocheck_1)
-EXPORT_SYMBOL(__get_user_nocheck_1)
-
-SYM_FUNC_START(__get_user_nocheck_2)
-	mask_user_address size=2
-	ASM_STAC
-	UACCESS movzwl (%_ASM_AX),%edx
-	xor %eax,%eax
-	ASM_CLAC
-	RET
-SYM_FUNC_END(__get_user_nocheck_2)
-EXPORT_SYMBOL(__get_user_nocheck_2)
-
-SYM_FUNC_START(__get_user_nocheck_4)
-	mask_user_address size=4
-	ASM_STAC
-	UACCESS movl (%_ASM_AX),%edx
-	xor %eax,%eax
-	ASM_CLAC
-	RET
-SYM_FUNC_END(__get_user_nocheck_4)
-EXPORT_SYMBOL(__get_user_nocheck_4)
-
-SYM_FUNC_START(__get_user_nocheck_8)
-	mask_user_address size=8
-	ASM_STAC
-#ifdef CONFIG_X86_64
-	UACCESS movq (%_ASM_AX),%rdx
-#else
-	xor %ecx,%ecx
-	UACCESS movl (%_ASM_AX),%edx
-	UACCESS movl 4(%_ASM_AX),%ecx
-#endif
-	xor %eax,%eax
-	ASM_CLAC
-	RET
-SYM_FUNC_END(__get_user_nocheck_8)
-EXPORT_SYMBOL(__get_user_nocheck_8)
+/*
+ * On x86-64, get_user() does address masking rather than a conditional bounds
+ * check so there's no functional difference compared to __get_user().
+ */
+SYM_FUNC_ALIAS(__get_user_nocheck_1, __get_user_1);
+SYM_FUNC_ALIAS(__get_user_nocheck_2, __get_user_2);
+SYM_FUNC_ALIAS(__get_user_nocheck_4, __get_user_4);
+SYM_FUNC_ALIAS(__get_user_nocheck_8, __get_user_8);
 
+EXPORT_SYMBOL(__get_user_nocheck_1);
+EXPORT_SYMBOL(__get_user_nocheck_2);
+EXPORT_SYMBOL(__get_user_nocheck_4);
+EXPORT_SYMBOL(__get_user_nocheck_8);
 
 SYM_CODE_START_LOCAL(__get_user_handle_exception)
	ASM_CLAC
-- 
2.47.0
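A closing illustration of what the convergence means for callers:
__get_user() still skips the C-level access_ok() check, but it now resolves
to the same masking-based stub as get_user().  Hypothetical example; the
function and variable names are made up.

  #include <linux/types.h>
  #include <linux/uaccess.h>

  /* Hypothetical example; names are made up. */
  static int demo_peek(const unsigned long __user *uptr, unsigned long *out)
  {
	if (!access_ok(uptr, sizeof(*uptr)))
		return -EFAULT;

	/* after this series, __get_user() lands in an alias of the get_user() stub */
	return __get_user(*out, uptr);
  }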