From: Josh Poimboeuf
To: x86@kernel.org
Cc: linux-kernel@vger.kernel.org, Linus Torvalds, Daniel Sneddon,
    Pawan Gupta, Thomas Gleixner, Alexandre Chartre,
    Konrad Rzeszutek Wilk, Peter Zijlstra, Greg Kroah-Hartman,
    Sean Christopherson, Andrew Cooper, Dave Hansen, Nikolay Borisov,
    KP Singh, Waiman Long, Borislav Petkov, Ingo Molnar
Subject: [PATCH v3] x86/bugs: Only harden syscalls when needed
Date: Tue, 16 Apr 2024 16:02:21 -0700

Syscall hardening (i.e., converting the syscall indirect branch to a
series of direct branches) may cause performance regressions in certain
scenarios.  Only use the syscall hardening when indirect branches are
considered unsafe.
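As an illustration only (not part of the patch), here is a stand-alone
user-space sketch of the dispatch pattern the change introduces: when
the "indirect branches are safe" condition holds, the syscall is
dispatched through the function-pointer table (one indirect branch);
otherwise it goes through a switch-based, direct-branch dispatcher.
The fake_sys_*() functions, the indirect_safe flag and
x64_sys_call_sketch() are placeholder names, not kernel identifiers.

#include <stdbool.h>
#include <stdio.h>

struct pt_regs { long ax; long di; };	/* minimal stand-in for the real struct */
typedef long (*sys_call_ptr_t)(const struct pt_regs *);

static long fake_sys_getpid(const struct pt_regs *regs) { (void)regs; return 42; }
static long fake_sys_write(const struct pt_regs *regs)  { return regs->di; }

/* Function-pointer table, analogous to sys_call_table[]. */
static const sys_call_ptr_t sys_call_table[] = { fake_sys_getpid, fake_sys_write };
#define NR_FAKE_SYSCALLS 2u

/* Switch-based dispatcher, analogous to the generated x64_sys_call(). */
static long x64_sys_call_sketch(const struct pt_regs *regs, unsigned int nr)
{
	switch (nr) {
	case 0: return fake_sys_getpid(regs);
	case 1: return fake_sys_write(regs);
	default: return -1;
	}
}

/* Stand-in for cpu_feature_enabled(X86_FEATURE_INDIRECT_SAFE). */
static bool indirect_safe;

static long do_syscall(struct pt_regs *regs, unsigned int nr)
{
	if (nr >= NR_FAKE_SYSCALLS)
		return -1;

	if (indirect_safe)
		/* Indirect branch through the table: used when considered safe. */
		return sys_call_table[nr](regs);

	/* Hardened path: a series of direct branches via the switch. */
	return x64_sys_call_sketch(regs, nr);
}

int main(void)
{
	struct pt_regs regs = { .ax = 0, .di = 7 };

	indirect_safe = true;
	printf("indirect path: %ld\n", do_syscall(&regs, 1));

	indirect_safe = false;
	printf("direct path:   %ld\n", do_syscall(&regs, 1));
	return 0;
}

In the kernel itself the table index is additionally clamped with
array_index_nospec(), and the feature bit is set or cleared in
arch/x86/kernel/cpu/bugs.c, as the diff below shows.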
Fixes: 1e3ad78334a6 ("x86/syscall: Don't force use of indirect calls for system calls")
Reviewed-by: Pawan Gupta
Signed-off-by: Josh Poimboeuf
Acked-by: Borislav Petkov (AMD)
---
v3:
- fix X86_FEATURE_INDIRECT_SAFE value

 arch/x86/entry/common.c            | 15 ++++++++++++---
 arch/x86/entry/syscall_32.c        | 11 +----------
 arch/x86/entry/syscall_64.c        |  6 ------
 arch/x86/entry/syscall_x32.c       |  7 ++++++-
 arch/x86/include/asm/cpufeatures.h |  1 +
 arch/x86/include/asm/syscall.h     |  8 +++++++-
 arch/x86/kernel/cpu/bugs.c         | 31 +++++++++++++++++++++++++++++-
 7 files changed, 57 insertions(+), 22 deletions(-)

diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 6de50b80702e..c0f8116291e4 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -49,7 +49,10 @@ static __always_inline bool do_syscall_x64(struct pt_regs *regs, int nr)
 
 	if (likely(unr < NR_syscalls)) {
 		unr = array_index_nospec(unr, NR_syscalls);
-		regs->ax = x64_sys_call(regs, unr);
+		if (likely(cpu_feature_enabled(X86_FEATURE_INDIRECT_SAFE)))
+			regs->ax = sys_call_table[unr](regs);
+		else
+			regs->ax = x64_sys_call(regs, unr);
 		return true;
 	}
 	return false;
@@ -66,7 +69,10 @@ static __always_inline bool do_syscall_x32(struct pt_regs *regs, int nr)
 
 	if (IS_ENABLED(CONFIG_X86_X32_ABI) && likely(xnr < X32_NR_syscalls)) {
 		xnr = array_index_nospec(xnr, X32_NR_syscalls);
-		regs->ax = x32_sys_call(regs, xnr);
+		if (likely(cpu_feature_enabled(X86_FEATURE_INDIRECT_SAFE)))
+			regs->ax = x32_sys_call_table[xnr](regs);
+		else
+			regs->ax = x32_sys_call(regs, xnr);
 		return true;
 	}
 	return false;
@@ -162,7 +168,10 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
 
 	if (likely(unr < IA32_NR_syscalls)) {
 		unr = array_index_nospec(unr, IA32_NR_syscalls);
-		regs->ax = ia32_sys_call(regs, unr);
+		if (likely(cpu_feature_enabled(X86_FEATURE_INDIRECT_SAFE)))
+			regs->ax = ia32_sys_call_table[unr](regs);
+		else
+			regs->ax = ia32_sys_call(regs, unr);
 	} else if (nr != -1) {
 		regs->ax = __ia32_sys_ni_syscall(regs);
 	}
diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
index c2235bae17ef..aab31760b4e3 100644
--- a/arch/x86/entry/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -14,25 +14,16 @@
 #endif
 
 #define __SYSCALL(nr, sym) extern long __ia32_##sym(const struct pt_regs *);
-
 #include <asm/syscalls_32.h>
 #undef __SYSCALL
 
-/*
- * The sys_call_table[] is no longer used for system calls, but
- * kernel/trace/trace_syscalls.c still wants to know the system
- * call address.
- */
-#ifdef CONFIG_X86_32
 #define __SYSCALL(nr, sym) __ia32_##sym,
-const sys_call_ptr_t sys_call_table[] = {
+const sys_call_ptr_t ia32_sys_call_table[] = {
 #include <asm/syscalls_32.h>
 };
 #undef __SYSCALL
-#endif
 
 #define __SYSCALL(nr, sym) case nr: return __ia32_##sym(regs);
-
 long ia32_sys_call(const struct pt_regs *regs, unsigned int nr)
 {
 	switch (nr) {
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index 33b3f09e6f15..96ea1f8a1d3f 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -11,11 +11,6 @@
 #include <asm/syscalls_64.h>
 #undef __SYSCALL
 
-/*
- * The sys_call_table[] is no longer used for system calls, but
- * kernel/trace/trace_syscalls.c still wants to know the system
- * call address.
- */
 #define __SYSCALL(nr, sym) __x64_##sym,
 const sys_call_ptr_t sys_call_table[] = {
 #include <asm/syscalls_64.h>
@@ -23,7 +18,6 @@ const sys_call_ptr_t sys_call_table[] = {
 #undef __SYSCALL
 
 #define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);
-
 long x64_sys_call(const struct pt_regs *regs, unsigned int nr)
 {
 	switch (nr) {
diff --git a/arch/x86/entry/syscall_x32.c b/arch/x86/entry/syscall_x32.c
index 03de4a932131..5aef4230faca 100644
--- a/arch/x86/entry/syscall_x32.c
+++ b/arch/x86/entry/syscall_x32.c
@@ -11,8 +11,13 @@
 #include <asm/syscalls_x32.h>
 #undef __SYSCALL
 
-#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);
+#define __SYSCALL(nr, sym) __x64_##sym,
+const sys_call_ptr_t x32_sys_call_table[] = {
+#include <asm/syscalls_x32.h>
+};
+#undef __SYSCALL
 
+#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);
 long x32_sys_call(const struct pt_regs *regs, unsigned int nr)
 {
 	switch (nr) {
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 3c7434329661..f0ea66cbd5f1 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -470,6 +470,7 @@
 #define X86_FEATURE_BHI_CTRL (21*32+ 2) /* "" BHI_DIS_S HW control available */
 #define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* "" BHI_DIS_S HW control enabled */
 #define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
+#define X86_FEATURE_INDIRECT_SAFE (21*32+ 5) /* "" Indirect branches aren't vulnerable to Spectre v2 */
 
 /*
  * BUG word(s)
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 2fc7bc3863ff..dfb59521244c 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -16,14 +16,20 @@
 #include <asm/thread_info.h>	/* for TS_COMPAT */
 #include <asm/unistd.h>
 
-/* This is used purely for kernel/trace/trace_syscalls.c */
 typedef long (*sys_call_ptr_t)(const struct pt_regs *);
 extern const sys_call_ptr_t sys_call_table[];
 
+#if defined(CONFIG_X86_32)
+#define ia32_sys_call_table sys_call_table
+#else
 /*
  * These may not exist, but still put the prototypes in so we
  * can use IS_ENABLED().
  */
+extern const sys_call_ptr_t ia32_sys_call_table[];
+extern const sys_call_ptr_t x32_sys_call_table[];
+#endif
+
 extern long ia32_sys_call(const struct pt_regs *, unsigned int nr);
 extern long x32_sys_call(const struct pt_regs *, unsigned int nr);
 extern long x64_sys_call(const struct pt_regs *, unsigned int nr);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index ca295b0c1eee..dcb97cc2758f 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1664,6 +1664,15 @@ static void __init bhi_select_mitigation(void)
 	if (!IS_ENABLED(CONFIG_X86_64))
 		return;
 
+	/*
+	 * There's no hardware mitigation in place, so mark indirect branches
+	 * as unsafe.
+	 *
+	 * One could argue the SW loop makes indirect branches safe again, but
+	 * Linus prefers it this way.
+	 */
+	setup_clear_cpu_cap(X86_FEATURE_INDIRECT_SAFE);
+
 	/* Mitigate KVM by default */
 	setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
 	pr_info("Spectre BHI mitigation: SW BHB clearing on vm exit\n");
@@ -1678,6 +1687,21 @@ static void __init spectre_v2_select_mitigation(void)
 	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
 	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
 
+	/*
+	 * X86_FEATURE_INDIRECT_SAFE indicates whether indirect calls can be
+	 * considered safe.  That means either:
+	 *
+	 * - the CPU isn't vulnerable to Spectre v2 or its variants;
+	 *
+	 * - a hardware mitigation is in place (e.g., IBRS, BHI_DIS_S); or
+	 *
+	 * - the user turned off mitigations altogether.
+	 *
+	 * Assume innocence until proven guilty: set the cap bit now, then
+	 * clear it later if/when needed.
+	 */
+	setup_force_cpu_cap(X86_FEATURE_INDIRECT_SAFE);
+
 	/*
 	 * If the CPU is not affected and the command line mode is NONE or AUTO
 	 * then nothing to do.
@@ -1764,11 +1788,16 @@ static void __init spectre_v2_select_mitigation(void)
 		break;
 
 	case SPECTRE_V2_LFENCE:
+		setup_clear_cpu_cap(X86_FEATURE_INDIRECT_SAFE);
+		fallthrough;
 	case SPECTRE_V2_EIBRS_LFENCE:
 		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
-		fallthrough;
+		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
+		break;
 
 	case SPECTRE_V2_RETPOLINE:
+		setup_clear_cpu_cap(X86_FEATURE_INDIRECT_SAFE);
+		fallthrough;
 	case SPECTRE_V2_EIBRS_RETPOLINE:
 		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
 		break;
-- 
2.44.0