Documentation/admin-guide/hw-vuln/spectre.rst | 5 +-- arch/x86/include/asm/cpufeatures.h | 1 +- arch/x86/include/asm/nospec-branch.h | 11 ++++++- arch/x86/kernel/cpu/bugs.c | 36 ++++++++++++------ 4 files changed, 40 insertions(+), 13 deletions(-)
The following commit has been merged into the x86/cpu branch of tip:
Commit-ID: b5cbd5ff79a06395a17f8f524f6f8e90dcfe42d1
Gitweb: https://git.kernel.org/tip/b5cbd5ff79a06395a17f8f524f6f8e90dcfe42d1
Author: Amit Shah <amit.shah@amd.com>
AuthorDate: Thu, 31 Oct 2024 16:39:24 +01:00
Committer: Borislav Petkov (AMD) <bp@alien8.de>
CommitterDate: Mon, 04 Nov 2024 06:20:22 +01:00
x86/bugs: Add support for AMD ERAPS feature
Remove explicit RET stuffing / filling on VMEXITs and context switches
on AMD CPUs with the ERAPS feature (Zen5).
With the Enhanced Return Address Prediction Security feature, any
hardware TLB flush results in flushing of the RSB (aka RAP in AMD spec).
This guarantees an RSB flush across context switches. The feature also
explicitly tags host and guest addresses - eliminating the need for
explicit flushing of the RSB on VMEXIT.
The BTC_NO feature in AMD CPUs ensures RET predictions do not speculate
from outside the RSB. Together, the BTC_NO and ERAPS features ensure no
flushing or stuffing of the RSB is necessary anymore.
Feature documented in AMD PPR 57238.
[ bp: Massage commit message. ]
Signed-off-by: Amit Shah <amit.shah@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20241031153925.36216-2-amit@kernel.org
---
Documentation/admin-guide/hw-vuln/spectre.rst | 5 +--
arch/x86/include/asm/cpufeatures.h | 1 +-
arch/x86/include/asm/nospec-branch.h | 11 ++++++-
arch/x86/kernel/cpu/bugs.c | 36 ++++++++++++------
4 files changed, 40 insertions(+), 13 deletions(-)
diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
index 132e0bc..647c10c 100644
--- a/Documentation/admin-guide/hw-vuln/spectre.rst
+++ b/Documentation/admin-guide/hw-vuln/spectre.rst
@@ -417,9 +417,10 @@ The possible values in this file are:
- Return stack buffer (RSB) protection status:
- ============= ===========================================
+ ============= ========================================================
'RSB filling' Protection of RSB on context switch enabled
- ============= ===========================================
+ 'ERAPS' Hardware RSB flush on context switches + guest/host tags
+ ============= ========================================================
- EIBRS Post-barrier Return Stack Buffer (PBRSB) protection status:
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 05e985c..7f78212 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -457,6 +457,7 @@
#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* Automatic IBRS */
#define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* SMM_CTL MSR is not present */
+#define X86_FEATURE_ERAPS (20*32+24) /* Enhanced RAP / RSB / RAS Security */
#define X86_FEATURE_SBPB (20*32+27) /* Selective Branch Prediction Barrier */
#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
#define X86_FEATURE_SRSO_NO (20*32+29) /* CPU is not affected by SRSO */
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index ff5f1ec..d7587b4 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -117,6 +117,17 @@
* We define a CPP macro such that it can be used from both .S files and
* inline assembly. It's possible to do a .macro and then include that
* from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
+ *
+ * AMD CPUs with the ERAPS feature may have a larger default RSB. These CPUs
 * use the default number of entries on a host, and can optionally (based on
 * hypervisor setup) use 32 entries (the old default) or the new default in a
 * guest. The number
+ * of default entries is reflected in CPUID 8000_0021:EBX[23:16].
+ *
+ * With the ERAPS feature, RSB filling is not necessary anymore: the RSB is
+ * auto-cleared on a TLB flush (i.e. a context switch). Adapting the value of
+ * RSB_CLEAR_LOOPS below for ERAPS would change it to a runtime variable
+ * instead of the current compile-time constant, so leave it as-is, as this
+ * works for both older CPUs, as well as newer ones with ERAPS.
*/
#define RETPOLINE_THUNK_SIZE 32
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index d191542..3825779 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1811,9 +1811,6 @@ static void __init spectre_v2_select_mitigation(void)
* speculated return targets may come from the branch predictor,
* which could have a user-poisoned BTB or BHB entry.
*
- * AMD has it even worse: *all* returns are speculated from the BTB,
- * regardless of the state of the RSB.
- *
* When IBRS or eIBRS is enabled, the "user -> kernel" attack
* scenario is mitigated by the IBRS branch prediction isolation
* properties, so the RSB buffer filling wouldn't be necessary to
@@ -1821,6 +1818,15 @@ static void __init spectre_v2_select_mitigation(void)
*
* The "user -> user" attack scenario is mitigated by RSB filling.
*
+ * AMD CPUs without the BTC_NO bit may speculate return targets
+ * from the BTB. CPUs with BTC_NO do not speculate return targets
+ * from the BTB, even on RSB underflow.
+ *
+ * The ERAPS CPU feature (which implies the presence of BTC_NO)
+ * adds an RSB flush each time a TLB flush happens (i.e., on every
+ * context switch). So, RSB filling is not necessary for this
+ * attack type with ERAPS present.
+ *
* 2) Poisoned RSB entry
*
* If the 'next' in-kernel return stack is shorter than 'prev',
@@ -1831,17 +1837,24 @@ static void __init spectre_v2_select_mitigation(void)
* eIBRS.
*
* The "user -> user" scenario, also known as SpectreBHB, requires
- * RSB clearing.
+ * RSB clearing on processors without ERAPS.
*
* So to mitigate all cases, unconditionally fill RSB on context
- * switches.
- *
- * FIXME: Is this pointless for retbleed-affected AMD?
+ * switches when ERAPS is not present.
*/
- setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
- pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
+ if (!boot_cpu_has(X86_FEATURE_ERAPS)) {
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+ pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
- spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
+ /*
+ * For guest -> host (or vice versa) RSB poisoning scenarios,
+ * determine the mitigation mode here. With ERAPS, RSB
+	 * entries are tagged as host or guest - ensuring that neither
+	 * the host nor the guest has to clear or fill RSB entries to
+	 * avoid poisoning; so skip RSB filling at VMEXIT in that case.
+ */
+ spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
+ }
/*
* Retpoline protects the kernel, but doesn't protect firmware. IBRS
@@ -2839,7 +2852,7 @@ static ssize_t spectre_v2_show_state(char *buf)
spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
- return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n",
+ return sysfs_emit(buf, "%s%s%s%s%s%s%s%s%s\n",
spectre_v2_strings[spectre_v2_enabled],
ibpb_state(),
boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "",
@@ -2847,6 +2860,7 @@ static ssize_t spectre_v2_show_state(char *buf)
boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "",
pbrsb_eibrs_state(),
spectre_bhi_state(),
+ boot_cpu_has(X86_FEATURE_ERAPS) ? "; ERAPS hardware RSB flush" : "",
/* this should always be at the end */
spectre_v2_module_string());
}
© 2016 - 2024 Red Hat, Inc.