The AMD Zen4 core supports a new feature called Automatic IBRS.
It is a "set-and-forget" feature that means that, like Intel's Enhanced IBRS,
h/w manages its IBRS mitigation resources automatically across CPL transitions.
The feature is advertised by CPUID_Fn80000021_EAX bit 8 and is enabled by
setting MSR C000_0080 (EFER) bit 21.
Enable Automatic IBRS by default if the CPU feature is present. It typically
provides greater performance over the incumbent generic retpolines mitigation.
Reuse the SPECTRE_V2_EIBRS spectre_v2_mitigation enum. AMD Automatic IBRS and
Intel Enhanced IBRS have similar enablement. Add NO_EIBRS_PBRSB to
cpu_vuln_whitelist, since AMD Automatic IBRS isn't affected by PBRSB-eIBRS.
The kernel command line option spectre_v2=eibrs is used to select AMD Automatic
IBRS, if available.
Signed-off-by: Kim Phillips <kim.phillips@amd.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
---
Documentation/admin-guide/hw-vuln/spectre.rst | 6 +++---
.../admin-guide/kernel-parameters.txt | 6 +++---
arch/x86/include/asm/cpufeatures.h | 1 +
arch/x86/include/asm/msr-index.h | 2 ++
arch/x86/kernel/cpu/bugs.c | 20 +++++++++++--------
arch/x86/kernel/cpu/common.c | 19 ++++++++++--------
6 files changed, 32 insertions(+), 22 deletions(-)
diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
index c4dcdb3d0d45..3fe6511c5405 100644
--- a/Documentation/admin-guide/hw-vuln/spectre.rst
+++ b/Documentation/admin-guide/hw-vuln/spectre.rst
@@ -610,9 +610,9 @@ kernel command line.
retpoline,generic Retpolines
retpoline,lfence LFENCE; indirect branch
retpoline,amd alias for retpoline,lfence
- eibrs enhanced IBRS
- eibrs,retpoline enhanced IBRS + Retpolines
- eibrs,lfence enhanced IBRS + LFENCE
+ eibrs Enhanced/Auto IBRS
+ eibrs,retpoline Enhanced/Auto IBRS + Retpolines
+ eibrs,lfence Enhanced/Auto IBRS + LFENCE
ibrs use IBRS to protect kernel
Not specifying this option is equivalent to
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 0ee891133d76..1d2f92edb5a1 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5729,9 +5729,9 @@
retpoline,generic - Retpolines
retpoline,lfence - LFENCE; indirect branch
retpoline,amd - alias for retpoline,lfence
- eibrs - enhanced IBRS
- eibrs,retpoline - enhanced IBRS + Retpolines
- eibrs,lfence - enhanced IBRS + LFENCE
+ eibrs - Enhanced/Auto IBRS
+ eibrs,retpoline - Enhanced/Auto IBRS + Retpolines
+ eibrs,lfence - Enhanced/Auto IBRS + LFENCE
ibrs - use IBRS to protect kernel
Not specifying this option is equivalent to
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 8ef89d595771..fdb8e09234ba 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -434,6 +434,7 @@
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */
#define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* "" LFENCE always serializing / synchronizes RDTSC */
#define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* "" Null Selector Clears Base */
+#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */
#define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* "" SMM_CTL MSR is not present */
/*
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4e0a7ad17083..ad35355ee43e 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -25,6 +25,7 @@
#define _EFER_SVME 12 /* Enable virtualization */
#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */
#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */
+#define _EFER_AUTOIBRS 21 /* Enable Automatic IBRS */
#define EFER_SCE (1<<_EFER_SCE)
#define EFER_LME (1<<_EFER_LME)
@@ -33,6 +34,7 @@
#define EFER_SVME (1<<_EFER_SVME)
#define EFER_LMSLE (1<<_EFER_LMSLE)
#define EFER_FFXSR (1<<_EFER_FFXSR)
+#define EFER_AUTOIBRS (1<<_EFER_AUTOIBRS)
/* Intel MSRs. Some also available on other CPUs */
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 4a0add86c182..cf81848b72f4 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1238,9 +1238,9 @@ static const char * const spectre_v2_strings[] = {
[SPECTRE_V2_NONE] = "Vulnerable",
[SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines",
[SPECTRE_V2_LFENCE] = "Mitigation: LFENCE",
- [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced IBRS",
- [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced IBRS + LFENCE",
- [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced IBRS + Retpolines",
+ [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS",
+ [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE",
+ [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines",
[SPECTRE_V2_IBRS] = "Mitigation: IBRS",
};
@@ -1309,7 +1309,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
!boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
- pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
+ pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
mitigation_options[i].option);
return SPECTRE_V2_CMD_AUTO;
}
@@ -1495,8 +1495,12 @@ static void __init spectre_v2_select_mitigation(void)
pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
if (spectre_v2_in_ibrs_mode(mode)) {
- x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
- update_spec_ctrl(x86_spec_ctrl_base);
+ if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
+ msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
+ } else {
+ x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+ update_spec_ctrl(x86_spec_ctrl_base);
+ }
}
switch (mode) {
@@ -1580,8 +1584,8 @@ static void __init spectre_v2_select_mitigation(void)
/*
* Retpoline protects the kernel, but doesn't protect firmware. IBRS
* and Enhanced IBRS protect firmware too, so enable IBRS around
- * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
- * enabled.
+ * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
+ * otherwise enabled.
*
* Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
* the user might select retpoline on the kernel command line and if
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 162352d42ce0..8ce67a8a61a6 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1229,8 +1229,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
- VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
+ VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
/* Zhaoxin Family 7 */
VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
@@ -1341,8 +1341,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
!cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
- if (ia32_cap & ARCH_CAP_IBRS_ALL)
+ /*
+ * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
+ * flag and protect from vendor-specific bugs via the whitelist.
+ */
+ if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+ if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+ !(ia32_cap & ARCH_CAP_PBRSB_NO))
+ setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+ }
if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
!(ia32_cap & ARCH_CAP_MDS_NO)) {
@@ -1404,11 +1412,6 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_RETBLEED);
}
- if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
- !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
- !(ia32_cap & ARCH_CAP_PBRSB_NO))
- setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
-
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;
--
2.34.1
On Tue, Jan 24, 2023 at 10:33:18AM -0600, Kim Phillips wrote: > @@ -1495,8 +1495,12 @@ static void __init spectre_v2_select_mitigation(void) > pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); > > if (spectre_v2_in_ibrs_mode(mode)) { > - x86_spec_ctrl_base |= SPEC_CTRL_IBRS; > - update_spec_ctrl(x86_spec_ctrl_base); > + if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) { > + msr_set_bit(MSR_EFER, _EFER_AUTOIBRS); Doesn't this only enable it on the boot CPU? -- Josh
On Fri, Feb 24, 2023 at 10:52:57AM -0800, Josh Poimboeuf wrote: > Doesn't this only enable it on the boot CPU? Whoops, you might be right. Lemme fix it. Thx! -- Regards/Gruss, Boris. https://people.kernel.org/tglx/notes-about-netiquette
On Fri, Feb 24, 2023 at 10:08:32PM +0100, Borislav Petkov wrote: > On Fri, Feb 24, 2023 at 10:52:57AM -0800, Josh Poimboeuf wrote: > > Doesn't this only enable it on the boot CPU? > > Whoops, you might be right. Actually, we stick that MSR - EFER - into the trampoline header and then each AP gets it written to in arch/x86/realmode/rm/trampoline_64.S But this is only from code staring - I'll confirm this tomorrow. And if so, we should at least put comments in that trampoline code so that people do not remove the MSR writes. Or, actually, we should simply write it again because it is the init path and not really a hot path but it should damn well make sure that that bit gets set. Thx. -- Regards/Gruss, Boris. https://people.kernel.org/tglx/notes-about-netiquette
On Fri, Feb 24, 2023 at 11:51:17PM +0100, Borislav Petkov wrote: > On Fri, Feb 24, 2023 at 10:08:32PM +0100, Borislav Petkov wrote: > > On Fri, Feb 24, 2023 at 10:52:57AM -0800, Josh Poimboeuf wrote: > > > Doesn't this only enable it on the boot CPU? > > > > Whoops, you might be right. > > Actually, we stick that MSR - EFER - into the trampoline header and then > each AP gets it written to in arch/x86/realmode/rm/trampoline_64.S > > But this is only from code staring - I'll confirm this tomorrow. Ah, I had to stare it that for a bit to figure out how it works. setup_real_mode() reads MSR_EFER from the boot CPU and stores it in trampoline_header->efer. Then the other CPUs read that stored value in startup_32() and write it into their MSR. > And if so, we should at least put comments in that trampoline code so > that people do not remove the MSR writes. > > Or, actually, we should simply write it again because it is the init > path and not really a hot path but it should damn well make sure that > that bit gets set. Yeah, I think that would be good. Otherwise it's rather magical. That EFER MSR is a surprising place to put that bit. -- Josh
On Fri, Feb 24, 2023 at 04:09:31PM -0800, Josh Poimboeuf wrote:
> Ah, I had to stare at that for a bit to figure out how it works.
Yeah, it is a bit "hidden". :)
> setup_real_mode() reads MSR_EFER from the boot CPU and stores it in
> trampoline_header->efer. Then the other CPUs read that stored value in
> startup_32() and write it into their MSR.
Exactly.
> Yeah, I think that would be good. Otherwise it's rather magical.
Yap, see below.
> That EFER MSR is a surprising place to put that bit.
That MSR is very important on AMD. Consider it AMD's CR4. :-)
Thx.
---
From: "Borislav Petkov (AMD)" <bp@alien8.de>
Date: Sat, 25 Feb 2023 01:11:31 +0100
Subject: [PATCH] x86/CPU/AMD: Make sure EFER[AIBRSE] is set
The AutoIBRS bit gets set only on the BSP as part of determining which
mitigation to enable on AMD. Setting it on the APs relies on the
circumstance that the APs get booted through the trampoline and EFER
- the MSR which contains that bit - gets replicated on every AP from the
BSP.
However, this can change in the future and considering the security
implications of this bit not being set on every CPU, make sure it is set
by verifying EFER later in the boot process and on every AP.
Reported-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230224185257.o3mcmloei5zqu7wa@treble
---
arch/x86/kernel/cpu/amd.c | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 380753b14cab..de624c1442c2 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -996,6 +996,16 @@ static void init_amd(struct cpuinfo_x86 *c)
msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
check_null_seg_clears_base(c);
+
+ /*
+ * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up
+ * using the trampoline code and as part of it, EFER gets prepared there in order
+ * to be replicated onto them. Regardless, set it here again, if not set, to protect
+ * against any future refactoring/code reorganization which might miss setting
+ * this important bit.
+ */
+ if (cpu_has(c, X86_FEATURE_AUTOIBRS))
+ msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
}
#ifdef CONFIG_X86_32
--
2.35.1
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
On Sat, Feb 25, 2023 at 01:20:24AM +0100, Borislav Petkov wrote: > On Fri, Feb 24, 2023 at 04:09:31PM -0800, Josh Poimboeuf wrote: > > Ah, I had to stare it that for a bit to figure out how it works. > > Yeah, it is a bit "hidden". :) > > > setup_real_mode() reads MSR_EFER from the boot CPU and stores it in > > trampoline_header->efer. Then the other CPUs read that stored value in > > startup_32() and write it into their MSR. > > Exactly. > > > Yeah, I think that would be good. Otherwise it's rather magical. > > Yap, see below. > > > That EFER MSR is a surprising place to put that bit. > > That MSR is very important on AMD. Consider it AMD's CR4. :-) > > Thx. > > --- > From: "Borislav Petkov (AMD)" <bp@alien8.de> > Date: Sat, 25 Feb 2023 01:11:31 +0100 > Subject: [PATCH] x86/CPU/AMD: Make sure EFER[AIBRSE] is set > > The AutoIBRS bit gets set only on the BSP as part of determining which > mitigation to enable on AMD. Setting on the APs relies on the > circumstance that the APs get booted through the trampoline and EFER > - the MSR which contains that bit - gets replicated on every AP from the > BSP. > > However, this can change in the future and considering the security > implications of this bit not being set on every CPU, make sure it is set > by verifying EFER later in the boot process and on every AP. 
> > Reported-by: Josh Poimboeuf <jpoimboe@kernel.org> > Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de> > Link: https://lore.kernel.org/r/20230224185257.o3mcmloei5zqu7wa@treble > --- > arch/x86/kernel/cpu/amd.c | 10 ++++++++++ > 1 file changed, 10 insertions(+) > > diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c > index 380753b14cab..de624c1442c2 100644 > --- a/arch/x86/kernel/cpu/amd.c > +++ b/arch/x86/kernel/cpu/amd.c > @@ -996,6 +996,16 @@ static void init_amd(struct cpuinfo_x86 *c) > msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT); > > check_null_seg_clears_base(c); > + > + /* > + * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up > + * using the trampoline code and as part of it, EFER gets prepared there in order > + * to be replicated onto them. Regardless, set it here again, if not set, to protect > + * against any future refactoring/code reorganization which might miss setting > + * this important bit. > + */ > + if (cpu_has(c, X86_FEATURE_AUTOIBRS)) > + msr_set_bit(MSR_EFER, _EFER_AUTOIBRS); Is it intended to be set regardless of the spectre_v2 mitigation status?
On Fri, Feb 24, 2023 at 04:52:21PM -0800, Pawan Gupta wrote: > On Sat, Feb 25, 2023 at 01:20:24AM +0100, Borislav Petkov wrote: > > On Fri, Feb 24, 2023 at 04:09:31PM -0800, Josh Poimboeuf wrote: > > > Ah, I had to stare it that for a bit to figure out how it works. > > > > Yeah, it is a bit "hidden". :) > > > > > setup_real_mode() reads MSR_EFER from the boot CPU and stores it in > > > trampoline_header->efer. Then the other CPUs read that stored value in > > > startup_32() and write it into their MSR. > > > > Exactly. > > > > > Yeah, I think that would be good. Otherwise it's rather magical. > > > > Yap, see below. > > > > > That EFER MSR is a surprising place to put that bit. > > > > That MSR is very important on AMD. Consider it AMD's CR4. :-) > > > > Thx. > > > > --- > > From: "Borislav Petkov (AMD)" <bp@alien8.de> > > Date: Sat, 25 Feb 2023 01:11:31 +0100 > > Subject: [PATCH] x86/CPU/AMD: Make sure EFER[AIBRSE] is set > > > > The AutoIBRS bit gets set only on the BSP as part of determining which > > mitigation to enable on AMD. Setting on the APs relies on the > > circumstance that the APs get booted through the trampoline and EFER > > - the MSR which contains that bit - gets replicated on every AP from the > > BSP. > > > > However, this can change in the future and considering the security > > implications of this bit not being set on every CPU, make sure it is set > > by verifying EFER later in the boot process and on every AP. 
> > > > Reported-by: Josh Poimboeuf <jpoimboe@kernel.org> > > Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de> > > Link: https://lore.kernel.org/r/20230224185257.o3mcmloei5zqu7wa@treble > > --- > > arch/x86/kernel/cpu/amd.c | 10 ++++++++++ > > 1 file changed, 10 insertions(+) > > > > diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c > > index 380753b14cab..de624c1442c2 100644 > > --- a/arch/x86/kernel/cpu/amd.c > > +++ b/arch/x86/kernel/cpu/amd.c > > @@ -996,6 +996,16 @@ static void init_amd(struct cpuinfo_x86 *c) > > msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT); > > > > check_null_seg_clears_base(c); > > + > > + /* > > + * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up > > + * using the trampoline code and as part of it, EFER gets prepared there in order > > + * to be replicated onto them. Regardless, set it here again, if not set, to protect > > + * against any future refactoring/code reorganization which might miss setting > > + * this important bit. > > + */ > > + if (cpu_has(c, X86_FEATURE_AUTOIBRS)) > > + msr_set_bit(MSR_EFER, _EFER_AUTOIBRS); > > Is it intended to be set regardless of the spectre_v2 mitigation status? Right, it needs to check spectre_v2_enabled. Also, this code might be a better fit in identify_secondary_cpu() with the other MSR-writing bug-related code. -- Josh
On Fri, Feb 24, 2023 at 05:32:02PM -0800, Josh Poimboeuf wrote: > > Is it intended to be set regardless of the spectre_v2 mitigation status? > > Right, it needs to check spectre_v2_enabled. Right, I realized this too this morning, while sleeping, so I made me a note on the nightstand to fix it... :-) > Also, this code might be a better fit in identify_secondary_cpu() with > the other MSR-writing bug-related code. Same path: identify_secondary_cpu->identify_cpu->this_cpu->c_init(c)->init_amd Plus, it keeps the vendor code where it belongs. v2 below, still untested. --- From: "Borislav Petkov (AMD)" <bp@alien8.de> Date: Sat, 25 Feb 2023 01:11:31 +0100 Subject: [PATCH] x86/CPU/AMD: Make sure EFER[AIBRSE] is set The AutoIBRS bit gets set only on the BSP as part of determining which mitigation to enable on AMD. Setting on the APs relies on the circumstance that the APs get booted through the trampoline and EFER - the MSR which contains that bit - gets replicated on every AP from the BSP. However, this can change in the future and considering the security implications of this bit not being set on every CPU, make sure it is set by verifying EFER later in the boot process and on every AP. Reported-by: Josh Poimboeuf <jpoimboe@kernel.org> Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de> Link: https://lore.kernel.org/r/20230224185257.o3mcmloei5zqu7wa@treble --- arch/x86/kernel/cpu/amd.c | 11 +++++++++++ arch/x86/kernel/cpu/bugs.c | 14 ++------------ arch/x86/kernel/cpu/cpu.h | 10 ++++++++++ 3 files changed, 23 insertions(+), 12 deletions(-) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 380753b14cab..aba1b43ed6fd 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -996,6 +996,17 @@ static void init_amd(struct cpuinfo_x86 *c) msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT); check_null_seg_clears_base(c); + + /* + * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. 
The APs are brought up + * using the trampoline code and as part of it, EFER gets prepared there in order + * to be replicated onto them. Regardless, set it here again, if not set, to protect + * against any future refactoring/code reorganization which might miss setting + * this important bit. + */ + if (spectre_v2_in_ibrs_mode(spectre_v2_enabled) && + cpu_has(c, X86_FEATURE_AUTOIBRS)) + msr_set_bit(MSR_EFER, _EFER_AUTOIBRS); } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 4fd43d25b483..407c73d3beb9 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -784,8 +784,7 @@ static int __init nospectre_v1_cmdline(char *str) } early_param("nospectre_v1", nospectre_v1_cmdline); -static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = - SPECTRE_V2_NONE; +enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE; #undef pr_fmt #define pr_fmt(fmt) "RETBleed: " fmt @@ -1133,16 +1132,7 @@ spectre_v2_parse_user_cmdline(void) return SPECTRE_V2_USER_CMD_AUTO; } -static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) -{ - return mode == SPECTRE_V2_IBRS || - mode == SPECTRE_V2_EIBRS || - mode == SPECTRE_V2_EIBRS_RETPOLINE || - mode == SPECTRE_V2_EIBRS_LFENCE; -} - -static void __init -spectre_v2_user_select_mitigation(void) +static void __init spectre_v2_user_select_mitigation(void) { enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE; bool smt_possible = IS_ENABLED(CONFIG_SMP); diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 57a5349e6954..99c507c42901 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -83,4 +83,14 @@ unsigned int aperfmperf_get_khz(int cpu); extern void x86_spec_ctrl_setup_ap(void); extern void update_srbds_msr(void); +extern enum spectre_v2_mitigation spectre_v2_enabled; + +static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) +{ + return mode == 
SPECTRE_V2_IBRS || + mode == SPECTRE_V2_EIBRS || + mode == SPECTRE_V2_EIBRS_RETPOLINE || + mode == SPECTRE_V2_EIBRS_LFENCE; +} + #endif /* ARCH_X86_CPU_H */ -- 2.35.1 -- Regards/Gruss, Boris. https://people.kernel.org/tglx/notes-about-netiquette
On 2/25/23 04:21, Borislav Petkov wrote: > + /* > + * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up > + * using the trampoline code and as part of it, EFER gets prepared there in order > + * to be replicated onto them. Regardless, set it here again, if not set, to protect > + * against any future refactoring/code reorganization which might miss setting > + * this important bit. > + */ > + if (spectre_v2_in_ibrs_mode(spectre_v2_enabled) && > + cpu_has(c, X86_FEATURE_AUTOIBRS)) > + msr_set_bit(MSR_EFER, _EFER_AUTOIBRS); > } I guess the belt and suspenders could be justified here by how important the bit is. But, if EFER[AIBRSE] gets clear somehow shouldn't we also dump a warning out here so the fool who botched it can fix it? Even if AIBRSE is fixed up, some less important bit could still be botched. It will freak some users out, but it does seem like the kind of thing we _want_ a bug report for.
On Mon, Feb 27, 2023 at 07:25:00AM -0800, Dave Hansen wrote: > It will freak some users out, but it does seem like the kind of thing we > _want_ a bug report for. You mean, something like: if (spectre_v2_in_ibrs_mode(spectre_v2_enabled) && cpu_has(c, X86_FEATURE_AUTOIBRS)) WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS)); ? -- Regards/Gruss, Boris. https://people.kernel.org/tglx/notes-about-netiquette
On 2/27/23 07:40, Borislav Petkov wrote: > On Mon, Feb 27, 2023 at 07:25:00AM -0800, Dave Hansen wrote: >> It will freak some users out, but it does seem like the kind of thing we >> _want_ a bug report for. > You mean, something like: > > if (spectre_v2_in_ibrs_mode(spectre_v2_enabled) && > cpu_has(c, X86_FEATURE_AUTOIBRS)) > WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS)); > > ? Yep, that looks sane.
v2, with feedback addressed, and rediffed ontop of 6.3-rc1:
---
From: "Borislav Petkov (AMD)" <bp@alien8.de>
The AutoIBRS bit gets set only on the BSP as part of determining which
mitigation to enable on AMD. Setting it on the APs relies on the
circumstance that the APs get booted through the trampoline and EFER
- the MSR which contains that bit - gets replicated on every AP from the
BSP.
However, this can change in the future and considering the security
implications of this bit not being set on every CPU, make sure it is set
by verifying EFER later in the boot process and on every AP.
Reported-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230224185257.o3mcmloei5zqu7wa@treble
---
arch/x86/kernel/cpu/amd.c | 11 +++++++++++
arch/x86/kernel/cpu/bugs.c | 10 +---------
arch/x86/kernel/cpu/cpu.h | 8 ++++++++
3 files changed, 20 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 380753b14cab..dd32dbc7c33e 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -996,6 +996,17 @@ static void init_amd(struct cpuinfo_x86 *c)
msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
check_null_seg_clears_base(c);
+
+ /*
+ * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up
+ * using the trampoline code and as part of it, MSR_EFER gets prepared there in
+ * order to be replicated onto them. Regardless, set it here again, if not set,
+ * to protect against any future refactoring/code reorganization which might
+ * miss setting this important bit.
+ */
+ if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
+ cpu_has(c, X86_FEATURE_AUTOIBRS))
+ WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index f9d060e71c3e..182af64387d0 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -784,8 +784,7 @@ static int __init nospectre_v1_cmdline(char *str)
}
early_param("nospectre_v1", nospectre_v1_cmdline);
-static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
- SPECTRE_V2_NONE;
+enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
#undef pr_fmt
#define pr_fmt(fmt) "RETBleed: " fmt
@@ -1133,13 +1132,6 @@ spectre_v2_parse_user_cmdline(void)
return SPECTRE_V2_USER_CMD_AUTO;
}
-static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
-{
- return mode == SPECTRE_V2_EIBRS ||
- mode == SPECTRE_V2_EIBRS_RETPOLINE ||
- mode == SPECTRE_V2_EIBRS_LFENCE;
-}
-
static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
{
return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 57a5349e6954..f97b0fe13da8 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -83,4 +83,12 @@ unsigned int aperfmperf_get_khz(int cpu);
extern void x86_spec_ctrl_setup_ap(void);
extern void update_srbds_msr(void);
+extern enum spectre_v2_mitigation spectre_v2_enabled;
+
+static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
+{
+ return mode == SPECTRE_V2_EIBRS ||
+ mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+ mode == SPECTRE_V2_EIBRS_LFENCE;
+}
#endif /* ARCH_X86_CPU_H */
--
2.35.1
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
On 3/10/23 08:22, Borislav Petkov wrote: > The AutoIBRS bit gets set only on the BSP as part of determining which > mitigation to enable on AMD. Setting on the APs relies on the > circumstance that the APs get booted through the trampoline and EFER > - the MSR which contains that bit - gets replicated on every AP from the > BSP. > > However, this can change in the future and considering the security > implications of this bit not being set on every CPU, make sure it is set > by verifying EFER later in the boot process and on every AP. > > Reported-by: Josh Poimboeuf <jpoimboe@kernel.org> > Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de> > Link: https://lore.kernel.org/r/20230224185257.o3mcmloei5zqu7wa@treble Looks sane, thanks for adding the warning: Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
On Sat, Feb 25, 2023 at 01:21:49PM +0100, Borislav Petkov wrote: > On Fri, Feb 24, 2023 at 05:32:02PM -0800, Josh Poimboeuf wrote: > > > Is it intended to be set regardless of the spectre_v2 mitigation status? > > > > Right, it needs to check spectre_v2_enabled. > > Right, I realized this too this morning, while sleeping, so I made me > a note on the nightstand to fix it... :-) > > > Also, this code might be a better fit in identify_secondary_cpu() with > > the other MSR-writing bug-related code. > > Same path: > > identify_secondary_cpu->identify_cpu->this_cpu->c_init(c)->init_amd > > Plus, it keeps the vendor code where it belongs. All the other "bug" code in identify_secondary_cpu() *is* vendor-specific. And for that matter, so is most of the code in bugs.c. I'm thinking we should just move all this MSR-writing bug-related code into a new cpu_init_bugs() function in bugs.c which can be called by identify_secondary_cpu(). Then we have more "bug" code together and all the local variables/functions like spectre_v2_in_ibrs_mode() can remain local. -- Josh
On Sat, Feb 25, 2023 at 09:28:32AM -0800, Josh Poimboeuf wrote: > All the other "bug" code in identify_secondary_cpu() *is* > vendor-specific. I meant "vendor-specific" in the sense that AMD code goes to amd.c, etc. As to the identify_secondary_cpu() code - I didn't like it being slapped there either but it got stuck in there hastily during the mitigations upstreaming as back then we had bigger fish to fry than paying too much attention to clean design... > And for that matter, so is most of the code in bugs.c. > > I'm thinking we should just move all this MSR-writing bug-related code > into a new cpu_init_bugs() function in bugs.c which can be called by > identify_secondary_cpu(). I guess. > Then we have more "bug" code together and all the local > variables/functions like spectre_v2_in_ibrs_mode() can remain local. They're still local, more or less. Note the special cpu.h header which is private to arch/x86/kernel/cpu/ Thx. -- Regards/Gruss, Boris. https://people.kernel.org/tglx/notes-about-netiquette
On Sat, Feb 25, 2023 at 11:56:37PM +0100, Borislav Petkov wrote: > On Sat, Feb 25, 2023 at 09:28:32AM -0800, Josh Poimboeuf wrote: > > All the other "bug" code in identify_secondary_cpu() *is* > > vendor-specific. > > I meant "vendor-specific" in the sense that AMD code goes to amd.c, etc. Hm? So code in bugs.c is not vendor-specific? That seems circular and I don't get your point. > As to the identify_secondary_cpu() code - I didn't like it being > slapped there either but it got stuck in there hastily during the > mitigations upstreaming as back then we had bigger fish to fry than > paying too much attention to clean design... Right, so rather than spreading all the bug-related MSR logic around, just do it in one spot. -- Josh
On Sat, Feb 25, 2023 at 03:43:30PM -0800, Josh Poimboeuf wrote: > Hm? So code in bugs.c is not vendor-specific? That seems circular and > I don't get your point. Lemme try that again... So there's an obvious benefit of keeping vendor-specific CPU code in one place: Intel stuff in cpu/intel*, AMD stuff in cpu/amd.c The security stuff is still vendor-specific CPU code. Now, if you wanna add a function pointer ->bugs_init or so, say, to struct cpu_dev and keep the respective code in amd.c or intel.c, then we get the best of both worlds: - vendor-specific code remains in the respective file - you have a vendor-specific function which does hw vuln-specific work *without* vendor checks and so on > Right, so rather than spreading all the bug-related MSR logic around, > just do it in one spot. It is all CPU init code and I'm wondering if splitting stuff by vendor wouldn't make all that maze in bugs.c a lot more palatable. And get rid of $ git grep VENDOR arch/x86/kernel/cpu/bugs.c | wc -l 11 those, for starters. There's this trade-off of 1. keeping bugs setup code in one place - but then you need to do vendor checks and the other CPU setup code is somewhere else and it is probably related, MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT in amd.c for example. or 2. separating it into their respective files. Then the respective vendor code is simple because you don't need vendor checks. It would need to be done in a slick way, though, so that it remains maintainable. -- Regards/Gruss, Boris. https://people.kernel.org/tglx/notes-about-netiquette
On Sun, Feb 26, 2023 at 12:18:06PM +0100, Borislav Petkov wrote: > > Right, so rather than spreading all the bug-related MSR logic around, > > just do it in one spot. > > It is all CPU init code and I'm wondering if splitting stuff by vendor > wouldn't make all that maze in bugs.c a lot more palatable. And get rid > of > > $ git grep VENDOR arch/x86/kernel/cpu/bugs.c | wc -l > 11 > > those, for starters. > > There's this trade-off of > > 1. keeping bugs setup code in one place - but then you need to do vendor > checks and the other CPU setup code is somewhere else and it is > probably related, MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT in amd.c for > example. > > or > > 2. separating it into their respective files. Then the respective vendor > code is simple because you don't need vendor checks. It would need to > be done in a slick way, though, so that it remains maintainable. At least now it's a (mostly) self-contained hornets nest. I'm not sure we want to poke it :-) And I'm not sure spreading the mess around would be an improvement. -- Josh
On Sun, Feb 26, 2023 at 09:27:26AM -0800, Josh Poimboeuf wrote: > At least now it's a (mostly) self-contained hornets nest. I'm not sure > we want to poke it :-) > > And I'm not sure spreading the mess around would be an improvement. Yah, if anything, I wanna see the change first and it has to be an obvious and good one. What I think we should finish doing, though, is documenting it. Because there are aspects/mitigations that are missing, e.g., there's no retbleed.rst in Documentation/admin-guide/hw-vuln/ -- Regards/Gruss, Boris. https://people.kernel.org/tglx/notes-about-netiquette
On Fri, Feb 24, 2023 at 11:51:17PM +0100, Borislav Petkov wrote: > Or, actually, we should simply write it again because it is the init > path and not really a hot path but it should damn well make sure that > that bit gets set. Yeah, we have this fancy msr_set_bit() interface which saves us the MSR write when not needed. And it also tells us that. :-) So we can do: diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 380753b14cab..2aa089aa23db 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -996,6 +996,12 @@ static void init_amd(struct cpuinfo_x86 *c) msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT); check_null_seg_clears_base(c); + + if (cpu_has(c, X86_FEATURE_AUTOIBRS)) { + int ret = msr_set_bit(MSR_EFER, _EFER_AUTOIBRS); + + pr_info("%s: CPU%d, ret: %d\n", __func__, smp_processor_id(), ret); + } } #ifdef CONFIG_X86_32 --- and the output looks like this: [ 3.046607] x86: Booting SMP configuration: [ 3.046609] .... node #0, CPUs: #1 [ 2.874768] init_amd: CPU1, ret: 0 [ 3.046873] #2 [ 2.874768] init_amd: CPU2, ret: 0 [ 3.049155] #3 [ 2.874768] init_amd: CPU3, ret: 0 [ 3.050834] #4 [ 2.874768] init_amd: CPU4, ret: 0 ... which says that the bit was already set - which confirms the trampoline setting thing. And doing the write again serves as a guard when in the future we decide to not set EFER anymore - I doubt it - but we can't allow ourselves to not set the autoibrs bit so one more RDMSR on init doesn't matter. Proper patch tomorrow. Thx. -- Regards/Gruss, Boris. https://people.kernel.org/tglx/notes-about-netiquette
On Fri, Feb 24, 2023 at 10:08:32PM +0100, Borislav Petkov wrote: > On Fri, Feb 24, 2023 at 10:52:57AM -0800, Josh Poimboeuf wrote: > > Doesn't this only enable it on the boot CPU? > > Whoops, you might be right. > > Lemme fix it. > > Thx! BTW, I wasn't copied on the patch set, despite having dedicated years of my life to that file ;-) Can we add bugs.c and friends to MAINTAINERS? ---8<--- From: Josh Poimboeuf <jpoimboe@kernel.org> Subject: [PATCH] MAINTAINERS: Add x86 hardware vulnerabilities section Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org> --- MAINTAINERS | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index eb6f650c6c0b..338dc7469f80 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -22553,6 +22553,16 @@ S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/asm F: arch/x86/entry/ +X86 HARDWARE VULNERABILITIES +M: Thomas Gleixner <tglx@linutronix.de> +M: Borislav Petkov <bp@alien8.de> +M: Peter Zijlstra <peterz@infradead.org> +M: Josh Poimboeuf <jpoimboe@kernel.org> +S: Maintained +F: Documentation/admin-guide/hw-vuln/ +F: arch/x86/include/asm/nospec-branch.h +F: arch/x86/kernel/cpu/bugs.c + X86 MCE INFRASTRUCTURE M: Tony Luck <tony.luck@intel.com> M: Borislav Petkov <bp@alien8.de> -- 2.39.1
On Fri, Feb 24, 2023 at 01:35:22PM -0800, Josh Poimboeuf wrote: > BTW, I wasn't copied on the patch set, despite having dedicated years of > my life that file ;-) ... and yet, even after all that pain, you're still willing to self-inflict moar. :-P > Can we add bugs.c and friends to MAINTAINERS? Sure, might as well. Acked-by: Borislav Petkov (AMD) <bp@alien8.de> I'll queue it after the MW is over. Thx. -- Regards/Gruss, Boris. https://people.kernel.org/tglx/notes-about-netiquette
>> Can we add bugs.c and friends to MAINTAINERS? > > Sure, might as well. > > Acked-by: Borislav Petkov (AMD) <bp@alien8.de> > > I'll queue it after the MW is over. Should also include Pawan as another unfortunate soul sucked into keeping that file up to date with the latest wreckage. If not as "M", at least as "R": R: Pawan Gupta <pawan.kumar.gupta@linux.intel.com> -Tony
On Fri, Feb 24, 2023 at 10:03:16PM +0000, Luck, Tony wrote: > Should also include Pawan as another unfortunate soul sucked > into keeping that file up to date with the latest wreckage. If not > as "M", at least as "R": > > R: Pawan Gupta <pawan.kumar.gupta@linux.intel.com> We probably should hear from him before you offer his soul into the purgatory of hardware speculation. :-P -- Regards/Gruss, Boris. https://people.kernel.org/tglx/notes-about-netiquette
On Fri, Feb 24, 2023 at 11:12:29PM +0100, Borislav Petkov wrote: > On Fri, Feb 24, 2023 at 10:03:16PM +0000, Luck, Tony wrote: > > Should also include Pawan as another unfortunate soul sucked > > into keeping that file up to date with the latest wreckage. If not > > as "M", at least as "R": > > > > R: Pawan Gupta <pawan.kumar.gupta@linux.intel.com> > > We probably should hear from him before you offer his soul into the > purgatory of hardware speculation. I will be happy to review what I can. Soulfully yours, Pawan
The following commit has been merged into the x86/misc branch of tip:
Commit-ID: 5910f06503aae3cc4890e562683abc3e38857ff9
Gitweb: https://git.kernel.org/tip/5910f06503aae3cc4890e562683abc3e38857ff9
Author: Josh Poimboeuf <jpoimboe@kernel.org>
AuthorDate: Fri, 24 Feb 2023 13:35:22 -08:00
Committer: Borislav Petkov (AMD) <bp@alien8.de>
CommitterDate: Fri, 10 Mar 2023 11:13:30 +01:00
MAINTAINERS: Add x86 hardware vulnerabilities section
Add the bunch of losers who have to deal with this to MAINTAINERS so
that they can get explicitly CCed on more hw nightmares.
[ bp: Add commit message. ]
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230224213522.nofavod2jzhn22wp@treble
---
MAINTAINERS | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/MAINTAINERS b/MAINTAINERS
index 8d5bc22..d95c6cc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -22660,6 +22660,17 @@ S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/asm
F: arch/x86/entry/
+X86 HARDWARE VULNERABILITIES
+M: Thomas Gleixner <tglx@linutronix.de>
+M: Borislav Petkov <bp@alien8.de>
+M: Peter Zijlstra <peterz@infradead.org>
+M: Josh Poimboeuf <jpoimboe@kernel.org>
+R: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+S: Maintained
+F: Documentation/admin-guide/hw-vuln/
+F: arch/x86/include/asm/nospec-branch.h
+F: arch/x86/kernel/cpu/bugs.c
+
X86 MCE INFRASTRUCTURE
M: Tony Luck <tony.luck@intel.com>
M: Borislav Petkov <bp@alien8.de>
The following commit has been merged into the x86/cpu branch of tip:
Commit-ID: 8cc68c9c9e92dbaae51a711454c66eb668045508
Gitweb: https://git.kernel.org/tip/8cc68c9c9e92dbaae51a711454c66eb668045508
Author: Borislav Petkov (AMD) <bp@alien8.de>
AuthorDate: Sat, 25 Feb 2023 01:11:31 +01:00
Committer: Borislav Petkov (AMD) <bp@alien8.de>
CommitterDate: Thu, 16 Mar 2023 11:50:00 +01:00
x86/CPU/AMD: Make sure EFER[AIBRSE] is set
The AutoIBRS bit gets set only on the BSP as part of determining which
mitigation to enable on AMD. Setting on the APs relies on the
circumstance that the APs get booted through the trampoline and EFER
- the MSR which contains that bit - gets replicated on every AP from the
BSP.
However, this can change in the future and considering the security
implications of this bit not being set on every CPU, make sure it is set
by verifying EFER later in the boot process and on every AP.
Reported-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lore.kernel.org/r/20230224185257.o3mcmloei5zqu7wa@treble
---
arch/x86/kernel/cpu/amd.c | 11 +++++++++++
arch/x86/kernel/cpu/bugs.c | 10 +---------
arch/x86/kernel/cpu/cpu.h | 8 ++++++++
3 files changed, 20 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 380753b..dd32dbc 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -996,6 +996,17 @@ static void init_amd(struct cpuinfo_x86 *c)
msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
check_null_seg_clears_base(c);
+
+ /*
+ * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up
+ * using the trampoline code and as part of it, MSR_EFER gets prepared there in
+ * order to be replicated onto them. Regardless, set it here again, if not set,
+ * to protect against any future refactoring/code reorganization which might
+ * miss setting this important bit.
+ */
+ if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
+ cpu_has(c, X86_FEATURE_AUTOIBRS))
+ WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS));
}
#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index f9d060e..182af64 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -784,8 +784,7 @@ static int __init nospectre_v1_cmdline(char *str)
}
early_param("nospectre_v1", nospectre_v1_cmdline);
-static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
- SPECTRE_V2_NONE;
+enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
#undef pr_fmt
#define pr_fmt(fmt) "RETBleed: " fmt
@@ -1133,13 +1132,6 @@ spectre_v2_parse_user_cmdline(void)
return SPECTRE_V2_USER_CMD_AUTO;
}
-static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
-{
- return mode == SPECTRE_V2_EIBRS ||
- mode == SPECTRE_V2_EIBRS_RETPOLINE ||
- mode == SPECTRE_V2_EIBRS_LFENCE;
-}
-
static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
{
return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 57a5349..f97b0fe 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -83,4 +83,12 @@ unsigned int aperfmperf_get_khz(int cpu);
extern void x86_spec_ctrl_setup_ap(void);
extern void update_srbds_msr(void);
+extern enum spectre_v2_mitigation spectre_v2_enabled;
+
+static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
+{
+ return mode == SPECTRE_V2_EIBRS ||
+ mode == SPECTRE_V2_EIBRS_RETPOLINE ||
+ mode == SPECTRE_V2_EIBRS_LFENCE;
+}
#endif /* ARCH_X86_CPU_H */
The following commit has been merged into the x86/cpu branch of tip:
Commit-ID: e7862eda309ecfccc36bb5558d937ed3ace07f3f
Gitweb: https://git.kernel.org/tip/e7862eda309ecfccc36bb5558d937ed3ace07f3f
Author: Kim Phillips <kim.phillips@amd.com>
AuthorDate: Tue, 24 Jan 2023 10:33:18 -06:00
Committer: Borislav Petkov (AMD) <bp@alien8.de>
CommitterDate: Wed, 25 Jan 2023 17:16:01 +01:00
x86/cpu: Support AMD Automatic IBRS
The AMD Zen4 core supports a new feature called Automatic IBRS.
It is a "set-and-forget" feature that means that, like Intel's Enhanced IBRS,
h/w manages its IBRS mitigation resources automatically across CPL transitions.
The feature is advertised by CPUID_Fn80000021_EAX bit 8 and is enabled by
setting MSR C000_0080 (EFER) bit 21.
Enable Automatic IBRS by default if the CPU feature is present. It typically
provides greater performance over the incumbent generic retpolines mitigation.
Reuse the SPECTRE_V2_EIBRS spectre_v2_mitigation enum. AMD Automatic IBRS and
Intel Enhanced IBRS have similar enablement. Add NO_EIBRS_PBRSB to
cpu_vuln_whitelist, since AMD Automatic IBRS isn't affected by PBRSB-eIBRS.
The kernel command line option spectre_v2=eibrs is used to select AMD Automatic
IBRS, if available.
Signed-off-by: Kim Phillips <kim.phillips@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Sean Christopherson <seanjc@google.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lore.kernel.org/r/20230124163319.2277355-8-kim.phillips@amd.com
---
Documentation/admin-guide/hw-vuln/spectre.rst | 6 ++---
Documentation/admin-guide/kernel-parameters.txt | 6 ++---
arch/x86/include/asm/cpufeatures.h | 1 +-
arch/x86/include/asm/msr-index.h | 2 ++-
arch/x86/kernel/cpu/bugs.c | 20 +++++++++-------
arch/x86/kernel/cpu/common.c | 19 ++++++++-------
6 files changed, 32 insertions(+), 22 deletions(-)
diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
index c4dcdb3..3fe6511 100644
--- a/Documentation/admin-guide/hw-vuln/spectre.rst
+++ b/Documentation/admin-guide/hw-vuln/spectre.rst
@@ -610,9 +610,9 @@ kernel command line.
retpoline,generic Retpolines
retpoline,lfence LFENCE; indirect branch
retpoline,amd alias for retpoline,lfence
- eibrs enhanced IBRS
- eibrs,retpoline enhanced IBRS + Retpolines
- eibrs,lfence enhanced IBRS + LFENCE
+ eibrs Enhanced/Auto IBRS
+ eibrs,retpoline Enhanced/Auto IBRS + Retpolines
+ eibrs,lfence Enhanced/Auto IBRS + LFENCE
ibrs use IBRS to protect kernel
Not specifying this option is equivalent to
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 6cfa6e3..839fa0f 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5729,9 +5729,9 @@
retpoline,generic - Retpolines
retpoline,lfence - LFENCE; indirect branch
retpoline,amd - alias for retpoline,lfence
- eibrs - enhanced IBRS
- eibrs,retpoline - enhanced IBRS + Retpolines
- eibrs,lfence - enhanced IBRS + LFENCE
+ eibrs - Enhanced/Auto IBRS
+ eibrs,retpoline - Enhanced/Auto IBRS + Retpolines
+ eibrs,lfence - Enhanced/Auto IBRS + LFENCE
ibrs - use IBRS to protect kernel
Not specifying this option is equivalent to
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 86e98bd..06909dc 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -431,6 +431,7 @@
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */
#define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* "" LFENCE always serializing / synchronizes RDTSC */
#define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* "" Null Selector Clears Base */
+#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */
#define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* "" SMM_CTL MSR is not present */
/*
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index cb359d6..617b29a 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -25,6 +25,7 @@
#define _EFER_SVME 12 /* Enable virtualization */
#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */
#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */
+#define _EFER_AUTOIBRS 21 /* Enable Automatic IBRS */
#define EFER_SCE (1<<_EFER_SCE)
#define EFER_LME (1<<_EFER_LME)
@@ -33,6 +34,7 @@
#define EFER_SVME (1<<_EFER_SVME)
#define EFER_LMSLE (1<<_EFER_LMSLE)
#define EFER_FFXSR (1<<_EFER_FFXSR)
+#define EFER_AUTOIBRS (1<<_EFER_AUTOIBRS)
/* Intel MSRs. Some also available on other CPUs */
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 5f33704..b41486a 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1238,9 +1238,9 @@ static const char * const spectre_v2_strings[] = {
[SPECTRE_V2_NONE] = "Vulnerable",
[SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines",
[SPECTRE_V2_LFENCE] = "Mitigation: LFENCE",
- [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced IBRS",
- [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced IBRS + LFENCE",
- [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced IBRS + Retpolines",
+ [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS",
+ [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE",
+ [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines",
[SPECTRE_V2_IBRS] = "Mitigation: IBRS",
};
@@ -1309,7 +1309,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
!boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
- pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
+ pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
mitigation_options[i].option);
return SPECTRE_V2_CMD_AUTO;
}
@@ -1495,8 +1495,12 @@ static void __init spectre_v2_select_mitigation(void)
pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
if (spectre_v2_in_ibrs_mode(mode)) {
- x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
- update_spec_ctrl(x86_spec_ctrl_base);
+ if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
+ msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
+ } else {
+ x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+ update_spec_ctrl(x86_spec_ctrl_base);
+ }
}
switch (mode) {
@@ -1580,8 +1584,8 @@ static void __init spectre_v2_select_mitigation(void)
/*
* Retpoline protects the kernel, but doesn't protect firmware. IBRS
* and Enhanced IBRS protect firmware too, so enable IBRS around
- * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
- * enabled.
+ * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
+ * otherwise enabled.
*
* Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
* the user might select retpoline on the kernel command line and if
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e6bf9b1..62c73c5 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1229,8 +1229,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
- VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
+ VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
/* Zhaoxin Family 7 */
VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO),
@@ -1341,8 +1341,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
!cpu_has(c, X86_FEATURE_AMD_SSB_NO))
setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
- if (ia32_cap & ARCH_CAP_IBRS_ALL)
+ /*
+ * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
+ * flag and protect from vendor-specific bugs via the whitelist.
+ */
+ if ((ia32_cap & ARCH_CAP_IBRS_ALL) || cpu_has(c, X86_FEATURE_AUTOIBRS)) {
setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+ if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
+ !(ia32_cap & ARCH_CAP_PBRSB_NO))
+ setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
+ }
if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
!(ia32_cap & ARCH_CAP_MDS_NO)) {
@@ -1404,11 +1412,6 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_RETBLEED);
}
- if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
- !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
- !(ia32_cap & ARCH_CAP_PBRSB_NO))
- setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
-
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;
© 2016 - 2025 Red Hat, Inc.