The current md_clear routines duplicate a lot of code and have to be
called twice: if any one of these mitigations gets enabled they all get
enabled, since they are all implemented with the same VERW instruction.
This leads to extra complexity.

The only piece that really changes between the first call of
*_select_mitigation() and the second is X86_FEATURE_CLEAR_CPU_BUF
being set. Determine whether this feature should be set prior to calling
the *_select_mitigation() routines. This not only means those functions
get called only once, it also simplifies them.
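Roughly, the restructured selection flow looks like this (a condensed
sketch of the hunks below; command-line parsing, early exits, SMT
handling and status printing are omitted):

static void __init md_clear_select_mitigation(void)
{
	/* Each per-bug selection now runs exactly once. */
	mds_select_mitigation();
	taa_select_mitigation();
	rfds_select_mitigation();

	/* Decide up front whether VERW buffer clearing is needed. */
	if (mds_mitigation == MDS_MITIGATION_VERW ||
	    taa_mitigation == TAA_MITIGATION_VERW ||
	    rfds_mitigation == RFDS_MITIGATION_VERW)
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

	/* MMIO depends on X86_FEATURE_CLEAR_CPU_BUF, so it goes last. */
	mmio_select_mitigation();
}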
Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
Reviewed-by: Nikolay Borisov <nik.borisov@suse.com>
---
arch/x86/include/asm/processor.h | 2 +-
arch/x86/kernel/cpu/bugs.c | 206 ++++++++++++-------------------
2 files changed, 81 insertions(+), 127 deletions(-)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 4a686f0e5dbf..c855cd1a6d57 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -736,7 +736,7 @@ extern enum l1tf_mitigations l1tf_mitigation;
enum mds_mitigations {
MDS_MITIGATION_OFF,
- MDS_MITIGATION_FULL,
+ MDS_MITIGATION_VERW,
MDS_MITIGATION_VMWERV,
};
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index b26b3b554330..bbf3a03435f8 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -41,7 +41,6 @@ static void __init spectre_v2_user_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
-static void __init md_clear_update_mitigation(void);
static void __init md_clear_select_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init mmio_select_mitigation(void);
@@ -234,32 +233,23 @@ static void x86_amd_ssb_disable(void)
/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init =
- IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_FULL : MDS_MITIGATION_OFF;
+ IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_VERW : MDS_MITIGATION_OFF;
static bool mds_nosmt __ro_after_init = false;
static const char * const mds_strings[] = {
[MDS_MITIGATION_OFF] = "Vulnerable",
- [MDS_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
+ [MDS_MITIGATION_VERW] = "Mitigation: Clear CPU buffers",
[MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
};
static void __init mds_select_mitigation(void)
{
- if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
+ if (!boot_cpu_has_bug(X86_BUG_MDS))
mds_mitigation = MDS_MITIGATION_OFF;
- return;
- }
-
- if (mds_mitigation == MDS_MITIGATION_FULL) {
- if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
- mds_mitigation = MDS_MITIGATION_VMWERV;
-
- setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
-
- if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
- (mds_nosmt || cpu_mitigations_auto_nosmt()))
- cpu_smt_disable(false);
- }
+ else if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
+ mds_mitigation = MDS_MITIGATION_VERW;
+ else
+ mds_mitigation = MDS_MITIGATION_VMWERV;
}
static int __init mds_cmdline(char *str)
@@ -273,9 +263,9 @@ static int __init mds_cmdline(char *str)
if (!strcmp(str, "off"))
mds_mitigation = MDS_MITIGATION_OFF;
else if (!strcmp(str, "full"))
- mds_mitigation = MDS_MITIGATION_FULL;
+ mds_mitigation = MDS_MITIGATION_VERW;
else if (!strcmp(str, "full,nosmt")) {
- mds_mitigation = MDS_MITIGATION_FULL;
+ mds_mitigation = MDS_MITIGATION_VERW;
mds_nosmt = true;
}
@@ -316,25 +306,12 @@ static void __init taa_select_mitigation(void)
if (!boot_cpu_has(X86_FEATURE_RTM)) {
taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
return;
- }
-
- if (cpu_mitigations_off()) {
- taa_mitigation = TAA_MITIGATION_OFF;
- return;
- }
-
- /*
- * TAA mitigation via VERW is turned off if both
- * tsx_async_abort=off and mds=off are specified.
- */
- if (taa_mitigation == TAA_MITIGATION_OFF &&
- mds_mitigation == MDS_MITIGATION_OFF)
+ } else if (!boot_cpu_has(X86_FEATURE_MD_CLEAR)) {
+ taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
return;
-
- if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
+ } else {
taa_mitigation = TAA_MITIGATION_VERW;
- else
- taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
+ }
/*
* VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
@@ -348,18 +325,6 @@ static void __init taa_select_mitigation(void)
if ( (x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
!(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
-
- /*
- * TSX is enabled, select alternate mitigation for TAA which is
- * the same as MDS. Enable MDS static branch to clear CPU buffers.
- *
- * For guests that can't determine whether the correct microcode is
- * present on host, enable the mitigation for UCODE_NEEDED as well.
- */
- setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
-
- if (taa_nosmt || cpu_mitigations_auto_nosmt())
- cpu_smt_disable(false);
}
static int __init tsx_async_abort_parse_cmdline(char *str)
@@ -405,24 +370,11 @@ static const char * const mmio_strings[] = {
static void __init mmio_select_mitigation(void)
{
- if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
- boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
- cpu_mitigations_off()) {
+ if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
mmio_mitigation = MMIO_MITIGATION_OFF;
return;
}
- if (mmio_mitigation == MMIO_MITIGATION_OFF)
- return;
-
- /*
- * Enable CPU buffer clear mitigation for host and VMM, if also affected
- * by MDS or TAA. Otherwise, enable mitigation for VMM only.
- */
- if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
- boot_cpu_has(X86_FEATURE_RTM)))
- setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
-
/*
* X86_FEATURE_CLEAR_CPU_BUF could be enabled by other VERW based
* mitigations, disable KVM-only mitigation in that case.
@@ -454,9 +406,6 @@ static void __init mmio_select_mitigation(void)
mmio_mitigation = MMIO_MITIGATION_VERW;
else
mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
-
- if (mmio_nosmt || cpu_mitigations_auto_nosmt())
- cpu_smt_disable(false);
}
static int __init mmio_stale_data_parse_cmdline(char *str)
@@ -501,17 +450,12 @@ static const char * const rfds_strings[] = {
static void __init rfds_select_mitigation(void)
{
- if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) {
+ if (!boot_cpu_has_bug(X86_BUG_RFDS))
rfds_mitigation = RFDS_MITIGATION_OFF;
- return;
- }
- if (rfds_mitigation == RFDS_MITIGATION_OFF)
- return;
-
- if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
- setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
- else
+ else if (!(x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR))
rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
+ else
+ rfds_mitigation = RFDS_MITIGATION_VERW;
}
static __init int rfds_parse_cmdline(char *str)
@@ -534,52 +478,12 @@ early_param("reg_file_data_sampling", rfds_parse_cmdline);
#undef pr_fmt
#define pr_fmt(fmt) "" fmt
-static void __init md_clear_update_mitigation(void)
+static bool __init cpu_bug_needs_verw(void)
{
- if (cpu_mitigations_off())
- return;
-
- if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
- goto out;
-
- /*
- * X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
- * Stale Data mitigation, if necessary.
- */
- if (mds_mitigation == MDS_MITIGATION_OFF &&
- boot_cpu_has_bug(X86_BUG_MDS)) {
- mds_mitigation = MDS_MITIGATION_FULL;
- mds_select_mitigation();
- }
- if (taa_mitigation == TAA_MITIGATION_OFF &&
- boot_cpu_has_bug(X86_BUG_TAA)) {
- taa_mitigation = TAA_MITIGATION_VERW;
- taa_select_mitigation();
- }
- /*
- * MMIO_MITIGATION_OFF is not checked here so that mmio_stale_data_clear
- * gets updated correctly as per X86_FEATURE_CLEAR_CPU_BUF state.
- */
- if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
- mmio_mitigation = MMIO_MITIGATION_VERW;
- mmio_select_mitigation();
- }
- if (rfds_mitigation == RFDS_MITIGATION_OFF &&
- boot_cpu_has_bug(X86_BUG_RFDS)) {
- rfds_mitigation = RFDS_MITIGATION_VERW;
- rfds_select_mitigation();
- }
-out:
- if (boot_cpu_has_bug(X86_BUG_MDS))
- pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
- if (boot_cpu_has_bug(X86_BUG_TAA))
- pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
- if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
- pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
- else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
- pr_info("MMIO Stale Data: Unknown: No mitigations\n");
- if (boot_cpu_has_bug(X86_BUG_RFDS))
- pr_info("Register File Data Sampling: %s\n", rfds_strings[rfds_mitigation]);
+ return boot_cpu_has_bug(X86_BUG_MDS) ||
+ boot_cpu_has_bug(X86_BUG_TAA) ||
+ boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
+ boot_cpu_has_bug(X86_BUG_RFDS);
}
static void __init verw_mitigations_check(void)
@@ -599,20 +503,70 @@ static void __init verw_mitigations_check(void)
}
}
-static void __init md_clear_select_mitigation(void)
+static bool __init verw_mitigations_disabled(void)
{
verw_mitigations_check();
+ /*
+ * TODO: Create a single mitigation variable that will allow for setting
+ * the location of the mitigation, i.e.:
+ *
+ * kernel->user
+ * kvm->guest
+ * kvm->guest if device passthrough
+ * kernel->idle
+ */
+ return (mds_mitigation == MDS_MITIGATION_OFF &&
+ taa_mitigation == TAA_MITIGATION_OFF &&
+ mmio_mitigation == MMIO_MITIGATION_OFF &&
+ rfds_mitigation == RFDS_MITIGATION_OFF);
+}
+
+static void __init md_clear_select_mitigation(void)
+{
+ if (verw_mitigations_disabled())
+ goto out;
+
+ if (!cpu_bug_needs_verw() || cpu_mitigations_off()) {
+ mds_mitigation = MDS_MITIGATION_OFF;
+ taa_mitigation = TAA_MITIGATION_OFF;
+ mmio_mitigation = MMIO_MITIGATION_OFF;
+ rfds_mitigation = RFDS_MITIGATION_OFF;
+ goto out;
+ }
+
mds_select_mitigation();
taa_select_mitigation();
- mmio_select_mitigation();
rfds_select_mitigation();
+ if (mds_mitigation == MDS_MITIGATION_VERW ||
+ taa_mitigation == TAA_MITIGATION_VERW ||
+ rfds_mitigation == RFDS_MITIGATION_VERW)
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+
/*
- * As these mitigations are inter-related and rely on VERW instruction
- * to clear the microarchitural buffers, update and print their status
- * after mitigation selection is done for each of these vulnerabilities.
+ * The MMIO mitigation has a dependency on X86_FEATURE_CLEAR_CPU_BUF so
+ * this must be called after it gets set
*/
- md_clear_update_mitigation();
+ mmio_select_mitigation();
+
+ if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
+ (mds_nosmt || cpu_mitigations_auto_nosmt()))
+ cpu_smt_disable(false);
+
+ if (taa_nosmt || mmio_nosmt || cpu_mitigations_auto_nosmt())
+ cpu_smt_disable(false);
+
+out:
+ if (boot_cpu_has_bug(X86_BUG_MDS))
+ pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
+ if (boot_cpu_has_bug(X86_BUG_TAA))
+ pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
+ if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+ pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
+ else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+ pr_info("MMIO Stale Data: Unknown: No mitigations\n");
+ if (boot_cpu_has_bug(X86_BUG_RFDS))
+ pr_info("Register File Data Sampling: %s\n", rfds_strings[rfds_mitigation]);
}
#undef pr_fmt
@@ -1974,7 +1928,7 @@ void cpu_bugs_smt_update(void)
}
switch (mds_mitigation) {
- case MDS_MITIGATION_FULL:
+ case MDS_MITIGATION_VERW:
case MDS_MITIGATION_VMWERV:
if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
pr_warn_once(MDS_MSG_SMT);
--
2.25.1
On Mon, Oct 28, 2024 at 04:50:35PM -0700, Daniel Sneddon wrote:
> @@ -599,20 +503,70 @@ static void __init verw_mitigations_check(void)
> }
> }
>
> -static void __init md_clear_select_mitigation(void)
> +static bool __init verw_mitigations_disabled(void)
> {
> verw_mitigations_check();
> + /*
> + * TODO: Create a single mitigation variable that will allow for setting

A patch which introduces a TODO is basically telling me, it is not ready to go
anywhere yet...

> + * the location of the mitigation, i.e.:
> + *
> + * kernel->user
> + * kvm->guest
> + * kvm->guest if device passthrough
> + * kernel->idle
> + */
> + return (mds_mitigation == MDS_MITIGATION_OFF &&
> + taa_mitigation == TAA_MITIGATION_OFF &&
> + mmio_mitigation == MMIO_MITIGATION_OFF &&
> + rfds_mitigation == RFDS_MITIGATION_OFF);

This should be used inside verw_mitigations_check() instead of repeated here,
no?

Also, pls call verw_mitigations_check() "check_verw_mitigations" - the name
should start with a verb.

Actually, you can merge verw_mitigations_check() and
verw_mitigations_disabled(). Please do a *minimal* patch when cleaning this up
- bugs.c is horrible. It should not get worse.

What could also help is splitting this patch - it is hard to review as it
is...

Thx.

--
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette
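For reference, a merged helper along the lines being suggested here
might look roughly like this (hypothetical sketch, not taken from any
posted patch; the body of the existing verw_mitigations_check() is
elided):

static bool __init check_verw_mitigations(void)
{
	/* ... former verw_mitigations_check() cmdline sanity checks ... */

	/* Report whether every VERW based mitigation ended up disabled. */
	return mds_mitigation == MDS_MITIGATION_OFF &&
	       taa_mitigation == TAA_MITIGATION_OFF &&
	       mmio_mitigation == MMIO_MITIGATION_OFF &&
	       rfds_mitigation == RFDS_MITIGATION_OFF;
}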
On 10/29/24 04:37, Borislav Petkov wrote:
> On Mon, Oct 28, 2024 at 04:50:35PM -0700, Daniel Sneddon wrote:
>> @@ -599,20 +503,70 @@ static void __init verw_mitigations_check(void)
>> }
>> }
>>
>> -static void __init md_clear_select_mitigation(void)
>> +static bool __init verw_mitigations_disabled(void)
>> {
>> verw_mitigations_check();
>> + /*
>> + * TODO: Create a single mitigation variable that will allow for setting
>
> A patch which introduces a TODO is basically telling me, it is not ready to go
> anywhere yet...
>
>> + * the location of the mitigation, i.e.:
>> + *
>> + * kernel->user
>> + * kvm->guest
>> + * kvm->guest if device passthrough
>> + * kernel->idle
>> + */
>> + return (mds_mitigation == MDS_MITIGATION_OFF &&
>> + taa_mitigation == TAA_MITIGATION_OFF &&
>> + mmio_mitigation == MMIO_MITIGATION_OFF &&
>> + rfds_mitigation == RFDS_MITIGATION_OFF);
>
> This should be used inside verw_mitigations_check() instead of repeated here,
> no?
>
> Also, pls call verw_mitigations_check() "check_verw_mitigations" - the name
> should start with a verb.
>
> Actually, you can merge verw_mitigations_check() and
> verw_mitigations_disabled(). Please do a *minimal* patch when cleaning this up
> - bugs.c is horrible. It should not get worse.
>
I'll merge those two.

> What could also help is splitting this patch - it is hard to review as it
> is...
>
Sure, I'll split this up as much as possible.

> Thx.
>
On Tue, Oct 29, 2024 at 07:40:28AM -0700, Daniel Sneddon wrote:
> Sure, I'll split this up as much as possible.

Actually, thinking about this more and looking at David's rework:

https://lore.kernel.org/r/20240912190857.235849-1-david.kaplan@amd.com

his basically is achieving what you're doing - a post-everything routine which
selects the final mitigation strategy once all the mitigation options have
been parsed and evaluated.

So I'm wondering if we should simply take his directly...

He removes that md_clear* function:

https://lore.kernel.org/r/20240912190857.235849-8-david.kaplan@amd.com

in favor of doing the final selection in the ->apply* functions and keeping
each mitigation functions simple.

Yours does this in a single function.

Practically speaking, the end result is the same.

Hmm...

--
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette
On 10/29/24 08:00, Borislav Petkov wrote:
> On Tue, Oct 29, 2024 at 07:40:28AM -0700, Daniel Sneddon wrote:
>> Sure, I'll split this up as much as possible.
>
> Actually, thinking about this more and looking at David's rework:
>
> https://lore.kernel.org/r/20240912190857.235849-1-david.kaplan@amd.com
>
> his basically is achieving what you're doing - a post-everything routine which
> selects the final mitigation strategy once all the mitigation options have
> been parsed and evaluated.
>
> So I'm wondering if we should simply take his directly...
>
> He removes that md_clear* function:
>
> https://lore.kernel.org/r/20240912190857.235849-8-david.kaplan@amd.com
>
> in favor of doing the final selection in the ->apply* functions and keeping
> each mitigation functions simple.
>
> Yours does this in a single function.
>
> Practically speaking, the end result is the same.
>
> Hmm...
>
I really like the attack vector idea David is using. I suspect people really
care about "protect my kernel from bad users" or "protect my host vm from
guests" more than "protect me from mds and rfds."

I was trying to get rid of the need to do a call to any kind of update
function where he took the existing function and split it into one for each
mitigation that needs it. Like you said, different approach same end result
really.
On Tue, Oct 29, 2024 at 08:33:51AM -0700, Daniel Sneddon wrote:
> I really like the attack vector idea David is using. I suspect people really
> care about "protect my kernel from bad users" or "protect my host vm from
> guests" more than "protect me from mds and rfds."

Yeah, exactly!

> I was trying to get rid of the need to do a call to any kind of update
> function where he took the existing function and split it into one for each
> mitigation that needs it. Like you said, different approach same end result
> really.

Right.

Ok, let's concentrate on David's set, then, so that we don't do
unnecessary/doubled work. I'd appreciate it if you took a look at the Intel's
side of things there but please wait until he sends a new version next week.

I guess if all agree with the final result, we could look into taking it for
6.14 or so...

Thx.

--
Regards/Gruss,
    Boris.

https://people.kernel.org/tglx/notes-about-netiquette
On 10/29/24 09:37, Borislav Petkov wrote:
> On Tue, Oct 29, 2024 at 08:33:51AM -0700, Daniel Sneddon wrote:
>> I really like the attack vector idea David is using. I suspect people really
>> care about "protect my kernel from bad users" or "protect my host vm from
>> guests" more than "protect me from mds and rfds."
>
> Yeah, exactly!
>
>> I was trying to get rid of the need to do a call to any kind of update
>> function where he took the existing function and split it into one for each
>> mitigation that needs it. Like you said, different approach same end result
>> really.
>
> Right.
>
> Ok, let's concentrate on David's set, then, so that we don't do
> unnecessary/doubled work. I'd appreciate it if you took a look at the Intel's
> side of things there but please wait until he sends a new version next week.

Will do!

> I guess if all agree with the final result, we could look into taking it for
> 6.14 or so...
>
> Thx.
>