[PATCH 10/16] x86/amd: Always probe and configure the masking MSRs

Andrew Cooper posted 16 patches 1 week, 4 days ago
[PATCH 10/16] x86/amd: Always probe and configure the masking MSRs
Posted by Andrew Cooper 1 week, 4 days ago
This allows the infrastructure to be reused for system-wide quirk/errata
adjustments.

Replace the call to ctxt_switch_levelling() with amd_ctxt_switch_masking()
instead.  The CPUID Faulting aspect is not interesting at this point in boot,
and we want to explicitly propagate the masking MSR defaults into APs.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Julian Vetter <julian.vetter@vates.tech>
CC: Teddy Astie <teddy.astie@vates.tech>
---
 xen/arch/x86/cpu/amd.c   | 15 +++++++++++----
 xen/arch/x86/cpu/cpu.h   |  1 +
 xen/arch/x86/cpu/hygon.c |  2 +-
 3 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 36fea2e0a299..e8daf7415bb0 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -162,7 +162,7 @@ static void __init noinline probe_masking_msrs(void)
  * parameter of NULL is used to context switch to the default host state (by
  * the cpu bringup-code, crash path, etc).
  */
-static void cf_check amd_ctxt_switch_masking(const struct vcpu *next)
+void cf_check amd_ctxt_switch_masking(const struct vcpu *next)
 {
 	struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
 	const struct domain *nextd = next ? next->domain : NULL;
@@ -242,9 +242,12 @@ static void __init amd_init_levelling(void)
 	    boot_cpu_has(X86_FEATURE_CPUID_USER_DIS)) {
 		expected_levelling_cap |= LCAP_faulting;
 		levelling_caps |= LCAP_faulting;
-		return;
 	}
 
+	/*
 +	 * Always probe for the MSRs too.  We reuse the infrastructure for
+	 * quirks/errata/etc during boot.
+	 */
 	probe_masking_msrs();
 
 	if ((levelling_caps & LCAP_1cd) == LCAP_1cd) {
@@ -299,7 +302,7 @@ static void __init amd_init_levelling(void)
 		       (uint32_t)cpuidmask_defaults._6c);
 	}
 
-	if (levelling_caps)
+	if (levelling_caps && !(levelling_caps & LCAP_faulting))
 		ctxt_switch_masking = amd_ctxt_switch_masking;
 }
 
@@ -1015,7 +1018,11 @@ static void cf_check init_amd(struct cpuinfo_x86 *c)
 	u32 l, h;
 	uint64_t value;
 
-	ctxt_switch_levelling(NULL);
+	/*
+	 * Reuse amd_ctxt_switch_masking() explicitly.  This propagates
 +	 * quirk/errata adjustments made during early_init_amd() into the APs.
+	 */
+	amd_ctxt_switch_masking(NULL);
 
 	amd_init_de_cfg(c);
 
diff --git a/xen/arch/x86/cpu/cpu.h b/xen/arch/x86/cpu/cpu.h
index d2d37d1d5eec..cd93e51755af 100644
--- a/xen/arch/x86/cpu/cpu.h
+++ b/xen/arch/x86/cpu/cpu.h
@@ -20,6 +20,7 @@ extern void detect_ht(struct cpuinfo_x86 *c);
 extern bool detect_extended_topology(struct cpuinfo_x86 *c);
 
 void cf_check early_init_amd(void);
+void cf_check amd_ctxt_switch_masking(const struct vcpu *next);
 void amd_log_freq(const struct cpuinfo_x86 *c);
 void amd_init_de_cfg(const struct cpuinfo_x86 *c);
 void amd_init_lfence_dispatch(void);
diff --git a/xen/arch/x86/cpu/hygon.c b/xen/arch/x86/cpu/hygon.c
index bb1624882499..3a04efef5028 100644
--- a/xen/arch/x86/cpu/hygon.c
+++ b/xen/arch/x86/cpu/hygon.c
@@ -32,7 +32,7 @@ static void cf_check init_hygon(struct cpuinfo_x86 *c)
 {
 	unsigned long long value;
 
-	ctxt_switch_levelling(NULL);
+	amd_ctxt_switch_masking(NULL);
 
 	amd_init_de_cfg(c);
 
-- 
2.39.5


Re: [PATCH 10/16] x86/amd: Always probe and configure the masking MSRs
Posted by Jan Beulich 1 week, 3 days ago
On 26.01.2026 18:53, Andrew Cooper wrote:
> This allows the infrastructure to reused for system-wide quirk/errata
> adjustments.
> 
> Replace the call to ctxt_switch_levelling() with amd_ctxt_switch_masking()
> instead.  The CPUID Faulting aspect is not interesting at this point in boot,
> and we want to explicitly propagate the masking MSR defaults into APs.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>
with two comment nits:

> --- a/xen/arch/x86/cpu/amd.c
> +++ b/xen/arch/x86/cpu/amd.c
> @@ -162,7 +162,7 @@ static void __init noinline probe_masking_msrs(void)
>   * parameter of NULL is used to context switch to the default host state (by
>   * the cpu bringup-code, crash path, etc).
>   */
> -static void cf_check amd_ctxt_switch_masking(const struct vcpu *next)
> +void cf_check amd_ctxt_switch_masking(const struct vcpu *next)
>  {
>  	struct cpuidmasks *these_masks = &this_cpu(cpuidmasks);
>  	const struct domain *nextd = next ? next->domain : NULL;
> @@ -242,9 +242,12 @@ static void __init amd_init_levelling(void)
>  	    boot_cpu_has(X86_FEATURE_CPUID_USER_DIS)) {
>  		expected_levelling_cap |= LCAP_faulting;
>  		levelling_caps |= LCAP_faulting;
> -		return;
>  	}
>  
> +	/*
> +	 * Always probe for the MSRs too.  We reuse the infrastruture for
> +	 * quirks/errata/etc during boot.
> +	 */
>  	probe_masking_msrs();

This isn't just about boot, but also soft-onlining of CPUs and S3 resume.

> @@ -1015,7 +1018,11 @@ static void cf_check init_amd(struct cpuinfo_x86 *c)
>  	u32 l, h;
>  	uint64_t value;
>  
> -	ctxt_switch_levelling(NULL);
> +	/*
> +	 * Reuse amd_ctxt_switch_masking() explicitly.  This propagates
> +	 * quirk/errata adjustments made duing early_init_amd() into the APs.
> +	 */
> +	amd_ctxt_switch_masking(NULL);

Have the same comment also ...

> --- a/xen/arch/x86/cpu/hygon.c
> +++ b/xen/arch/x86/cpu/hygon.c
> @@ -32,7 +32,7 @@ static void cf_check init_hygon(struct cpuinfo_x86 *c)
>  {
>  	unsigned long long value;
>  
> -	ctxt_switch_levelling(NULL);
> +	amd_ctxt_switch_masking(NULL);

... here?

Jan