xen/arch/x86/cpu/mcheck/mce.h | 5 +++++ xen/arch/x86/cpu/mcheck/mce_intel.c | 16 ++++++++++++++++ xen/arch/x86/cpu/mcheck/vmce.c | 2 ++ xen/arch/x86/include/asm/msr-index.h | 3 +++ xen/arch/x86/msr.c | 2 ++ 5 files changed, 28 insertions(+)
Windows Server 2019 Essentials will unconditionally attempt to read the
P5_MC_ADDR MSR at boot and will throw a BSOD if a #GP fault is injected
in response to that read.
Fix this by mapping MSR_P5_MC_{ADDR,TYPE} to
MSR_IA32_MCi_{ADDR,STATUS}, mirroring what hardware does according to
the Intel SDM section "Mapping of the Pentium Processor Machine-Check
Errors to the Machine-Check Architecture".
Reported-by: Steffen Einsle <einsle@phptrix.de>
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Changes since v1:
- Implement in vmce_rdmsr.
---
xen/arch/x86/cpu/mcheck/mce.h | 5 +++++
xen/arch/x86/cpu/mcheck/mce_intel.c | 16 ++++++++++++++++
xen/arch/x86/cpu/mcheck/vmce.c | 2 ++
xen/arch/x86/include/asm/msr-index.h | 3 +++
xen/arch/x86/msr.c | 2 ++
5 files changed, 28 insertions(+)
diff --git a/xen/arch/x86/cpu/mcheck/mce.h b/xen/arch/x86/cpu/mcheck/mce.h
index 535d0abf8f..7c6df6df7c 100644
--- a/xen/arch/x86/cpu/mcheck/mce.h
+++ b/xen/arch/x86/cpu/mcheck/mce.h
@@ -169,6 +169,11 @@ static inline int mce_vendor_bank_msr(const struct vcpu *v, uint32_t msr)
if (msr >= MSR_IA32_MC0_CTL2 &&
msr < MSR_IA32_MCx_CTL2(v->arch.vmce.mcg_cap & MCG_CAP_COUNT) )
return 1;
+
+ case X86_VENDOR_CENTAUR:
+ case X86_VENDOR_SHANGHAI:
+ if (msr == MSR_P5_MC_ADDR || msr == MSR_P5_MC_TYPE)
+ return 1;
break;
case X86_VENDOR_AMD:
diff --git a/xen/arch/x86/cpu/mcheck/mce_intel.c b/xen/arch/x86/cpu/mcheck/mce_intel.c
index 50198e0c29..63fedff418 100644
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -1008,8 +1008,24 @@ int vmce_intel_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
int vmce_intel_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
{
+ const struct cpuid_policy *cp = v->domain->arch.cpuid;
unsigned int bank = msr - MSR_IA32_MC0_CTL2;
+ switch ( msr )
+ {
+ case MSR_P5_MC_ADDR:
+ /* Bank 0 is used for the 'bank 0 quirk' on older processors. */
+ *val = v->arch.vmce.bank[1].mci_addr;
+ return 1;
+
+ case MSR_P5_MC_TYPE:
+ *val = v->arch.vmce.bank[1].mci_status;
+ return 1;
+ }
+
+ if ( cp->x86_vendor & (X86_VENDOR_CENTAUR | X86_VENDOR_SHANGHAI) )
+ return 0;
+
if ( bank < GUEST_MC_BANK_NUM )
{
*val = v->arch.vmce.bank[bank].mci_ctl2;
diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 458120f9ad..af30811afd 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -150,6 +150,8 @@ static int bank_mce_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
default:
switch ( boot_cpu_data.x86_vendor )
{
+ case X86_VENDOR_CENTAUR:
+ case X86_VENDOR_SHANGHAI:
case X86_VENDOR_INTEL:
ret = vmce_intel_rdmsr(v, msr, val);
break;
diff --git a/xen/arch/x86/include/asm/msr-index.h b/xen/arch/x86/include/asm/msr-index.h
index 3e038db618..31964b88af 100644
--- a/xen/arch/x86/include/asm/msr-index.h
+++ b/xen/arch/x86/include/asm/msr-index.h
@@ -15,6 +15,9 @@
* abbreviated name. Exceptions will be considered on a case-by-case basis.
*/
+#define MSR_P5_MC_ADDR 0
+#define MSR_P5_MC_TYPE 0x00000001
+
#define MSR_APIC_BASE 0x0000001b
#define APIC_BASE_BSP (_AC(1, ULL) << 8)
#define APIC_BASE_EXTD (_AC(1, ULL) << 10)
diff --git a/xen/arch/x86/msr.c b/xen/arch/x86/msr.c
index a1e268eea9..d87317e989 100644
--- a/xen/arch/x86/msr.c
+++ b/xen/arch/x86/msr.c
@@ -283,6 +283,8 @@ int guest_rdmsr(struct vcpu *v, uint32_t msr, uint64_t *val)
*val = msrs->misc_features_enables.raw;
break;
+ case MSR_P5_MC_ADDR:
+ case MSR_P5_MC_TYPE:
case MSR_IA32_MCG_CAP ... MSR_IA32_MCG_CTL: /* 0x179 -> 0x17b */
case MSR_IA32_MCx_CTL2(0) ... MSR_IA32_MCx_CTL2(31): /* 0x280 -> 0x29f */
case MSR_IA32_MCx_CTL(0) ... MSR_IA32_MCx_MISC(31): /* 0x400 -> 0x47f */
--
2.35.1
On 28.04.2022 11:13, Roger Pau Monne wrote: > --- a/xen/arch/x86/cpu/mcheck/mce.h > +++ b/xen/arch/x86/cpu/mcheck/mce.h > @@ -169,6 +169,11 @@ static inline int mce_vendor_bank_msr(const struct vcpu *v, uint32_t msr) > if (msr >= MSR_IA32_MC0_CTL2 && > msr < MSR_IA32_MCx_CTL2(v->arch.vmce.mcg_cap & MCG_CAP_COUNT) ) > return 1; > + > + case X86_VENDOR_CENTAUR: > + case X86_VENDOR_SHANGHAI: > + if (msr == MSR_P5_MC_ADDR || msr == MSR_P5_MC_TYPE) > + return 1; > break; You want to have some fall-through annotation there, perhaps preferably the pseudo-keyword one. > --- a/xen/arch/x86/cpu/mcheck/mce_intel.c > +++ b/xen/arch/x86/cpu/mcheck/mce_intel.c > @@ -1008,8 +1008,24 @@ int vmce_intel_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) > > int vmce_intel_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) > { > + const struct cpuid_policy *cp = v->domain->arch.cpuid; > unsigned int bank = msr - MSR_IA32_MC0_CTL2; > > + switch ( msr ) > + { > + case MSR_P5_MC_ADDR: > + /* Bank 0 is used for the 'bank 0 quirk' on older processors. */ > + *val = v->arch.vmce.bank[1].mci_addr; > + return 1; > + > + case MSR_P5_MC_TYPE: > + *val = v->arch.vmce.bank[1].mci_status; > + return 1; > + } Could I ask you to add a reference to vcpu_fill_mc_msrs() in the comment? > + if ( cp->x86_vendor & (X86_VENDOR_CENTAUR | X86_VENDOR_SHANGHAI) ) > + return 0; I think this better would be !(cp->x86_vendor & X86_VENDOR_INTEL). Jan
On Thu, Apr 28, 2022 at 12:39:19PM +0200, Jan Beulich wrote: > On 28.04.2022 11:13, Roger Pau Monne wrote: > > --- a/xen/arch/x86/cpu/mcheck/mce_intel.c > > +++ b/xen/arch/x86/cpu/mcheck/mce_intel.c > > @@ -1008,8 +1008,24 @@ int vmce_intel_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val) > > > > int vmce_intel_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val) > > { > > + const struct cpuid_policy *cp = v->domain->arch.cpuid; > > unsigned int bank = msr - MSR_IA32_MC0_CTL2; > > > > + switch ( msr ) > > + { > > + case MSR_P5_MC_ADDR: > > + /* Bank 0 is used for the 'bank 0 quirk' on older processors. */ > > + *val = v->arch.vmce.bank[1].mci_addr; > > + return 1; > > + > > + case MSR_P5_MC_TYPE: > > + *val = v->arch.vmce.bank[1].mci_status; > > + return 1; > > + } > > Could I ask you to add a reference to vcpu_fill_mc_msrs() in the comment? Sure. Thanks, Roger.
© 2016 - 2024 Red Hat, Inc.