 xen/arch/x86/cpu/mcheck/amd_nonfatal.c |  2 +-
 xen/arch/x86/cpu/mcheck/mcaction.c     |  2 +-
 xen/arch/x86/cpu/mcheck/mce.c          | 30 +++++++++++++-------------
 xen/arch/x86/cpu/mcheck/mce.h          |  2 +-
 xen/arch/x86/cpu/mcheck/mce_amd.c      | 16 +++++++-------
 xen/arch/x86/cpu/mcheck/mce_intel.c    |  5 +----
 xen/arch/x86/cpu/mcheck/non-fatal.c    |  2 +-
 xen/arch/x86/cpu/mcheck/vmce.c         |  8 +++----
 8 files changed, 32 insertions(+), 35 deletions(-)
Rename the following fields of struct cpuinfo_x86 to match their Linux
counterparts:

  .x86        => .family
  .x86_vendor => .vendor
  .x86_model  => .model
  .x86_mask   => .stepping

No functional change intended.
This work is part of making Xen safe for Intel family 18/19.
Signed-off-by: Kevin Lampis <kevin.lampis@citrix.com>
---
In xen/arch/x86/cpu/mcheck/mce.c: mcheck_init(...)
Xen only calls `intel_mcheck_init(...)` if the family is 6 or 15, whereas
Linux only calls `mce_intel_feature_init(...)` if the family is != 5.
Do we need to extend this switch statement in `mcheck_init(...)` to also
cover families 18 and 19?
In xen/arch/x86/cpu/mcheck/mce.c: mce_firstbank(...)
The check
c->family == 6 && c->vendor == X86_VENDOR_INTEL && c->model < 0x1a
could be rewritten as
c->vfm >= INTEL_PENTIUM_PRO && c->vfm < INTEL_NEHALEM_EP
I don't know whether that would be better.

NOTE(review): the mce_is_broadcast() hunk in mce_intel.c already performs
such a rewrite (`c->vfm >= INTEL_CORE_YONAH`). That is not obviously
equivalent to the old `vendor == INTEL && family == 6 && model >= 0xe`
test — Intel CPUs with family > 6 (e.g. family 15, and the new 18/19)
now also compare greater — so please confirm this is intentional, since
the commit message claims no functional change.
---
xen/arch/x86/cpu/mcheck/amd_nonfatal.c | 2 +-
xen/arch/x86/cpu/mcheck/mcaction.c | 2 +-
xen/arch/x86/cpu/mcheck/mce.c | 30 +++++++++++++-------------
xen/arch/x86/cpu/mcheck/mce.h | 2 +-
xen/arch/x86/cpu/mcheck/mce_amd.c | 16 +++++++-------
xen/arch/x86/cpu/mcheck/mce_intel.c | 5 +----
xen/arch/x86/cpu/mcheck/non-fatal.c | 2 +-
xen/arch/x86/cpu/mcheck/vmce.c | 8 +++----
8 files changed, 32 insertions(+), 35 deletions(-)
diff --git a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
index 7d48c9ab5f..fb52639e13 100644
--- a/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
+++ b/xen/arch/x86/cpu/mcheck/amd_nonfatal.c
@@ -191,7 +191,7 @@ static void cf_check mce_amd_work_fn(void *data)
void __init amd_nonfatal_mcheck_init(struct cpuinfo_x86 *c)
{
- if (!(c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)))
+ if (!(c->vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)))
return;
/* Assume we are on K8 or newer AMD or Hygon CPU here */
diff --git a/xen/arch/x86/cpu/mcheck/mcaction.c b/xen/arch/x86/cpu/mcheck/mcaction.c
index bf7a0de965..236424569a 100644
--- a/xen/arch/x86/cpu/mcheck/mcaction.c
+++ b/xen/arch/x86/cpu/mcheck/mcaction.c
@@ -101,7 +101,7 @@ mc_memerr_dhandler(struct mca_binfo *binfo,
* not always precise. In that case, fallback to broadcast.
*/
global->mc_domid != bank->mc_domid ||
- (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ (boot_cpu_data.vendor == X86_VENDOR_INTEL &&
(!(global->mc_gstatus & MCG_STATUS_LMCE) ||
!(d->vcpu[mc_vcpuid]->arch.vmce.mcg_ext_ctl &
MCG_EXT_CTL_LMCE_EN))) )
diff --git a/xen/arch/x86/cpu/mcheck/mce.c b/xen/arch/x86/cpu/mcheck/mce.c
index 9a91807cfb..10e826e3a6 100644
--- a/xen/arch/x86/cpu/mcheck/mce.c
+++ b/xen/arch/x86/cpu/mcheck/mce.c
@@ -334,7 +334,7 @@ mcheck_mca_logout(enum mca_source who, struct mca_banks *bankmask,
mca_init_global(mc_flags, mig);
/* A hook here to get global extended msrs */
if ( IS_ENABLED(CONFIG_INTEL) &&
- boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ boot_cpu_data.vendor == X86_VENDOR_INTEL )
intel_get_extended_msrs(mig, mci);
}
}
@@ -564,8 +564,8 @@ bool mce_available(const struct cpuinfo_x86 *c)
*/
unsigned int mce_firstbank(struct cpuinfo_x86 *c)
{
- return c->x86 == 6 &&
- c->x86_vendor == X86_VENDOR_INTEL && c->x86_model < 0x1a;
+ return c->family == 6 &&
+ c->vendor == X86_VENDOR_INTEL && c->model < 0x1a;
}
static int show_mca_info(int inited, struct cpuinfo_x86 *c)
@@ -596,7 +596,7 @@ static int show_mca_info(int inited, struct cpuinfo_x86 *c)
case mcheck_amd_famXX:
case mcheck_hygon:
printk("%s%s Fam%xh machine check reporting enabled\n",
- prefix, type_str[inited], c->x86);
+ prefix, type_str[inited], c->family);
break;
case mcheck_none:
@@ -766,7 +766,7 @@ void mcheck_init(struct cpuinfo_x86 *c, bool bsp)
else if ( cpu_bank_alloc(cpu) )
panic("Insufficient memory for MCE bank allocations\n");
- switch ( c->x86_vendor )
+ switch ( c->vendor )
{
#ifdef CONFIG_AMD
case X86_VENDOR_AMD:
@@ -777,7 +777,7 @@ void mcheck_init(struct cpuinfo_x86 *c, bool bsp)
#ifdef CONFIG_INTEL
case X86_VENDOR_INTEL:
- switch ( c->x86 )
+ switch ( c->family )
{
case 6:
case 15:
@@ -882,7 +882,7 @@ static void x86_mcinfo_apei_save(
memset(&m, 0, sizeof(struct mce));
m.cpu = mc_global->mc_coreid;
- m.cpuvendor = xen2linux_vendor(boot_cpu_data.x86_vendor);
+ m.cpuvendor = xen2linux_vendor(boot_cpu_data.vendor);
m.cpuid = cpuid_eax(1);
m.socketid = mc_global->mc_socketid;
m.apicid = mc_global->mc_apicid;
@@ -983,10 +983,10 @@ static void cf_check __maybe_unused do_mc_get_cpu_info(void *v)
&xcp->mc_apicid, &xcp->mc_ncores,
&xcp->mc_ncores_active, &xcp->mc_nthreads);
xcp->mc_cpuid_level = c->cpuid_level;
- xcp->mc_family = c->x86;
- xcp->mc_vendor = xen2linux_vendor(c->x86_vendor);
- xcp->mc_model = c->x86_model;
- xcp->mc_step = c->x86_mask;
+ xcp->mc_family = c->family;
+ xcp->mc_vendor = xen2linux_vendor(c->vendor);
+ xcp->mc_model = c->model;
+ xcp->mc_step = c->stepping;
xcp->mc_cache_size = c->x86_cache_size;
xcp->mc_cache_alignment = c->x86_cache_alignment;
memcpy(xcp->mc_vendorid, c->x86_vendor_id, sizeof xcp->mc_vendorid);
@@ -1142,7 +1142,7 @@ static bool __maybe_unused x86_mc_msrinject_verify(struct xen_mc_msrinject *mci)
if ( IS_MCA_BANKREG(reg, mci->mcinj_cpunr) )
{
- if ( c->x86_vendor == X86_VENDOR_AMD )
+ if ( c->vendor == X86_VENDOR_AMD )
{
/*
* On AMD we can set MCi_STATUS_WREN in the
@@ -1177,15 +1177,15 @@ static bool __maybe_unused x86_mc_msrinject_verify(struct xen_mc_msrinject *mci)
case MSR_F10_MC4_MISC1:
case MSR_F10_MC4_MISC2:
case MSR_F10_MC4_MISC3:
- if ( c->x86_vendor != X86_VENDOR_AMD )
+ if ( c->vendor != X86_VENDOR_AMD )
reason = "only supported on AMD";
- else if ( c->x86 < 0x10 )
+ else if ( c->family < 0x10 )
reason = "only supported on AMD Fam10h+";
break;
/* MSRs that the HV will take care of */
case MSR_K8_HWCR:
- if ( c->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+ if ( c->vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
reason = "HV will operate HWCR";
else
reason = "only supported on AMD or Hygon";
diff --git a/xen/arch/x86/cpu/mcheck/mce.h b/xen/arch/x86/cpu/mcheck/mce.h
index 920b075355..3b61b12487 100644
--- a/xen/arch/x86/cpu/mcheck/mce.h
+++ b/xen/arch/x86/cpu/mcheck/mce.h
@@ -137,7 +137,7 @@ void x86_mcinfo_dump(struct mc_info *mi);
static inline int mce_vendor_bank_msr(const struct vcpu *v, uint32_t msr)
{
- switch (boot_cpu_data.x86_vendor) {
+ switch (boot_cpu_data.vendor) {
case X86_VENDOR_INTEL:
if (msr >= MSR_IA32_MC0_CTL2 &&
msr < MSR_IA32_MCx_CTL2(v->arch.vmce.mcg_cap & MCG_CAP_COUNT) )
diff --git a/xen/arch/x86/cpu/mcheck/mce_amd.c b/xen/arch/x86/cpu/mcheck/mce_amd.c
index 25c29eb3d2..2d17832d9c 100644
--- a/xen/arch/x86/cpu/mcheck/mce_amd.c
+++ b/xen/arch/x86/cpu/mcheck/mce_amd.c
@@ -160,17 +160,17 @@ mcequirk_lookup_amd_quirkdata(const struct cpuinfo_x86 *c)
{
unsigned int i;
- BUG_ON(c->x86_vendor != X86_VENDOR_AMD);
+ BUG_ON(c->vendor != X86_VENDOR_AMD);
for ( i = 0; i < ARRAY_SIZE(mce_amd_quirks); i++ )
{
- if ( c->x86 != mce_amd_quirks[i].cpu_family )
+ if ( c->family != mce_amd_quirks[i].cpu_family )
continue;
if ( (mce_amd_quirks[i].cpu_model != ANY) &&
- (mce_amd_quirks[i].cpu_model != c->x86_model) )
+ (mce_amd_quirks[i].cpu_model != c->model) )
continue;
if ( (mce_amd_quirks[i].cpu_stepping != ANY) &&
- (mce_amd_quirks[i].cpu_stepping != c->x86_mask) )
+ (mce_amd_quirks[i].cpu_stepping != c->stepping) )
continue;
return mce_amd_quirks[i].quirk;
}
@@ -291,13 +291,13 @@ amd_mcheck_init(const struct cpuinfo_x86 *c, bool bsp)
uint32_t i;
enum mcequirk_amd_flags quirkflag = 0;
- if ( c->x86_vendor != X86_VENDOR_HYGON )
+ if ( c->vendor != X86_VENDOR_HYGON )
quirkflag = mcequirk_lookup_amd_quirkdata(c);
/* Assume that machine check support is available.
* The minimum provided support is at least the K8. */
if ( bsp )
- mce_handler_init(c->x86 == 0xf ? &k8_callbacks : &k10_callbacks);
+ mce_handler_init(c->family == 0xf ? &k8_callbacks : &k10_callbacks);
for ( i = 0; i < this_cpu(nr_mce_banks); i++ )
{
@@ -311,7 +311,7 @@ amd_mcheck_init(const struct cpuinfo_x86 *c, bool bsp)
}
}
- if ( c->x86 == 0xf )
+ if ( c->family == 0xf )
return mcheck_amd_k8;
if ( quirkflag == MCEQUIRK_F10_GART )
@@ -337,6 +337,6 @@ amd_mcheck_init(const struct cpuinfo_x86 *c, bool bsp)
ppin_msr = MSR_AMD_PPIN;
}
- return c->x86_vendor == X86_VENDOR_HYGON ?
+ return c->vendor == X86_VENDOR_HYGON ?
mcheck_hygon : mcheck_amd_famXX;
}
diff --git a/xen/arch/x86/cpu/mcheck/mce_intel.c b/xen/arch/x86/cpu/mcheck/mce_intel.c
index 839a0e5ba9..9100ce0f6c 100644
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -711,10 +711,7 @@ static bool mce_is_broadcast(struct cpuinfo_x86 *c)
* DisplayFamily_DisplayModel encoding of 06H_EH and above,
* a MCA signal is broadcast to all logical processors in the system
*/
- if ( c->x86_vendor == X86_VENDOR_INTEL && c->x86 == 6 &&
- c->x86_model >= 0xe )
- return true;
- return false;
+ return c->vfm >= INTEL_CORE_YONAH;
}
static bool intel_enable_lmce(void)
diff --git a/xen/arch/x86/cpu/mcheck/non-fatal.c b/xen/arch/x86/cpu/mcheck/non-fatal.c
index a9ee9bb94f..4e7c64abef 100644
--- a/xen/arch/x86/cpu/mcheck/non-fatal.c
+++ b/xen/arch/x86/cpu/mcheck/non-fatal.c
@@ -23,7 +23,7 @@ static int __init cf_check init_nonfatal_mce_checker(void)
/*
* Check for non-fatal errors every MCE_RATE s
*/
- switch (c->x86_vendor) {
+ switch (c->vendor) {
#ifdef CONFIG_AMD
case X86_VENDOR_AMD:
case X86_VENDOR_HYGON:
diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 1a7e92506a..84776aeec8 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -45,7 +45,7 @@ void vmce_init_vcpu(struct vcpu *v)
int i;
/* global MCA MSRs init */
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ if ( boot_cpu_data.vendor == X86_VENDOR_INTEL )
v->arch.vmce.mcg_cap = INTEL_GUEST_MCG_CAP;
else
v->arch.vmce.mcg_cap = AMD_GUEST_MCG_CAP;
@@ -63,7 +63,7 @@ int vmce_restore_vcpu(struct vcpu *v, const struct hvm_vmce_vcpu *ctxt)
{
unsigned long guest_mcg_cap;
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ if ( boot_cpu_data.vendor == X86_VENDOR_INTEL )
guest_mcg_cap = INTEL_GUEST_MCG_CAP | MCG_LMCE_P;
else
guest_mcg_cap = AMD_GUEST_MCG_CAP;
@@ -136,7 +136,7 @@ static int bank_mce_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
break;
default:
- switch ( boot_cpu_data.x86_vendor )
+ switch ( boot_cpu_data.vendor )
{
#ifdef CONFIG_INTEL
case X86_VENDOR_CENTAUR:
@@ -273,7 +273,7 @@ static int bank_mce_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
break;
default:
- switch ( boot_cpu_data.x86_vendor )
+ switch ( boot_cpu_data.vendor )
{
#ifdef CONFIG_INTEL
case X86_VENDOR_INTEL:
--
2.51.1
© 2016 - 2026 Red Hat, Inc.