No functional change.
Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@amd.com>
---
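For reviewers: this patch relies on the cpu_vendor() helper assumed to be
introduced earlier in the series, taken here to be a thin wrapper around
boot_cpu_data.x86_vendor. The standalone sketch below (hypothetical helper
body; illustrative bit positions for the vendor constants) shows why an
equality test against a one-hot vendor constant can be rewritten as a mask
test, and why the mask form additionally covers multi-vendor checks:

    /* Standalone sketch; build with: gcc -Wall -o vendor vendor.c */
    #include <stdbool.h>
    #include <stdio.h>

    /* One-hot vendor encoding (illustrative bit positions). */
    #define X86_VENDOR_UNKNOWN 0
    #define X86_VENDOR_INTEL   (1U << 0)
    #define X86_VENDOR_AMD     (1U << 1)
    #define X86_VENDOR_HYGON   (1U << 4)

    /* Stand-in for boot_cpu_data.x86_vendor. */
    static unsigned int boot_vendor = X86_VENDOR_AMD;

    /* Assumed shape of the helper; the real one lands earlier in the series. */
    static inline unsigned int cpu_vendor(void)
    {
        return boot_vendor;
    }

    int main(void)
    {
        /* With one-hot constants, == and & agree for a single vendor... */
        bool eq   = cpu_vendor() == X86_VENDOR_AMD;
        bool mask = cpu_vendor() & X86_VENDOR_AMD;

        /* ...and the mask form also tests a vendor group in one go. */
        bool amd_like = cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON);

        /*
         * Caveat: !(v & M) is not the same as (v & ~M); they differ for
         * v == X86_VENDOR_UNKNOWN (0), so negated checks keep the ! form.
         */
        printf("eq=%d mask=%d amd_like=%d\n", eq, mask, amd_like);
        return 0;
    }
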
xen/arch/x86/apic.c | 2 +-
xen/arch/x86/cpu-policy.c | 8 ++++----
xen/arch/x86/cpuid.c | 5 ++---
xen/arch/x86/dom0_build.c | 2 +-
xen/arch/x86/domain.c | 12 +++++-------
xen/arch/x86/e820.c | 2 +-
xen/arch/x86/hvm/hvm.c | 3 +--
xen/arch/x86/hvm/ioreq.c | 3 +--
xen/arch/x86/hvm/vmx/vmx.c | 8 +++-----
xen/arch/x86/i8259.c | 5 ++---
xen/arch/x86/include/asm/guest_pt.h | 3 +--
xen/arch/x86/irq.c | 3 +--
xen/arch/x86/setup.c | 7 +++----
13 files changed, 26 insertions(+), 37 deletions(-)
diff --git a/xen/arch/x86/apic.c b/xen/arch/x86/apic.c
index fb38be7ec3..fd3ac07aeb 100644
--- a/xen/arch/x86/apic.c
+++ b/xen/arch/x86/apic.c
@@ -406,7 +406,7 @@ void __init init_bsp_APIC(void)
value |= APIC_SPIV_APIC_ENABLED;
/* This bit is reserved on P4/Xeon and should be cleared */
- if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 15))
+ if ((cpu_vendor() & X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 15))
value &= ~APIC_SPIV_FOCUS_DISABLED;
else
value |= APIC_SPIV_FOCUS_DISABLED;
diff --git a/xen/arch/x86/cpu-policy.c b/xen/arch/x86/cpu-policy.c
index 62aff61d8c..055862b0b2 100644
--- a/xen/arch/x86/cpu-policy.c
+++ b/xen/arch/x86/cpu-policy.c
@@ -770,7 +770,7 @@ static void __init calculate_hvm_max_policy(void)
* long mode (and init_amd() has cleared it out of host capabilities), but
* HVM guests are able if running in protected mode.
*/
- if ( (boot_cpu_data.x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
+ if ( (cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
raw_cpu_policy.basic.sep )
__set_bit(X86_FEATURE_SEP, fs);
@@ -963,8 +963,8 @@ void recalculate_cpuid_policy(struct domain *d)
p->basic.max_leaf = min(p->basic.max_leaf, max->basic.max_leaf);
p->feat.max_subleaf = min(p->feat.max_subleaf, max->feat.max_subleaf);
p->extd.max_leaf = 0x80000000U | min(p->extd.max_leaf & 0xffff,
- ((p->x86_vendor & (X86_VENDOR_AMD |
- X86_VENDOR_HYGON))
+ ((cpu_vendor() & (X86_VENDOR_AMD |
+ X86_VENDOR_HYGON))
? CPUID_GUEST_NR_EXTD_AMD
: CPUID_GUEST_NR_EXTD_INTEL) - 1);
@@ -998,7 +998,7 @@ void recalculate_cpuid_policy(struct domain *d)
if ( is_pv_32bit_domain(d) )
{
__clear_bit(X86_FEATURE_LM, max_fs);
- if ( !(boot_cpu_data.x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
+ if ( !(cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
__clear_bit(X86_FEATURE_SYSCALL, max_fs);
}
diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index 5decfad8cd..d64030bc09 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -437,8 +437,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
case 0xa:
/* TODO: Rework vPMU control in terms of toolstack choices. */
- if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
- !vpmu_available(v) )
+ if ( !(cpu_vendor() & X86_VENDOR_INTEL) || !vpmu_available(v) )
*res = EMPTY_LEAF;
else
{
@@ -483,7 +482,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
case 0x80000001U:
/* SYSCALL is hidden outside of long mode on Intel. */
- if ( p->x86_vendor == X86_VENDOR_INTEL &&
+ if ( (cpu_vendor() & X86_VENDOR_INTEL) &&
is_hvm_domain(d) && !hvm_long_mode_active(v) )
res->d &= ~cpufeat_mask(X86_FEATURE_SYSCALL);
diff --git a/xen/arch/x86/dom0_build.c b/xen/arch/x86/dom0_build.c
index 0b467fd4a4..4b7f1bf034 100644
--- a/xen/arch/x86/dom0_build.c
+++ b/xen/arch/x86/dom0_build.c
@@ -572,7 +572,7 @@ int __init dom0_setup_permissions(struct domain *d)
rc |= iomem_deny_access(d, mfn, mfn);
}
/* HyperTransport range. */
- if ( boot_cpu_data.x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+ if ( cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
{
mfn = paddr_to_pfn(1UL <<
(boot_cpu_data.x86 < 0x17 ? 40 : paddr_bits));
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 10a558e515..fd9c7f0be5 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -372,7 +372,7 @@ void domain_cpu_policy_changed(struct domain *d)
{
uint64_t mask = cpuidmask_defaults._6c;
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+ if ( cpu_vendor() & X86_VENDOR_AMD )
mask &= (~0ULL << 32) | p->basic.raw[6].c;
d->arch.pv.cpuidmasks->_6c = mask;
@@ -387,8 +387,7 @@ void domain_cpu_policy_changed(struct domain *d)
* wholesale from the policy, but clamp the features in 7[0].ebx
* per usual.
*/
- if ( boot_cpu_data.x86_vendor &
- (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+ if ( cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
mask = (((uint64_t)p->feat.max_subleaf << 32) |
((uint32_t)mask & p->feat._7b0));
@@ -400,7 +399,7 @@ void domain_cpu_policy_changed(struct domain *d)
uint64_t mask = cpuidmask_defaults.Da1;
uint32_t eax = p->xstate.Da1;
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
+ if ( cpu_vendor() & X86_VENDOR_INTEL )
mask &= (~0ULL << 32) | eax;
d->arch.pv.cpuidmasks->Da1 = mask;
@@ -424,7 +423,7 @@ void domain_cpu_policy_changed(struct domain *d)
* If not emulating AMD or Hygon, clear the duplicated features
* in e1d.
*/
- if ( !(p->x86_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
+ if ( !(cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
edx &= ~CPUID_COMMON_1D_FEATURES;
switch ( cpu_vendor() )
@@ -457,8 +456,7 @@ void domain_cpu_policy_changed(struct domain *d)
cpu_policy_updated(v);
/* If PMU version is zero then the guest doesn't have VPMU */
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- p->basic.pmu_version == 0 )
+ if ( (cpu_vendor() & X86_VENDOR_INTEL) && p->basic.pmu_version == 0 )
vpmu_destroy(v);
}
}
diff --git a/xen/arch/x86/e820.c b/xen/arch/x86/e820.c
index ca577c0bde..0bf90f2283 100644
--- a/xen/arch/x86/e820.c
+++ b/xen/arch/x86/e820.c
@@ -426,7 +426,7 @@ static uint64_t __init mtrr_top_of_ram(void)
/* By default we check only Intel systems. */
if ( e820_mtrr_clip == -1 )
- e820_mtrr_clip = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
+ e820_mtrr_clip = !!(cpu_vendor() & X86_VENDOR_INTEL);
if ( !e820_mtrr_clip )
return 0;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 4d37a93c57..52a6cc69e5 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2681,8 +2681,7 @@ bool hvm_vcpu_virtual_to_linear(
}
else if ( last_byte > reg->limit )
goto out; /* last byte is beyond limit */
- else if ( last_byte < offset &&
- v->domain->arch.cpuid->x86_vendor == X86_VENDOR_AMD )
+ else if ( (cpu_vendor() & X86_VENDOR_AMD) && last_byte < offset )
goto out; /* access wraps */
}
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index a5fa97e149..7a55b14f55 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -285,8 +285,7 @@ bool arch_ioreq_server_get_type_addr(const struct domain *d,
*type = XEN_DMOP_IO_RANGE_PCI;
*addr = ((uint64_t)sbdf.sbdf << 32) | reg;
/* AMD extended configuration space access? */
- if ( CF8_ADDR_HI(cf8) &&
- d->arch.cpuid->x86_vendor == X86_VENDOR_AMD &&
+ if ( CF8_ADDR_HI(cf8) && (cpu_vendor() & X86_VENDOR_AMD) &&
(x86_fam = get_cpu_family(
d->arch.cpuid->basic.raw_fms, NULL, NULL)) >= 0x10 &&
x86_fam < 0x17 )
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 82c55f49ae..5637ee15e7 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -3073,8 +3073,7 @@ static bool __init has_if_pschange_mc(void)
* IF_PSCHANGE_MC is only known to affect Intel Family 6 processors at
* this time.
*/
- if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
- boot_cpu_data.x86 != 6 )
+ if ( !(cpu_vendor() & X86_VENDOR_INTEL) || boot_cpu_data.x86 != 6 )
return false;
switch ( boot_cpu_data.x86_model )
@@ -3431,7 +3430,7 @@ static void __init lbr_tsx_fixup_check(void)
* fixed up as well.
*/
if ( cpu_has_hle || cpu_has_rtm ||
- boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
+ !(cpu_vendor() & X86_VENDOR_INTEL) ||
boot_cpu_data.x86 != 6 )
return;
@@ -3476,8 +3475,7 @@ static void __init ler_to_fixup_check(void)
* that are not equal to bit[47]. Attempting to context switch this value
* may cause a #GP. Software should sign extend the MSR.
*/
- if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
- boot_cpu_data.x86 != 6 )
+ if ( !(cpu_vendor() & X86_VENDOR_INTEL) || boot_cpu_data.x86 != 6 )
return;
switch ( boot_cpu_data.x86_model )
diff --git a/xen/arch/x86/i8259.c b/xen/arch/x86/i8259.c
index 5c7e21a751..c19d2fe7b4 100644
--- a/xen/arch/x86/i8259.c
+++ b/xen/arch/x86/i8259.c
@@ -419,9 +419,8 @@ void __init init_IRQ(void)
* the interrupt.
*/
cpumask_copy(desc->arch.cpu_mask,
- (boot_cpu_data.x86_vendor &
- (X86_VENDOR_AMD | X86_VENDOR_HYGON) ? &cpumask_all
- : cpumask_of(cpu)));
+ ((cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON))
+ ? &cpumask_all : cpumask_of(cpu)));
desc->arch.vector = LEGACY_VECTOR(irq);
}
diff --git a/xen/arch/x86/include/asm/guest_pt.h b/xen/arch/x86/include/asm/guest_pt.h
index 21473f9bbc..cc3e9b504c 100644
--- a/xen/arch/x86/include/asm/guest_pt.h
+++ b/xen/arch/x86/include/asm/guest_pt.h
@@ -314,8 +314,7 @@ static always_inline bool guest_l4e_rsvd_bits(const struct vcpu *v,
guest_l4e_t l4e)
{
return l4e.l4 & (guest_rsvd_bits(v) | GUEST_L4_PAGETABLE_RSVD |
- ((v->domain->arch.cpuid->x86_vendor == X86_VENDOR_AMD)
- ? _PAGE_GLOBAL : 0));
+ ((cpu_vendor() & X86_VENDOR_AMD) ? _PAGE_GLOBAL : 0));
}
#endif /* GUEST_PAGING_LEVELS >= 4 */
#endif /* GUEST_PAGING_LEVELS >= 3 */
diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index cc2934bfca..8e3706fb98 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -2011,8 +2011,7 @@ void do_IRQ(struct cpu_user_regs *regs)
* interrupts have been delivered to CPUs
* different than the BSP.
*/
- (boot_cpu_data.x86_vendor & (X86_VENDOR_AMD |
- X86_VENDOR_HYGON))) &&
+ (cpu_vendor() & (X86_VENDOR_AMD | X86_VENDOR_HYGON))) &&
bogus_8259A_irq(vector - FIRST_LEGACY_VECTOR)) )
{
printk("CPU%u: No irq handler for vector %02x (IRQ %d%s)\n",
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 27c63d1d97..26b17aab8f 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1406,8 +1406,7 @@ void asmlinkage __init noreturn __start_xen(void)
* CPUs with this addressed enumerate CET-SSS to indicate that
* supervisor shadow stacks are now safe to use.
*/
- bool cpu_has_bug_shstk_fracture =
- boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ bool cpu_has_bug_shstk_fracture = (cpu_vendor() & X86_VENDOR_INTEL) &&
!boot_cpu_has(X86_FEATURE_CET_SSS);
/*
@@ -2038,10 +2037,10 @@ void asmlinkage __init noreturn __start_xen(void)
/* Do not enable SMEP/SMAP in PV shim on AMD and Hygon by default */
if ( opt_smep == -1 )
- opt_smep = !pv_shim || !(boot_cpu_data.x86_vendor &
+ opt_smep = !pv_shim || !(cpu_vendor() &
(X86_VENDOR_AMD | X86_VENDOR_HYGON));
if ( opt_smap == -1 )
- opt_smap = !pv_shim || !(boot_cpu_data.x86_vendor &
+ opt_smap = !pv_shim || !(cpu_vendor() &
(X86_VENDOR_AMD | X86_VENDOR_HYGON));
if ( !opt_smep )
--
2.43.0