[RFC PATCH 09/11] x86: Migrate spec_ctrl vendor checks to x86_vendor_is()

Alejandro Vallejo posted 11 patches 2 weeks, 3 days ago
[RFC PATCH 09/11] x86: Migrate spec_ctrl vendor checks to x86_vendor_is()
Posted by Alejandro Vallejo 2 weeks, 3 days ago
This is the file with the most dramatic effect in terms of DCE, so it is
singled out here.

Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@amd.com>
---
 xen/arch/x86/spec_ctrl.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index e71f62c601..a464c88908 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -388,7 +388,7 @@ int8_t __ro_after_init opt_xpti_domu = -1;
 
 static __init void xpti_init_default(void)
 {
-    if ( (boot_cpu_data.vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) ||
+    if ( (x86_vendor_is(boot_cpu_data.vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON)) ||
          cpu_has_rdcl_no )
     {
         if ( opt_xpti_hwdom < 0 )
@@ -712,7 +712,7 @@ static bool __init check_smt_enabled(void)
      * At the time of writing, it is almost completely undocumented, so isn't
      * virtualised reliably.
      */
-    if ( boot_cpu_data.vendor == X86_VENDOR_INTEL &&
+    if ( x86_vendor_is(boot_cpu_data.vendor, X86_VENDOR_INTEL) &&
          boot_cpu_data.family != 0xf && !cpu_has_hypervisor &&
          !rdmsr_safe(MSR_INTEL_CORE_THREAD_COUNT, &val) )
         return (MASK_EXTR(val, MSR_CTC_CORE_MASK) !=
@@ -738,10 +738,10 @@ static bool __init retpoline_calculations(void)
     unsigned int ucode_rev = this_cpu(cpu_sig).rev;
     bool safe = false;
 
-    if ( boot_cpu_data.vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+    if ( x86_vendor_is(boot_cpu_data.vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON) )
         return true;
 
-    if ( boot_cpu_data.vendor != X86_VENDOR_INTEL ||
+    if ( !x86_vendor_is(boot_cpu_data.vendor, X86_VENDOR_INTEL) ||
          boot_cpu_data.family != 6 )
         return false;
 
@@ -938,7 +938,7 @@ static bool __init retpoline_calculations(void)
  */
 static bool __init rsb_is_full_width(void)
 {
-    if ( boot_cpu_data.vendor != X86_VENDOR_INTEL ||
+    if ( !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) ||
          boot_cpu_data.family != 6 )
         return true;
 
@@ -966,7 +966,7 @@ static bool __init should_use_eager_fpu(void)
      * Assume all unrecognised processors are ok.  This is only known to
      * affect Intel Family 6 processors.
      */
-    if ( boot_cpu_data.vendor != X86_VENDOR_INTEL ||
+    if ( !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) ||
          boot_cpu_data.family != 6 )
         return false;
 
@@ -1033,7 +1033,7 @@ static bool __init should_use_eager_fpu(void)
  */
 static void __init srso_calculations(bool hw_smt_enabled)
 {
-    if ( !(boot_cpu_data.vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
+    if ( !x86_vendor_is(boot_cpu_data.vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON) )
         return;
 
     /*
@@ -1099,7 +1099,7 @@ static void __init srso_calculations(bool hw_smt_enabled)
  */
 static bool __init has_div_vuln(void)
 {
-    if ( !(boot_cpu_data.vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) )
+    if ( !x86_vendor_is(boot_cpu_data.vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON) )
         return false;
 
     if ( boot_cpu_data.family != 0x17 && boot_cpu_data.family != 0x18 )
@@ -1137,7 +1137,7 @@ static void __init ibpb_calculations(void)
         return;
     }
 
-    if ( boot_cpu_data.vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON) )
+    if ( x86_vendor_is(boot_cpu_data.vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON) )
     {
         /*
          * AMD/Hygon CPUs to date (June 2022) don't flush the RAS.  Future
@@ -1222,7 +1222,7 @@ static __init void l1tf_calculations(void)
     l1d_maxphysaddr = paddr_bits;
 
     /* L1TF is only known to affect Intel Family 6 processors at this time. */
-    if ( boot_cpu_data.vendor == X86_VENDOR_INTEL &&
+    if ( x86_vendor_is(boot_cpu_data.vendor, X86_VENDOR_INTEL) &&
          boot_cpu_data.family == 6 )
     {
         switch ( boot_cpu_data.model )
@@ -1358,7 +1358,7 @@ static __init void l1tf_calculations(void)
 static __init void mds_calculations(void)
 {
     /* MDS is only known to affect Intel Family 6 processors at this time. */
-    if ( boot_cpu_data.vendor != X86_VENDOR_INTEL ||
+    if ( !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) ||
          boot_cpu_data.family != 6 )
         return;
 
@@ -1469,7 +1469,7 @@ static __init void mds_calculations(void)
 static void __init rfds_calculations(void)
 {
     /* RFDS is only known to affect Intel Family 6 processors at this time. */
-    if ( boot_cpu_data.vendor != X86_VENDOR_INTEL ||
+    if ( !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) ||
          boot_cpu_data.family != 6 )
         return;
 
@@ -1535,7 +1535,7 @@ static void __init tsa_calculations(void)
     unsigned int curr_rev, min_rev;
 
     /* TSA is only known to affect AMD processors at this time. */
-    if ( boot_cpu_data.vendor != X86_VENDOR_AMD )
+    if ( !x86_vendor_is(boot_cpu_data.vendor, X86_VENDOR_AMD) )
         return;
 
     /* If we're virtualised, don't attempt to synthesise anything. */
@@ -1659,7 +1659,7 @@ static void __init gds_calculations(void)
     bool cpu_has_bug_gds, mitigated = false;
 
     /* GDS is only known to affect Intel Family 6 processors at this time. */
-    if ( boot_cpu_data.vendor != X86_VENDOR_INTEL ||
+    if ( !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) ||
          boot_cpu_data.family != 6 )
         return;
 
@@ -1754,7 +1754,7 @@ static void __init gds_calculations(void)
 static bool __init cpu_has_bug_bhi(void)
 {
     /* BHI is only known to affect Intel Family 6 processors at this time. */
-    if ( boot_cpu_data.vendor != X86_VENDOR_INTEL ||
+    if ( !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) ||
          boot_cpu_data.family != 6 )
         return false;
 
@@ -1878,7 +1878,7 @@ static void __init its_calculations(void)
         return;
 
     /* ITS is only known to affect Intel processors at this time. */
-    if ( boot_cpu_data.vendor != X86_VENDOR_INTEL )
+    if ( !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) )
         return;
 
     /*
@@ -2181,7 +2181,7 @@ void __init init_speculation_mitigations(void)
          * before going idle is less overhead than flushing on PV entry.
          */
         if ( !opt_rsb_pv && hw_smt_enabled &&
-             (boot_cpu_data.vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) &&
+             x86_vendor_is(boot_cpu_data.vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON) &&
              (boot_cpu_data.family == 0x17 || boot_cpu_data.family == 0x18) )
             setup_force_cpu_cap(X86_FEATURE_SC_RSB_IDLE);
     }
-- 
2.43.0
Re: [RFC PATCH 09/11] x86: Migrate spec_ctrl vendor checks to x86_vendor_is()
Posted by Jan Beulich 5 days, 16 hours ago
On 26.11.2025 17:44, Alejandro Vallejo wrote:
> @@ -938,7 +938,7 @@ static bool __init retpoline_calculations(void)
>   */
>  static bool __init rsb_is_full_width(void)
>  {
> -    if ( boot_cpu_data.vendor != X86_VENDOR_INTEL ||
> +    if ( !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) ||

One other aspect: If already you touch lines still using the old (being
phased out) field names, please rename at the same time. This may then
also help with line length in some cases.

Jan
Re: [RFC PATCH 09/11] x86: Migrate spec_ctrl vendor checks to x86_vendor_is()
Posted by Alejandro Vallejo 2 days, 21 hours ago
On Mon Dec 8, 2025 at 5:04 PM CET, Jan Beulich wrote:
> On 26.11.2025 17:44, Alejandro Vallejo wrote:
>> @@ -938,7 +938,7 @@ static bool __init retpoline_calculations(void)
>>   */
>>  static bool __init rsb_is_full_width(void)
>>  {
>> -    if ( boot_cpu_data.vendor != X86_VENDOR_INTEL ||
>> +    if ( !x86_vendor_is(boot_cpu_data.x86_vendor, X86_VENDOR_INTEL) ||
>
> One other aspect: If already you touch lines still using the old (being
> phased out) field names, please rename at the same time. This may then
> also help with line length in some cases.
>
> Jan

Yes, of course. I didn't even notice the difference at first. On the note on
length, I'm revising the idea, keeping the same principle but making it less
verbose.

Seeing how both you and Andrew seem onboard with dropping cross-vendor support
and having these turn into constants, I'm leaning into transforming everything
to a single "cpu_vendor", both host checks and policy checks would become
something of the form:

    if ( cpu_vendor != X86_VENDOR_INTEL )
      ...

    if ( cpu_vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON))
      ...

where cpu_vendor would be defined as a runtime check on boot_cpu_data.vendor 
masked with X86_ENABLED_VENDORS when multiple vendors are compiled in, and
the single vendor compiled-in. Perhaps something like... (untested)

#define cpu_vendor \
    ((!IS_ENABLED(CONFIG_UNKNOWN_CPU_VENDOR) && X86_ENABLED_VENDORS && \
      (X86_ENABLED_VENDORS == ISOLATE_LSB(X86_ENABLED_VENDORS))) \
        ? X86_ENABLED_VENDORS \
        : (boot_cpu_data.vendor & X86_ENABLED_VENDORS))

I _think_ it would have the same DCE implications, but it would be much nicer
to read at the callsites and cause far fewer line wraps.

This brings down complexity, allows for the switches to stay (DCE understands
unreachable labels) and improves the general code quality.

I'm busy atm chasing emulator woes, but a non-rfc series of this will reach
the mailing list at some point. I'll make sure to s/x86_vendor/vendor while at
it.

Cheers,
Alejandro
Re: [RFC PATCH 09/11] x86: Migrate spec_ctrl vendor checks to x86_vendor_is()
Posted by Andrew Cooper 2 days, 16 hours ago
On 11/12/2025 10:31 am, Alejandro Vallejo wrote:
> Seeing how both you and Andrew seem onboard with dropping cross-vendor support

I found another cross-vendor dropping which you'll want to look into.

struct svm_vcpu contains three guest_sysenter_* MSRs.

In AMD CPUs, these MSRs only have 32 bits of storage, with the upper
halfs write-discard.  They are switched via VMLOAD/VMSAVE.

However, in the cross-vendor case, the upper halves are needed for 64bit
kernels setting up SYSENTER support.  Therefore, they're unconditionally
intercepted so we can avoid losing the upper half.

By dropping cross-vendor support, we can get rid of these fields, allow
the guest unconditional access, and simplify the MSR intercept logic a little.

~Andrew

Re: [RFC PATCH 09/11] x86: Migrate spec_ctrl vendor checks to x86_vendor_is()
Posted by Alejandro Vallejo 2 days, 15 hours ago
On Thu Dec 11, 2025 at 4:38 PM CET, Andrew Cooper wrote:
> On 11/12/2025 10:31 am, Alejandro Vallejo wrote:
>> Seeing how both you and Andrew seem onboard with dropping cross-vendor support
>
> I found another cross-vendor dropping which you'll want to look into.
>
> struct svm_vcpu contains three guest_sysenter_* MSRs.
>
> In AMD CPUs, these MSRs only have 32 bits of storage, with the upper
> halves write-discard.  They are switched via VMLOAD/VMSAVE.
>
> However, in the cross-vendor case, the upper halves are needed for 64bit
> kernels setting up SYSENTER support.  Therefore, they're unconditionally
> intercepted so we can avoid losing the upper half.
>
> By dropping cross-vendor support, we can get rid of these fields, allow
> the guest unconditional access, and simplify the MSR intercept logic a little.
>
> ~Andrew

Sounds straightforward, I'll add it to the pile.

Cheers,
Alejandro
Re: [RFC PATCH 09/11] x86: Migrate spec_ctrl vendor checks to x86_vendor_is()
Posted by Jan Beulich 5 days, 16 hours ago
On 26.11.2025 17:44, Alejandro Vallejo wrote:
> This is the file with the most dramatic effect in terms of DCE, so it is
> singled out here.
> 
> Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@amd.com>

If we accept the basic concept, changes like this are of course okay. Just one
remark:

> --- a/xen/arch/x86/spec_ctrl.c
> +++ b/xen/arch/x86/spec_ctrl.c
> @@ -388,7 +388,7 @@ int8_t __ro_after_init opt_xpti_domu = -1;
>  
>  static __init void xpti_init_default(void)
>  {
> -    if ( (boot_cpu_data.vendor & (X86_VENDOR_AMD | X86_VENDOR_HYGON)) ||
> +    if ( (x86_vendor_is(boot_cpu_data.vendor, X86_VENDOR_AMD | X86_VENDOR_HYGON)) ||

Here and elsewhere please keep line length within 80 chars.

Jan