[PATCH v4 2/7] x86/xstate: Cross-check dynamic XSTATE sizes at boot

Andrew Cooper posted 7 patches 5 months, 1 week ago
Posted by Andrew Cooper 5 months, 1 week ago
Right now, xstate_ctxt_size() performs a cross-check of size with CPUID for
every call.  This is expensive, being used for domain create/migrate, as well
as to service certain guest CPUID instructions.

Instead, arrange to check the sizes once at boot.  See the code comments for
details.  Right now, it just checks hardware against the algorithm
expectations.  Later patches will add further cross-checking.

Introduce more X86_XCR0_* and X86_XSS_* constants and CPUID bits.  This is to
maximise coverage in the sanity check, even if we don't expect to
use/virtualise some of these features any time soon.  Leave HDC and HWP alone
for now; we don't have CPUID bits for them stored nicely.

Only perform the cross-checks when SELF_TESTS are active.  It's only
developers or new hardware liable to trip these checks, and Xen at least
tracks "maximum value ever seen in xcr0" for the lifetime of the VM, which we
don't want to be tickling in the general case.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Oleksii Kurochko <oleksii.kurochko@gmail.com>

v3:
 * New
v4:
 * Rebase over CONFIG_SELF_TESTS
 * Swap one BUG_ON() for a WARN()

On Sapphire Rapids with the whole series inc diagnostics, we get this pattern:

  (XEN) *** check_new_xstate(, 0x00000003)
  (XEN) *** check_new_xstate(, 0x00000004)
  (XEN) *** check_new_xstate(, 0x000000e0)
  (XEN) *** check_new_xstate(, 0x00000200)
  (XEN) *** check_new_xstate(, 0x00060000)
  (XEN) *** check_new_xstate(, 0x00000100)
  (XEN) *** check_new_xstate(, 0x00000400)
  (XEN) *** check_new_xstate(, 0x00000800)
  (XEN) *** check_new_xstate(, 0x00001000)
  (XEN) *** check_new_xstate(, 0x00004000)
  (XEN) *** check_new_xstate(, 0x00008000)

and on Genoa, this pattern:

  (XEN) *** check_new_xstate(, 0x00000003)
  (XEN) *** check_new_xstate(, 0x00000004)
  (XEN) *** check_new_xstate(, 0x000000e0)
  (XEN) *** check_new_xstate(, 0x00000200)
  (XEN) *** check_new_xstate(, 0x00000800)
  (XEN) *** check_new_xstate(, 0x00001000)
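
For reference, these masks decode against the X86_XCR0_*/X86_XSS_* constants
introduced below (an informal decoding aid, not part of the patch itself):

  0x00000003 = FP | SSE
  0x00000004 = YMM
  0x000000e0 = OPMASK | ZMM | HI_ZMM
  0x00000100 = PROC_TRACE  (XSS)
  0x00000200 = PKRU
  0x00000400 = PASID       (XSS)
  0x00000800 = CET_U       (XSS)
  0x00001000 = CET_S       (XSS)
  0x00004000 = UINTR       (XSS)
  0x00008000 = LBR         (XSS)
  0x00060000 = TILE_CFG | TILE_DATA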
---
 xen/arch/x86/include/asm/x86-defns.h        |  25 +++-
 xen/arch/x86/xstate.c                       | 158 ++++++++++++++++++++
 xen/include/public/arch-x86/cpufeatureset.h |   3 +
 3 files changed, 185 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/include/asm/x86-defns.h b/xen/arch/x86/include/asm/x86-defns.h
index 48d7a3b7af45..d7602ab225c4 100644
--- a/xen/arch/x86/include/asm/x86-defns.h
+++ b/xen/arch/x86/include/asm/x86-defns.h
@@ -77,7 +77,7 @@
 #define X86_CR4_PKS        0x01000000 /* Protection Key Supervisor */
 
 /*
- * XSTATE component flags in XCR0
+ * XSTATE component flags in XCR0 | MSR_XSS
  */
 #define X86_XCR0_FP_POS           0
 #define X86_XCR0_FP               (1ULL << X86_XCR0_FP_POS)
@@ -95,11 +95,34 @@
 #define X86_XCR0_ZMM              (1ULL << X86_XCR0_ZMM_POS)
 #define X86_XCR0_HI_ZMM_POS       7
 #define X86_XCR0_HI_ZMM           (1ULL << X86_XCR0_HI_ZMM_POS)
+#define X86_XSS_PROC_TRACE        (_AC(1, ULL) <<  8)
 #define X86_XCR0_PKRU_POS         9
 #define X86_XCR0_PKRU             (1ULL << X86_XCR0_PKRU_POS)
+#define X86_XSS_PASID             (_AC(1, ULL) << 10)
+#define X86_XSS_CET_U             (_AC(1, ULL) << 11)
+#define X86_XSS_CET_S             (_AC(1, ULL) << 12)
+#define X86_XSS_HDC               (_AC(1, ULL) << 13)
+#define X86_XSS_UINTR             (_AC(1, ULL) << 14)
+#define X86_XSS_LBR               (_AC(1, ULL) << 15)
+#define X86_XSS_HWP               (_AC(1, ULL) << 16)
+#define X86_XCR0_TILE_CFG         (_AC(1, ULL) << 17)
+#define X86_XCR0_TILE_DATA        (_AC(1, ULL) << 18)
 #define X86_XCR0_LWP_POS          62
 #define X86_XCR0_LWP              (1ULL << X86_XCR0_LWP_POS)
 
+#define X86_XCR0_STATES                                                 \
+    (X86_XCR0_FP | X86_XCR0_SSE | X86_XCR0_YMM | X86_XCR0_BNDREGS |     \
+     X86_XCR0_BNDCSR | X86_XCR0_OPMASK | X86_XCR0_ZMM |                 \
+     X86_XCR0_HI_ZMM | X86_XCR0_PKRU | X86_XCR0_TILE_CFG |              \
+     X86_XCR0_TILE_DATA |                                               \
+     X86_XCR0_LWP)
+
+#define X86_XSS_STATES                                                  \
+    (X86_XSS_PROC_TRACE | X86_XSS_PASID | X86_XSS_CET_U |               \
+     X86_XSS_CET_S | X86_XSS_HDC | X86_XSS_UINTR | X86_XSS_LBR |        \
+     X86_XSS_HWP |                                                      \
+     0)
+
 /*
  * Debug status flags in DR6.
  *
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 75788147966a..650206d9d2b6 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -604,9 +604,164 @@ static bool valid_xcr0(uint64_t xcr0)
     if ( !(xcr0 & X86_XCR0_BNDREGS) != !(xcr0 & X86_XCR0_BNDCSR) )
         return false;
 
+    /* TILECFG and TILEDATA must be the same. */
+    if ( !(xcr0 & X86_XCR0_TILE_CFG) != !(xcr0 & X86_XCR0_TILE_DATA) )
+        return false;
+
     return true;
 }
 
+struct xcheck_state {
+    uint64_t states;
+    uint32_t uncomp_size;
+    uint32_t comp_size;
+};
+
+static void __init check_new_xstate(struct xcheck_state *s, uint64_t new)
+{
+    uint32_t hw_size;
+
+    BUILD_BUG_ON(X86_XCR0_STATES & X86_XSS_STATES);
+
+    BUG_ON(s->states & new); /* States only increase. */
+    BUG_ON(!valid_xcr0(s->states | new)); /* Xen thinks it's a good value. */
+    BUG_ON(new & ~(X86_XCR0_STATES | X86_XSS_STATES)); /* Known state. */
+    BUG_ON((new & X86_XCR0_STATES) &&
+           (new & X86_XSS_STATES)); /* User or supervisor, not both. */
+
+    s->states |= new;
+    if ( new & X86_XCR0_STATES )
+    {
+        if ( !set_xcr0(s->states & X86_XCR0_STATES) )
+            BUG();
+    }
+    else
+        set_msr_xss(s->states & X86_XSS_STATES);
+
+    /*
+     * Check the uncompressed size.  Some XSTATEs are out-of-order and fill in
+     * prior holes in the state area, so we check that the size doesn't
+     * decrease.
+     */
+    hw_size = cpuid_count_ebx(0xd, 0);
+
+    if ( hw_size < s->uncomp_size )
+        panic("XSTATE 0x%016"PRIx64", new bits {%63pbl}, uncompressed hw size %#x < prev size %#x\n",
+              s->states, &new, hw_size, s->uncomp_size);
+
+    s->uncomp_size = hw_size;
+
+    /*
+     * Check the compressed size, if available.  All components strictly
+     * appear in index order.  In principle there are no holes, but some
+     * components have their base address 64-byte aligned for efficiency
+     * reasons (e.g. AMX-TILE) and there are other components small enough to
+     * fit in the gap (e.g. PKRU) without increasing the overall length.
+     */
+    hw_size = cpuid_count_ebx(0xd, 1);
+
+    if ( cpu_has_xsavec )
+    {
+        if ( hw_size < s->comp_size )
+            panic("XSTATE 0x%016"PRIx64", new bits {%63pbl}, compressed hw size %#x < prev size %#x\n",
+                  s->states, &new, hw_size, s->comp_size);
+
+        s->comp_size = hw_size;
+    }
+    else if ( hw_size ) /* Compressed size reported, but no XSAVEC ? */
+    {
+        static bool once;
+
+        if ( !once )
+        {
+            WARN();
+            once = true;
+        }
+    }
+}
+
+/*
+ * The {un,}compressed XSTATE sizes are reported by dynamic CPUID value, based
+ * on the current %XCR0 and MSR_XSS values.  The exact layout is also feature
+ * and vendor specific.  Cross-check Xen's understanding against real hardware
+ * on boot.
+ *
+ * Testing every combination is prohibitive, so we use a partial approach.
+ * Starting with nothing active, we add new XSTATEs and check that the CPUID
+ * dynamic values never decrease.
+ */
+static void __init noinline xstate_check_sizes(void)
+{
+    uint64_t old_xcr0 = get_xcr0();
+    uint64_t old_xss = get_msr_xss();
+    struct xcheck_state s = {};
+
+    /*
+     * User XSTATEs, increasing by index.
+     *
+     * Chronologically, Intel and AMD had identical layouts for AVX (YMM).
+     * AMD introduced LWP in Fam15h, following immediately on from YMM.  Intel
+     * left an LWP-shaped hole when adding MPX (BND{CSR,REGS}) in Skylake.
+     * AMD removed LWP in Fam17h, putting PKRU in the same space, breaking
+     * layout compatibility with Intel and having a knock-on effect on all
+     * subsequent states.
+     */
+    check_new_xstate(&s, X86_XCR0_SSE | X86_XCR0_FP);
+
+    if ( cpu_has_avx )
+        check_new_xstate(&s, X86_XCR0_YMM);
+
+    if ( cpu_has_mpx )
+        check_new_xstate(&s, X86_XCR0_BNDCSR | X86_XCR0_BNDREGS);
+
+    if ( cpu_has_avx512f )
+        check_new_xstate(&s, X86_XCR0_HI_ZMM | X86_XCR0_ZMM | X86_XCR0_OPMASK);
+
+    if ( cpu_has_pku )
+        check_new_xstate(&s, X86_XCR0_PKRU);
+
+    if ( boot_cpu_has(X86_FEATURE_AMX_TILE) )
+        check_new_xstate(&s, X86_XCR0_TILE_DATA | X86_XCR0_TILE_CFG);
+
+    if ( boot_cpu_has(X86_FEATURE_LWP) )
+        check_new_xstate(&s, X86_XCR0_LWP);
+
+    /*
+     * Supervisor XSTATEs, increasing by index.
+     *
+     * Intel Broadwell has Processor Trace but no XSAVES.  There doesn't
+     * appear to have been a new enumeration when X86_XSS_PROC_TRACE was
+     * introduced in Skylake.
+     */
+    if ( cpu_has_xsaves )
+    {
+        if ( cpu_has_proc_trace )
+            check_new_xstate(&s, X86_XSS_PROC_TRACE);
+
+        if ( boot_cpu_has(X86_FEATURE_ENQCMD) )
+            check_new_xstate(&s, X86_XSS_PASID);
+
+        if ( boot_cpu_has(X86_FEATURE_CET_SS) ||
+             boot_cpu_has(X86_FEATURE_CET_IBT) )
+        {
+            check_new_xstate(&s, X86_XSS_CET_U);
+            check_new_xstate(&s, X86_XSS_CET_S);
+        }
+
+        if ( boot_cpu_has(X86_FEATURE_UINTR) )
+            check_new_xstate(&s, X86_XSS_UINTR);
+
+        if ( boot_cpu_has(X86_FEATURE_ARCH_LBR) )
+            check_new_xstate(&s, X86_XSS_LBR);
+    }
+
+    /* Restore old state now the test is done. */
+    if ( !set_xcr0(old_xcr0) )
+        BUG();
+    if ( cpu_has_xsaves )
+        set_msr_xss(old_xss);
+}
+
 /* Collect the information of processor's extended state */
 void xstate_init(struct cpuinfo_x86 *c)
 {
@@ -683,6 +838,9 @@ void xstate_init(struct cpuinfo_x86 *c)
 
     if ( setup_xstate_features(bsp) && bsp )
         BUG();
+
+    if ( IS_ENABLED(CONFIG_SELF_TESTS) && bsp )
+        xstate_check_sizes();
 }
 
 int validate_xstate(const struct domain *d, uint64_t xcr0, uint64_t xcr0_accum,
diff --git a/xen/include/public/arch-x86/cpufeatureset.h b/xen/include/public/arch-x86/cpufeatureset.h
index 6627453e3985..d9eba5e9a714 100644
--- a/xen/include/public/arch-x86/cpufeatureset.h
+++ b/xen/include/public/arch-x86/cpufeatureset.h
@@ -266,6 +266,7 @@ XEN_CPUFEATURE(IBPB_RET,      8*32+30) /*A  IBPB clears RSB/RAS too. */
 XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A  AVX512 Neural Network Instructions */
 XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A  AVX512 Multiply Accumulation Single Precision */
 XEN_CPUFEATURE(FSRM,          9*32+ 4) /*A  Fast Short REP MOVS */
+XEN_CPUFEATURE(UINTR,         9*32+ 5) /*   User-mode Interrupts */
 XEN_CPUFEATURE(AVX512_VP2INTERSECT, 9*32+8) /*a  VP2INTERSECT{D,Q} insns */
 XEN_CPUFEATURE(SRBDS_CTRL,    9*32+ 9) /*   MSR_MCU_OPT_CTRL and RNGDS_MITG_DIS. */
 XEN_CPUFEATURE(MD_CLEAR,      9*32+10) /*!A| VERW clears microarchitectural buffers */
@@ -274,8 +275,10 @@ XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
 XEN_CPUFEATURE(SERIALIZE,     9*32+14) /*A  SERIALIZE insn */
 XEN_CPUFEATURE(HYBRID,        9*32+15) /*   Heterogeneous platform */
 XEN_CPUFEATURE(TSXLDTRK,      9*32+16) /*a  TSX load tracking suspend/resume insns */
+XEN_CPUFEATURE(ARCH_LBR,      9*32+19) /*   Architectural Last Branch Record */
 XEN_CPUFEATURE(CET_IBT,       9*32+20) /*   CET - Indirect Branch Tracking */
 XEN_CPUFEATURE(AVX512_FP16,   9*32+23) /*A  AVX512 FP16 instructions */
+XEN_CPUFEATURE(AMX_TILE,      9*32+24) /*   AMX Tile architecture */
 XEN_CPUFEATURE(IBRSB,         9*32+26) /*A  IBRS and IBPB support (used by Intel) */
 XEN_CPUFEATURE(STIBP,         9*32+27) /*A  STIBP */
 XEN_CPUFEATURE(L1D_FLUSH,     9*32+28) /*S  MSR_FLUSH_CMD and L1D flush. */
-- 
2.39.2


Re: [PATCH v4 2/7] x86/xstate: Cross-check dynamic XSTATE sizes at boot
Posted by Jan Beulich 5 months ago
On 17.06.2024 19:39, Andrew Cooper wrote:
> Right now, xstate_ctxt_size() performs a cross-check of size with CPUID for
> every call.  This is expensive, being used for domain create/migrate, as well
> as to service certain guest CPUID instructions.
> 
> Instead, arrange to check the sizes once at boot.  See the code comments for
> details.  Right now, it just checks hardware against the algorithm
> expectations.  Later patches will add further cross-checking.
> 
> Introduce more X86_XCR0_* and X86_XSS_* constants and CPUID bits.  This is to
> maximise coverage in the sanity check, even if we don't expect to
> use/virtualise some of these features any time soon.  Leave HDC and HWP alone
> for now; we don't have CPUID bits for them stored nicely.
> 
> Only perform the cross-checks when SELF_TESTS are active.  It's only
> developers or new hardware liable to trip these checks, and Xen at least
> tracks "maximum value ever seen in xcr0" for the lifetime of the VM, which we
> don't want to be tickling in the general case.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

I may certainly give R-b on the patch as it is, but I have a few questions
first:

> --- a/xen/arch/x86/xstate.c
> +++ b/xen/arch/x86/xstate.c
> @@ -604,9 +604,164 @@ static bool valid_xcr0(uint64_t xcr0)
>      if ( !(xcr0 & X86_XCR0_BNDREGS) != !(xcr0 & X86_XCR0_BNDCSR) )
>          return false;
>  
> +    /* TILECFG and TILEDATA must be the same. */
> +    if ( !(xcr0 & X86_XCR0_TILE_CFG) != !(xcr0 & X86_XCR0_TILE_DATA) )
> +        return false;
> +
>      return true;
>  }
>  
> +struct xcheck_state {
> +    uint64_t states;
> +    uint32_t uncomp_size;
> +    uint32_t comp_size;
> +};
> +
> +static void __init check_new_xstate(struct xcheck_state *s, uint64_t new)
> +{
> +    uint32_t hw_size;
> +
> +    BUILD_BUG_ON(X86_XCR0_STATES & X86_XSS_STATES);
> +
> +    BUG_ON(s->states & new); /* States only increase. */
> +    BUG_ON(!valid_xcr0(s->states | new)); /* Xen thinks it's a good value. */
> +    BUG_ON(new & ~(X86_XCR0_STATES | X86_XSS_STATES)); /* Known state. */
> +    BUG_ON((new & X86_XCR0_STATES) &&
> +           (new & X86_XSS_STATES)); /* User or supervisor, not both. */
> +
> +    s->states |= new;
> +    if ( new & X86_XCR0_STATES )
> +    {
> +        if ( !set_xcr0(s->states & X86_XCR0_STATES) )
> +            BUG();
> +    }
> +    else
> +        set_msr_xss(s->states & X86_XSS_STATES);
> +
> +    /*
> +     * Check the uncompressed size.  Some XSTATEs are out-of-order and fill in
> +     * prior holes in the state area, so we check that the size doesn't
> +     * decrease.
> +     */
> +    hw_size = cpuid_count_ebx(0xd, 0);

Going forward, do we mean to get rid of XSTATE_CPUID? Else imo it should be
used here (and again below).

> +    if ( hw_size < s->uncomp_size )
> +        panic("XSTATE 0x%016"PRIx64", new bits {%63pbl}, uncompressed hw size %#x < prev size %#x\n",
> +              s->states, &new, hw_size, s->uncomp_size);
> +
> +    s->uncomp_size = hw_size;

Since XSS state doesn't affect uncompressed layout, this looks like largely
dead code for that case. Did you consider moving this into the if() above?
Alternatively, should the comparison use == when dealing with XSS bits?

> +    /*
> +     * Check the compressed size, if available.  All components strictly
> +     * appear in index order.  In principle there are no holes, but some
> +     * components have their base address 64-byte aligned for efficiency
> +     * reasons (e.g. AMX-TILE) and there are other components small enough to
> +     * fit in the gap (e.g. PKRU) without increasing the overall length.
> +     */
> +    hw_size = cpuid_count_ebx(0xd, 1);
> +
> +    if ( cpu_has_xsavec )
> +    {
> +        if ( hw_size < s->comp_size )
> +            panic("XSTATE 0x%016"PRIx64", new bits {%63pbl}, compressed hw size %#x < prev size %#x\n",
> +                  s->states, &new, hw_size, s->comp_size);

Unlike for uncompressed size, can't it be <= here, for - as the comment
says - it being strictly index order, and no component having zero size?

> +        s->comp_size = hw_size;
> +    }
> +    else if ( hw_size ) /* Compressed size reported, but no XSAVEC ? */
> +    {
> +        static bool once;
> +
> +        if ( !once )
> +        {
> +            WARN();
> +            once = true;
> +        }
> +    }
> +}
> +
> +/*
> + * The {un,}compressed XSTATE sizes are reported by dynamic CPUID value, based
> + * on the current %XCR0 and MSR_XSS values.  The exact layout is also feature
> + * and vendor specific.  Cross-check Xen's understanding against real hardware
> + * on boot.
> + *
> + * Testing every combination is prohibitive, so we use a partial approach.
> + * Starting with nothing active, we add new XSTATEs and check that the CPUID
> + * dynamic values never decrease.
> + */
> +static void __init noinline xstate_check_sizes(void)
> +{
> +    uint64_t old_xcr0 = get_xcr0();
> +    uint64_t old_xss = get_msr_xss();
> +    struct xcheck_state s = {};
> +
> +    /*
> +     * User XSTATEs, increasing by index.
> +     *
> +     * Chronologically, Intel and AMD had identical layouts for AVX (YMM).
> +     * AMD introduced LWP in Fam15h, following immediately on from YMM.  Intel
> +     * left an LWP-shaped hole when adding MPX (BND{CSR,REGS}) in Skylake.
> +     * AMD removed LWP in Fam17h, putting PKRU in the same space, breaking
> +     * layout compatibility with Intel and having a knock-on effect on all
> +     * subsequent states.
> +     */
> +    check_new_xstate(&s, X86_XCR0_SSE | X86_XCR0_FP);
> +
> +    if ( cpu_has_avx )
> +        check_new_xstate(&s, X86_XCR0_YMM);
> +
> +    if ( cpu_has_mpx )
> +        check_new_xstate(&s, X86_XCR0_BNDCSR | X86_XCR0_BNDREGS);
> +
> +    if ( cpu_has_avx512f )
> +        check_new_xstate(&s, X86_XCR0_HI_ZMM | X86_XCR0_ZMM | X86_XCR0_OPMASK);
> +
> +    if ( cpu_has_pku )
> +        check_new_xstate(&s, X86_XCR0_PKRU);
> +
> +    if ( boot_cpu_has(X86_FEATURE_AMX_TILE) )
> +        check_new_xstate(&s, X86_XCR0_TILE_DATA | X86_XCR0_TILE_CFG);
> +
> +    if ( boot_cpu_has(X86_FEATURE_LWP) )
> +        check_new_xstate(&s, X86_XCR0_LWP);
> +
> +    /*
> +     * Supervisor XSTATEs, increasing by index.
> +     *
> +     * Intel Broadwell has Processor Trace but no XSAVES.  There doesn't
> +     * appear to have been a new enumeration when X86_XSS_PROC_TRACE was
> +     * introduced in Skylake.
> +     */
> +    if ( cpu_has_xsaves )
> +    {
> +        if ( cpu_has_proc_trace )
> +            check_new_xstate(&s, X86_XSS_PROC_TRACE);
> +
> +        if ( boot_cpu_has(X86_FEATURE_ENQCMD) )
> +            check_new_xstate(&s, X86_XSS_PASID);
> +
> +        if ( boot_cpu_has(X86_FEATURE_CET_SS) ||
> +             boot_cpu_has(X86_FEATURE_CET_IBT) )
> +        {
> +            check_new_xstate(&s, X86_XSS_CET_U);
> +            check_new_xstate(&s, X86_XSS_CET_S);
> +        }
> +
> +        if ( boot_cpu_has(X86_FEATURE_UINTR) )
> +            check_new_xstate(&s, X86_XSS_UINTR);
> +
> +        if ( boot_cpu_has(X86_FEATURE_ARCH_LBR) )
> +            check_new_xstate(&s, X86_XSS_LBR);
> +    }

In principle compressed state checking could be extended to also verify
the offsets are strictly increasing. That, however, would require
interleaving XCR0 and XSS checks, strictly by index. Did you consider (and
then discard) doing so?

Jan
Re: [PATCH v4 2/7] x86/xstate: Cross-check dynamic XSTATE sizes at boot
Posted by Andrew Cooper 5 months ago
On 18/06/2024 11:05 am, Jan Beulich wrote:
> On 17.06.2024 19:39, Andrew Cooper wrote:
>> Right now, xstate_ctxt_size() performs a cross-check of size with CPUID for
>> every call.  This is expensive, being used for domain create/migrate, as well
>> as to service certain guest CPUID instructions.
>>
>> Instead, arrange to check the sizes once at boot.  See the code comments for
>> details.  Right now, it just checks hardware against the algorithm
>> expectations.  Later patches will add further cross-checking.
>>
>> Introduce more X86_XCR0_* and X86_XSS_* constants and CPUID bits.  This is to
>> maximise coverage in the sanity check, even if we don't expect to
>> use/virtualise some of these features any time soon.  Leave HDC and HWP alone
>> for now; we don't have CPUID bits for them stored nicely.
>>
>> Only perform the cross-checks when SELF_TESTS are active.  It's only
>> developers or new hardware liable to trip these checks, and Xen at least
>> tracks "maximum value ever seen in xcr0" for the lifetime of the VM, which we
>> don't want to be tickling in the general case.
>>
>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> I may certainly give R-b on the patch as it is, but I have a few questions
> first:
>
>> --- a/xen/arch/x86/xstate.c
>> +++ b/xen/arch/x86/xstate.c
>> @@ -604,9 +604,164 @@ static bool valid_xcr0(uint64_t xcr0)
>>      if ( !(xcr0 & X86_XCR0_BNDREGS) != !(xcr0 & X86_XCR0_BNDCSR) )
>>          return false;
>>  
>> +    /* TILECFG and TILEDATA must be the same. */
>> +    if ( !(xcr0 & X86_XCR0_TILE_CFG) != !(xcr0 & X86_XCR0_TILE_DATA) )
>> +        return false;
>> +
>>      return true;
>>  }
>>  
>> +struct xcheck_state {
>> +    uint64_t states;
>> +    uint32_t uncomp_size;
>> +    uint32_t comp_size;
>> +};
>> +
>> +static void __init check_new_xstate(struct xcheck_state *s, uint64_t new)
>> +{
>> +    uint32_t hw_size;
>> +
>> +    BUILD_BUG_ON(X86_XCR0_STATES & X86_XSS_STATES);
>> +
>> +    BUG_ON(s->states & new); /* States only increase. */
>> +    BUG_ON(!valid_xcr0(s->states | new)); /* Xen thinks it's a good value. */
>> +    BUG_ON(new & ~(X86_XCR0_STATES | X86_XSS_STATES)); /* Known state. */
>> +    BUG_ON((new & X86_XCR0_STATES) &&
>> +           (new & X86_XSS_STATES)); /* User or supervisor, not both. */
>> +
>> +    s->states |= new;
>> +    if ( new & X86_XCR0_STATES )
>> +    {
>> +        if ( !set_xcr0(s->states & X86_XCR0_STATES) )
>> +            BUG();
>> +    }
>> +    else
>> +        set_msr_xss(s->states & X86_XSS_STATES);
>> +
>> +    /*
>> +     * Check the uncompressed size.  Some XSTATEs are out-of-order and fill in
>> +     * prior holes in the state area, so we check that the size doesn't
>> +     * decrease.
>> +     */
>> +    hw_size = cpuid_count_ebx(0xd, 0);
> Going forward, do we mean to get rid of XSTATE_CPUID? Else imo it should be
> used here (and again below).

All documentation about CPUID, from the vendors and from secondary
sources, is written in terms of numerals, not names.

XSTATE_CPUID is less meaningful than 0xd, and I would prefer to phase it
out.
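
As a minimal sketch of what these reads return (using the same helper as the
patch; the EBX meanings are per the SDM/APM):

    /*
     * Leaf 0xd, sub-leaf 0: EBX = uncompressed (XSAVE) size for the
     * currently enabled XCR0.
     * Leaf 0xd, sub-leaf 1: EBX = compressed (XSAVEC/XSAVES) size for
     * the currently enabled XCR0 | MSR_XSS.
     */
    uint32_t uncomp_size = cpuid_count_ebx(0xd, 0);
    uint32_t comp_size   = cpuid_count_ebx(0xd, 1);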


>> +    if ( hw_size < s->uncomp_size )
>> +        panic("XSTATE 0x%016"PRIx64", new bits {%63pbl}, uncompressed hw size %#x < prev size %#x\n",
>> +              s->states, &new, hw_size, s->uncomp_size);
>> +
>> +    s->uncomp_size = hw_size;
> Since XSS state doesn't affect uncompressed layout, this looks like largely
> dead code for that case. Did you consider moving this into the if() above?

If that were a printk() rather than a panic(), then having the
assignment in the if() would be wrong.

So while it doesn't really matter given the way the logic is currently
written, it's more code, and interferes with manual debugging to move
the check into the if().

> Alternatively, should the comparison use == when dealing with XSS bits?

Hmm.  We probably can make this check work, given that we ascend through
user states first, and the supervisor states second.

Although I'd need to rerun such a change through the entire hardware
lab.  There have been enough unexpected surprises with "obvious" changes
already.

>> +    /*
>> +     * Check the compressed size, if available.  All components strictly
>> +     * appear in index order.  In principle there are no holes, but some
>> +     * components have their base address 64-byte aligned for efficiency
>> +     * reasons (e.g. AMX-TILE) and there are other components small enough to
>> +     * fit in the gap (e.g. PKRU) without increasing the overall length.
>> +     */
>> +    hw_size = cpuid_count_ebx(0xd, 1);
>> +
>> +    if ( cpu_has_xsavec )
>> +    {
>> +        if ( hw_size < s->comp_size )
>> +            panic("XSTATE 0x%016"PRIx64", new bits {%63pbl}, compressed hw size %#x < prev size %#x\n",
>> +                  s->states, &new, hw_size, s->comp_size);
> Unlike for uncompressed size, can't it be <= here, for - as the comment
> says - it being strictly index order, and no component having zero size?

The first version of this patch did have <=, and it really failed on SPR.

When you activate AMX first, then PKRU next, PKRU really does fit in the
alignment hole, and the overall compressed size is the same.


It's a consequence of doing all the user states first, then all the
supervisor states second.  I did have them strictly in index order to
begin with, but then hit the enumeration issue on Broadwell and reworked
xstate_check_sizes() to have a single common cpu_has_xsaves around all
supervisor states.

I could potentially undo that, but the consequence is needing a
cpu_has_xsaves in every call passing in X86_XSS_*.
>> +        s->comp_size = hw_size;
>> +    }
>> +    else if ( hw_size ) /* Compressed size reported, but no XSAVEC ? */
>> +    {
>> +        static bool once;
>> +
>> +        if ( !once )
>> +        {
>> +            WARN();
>> +            once = true;
>> +        }
>> +    }
>> +}
>> +
>> +/*
>> + * The {un,}compressed XSTATE sizes are reported by dynamic CPUID value, based
>> + * on the current %XCR0 and MSR_XSS values.  The exact layout is also feature
>> + * and vendor specific.  Cross-check Xen's understanding against real hardware
>> + * on boot.
>> + *
>> + * Testing every combination is prohibitive, so we use a partial approach.
>> + * Starting with nothing active, we add new XSTATEs and check that the CPUID
>> + * dynamic values never decrease.
>> + */
>> +static void __init noinline xstate_check_sizes(void)
>> +{
>> +    uint64_t old_xcr0 = get_xcr0();
>> +    uint64_t old_xss = get_msr_xss();
>> +    struct xcheck_state s = {};
>> +
>> +    /*
>> +     * User XSTATEs, increasing by index.
>> +     *
>> +     * Chronologically, Intel and AMD had identical layouts for AVX (YMM).
>> +     * AMD introduced LWP in Fam15h, following immediately on from YMM.  Intel
>> +     * left an LWP-shaped hole when adding MPX (BND{CSR,REGS}) in Skylake.
>> +     * AMD removed LWP in Fam17h, putting PKRU in the same space, breaking
>> +     * layout compatibility with Intel and having a knock-on effect on all
>> +     * subsequent states.
>> +     */
>> +    check_new_xstate(&s, X86_XCR0_SSE | X86_XCR0_FP);
>> +
>> +    if ( cpu_has_avx )
>> +        check_new_xstate(&s, X86_XCR0_YMM);
>> +
>> +    if ( cpu_has_mpx )
>> +        check_new_xstate(&s, X86_XCR0_BNDCSR | X86_XCR0_BNDREGS);
>> +
>> +    if ( cpu_has_avx512f )
>> +        check_new_xstate(&s, X86_XCR0_HI_ZMM | X86_XCR0_ZMM | X86_XCR0_OPMASK);
>> +
>> +    if ( cpu_has_pku )
>> +        check_new_xstate(&s, X86_XCR0_PKRU);
>> +
>> +    if ( boot_cpu_has(X86_FEATURE_AMX_TILE) )
>> +        check_new_xstate(&s, X86_XCR0_TILE_DATA | X86_XCR0_TILE_CFG);
>> +
>> +    if ( boot_cpu_has(X86_FEATURE_LWP) )
>> +        check_new_xstate(&s, X86_XCR0_LWP);
>> +
>> +    /*
>> +     * Supervisor XSTATEs, increasing by index.
>> +     *
>> +     * Intel Broadwell has Processor Trace but no XSAVES.  There doesn't
>> +     * appear to have been a new enumeration when X86_XSS_PROC_TRACE was
>> +     * introduced in Skylake.
>> +     */
>> +    if ( cpu_has_xsaves )
>> +    {
>> +        if ( cpu_has_proc_trace )
>> +            check_new_xstate(&s, X86_XSS_PROC_TRACE);
>> +
>> +        if ( boot_cpu_has(X86_FEATURE_ENQCMD) )
>> +            check_new_xstate(&s, X86_XSS_PASID);
>> +
>> +        if ( boot_cpu_has(X86_FEATURE_CET_SS) ||
>> +             boot_cpu_has(X86_FEATURE_CET_IBT) )
>> +        {
>> +            check_new_xstate(&s, X86_XSS_CET_U);
>> +            check_new_xstate(&s, X86_XSS_CET_S);
>> +        }
>> +
>> +        if ( boot_cpu_has(X86_FEATURE_UINTR) )
>> +            check_new_xstate(&s, X86_XSS_UINTR);
>> +
>> +        if ( boot_cpu_has(X86_FEATURE_ARCH_LBR) )
>> +            check_new_xstate(&s, X86_XSS_LBR);
>> +    }
> In principle compressed state checking could be extended to also verify
> the offsets are strictly increasing. That, however, would require
> interleaving XCR0 and XSS checks, strictly by index. Did you consider (and
> then discard) doing so?

What offsets are you referring to?

Compressed images have no offset information.  Every "row" which has
ecx.xss set has ebx (offset) reported as 0.  The offset information for
the user rows is only applicable for uncompressed images.

The layout of a compressed image is a strict function of RFBM derived
from component sizes alone.
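
As a sketch of that function (illustrative only, not part of the patch;
"rfbm" is a stand-in variable, the per-component size is CPUID.0xD[i].EAX,
and ECX bit 1 is the 64-byte alignment flag):

    unsigned int i, eax, ebx, ecx, edx;
    uint32_t offset = XSTATE_AREA_MIN_SIZE; /* legacy area + XSAVE header */

    for ( i = 2; i < 63; ++i )
    {
        if ( !(rfbm & (1ULL << i)) )
            continue;

        cpuid_count(0xd, i, &eax, &ebx, &ecx, &edx);

        if ( ecx & 2 )             /* 64-byte aligned component? */
            offset = ROUNDUP(offset, 64);

        offset += eax;             /* size of component i */
    }
    /* offset now matches the compressed size from CPUID.0xD[1].EBX. */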

~Andrew

Re: [PATCH v4 2/7] x86/xstate: Cross-check dynamic XSTATE sizes at boot
Posted by Jan Beulich 5 months ago
On 18.06.2024 17:21, Andrew Cooper wrote:
> On 18/06/2024 11:05 am, Jan Beulich wrote:
>> On 17.06.2024 19:39, Andrew Cooper wrote:
>>> --- a/xen/arch/x86/xstate.c
>>> +++ b/xen/arch/x86/xstate.c
>>> @@ -604,9 +604,164 @@ static bool valid_xcr0(uint64_t xcr0)
>>>      if ( !(xcr0 & X86_XCR0_BNDREGS) != !(xcr0 & X86_XCR0_BNDCSR) )
>>>          return false;
>>>  
>>> +    /* TILECFG and TILEDATA must be the same. */
>>> +    if ( !(xcr0 & X86_XCR0_TILE_CFG) != !(xcr0 & X86_XCR0_TILE_DATA) )
>>> +        return false;
>>> +
>>>      return true;
>>>  }
>>>  
>>> +struct xcheck_state {
>>> +    uint64_t states;
>>> +    uint32_t uncomp_size;
>>> +    uint32_t comp_size;
>>> +};
>>> +
>>> +static void __init check_new_xstate(struct xcheck_state *s, uint64_t new)
>>> +{
>>> +    uint32_t hw_size;
>>> +
>>> +    BUILD_BUG_ON(X86_XCR0_STATES & X86_XSS_STATES);
>>> +
>>> +    BUG_ON(s->states & new); /* States only increase. */
>>> +    BUG_ON(!valid_xcr0(s->states | new)); /* Xen thinks it's a good value. */
>>> +    BUG_ON(new & ~(X86_XCR0_STATES | X86_XSS_STATES)); /* Known state. */
>>> +    BUG_ON((new & X86_XCR0_STATES) &&
>>> +           (new & X86_XSS_STATES)); /* User or supervisor, not both. */
>>> +
>>> +    s->states |= new;
>>> +    if ( new & X86_XCR0_STATES )
>>> +    {
>>> +        if ( !set_xcr0(s->states & X86_XCR0_STATES) )
>>> +            BUG();
>>> +    }
>>> +    else
>>> +        set_msr_xss(s->states & X86_XSS_STATES);
>>> +
>>> +    /*
>>> +     * Check the uncompressed size.  Some XSTATEs are out-of-order and fill in
>>> +     * prior holes in the state area, so we check that the size doesn't
>>> +     * decrease.
>>> +     */
>>> +    hw_size = cpuid_count_ebx(0xd, 0);
>> Going forward, do we mean to get rid of XSTATE_CPUID? Else imo it should be
>> used here (and again below).
> 
> All documentation about CPUID, from the vendors and from secondary
> sources, is written in terms of numerals, not names.
> 
> XSTATE_CPUID is less meaningful than 0xd, and I would prefer to phase it
> out.

Fair enough; hence why I was asking.

>>> +    if ( hw_size < s->uncomp_size )
>>> +        panic("XSTATE 0x%016"PRIx64", new bits {%63pbl}, uncompressed hw size %#x < prev size %#x\n",
>>> +              s->states, &new, hw_size, s->uncomp_size);
>>> +
>>> +    s->uncomp_size = hw_size;
>> Since XSS state doesn't affect uncompressed layout, this looks like largely
>> dead code for that case. Did you consider moving this into the if() above?
> 
> If that were a printk() rather than a panic(), then having the
> assignment in the if() would be wrong.

Would it? For an XSS component the uncompressed size isn't supposed to
change. (Or else doing an == check, as per below, wouldn't be an option.)

> So while it doesn't really matter given the way the logic is currently
> written, it's more code, and interferes with manual debugging to move
> the check into the if().
> 
>> Alternatively, should the comparison use == when dealing with XSS bits?
> 
> Hmm.  We probably can make this check work, given that we ascend through
> user states first, and the supervisor states second.
> 
> Although I'd need to rerun such a change through the entire hardware
> lab.  There have been enough unexpected surprises with "obvious" changes
> already.

Well, we can of course decide to go with what you have for now, and then
see about tightening the check. I fear though that doing so may then be
forgotten ...

>>> +    /*
>>> +     * Check the compressed size, if available.  All components strictly
>>> +     * appear in index order.  In principle there are no holes, but some
>>> +     * components have their base address 64-byte aligned for efficiency
>>> +     * reasons (e.g. AMX-TILE) and there are other components small enough to
>>> +     * fit in the gap (e.g. PKRU) without increasing the overall length.
>>> +     */
>>> +    hw_size = cpuid_count_ebx(0xd, 1);
>>> +
>>> +    if ( cpu_has_xsavec )
>>> +    {
>>> +        if ( hw_size < s->comp_size )
>>> +            panic("XSTATE 0x%016"PRIx64", new bits {%63pbl}, compressed hw size %#x < prev size %#x\n",
>>> +                  s->states, &new, hw_size, s->comp_size);
>> Unlike for uncompressed size, can't it be <= here, for - as the comment
>> says - it being strictly index order, and no component having zero size?
> 
> The first version of this patch did have <=, and it really failed on SPR.
> 
> When you activate AMX first, then PKRU next, PKRU really does fit in the
> alignment hole, and the overall compressed size is the same.
> 
> 
> It's a consequence of doing all the user states first, then all the
> supervisor states second.  I did have them strictly in index order to
> begin with, but then hit the enumeration issue on Broadwell and reworked
> xstate_check_sizes() to have a single common cpu_has_xsaves around all
> supervisor states.
> 
> I could potentially undo that, but the consequence is needing a
> cpu_has_xsaves in every call passing in X86_XSS_*.

Just to say as much: To me, doing things strictly in order would feel more
"natural" (even if, see below, the other reason that would have seemed
desirable, really has vaporized). Other than the need for cpu_has_xsaves,
is there a particular reason you split things as all XCR0 first, then all
XSS? (I'm a little worried anyway that this code will need updating for
each new component addition. Yet "automating" this won't work, because of
the need for the cpu_has_* checks.)

>>> +        s->comp_size = hw_size;
>>> +    }
>>> +    else if ( hw_size ) /* Compressed size reported, but no XSAVEC ? */
>>> +    {
>>> +        static bool once;
>>> +
>>> +        if ( !once )
>>> +        {
>>> +            WARN();
>>> +            once = true;
>>> +        }
>>> +    }
>>> +}
>>> +
>>> +/*
>>> + * The {un,}compressed XSTATE sizes are reported by dynamic CPUID value, based
>>> + * on the current %XCR0 and MSR_XSS values.  The exact layout is also feature
>>> + * and vendor specific.  Cross-check Xen's understanding against real hardware
>>> + * on boot.
>>> + *
>>> + * Testing every combination is prohibitive, so we use a partial approach.
>>> + * Starting with nothing active, we add new XSTATEs and check that the CPUID
>>> + * dynamic values never decrease.
>>> + */
>>> +static void __init noinline xstate_check_sizes(void)
>>> +{
>>> +    uint64_t old_xcr0 = get_xcr0();
>>> +    uint64_t old_xss = get_msr_xss();
>>> +    struct xcheck_state s = {};
>>> +
>>> +    /*
>>> +     * User XSTATEs, increasing by index.
>>> +     *
>>> +     * Chronologically, Intel and AMD had identical layouts for AVX (YMM).
>>> +     * AMD introduced LWP in Fam15h, following immediately on from YMM.  Intel
>>> +     * left an LWP-shaped hole when adding MPX (BND{CSR,REGS}) in Skylake.
>>> +     * AMD removed LWP in Fam17h, putting PKRU in the same space, breaking
>>> +     * layout compatibility with Intel and having a knock-on effect on all
>>> +     * subsequent states.
>>> +     */
>>> +    check_new_xstate(&s, X86_XCR0_SSE | X86_XCR0_FP);
>>> +
>>> +    if ( cpu_has_avx )
>>> +        check_new_xstate(&s, X86_XCR0_YMM);
>>> +
>>> +    if ( cpu_has_mpx )
>>> +        check_new_xstate(&s, X86_XCR0_BNDCSR | X86_XCR0_BNDREGS);
>>> +
>>> +    if ( cpu_has_avx512f )
>>> +        check_new_xstate(&s, X86_XCR0_HI_ZMM | X86_XCR0_ZMM | X86_XCR0_OPMASK);
>>> +
>>> +    if ( cpu_has_pku )
>>> +        check_new_xstate(&s, X86_XCR0_PKRU);
>>> +
>>> +    if ( boot_cpu_has(X86_FEATURE_AMX_TILE) )
>>> +        check_new_xstate(&s, X86_XCR0_TILE_DATA | X86_XCR0_TILE_CFG);
>>> +
>>> +    if ( boot_cpu_has(X86_FEATURE_LWP) )
>>> +        check_new_xstate(&s, X86_XCR0_LWP);
>>> +
>>> +    /*
>>> +     * Supervisor XSTATEs, increasing by index.
>>> +     *
>>> +     * Intel Broadwell has Processor Trace but no XSAVES.  There doesn't
>>> +     * appear to have been a new enumeration when X86_XSS_PROC_TRACE was
>>> +     * introduced in Skylake.
>>> +     */
>>> +    if ( cpu_has_xsaves )
>>> +    {
>>> +        if ( cpu_has_proc_trace )
>>> +            check_new_xstate(&s, X86_XSS_PROC_TRACE);
>>> +
>>> +        if ( boot_cpu_has(X86_FEATURE_ENQCMD) )
>>> +            check_new_xstate(&s, X86_XSS_PASID);
>>> +
>>> +        if ( boot_cpu_has(X86_FEATURE_CET_SS) ||
>>> +             boot_cpu_has(X86_FEATURE_CET_IBT) )
>>> +        {
>>> +            check_new_xstate(&s, X86_XSS_CET_U);
>>> +            check_new_xstate(&s, X86_XSS_CET_S);
>>> +        }
>>> +
>>> +        if ( boot_cpu_has(X86_FEATURE_UINTR) )
>>> +            check_new_xstate(&s, X86_XSS_UINTR);
>>> +
>>> +        if ( boot_cpu_has(X86_FEATURE_ARCH_LBR) )
>>> +            check_new_xstate(&s, X86_XSS_LBR);
>>> +    }
>> In principle compressed state checking could be extended to also verify
>> the offsets are strictly increasing. That, however, would require
>> interleaving XCR0 and XSS checks, strictly by index. Did you consider (and
>> then discard) doing so?
> 
> What offsets are you referring to?
> 
> Compressed images have no offset information.  Every "row" which has
> ecx.xss set has ebx (offset) reported as 0.  The offset information for
> the user rows is only applicable for uncompressed images.

Hmm, right, nothing to compare our calculations against. And for the
compressed form the (calculated) offsets aren't any different from the
previous component's accumulated size.

Jan

[PATCH for-4.19 v4.5 2/7] x86/xstate: Cross-check dynamic XSTATE sizes at boot
Posted by Andrew Cooper 5 months ago
Right now, xstate_ctxt_size() performs a cross-check of size with CPUID for
every call.  This is expensive, being used for domain create/migrate, as well
as to service certain guest CPUID instructions.

Instead, arrange to check the sizes once at boot.  See the code comments for
details.  Right now, it just checks hardware against the algorithm
expectations.  Later patches will cross-check Xen's XSTATE calculations too.

Introduce more X86_XCR0_* and X86_XSS_* constants and CPUID bits.  This is to
maximise coverage in the sanity check, even if we don't expect to
use/virtualise some of these features any time soon.  Leave HDC and HWP alone
for now; we don't have CPUID bits for them stored nicely.

Only perform the cross-checks when SELF_TESTS are active.  It's only
developers or new hardware liable to trip these checks, and Xen at least
tracks "maximum value ever seen in xcr0" for the lifetime of the VM, which we
don't want to be tickling in the general case.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Oleksii Kurochko <oleksii.kurochko@gmail.com>

v3:
 * New
v4:
 * Rebase over CONFIG_SELF_TESTS
 * Swap one BUG_ON() for a WARN()

v4.5:
 * Reorder xstate_check_sizes() to strictly increase by index.  In turn this
   strengthens the compressed check to "size always increases".
 * For new supervisor states, check that the uncompressed size doesn't change.

On Sapphire Rapids with the whole series inc diagnostics, we get this pattern:

  (XEN) *** check_new_xstate(, 0x00000003)
  (XEN) *** check_new_xstate(, 0x00000004)
  (XEN) *** check_new_xstate(, 0x000000e0)
  (XEN) *** check_new_xstate(, 0x00000100)
  (XEN) *** check_new_xstate(, 0x00000200)
  (XEN) *** check_new_xstate(, 0x00000400)
  (XEN) *** check_new_xstate(, 0x00000800)
  (XEN) *** check_new_xstate(, 0x00001000)
  (XEN) *** check_new_xstate(, 0x00004000)
  (XEN) *** check_new_xstate(, 0x00008000)
  (XEN) *** check_new_xstate(, 0x00060000)

and on Genoa, this pattern:

  (XEN) *** check_new_xstate(, 0x00000003)
  (XEN) *** check_new_xstate(, 0x00000004)
  (XEN) *** check_new_xstate(, 0x000000e0)
  (XEN) *** check_new_xstate(, 0x00000200)
  (XEN) *** check_new_xstate(, 0x00000800)
  (XEN) *** check_new_xstate(, 0x00001000)
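
Decoded as before, the SPR sequence now runs strictly by component index:
PROC_TRACE (0x00000100) precedes PKRU (0x00000200), and AMX (0x00060000)
comes last instead of before the supervisor states.  The Genoa sequence is
unchanged, as it was already in index order.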
---
 xen/arch/x86/include/asm/x86-defns.h        |  25 ++-
 xen/arch/x86/xstate.c                       | 170 ++++++++++++++++++++
 xen/include/public/arch-x86/cpufeatureset.h |   3 +
 3 files changed, 197 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/include/asm/x86-defns.h b/xen/arch/x86/include/asm/x86-defns.h
index 48d7a3b7af45..d7602ab225c4 100644
--- a/xen/arch/x86/include/asm/x86-defns.h
+++ b/xen/arch/x86/include/asm/x86-defns.h
@@ -77,7 +77,7 @@
 #define X86_CR4_PKS        0x01000000 /* Protection Key Supervisor */
 
 /*
- * XSTATE component flags in XCR0
+ * XSTATE component flags in XCR0 | MSR_XSS
  */
 #define X86_XCR0_FP_POS           0
 #define X86_XCR0_FP               (1ULL << X86_XCR0_FP_POS)
@@ -95,11 +95,34 @@
 #define X86_XCR0_ZMM              (1ULL << X86_XCR0_ZMM_POS)
 #define X86_XCR0_HI_ZMM_POS       7
 #define X86_XCR0_HI_ZMM           (1ULL << X86_XCR0_HI_ZMM_POS)
+#define X86_XSS_PROC_TRACE        (_AC(1, ULL) <<  8)
 #define X86_XCR0_PKRU_POS         9
 #define X86_XCR0_PKRU             (1ULL << X86_XCR0_PKRU_POS)
+#define X86_XSS_PASID             (_AC(1, ULL) << 10)
+#define X86_XSS_CET_U             (_AC(1, ULL) << 11)
+#define X86_XSS_CET_S             (_AC(1, ULL) << 12)
+#define X86_XSS_HDC               (_AC(1, ULL) << 13)
+#define X86_XSS_UINTR             (_AC(1, ULL) << 14)
+#define X86_XSS_LBR               (_AC(1, ULL) << 15)
+#define X86_XSS_HWP               (_AC(1, ULL) << 16)
+#define X86_XCR0_TILE_CFG         (_AC(1, ULL) << 17)
+#define X86_XCR0_TILE_DATA        (_AC(1, ULL) << 18)
 #define X86_XCR0_LWP_POS          62
 #define X86_XCR0_LWP              (1ULL << X86_XCR0_LWP_POS)
 
+#define X86_XCR0_STATES                                                 \
+    (X86_XCR0_FP | X86_XCR0_SSE | X86_XCR0_YMM | X86_XCR0_BNDREGS |     \
+     X86_XCR0_BNDCSR | X86_XCR0_OPMASK | X86_XCR0_ZMM |                 \
+     X86_XCR0_HI_ZMM | X86_XCR0_PKRU | X86_XCR0_TILE_CFG |              \
+     X86_XCR0_TILE_DATA |                                               \
+     X86_XCR0_LWP)
+
+#define X86_XSS_STATES                                                  \
+    (X86_XSS_PROC_TRACE | X86_XSS_PASID | X86_XSS_CET_U |               \
+     X86_XSS_CET_S | X86_XSS_HDC | X86_XSS_UINTR | X86_XSS_LBR |        \
+     X86_XSS_HWP |                                                      \
+     0)
+
 /*
  * Debug status flags in DR6.
  *
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 75788147966a..408d9dd10897 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -604,9 +604,176 @@ static bool valid_xcr0(uint64_t xcr0)
     if ( !(xcr0 & X86_XCR0_BNDREGS) != !(xcr0 & X86_XCR0_BNDCSR) )
         return false;
 
+    /* TILECFG and TILEDATA must be the same. */
+    if ( !(xcr0 & X86_XCR0_TILE_CFG) != !(xcr0 & X86_XCR0_TILE_DATA) )
+        return false;
+
     return true;
 }
 
+struct xcheck_state {
+    uint64_t states;
+    uint32_t uncomp_size;
+    uint32_t comp_size;
+};
+
+static void __init check_new_xstate(struct xcheck_state *s, uint64_t new)
+{
+    uint32_t hw_size;
+
+    BUILD_BUG_ON(X86_XCR0_STATES & X86_XSS_STATES);
+
+    BUG_ON(new <= s->states); /* States strictly increase by index. */
+    BUG_ON(s->states & new);  /* States only accumulate. */
+    BUG_ON(!valid_xcr0(s->states | new)); /* Xen thinks it's a good value. */
+    BUG_ON(new & ~(X86_XCR0_STATES | X86_XSS_STATES)); /* Known state. */
+    BUG_ON((new & X86_XCR0_STATES) &&
+           (new & X86_XSS_STATES)); /* User or supervisor, not both. */
+
+    s->states |= new;
+    if ( new & X86_XCR0_STATES )
+    {
+        if ( !set_xcr0(s->states & X86_XCR0_STATES) )
+            BUG();
+    }
+    else
+        set_msr_xss(s->states & X86_XSS_STATES);
+
+    /*
+     * Check the uncompressed size.  First ask hardware.
+     */
+    hw_size = cpuid_count_ebx(0xd, 0);
+
+    if ( new & X86_XSS_STATES )
+    {
+        /*
+         * Supervisor states don't exist in an uncompressed image, so check
+         * that the uncompressed size doesn't change.  Otherwise...
+         */
+        if ( hw_size != s->uncomp_size )
+            panic("XSTATE 0x%016"PRIx64", new sup bits {%63pbl}, uncompressed hw size %#x != prev size %#x\n",
+                  s->states, &new, hw_size, s->uncomp_size);
+    }
+    else
+    {
+        /*
+         * ... some user XSTATEs are out-of-order and fill in prior holes.
+         * The best check we make is that the size never decreases.
+         */
+        if ( hw_size < s->uncomp_size )
+            panic("XSTATE 0x%016"PRIx64", new bits {%63pbl}, uncompressed hw size %#x < prev size %#x\n",
+                  s->states, &new, hw_size, s->uncomp_size);
+    }
+
+    s->uncomp_size = hw_size;
+
+    /*
+     * Check the compressed size, if available.
+     */
+    hw_size = cpuid_count_ebx(0xd, 1);
+
+    if ( cpu_has_xsavec )
+    {
+        /*
+         * All components strictly appear in index order, irrespective of
+         * whether they're user or supervisor.  As each component also has
+         * non-zero size, the accumulated size should strictly increase.
+         */
+        if ( hw_size <= s->comp_size )
+            panic("XSTATE 0x%016"PRIx64", new bits {%63pbl}, compressed hw size %#x <= prev size %#x\n",
+                  s->states, &new, hw_size, s->comp_size);
+
+        s->comp_size = hw_size;
+    }
+    else if ( hw_size ) /* Compressed size reported, but no XSAVEC ? */
+    {
+        static bool once;
+
+        if ( !once )
+        {
+            WARN();
+            once = true;
+        }
+    }
+}
+
+/*
+ * The {un,}compressed XSTATE sizes are reported by dynamic CPUID value, based
+ * on the current %XCR0 and MSR_XSS values.  The exact layout is also feature
+ * and vendor specific.  Cross-check Xen's understanding against real hardware
+ * on boot.
+ *
+ * Testing every combination is prohibitive, so we use a partial approach.
+ * Starting with nothing active, we add new XSTATEs and check that the CPUID
+ * dynamic values never decrease.
+ */
+static void __init noinline xstate_check_sizes(void)
+{
+    uint64_t old_xcr0 = get_xcr0();
+    uint64_t old_xss = get_msr_xss();
+    struct xcheck_state s = {};
+
+    /*
+     * User and supervisor XSTATEs, increasing by index.
+     *
+     * Chronologically, Intel and AMD had identical layouts for AVX (YMM).
+     * AMD introduced LWP in Fam15h, following immediately on from YMM.  Intel
+     * left an LWP-shaped hole when adding MPX (BND{CSR,REGS}) in Skylake.
+     * AMD removed LWP in Fam17h, putting PKRU in the same space, breaking
+     * layout compatibility with Intel and having a knock-on effect on all
+     * subsequent states.
+     */
+    check_new_xstate(&s, X86_XCR0_SSE | X86_XCR0_FP);
+
+    if ( cpu_has_avx )
+        check_new_xstate(&s, X86_XCR0_YMM);
+
+    if ( cpu_has_mpx )
+        check_new_xstate(&s, X86_XCR0_BNDCSR | X86_XCR0_BNDREGS);
+
+    if ( cpu_has_avx512f )
+        check_new_xstate(&s, X86_XCR0_HI_ZMM | X86_XCR0_ZMM | X86_XCR0_OPMASK);
+
+    /*
+     * Intel Broadwell has Processor Trace but no XSAVES.  There doesn't
+     * appear to have been a new enumeration when X86_XSS_PROC_TRACE was
+     * introduced in Skylake.
+     */
+    if ( cpu_has_xsaves && cpu_has_proc_trace )
+        check_new_xstate(&s, X86_XSS_PROC_TRACE);
+
+    if ( cpu_has_pku )
+        check_new_xstate(&s, X86_XCR0_PKRU);
+
+    if ( cpu_has_xsaves && boot_cpu_has(X86_FEATURE_ENQCMD) )
+        check_new_xstate(&s, X86_XSS_PASID);
+
+    if ( cpu_has_xsaves && (boot_cpu_has(X86_FEATURE_CET_SS) ||
+                            boot_cpu_has(X86_FEATURE_CET_IBT)) )
+    {
+        check_new_xstate(&s, X86_XSS_CET_U);
+        check_new_xstate(&s, X86_XSS_CET_S);
+    }
+
+    if ( cpu_has_xsaves && boot_cpu_has(X86_FEATURE_UINTR) )
+        check_new_xstate(&s, X86_XSS_UINTR);
+
+    if ( cpu_has_xsaves && boot_cpu_has(X86_FEATURE_ARCH_LBR) )
+        check_new_xstate(&s, X86_XSS_LBR);
+
+    if ( boot_cpu_has(X86_FEATURE_AMX_TILE) )
+        check_new_xstate(&s, X86_XCR0_TILE_DATA | X86_XCR0_TILE_CFG);
+
+    if ( boot_cpu_has(X86_FEATURE_LWP) )
+        check_new_xstate(&s, X86_XCR0_LWP);
+
+    /* Restore old state now the test is done. */
+    if ( !set_xcr0(old_xcr0) )
+        BUG();
+    if ( cpu_has_xsaves )
+        set_msr_xss(old_xss);
+}
+
 /* Collect the information of processor's extended state */
 void xstate_init(struct cpuinfo_x86 *c)
 {
@@ -683,6 +850,9 @@ void xstate_init(struct cpuinfo_x86 *c)
 
     if ( setup_xstate_features(bsp) && bsp )
         BUG();
+
+    if ( IS_ENABLED(CONFIG_SELF_TESTS) && bsp )
+        xstate_check_sizes();
 }
 
 int validate_xstate(const struct domain *d, uint64_t xcr0, uint64_t xcr0_accum,
diff --git a/xen/include/public/arch-x86/cpufeatureset.h b/xen/include/public/arch-x86/cpufeatureset.h
index 6627453e3985..d9eba5e9a714 100644
--- a/xen/include/public/arch-x86/cpufeatureset.h
+++ b/xen/include/public/arch-x86/cpufeatureset.h
@@ -266,6 +266,7 @@ XEN_CPUFEATURE(IBPB_RET,      8*32+30) /*A  IBPB clears RSB/RAS too. */
 XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A  AVX512 Neural Network Instructions */
 XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A  AVX512 Multiply Accumulation Single Precision */
 XEN_CPUFEATURE(FSRM,          9*32+ 4) /*A  Fast Short REP MOVS */
+XEN_CPUFEATURE(UINTR,         9*32+ 5) /*   User-mode Interrupts */
 XEN_CPUFEATURE(AVX512_VP2INTERSECT, 9*32+8) /*a  VP2INTERSECT{D,Q} insns */
 XEN_CPUFEATURE(SRBDS_CTRL,    9*32+ 9) /*   MSR_MCU_OPT_CTRL and RNGDS_MITG_DIS. */
 XEN_CPUFEATURE(MD_CLEAR,      9*32+10) /*!A| VERW clears microarchitectural buffers */
@@ -274,8 +275,10 @@ XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
 XEN_CPUFEATURE(SERIALIZE,     9*32+14) /*A  SERIALIZE insn */
 XEN_CPUFEATURE(HYBRID,        9*32+15) /*   Heterogeneous platform */
 XEN_CPUFEATURE(TSXLDTRK,      9*32+16) /*a  TSX load tracking suspend/resume insns */
+XEN_CPUFEATURE(ARCH_LBR,      9*32+19) /*   Architectural Last Branch Record */
 XEN_CPUFEATURE(CET_IBT,       9*32+20) /*   CET - Indirect Branch Tracking */
 XEN_CPUFEATURE(AVX512_FP16,   9*32+23) /*A  AVX512 FP16 instructions */
+XEN_CPUFEATURE(AMX_TILE,      9*32+24) /*   AMX Tile architecture */
 XEN_CPUFEATURE(IBRSB,         9*32+26) /*A  IBRS and IBPB support (used by Intel) */
 XEN_CPUFEATURE(STIBP,         9*32+27) /*A  STIBP */
 XEN_CPUFEATURE(L1D_FLUSH,     9*32+28) /*S  MSR_FLUSH_CMD and L1D flush. */
-- 
2.39.2


Re: [PATCH for-4.19 v4.5 2/7] x86/xstate: Cross-check dynamic XSTATE sizes at boot
Posted by Jan Beulich 5 months ago
On 19.06.2024 12:46, Andrew Cooper wrote:
> Right now, xstate_ctxt_size() performs a cross-check of size with CPUID for
> every call.  This is expensive, being used for domain create/migrate, as well
> as to service certain guest CPUID instructions.
> 
> Instead, arrange to check the sizes once at boot.  See the code comments for
> details.  Right now, it just checks hardware against the algorithm
> expectations.  Later patches will cross-check Xen's XSTATE calculations too.
> 
> Introduce more X86_XCR0_* and X86_XSS_* constants and CPUID bits.  This is to
> maximise coverage in the sanity check, even if we don't expect to
> use/virtualise some of these features any time soon.  Leave HDC and HWP alone
> for now; we don't have CPUID bits for them stored nicely.
> 
> Only perform the cross-checks when SELF_TESTS are active.  It's only
> developers or new hardware liable to trip these checks, and Xen at least
> tracks "maximum value ever seen in xcr0" for the lifetime of the VM, which we
> don't want to be tickling in the general case.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>