[PATCH v4 7/7] x86/defns: Clean up X86_{XCR0,XSS}_* constants

With the exception of one case in read_bndcfgu(), which can use ilog2()
instead, the *_POS defines are unused.  Drop them.
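
As an aside for reviewers (not part of the patch): for a single-bit mask,
ilog2() recovers the bit position that the old *_POS constants spelled out
by hand, so the array index in read_bndcfgu() is unchanged.  A minimal
standalone sketch, where my_ilog2() is only a stand-in for Xen's ilog2(),
not its actual implementation:

    #include <assert.h>

    #define X86_XCR0_BNDCSR_POS  4                              /* old style */
    #define X86_XCR0_BNDCSR      (1ULL << X86_XCR0_BNDCSR_POS)  /* bit 4 */

    /* Stand-in for ilog2() on a non-zero 64-bit value. */
    static unsigned int my_ilog2(unsigned long long x)
    {
        return 63 - __builtin_clzll(x);
    }

    int main(void)
    {
        /* xstate_offsets[ilog2(X86_XCR0_BNDCSR)] indexes the same slot
         * as xstate_offsets[X86_XCR0_BNDCSR_POS] did. */
        assert(my_ilog2(X86_XCR0_BNDCSR) == X86_XCR0_BNDCSR_POS);
        return 0;
    }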

Rename X86_XCR0_FP to X86_XCR0_X87, as the latter matches the naming used
by both the SDM and the APM.

No functional change.
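
Also as an aside (not part of the patch): the rewritten defines follow the
style already used by the neighbouring X86_XSS_* entries, building each
constant with _AC() so the same define is usable from both C and assembly.
A simplified sketch with local stand-ins for Xen's real _AC() macro:

    #ifndef __ASSEMBLY__
    # define MY__AC(X, Y) (X##Y)
    # define MY_AC(X, Y)  MY__AC(X, Y)   /* C: paste the ULL suffix on */
    #else
    # define MY_AC(X, Y)  X              /* asm: C suffixes not accepted */
    #endif

    #define MY_XCR0_BNDCSR (MY_AC(1, ULL) << 4)   /* 0x10ULL in C */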

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Oleksii Kurochko <oleksii.kurochko@gmail.com>

v3:
 * New
---
 xen/arch/x86/i387.c                  |  2 +-
 xen/arch/x86/include/asm/x86-defns.h | 32 ++++++++++------------------
 xen/arch/x86/include/asm/xstate.h    |  4 ++--
 xen/arch/x86/xstate.c                | 18 ++++++++--------
 4 files changed, 23 insertions(+), 33 deletions(-)

diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 7a4297cc921e..fcdee10a6e69 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -369,7 +369,7 @@ void vcpu_setup_fpu(struct vcpu *v, struct xsave_struct *xsave_area,
         {
             v->arch.xsave_area->xsave_hdr.xstate_bv &= ~XSTATE_FP_SSE;
             if ( fcw_default != FCW_DEFAULT )
-                v->arch.xsave_area->xsave_hdr.xstate_bv |= X86_XCR0_FP;
+                v->arch.xsave_area->xsave_hdr.xstate_bv |= X86_XCR0_X87;
         }
     }
 
diff --git a/xen/arch/x86/include/asm/x86-defns.h b/xen/arch/x86/include/asm/x86-defns.h
index d7602ab225c4..3bcdbaccd3aa 100644
--- a/xen/arch/x86/include/asm/x86-defns.h
+++ b/xen/arch/x86/include/asm/x86-defns.h
@@ -79,25 +79,16 @@
 /*
  * XSTATE component flags in XCR0 | MSR_XSS
  */
-#define X86_XCR0_FP_POS           0
-#define X86_XCR0_FP               (1ULL << X86_XCR0_FP_POS)
-#define X86_XCR0_SSE_POS          1
-#define X86_XCR0_SSE              (1ULL << X86_XCR0_SSE_POS)
-#define X86_XCR0_YMM_POS          2
-#define X86_XCR0_YMM              (1ULL << X86_XCR0_YMM_POS)
-#define X86_XCR0_BNDREGS_POS      3
-#define X86_XCR0_BNDREGS          (1ULL << X86_XCR0_BNDREGS_POS)
-#define X86_XCR0_BNDCSR_POS       4
-#define X86_XCR0_BNDCSR           (1ULL << X86_XCR0_BNDCSR_POS)
-#define X86_XCR0_OPMASK_POS       5
-#define X86_XCR0_OPMASK           (1ULL << X86_XCR0_OPMASK_POS)
-#define X86_XCR0_ZMM_POS          6
-#define X86_XCR0_ZMM              (1ULL << X86_XCR0_ZMM_POS)
-#define X86_XCR0_HI_ZMM_POS       7
-#define X86_XCR0_HI_ZMM           (1ULL << X86_XCR0_HI_ZMM_POS)
+#define X86_XCR0_X87              (_AC(1, ULL) <<  0)
+#define X86_XCR0_SSE              (_AC(1, ULL) <<  1)
+#define X86_XCR0_YMM              (_AC(1, ULL) <<  2)
+#define X86_XCR0_BNDREGS          (_AC(1, ULL) <<  3)
+#define X86_XCR0_BNDCSR           (_AC(1, ULL) <<  4)
+#define X86_XCR0_OPMASK           (_AC(1, ULL) <<  5)
+#define X86_XCR0_ZMM              (_AC(1, ULL) <<  6)
+#define X86_XCR0_HI_ZMM           (_AC(1, ULL) <<  7)
 #define X86_XSS_PROC_TRACE        (_AC(1, ULL) <<  8)
-#define X86_XCR0_PKRU_POS         9
-#define X86_XCR0_PKRU             (1ULL << X86_XCR0_PKRU_POS)
+#define X86_XCR0_PKRU             (_AC(1, ULL) <<  9)
 #define X86_XSS_PASID             (_AC(1, ULL) << 10)
 #define X86_XSS_CET_U             (_AC(1, ULL) << 11)
 #define X86_XSS_CET_S             (_AC(1, ULL) << 12)
@@ -107,11 +98,10 @@
 #define X86_XSS_HWP               (_AC(1, ULL) << 16)
 #define X86_XCR0_TILE_CFG         (_AC(1, ULL) << 17)
 #define X86_XCR0_TILE_DATA        (_AC(1, ULL) << 18)
-#define X86_XCR0_LWP_POS          62
-#define X86_XCR0_LWP              (1ULL << X86_XCR0_LWP_POS)
+#define X86_XCR0_LWP              (_AC(1, ULL) << 62)
 
 #define X86_XCR0_STATES                                                 \
-    (X86_XCR0_FP | X86_XCR0_SSE | X86_XCR0_YMM | X86_XCR0_BNDREGS |     \
+    (X86_XCR0_X87 | X86_XCR0_SSE | X86_XCR0_YMM | X86_XCR0_BNDREGS |    \
      X86_XCR0_BNDCSR | X86_XCR0_OPMASK | X86_XCR0_ZMM |                 \
      X86_XCR0_HI_ZMM | X86_XCR0_PKRU | X86_XCR0_TILE_CFG |              \
      X86_XCR0_TILE_DATA |                                               \
diff --git a/xen/arch/x86/include/asm/xstate.h b/xen/arch/x86/include/asm/xstate.h
index da1d89d2f416..f4a8e5f814a0 100644
--- a/xen/arch/x86/include/asm/xstate.h
+++ b/xen/arch/x86/include/asm/xstate.h
@@ -29,8 +29,8 @@ extern uint32_t mxcsr_mask;
 #define XSAVE_HDR_OFFSET          FXSAVE_SIZE
 #define XSTATE_AREA_MIN_SIZE      (FXSAVE_SIZE + XSAVE_HDR_SIZE)
 
-#define XSTATE_FP_SSE  (X86_XCR0_FP | X86_XCR0_SSE)
-#define XCNTXT_MASK    (X86_XCR0_FP | X86_XCR0_SSE | X86_XCR0_YMM | \
+#define XSTATE_FP_SSE  (X86_XCR0_X87 | X86_XCR0_SSE)
+#define XCNTXT_MASK    (X86_XCR0_X87 | X86_XCR0_SSE | X86_XCR0_YMM | \
                         X86_XCR0_OPMASK | X86_XCR0_ZMM | X86_XCR0_HI_ZMM | \
                         XSTATE_NONLAZY)
 
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 31bf2dc95f57..2acd02449dba 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -313,7 +313,7 @@ void xsave(struct vcpu *v, uint64_t mask)
                            "=m" (*ptr), \
                            "a" (lmask), "d" (hmask), "D" (ptr))
 
-    if ( fip_width == 8 || !(mask & X86_XCR0_FP) )
+    if ( fip_width == 8 || !(mask & X86_XCR0_X87) )
     {
         XSAVE("0x48,");
     }
@@ -366,7 +366,7 @@ void xsave(struct vcpu *v, uint64_t mask)
             fip_width = 8;
     }
 #undef XSAVE
-    if ( mask & X86_XCR0_FP )
+    if ( mask & X86_XCR0_X87 )
         ptr->fpu_sse.x[FPU_WORD_SIZE_OFFSET] = fip_width;
 }
 
@@ -558,7 +558,7 @@ void xstate_free_save_area(struct vcpu *v)
 static bool valid_xcr0(uint64_t xcr0)
 {
     /* FP must be unconditionally set. */
-    if ( !(xcr0 & X86_XCR0_FP) )
+    if ( !(xcr0 & X86_XCR0_X87) )
         return false;
 
     /* YMM depends on SSE. */
@@ -597,7 +597,7 @@ unsigned int xstate_uncompressed_size(uint64_t xcr0)
     if ( xcr0 == 0 )
         return 0;
 
-    if ( xcr0 <= (X86_XCR0_SSE | X86_XCR0_FP) )
+    if ( xcr0 <= (X86_XCR0_SSE | X86_XCR0_X87) )
         return size;
 
     /*
@@ -605,7 +605,7 @@ unsigned int xstate_uncompressed_size(uint64_t xcr0)
      * maximum offset+size.  Some states (e.g. LWP, APX_F) are out-of-order
      * with respect their index.
      */
-    xcr0 &= ~(X86_XCR0_SSE | X86_XCR0_FP);
+    xcr0 &= ~(X86_XCR0_SSE | X86_XCR0_X87);
     for_each_set_bit ( i, &xcr0, 63 )
     {
         const struct xstate_component *c = &raw_cpu_policy.xstate.comp[i];
@@ -626,14 +626,14 @@ unsigned int xstate_compressed_size(uint64_t xstates)
     if ( xstates == 0 )
         return 0;
 
-    if ( xstates <= (X86_XCR0_SSE | X86_XCR0_FP) )
+    if ( xstates <= (X86_XCR0_SSE | X86_XCR0_X87) )
         return size;
 
     /*
      * For the compressed size, every non-legacy component matters.  Some
      * componenets require aligning to 64 first.
      */
-    xstates &= ~(X86_XCR0_SSE | X86_XCR0_FP);
+    xstates &= ~(X86_XCR0_SSE | X86_XCR0_X87);
     for_each_set_bit ( i, &xstates, 63 )
     {
         const struct xstate_component *c = &raw_cpu_policy.xstate.comp[i];
@@ -756,7 +756,7 @@ static void __init noinline xstate_check_sizes(void)
      * layout compatibility with Intel and having a knock-on effect on all
      * subsequent states.
      */
-    check_new_xstate(&s, X86_XCR0_SSE | X86_XCR0_FP);
+    check_new_xstate(&s, X86_XCR0_SSE | X86_XCR0_X87);
 
     if ( cpu_has_avx )
         check_new_xstate(&s, X86_XCR0_YMM);
@@ -1008,7 +1008,7 @@ uint64_t read_bndcfgu(void)
               : "=m" (*xstate)
               : "a" (X86_XCR0_BNDCSR), "d" (0), "D" (xstate) );
 
-        bndcsr = (void *)xstate + xstate_offsets[X86_XCR0_BNDCSR_POS];
+        bndcsr = (void *)xstate + xstate_offsets[ilog2(X86_XCR0_BNDCSR)];
     }
 
     if ( cr0 & X86_CR0_TS )
-- 
2.39.2