Add a new save code type CPU_XSAVES_CODE containing a compressed XSAVES
image.
Signed-off-by: Tu Dinh <ngoc-tu.dinh@vates.tech>
---
xen/arch/x86/hvm/hvm.c | 67 +++++++++++++++++++++-----
xen/arch/x86/xstate.c | 3 +-
xen/include/public/arch-x86/hvm/save.h | 4 +-
3 files changed, 60 insertions(+), 14 deletions(-)
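Note, for illustration only (not part of the patch proper): the load-side
changes below tell the two layouts apart solely by the compaction bit in
the image's XSAVE header. A minimal sketch of such a predicate, assuming
the bit-63 XCOMP_BV encoding from the SDM (Xen's actual helper lives in
asm/xstate.h and may differ in detail):

    static inline bool xsave_area_compressed(const struct xsave_struct *area)
    {
        /* XCOMP_BV bit 63 (compaction enabled) marks an XSAVES-format image. */
        return area->xsave_hdr.xcomp_bv & (1ULL << 63);
    }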
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c7b93c7d91..e5a50d9fca 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1238,6 +1238,36 @@ static int cf_check hvm_save_cpu_xsave_states(
return 0;
}
+#define HVM_CPU_XSAVES_SIZE(xcr0) (offsetof(struct hvm_hw_cpu_xsave, \
+ save_area) + \
+ xstate_compressed_size(xcr0))
+
+static int cf_check hvm_save_cpu_xsaves_states(
+ struct vcpu *v, hvm_domain_context_t *h)
+{
+ struct hvm_hw_cpu_xsave *ctxt;
+ unsigned int size;
+ int err;
+
+ if ( !xsave_enabled(v) )
+ return 0; /* do nothing */
+
+ size = HVM_CPU_XSAVES_SIZE(v->arch.xcr0_accum);
+ err = _hvm_init_entry(h, CPU_XSAVES_CODE, v->vcpu_id, size);
+ if ( err )
+ return err;
+
+ ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
+ h->cur += size;
+ ctxt->xfeature_mask = xfeature_mask;
+ ctxt->xcr0 = v->arch.xcr0;
+ ctxt->xcr0_accum = v->arch.xcr0_accum;
+
+    memcpy(&ctxt->save_area, v->arch.xsave_area,
+           size - offsetof(struct hvm_hw_cpu_xsave, save_area));
+
+ return 0;
+}
+
/*
* Structure layout conformity checks, documenting correctness of the cast in
* the invocation of validate_xstate() below.
@@ -1311,6 +1341,10 @@ static int cf_check hvm_load_cpu_xsave_states(
ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
h->cur += desc->length;
+ if ( !cpu_has_xsaves &&
+ xsave_area_compressed((const void *)&ctxt->save_area) )
+ return -EOPNOTSUPP;
+
err = validate_xstate(d, ctxt->xcr0, ctxt->xcr0_accum,
(const void *)&ctxt->save_area.xsave_hdr);
if ( err )
@@ -1322,7 +1356,10 @@ static int cf_check hvm_load_cpu_xsave_states(
ctxt->xcr0, ctxt->save_area.xsave_hdr.xstate_bv, err);
return err;
}
- size = HVM_CPU_XSAVE_SIZE(ctxt->xcr0_accum);
+ if ( xsave_area_compressed((const void *)&ctxt->save_area) )
+ size = HVM_CPU_XSAVES_SIZE(ctxt->xcr0_accum);
+ else
+ size = HVM_CPU_XSAVE_SIZE(ctxt->xcr0_accum);
desc_length = desc->length;
if ( desc_length > size )
{
@@ -1348,14 +1385,7 @@ static int cf_check hvm_load_cpu_xsave_states(
desc_length = size;
}
- if ( xsave_area_compressed((const void *)&ctxt->save_area) )
- {
- printk(XENLOG_G_WARNING
- "HVM%d.%u restore: compressed xsave state not supported\n",
- d->domain_id, vcpuid);
- return -EOPNOTSUPP;
- }
- else if ( desc_length != size )
+ if ( desc_length != size )
{
printk(XENLOG_G_WARNING
"HVM%d.%u restore mismatch: xsave length %#x != %#x\n",
@@ -1367,8 +1397,13 @@ static int cf_check hvm_load_cpu_xsave_states(
v->arch.xcr0 = ctxt->xcr0;
v->arch.xcr0_accum = ctxt->xcr0_accum;
v->arch.nonlazy_xstate_used = ctxt->xcr0_accum & XSTATE_NONLAZY;
- compress_xsave_states(v, &ctxt->save_area,
- size - offsetof(struct hvm_hw_cpu_xsave, save_area));
+ if ( xsave_area_compressed((const void *)&ctxt->save_area) )
+ memcpy(v->arch.xsave_area, &ctxt->save_area,
+ size - offsetof(struct hvm_hw_cpu_xsave, save_area));
+ else
+ compress_xsave_states(v, &ctxt->save_area,
+ size - offsetof(struct hvm_hw_cpu_xsave,
+ save_area));
return 0;
}
@@ -1385,6 +1420,7 @@ static const uint32_t msrs_to_send[] = {
MSR_AMD64_DR1_ADDRESS_MASK,
MSR_AMD64_DR2_ADDRESS_MASK,
MSR_AMD64_DR3_ADDRESS_MASK,
+ MSR_LBR_DEPTH,
};
static int cf_check hvm_save_cpu_msrs(struct vcpu *v, hvm_domain_context_t *h)
@@ -1572,6 +1608,15 @@ static int __init cf_check hvm_register_CPU_save_and_restore(void)
sizeof(struct hvm_save_descriptor),
HVMSR_PER_VCPU);
+ hvm_register_savevm(CPU_XSAVES_CODE,
+ "CPU_XSAVES",
+ hvm_save_cpu_xsaves_states,
+ NULL,
+ hvm_load_cpu_xsave_states,
+ HVM_CPU_XSAVES_SIZE(xfeature_mask) +
+ sizeof(struct hvm_save_descriptor),
+ HVMSR_PER_VCPU);
+
hvm_register_savevm(CPU_MSR_CODE,
"CPU_MSR",
hvm_save_cpu_msrs,
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 607bf9c8a5..1c7a39e778 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -946,8 +946,7 @@ int validate_xstate(const struct domain *d, uint64_t xcr0, uint64_t xcr0_accum,
!valid_xcr0(xcr0_accum) )
return -EINVAL;
- if ( (xcr0_accum & ~xfeature_mask) ||
- hdr->xcomp_bv )
+ if ( xcr0_accum & ~xfeature_mask )
return -EOPNOTSUPP;
for ( i = 0; i < ARRAY_SIZE(hdr->reserved); ++i )
diff --git a/xen/include/public/arch-x86/hvm/save.h b/xen/include/public/arch-x86/hvm/save.h
index 7ecacadde1..89651f3dd3 100644
--- a/xen/include/public/arch-x86/hvm/save.h
+++ b/xen/include/public/arch-x86/hvm/save.h
@@ -624,12 +624,14 @@ struct hvm_msr {
#define CPU_MSR_CODE 20
+#define CPU_XSAVES_CODE 21
+
/* Range 22 - 34 (inclusive) reserved for Amazon */
/*
* Largest type-code in use
*/
-#define HVM_SAVE_CODE_MAX 20
+#define HVM_SAVE_CODE_MAX 21
#endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
--
2.43.0
Ngoc Tu Dinh | Vates XCP-ng Developer
XCP-ng & Xen Orchestra - Vates solutions
web: https://vates.tech
On 02.01.2025 09:45, Tu Dinh wrote:
> Add a new save code type CPU_XSAVES_CODE containing a compressed XSAVES
> image.
>
> Signed-off-by: Tu Dinh <ngoc-tu.dinh@vates.tech>
I'm afraid this is way too little of a description. The first unanswered
question would be why it is that we need a new save code in the first
place. The second question then would be what the interaction is when
both old and new save records are present. After all, aiui ...
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -1238,6 +1238,36 @@ static int cf_check hvm_save_cpu_xsave_states(
> return 0;
> }
>
> +#define HVM_CPU_XSAVES_SIZE(xcr0) (offsetof(struct hvm_hw_cpu_xsave, \
> + save_area) + \
> + xstate_compressed_size(xcr0))
> +
> +static int cf_check hvm_save_cpu_xsaves_states(
> + struct vcpu *v, hvm_domain_context_t *h)
> +{
> + struct hvm_hw_cpu_xsave *ctxt;
> + unsigned int size;
> + int err;
> +
> + if ( !xsave_enabled(v) )
> + return 0; /* do nothing */
> +
> + size = HVM_CPU_XSAVES_SIZE(v->arch.xcr0_accum);
> + err = _hvm_init_entry(h, CPU_XSAVES_CODE, v->vcpu_id, size);
> + if ( err )
> + return err;
> +
> + ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
> + h->cur += size;
> + ctxt->xfeature_mask = xfeature_mask;
> + ctxt->xcr0 = v->arch.xcr0;
> + ctxt->xcr0_accum = v->arch.xcr0_accum;
> +
> +    memcpy(&ctxt->save_area, v->arch.xsave_area,
> +           size - offsetof(struct hvm_hw_cpu_xsave, save_area));
> +
> + return 0;
> +}
... you save all states under this new code, not just the XSS-controlled
ones. Plus you're going through all of this even if there are no XSS-
controlled components, i.e. in particular also when there's no XSAVES
support in hardware. This way you then end up saving the exact same data
twice, just differently arranged.
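To illustrate the kind of gating I'd expect (an untested sketch only; it
assumes v->arch.msrs->xss.raw is a suitable indicator of XSS-controlled
components being in use, and that sizing by xcr0_accum alone remains
correct in that case):

    static int cf_check hvm_save_cpu_xsaves_states(
        struct vcpu *v, hvm_domain_context_t *h)
    {
        struct hvm_hw_cpu_xsave *ctxt;
        unsigned int size;
        int err;

        /* Nothing to do without XSAVES, or with no XSS components in use. */
        if ( !cpu_has_xsaves || !xsave_enabled(v) || !v->arch.msrs->xss.raw )
            return 0;

        size = HVM_CPU_XSAVES_SIZE(v->arch.xcr0_accum);
        err = _hvm_init_entry(h, CPU_XSAVES_CODE, v->vcpu_id, size);
        if ( err )
            return err;

        ctxt = (struct hvm_hw_cpu_xsave *)&h->data[h->cur];
        h->cur += size;
        ctxt->xfeature_mask = xfeature_mask;
        ctxt->xcr0 = v->arch.xcr0;
        ctxt->xcr0_accum = v->arch.xcr0_accum;

        memcpy(&ctxt->save_area, v->arch.xsave_area,
               size - offsetof(struct hvm_hw_cpu_xsave, save_area));

        return 0;
    }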
> --- a/xen/arch/x86/xstate.c
> +++ b/xen/arch/x86/xstate.c
> @@ -946,8 +946,7 @@ int validate_xstate(const struct domain *d, uint64_t xcr0, uint64_t xcr0_accum,
> !valid_xcr0(xcr0_accum) )
> return -EINVAL;
>
> - if ( (xcr0_accum & ~xfeature_mask) ||
> - hdr->xcomp_bv )
> + if ( xcr0_accum & ~xfeature_mask )
> return -EOPNOTSUPP;
>
> for ( i = 0; i < ARRAY_SIZE(hdr->reserved); ++i )
Can you really merely delete the check? Don't you need to validate a
non-zero ->xcomp_bv instead?
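An untested sketch of what I'd expect in its stead (assuming the
XSTATE_COMPACTION_ENABLED bit-63 constant from xstate.h):

    /*
     * A compressed image must have the compaction bit set and may name
     * only components permitted by xcr0_accum. An uncompressed image
     * still has to have xcomp_bv be zero, which trivially passes here.
     */
    if ( hdr->xcomp_bv &&
         (!(hdr->xcomp_bv & XSTATE_COMPACTION_ENABLED) ||
          (hdr->xcomp_bv & ~(xcr0_accum | XSTATE_COMPACTION_ENABLED))) )
        return -EINVAL;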
Jan