[PATCH v4 19/24] xen/domctl: wrap hvm_save{,load} with CONFIG_MGMT_HYPERCALLS

Penny Zheng posted 24 patches 3 weeks, 1 day ago
There is a newer version of this series
[PATCH v4 19/24] xen/domctl: wrap hvm_save{,load} with CONFIG_MGMT_HYPERCALLS
Posted by Penny Zheng 3 weeks, 1 day ago
The following functions are referenced only in places which are guarded
with either CONFIG_MGMT_HYPERCALLS or CONFIG_MEM_SHARING:
- arch_hvm_save
- arch_hvm_check
- arch_hvm_load
- hvm_save_size
- hvm_save
- hvm_load
CONFIG_MEM_SHARING itself also depends on CONFIG_MGMT_HYPERCALLS.
So these functions shall be wrapped under MGMT_HYPERCALLS; otherwise they
become unreachable code when MGMT_HYPERCALLS=n, violating MISRA rule 2.1.
We move arch_hvm_save(), arch_hvm_check(), arch_hvm_load() and hvm_save_size()
nearer to the remaining wrapped functions, to avoid scattered #ifdef wrapping.

Signed-off-by: Penny Zheng <Penny.Zheng@amd.com>
---
v3 -> v4
- new commit
---
 xen/arch/x86/hvm/save.c | 154 ++++++++++++++++++++--------------------
 1 file changed, 78 insertions(+), 76 deletions(-)

diff --git a/xen/arch/x86/hvm/save.c b/xen/arch/x86/hvm/save.c
index 8ab6405706..5e99dd5998 100644
--- a/xen/arch/x86/hvm/save.c
+++ b/xen/arch/x86/hvm/save.c
@@ -15,62 +15,6 @@
 
 #include <public/hvm/save.h>
 
-static void arch_hvm_save(struct domain *d, struct hvm_save_header *hdr)
-{
-    uint32_t eax, ebx, ecx, edx;
-
-    /* Save some CPUID bits */
-    cpuid(1, &eax, &ebx, &ecx, &edx);
-    hdr->cpuid = eax;
-
-    /* Save guest's preferred TSC. */
-    hdr->gtsc_khz = d->arch.tsc_khz;
-
-    /* Time when saving started */
-    d->arch.hvm.sync_tsc = rdtsc();
-}
-
-static int arch_hvm_check(const struct domain *d,
-                          const struct hvm_save_header *hdr)
-{
-    uint32_t eax, ebx, ecx, edx;
-
-    if ( hdr->magic != HVM_FILE_MAGIC )
-    {
-        printk(XENLOG_G_ERR "HVM%d restore: bad magic number %#"PRIx32"\n",
-               d->domain_id, hdr->magic);
-        return -EINVAL;
-    }
-
-    if ( hdr->version != HVM_FILE_VERSION )
-    {
-        printk(XENLOG_G_ERR "HVM%d restore: unsupported version %u\n",
-               d->domain_id, hdr->version);
-        return -EINVAL;
-    }
-
-    cpuid(1, &eax, &ebx, &ecx, &edx);
-    /* CPUs ought to match but with feature-masking they might not */
-    if ( (hdr->cpuid & ~0x0fUL) != (eax & ~0x0fUL) )
-        printk(XENLOG_G_INFO "HVM%d restore: VM saved on one CPU "
-               "(%#"PRIx32") and restored on another (%#"PRIx32").\n",
-               d->domain_id, hdr->cpuid, eax);
-
-    return 0;
-}
-
-static void arch_hvm_load(struct domain *d, const struct hvm_save_header *hdr)
-{
-    /* Restore guest's preferred TSC frequency. */
-    if ( hdr->gtsc_khz )
-        d->arch.tsc_khz = hdr->gtsc_khz;
-    if ( d->arch.vtsc )
-        hvm_set_rdtsc_exiting(d, 1);
-
-    /* Time when restore started  */
-    d->arch.hvm.sync_tsc = rdtsc();
-}
-
 /* List of handlers for various HVM save and restore types */
 static struct {
     hvm_save_handler save;
@@ -101,26 +45,6 @@ void __init hvm_register_savevm(uint16_t typecode,
     hvm_sr_handlers[typecode].kind = kind;
 }
 
-size_t hvm_save_size(struct domain *d)
-{
-    struct vcpu *v;
-    size_t sz;
-    int i;
-
-    /* Basic overhead for header and footer */
-    sz = (2 * sizeof (struct hvm_save_descriptor)) + HVM_SAVE_LENGTH(HEADER);
-
-    /* Plus space for each thing we will be saving */
-    for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ )
-        if ( hvm_sr_handlers[i].kind == HVMSR_PER_VCPU )
-            for_each_vcpu(d, v)
-                sz += hvm_sr_handlers[i].size;
-        else
-            sz += hvm_sr_handlers[i].size;
-
-    return sz;
-}
-
 /*
  * Extract a single instance of a save record, by marshalling all records of
  * that type and copying out the one we need.
@@ -196,6 +120,83 @@ int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance,
     return rv;
 }
 
+#ifdef CONFIG_MGMT_HYPERCALLS
+static void arch_hvm_save(struct domain *d, struct hvm_save_header *hdr)
+{
+    uint32_t eax, ebx, ecx, edx;
+
+    /* Save some CPUID bits */
+    cpuid(1, &eax, &ebx, &ecx, &edx);
+    hdr->cpuid = eax;
+
+    /* Save guest's preferred TSC. */
+    hdr->gtsc_khz = d->arch.tsc_khz;
+
+    /* Time when saving started */
+    d->arch.hvm.sync_tsc = rdtsc();
+}
+
+static int arch_hvm_check(const struct domain *d,
+                          const struct hvm_save_header *hdr)
+{
+    uint32_t eax, ebx, ecx, edx;
+
+    if ( hdr->magic != HVM_FILE_MAGIC )
+    {
+        printk(XENLOG_G_ERR "HVM%d restore: bad magic number %#"PRIx32"\n",
+               d->domain_id, hdr->magic);
+        return -EINVAL;
+    }
+
+    if ( hdr->version != HVM_FILE_VERSION )
+    {
+        printk(XENLOG_G_ERR "HVM%d restore: unsupported version %u\n",
+               d->domain_id, hdr->version);
+        return -EINVAL;
+    }
+
+    cpuid(1, &eax, &ebx, &ecx, &edx);
+    /* CPUs ought to match but with feature-masking they might not */
+    if ( (hdr->cpuid & ~0x0fUL) != (eax & ~0x0fUL) )
+        printk(XENLOG_G_INFO "HVM%d restore: VM saved on one CPU "
+               "(%#"PRIx32") and restored on another (%#"PRIx32").\n",
+               d->domain_id, hdr->cpuid, eax);
+
+    return 0;
+}
+
+static void arch_hvm_load(struct domain *d, const struct hvm_save_header *hdr)
+{
+    /* Restore guest's preferred TSC frequency. */
+    if ( hdr->gtsc_khz )
+        d->arch.tsc_khz = hdr->gtsc_khz;
+    if ( d->arch.vtsc )
+        hvm_set_rdtsc_exiting(d, 1);
+
+    /* Time when restore started  */
+    d->arch.hvm.sync_tsc = rdtsc();
+}
+
+size_t hvm_save_size(struct domain *d)
+{
+    struct vcpu *v;
+    size_t sz;
+    unsigned int i;
+
+    /* Basic overhead for header and footer */
+    sz = (2 * sizeof (struct hvm_save_descriptor)) + HVM_SAVE_LENGTH(HEADER);
+
+    /* Plus space for each thing we will be saving */
+    for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ )
+        if ( hvm_sr_handlers[i].kind == HVMSR_PER_VCPU )
+            for_each_vcpu(d, v)
+                sz += hvm_sr_handlers[i].size;
+        else
+            sz += hvm_sr_handlers[i].size;
+
+    return sz;
+}
+
 int hvm_save(struct domain *d, hvm_domain_context_t *h)
 {
     char *c;
@@ -390,6 +391,7 @@ int hvm_load(struct domain *d, bool real, hvm_domain_context_t *h)
 
     /* Not reached */
 }
+#endif /* CONFIG_MGMT_HYPERCALLS */
 
 int _hvm_init_entry(struct hvm_domain_context *h, uint16_t tc, uint16_t inst,
                     uint32_t len)
-- 
2.34.1
Re: [PATCH v4 19/24] xen/domctl: wrap hvm_save{,load} with CONFIG_MGMT_HYPERCALLS
Posted by Jan Beulich 2 weeks, 4 days ago
On 21.11.2025 11:57, Penny Zheng wrote:
> The following functions have been referenced in places which is either guarded
> with CONFIG_MGMT_HYPERCALLS or CONFIG_MEM_SHARING:
> - arch_hvm_save
> - arch_hvm_check
> - arch_hvm_load
> - hvm_save_size
> - hvm_save
> - hvm_load
> While CONFIG_MEM_SHARING is also dependent on CONFIG_MGMT_HYPERCALLS.
> So they shall be wrapped under MGMT_HYPERCALLS, otherwise they will become
> unreachable codes when MGMT_HYPERCALLS=n, and hence violating Misra rule 2.1.
> We move arch_hvm_save(), arch_hvm_check(), arch_hvm_load() and hvm_save_size()
> nearer to the left functions, to avoid scattered #ifdef-wrapping.

Why would you move things? What is in this source file that is of any use when
MGMT_HYPERCALLS=n? The only caller of hvm_save_one() lives in x86/domctl.c. With
that also removed, hvm_sr_handlers[] is only ever written to afaict, which means
that's an effectively dead array then too.

The final few functions ...

> @@ -390,6 +391,7 @@ int hvm_load(struct domain *d, bool real, hvm_domain_context_t *h)
>  
>      /* Not reached */
>  }
> +#endif /* CONFIG_MGMT_HYPERCALLS */
>  
>  int _hvm_init_entry(struct hvm_domain_context *h, uint16_t tc, uint16_t inst,
>                      uint32_t len)

... here and below are only helpers for the save/restore machinery, i.e. that
_all_ is also usable only when MGMT_HYPERCALLS=y. Yes, that's a lot of further
work, but what do you do? You'll then have quite a bit more code removed from
the set that as per coverage analysis is never reached.

Jan
Re: [PATCH v4 19/24] xen/domctl: wrap hvm_save{,load} with CONFIG_MGMT_HYPERCALLS
Posted by Grygorii Strashko 2 weeks, 4 days ago
Hi Jan, Penny,

On 25.11.25 17:59, Jan Beulich wrote:
> On 21.11.2025 11:57, Penny Zheng wrote:
>> The following functions have been referenced in places which is either guarded
>> with CONFIG_MGMT_HYPERCALLS or CONFIG_MEM_SHARING:
>> - arch_hvm_save
>> - arch_hvm_check
>> - arch_hvm_load
>> - hvm_save_size
>> - hvm_save
>> - hvm_load
>> While CONFIG_MEM_SHARING is also dependent on CONFIG_MGMT_HYPERCALLS.
>> So they shall be wrapped under MGMT_HYPERCALLS, otherwise they will become
>> unreachable codes when MGMT_HYPERCALLS=n, and hence violating Misra rule 2.1.
>> We move arch_hvm_save(), arch_hvm_check(), arch_hvm_load() and hvm_save_size()
>> nearer to the left functions, to avoid scattered #ifdef-wrapping.
> 
> Why would you move things? What is in this source file that is of any use when
> MGMT_HYPERCALLS=n? The only caller of hvm_save_one() lives in x86/domctl.c. With
> that also removed, hvm_sr_handlers[] is only ever written to afaict, which means
> that's an effectively dead array then too.
> 
> The final few functions ...
> 
>> @@ -390,6 +391,7 @@ int hvm_load(struct domain *d, bool real, hvm_domain_context_t *h)
>>   
>>       /* Not reached */
>>   }
>> +#endif /* CONFIG_MGMT_HYPERCALLS */
>>   
>>   int _hvm_init_entry(struct hvm_domain_context *h, uint16_t tc, uint16_t inst,
>>                       uint32_t len)
> 
> ... here and below are only helpers for the save/restore machinery, i.e. that
> _all_ is also usable only when MGMT_HYPERCALLS=y. Yes, that's a lot of further
> work, but what do you do? You'll then have quite a bit more code removed from
> the set that as per coverage analysis is never reached.

I have a local patch which allows disabling all HVM save/load code at once by
using a separate Kconfig option, SAVE_RESTORE.

+++ b/xen/arch/x86/hvm/Kconfig
@@ -127,4 +127,8 @@ config VHPET

+config SAVE_RESTORE
+    depends on MGMT_HYPERCALLS
+    def_bool y

SAVE_RESTORE - annotates all HVM save/load code and, in general, could be made
a feature by allowing it to be selectable.
Of course, It all can be done by just using MGMT_HYPERCALLS.

So, I'd appreciate your opinion - does it make sense to have a separate SAVE_RESTORE?

-- 
Best regards,
-grygorii
Re: [PATCH v4 19/24] xen/domctl: wrap hvm_save{,load} with CONFIG_MGMT_HYPERCALLS
Posted by Jan Beulich 2 weeks, 4 days ago
On 25.11.2025 17:30, Grygorii Strashko wrote:
> On 25.11.25 17:59, Jan Beulich wrote:
>> On 21.11.2025 11:57, Penny Zheng wrote:
>>> The following functions have been referenced in places which is either guarded
>>> with CONFIG_MGMT_HYPERCALLS or CONFIG_MEM_SHARING:
>>> - arch_hvm_save
>>> - arch_hvm_check
>>> - arch_hvm_load
>>> - hvm_save_size
>>> - hvm_save
>>> - hvm_load
>>> While CONFIG_MEM_SHARING is also dependent on CONFIG_MGMT_HYPERCALLS.
>>> So they shall be wrapped under MGMT_HYPERCALLS, otherwise they will become
>>> unreachable codes when MGMT_HYPERCALLS=n, and hence violating Misra rule 2.1.
>>> We move arch_hvm_save(), arch_hvm_check(), arch_hvm_load() and hvm_save_size()
>>> nearer to the left functions, to avoid scattered #ifdef-wrapping.
>>
>> Why would you move things? What is in this source file that is of any use when
>> MGMT_HYPERCALLS=n? The only caller of hvm_save_one() lives in x86/domctl.c. With
>> that also removed, hvm_sr_handlers[] is only ever written to afaict, which means
>> that's an effectively dead array then too.
>>
>> The final few functions ...
>>
>>> @@ -390,6 +391,7 @@ int hvm_load(struct domain *d, bool real, hvm_domain_context_t *h)
>>>   
>>>       /* Not reached */
>>>   }
>>> +#endif /* CONFIG_MGMT_HYPERCALLS */
>>>   
>>>   int _hvm_init_entry(struct hvm_domain_context *h, uint16_t tc, uint16_t inst,
>>>                       uint32_t len)
>>
>> ... here and below are only helpers for the save/restore machinery, i.e. that
>> _all_ is also usable only when MGMT_HYPERCALLS=y. Yes, that's a lot of further
>> work, but what do you do? You'll then have quite a bit more code removed from
>> the set that as per coverage analysis is never reached.
> 
> I have a local patch which allows to disable all HVM save/load code at once by using
> separated Kconfig option SAVE_RESTORE.
> 
> +++ b/xen/arch/x86/hvm/Kconfig
> @@ -127,4 +127,8 @@ config VHPET
> 
> +config SAVE_RESTORE
> +    depends on MGMT_HYPERCALLS
> +    def_bool y
> 
> SAVE_RESTORE - annotates all HVM save/load code and, in general, could made a feature by
> allowing it to be selectable.

Oh, one more thing: SAVE_RESTORE, simply by its name, promises to cover PV as well.
That either wants to be the case, or the name may want adjusting.

Jan
Re: [PATCH v4 19/24] xen/domctl: wrap hvm_save{,load} with CONFIG_MGMT_HYPERCALLS
Posted by Jan Beulich 2 weeks, 4 days ago
On 25.11.2025 17:30, Grygorii Strashko wrote:
> On 25.11.25 17:59, Jan Beulich wrote:
>> On 21.11.2025 11:57, Penny Zheng wrote:
>>> The following functions have been referenced in places which is either guarded
>>> with CONFIG_MGMT_HYPERCALLS or CONFIG_MEM_SHARING:
>>> - arch_hvm_save
>>> - arch_hvm_check
>>> - arch_hvm_load
>>> - hvm_save_size
>>> - hvm_save
>>> - hvm_load
>>> While CONFIG_MEM_SHARING is also dependent on CONFIG_MGMT_HYPERCALLS.
>>> So they shall be wrapped under MGMT_HYPERCALLS, otherwise they will become
>>> unreachable codes when MGMT_HYPERCALLS=n, and hence violating Misra rule 2.1.
>>> We move arch_hvm_save(), arch_hvm_check(), arch_hvm_load() and hvm_save_size()
>>> nearer to the left functions, to avoid scattered #ifdef-wrapping.
>>
>> Why would you move things? What is in this source file that is of any use when
>> MGMT_HYPERCALLS=n? The only caller of hvm_save_one() lives in x86/domctl.c. With
>> that also removed, hvm_sr_handlers[] is only ever written to afaict, which means
>> that's an effectively dead array then too.
>>
>> The final few functions ...
>>
>>> @@ -390,6 +391,7 @@ int hvm_load(struct domain *d, bool real, hvm_domain_context_t *h)
>>>   
>>>       /* Not reached */
>>>   }
>>> +#endif /* CONFIG_MGMT_HYPERCALLS */
>>>   
>>>   int _hvm_init_entry(struct hvm_domain_context *h, uint16_t tc, uint16_t inst,
>>>                       uint32_t len)
>>
>> ... here and below are only helpers for the save/restore machinery, i.e. that
>> _all_ is also usable only when MGMT_HYPERCALLS=y. Yes, that's a lot of further
>> work, but what do you do? You'll then have quite a bit more code removed from
>> the set that as per coverage analysis is never reached.
> 
> I have a local patch which allows to disable all HVM save/load code at once by using
> separated Kconfig option SAVE_RESTORE.
> 
> +++ b/xen/arch/x86/hvm/Kconfig
> @@ -127,4 +127,8 @@ config VHPET
> 
> +config SAVE_RESTORE
> +    depends on MGMT_HYPERCALLS
> +    def_bool y
> 
> SAVE_RESTORE - annotates all HVM save/load code and, in general, could made a feature by
> allowing it to be selectable.
> Of course, It all can be done by just using MGMT_HYPERCALLS.
> 
> So, I'd be appreciated for you opinion - does it make sense to have separate SAVE_RESTORE?

Yes, why not? The granularity of MGMT_HYPERCALLS is getting a little unwieldy
anyway, so why not leverage what you have to split it up at least some. (Of
course much depends on how intrusive that change is. Then again the same
intrusiveness would have to be expected if it all went under MGMT_HYPERCALLS.)

Jan
[XEN][PATCH] x86/hvm: move save/restore under HVM_SAVE_RESTORE config
Posted by Grygorii Strashko 2 weeks, 4 days ago
From: Grygorii Strashko <grygorii_strashko@epam.com>

Introduce separate HVM_SAVE_RESTORE config for HVM save/restore feature,
which is enabled by default for HVM and depends on MGMT_HYPERCALLS config.

This allows making MGMT_HYPERCALLS-specific changes more granular and, if
required, making HVM save/restore an optional, selectable feature.

Signed-off-by: Grygorii Strashko <grygorii_strashko@epam.com>
---
I'd like to propose this patch as a replacement of Patch 19 [1]

[1] https://patchwork.kernel.org/project/xen-devel/patch/20251121105801.1251262-20-Penny.Zheng@amd.com/

 xen/arch/x86/cpu/mcheck/vmce.c       | 4 ++--
 xen/arch/x86/emul-i8254.c            | 4 +++-
 xen/arch/x86/hvm/Kconfig             | 6 ++++++
 xen/arch/x86/hvm/Makefile            | 2 +-
 xen/arch/x86/hvm/hpet.c              | 3 ++-
 xen/arch/x86/hvm/hvm.c               | 4 ++++
 xen/arch/x86/hvm/irq.c               | 2 ++
 xen/arch/x86/hvm/mtrr.c              | 2 ++
 xen/arch/x86/hvm/pmtimer.c           | 2 ++
 xen/arch/x86/hvm/rtc.c               | 2 ++
 xen/arch/x86/hvm/vioapic.c           | 2 ++
 xen/arch/x86/hvm/viridian/viridian.c | 2 ++
 xen/arch/x86/hvm/vlapic.c            | 3 ++-
 xen/arch/x86/hvm/vpic.c              | 2 ++
 xen/arch/x86/include/asm/hvm/save.h  | 5 ++++-
 15 files changed, 38 insertions(+), 7 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 1a7e92506ac8..ba27f6f8bd91 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -349,7 +349,7 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
     return ret;
 }
 
-#if CONFIG_HVM
+#if defined(CONFIG_HVM_SAVE_RESTORE)
 static int cf_check vmce_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
 {
     struct hvm_vmce_vcpu ctxt = {
@@ -380,10 +380,10 @@ static int cf_check vmce_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *
 
     return err ?: vmce_restore_vcpu(v, &ctxt);
 }
+#endif /* CONFIG_HVM_SAVE_RESTORE */
 
 HVM_REGISTER_SAVE_RESTORE(VMCE_VCPU, vmce_save_vcpu_ctxt, NULL,
                           vmce_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
-#endif
 
 /*
  * for Intel MCE, broadcast vMCE to all vcpus
diff --git a/xen/arch/x86/emul-i8254.c b/xen/arch/x86/emul-i8254.c
index f106ab794c4f..d33723fa32c6 100644
--- a/xen/arch/x86/emul-i8254.c
+++ b/xen/arch/x86/emul-i8254.c
@@ -409,7 +409,9 @@ void pit_stop_channel0_irq(PITState *pit)
     destroy_periodic_time(&pit->pt0);
     spin_unlock(&pit->lock);
 }
+#endif
 
+#if defined(CONFIG_HVM_SAVE_RESTORE)
 static int cf_check pit_save(struct vcpu *v, hvm_domain_context_t *h)
 {
     struct domain *d = v->domain;
@@ -507,9 +509,9 @@ static int cf_check pit_load(struct domain *d, hvm_domain_context_t *h)
 
     return rc;
 }
+#endif /* CONFIG_HVM_SAVE_RESTORE */
 
 HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_check, pit_load, 1, HVMSR_PER_DOM);
-#endif
 
 /* The intercept action for PIT DM retval: 0--not handled; 1--handled. */
 static int cf_check handle_pit_io(
diff --git a/xen/arch/x86/hvm/Kconfig b/xen/arch/x86/hvm/Kconfig
index 156deab29139..48c2e1e33469 100644
--- a/xen/arch/x86/hvm/Kconfig
+++ b/xen/arch/x86/hvm/Kconfig
@@ -93,4 +93,10 @@ config MEM_SHARING
 	depends on INTEL_VMX
 	depends on MGMT_HYPERCALLS
 
+config HVM_SAVE_RESTORE
+	depends on MGMT_HYPERCALLS
+	def_bool y
+	help
+	  Enables HVM save/load functionality.
+
 endif
diff --git a/xen/arch/x86/hvm/Makefile b/xen/arch/x86/hvm/Makefile
index f34fb03934c0..f1602cc659ea 100644
--- a/xen/arch/x86/hvm/Makefile
+++ b/xen/arch/x86/hvm/Makefile
@@ -22,7 +22,7 @@ obj-y += nestedhvm.o
 obj-y += pmtimer.o
 obj-y += quirks.o
 obj-y += rtc.o
-obj-y += save.o
+obj-$(CONFIG_HVM_SAVE_RESTORE) += save.o
 obj-y += stdvga.o
 obj-y += vioapic.o
 obj-y += vlapic.o
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index f0e5f877f4a1..7df8abfa348d 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -570,7 +570,7 @@ static const struct hvm_mmio_ops hpet_mmio_ops = {
     .write = hpet_write
 };
 
-
+#if defined(CONFIG_HVM_SAVE_RESTORE)
 static int cf_check hpet_save(struct vcpu *v, hvm_domain_context_t *h)
 {
     const struct domain *d = v->domain;
@@ -691,6 +691,7 @@ static int cf_check hpet_load(struct domain *d, hvm_domain_context_t *h)
 
     return 0;
 }
+#endif /* CONFIG_HVM_SAVE_RESTORE */
 
 HVM_REGISTER_SAVE_RESTORE(HPET, hpet_save, NULL, hpet_load, 1, HVMSR_PER_DOM);
 
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index b1e03bbb3bd3..832fca54f3a2 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -796,6 +796,7 @@ void hvm_domain_destroy(struct domain *d)
     destroy_vpci_mmcfg(d);
 }
 
+#if defined(CONFIG_HVM_SAVE_RESTORE)
 static int cf_check hvm_save_tsc_adjust(struct vcpu *v, hvm_domain_context_t *h)
 {
     struct hvm_tsc_adjust ctxt = {
@@ -943,6 +944,7 @@ static int cf_check hvm_save_cpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
 
     return hvm_save_entry(CPU, v->vcpu_id, h, &ctxt);
 }
+#endif /* CONFIG_HVM_SAVE_RESTORE */
 
 /* Return a string indicating the error, or NULL for valid. */
 const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
@@ -1020,6 +1022,7 @@ unsigned long hvm_cr4_guest_valid_bits(const struct domain *d)
             0);
 }
 
+#if defined(CONFIG_HVM_SAVE_RESTORE)
 static int cf_check hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
     const struct cpu_policy *p = d->arch.cpu_policy;
@@ -1602,6 +1605,7 @@ static int __init cf_check hvm_register_CPU_save_and_restore(void)
     return 0;
 }
 __initcall(hvm_register_CPU_save_and_restore);
+#endif /* CONFIG_HVM_SAVE_RESTORE */
 
 static void cf_check hvm_assert_evtchn_irq_tasklet(void *v)
 {
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 5f643611130f..af5f2c83577e 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -649,6 +649,7 @@ static int __init cf_check dump_irq_info_key_init(void)
 }
 __initcall(dump_irq_info_key_init);
 
+#if defined(CONFIG_HVM_SAVE_RESTORE)
 static int cf_check irq_save_pci(struct vcpu *v, hvm_domain_context_t *h)
 {
     struct domain *d = v->domain;
@@ -799,6 +800,7 @@ static int cf_check irq_load_link(struct domain *d, hvm_domain_context_t *h)
 
     return 0;
 }
+#endif /* CONFIG_HVM_SAVE_RESTORE */
 
 HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, NULL, irq_load_pci,
                           1, HVMSR_PER_DOM);
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index e0bef8c82dc8..c5e83dd7a602 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -695,6 +695,7 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
     return rc;
 }
 
+#if defined(CONFIG_HVM_SAVE_RESTORE)
 static int cf_check hvm_save_mtrr_msr(struct vcpu *v, hvm_domain_context_t *h)
 {
     const struct mtrr_state *m = &v->arch.hvm.mtrr;
@@ -782,6 +783,7 @@ static int cf_check hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
 
     return 0;
 }
+#endif /* CONFIG_HVM_SAVE_RESTORE */
 
 HVM_REGISTER_SAVE_RESTORE(MTRR, hvm_save_mtrr_msr, NULL, hvm_load_mtrr_msr, 1,
                           HVMSR_PER_VCPU);
diff --git a/xen/arch/x86/hvm/pmtimer.c b/xen/arch/x86/hvm/pmtimer.c
index 87a7a01c9f52..86f85d381a20 100644
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -240,6 +240,7 @@ static int cf_check handle_pmt_io(
     return X86EMUL_OKAY;
 }
 
+#if defined(CONFIG_HVM_SAVE_RESTORE)
 static int cf_check acpi_save(struct vcpu *v, hvm_domain_context_t *h)
 {
     struct domain *d = v->domain;
@@ -300,6 +301,7 @@ static int cf_check acpi_load(struct domain *d, hvm_domain_context_t *h)
     
     return 0;
 }
+#endif /* CONFIG_HVM_SAVE_RESTORE */
 
 HVM_REGISTER_SAVE_RESTORE(PMTIMER, acpi_save, NULL, acpi_load,
                           1, HVMSR_PER_DOM);
diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c
index e33a8ec10821..a451cd2e6880 100644
--- a/xen/arch/x86/hvm/rtc.c
+++ b/xen/arch/x86/hvm/rtc.c
@@ -742,6 +742,7 @@ void rtc_migrate_timers(struct vcpu *v)
     }
 }
 
+#if defined(CONFIG_HVM_SAVE_RESTORE)
 /* Save RTC hardware state */
 static int cf_check rtc_save(struct vcpu *v, hvm_domain_context_t *h)
 {
@@ -797,6 +798,7 @@ static int cf_check rtc_load(struct domain *d, hvm_domain_context_t *h)
 
     return 0;
 }
+#endif /* CONFIG_HVM_SAVE_RESTORE */
 
 HVM_REGISTER_SAVE_RESTORE(RTC, rtc_save, NULL, rtc_load, 1, HVMSR_PER_DOM);
 
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 7c725f9e471f..5fad31074288 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -594,6 +594,7 @@ int vioapic_get_trigger_mode(const struct domain *d, unsigned int gsi)
     return vioapic->redirtbl[pin].fields.trig_mode;
 }
 
+#if defined(CONFIG_HVM_SAVE_RESTORE)
 static int cf_check ioapic_save(struct vcpu *v, hvm_domain_context_t *h)
 {
     const struct domain *d = v->domain;
@@ -629,6 +630,7 @@ static int cf_check ioapic_load(struct domain *d, hvm_domain_context_t *h)
 
     return 0;
 }
+#endif /* CONFIG_HVM_SAVE_RESTORE */
 
 HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, NULL, ioapic_load, 1,
                           HVMSR_PER_DOM);
diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index 90e749ceb581..ab2a7078515c 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -1092,6 +1092,7 @@ void viridian_unmap_guest_page(struct viridian_page *vp)
     put_page_and_type(page);
 }
 
+#if defined(CONFIG_HVM_SAVE_RESTORE)
 static int cf_check viridian_save_domain_ctxt(
     struct vcpu *v, hvm_domain_context_t *h)
 {
@@ -1171,6 +1172,7 @@ static int cf_check viridian_load_vcpu_ctxt(
 
     return 0;
 }
+#endif /* CONFIG_HVM_SAVE_RESTORE */
 
 HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_VCPU, viridian_save_vcpu_ctxt, NULL,
                           viridian_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 79697487ba90..ea7d186ff1ea 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1481,6 +1481,7 @@ void vlapic_reset(struct vlapic *vlapic)
     vlapic_do_init(vlapic);
 }
 
+#if defined(CONFIG_HVM_SAVE_RESTORE)
 /* rearm the actimer if needed, after a HVM restore */
 static void lapic_rearm(struct vlapic *s)
 {
@@ -1578,7 +1579,6 @@ static void lapic_load_fixup(struct vlapic *vlapic)
                v, vlapic->loaded.id, vlapic->loaded.ldr, good_ldr);
 }
 
-
 static int lapic_check_common(const struct domain *d, unsigned int vcpuid)
 {
     if ( !has_vlapic(d) )
@@ -1675,6 +1675,7 @@ static int cf_check lapic_load_regs(struct domain *d, hvm_domain_context_t *h)
     lapic_rearm(s);
     return 0;
 }
+#endif /* CONFIG_HVM_SAVE_RESTORE */
 
 HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_check_hidden,
                           lapic_load_hidden, 1, HVMSR_PER_VCPU);
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index 30ce367c082d..af49c372f502 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -402,6 +402,7 @@ static int cf_check vpic_intercept_elcr_io(
     return X86EMUL_OKAY;
 }
 
+#if defined(CONFIG_HVM_SAVE_RESTORE)
 static int cf_check vpic_save(struct vcpu *v, hvm_domain_context_t *h)
 {
     struct domain *d = v->domain;
@@ -475,6 +476,7 @@ static int cf_check vpic_load(struct domain *d, hvm_domain_context_t *h)
 
     return 0;
 }
+#endif /* CONFIG_HVM_SAVE_RESTORE */
 
 HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_check, vpic_load, 2,
                           HVMSR_PER_DOM);
diff --git a/xen/arch/x86/include/asm/hvm/save.h b/xen/arch/x86/include/asm/hvm/save.h
index ec8de029319d..501a6cfb214a 100644
--- a/xen/arch/x86/include/asm/hvm/save.h
+++ b/xen/arch/x86/include/asm/hvm/save.h
@@ -123,6 +123,7 @@ void hvm_register_savevm(uint16_t typecode,
 
 /* Syntactic sugar around that function: specify the max number of
  * saves, and this calculates the size of buffer needed */
+#ifdef CONFIG_HVM_SAVE_RESTORE
 #define HVM_REGISTER_SAVE_RESTORE(_x, _save, check, _load, _num, _k)      \
 static int __init cf_check __hvm_register_##_x##_save_and_restore(void)   \
 {                                                                         \
@@ -137,7 +138,9 @@ static int __init cf_check __hvm_register_##_x##_save_and_restore(void)   \
     return 0;                                                             \
 }                                                                         \
 __initcall(__hvm_register_##_x##_save_and_restore);
-
+#else
+#define HVM_REGISTER_SAVE_RESTORE(_x, _save, check, _load, _num, _k)
+#endif
 
 /* Entry points for saving and restoring HVM domain state */
 size_t hvm_save_size(struct domain *d);
-- 
2.34.1
Re: [XEN][PATCH] x86/hvm: move save/restore under HVM_SAVE_RESTORE config
Posted by Jan Beulich 2 weeks, 3 days ago
On 26.11.2025 00:00, Grygorii Strashko wrote:
> From: Grygorii Strashko <grygorii_strashko@epam.com>
> 
> Introduce separate HVM_SAVE_RESTORE config for HVM save/restore feature,
> which is enabled by default for HVM and depends on MGMT_HYPERCALLS config.
> 
> This allows to make MGMT_HYPERCALLS specific changes more granular and, if
> required, make HVM save/restore optional, selectable feature.
> 
> Signed-off-by: Grygorii Strashko <grygorii_strashko@epam.com>
> ---
> I'd like to propose this patch as a replacement of Patch 19 [1]
> 
> [1] https://patchwork.kernel.org/project/xen-devel/patch/20251121105801.1251262-20-Penny.Zheng@amd.com/
> 
>  xen/arch/x86/cpu/mcheck/vmce.c       | 4 ++--
>  xen/arch/x86/emul-i8254.c            | 4 +++-
>  xen/arch/x86/hvm/Kconfig             | 6 ++++++
>  xen/arch/x86/hvm/Makefile            | 2 +-
>  xen/arch/x86/hvm/hpet.c              | 3 ++-
>  xen/arch/x86/hvm/hvm.c               | 4 ++++
>  xen/arch/x86/hvm/irq.c               | 2 ++
>  xen/arch/x86/hvm/mtrr.c              | 2 ++
>  xen/arch/x86/hvm/pmtimer.c           | 2 ++
>  xen/arch/x86/hvm/rtc.c               | 2 ++
>  xen/arch/x86/hvm/vioapic.c           | 2 ++
>  xen/arch/x86/hvm/viridian/viridian.c | 2 ++
>  xen/arch/x86/hvm/vlapic.c            | 3 ++-
>  xen/arch/x86/hvm/vpic.c              | 2 ++
>  xen/arch/x86/include/asm/hvm/save.h  | 5 ++++-
>  15 files changed, 38 insertions(+), 7 deletions(-)
> 
> diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
> index 1a7e92506ac8..ba27f6f8bd91 100644
> --- a/xen/arch/x86/cpu/mcheck/vmce.c
> +++ b/xen/arch/x86/cpu/mcheck/vmce.c
> @@ -349,7 +349,7 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
>      return ret;
>  }
>  
> -#if CONFIG_HVM
> +#if defined(CONFIG_HVM_SAVE_RESTORE)

#if wasn't really correct to use here; #ifdef was and is wanted.

>  static int cf_check vmce_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
>  {
>      struct hvm_vmce_vcpu ctxt = {
> @@ -380,10 +380,10 @@ static int cf_check vmce_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *
>  
>      return err ?: vmce_restore_vcpu(v, &ctxt);
>  }
> +#endif /* CONFIG_HVM_SAVE_RESTORE */
>  
>  HVM_REGISTER_SAVE_RESTORE(VMCE_VCPU, vmce_save_vcpu_ctxt, NULL,
>                            vmce_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
> -#endif

Why would this #endif move? (It gaining a comment is fine of course.)

> --- a/xen/arch/x86/emul-i8254.c
> +++ b/xen/arch/x86/emul-i8254.c
> @@ -409,7 +409,9 @@ void pit_stop_channel0_irq(PITState *pit)
>      destroy_periodic_time(&pit->pt0);
>      spin_unlock(&pit->lock);
>  }
> +#endif
>  
> +#if defined(CONFIG_HVM_SAVE_RESTORE)

Hmm, again - please use the shorter #ifdef.

> @@ -507,9 +509,9 @@ static int cf_check pit_load(struct domain *d, hvm_domain_context_t *h)
>  
>      return rc;
>  }
> +#endif /* CONFIG_HVM_SAVE_RESTORE */
>  
>  HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_check, pit_load, 1, HVMSR_PER_DOM);
> -#endif

And again - why move it?

> --- a/xen/arch/x86/hvm/Kconfig
> +++ b/xen/arch/x86/hvm/Kconfig
> @@ -93,4 +93,10 @@ config MEM_SHARING
>  	depends on INTEL_VMX
>  	depends on MGMT_HYPERCALLS
>  
> +config HVM_SAVE_RESTORE
> +	depends on MGMT_HYPERCALLS
> +	def_bool y
> +	help
> +	  Enables HVM save/load functionality.
> +
>  endif

This wants to move up some imo; MEM_SHARING is clearly the more niche feature.

> --- a/xen/arch/x86/include/asm/hvm/save.h
> +++ b/xen/arch/x86/include/asm/hvm/save.h
> @@ -123,6 +123,7 @@ void hvm_register_savevm(uint16_t typecode,
>  
>  /* Syntactic sugar around that function: specify the max number of
>   * saves, and this calculates the size of buffer needed */
> +#ifdef CONFIG_HVM_SAVE_RESTORE
>  #define HVM_REGISTER_SAVE_RESTORE(_x, _save, check, _load, _num, _k)      \
>  static int __init cf_check __hvm_register_##_x##_save_and_restore(void)   \
>  {                                                                         \
> @@ -137,7 +138,9 @@ static int __init cf_check __hvm_register_##_x##_save_and_restore(void)   \
>      return 0;                                                             \
>  }                                                                         \
>  __initcall(__hvm_register_##_x##_save_and_restore);
> -
> +#else
> +#define HVM_REGISTER_SAVE_RESTORE(_x, _save, check, _load, _num, _k)
> +#endif

By suitably moving the #endif-s I would hope we can get away without this
dummy #define.

Jan
Re: [XEN][PATCH] x86/hvm: move save/restore under HVM_SAVE_RESTORE config
Posted by Grygorii Strashko 2 weeks, 3 days ago

On 26.11.25 08:59, Jan Beulich wrote:
> On 26.11.2025 00:00, Grygorii Strashko wrote:
>> From: Grygorii Strashko <grygorii_strashko@epam.com>
>>
>> Introduce separate HVM_SAVE_RESTORE config for HVM save/restore feature,
>> which is enabled by default for HVM and depends on MGMT_HYPERCALLS config.
>>
>> This allows making MGMT_HYPERCALLS-specific changes more granular and, if
>> required, making HVM save/restore an optional, selectable feature.
>>
>> Signed-off-by: Grygorii Strashko <grygorii_strashko@epam.com>
>> ---
>> I'd like to propose this patch as a replacement of Patch 19 [1]
>>
>> [1] https://patchwork.kernel.org/project/xen-devel/patch/20251121105801.1251262-20-Penny.Zheng@amd.com/
>>
>>   xen/arch/x86/cpu/mcheck/vmce.c       | 4 ++--
>>   xen/arch/x86/emul-i8254.c            | 4 +++-
>>   xen/arch/x86/hvm/Kconfig             | 6 ++++++
>>   xen/arch/x86/hvm/Makefile            | 2 +-
>>   xen/arch/x86/hvm/hpet.c              | 3 ++-
>>   xen/arch/x86/hvm/hvm.c               | 4 ++++
>>   xen/arch/x86/hvm/irq.c               | 2 ++
>>   xen/arch/x86/hvm/mtrr.c              | 2 ++
>>   xen/arch/x86/hvm/pmtimer.c           | 2 ++
>>   xen/arch/x86/hvm/rtc.c               | 2 ++
>>   xen/arch/x86/hvm/vioapic.c           | 2 ++
>>   xen/arch/x86/hvm/viridian/viridian.c | 2 ++
>>   xen/arch/x86/hvm/vlapic.c            | 3 ++-
>>   xen/arch/x86/hvm/vpic.c              | 2 ++
>>   xen/arch/x86/include/asm/hvm/save.h  | 5 ++++-
>>   15 files changed, 38 insertions(+), 7 deletions(-)
>>
>> diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
>> index 1a7e92506ac8..ba27f6f8bd91 100644
>> --- a/xen/arch/x86/cpu/mcheck/vmce.c
>> +++ b/xen/arch/x86/cpu/mcheck/vmce.c
>> @@ -349,7 +349,7 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
>>       return ret;
>>   }
>>   
>> -#if CONFIG_HVM
>> +#if defined(CONFIG_HVM_SAVE_RESTORE)
> 
> #if wasn't really correct to use here; #ifdef was and is wanted.
> 
>>   static int cf_check vmce_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
>>   {
>>       struct hvm_vmce_vcpu ctxt = {
>> @@ -380,10 +380,10 @@ static int cf_check vmce_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *
>>   
>>       return err ?: vmce_restore_vcpu(v, &ctxt);
>>   }
>> +#endif /* CONFIG_HVM_SAVE_RESTORE */
>>   
>>   HVM_REGISTER_SAVE_RESTORE(VMCE_VCPU, vmce_save_vcpu_ctxt, NULL,
>>                             vmce_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
>> -#endif
> 
> Why would this #endif move? (It gaining a comment is fine of course.)

Huh. Initially I used __maybe_unused on the save/restore callbacks and
defined HVM_REGISTER_SAVE_RESTORE() as a NOP.

I'll correct and drop empty HVM_REGISTER_SAVE_RESTORE()

>> --- a/xen/arch/x86/hvm/Kconfig
>> +++ b/xen/arch/x86/hvm/Kconfig
>> @@ -93,4 +93,10 @@ config MEM_SHARING
>>   	depends on INTEL_VMX
>>   	depends on MGMT_HYPERCALLS
>>   
>> +config HVM_SAVE_RESTORE
>> +	depends on MGMT_HYPERCALLS
>> +	def_bool y
>> +	help
>> +	  Enables HVM save/load functionality.
>> +
>>   endif
> 
> This wants to move up some imo; MEM_SHARING is clearly the more niche feature.
> 

Could you clarify the preferred place - before which Kconfig option in hvm/Kconfig?


-- 
Best regards,
-grygorii
Re: [XEN][PATCH] x86/hvm: move save/restore under HVM_SAVE_RESTORE config
Posted by Jan Beulich 2 weeks, 3 days ago
On 26.11.2025 11:13, Grygorii Strashko wrote:
> On 26.11.25 08:59, Jan Beulich wrote:
>> On 26.11.2025 00:00, Grygorii Strashko wrote:
>>> --- a/xen/arch/x86/hvm/Kconfig
>>> +++ b/xen/arch/x86/hvm/Kconfig
>>> @@ -93,4 +93,10 @@ config MEM_SHARING
>>>   	depends on INTEL_VMX
>>>   	depends on MGMT_HYPERCALLS
>>>   
>>> +config HVM_SAVE_RESTORE
>>> +	depends on MGMT_HYPERCALLS
>>> +	def_bool y
>>> +	help
>>> +	  Enables HVM save/load functionality.
>>> +
>>>   endif
>>
>> This wants to move up some imo; MEM_SHARING is clearly the more niche feature.
> 
> Could you clarify preferred place - before which Kconfig option in hvm/Kconfig?

There's no "ideal" place imo, as the ordering is somewhat clumsy anyway. Before
HVM_FEP or after VIRIDIAN, I would say (with a slight preference to the former).

Jan