From: Grygorii Strashko <grygorii_strashko@epam.com>
Introduce a separate HVM_SAVE_RESTORE config option for the HVM save/restore
feature, which is enabled by default for HVM and depends on the
MGMT_HYPERCALLS config option.

This allows making MGMT_HYPERCALLS-specific changes more granular and, if
required, making HVM save/restore an optional, selectable feature.
Signed-off-by: Grygorii Strashko <grygorii_strashko@epam.com>
---
I'd like to propose this patch as a replacement for Patch 19 [1]
[1] https://patchwork.kernel.org/project/xen-devel/patch/20251121105801.1251262-20-Penny.Zheng@amd.com/
xen/arch/x86/cpu/mcheck/vmce.c | 4 ++--
xen/arch/x86/emul-i8254.c | 4 +++-
xen/arch/x86/hvm/Kconfig | 6 ++++++
xen/arch/x86/hvm/Makefile | 2 +-
xen/arch/x86/hvm/hpet.c | 3 ++-
xen/arch/x86/hvm/hvm.c | 4 ++++
xen/arch/x86/hvm/irq.c | 2 ++
xen/arch/x86/hvm/mtrr.c | 2 ++
xen/arch/x86/hvm/pmtimer.c | 2 ++
xen/arch/x86/hvm/rtc.c | 2 ++
xen/arch/x86/hvm/vioapic.c | 2 ++
xen/arch/x86/hvm/viridian/viridian.c | 2 ++
xen/arch/x86/hvm/vlapic.c | 3 ++-
xen/arch/x86/hvm/vpic.c | 2 ++
xen/arch/x86/include/asm/hvm/save.h | 5 ++++-
15 files changed, 38 insertions(+), 7 deletions(-)
diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 1a7e92506ac8..ba27f6f8bd91 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -349,7 +349,7 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
return ret;
}
-#if CONFIG_HVM
+#if defined(CONFIG_HVM_SAVE_RESTORE)
static int cf_check vmce_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
{
struct hvm_vmce_vcpu ctxt = {
@@ -380,10 +380,10 @@ static int cf_check vmce_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *
return err ?: vmce_restore_vcpu(v, &ctxt);
}
+#endif /* CONFIG_HVM_SAVE_RESTORE */
HVM_REGISTER_SAVE_RESTORE(VMCE_VCPU, vmce_save_vcpu_ctxt, NULL,
vmce_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
-#endif
/*
* for Intel MCE, broadcast vMCE to all vcpus
diff --git a/xen/arch/x86/emul-i8254.c b/xen/arch/x86/emul-i8254.c
index f106ab794c4f..d33723fa32c6 100644
--- a/xen/arch/x86/emul-i8254.c
+++ b/xen/arch/x86/emul-i8254.c
@@ -409,7 +409,9 @@ void pit_stop_channel0_irq(PITState *pit)
destroy_periodic_time(&pit->pt0);
spin_unlock(&pit->lock);
}
+#endif
+#if defined(CONFIG_HVM_SAVE_RESTORE)
static int cf_check pit_save(struct vcpu *v, hvm_domain_context_t *h)
{
struct domain *d = v->domain;
@@ -507,9 +509,9 @@ static int cf_check pit_load(struct domain *d, hvm_domain_context_t *h)
return rc;
}
+#endif /* CONFIG_HVM_SAVE_RESTORE */
HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_check, pit_load, 1, HVMSR_PER_DOM);
-#endif
/* The intercept action for PIT DM retval: 0--not handled; 1--handled. */
static int cf_check handle_pit_io(
diff --git a/xen/arch/x86/hvm/Kconfig b/xen/arch/x86/hvm/Kconfig
index 156deab29139..48c2e1e33469 100644
--- a/xen/arch/x86/hvm/Kconfig
+++ b/xen/arch/x86/hvm/Kconfig
@@ -93,4 +93,10 @@ config MEM_SHARING
depends on INTEL_VMX
depends on MGMT_HYPERCALLS
+config HVM_SAVE_RESTORE
+ depends on MGMT_HYPERCALLS
+ def_bool y
+ help
+ Enables HVM save/load functionality.
+
endif
diff --git a/xen/arch/x86/hvm/Makefile b/xen/arch/x86/hvm/Makefile
index f34fb03934c0..f1602cc659ea 100644
--- a/xen/arch/x86/hvm/Makefile
+++ b/xen/arch/x86/hvm/Makefile
@@ -22,7 +22,7 @@ obj-y += nestedhvm.o
obj-y += pmtimer.o
obj-y += quirks.o
obj-y += rtc.o
-obj-y += save.o
+obj-$(CONFIG_HVM_SAVE_RESTORE) += save.o
obj-y += stdvga.o
obj-y += vioapic.o
obj-y += vlapic.o
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index f0e5f877f4a1..7df8abfa348d 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -570,7 +570,7 @@ static const struct hvm_mmio_ops hpet_mmio_ops = {
.write = hpet_write
};
-
+#if defined(CONFIG_HVM_SAVE_RESTORE)
static int cf_check hpet_save(struct vcpu *v, hvm_domain_context_t *h)
{
const struct domain *d = v->domain;
@@ -691,6 +691,7 @@ static int cf_check hpet_load(struct domain *d, hvm_domain_context_t *h)
return 0;
}
+#endif /* CONFIG_HVM_SAVE_RESTORE */
HVM_REGISTER_SAVE_RESTORE(HPET, hpet_save, NULL, hpet_load, 1, HVMSR_PER_DOM);
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index b1e03bbb3bd3..832fca54f3a2 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -796,6 +796,7 @@ void hvm_domain_destroy(struct domain *d)
destroy_vpci_mmcfg(d);
}
+#if defined(CONFIG_HVM_SAVE_RESTORE)
static int cf_check hvm_save_tsc_adjust(struct vcpu *v, hvm_domain_context_t *h)
{
struct hvm_tsc_adjust ctxt = {
@@ -943,6 +944,7 @@ static int cf_check hvm_save_cpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
return hvm_save_entry(CPU, v->vcpu_id, h, &ctxt);
}
+#endif /* CONFIG_HVM_SAVE_RESTORE */
/* Return a string indicating the error, or NULL for valid. */
const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
@@ -1020,6 +1022,7 @@ unsigned long hvm_cr4_guest_valid_bits(const struct domain *d)
0);
}
+#if defined(CONFIG_HVM_SAVE_RESTORE)
static int cf_check hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
const struct cpu_policy *p = d->arch.cpu_policy;
@@ -1602,6 +1605,7 @@ static int __init cf_check hvm_register_CPU_save_and_restore(void)
return 0;
}
__initcall(hvm_register_CPU_save_and_restore);
+#endif /* CONFIG_HVM_SAVE_RESTORE */
static void cf_check hvm_assert_evtchn_irq_tasklet(void *v)
{
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 5f643611130f..af5f2c83577e 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -649,6 +649,7 @@ static int __init cf_check dump_irq_info_key_init(void)
}
__initcall(dump_irq_info_key_init);
+#if defined(CONFIG_HVM_SAVE_RESTORE)
static int cf_check irq_save_pci(struct vcpu *v, hvm_domain_context_t *h)
{
struct domain *d = v->domain;
@@ -799,6 +800,7 @@ static int cf_check irq_load_link(struct domain *d, hvm_domain_context_t *h)
return 0;
}
+#endif /* CONFIG_HVM_SAVE_RESTORE */
HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, NULL, irq_load_pci,
1, HVMSR_PER_DOM);
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index e0bef8c82dc8..c5e83dd7a602 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -695,6 +695,7 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
return rc;
}
+#if defined(CONFIG_HVM_SAVE_RESTORE)
static int cf_check hvm_save_mtrr_msr(struct vcpu *v, hvm_domain_context_t *h)
{
const struct mtrr_state *m = &v->arch.hvm.mtrr;
@@ -782,6 +783,7 @@ static int cf_check hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
return 0;
}
+#endif /* CONFIG_HVM_SAVE_RESTORE */
HVM_REGISTER_SAVE_RESTORE(MTRR, hvm_save_mtrr_msr, NULL, hvm_load_mtrr_msr, 1,
HVMSR_PER_VCPU);
diff --git a/xen/arch/x86/hvm/pmtimer.c b/xen/arch/x86/hvm/pmtimer.c
index 87a7a01c9f52..86f85d381a20 100644
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -240,6 +240,7 @@ static int cf_check handle_pmt_io(
return X86EMUL_OKAY;
}
+#if defined(CONFIG_HVM_SAVE_RESTORE)
static int cf_check acpi_save(struct vcpu *v, hvm_domain_context_t *h)
{
struct domain *d = v->domain;
@@ -300,6 +301,7 @@ static int cf_check acpi_load(struct domain *d, hvm_domain_context_t *h)
return 0;
}
+#endif /* CONFIG_HVM_SAVE_RESTORE */
HVM_REGISTER_SAVE_RESTORE(PMTIMER, acpi_save, NULL, acpi_load,
1, HVMSR_PER_DOM);
diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c
index e33a8ec10821..a451cd2e6880 100644
--- a/xen/arch/x86/hvm/rtc.c
+++ b/xen/arch/x86/hvm/rtc.c
@@ -742,6 +742,7 @@ void rtc_migrate_timers(struct vcpu *v)
}
}
+#if defined(CONFIG_HVM_SAVE_RESTORE)
/* Save RTC hardware state */
static int cf_check rtc_save(struct vcpu *v, hvm_domain_context_t *h)
{
@@ -797,6 +798,7 @@ static int cf_check rtc_load(struct domain *d, hvm_domain_context_t *h)
return 0;
}
+#endif /* CONFIG_HVM_SAVE_RESTORE */
HVM_REGISTER_SAVE_RESTORE(RTC, rtc_save, NULL, rtc_load, 1, HVMSR_PER_DOM);
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 7c725f9e471f..5fad31074288 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -594,6 +594,7 @@ int vioapic_get_trigger_mode(const struct domain *d, unsigned int gsi)
return vioapic->redirtbl[pin].fields.trig_mode;
}
+#if defined(CONFIG_HVM_SAVE_RESTORE)
static int cf_check ioapic_save(struct vcpu *v, hvm_domain_context_t *h)
{
const struct domain *d = v->domain;
@@ -629,6 +630,7 @@ static int cf_check ioapic_load(struct domain *d, hvm_domain_context_t *h)
return 0;
}
+#endif /* CONFIG_HVM_SAVE_RESTORE */
HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, NULL, ioapic_load, 1,
HVMSR_PER_DOM);
diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index 90e749ceb581..ab2a7078515c 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -1092,6 +1092,7 @@ void viridian_unmap_guest_page(struct viridian_page *vp)
put_page_and_type(page);
}
+#if defined(CONFIG_HVM_SAVE_RESTORE)
static int cf_check viridian_save_domain_ctxt(
struct vcpu *v, hvm_domain_context_t *h)
{
@@ -1171,6 +1172,7 @@ static int cf_check viridian_load_vcpu_ctxt(
return 0;
}
+#endif /* CONFIG_HVM_SAVE_RESTORE */
HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_VCPU, viridian_save_vcpu_ctxt, NULL,
viridian_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 79697487ba90..ea7d186ff1ea 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1481,6 +1481,7 @@ void vlapic_reset(struct vlapic *vlapic)
vlapic_do_init(vlapic);
}
+#if defined(CONFIG_HVM_SAVE_RESTORE)
/* rearm the actimer if needed, after a HVM restore */
static void lapic_rearm(struct vlapic *s)
{
@@ -1578,7 +1579,6 @@ static void lapic_load_fixup(struct vlapic *vlapic)
v, vlapic->loaded.id, vlapic->loaded.ldr, good_ldr);
}
-
static int lapic_check_common(const struct domain *d, unsigned int vcpuid)
{
if ( !has_vlapic(d) )
@@ -1675,6 +1675,7 @@ static int cf_check lapic_load_regs(struct domain *d, hvm_domain_context_t *h)
lapic_rearm(s);
return 0;
}
+#endif /* CONFIG_HVM_SAVE_RESTORE */
HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_check_hidden,
lapic_load_hidden, 1, HVMSR_PER_VCPU);
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index 30ce367c082d..af49c372f502 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -402,6 +402,7 @@ static int cf_check vpic_intercept_elcr_io(
return X86EMUL_OKAY;
}
+#if defined(CONFIG_HVM_SAVE_RESTORE)
static int cf_check vpic_save(struct vcpu *v, hvm_domain_context_t *h)
{
struct domain *d = v->domain;
@@ -475,6 +476,7 @@ static int cf_check vpic_load(struct domain *d, hvm_domain_context_t *h)
return 0;
}
+#endif /* CONFIG_HVM_SAVE_RESTORE */
HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_check, vpic_load, 2,
HVMSR_PER_DOM);
diff --git a/xen/arch/x86/include/asm/hvm/save.h b/xen/arch/x86/include/asm/hvm/save.h
index ec8de029319d..501a6cfb214a 100644
--- a/xen/arch/x86/include/asm/hvm/save.h
+++ b/xen/arch/x86/include/asm/hvm/save.h
@@ -123,6 +123,7 @@ void hvm_register_savevm(uint16_t typecode,
/* Syntactic sugar around that function: specify the max number of
* saves, and this calculates the size of buffer needed */
+#ifdef CONFIG_HVM_SAVE_RESTORE
#define HVM_REGISTER_SAVE_RESTORE(_x, _save, check, _load, _num, _k) \
static int __init cf_check __hvm_register_##_x##_save_and_restore(void) \
{ \
@@ -137,7 +138,9 @@ static int __init cf_check __hvm_register_##_x##_save_and_restore(void) \
return 0; \
} \
__initcall(__hvm_register_##_x##_save_and_restore);
-
+#else
+#define HVM_REGISTER_SAVE_RESTORE(_x, _save, check, _load, _num, _k)
+#endif
/* Entry points for saving and restoring HVM domain state */
size_t hvm_save_size(struct domain *d);
--
2.34.1
On 26.11.2025 00:00, Grygorii Strashko wrote:
> From: Grygorii Strashko <grygorii_strashko@epam.com>
>
> Introduce a separate HVM_SAVE_RESTORE config option for the HVM save/restore
> feature, which is enabled by default for HVM and depends on the
> MGMT_HYPERCALLS config option.
>
> This allows making MGMT_HYPERCALLS-specific changes more granular and, if
> required, making HVM save/restore an optional, selectable feature.
>
> Signed-off-by: Grygorii Strashko <grygorii_strashko@epam.com>
> ---
> I'd like to propose this patch as a replacement for Patch 19 [1]
>
> [1] https://patchwork.kernel.org/project/xen-devel/patch/20251121105801.1251262-20-Penny.Zheng@amd.com/
>
> xen/arch/x86/cpu/mcheck/vmce.c | 4 ++--
> xen/arch/x86/emul-i8254.c | 4 +++-
> xen/arch/x86/hvm/Kconfig | 6 ++++++
> xen/arch/x86/hvm/Makefile | 2 +-
> xen/arch/x86/hvm/hpet.c | 3 ++-
> xen/arch/x86/hvm/hvm.c | 4 ++++
> xen/arch/x86/hvm/irq.c | 2 ++
> xen/arch/x86/hvm/mtrr.c | 2 ++
> xen/arch/x86/hvm/pmtimer.c | 2 ++
> xen/arch/x86/hvm/rtc.c | 2 ++
> xen/arch/x86/hvm/vioapic.c | 2 ++
> xen/arch/x86/hvm/viridian/viridian.c | 2 ++
> xen/arch/x86/hvm/vlapic.c | 3 ++-
> xen/arch/x86/hvm/vpic.c | 2 ++
> xen/arch/x86/include/asm/hvm/save.h | 5 ++++-
> 15 files changed, 38 insertions(+), 7 deletions(-)
>
> diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
> index 1a7e92506ac8..ba27f6f8bd91 100644
> --- a/xen/arch/x86/cpu/mcheck/vmce.c
> +++ b/xen/arch/x86/cpu/mcheck/vmce.c
> @@ -349,7 +349,7 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
> return ret;
> }
>
> -#if CONFIG_HVM
> +#if defined(CONFIG_HVM_SAVE_RESTORE)
#if wasn't really correct to use here; #ifdef was and is wanted.
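To illustrate (a minimal sketch, assuming the usual autoconf.h that Kconfig
generates for boolean options):

    /* With HVM_SAVE_RESTORE=y the generated header contains
     *     #define CONFIG_HVM_SAVE_RESTORE 1
     * and nothing at all when the option is off. */

    /* Relies on an undefined macro evaluating to 0; warns under -Wundef. */
    #if CONFIG_HVM_SAVE_RESTORE
    #endif

    /* Merely tests for the macro's presence - the wanted form. */
    #ifdef CONFIG_HVM_SAVE_RESTORE
    #endif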
> static int cf_check vmce_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
> {
> struct hvm_vmce_vcpu ctxt = {
> @@ -380,10 +380,10 @@ static int cf_check vmce_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *
>
> return err ?: vmce_restore_vcpu(v, &ctxt);
> }
> +#endif /* CONFIG_HVM_SAVE_RESTORE */
>
> HVM_REGISTER_SAVE_RESTORE(VMCE_VCPU, vmce_save_vcpu_ctxt, NULL,
> vmce_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
> -#endif
Why would this #endif move? (It gaining a comment is fine of course.)
> --- a/xen/arch/x86/emul-i8254.c
> +++ b/xen/arch/x86/emul-i8254.c
> @@ -409,7 +409,9 @@ void pit_stop_channel0_irq(PITState *pit)
> destroy_periodic_time(&pit->pt0);
> spin_unlock(&pit->lock);
> }
> +#endif
>
> +#if defined(CONFIG_HVM_SAVE_RESTORE)
Hmm, again - please use the shorter #ifdef.
> @@ -507,9 +509,9 @@ static int cf_check pit_load(struct domain *d, hvm_domain_context_t *h)
>
> return rc;
> }
> +#endif /* CONFIG_HVM_SAVE_RESTORE */
>
> HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_check, pit_load, 1, HVMSR_PER_DOM);
> -#endif
And again - why move it?
> --- a/xen/arch/x86/hvm/Kconfig
> +++ b/xen/arch/x86/hvm/Kconfig
> @@ -93,4 +93,10 @@ config MEM_SHARING
> depends on INTEL_VMX
> depends on MGMT_HYPERCALLS
>
> +config HVM_SAVE_RESTORE
> + depends on MGMT_HYPERCALLS
> + def_bool y
> + help
> + Enables HVM save/load functionality.
> +
> endif
This wants to move up some imo; MEM_SHARING is clearly the more niche feature.
> --- a/xen/arch/x86/include/asm/hvm/save.h
> +++ b/xen/arch/x86/include/asm/hvm/save.h
> @@ -123,6 +123,7 @@ void hvm_register_savevm(uint16_t typecode,
>
> /* Syntactic sugar around that function: specify the max number of
> * saves, and this calculates the size of buffer needed */
> +#ifdef CONFIG_HVM_SAVE_RESTORE
> #define HVM_REGISTER_SAVE_RESTORE(_x, _save, check, _load, _num, _k) \
> static int __init cf_check __hvm_register_##_x##_save_and_restore(void) \
> { \
> @@ -137,7 +138,9 @@ static int __init cf_check __hvm_register_##_x##_save_and_restore(void) \
> return 0; \
> } \
> __initcall(__hvm_register_##_x##_save_and_restore);
> -
> +#else
> +#define HVM_REGISTER_SAVE_RESTORE(_x, _save, check, _load, _num, _k)
> +#endif
By suitably moving the #endif-s I would hope we can get away without this
dummy #define.
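I.e. something along these lines (a sketch only, using vmce.c as the example
and assuming every registration site can be re-arranged the same way):

    #ifdef CONFIG_HVM_SAVE_RESTORE
    static int cf_check vmce_save_vcpu_ctxt(struct vcpu *v,
                                            hvm_domain_context_t *h)
    {
        /* ... body unchanged ... */
        return 0;
    }

    static int cf_check vmce_load_vcpu_ctxt(struct domain *d,
                                            hvm_domain_context_t *h)
    {
        /* ... body unchanged ... */
        return 0;
    }

    /* Registration stays inside the guarded region, so no stub macro is
     * needed when CONFIG_HVM_SAVE_RESTORE is off. */
    HVM_REGISTER_SAVE_RESTORE(VMCE_VCPU, vmce_save_vcpu_ctxt, NULL,
                              vmce_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
    #endif /* CONFIG_HVM_SAVE_RESTORE */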
Jan
On 26.11.25 08:59, Jan Beulich wrote:
> On 26.11.2025 00:00, Grygorii Strashko wrote:
>> From: Grygorii Strashko <grygorii_strashko@epam.com>
>>
>> Introduce a separate HVM_SAVE_RESTORE config option for the HVM save/restore
>> feature, which is enabled by default for HVM and depends on the
>> MGMT_HYPERCALLS config option.
>>
>> This allows making MGMT_HYPERCALLS-specific changes more granular and, if
>> required, making HVM save/restore an optional, selectable feature.
>>
>> Signed-off-by: Grygorii Strashko <grygorii_strashko@epam.com>
>> ---
>> I'd like to propose this patch as a replacement for Patch 19 [1]
>>
>> [1] https://patchwork.kernel.org/project/xen-devel/patch/20251121105801.1251262-20-Penny.Zheng@amd.com/
>>
>> xen/arch/x86/cpu/mcheck/vmce.c | 4 ++--
>> xen/arch/x86/emul-i8254.c | 4 +++-
>> xen/arch/x86/hvm/Kconfig | 6 ++++++
>> xen/arch/x86/hvm/Makefile | 2 +-
>> xen/arch/x86/hvm/hpet.c | 3 ++-
>> xen/arch/x86/hvm/hvm.c | 4 ++++
>> xen/arch/x86/hvm/irq.c | 2 ++
>> xen/arch/x86/hvm/mtrr.c | 2 ++
>> xen/arch/x86/hvm/pmtimer.c | 2 ++
>> xen/arch/x86/hvm/rtc.c | 2 ++
>> xen/arch/x86/hvm/vioapic.c | 2 ++
>> xen/arch/x86/hvm/viridian/viridian.c | 2 ++
>> xen/arch/x86/hvm/vlapic.c | 3 ++-
>> xen/arch/x86/hvm/vpic.c | 2 ++
>> xen/arch/x86/include/asm/hvm/save.h | 5 ++++-
>> 15 files changed, 38 insertions(+), 7 deletions(-)
>>
>> diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
>> index 1a7e92506ac8..ba27f6f8bd91 100644
>> --- a/xen/arch/x86/cpu/mcheck/vmce.c
>> +++ b/xen/arch/x86/cpu/mcheck/vmce.c
>> @@ -349,7 +349,7 @@ int vmce_wrmsr(uint32_t msr, uint64_t val)
>> return ret;
>> }
>>
>> -#if CONFIG_HVM
>> +#if defined(CONFIG_HVM_SAVE_RESTORE)
>
> #if wasn't really correct to use here; #ifdef was and is wanted.
>
>> static int cf_check vmce_save_vcpu_ctxt(struct vcpu *v, hvm_domain_context_t *h)
>> {
>> struct hvm_vmce_vcpu ctxt = {
>> @@ -380,10 +380,10 @@ static int cf_check vmce_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *
>>
>> return err ?: vmce_restore_vcpu(v, &ctxt);
>> }
>> +#endif /* CONFIG_HVM_SAVE_RESTORE */
>>
>> HVM_REGISTER_SAVE_RESTORE(VMCE_VCPU, vmce_save_vcpu_ctxt, NULL,
>> vmce_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
>> -#endif
>
> Why would this #endif move? (It gaining a comment is fine of course.)
Huh. Initially I used __maybe_unused on the save/restore callbacks and
defined HVM_REGISTER_SAVE_RESTORE() as a NOP.

I'll correct this and drop the empty HVM_REGISTER_SAVE_RESTORE().
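For reference, that initial variant looked roughly like this (a sketch):

    /* Callbacks always compiled; the attribute silences "defined but not
     * used" when the registration macro below expands to nothing. */
    static int __maybe_unused cf_check pit_save(struct vcpu *v,
                                                hvm_domain_context_t *h)
    {
        /* ... */
        return 0;
    }

    /* No-op registration when the feature is compiled out: */
    #define HVM_REGISTER_SAVE_RESTORE(_x, _save, check, _load, _num, _k)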
>> --- a/xen/arch/x86/hvm/Kconfig
>> +++ b/xen/arch/x86/hvm/Kconfig
>> @@ -93,4 +93,10 @@ config MEM_SHARING
>> depends on INTEL_VMX
>> depends on MGMT_HYPERCALLS
>>
>> +config HVM_SAVE_RESTORE
>> + depends on MGMT_HYPERCALLS
>> + def_bool y
>> + help
>> + Enables HVM save/load functionality.
>> +
>> endif
>
> This wants to move up some imo; MEM_SHARING is clearly the more niche feature.
>
Could you clarify the preferred place - before which Kconfig option in hvm/Kconfig?
--
Best regards,
-grygorii
On 26.11.2025 11:13, Grygorii Strashko wrote:
> On 26.11.25 08:59, Jan Beulich wrote:
>> On 26.11.2025 00:00, Grygorii Strashko wrote:
>>> --- a/xen/arch/x86/hvm/Kconfig
>>> +++ b/xen/arch/x86/hvm/Kconfig
>>> @@ -93,4 +93,10 @@ config MEM_SHARING
>>> depends on INTEL_VMX
>>> depends on MGMT_HYPERCALLS
>>>
>>> +config HVM_SAVE_RESTORE
>>> + depends on MGMT_HYPERCALLS
>>> + def_bool y
>>> + help
>>> + Enables HVM save/load functionality.
>>> +
>>> endif
>>
>> This wants to move up some imo; MEM_SHARING is clearly the more niche feature.
>
> Could you clarify the preferred place - before which Kconfig option in hvm/Kconfig?

There's no "ideal" place imo, as the ordering is somewhat clumsy anyway.
Before HVM_FEP or after VIRIDIAN, I would say (with a slight preference to
the former).

Jan
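For illustration, the suggested placement could look roughly like this (a
sketch only; the option body is the submitter's, unchanged):

    # Placed immediately ahead of the existing HVM_FEP option:
    config HVM_SAVE_RESTORE
    	def_bool y
    	depends on MGMT_HYPERCALLS
    	help
    	  Enables HVM save/load functionality.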