From: Sergiy Kibrik <Sergiy_Kibrik@epam.com>
Add config option VIRIDIAN that covers the viridian code within HVM.
Calls to viridian functions are guarded by is_viridian_domain() and
related macros, so they compile away when the option is disabled. This
can reduce the code footprint of systems that do not use Hyper-V
enlightenments.
[grygorii_strashko@epam.com: fixed NULL pointer deref in
viridian_save_domain_ctxt()]
Signed-off-by: Sergiy Kibrik <Sergiy_Kibrik@epam.com>
Signed-off-by: Grygorii Strashko <grygorii_strashko@epam.com>
---
changes in v4:
- s/HVM_VIRIDIAN/VIRIDIAN
- add "depends on AMD_SVM || INTEL_VMX"
- add !is_viridian_domain() guard checks in viridian_load_vcpu_ctxt()/viridian_load_domain_ctxt()
changes in v3:
- fixed NULL pointer deref in viridian_save_domain_ctxt() reported for v2,
which caused v2 revert by commit 1fffcf10cd71 ("Revert "x86: make Viridian
support optional"")
v3: https://patchwork.kernel.org/project/xen-devel/patch/20250916134114.2214104-1-grygorii_strashko@epam.com/
v2: https://patchwork.kernel.org/project/xen-devel/patch/20250321092633.3982645-1-Sergiy_Kibrik@epam.com/
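
As a reference for the code-footprint claim in the description, below is
a minimal standalone sketch of the IS_ENABLED() technique that the
reworked is_viridian_domain() relies on. The helper macros mirror
xen/include/xen/kconfig.h; CONFIG_DEMO and the *_demo names are
hypothetical stand-ins, not Xen symbols, and this is not part of the
patch:

/* IS_ENABLED(x) expands to 1 when the preprocessor sees "#define x 1"
 * (which is what Kconfig emits for enabled bool options) and to 0
 * otherwise, so it can be used in ordinary C expressions. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

extern int viridian_domain_init_demo(void); /* hypothetical stand-in */

int demo_domain_initialise(void)
{
    int rc = 0;

    /* The condition folds to a literal 0 or 1 at preprocessing time, so
     * with optimization enabled (as in normal Xen builds) the guarded
     * call is dead code when the option is off and the corresponding
     * object files need not be linked in at all. */
    if ( IS_ENABLED(CONFIG_DEMO) )
        rc = viridian_domain_init_demo();

    return rc;
}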
xen/arch/x86/hvm/Kconfig | 10 ++++++++++
xen/arch/x86/hvm/Makefile | 2 +-
xen/arch/x86/hvm/hvm.c | 27 ++++++++++++++++++---------
xen/arch/x86/hvm/viridian/viridian.c | 14 ++++++++++----
xen/arch/x86/hvm/vlapic.c | 11 +++++++----
xen/arch/x86/include/asm/hvm/domain.h | 2 ++
xen/arch/x86/include/asm/hvm/hvm.h | 3 ++-
xen/arch/x86/include/asm/hvm/vcpu.h | 2 ++
8 files changed, 53 insertions(+), 19 deletions(-)
diff --git a/xen/arch/x86/hvm/Kconfig b/xen/arch/x86/hvm/Kconfig
index 5cb9f2904255..aed799fcb9c2 100644
--- a/xen/arch/x86/hvm/Kconfig
+++ b/xen/arch/x86/hvm/Kconfig
@@ -62,6 +62,17 @@ config ALTP2M
If unsure, stay with defaults.
+config VIRIDIAN
+ bool "Hyper-V enlightenments for guests" if EXPERT
+ depends on AMD_SVM || INTEL_VMX
+ default y
+ help
+ Support optimizations for Hyper-V guests such as faster hypercalls,
+ efficient timer and interrupt handling, and enhanced paravirtualized
+ I/O. These improve the performance and compatibility of Windows VMs.
+
+ If unsure, say Y.
+
config MEM_PAGING
bool "Xen memory paging support (UNSUPPORTED)" if UNSUPPORTED
depends on VM_EVENT
diff --git a/xen/arch/x86/hvm/Makefile b/xen/arch/x86/hvm/Makefile
index 6ec2c8f2db56..736eb3f966e9 100644
--- a/xen/arch/x86/hvm/Makefile
+++ b/xen/arch/x86/hvm/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_AMD_SVM) += svm/
obj-$(CONFIG_INTEL_VMX) += vmx/
-obj-y += viridian/
+obj-$(CONFIG_VIRIDIAN) += viridian/
obj-y += asid.o
obj-y += dm.o
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 23bd7f078a1d..95a80369b9b8 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -701,9 +701,12 @@ int hvm_domain_initialise(struct domain *d,
if ( hvm_tsc_scaling_supported )
d->arch.hvm.tsc_scaling_ratio = hvm_default_tsc_scaling_ratio;
- rc = viridian_domain_init(d);
- if ( rc )
- goto fail2;
+ if ( is_viridian_domain(d) )
+ {
+ rc = viridian_domain_init(d);
+ if ( rc )
+ goto fail2;
+ }
rc = alternative_call(hvm_funcs.domain_initialise, d);
if ( rc != 0 )
@@ -739,7 +742,8 @@ void hvm_domain_relinquish_resources(struct domain *d)
if ( hvm_funcs.nhvm_domain_relinquish_resources )
alternative_vcall(hvm_funcs.nhvm_domain_relinquish_resources, d);
- viridian_domain_deinit(d);
+ if ( is_viridian_domain(d) )
+ viridian_domain_deinit(d);
ioreq_server_destroy_all(d);
@@ -1643,9 +1647,12 @@ int hvm_vcpu_initialise(struct vcpu *v)
&& (rc = nestedhvm_vcpu_initialise(v)) < 0 ) /* teardown: nestedhvm_vcpu_destroy */
goto fail5;
- rc = viridian_vcpu_init(v);
- if ( rc )
- goto fail6;
+ if ( is_viridian_domain(d) )
+ {
+ rc = viridian_vcpu_init(v);
+ if ( rc )
+ goto fail6;
+ }
rc = ioreq_server_add_vcpu_all(d, v);
if ( rc != 0 )
@@ -1675,13 +1682,15 @@ int hvm_vcpu_initialise(struct vcpu *v)
fail2:
hvm_vcpu_cacheattr_destroy(v);
fail1:
- viridian_vcpu_deinit(v);
+ if ( is_viridian_domain(d) )
+ viridian_vcpu_deinit(v);
return rc;
}
void hvm_vcpu_destroy(struct vcpu *v)
{
- viridian_vcpu_deinit(v);
+ if ( is_viridian_domain(v->domain) )
+ viridian_vcpu_deinit(v);
ioreq_server_remove_vcpu_all(v->domain, v);
diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index c0be24bd2210..5e49fc286d76 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -1116,14 +1116,14 @@ static int cf_check viridian_save_domain_ctxt(
{
const struct domain *d = v->domain;
const struct viridian_domain *vd = d->arch.hvm.viridian;
- struct hvm_viridian_domain_context ctxt = {
- .hypercall_gpa = vd->hypercall_gpa.raw,
- .guest_os_id = vd->guest_os_id.raw,
- };
+ struct hvm_viridian_domain_context ctxt = {};
if ( !is_viridian_domain(d) )
return 0;
+ ctxt.hypercall_gpa = vd->hypercall_gpa.raw;
+ ctxt.guest_os_id = vd->guest_os_id.raw;
+
viridian_time_save_domain_ctxt(d, &ctxt);
viridian_synic_save_domain_ctxt(d, &ctxt);
@@ -1136,6 +1136,9 @@ static int cf_check viridian_load_domain_ctxt(
struct viridian_domain *vd = d->arch.hvm.viridian;
struct hvm_viridian_domain_context ctxt;
+ if ( !is_viridian_domain(d) )
+ return 0;
+
if ( hvm_load_entry_zeroextend(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
return -EINVAL;
@@ -1172,6 +1175,9 @@ static int cf_check viridian_load_vcpu_ctxt(
struct vcpu *v;
struct hvm_viridian_vcpu_context ctxt;
+ if ( !is_viridian_domain(d) )
+ return 0;
+
if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
{
dprintk(XENLOG_G_ERR, "HVM restore: dom%d has no vcpu%u\n",
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index 993e972cd71e..79697487ba90 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -426,7 +426,8 @@ void vlapic_EOI_set(struct vlapic *vlapic)
* priority vector and then recurse to handle the lower priority
* vector.
*/
- bool missed_eoi = viridian_apic_assist_completed(v);
+ bool missed_eoi = has_viridian_apic_assist(v->domain) &&
+ viridian_apic_assist_completed(v);
int vector;
again:
@@ -442,7 +443,7 @@ void vlapic_EOI_set(struct vlapic *vlapic)
* NOTE: It is harmless to call viridian_apic_assist_clear() on a
* recursion, even though it is not necessary.
*/
- if ( !missed_eoi )
+ if ( has_viridian_apic_assist(v->domain) && !missed_eoi )
viridian_apic_assist_clear(v);
vlapic_clear_vector(vector, &vlapic->regs->data[APIC_ISR]);
@@ -1360,7 +1361,8 @@ int vlapic_has_pending_irq(struct vcpu *v)
* If so, we need to emulate the EOI here before comparing ISR
* with IRR.
*/
- if ( viridian_apic_assist_completed(v) )
+ if ( has_viridian_apic_assist(v->domain) &&
+ viridian_apic_assist_completed(v) )
vlapic_EOI_set(vlapic);
isr = vlapic_find_highest_isr(vlapic);
@@ -1373,7 +1375,8 @@ int vlapic_has_pending_irq(struct vcpu *v)
if ( isr >= 0 &&
(irr & 0xf0) <= (isr & 0xf0) )
{
- viridian_apic_assist_clear(v);
+ if ( has_viridian_apic_assist(v->domain) )
+ viridian_apic_assist_clear(v);
return -1;
}
diff --git a/xen/arch/x86/include/asm/hvm/domain.h b/xen/arch/x86/include/asm/hvm/domain.h
index 333501d5f2ac..95d9336a28f0 100644
--- a/xen/arch/x86/include/asm/hvm/domain.h
+++ b/xen/arch/x86/include/asm/hvm/domain.h
@@ -111,7 +111,9 @@ struct hvm_domain {
/* hypervisor intercepted msix table */
struct list_head msixtbl_list;
+#ifdef CONFIG_VIRIDIAN
struct viridian_domain *viridian;
+#endif
/*
* TSC value that VCPUs use to calculate their tsc_offset value.
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index f02183691ea6..7312cdd878e1 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -510,7 +510,8 @@ hvm_get_cpl(struct vcpu *v)
(has_hvm_params(d) ? (d)->arch.hvm.params[HVM_PARAM_VIRIDIAN] : 0)
#define is_viridian_domain(d) \
- (is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq))
+ (IS_ENABLED(CONFIG_VIRIDIAN) && \
+ is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq))
#define is_viridian_vcpu(v) \
is_viridian_domain((v)->domain)
diff --git a/xen/arch/x86/include/asm/hvm/vcpu.h b/xen/arch/x86/include/asm/hvm/vcpu.h
index 924af890c5b2..9ed9eaff3bc5 100644
--- a/xen/arch/x86/include/asm/hvm/vcpu.h
+++ b/xen/arch/x86/include/asm/hvm/vcpu.h
@@ -176,7 +176,9 @@ struct hvm_vcpu {
/* Pending hw/sw interrupt (.vector = -1 means nothing pending). */
struct x86_event inject_event;
+#ifdef CONFIG_VIRIDIAN
struct viridian_vcpu *viridian;
+#endif
};
#endif /* __ASM_X86_HVM_VCPU_H__ */
--
2.34.1
On 19.09.2025 18:31, Grygorii Strashko wrote:
> --- a/xen/arch/x86/hvm/Kconfig
> +++ b/xen/arch/x86/hvm/Kconfig
> @@ -62,6 +62,17 @@ config ALTP2M
>
>           If unsure, stay with defaults.
>
> +config VIRIDIAN
> +    bool "Hyper-V enlightenments for guests" if EXPERT
> +    depends on AMD_SVM || INTEL_VMX

Looks like either there was a misunderstanding, or I wasn't clear enough.
Here the dependency should strictly be HVM. If anything, the dependency
above could appear for HVM (but as said, as of now it's deliberately not
there).

> @@ -1136,6 +1136,9 @@ static int cf_check viridian_load_domain_ctxt(
>      struct viridian_domain *vd = d->arch.hvm.viridian;
>      struct hvm_viridian_domain_context ctxt;
>
> +    if ( !is_viridian_domain(d) )
> +        return 0;
> +
>      if ( hvm_load_entry_zeroextend(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
>          return -EINVAL;
>
> @@ -1172,6 +1175,9 @@ static int cf_check viridian_load_vcpu_ctxt(
>      struct vcpu *v;
>      struct hvm_viridian_vcpu_context ctxt;
>
> +    if ( !is_viridian_domain(d) )
> +        return 0;

I don't think we should let these go through, but rather flag an error.
And perhaps an intentionally exotic one (e.g. EILSEQ or something yet
more "odd").

Jan
On 19.09.25 23:49, Jan Beulich wrote:
> On 19.09.2025 18:31, Grygorii Strashko wrote:
>> --- a/xen/arch/x86/hvm/Kconfig
>> +++ b/xen/arch/x86/hvm/Kconfig
>> @@ -62,6 +62,17 @@ config ALTP2M
>>
>>           If unsure, stay with defaults.
>>
>> +config VIRIDIAN
>> +    bool "Hyper-V enlightenments for guests" if EXPERT
>> +    depends on AMD_SVM || INTEL_VMX
>
> Looks like either there was a misunderstanding, or I wasn't clear enough.
> Here the dependency should strictly be HVM. If anything, the dependency
> above could appear for HVM (but as said, as of now it's deliberately not
> there).

Sorry, I misunderstood you. I'll drop the above "depends on".

>
>> @@ -1136,6 +1136,9 @@ static int cf_check viridian_load_domain_ctxt(
>>      struct viridian_domain *vd = d->arch.hvm.viridian;
>>      struct hvm_viridian_domain_context ctxt;
>>
>> +    if ( !is_viridian_domain(d) )
>> +        return 0;
>> +
>>      if ( hvm_load_entry_zeroextend(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
>>          return -EINVAL;
>>
>> @@ -1172,6 +1175,9 @@ static int cf_check viridian_load_vcpu_ctxt(
>>      struct vcpu *v;
>>      struct hvm_viridian_vcpu_context ctxt;
>>
>> +    if ( !is_viridian_domain(d) )
>> +        return 0;
>
> I don't think we should let these go through, but rather flag an error.
> And perhaps an intentionally exotic one (e.g. EILSEQ or something yet
> more "odd").

Most existing load_x() handlers return -ENODEV (for example pit_load()
for !has_vpit()). Some return -EOPNOTSUPP.

I'd very much appreciate it if you could explicitly specify the error
code to be used: -EILSEQ? or -ENODEV? or ..

--
Best regards,
-grygorii
On 24.09.2025 12:13, Grygorii Strashko wrote:
> On 19.09.25 23:49, Jan Beulich wrote:
>> On 19.09.2025 18:31, Grygorii Strashko wrote:
>>> @@ -1136,6 +1136,9 @@ static int cf_check viridian_load_domain_ctxt(
>>>      struct viridian_domain *vd = d->arch.hvm.viridian;
>>>      struct hvm_viridian_domain_context ctxt;
>>>
>>> +    if ( !is_viridian_domain(d) )
>>> +        return 0;
>>> +
>>>      if ( hvm_load_entry_zeroextend(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
>>>          return -EINVAL;
>>>
>>> @@ -1172,6 +1175,9 @@ static int cf_check viridian_load_vcpu_ctxt(
>>>      struct vcpu *v;
>>>      struct hvm_viridian_vcpu_context ctxt;
>>>
>>> +    if ( !is_viridian_domain(d) )
>>> +        return 0;
>>
>> I don't think we should let these go through, but rather flag an error.
>> And perhaps an intentionally exotic one (e.g. EILSEQ or something yet
>> more "odd").
>
> Most existing load_x() handlers return -ENODEV (for example pit_load()
> for !has_vpit()). Some return -EOPNOTSUPP.
>
> I'd very much appreciate it if you could explicitly specify the error
> code to be used: -EILSEQ? or -ENODEV? or ..

Well, I did already suggest EILSEQ, didn't I? I merely wanted to leave it
open for you to pick "something yet more odd".

Jan
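
To make the requested rework concrete, here is a minimal sketch of what
the load-side guard might look like after this feedback; it assumes
-EILSEQ is the final pick (the choice was left open above) and elides
the rest of the function, which would stay as in the v4 patch:

static int cf_check viridian_load_domain_ctxt(
    struct domain *d, hvm_domain_context_t *h)
{
    struct viridian_domain *vd = d->arch.hvm.viridian;
    struct hvm_viridian_domain_context ctxt;

    /* Flag an error instead of silently ignoring the record: a Viridian
     * context in the stream for a non-Viridian domain indicates a
     * malformed or mismatched save image. -EILSEQ per review; the final
     * error code was left open. */
    if ( !is_viridian_domain(d) )
        return -EILSEQ;

    if ( hvm_load_entry_zeroextend(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
        return -EINVAL;

    /* ... remainder (including the uses of vd) unchanged from v4 ... */
    return 0;
}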