Files hvm/vm_event.c and x86/vm_event.c extend the vm_event handling
routines, so their compilation shall be guarded by CONFIG_VM_EVENT too.
Furthermore, the monitor_op and memory access features are both built on
top of the vm_event subsystem, so monitor.o/mem_access.o shall also be
wrapped under CONFIG_VM_EVENT.
Although CONFIG_VM_EVENT is forcibly enabled on x86 for now, it will become
possible to disable it by disabling CONFIG_MGMT_HYPERCALLS in the future.
As a consequence, a few functions, such as the ones declared in
hvm/monitor.h, need stubs to pass compilation when CONFIG_VM_EVENT=n.
Remove the CONFIG_VM_EVENT wrapper around "#include <asm/mem_access.h>", as
the stub of p2m_mem_access_check() is needed to pass compilation when
CONFIG_VM_EVENT=n.
The following functions are built on top of the vm_event framework, or are
only invoked from vm_event.c/monitor.c/mem_access.c, so they shall all be
wrapped with CONFIG_VM_EVENT:
- hvm_toggle_singlestep
- hvm_fast_singlestep
- hvm_enable_msr_interception
- hvm_function_table.enable_msr_interception
- hvm_has_set_descriptor_access_exiting
- hvm_function_table.set_descriptor_access_exiting
Signed-off-by: Penny Zheng <Penny.Zheng@amd.com>
---
v1 -> v2:
- split out XSM changes
- remove unnecessary stubs
- move "struct p2m_domain" declaration ahead of the #ifdef
---
xen/arch/x86/Makefile | 2 +-
xen/arch/x86/hvm/Makefile | 4 +-
xen/arch/x86/hvm/hvm.c | 2 +
xen/arch/x86/hvm/svm/svm.c | 8 +++
xen/arch/x86/hvm/vmx/vmx.c | 10 ++++
xen/arch/x86/include/asm/hvm/hvm.h | 10 ++++
xen/arch/x86/include/asm/hvm/monitor.h | 65 ++++++++++++++++++++++++-
xen/arch/x86/include/asm/hvm/vm_event.h | 4 ++
xen/arch/x86/include/asm/mem_access.h | 9 ++++
xen/arch/x86/include/asm/monitor.h | 7 +++
xen/include/xen/mem_access.h | 35 +++++++++++--
xen/include/xen/monitor.h | 8 ++-
xen/include/xen/vm_event.h | 24 ++++++++-
13 files changed, 176 insertions(+), 12 deletions(-)
diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
index a9fdba0b4c..a7bfe4c0b1 100644
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -76,7 +76,7 @@ obj-y += usercopy.o
obj-y += x86_emulate.o
obj-$(CONFIG_TBOOT) += tboot.o
obj-y += hpet.o
-obj-y += vm_event.o
+obj-$(CONFIG_VM_EVENT) += vm_event.o
obj-y += xstate.o
ifneq ($(CONFIG_PV_SHIM_EXCLUSIVE),y)
diff --git a/xen/arch/x86/hvm/Makefile b/xen/arch/x86/hvm/Makefile
index 6ec2c8f2db..952db00dd7 100644
--- a/xen/arch/x86/hvm/Makefile
+++ b/xen/arch/x86/hvm/Makefile
@@ -16,7 +16,7 @@ obj-y += io.o
obj-y += ioreq.o
obj-y += irq.o
obj-y += mmio.o
-obj-y += monitor.o
+obj-$(CONFIG_VM_EVENT) += monitor.o
obj-y += mtrr.o
obj-y += nestedhvm.o
obj-y += pmtimer.o
@@ -26,7 +26,7 @@ obj-y += save.o
obj-y += stdvga.o
obj-y += vioapic.o
obj-y += vlapic.o
-obj-y += vm_event.o
+obj-$(CONFIG_VM_EVENT) += vm_event.o
obj-y += vmsi.o
obj-y += vpic.o
obj-y += vpt.o
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 23bd7f078a..b044dc2ecb 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5266,6 +5266,7 @@ int hvm_debug_op(struct vcpu *v, int32_t op)
return rc;
}
+#ifdef CONFIG_VM_EVENT
void hvm_toggle_singlestep(struct vcpu *v)
{
ASSERT(atomic_read(&v->pause_count));
@@ -5275,6 +5276,7 @@ void hvm_toggle_singlestep(struct vcpu *v)
v->arch.hvm.single_step = !v->arch.hvm.single_step;
}
+#endif /* CONFIG_VM_EVENT */
#ifdef CONFIG_ALTP2M
void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index b54f9d9af5..b726d760d4 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -298,6 +298,7 @@ void svm_intercept_msr(struct vcpu *v, uint32_t msr, int flags)
__clear_bit(msr * 2 + 1, msr_bit);
}
+#ifdef CONFIG_VM_EVENT
static void cf_check svm_enable_msr_interception(struct domain *d, uint32_t msr)
{
struct vcpu *v;
@@ -305,6 +306,7 @@ static void cf_check svm_enable_msr_interception(struct domain *d, uint32_t msr)
for_each_vcpu ( d, v )
svm_intercept_msr(v, msr, MSR_INTERCEPT_WRITE);
}
+#endif /* CONFIG_VM_EVENT */
static void svm_save_dr(struct vcpu *v)
{
@@ -825,6 +827,7 @@ static void cf_check svm_set_rdtsc_exiting(struct vcpu *v, bool enable)
vmcb_set_general2_intercepts(vmcb, general2_intercepts);
}
+#ifdef CONFIG_VM_EVENT
static void cf_check svm_set_descriptor_access_exiting(
struct vcpu *v, bool enable)
{
@@ -842,6 +845,7 @@ static void cf_check svm_set_descriptor_access_exiting(
vmcb_set_general1_intercepts(vmcb, general1_intercepts);
}
+#endif /* CONFIG_VM_EVENT */
static unsigned int cf_check svm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
{
@@ -2456,9 +2460,13 @@ static struct hvm_function_table __initdata_cf_clobber svm_function_table = {
.fpu_dirty_intercept = svm_fpu_dirty_intercept,
.msr_read_intercept = svm_msr_read_intercept,
.msr_write_intercept = svm_msr_write_intercept,
+#ifdef CONFIG_VM_EVENT
.enable_msr_interception = svm_enable_msr_interception,
+#endif
.set_rdtsc_exiting = svm_set_rdtsc_exiting,
+#ifdef CONFIG_VM_EVENT
.set_descriptor_access_exiting = svm_set_descriptor_access_exiting,
+#endif
.get_insn_bytes = svm_get_insn_bytes,
.nhvm_vcpu_initialise = nsvm_vcpu_initialise,
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index e2b5077654..4cf5da70ad 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1519,6 +1519,7 @@ static void cf_check vmx_set_rdtsc_exiting(struct vcpu *v, bool enable)
vmx_vmcs_exit(v);
}
+#ifdef CONFIG_VM_EVENT
static void cf_check vmx_set_descriptor_access_exiting(
struct vcpu *v, bool enable)
{
@@ -1533,6 +1534,7 @@ static void cf_check vmx_set_descriptor_access_exiting(
vmx_update_secondary_exec_control(v);
vmx_vmcs_exit(v);
}
+#endif /* CONFIG_VM_EVENT */
static void cf_check vmx_init_hypercall_page(void *p)
{
@@ -2412,6 +2414,7 @@ static void cf_check vmx_handle_eoi(uint8_t vector, int isr)
printk_once(XENLOG_WARNING "EOI for %02x but SVI=%02x\n", vector, old_svi);
}
+#ifdef CONFIG_VM_EVENT
static void cf_check vmx_enable_msr_interception(struct domain *d, uint32_t msr)
{
struct vcpu *v;
@@ -2419,6 +2422,7 @@ static void cf_check vmx_enable_msr_interception(struct domain *d, uint32_t msr)
for_each_vcpu ( d, v )
vmx_set_msr_intercept(v, msr, VMX_MSR_W);
}
+#endif /* CONFIG_VM_EVENT */
#ifdef CONFIG_ALTP2M
@@ -2870,7 +2874,9 @@ static struct hvm_function_table __initdata_cf_clobber vmx_function_table = {
.nhvm_domain_relinquish_resources = nvmx_domain_relinquish_resources,
.update_vlapic_mode = vmx_vlapic_msr_changed,
.nhvm_hap_walk_L1_p2m = nvmx_hap_walk_L1_p2m,
+#ifdef CONFIG_VM_EVENT
.enable_msr_interception = vmx_enable_msr_interception,
+#endif
#ifdef CONFIG_ALTP2M
.altp2m_vcpu_update_p2m = vmx_vcpu_update_eptp,
.altp2m_vcpu_update_vmfunc_ve = vmx_vcpu_update_vmfunc_ve,
@@ -3078,9 +3084,11 @@ const struct hvm_function_table * __init start_vmx(void)
vmx_function_table.caps.singlestep = cpu_has_monitor_trap_flag;
+#ifdef CONFIG_VM_EVENT
if ( cpu_has_vmx_dt_exiting )
vmx_function_table.set_descriptor_access_exiting =
vmx_set_descriptor_access_exiting;
+#endif
/*
* Do not enable EPT when (!cpu_has_vmx_pat), to prevent security hole
@@ -3151,8 +3159,10 @@ void __init vmx_fill_funcs(void)
if ( !cpu_has_xen_ibt )
return;
+#ifdef CONFIG_VM_EVENT
vmx_function_table.set_descriptor_access_exiting =
vmx_set_descriptor_access_exiting;
+#endif
vmx_function_table.update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap;
vmx_function_table.process_isr = vmx_process_isr;
diff --git a/xen/arch/x86/include/asm/hvm/hvm.h b/xen/arch/x86/include/asm/hvm/hvm.h
index f02183691e..b2c75b733e 100644
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -192,7 +192,9 @@ struct hvm_function_table {
void (*handle_cd)(struct vcpu *v, unsigned long value);
void (*set_info_guest)(struct vcpu *v);
void (*set_rdtsc_exiting)(struct vcpu *v, bool enable);
+#ifdef CONFIG_VM_EVENT
void (*set_descriptor_access_exiting)(struct vcpu *v, bool enable);
+#endif
/* Nested HVM */
int (*nhvm_vcpu_initialise)(struct vcpu *v);
@@ -224,7 +226,9 @@ struct hvm_function_table {
paddr_t *L1_gpa, unsigned int *page_order,
uint8_t *p2m_acc, struct npfec npfec);
+#ifdef CONFIG_VM_EVENT
void (*enable_msr_interception)(struct domain *d, uint32_t msr);
+#endif
#ifdef CONFIG_ALTP2M
/* Alternate p2m */
@@ -435,7 +439,11 @@ static inline bool using_svm(void)
static inline bool hvm_has_set_descriptor_access_exiting(void)
{
+#ifdef CONFIG_VM_EVENT
return hvm_funcs.set_descriptor_access_exiting;
+#else
+ return false;
+#endif
}
static inline void hvm_domain_creation_finished(struct domain *d)
@@ -681,7 +689,9 @@ static inline int nhvm_hap_walk_L1_p2m(
static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
{
+#ifdef CONFIG_VM_EVENT
alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
+#endif
}
static inline bool hvm_is_singlestep_supported(void)
diff --git a/xen/arch/x86/include/asm/hvm/monitor.h b/xen/arch/x86/include/asm/hvm/monitor.h
index 02021be47b..561ca2e585 100644
--- a/xen/arch/x86/include/asm/hvm/monitor.h
+++ b/xen/arch/x86/include/asm/hvm/monitor.h
@@ -17,14 +17,16 @@ enum hvm_monitor_debug_type
HVM_MONITOR_DEBUG_EXCEPTION,
};
+#define hvm_monitor_crX(cr, new, old) \
+ hvm_monitor_cr(VM_EVENT_X86_##cr, new, old)
+
+#ifdef CONFIG_VM_EVENT
/*
* Called for current VCPU on crX/MSR changes by guest. Bool return signals
* whether emulation should be postponed.
*/
bool hvm_monitor_cr(unsigned int index, unsigned long value,
unsigned long old);
-#define hvm_monitor_crX(cr, new, old) \
- hvm_monitor_cr(VM_EVENT_X86_##cr, new, old)
bool hvm_monitor_msr(unsigned int msr, uint64_t new_value, uint64_t old_value);
void hvm_monitor_descriptor_access(uint64_t exit_info,
uint64_t vmx_exit_qualification,
@@ -45,6 +47,65 @@ int hvm_monitor_vmexit(unsigned long exit_reason,
int hvm_monitor_io(unsigned int port, unsigned int bytes,
bool in, bool str);
+#else
+static inline bool hvm_monitor_cr(unsigned int index, unsigned long value,
+ unsigned long old)
+{
+ return false;
+}
+
+static inline bool hvm_monitor_msr(unsigned int msr, uint64_t new_value,
+ uint64_t old_value)
+{
+ return false;
+}
+
+static inline void hvm_monitor_descriptor_access(uint64_t exit_info,
+ uint64_t vmx_exit_qualification,
+ uint8_t descriptor, bool is_write) {}
+
+static inline int hvm_monitor_debug(unsigned long rip,
+ enum hvm_monitor_debug_type type,
+ unsigned int trap_type,
+ unsigned int insn_length,
+ unsigned int pending_dbg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hvm_monitor_cpuid(unsigned long insn_length,
+ unsigned int leaf, unsigned int subleaf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void hvm_monitor_interrupt(unsigned int vector,
+ unsigned int type,
+ unsigned int err, uint64_t cr2) {}
+
+static inline bool hvm_monitor_emul_unimplemented(void)
+{
+ return false;
+}
+
+static inline bool hvm_monitor_check_p2m(unsigned long gla, gfn_t gfn,
+ uint32_t pfec, uint16_t kind)
+{
+ return false;
+}
+
+static inline int hvm_monitor_vmexit(unsigned long exit_reason,
+ unsigned long exit_qualification)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int hvm_monitor_io(unsigned int port, unsigned int bytes,
+ bool in, bool str)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_VM_EVENT */
#endif /* __ASM_X86_HVM_MONITOR_H__ */
diff --git a/xen/arch/x86/include/asm/hvm/vm_event.h b/xen/arch/x86/include/asm/hvm/vm_event.h
index 506a85c774..1628230182 100644
--- a/xen/arch/x86/include/asm/hvm/vm_event.h
+++ b/xen/arch/x86/include/asm/hvm/vm_event.h
@@ -8,7 +8,11 @@
#ifndef __ASM_X86_HVM_VM_EVENT_H__
#define __ASM_X86_HVM_VM_EVENT_H__
+#ifdef CONFIG_VM_EVENT
void hvm_vm_event_do_resume(struct vcpu *v);
+#else
+static inline void hvm_vm_event_do_resume(struct vcpu *v) {}
+#endif /* CONFIG_VM_EVENT */
#endif /* __ASM_X86_HVM_VM_EVENT_H__ */
diff --git a/xen/arch/x86/include/asm/mem_access.h b/xen/arch/x86/include/asm/mem_access.h
index 1a52a10322..c786116310 100644
--- a/xen/arch/x86/include/asm/mem_access.h
+++ b/xen/arch/x86/include/asm/mem_access.h
@@ -14,6 +14,7 @@
#ifndef __ASM_X86_MEM_ACCESS_H__
#define __ASM_X86_MEM_ACCESS_H__
+#ifdef CONFIG_VM_EVENT
/*
* Setup vm_event request based on the access (gla is -1ull if not available).
* Handles the rw2rx conversion. Boolean return value indicates if event type
@@ -25,6 +26,14 @@
bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
struct npfec npfec,
struct vm_event_st **req_ptr);
+#else
+static inline bool p2m_mem_access_check(paddr_t gpa, unsigned long gla,
+ struct npfec npfec,
+ struct vm_event_st **req_ptr)
+{
+ return false;
+}
+#endif /* CONFIG_VM_EVENT */
/* Check for emulation and mark vcpu for skipping one instruction
* upon rescheduling if required. */
diff --git a/xen/arch/x86/include/asm/monitor.h b/xen/arch/x86/include/asm/monitor.h
index 3c64d8258f..850c0798d7 100644
--- a/xen/arch/x86/include/asm/monitor.h
+++ b/xen/arch/x86/include/asm/monitor.h
@@ -123,7 +123,14 @@ static inline void arch_monitor_cleanup_domain(struct domain *d) {}
#endif
+#ifdef CONFIG_VM_EVENT
bool monitored_msr(const struct domain *d, u32 msr);
+#else
+static inline bool monitored_msr(const struct domain *d, u32 msr)
+{
+ return false;
+}
+#endif
bool monitored_msr_onchangeonly(const struct domain *d, u32 msr);
#endif /* __ASM_X86_MONITOR_H__ */
diff --git a/xen/include/xen/mem_access.h b/xen/include/xen/mem_access.h
index 4de651038d..efbb26b703 100644
--- a/xen/include/xen/mem_access.h
+++ b/xen/include/xen/mem_access.h
@@ -33,9 +33,7 @@
*/
struct vm_event_st;
-#ifdef CONFIG_VM_EVENT
#include <asm/mem_access.h>
-#endif
/*
* Additional access types, which are used to further restrict
@@ -74,6 +72,7 @@ typedef enum {
} p2m_access_t;
struct p2m_domain;
+#ifdef CONFIG_VM_EVENT
bool xenmem_access_to_p2m_access(const struct p2m_domain *p2m,
xenmem_access_t xaccess,
p2m_access_t *paccess);
@@ -99,10 +98,40 @@ long p2m_set_mem_access_multi(struct domain *d,
int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access,
unsigned int altp2m_idx);
-#ifdef CONFIG_VM_EVENT
int mem_access_memop(unsigned long cmd,
XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
#else
+static inline bool xenmem_access_to_p2m_access(const struct p2m_domain *p2m,
+ xenmem_access_t xaccess,
+ p2m_access_t *paccess)
+{
+ return false;
+}
+
+static inline long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
+ uint32_t start, uint32_t mask,
+ xenmem_access_t access,
+ unsigned int altp2m_idx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline long p2m_set_mem_access_multi(struct domain *d,
+ const XEN_GUEST_HANDLE(const_uint64) pfn_list,
+ const XEN_GUEST_HANDLE(const_uint8) access_list,
+ uint32_t nr, uint32_t start, uint32_t mask,
+ unsigned int altp2m_idx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int p2m_get_mem_access(struct domain *d, gfn_t gfn,
+ xenmem_access_t *access,
+ unsigned int altp2m_idx)
+{
+ return -EOPNOTSUPP;
+}
+
static inline
int mem_access_memop(unsigned long cmd,
XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg)
diff --git a/xen/include/xen/monitor.h b/xen/include/xen/monitor.h
index c086c4390c..1b7984909e 100644
--- a/xen/include/xen/monitor.h
+++ b/xen/include/xen/monitor.h
@@ -30,6 +30,7 @@ struct xen_domctl_monitor_op;
#ifdef CONFIG_VM_EVENT
int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop);
void monitor_guest_request(void);
+int monitor_traps(struct vcpu *v, bool sync, vm_event_request_t *req);
#else /* !CONFIG_VM_EVENT */
static inline int monitor_domctl(struct domain *d,
struct xen_domctl_monitor_op *mop)
@@ -37,8 +38,11 @@ static inline int monitor_domctl(struct domain *d,
return -EOPNOTSUPP;
}
static inline void monitor_guest_request(void) {}
+static inline int monitor_traps(struct vcpu *v, bool sync,
+ vm_event_request_t *req)
+{
+ return -EOPNOTSUPP;
+}
#endif /* !CONFIG_VM_EVENT */
-int monitor_traps(struct vcpu *v, bool sync, vm_event_request_t *req);
-
#endif /* __XEN_MONITOR_H__ */
diff --git a/xen/include/xen/vm_event.h b/xen/include/xen/vm_event.h
index 27d0c74216..4b3d0d15ec 100644
--- a/xen/include/xen/vm_event.h
+++ b/xen/include/xen/vm_event.h
@@ -50,6 +50,7 @@ struct vm_event_domain
unsigned int last_vcpu_wake_up;
};
+#ifdef CONFIG_VM_EVENT
/* Returns whether a ring has been set up */
bool vm_event_check_ring(struct vm_event_domain *ved);
@@ -68,6 +69,20 @@ bool vm_event_check_ring(struct vm_event_domain *ved);
*/
int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
bool allow_sleep);
+#else
+static inline bool vm_event_check_ring(struct vm_event_domain *ved)
+{
+ return false;
+}
+
+static inline int __vm_event_claim_slot(struct domain *d,
+ struct vm_event_domain *ved,
+ bool allow_sleep)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_VM_EVENT */
+
static inline int vm_event_claim_slot(struct domain *d,
struct vm_event_domain *ved)
{
@@ -82,23 +97,28 @@ static inline int vm_event_claim_slot_nosleep(struct domain *d,
void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved);
+#ifdef CONFIG_VM_EVENT
void vm_event_put_request(struct domain *d, struct vm_event_domain *ved,
vm_event_request_t *req);
-#ifdef CONFIG_VM_EVENT
/* Clean up on domain destruction */
void vm_event_cleanup(struct domain *d);
int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec);
+
+void vm_event_vcpu_pause(struct vcpu *v);
#else /* !CONFIG_VM_EVENT */
+static inline void vm_event_put_request(struct domain *d,
+ struct vm_event_domain *ved,
+ vm_event_request_t *req) {}
static inline void vm_event_cleanup(struct domain *d) {}
static inline int vm_event_domctl(struct domain *d,
struct xen_domctl_vm_event_op *vec)
{
return -EOPNOTSUPP;
}
+static inline void vm_event_vcpu_pause(struct vcpu *v) {};
#endif /* !CONFIG_VM_EVENT */
-void vm_event_vcpu_pause(struct vcpu *v);
void vm_event_vcpu_unpause(struct vcpu *v);
void vm_event_fill_regs(vm_event_request_t *req);
--
2.34.1
On 10.09.2025 09:38, Penny Zheng wrote:
> @@ -2456,9 +2460,13 @@ static struct hvm_function_table __initdata_cf_clobber svm_function_table = {
> .fpu_dirty_intercept = svm_fpu_dirty_intercept,
> .msr_read_intercept = svm_msr_read_intercept,
> .msr_write_intercept = svm_msr_write_intercept,
> +#ifdef CONFIG_VM_EVENT
> .enable_msr_interception = svm_enable_msr_interception,
> +#endif
> .set_rdtsc_exiting = svm_set_rdtsc_exiting,
> +#ifdef CONFIG_VM_EVENT
> .set_descriptor_access_exiting = svm_set_descriptor_access_exiting,
> +#endif
I think in such a case it would be preferable to move one of the existing
lines, so we can get away with just a single #ifdef.
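I.e. something along these lines, moving .set_rdtsc_exiting up so the two
VM_EVENT-only hooks become adjacent (sketch only; any equivalent reordering
of the initializer fields would do, as the order has no functional effect):
```
    .msr_write_intercept = svm_msr_write_intercept,
    .set_rdtsc_exiting = svm_set_rdtsc_exiting,
#ifdef CONFIG_VM_EVENT
    .enable_msr_interception = svm_enable_msr_interception,
    .set_descriptor_access_exiting = svm_set_descriptor_access_exiting,
#endif
    .get_insn_bytes = svm_get_insn_bytes,
```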
> --- a/xen/arch/x86/include/asm/hvm/hvm.h
> +++ b/xen/arch/x86/include/asm/hvm/hvm.h
> @@ -192,7 +192,9 @@ struct hvm_function_table {
> void (*handle_cd)(struct vcpu *v, unsigned long value);
> void (*set_info_guest)(struct vcpu *v);
> void (*set_rdtsc_exiting)(struct vcpu *v, bool enable);
> +#ifdef CONFIG_VM_EVENT
> void (*set_descriptor_access_exiting)(struct vcpu *v, bool enable);
> +#endif
>
> /* Nested HVM */
> int (*nhvm_vcpu_initialise)(struct vcpu *v);
> @@ -224,7 +226,9 @@ struct hvm_function_table {
> paddr_t *L1_gpa, unsigned int *page_order,
> uint8_t *p2m_acc, struct npfec npfec);
>
> +#ifdef CONFIG_VM_EVENT
> void (*enable_msr_interception)(struct domain *d, uint32_t msr);
> +#endif
Possibly same here.
> @@ -435,7 +439,11 @@ static inline bool using_svm(void)
>
> static inline bool hvm_has_set_descriptor_access_exiting(void)
> {
> +#ifdef CONFIG_VM_EVENT
> return hvm_funcs.set_descriptor_access_exiting;
> +#else
> + return false;
> +#endif
> }
This is actively wrong. It being only monitor.[ch] which use the function,
I don't see why it can't just be wrapped in an #ifdef. With what you do,
some new caller might function fine until run in a VM_EVENT=n build.
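I.e. presumably just wrapping the whole helper (sketch; its monitor.[ch]
callers are compiled out anyway when VM_EVENT=n):
```
#ifdef CONFIG_VM_EVENT
static inline bool hvm_has_set_descriptor_access_exiting(void)
{
    return hvm_funcs.set_descriptor_access_exiting;
}
#endif
```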
> @@ -681,7 +689,9 @@ static inline int nhvm_hap_walk_L1_p2m(
>
> static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
> {
> +#ifdef CONFIG_VM_EVENT
> alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
> +#endif
> }
Mostly the same here.
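I.e., analogously (sketch):
```
#ifdef CONFIG_VM_EVENT
static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
{
    alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
}
#endif
```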
> --- a/xen/arch/x86/include/asm/hvm/monitor.h
> +++ b/xen/arch/x86/include/asm/hvm/monitor.h
> @@ -17,14 +17,16 @@ enum hvm_monitor_debug_type
> HVM_MONITOR_DEBUG_EXCEPTION,
> };
>
> +#define hvm_monitor_crX(cr, new, old) \
> + hvm_monitor_cr(VM_EVENT_X86_##cr, new, old)
> +
> +#ifdef CONFIG_VM_EVENT
> /*
> * Called for current VCPU on crX/MSR changes by guest. Bool return signals
> * whether emulation should be postponed.
> */
> bool hvm_monitor_cr(unsigned int index, unsigned long value,
> unsigned long old);
> -#define hvm_monitor_crX(cr, new, old) \
> - hvm_monitor_cr(VM_EVENT_X86_##cr, new, old)
> bool hvm_monitor_msr(unsigned int msr, uint64_t new_value, uint64_t old_value);
> void hvm_monitor_descriptor_access(uint64_t exit_info,
> uint64_t vmx_exit_qualification,
> @@ -45,6 +47,65 @@ int hvm_monitor_vmexit(unsigned long exit_reason,
>
> int hvm_monitor_io(unsigned int port, unsigned int bytes,
> bool in, bool str);
> +#else
> +static inline bool hvm_monitor_cr(unsigned int index, unsigned long value,
> + unsigned long old)
> +{
> + return false;
> +}
> +
> +static inline bool hvm_monitor_msr(unsigned int msr, uint64_t new_value,
> + uint64_t old_value)
> +{
> + return false;
> +}
> +
> +static inline void hvm_monitor_descriptor_access(uint64_t exit_info,
> + uint64_t vmx_exit_qualification,
> + uint8_t descriptor, bool is_write) {}
> +
> +static inline int hvm_monitor_debug(unsigned long rip,
> + enum hvm_monitor_debug_type type,
> + unsigned int trap_type,
> + unsigned int insn_length,
> + unsigned int pending_dbg)
> +{
> + return -EOPNOTSUPP;
> +}
> +
> +static inline int hvm_monitor_cpuid(unsigned long insn_length,
> + unsigned int leaf, unsigned int subleaf)
> +{
> + return -EOPNOTSUPP;
> +}
> +
> +static inline void hvm_monitor_interrupt(unsigned int vector,
> + unsigned int type,
> + unsigned int err, uint64_t cr2) {}
> +
> +static inline bool hvm_monitor_emul_unimplemented(void)
> +{
> + return false;
> +}
> +
> +static inline bool hvm_monitor_check_p2m(unsigned long gla, gfn_t gfn,
> + uint32_t pfec, uint16_t kind)
> +{
> + return false;
> +}
> +
> +static inline int hvm_monitor_vmexit(unsigned long exit_reason,
> + unsigned long exit_qualification)
> +{
> + return -EOPNOTSUPP;
> +}
> +
> +static inline int hvm_monitor_io(unsigned int port, unsigned int bytes,
> + bool in, bool str)
> +{
> + return -EOPNOTSUPP;
> +}
For this one it's perhaps easiest to see that -EOPNOTSUPP (or in fact any
negative value) is wrong to return from the stub: Just go look at both
use sites. Guests wouldn't be able to use I/O insns anymore for intercepted
ports. Others look to have similar issues, while the ones returning "false"
look okay.
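For illustration, the I/O stub could instead look like this, assuming a
return value of 0 is taken by the call sites to mean "no monitor event was
sent, carry on with normal handling" (to be double-checked against the
actual use sites):
```
static inline int hvm_monitor_io(unsigned int port, unsigned int bytes,
                                 bool in, bool str)
{
    /* No vm_event support: nothing was sent, let normal handling continue. */
    return 0;
}
```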
> --- a/xen/include/xen/mem_access.h
> +++ b/xen/include/xen/mem_access.h
> @@ -33,9 +33,7 @@
> */
> struct vm_event_st;
>
> -#ifdef CONFIG_VM_EVENT
> #include <asm/mem_access.h>
> -#endif
Aiui this breaks the build on PPC and RISC-V, which don't have such a
header. If this change is really needed (which I'm not convinced of, as
x86's hvm/hvm.c could as well include asm/mem_access.h directly), you'll
need to use has_include() here.
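For reference, assuming the standard __has_include() preprocessor operator
is what's meant here, that would look like:
```
#if __has_include(<asm/mem_access.h>)
# include <asm/mem_access.h>
#endif
```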
> @@ -74,6 +72,7 @@ typedef enum {
> } p2m_access_t;
>
> struct p2m_domain;
> +#ifdef CONFIG_VM_EVENT
> bool xenmem_access_to_p2m_access(const struct p2m_domain *p2m,
> xenmem_access_t xaccess,
> p2m_access_t *paccess);
> @@ -99,10 +98,40 @@ long p2m_set_mem_access_multi(struct domain *d,
> int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access,
> unsigned int altp2m_idx);
>
> -#ifdef CONFIG_VM_EVENT
> int mem_access_memop(unsigned long cmd,
> XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
> #else
> +static inline bool xenmem_access_to_p2m_access(const struct p2m_domain *p2m,
> + xenmem_access_t xaccess,
> + p2m_access_t *paccess)
> +{
> + return false;
> +}
So this is needed when VM_EVENT=n and ALTP2M=y. Tamas, is this a configuration
which makes sense?
> +static inline long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
> + uint32_t start, uint32_t mask,
> + xenmem_access_t access,
> + unsigned int altp2m_idx)
> +{
> + return -EOPNOTSUPP;
> +}
> +
> +static inline long p2m_set_mem_access_multi(struct domain *d,
> + const XEN_GUEST_HANDLE(const_uint64) pfn_list,
> + const XEN_GUEST_HANDLE(const_uint8) access_list,
> + uint32_t nr, uint32_t start, uint32_t mask,
> + unsigned int altp2m_idx)
> +{
> + return -EOPNOTSUPP;
> +}
> +
> +static inline int p2m_get_mem_access(struct domain *d, gfn_t gfn,
> + xenmem_access_t *access,
> + unsigned int altp2m_idx)
> +{
> + return -EOPNOTSUPP;
> +}
Instead of these, I wonder whether a single #ifdef in do_altp2m_op()
wouldn't be more appropriate (assuming the above config makes some sense
in the first place). Actually, it would need to be two #ifdef-s, one in
each of the two switch() blocks.
> --- a/xen/include/xen/monitor.h
> +++ b/xen/include/xen/monitor.h
> @@ -30,6 +30,7 @@ struct xen_domctl_monitor_op;
> #ifdef CONFIG_VM_EVENT
> int monitor_domctl(struct domain *d, struct xen_domctl_monitor_op *mop);
> void monitor_guest_request(void);
> +int monitor_traps(struct vcpu *v, bool sync, vm_event_request_t *req);
> #else /* !CONFIG_VM_EVENT */
> static inline int monitor_domctl(struct domain *d,
> struct xen_domctl_monitor_op *mop)
> @@ -37,8 +38,11 @@ static inline int monitor_domctl(struct domain *d,
> return -EOPNOTSUPP;
> }
> static inline void monitor_guest_request(void) {}
> +static inline int monitor_traps(struct vcpu *v, bool sync,
> + vm_event_request_t *req)
> +{
> + return -EOPNOTSUPP;
> +}
Is this needed? There's only one call that needs taking care of afaics,
in hvm_hap_nested_page_fault(). That's gated on "req_ptr" being non-NULL
though, which isn't possible when p2m_mem_access_check() also is a stub.
Hence the compiler ought to be able to DCE the call.
> --- a/xen/include/xen/vm_event.h
> +++ b/xen/include/xen/vm_event.h
> @@ -50,6 +50,7 @@ struct vm_event_domain
> unsigned int last_vcpu_wake_up;
> };
>
> +#ifdef CONFIG_VM_EVENT
> /* Returns whether a ring has been set up */
> bool vm_event_check_ring(struct vm_event_domain *ved);
>
> @@ -68,6 +69,20 @@ bool vm_event_check_ring(struct vm_event_domain *ved);
> */
> int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
> bool allow_sleep);
> +#else
> +static inline bool vm_event_check_ring(struct vm_event_domain *ved)
> +{
> + return false;
> +}
Which call site is in need of this stub? I was first considering
mem_paging_enabled(), but MEM_PAGING already now depends on VM_EVENT.
> +static inline int __vm_event_claim_slot(struct domain *d,
> + struct vm_event_domain *ved,
> + bool allow_sleep)
> +{
> + return -EOPNOTSUPP;
> +}
Sadly this looks to be needed when MEM_SHARING=y and VM_EVENT=n.
> @@ -82,23 +97,28 @@ static inline int vm_event_claim_slot_nosleep(struct domain *d,
>
> void vm_event_cancel_slot(struct domain *d, struct vm_event_domain *ved);
>
> +#ifdef CONFIG_VM_EVENT
> void vm_event_put_request(struct domain *d, struct vm_event_domain *ved,
> vm_event_request_t *req);
>
> -#ifdef CONFIG_VM_EVENT
> /* Clean up on domain destruction */
> void vm_event_cleanup(struct domain *d);
> int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec);
> +
> +void vm_event_vcpu_pause(struct vcpu *v);
> #else /* !CONFIG_VM_EVENT */
> +static inline void vm_event_put_request(struct domain *d,
> + struct vm_event_domain *ved,
> + vm_event_request_t *req) {}
Same here and ...
> static inline void vm_event_cleanup(struct domain *d) {}
> static inline int vm_event_domctl(struct domain *d,
> struct xen_domctl_vm_event_op *vec)
> {
> return -EOPNOTSUPP;
> }
> +static inline void vm_event_vcpu_pause(struct vcpu *v) {};
... here.
> #endif /* !CONFIG_VM_EVENT */
>
> -void vm_event_vcpu_pause(struct vcpu *v);
> void vm_event_vcpu_unpause(struct vcpu *v);
Please move vm_event_vcpu_unpause() as well (without adding a stub). The
two would better stay together.
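I.e. the tail of the block would presumably end up as (sketch, other
declarations in the same #ifdef omitted):
```
#ifdef CONFIG_VM_EVENT
void vm_event_vcpu_pause(struct vcpu *v);
void vm_event_vcpu_unpause(struct vcpu *v);
#else /* !CONFIG_VM_EVENT */
static inline void vm_event_vcpu_pause(struct vcpu *v) {}
#endif /* !CONFIG_VM_EVENT */
```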
Jan
> > @@ -99,10 +98,40 @@ long p2m_set_mem_access_multi(struct domain *d,
> > int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access,
> > unsigned int altp2m_idx);
> >
> > -#ifdef CONFIG_VM_EVENT
> > int mem_access_memop(unsigned long cmd,
> > XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
> > #else
> > +static inline bool xenmem_access_to_p2m_access(const struct p2m_domain *p2m,
> > + xenmem_access_t xaccess,
> > + p2m_access_t *paccess)
> > +{
> > + return false;
> > +}
>
> So this is needed when VM_EVENT=n and ALTP2M=y. Tamas, is this a configuration
> which makes sense?
Yes, altp2m should be functional without vm_event being enabled. There
could very well be in-guest only use of altp2m via #VE. This function
is used in p2m_init_next_altp2m which means it being stubbed out like
this when vm_event is disabled breaks altp2m.
Tamas
On 14.09.2025 01:31, Tamas K Lengyel wrote:
>>> @@ -99,10 +98,40 @@ long p2m_set_mem_access_multi(struct domain *d,
>>> int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access,
>>> unsigned int altp2m_idx);
>>>
>>> -#ifdef CONFIG_VM_EVENT
>>> int mem_access_memop(unsigned long cmd,
>>> XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t) arg);
>>> #else
>>> +static inline bool xenmem_access_to_p2m_access(const struct p2m_domain *p2m,
>>> + xenmem_access_t xaccess,
>>> + p2m_access_t *paccess)
>>> +{
>>> + return false;
>>> +}
>>
>> So this is needed when VM_EVENT=n and ALTP2M=y. Tamas, is this a configuration
>> which makes sense?
>
> Yes, altp2m should be functional without vm_event being enabled. There
> could very well be in-guest only use of altp2m via #VE. This function
> is used in p2m_init_next_altp2m which means it being stubbed out like
> this when vm_event is disabled breaks altp2m.
Oh, indeed - the stub still needs to handle XENMEM_access_default. Of course
with MEM_ACCESS=n it's not quite clear to me what p2m->default_access ought
to be; imo in principle that field ought to also go away in that case
(becoming hard-coded p2m_access_rwx). While doing that will be a larger
patch, perhaps using the hard-coded value here should be done right away.
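In other words, something along these lines (sketch only; whether
p2m_access_rwx really is the right hard-coded value is exactly the open
question above):
```
static inline bool xenmem_access_to_p2m_access(const struct p2m_domain *p2m,
                                               xenmem_access_t xaccess,
                                               p2m_access_t *paccess)
{
    if ( xaccess != XENMEM_access_default )
        return false;

    *paccess = p2m_access_rwx; /* assumed hard-coded default when MEM_ACCESS=n */
    return true;
}
```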
Once the code correctly handles MEM_ACCESS=n as an implication from
VM_EVENT=n, it's also questionable whether MEM_ACCESS_ALWAYS_ON should be
retained.
Jan
[Public]
Hi,
> -----Original Message-----
> From: Jan Beulich <jbeulich@suse.com>
> Sent: Sunday, September 14, 2025 10:04 PM
> To: Tamas K Lengyel <tamas@tklengyel.com>; Penny, Zheng
> <penny.zheng@amd.com>
> Cc: Huang, Ray <Ray.Huang@amd.com>; Andrew Cooper
> <andrew.cooper3@citrix.com>; Roger Pau Monné <roger.pau@citrix.com>;
> Alexandru Isaila <aisaila@bitdefender.com>; Petre Pircalabu
> <ppircalabu@bitdefender.com>; Daniel P. Smith <dpsmith@apertussolutions.com>;
> xen-devel@lists.xenproject.org
> Subject: Re: [PATCH v2 04/26] xen: consolidate CONFIG_VM_EVENT
>
> On 14.09.2025 01:31, Tamas K Lengyel wrote:
> >>> @@ -99,10 +98,40 @@ long p2m_set_mem_access_multi(struct domain *d,
> >>> int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t
> *access,
> >>> unsigned int altp2m_idx);
> >>>
> >>> -#ifdef CONFIG_VM_EVENT
> >>> int mem_access_memop(unsigned long cmd,
> >>> XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t)
> >>> arg); #else
> >>> +static inline bool xenmem_access_to_p2m_access(const struct p2m_domain
> *p2m,
> >>> + xenmem_access_t xaccess,
> >>> + p2m_access_t
> >>> +*paccess) {
> >>> + return false;
> >>> +}
> >>
> >> So this is needed when VM_EVENT=n and ALTP2M=y. Tamas, is this a
> >> configuration which makes sense?
> >
> > Yes, altp2m should be functional without vm_event being enabled. There
> > could very well be in-guest only use of altp2m via #VE. This function
> > is used in p2m_init_next_altp2m which means it being stubbed out like
> > this when vm_event is disabled breaks altp2m.
>
> Oh, indeed - the stub still needs to handle XENMEM_access_default. Of course
> with MEM_ACCESS=n it's not quite clear to me what p2m->default_access ought
> to be; imo in principle that field ought to also go away in that case (becoming hard-
> coded p2m_access_rwx). While doing that will be a larger patch, perhaps using the
> hard-coded value here should be done right away.
>
> Once the code correctly handles MEM_ACCESS=n as an implication from
> VM_EVENT=n, it's also questionable whether MEM_ACCESS_ALWAYS_ON
> should be retained.
>
If we intend to remove MEM_ACCESS_ALWAYS_ON, I suggest the following modification to VM_EVENT so that it still defaults to y on x86:
```
diff --git a/xen/common/Kconfig b/xen/common/Kconfig
index 7bd8a04730..61d48a5120 100644
--- a/xen/common/Kconfig
+++ b/xen/common/Kconfig
@@ -170,13 +170,10 @@ config HAS_VMAP
config LIBFDT
bool
-config MEM_ACCESS_ALWAYS_ON
- bool
-
config VM_EVENT
- def_bool MEM_ACCESS_ALWAYS_ON
- prompt "Memory Access and VM events" if !MEM_ACCESS_ALWAYS_ON
+ bool "Memory Access and VM events"
depends on HVM
+ default X86
help
Framework to configure memory access types for guests and receive
```
> Jan
On 24.09.2025 08:39, Penny, Zheng wrote:
> [Public]
>
> Hi,
>
>> -----Original Message-----
>> From: Jan Beulich <jbeulich@suse.com>
>> Sent: Sunday, September 14, 2025 10:04 PM
>> To: Tamas K Lengyel <tamas@tklengyel.com>; Penny, Zheng
>> <penny.zheng@amd.com>
>> Cc: Huang, Ray <Ray.Huang@amd.com>; Andrew Cooper
>> <andrew.cooper3@citrix.com>; Roger Pau Monné <roger.pau@citrix.com>;
>> Alexandru Isaila <aisaila@bitdefender.com>; Petre Pircalabu
>> <ppircalabu@bitdefender.com>; Daniel P. Smith <dpsmith@apertussolutions.com>;
>> xen-devel@lists.xenproject.org
>> Subject: Re: [PATCH v2 04/26] xen: consolidate CONFIG_VM_EVENT
>>
>> On 14.09.2025 01:31, Tamas K Lengyel wrote:
>>>>> @@ -99,10 +98,40 @@ long p2m_set_mem_access_multi(struct domain *d,
>>>>> int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t
>> *access,
>>>>> unsigned int altp2m_idx);
>>>>>
>>>>> -#ifdef CONFIG_VM_EVENT
>>>>> int mem_access_memop(unsigned long cmd,
>>>>> XEN_GUEST_HANDLE_PARAM(xen_mem_access_op_t)
>>>>> arg); #else
>>>>> +static inline bool xenmem_access_to_p2m_access(const struct p2m_domain
>> *p2m,
>>>>> + xenmem_access_t xaccess,
>>>>> + p2m_access_t
>>>>> +*paccess) {
>>>>> + return false;
>>>>> +}
>>>>
>>>> So this is needed when VM_EVENT=n and ALTP2M=y. Tamas, is this a
>>>> configuration which makes sense?
>>>
>>> Yes, altp2m should be functional without vm_event being enabled. There
>>> could very well be in-guest only use of altp2m via #VE. This function
>>> is used in p2m_init_next_altp2m which means it being stubbed out like
>>> this when vm_event is disabled breaks altp2m.
>>
>> Oh, indeed - the stub still needs to handle XENMEM_access_default. Of course
>> with MEM_ACCESS=n it's not quite clear to me what p2m->default_access ought
>> to be; imo in principle that field ought to also go away in that case (becoming hard-
>> coded p2m_access_rwx). While doing that will be a larger patch, perhaps using the
>> hard-coded value here should be done right away.
>>
>> Once the code correctly handles MEM_ACCESS=n as an implication from
>> VM_EVENT=n, it's also questionable whether MEM_ACCESS_ALWAYS_ON
>> should be retained.
>>
>
> If we intend to remove MEM_ACCESS_ALWAYS_ON, I suggest the following modification to VM_EVENT so that it still defaults to y on x86:
> ```
> diff --git a/xen/common/Kconfig b/xen/common/Kconfig
> index 7bd8a04730..61d48a5120 100644
> --- a/xen/common/Kconfig
> +++ b/xen/common/Kconfig
> @@ -170,13 +170,10 @@ config HAS_VMAP
> config LIBFDT
> bool
>
> -config MEM_ACCESS_ALWAYS_ON
> - bool
> -
> config VM_EVENT
> - def_bool MEM_ACCESS_ALWAYS_ON
> - prompt "Memory Access and VM events" if !MEM_ACCESS_ALWAYS_ON
> + bool "Memory Access and VM events"
> depends on HVM
> + default X86
> help
>
> Framework to configure memory access types for guests and receive
> ```
Yes (at least for the time being; eventually we may want to make this default N
even on x86).
Jan
[Public]
> -----Original Message-----
> From: Jan Beulich <jbeulich@suse.com>
> Sent: Wednesday, September 10, 2025 10:57 PM
> To: Penny, Zheng <penny.zheng@amd.com>; Tamas K Lengyel
> <tamas@tklengyel.com>
> Cc: Huang, Ray <Ray.Huang@amd.com>; Andrew Cooper
> <andrew.cooper3@citrix.com>; Roger Pau Monné <roger.pau@citrix.com>;
> Alexandru Isaila <aisaila@bitdefender.com>; Petre Pircalabu
> <ppircalabu@bitdefender.com>; Daniel P. Smith <dpsmith@apertussolutions.com>;
> xen-devel@lists.xenproject.org
> Subject: Re: [PATCH v2 04/26] xen: consolidate CONFIG_VM_EVENT
>
> On 10.09.2025 09:38, Penny Zheng wrote:
>
> > --- a/xen/include/xen/vm_event.h
> > +++ b/xen/include/xen/vm_event.h
> > @@ -50,6 +50,7 @@ struct vm_event_domain
> > unsigned int last_vcpu_wake_up;
> > };
> >
> > +#ifdef CONFIG_VM_EVENT
> > /* Returns whether a ring has been set up */ bool
> > vm_event_check_ring(struct vm_event_domain *ved);
> >
> > @@ -68,6 +69,20 @@ bool vm_event_check_ring(struct vm_event_domain
> *ved);
> > */
> > int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
> > bool allow_sleep);
> > +#else
> > +static inline bool vm_event_check_ring(struct vm_event_domain *ved) {
> > + return false;
> > +}
>
> Which call site is in need of this stub? I was first considering
> mem_paging_enabled(), but MEM_PAGING already now depends on VM_EVENT.
>
It is used in hvm.c to check whether the vm_event_share ring has been set up. It has the same problem as the one below: whether we support the configuration VM_EVENT=n with MEM_SHARING=y. I'm not very familiar with that code and may need help here.
If the combination is not supported, I suggest making MEM_SHARING depend on VM_EVENT, so that most of the stubs below could be removed.
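If that's the route taken, the Kconfig side would presumably be as small as
the following (sketch; the exact existing prompt/help text of MEM_SHARING
may differ):
```
config MEM_SHARING
        bool "Xen memory sharing support (UNSUPPORTED)" if UNSUPPORTED
        depends on HVM && VM_EVENT
```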
> > +static inline int __vm_event_claim_slot(struct domain *d,
> > + struct vm_event_domain *ved,
> > + bool allow_sleep) {
> > + return -EOPNOTSUPP;
> > +}
>
> Sadly this looks to be needed when MEM_SHARING=y and VM_EVENT=n.
>
> > @@ -82,23 +97,28 @@ static inline int
> > vm_event_claim_slot_nosleep(struct domain *d,
> >
> > void vm_event_cancel_slot(struct domain *d, struct vm_event_domain
> > *ved);
> >
> > +#ifdef CONFIG_VM_EVENT
> > void vm_event_put_request(struct domain *d, struct vm_event_domain *ved,
> > vm_event_request_t *req);
> >
> > -#ifdef CONFIG_VM_EVENT
> > /* Clean up on domain destruction */
> > void vm_event_cleanup(struct domain *d); int vm_event_domctl(struct
> > domain *d, struct xen_domctl_vm_event_op *vec);
> > +
> > +void vm_event_vcpu_pause(struct vcpu *v);
> > #else /* !CONFIG_VM_EVENT */
> > +static inline void vm_event_put_request(struct domain *d,
> > + struct vm_event_domain *ved,
> > + vm_event_request_t *req) {}
>
> Same here and ...
>
> > static inline void vm_event_cleanup(struct domain *d) {} static
> > inline int vm_event_domctl(struct domain *d,
> > struct xen_domctl_vm_event_op *vec)
> > {
> > return -EOPNOTSUPP;
> > }
> > +static inline void vm_event_vcpu_pause(struct vcpu *v) {};
>
> ... here.
>
> > #endif /* !CONFIG_VM_EVENT */
> >
> Jan
On 11.09.2025 11:20, Penny, Zheng wrote:
>> -----Original Message-----
>> From: Jan Beulich <jbeulich@suse.com>
>> Sent: Wednesday, September 10, 2025 10:57 PM
>>
>> On 10.09.2025 09:38, Penny Zheng wrote:
>>> --- a/xen/include/xen/vm_event.h
>>> +++ b/xen/include/xen/vm_event.h
>>> @@ -50,6 +50,7 @@ struct vm_event_domain
>>> unsigned int last_vcpu_wake_up;
>>> };
>>>
>>> +#ifdef CONFIG_VM_EVENT
>>> /* Returns whether a ring has been set up */ bool
>>> vm_event_check_ring(struct vm_event_domain *ved);
>>>
>>> @@ -68,6 +69,20 @@ bool vm_event_check_ring(struct vm_event_domain
>> *ved);
>>> */
>>> int __vm_event_claim_slot(struct domain *d, struct vm_event_domain *ved,
>>> bool allow_sleep);
>>> +#else
>>> +static inline bool vm_event_check_ring(struct vm_event_domain *ved) {
>>> + return false;
>>> +}
>>
>> Which call site is in need of this stub? I was first considering
>> mem_paging_enabled(), but MEM_PAGING already now depends on VM_EVENT.
>>
>
> It is used in hvm.c to check whether the vm_event_share ring has been set up. It has the same problem as the one below: whether we support the configuration VM_EVENT=n with MEM_SHARING=y.
Hmm, yes, I must have overlooked that. This needs to stay, I expect.
Jan