[PATCH v1 7/7] xen/vm_event: consolidate CONFIG_VM_EVENT

Posted by Penny Zheng 5 days, 22 hours ago
Files hvm/vm_event.c and x86/vm_event.c extend the vm_event handling
routines, so their compilation shall be guarded by CONFIG_VM_EVENT too.

Although CONFIG_VM_EVENT is currently forced on for x86 via
MEM_ACCESS_ALWAYS_ON, it will become possible to disable it later by
disabling CONFIG_MGMT_HYPERCALLS. So remove MEM_ACCESS_ALWAYS_ON and make
VM_EVENT default to y only on x86, to retain the current behaviour.

The following functions are built on top of the vm_event framework, or are
only invoked by vm_event.c, so they shall all be wrapped with
CONFIG_VM_EVENT (otherwise they become unreachable and violate MISRA rule
2.1 when VM_EVENT=n):
- hvm_toggle_singlestep
- hvm_fast_singlestep
- hvm_emulate_one_vm_event
    - hvmemul_write{,cmpxchg,rep_ins,rep_outs,rep_movs,rep_stos,read_io,write_io}_discard
Also, vm_event_check_ring() needs a stub to pass compilation when
VM_EVENT=n.
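
For illustration only (not part of this patch): callers already treat "no
ring set up" as "vm_event not active", which is why a stub that simply
returns false is sufficient.  A simplified, hypothetical caller, with d
being the struct domain in question:

    /* With VM_EVENT=n the stub makes this check fail unconditionally,
     * so the request path below becomes dead code the compiler drops. */
    if ( !vm_event_check_ring(d->vm_event_monitor) )
        return -ENOSYS;

    /* ... put a request on the ring ... */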

Signed-off-by: Penny Zheng <Penny.Zheng@amd.com>
---
v1 -> v2:
- split out XSM changes
- remove unnecessary stubs
- move "struct p2m_domain" declaration ahead of the #ifdef
---
v2 -> v3:
- move .enable_msr_interception and .set_descriptor_access_exiting together
- with the introduction of "vm_event_is_enabled()" (see the illustrative
sketch after this list), all hvm_monitor_xxx() stubs are no longer needed
- change to use in-place stubs in do_altp2m_op()
- no need to add stub for monitor_traps(), __vm_event_claim_slot(),
vm_event_put_request() and vm_event_vcpu_pause()
- remove MEM_ACCESS_ALWAYS_ON
- return default p2m_access_rwx for xenmem_access_to_p2m_access() when
VM_EVENT=n
- add wrapping for hvm_emulate_one_vm_event/
hvmemul_write{,cmpxchg,rep_ins,rep_outs,rep_movs,rep_stos,read_io,write_io}_discard
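
Note: "vm_event_is_enabled()" comes from an earlier patch in this series
and is not shown in this diff.  Purely as an illustrative sketch (the exact
location and condition are assumptions, not the series' actual code), it
can be thought of as:

    /* Sketch: combine the Kconfig gate with the existing ring check.
     * When VM_EVENT=n this is a compile-time false (both via IS_ENABLED()
     * and via the vm_event_check_ring() stub added below), so monitor
     * call sites vanish without needing hvm_monitor_xxx() stubs. */
    static inline bool vm_event_is_enabled(const struct vcpu *v)
    {
        return IS_ENABLED(CONFIG_VM_EVENT) &&
               vm_event_check_ring(v->domain->vm_event_monitor);
    }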
---
 xen/arch/x86/Makefile      |  2 +-
 xen/arch/x86/hvm/Kconfig   |  1 -
 xen/arch/x86/hvm/Makefile  |  2 +-
 xen/arch/x86/hvm/emulate.c | 58 ++++++++++++++++++++------------------
 xen/arch/x86/hvm/hvm.c     |  2 ++
 xen/common/Kconfig         |  7 ++---
 xen/include/xen/vm_event.h |  7 +++++
 7 files changed, 44 insertions(+), 35 deletions(-)

diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
index 407571c510..f71ca9f18b 100644
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -75,7 +75,7 @@ obj-y += usercopy.o
 obj-y += x86_emulate.o
 obj-$(CONFIG_TBOOT) += tboot.o
 obj-y += hpet.o
-obj-y += vm_event.o
+obj-$(CONFIG_VM_EVENT) += vm_event.o
 obj-y += xstate.o
 
 ifneq ($(CONFIG_PV_SHIM_EXCLUSIVE),y)
diff --git a/xen/arch/x86/hvm/Kconfig b/xen/arch/x86/hvm/Kconfig
index f10a2b3744..ea957363a9 100644
--- a/xen/arch/x86/hvm/Kconfig
+++ b/xen/arch/x86/hvm/Kconfig
@@ -4,7 +4,6 @@ menuconfig HVM
 	default !PV_SHIM
 	select COMPAT
 	select IOREQ_SERVER
-	select MEM_ACCESS_ALWAYS_ON
 	help
 	  Interfaces to support HVM domains.  HVM domains require hardware
 	  virtualisation extensions (e.g. Intel VT-x, AMD SVM), but can boot
diff --git a/xen/arch/x86/hvm/Makefile b/xen/arch/x86/hvm/Makefile
index 50e0b6e63b..952db00dd7 100644
--- a/xen/arch/x86/hvm/Makefile
+++ b/xen/arch/x86/hvm/Makefile
@@ -26,7 +26,7 @@ obj-y += save.o
 obj-y += stdvga.o
 obj-y += vioapic.o
 obj-y += vlapic.o
-obj-y += vm_event.o
+obj-$(CONFIG_VM_EVENT) += vm_event.o
 obj-y += vmsi.o
 obj-y += vpic.o
 obj-y += vpt.o
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index fe75b0516d..d56ef02baf 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1615,6 +1615,7 @@ static int cf_check hvmemul_blk(
     return rc;
 }
 
+#ifdef CONFIG_VM_EVENT
 static int cf_check hvmemul_write_discard(
     enum x86_segment seg,
     unsigned long offset,
@@ -1717,6 +1718,7 @@ static int cf_check hvmemul_cache_op_discard(
 {
     return X86EMUL_OKAY;
 }
+#endif /* CONFIG_VM_EVENT */
 
 static int cf_check hvmemul_cmpxchg(
     enum x86_segment seg,
@@ -2750,33 +2752,6 @@ static const struct x86_emulate_ops hvm_emulate_ops = {
     .vmfunc        = hvmemul_vmfunc,
 };
 
-static const struct x86_emulate_ops hvm_emulate_ops_no_write = {
-    .read          = hvmemul_read,
-    .insn_fetch    = hvmemul_insn_fetch,
-    .write         = hvmemul_write_discard,
-    .cmpxchg       = hvmemul_cmpxchg_discard,
-    .rep_ins       = hvmemul_rep_ins_discard,
-    .rep_outs      = hvmemul_rep_outs_discard,
-    .rep_movs      = hvmemul_rep_movs_discard,
-    .rep_stos      = hvmemul_rep_stos_discard,
-    .read_segment  = hvmemul_read_segment,
-    .write_segment = hvmemul_write_segment,
-    .read_io       = hvmemul_read_io_discard,
-    .write_io      = hvmemul_write_io_discard,
-    .read_cr       = hvmemul_read_cr,
-    .write_cr      = hvmemul_write_cr,
-    .read_xcr      = hvmemul_read_xcr,
-    .write_xcr     = hvmemul_write_xcr,
-    .read_msr      = hvmemul_read_msr,
-    .write_msr     = hvmemul_write_msr_discard,
-    .cache_op      = hvmemul_cache_op_discard,
-    .tlb_op        = hvmemul_tlb_op,
-    .cpuid         = x86emul_cpuid,
-    .get_fpu       = hvmemul_get_fpu,
-    .put_fpu       = hvmemul_put_fpu,
-    .vmfunc        = hvmemul_vmfunc,
-};
-
 /*
  * Note that passing VIO_no_completion into this function serves as kind
  * of (but not fully) an "auto select completion" indicator.  When there's
@@ -2887,6 +2862,34 @@ int hvm_emulate_one(
     return _hvm_emulate_one(hvmemul_ctxt, &hvm_emulate_ops, completion);
 }
 
+#ifdef CONFIG_VM_EVENT
+static const struct x86_emulate_ops hvm_emulate_ops_no_write = {
+    .read          = hvmemul_read,
+    .insn_fetch    = hvmemul_insn_fetch,
+    .write         = hvmemul_write_discard,
+    .cmpxchg       = hvmemul_cmpxchg_discard,
+    .rep_ins       = hvmemul_rep_ins_discard,
+    .rep_outs      = hvmemul_rep_outs_discard,
+    .rep_movs      = hvmemul_rep_movs_discard,
+    .rep_stos      = hvmemul_rep_stos_discard,
+    .read_segment  = hvmemul_read_segment,
+    .write_segment = hvmemul_write_segment,
+    .read_io       = hvmemul_read_io_discard,
+    .write_io      = hvmemul_write_io_discard,
+    .read_cr       = hvmemul_read_cr,
+    .write_cr      = hvmemul_write_cr,
+    .read_xcr      = hvmemul_read_xcr,
+    .write_xcr     = hvmemul_write_xcr,
+    .read_msr      = hvmemul_read_msr,
+    .write_msr     = hvmemul_write_msr_discard,
+    .cache_op      = hvmemul_cache_op_discard,
+    .tlb_op        = hvmemul_tlb_op,
+    .cpuid         = x86emul_cpuid,
+    .get_fpu       = hvmemul_get_fpu,
+    .put_fpu       = hvmemul_put_fpu,
+    .vmfunc        = hvmemul_vmfunc,
+};
+
 void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
     unsigned int errcode)
 {
@@ -2949,6 +2952,7 @@ void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
 
     hvm_emulate_writeback(&ctx);
 }
+#endif /* CONFIG_VM_EVENT */
 
 void hvm_emulate_init_once(
     struct hvm_emulate_ctxt *hvmemul_ctxt,
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index c3417b0593..196de2db67 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5297,6 +5297,7 @@ int hvm_debug_op(struct vcpu *v, int32_t op)
     return rc;
 }
 
+#ifdef CONFIG_VM_EVENT
 void hvm_toggle_singlestep(struct vcpu *v)
 {
     ASSERT(atomic_read(&v->pause_count));
@@ -5323,6 +5324,7 @@ void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx)
     v->arch.hvm.fast_single_step.p2midx = p2midx;
 }
 #endif
+#endif /* CONFIG_VM_EVENT */
 
 /*
  * Segment caches in VMCB/VMCS are inconsistent about which bits are checked,
diff --git a/xen/common/Kconfig b/xen/common/Kconfig
index 53f681bbb2..0070fe42a6 100644
--- a/xen/common/Kconfig
+++ b/xen/common/Kconfig
@@ -170,13 +170,10 @@ config HAS_VMAP
 config LIBFDT
 	bool
 
-config MEM_ACCESS_ALWAYS_ON
-	bool
-
 config VM_EVENT
-	def_bool MEM_ACCESS_ALWAYS_ON
-	prompt "Memory Access and VM events" if !MEM_ACCESS_ALWAYS_ON
+	bool "Memory Access and VM events"
 	depends on HVM
+	default X86
 	help
 
 	  Framework to configure memory access types for guests and receive
diff --git a/xen/include/xen/vm_event.h b/xen/include/xen/vm_event.h
index 27d0c74216..1b76ce632e 100644
--- a/xen/include/xen/vm_event.h
+++ b/xen/include/xen/vm_event.h
@@ -51,7 +51,14 @@ struct vm_event_domain
 };
 
 /* Returns whether a ring has been set up */
+#ifdef CONFIG_VM_EVENT
 bool vm_event_check_ring(struct vm_event_domain *ved);
+#else
+static inline bool vm_event_check_ring(struct vm_event_domain *ved)
+{
+    return false;
+}
+#endif /* CONFIG_VM_EVENT */
 
 /* Returns 0 on success, -ENOSYS if there is no ring, -EBUSY if there is no
  * available space and the caller is a foreign domain. If the guest itself
-- 
2.34.1
Re: [PATCH v1 7/7] xen/vm_event: consolidate CONFIG_VM_EVENT
Posted by Jan Beulich 5 days, 15 hours ago
On 13.11.2025 04:16, Penny Zheng wrote:
> Files hvm/vm_event.c and x86/vm_event.c extend the vm_event handling
> routines, so their compilation shall be guarded by CONFIG_VM_EVENT too.
> 
> Although CONFIG_VM_EVENT is currently forced on for x86 via
> MEM_ACCESS_ALWAYS_ON, it will become possible to disable it later by
> disabling CONFIG_MGMT_HYPERCALLS. So remove MEM_ACCESS_ALWAYS_ON and make
> VM_EVENT default to y only on x86, to retain the current behaviour.
> 
> The following functions are built on top of the vm_event framework, or are
> only invoked by vm_event.c, so they shall all be wrapped with
> CONFIG_VM_EVENT (otherwise they become unreachable and violate MISRA rule
> 2.1 when VM_EVENT=n):
> - hvm_toggle_singlestep
> - hvm_fast_singlestep
> - hvm_emulate_one_vm_event
>     - hvmemul_write{,cmpxchg,rep_ins,rep_outs,rep_movs,rep_stos,read_io,write_io}_discard
> Also, vm_event_check_ring() needs a stub to pass compilation when
> VM_EVENT=n.
> 
> Signed-off-by: Penny Zheng <Penny.Zheng@amd.com>
> ---
> v1 -> v2:
> - split out XSM changes

Isn't that split out patch also needed as a prereq to the one here? The one
here on its own:
Acked-by: Jan Beulich <jbeulich@suse.com>

But the possibly missing dependency question needs addressing before this may
go in.

> - remove unnecessary stubs
> - move "struct p2m_domain" declaration ahead of the #ifdef
> ---
> v2 -> v3:
> - move .enable_msr_interception and .set_descriptor_access_exiting together
> - with the introduction of "vm_event_is_enabled()", all hvm_monitor_xxx()
> stubs are no longer needed
> - change to use in-place stubs in do_altp2m_op()
> - no need to add stub for monitor_traps(), __vm_event_claim_slot(),
> vm_event_put_request() and vm_event_vcpu_pause()
> - remove MEM_ACCESS_ALWAYS_ON
> - return default p2m_access_rwx for xenmem_access_to_p2m_access() when
> VM_EVENT=n
> - add wrapping for hvm_emulate_one_vm_event/
> hvmemul_write{,cmpxchg,rep_ins,rep_outs,rep_movs,rep_stos,read_io,write_io}_discard
> ---

No changes at all in this version? The v3 patch was much larger, after all.

Jan
Re: [PATCH v1 7/7] xen/vm_event: consolidate CONFIG_VM_EVENT
Posted by Jason Andryuk 2 hours ago
On 2025-11-13 05:16, Jan Beulich wrote:
> On 13.11.2025 04:16, Penny Zheng wrote:
>> Files hvm/vm_event.c and x86/vm_event.c extend the vm_event handling
>> routines, so their compilation shall be guarded by CONFIG_VM_EVENT too.
>>
>> Although CONFIG_VM_EVENT is currently forced on for x86 via
>> MEM_ACCESS_ALWAYS_ON, it will become possible to disable it later by
>> disabling CONFIG_MGMT_HYPERCALLS. So remove MEM_ACCESS_ALWAYS_ON and make
>> VM_EVENT default to y only on x86, to retain the current behaviour.
>>
>> The following functions are built on top of the vm_event framework, or are
>> only invoked by vm_event.c, so they shall all be wrapped with
>> CONFIG_VM_EVENT (otherwise they become unreachable and violate MISRA rule
>> 2.1 when VM_EVENT=n):
>> - hvm_toggle_singlestep
>> - hvm_fast_singlestep
>> - hvm_emulate_one_vm_event
>>      - hvmemul_write{,cmpxchg,rep_ins,rep_outs,rep_movs,rep_stos,read_io,write_io}_discard
>> Also, vm_event_check_ring() needs a stub to pass compilation when
>> VM_EVENT=n.
>>
>> Signed-off-by: Penny Zheng <Penny.Zheng@amd.com>
>> ---
>> v1 -> v2:
>> - split out XSM changes
> 
> Isn't that split out patch also needed as a prereq to the one here? The one
> here on its own:
> Acked-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Jason Andryuk <jason.andryuk@amd.com>