The files hvm/vm_event.c and x86/vm_event.c extend the vm_event handling
routines, so their compilation shall be guarded by CONFIG_VM_EVENT too.

Although CONFIG_VM_EVENT is currently forcibly enabled on x86 via
MEM_ACCESS_ALWAYS_ON, it will later become possible to disable it by
disabling CONFIG_MGMT_HYPERCALLS. So remove MEM_ACCESS_ALWAYS_ON and
instead default VM_EVENT to y only on x86, to retain the current behaviour.

The following functions are built on top of the vm_event framework, or are
only invoked from vm_event.c, so they shall all be wrapped with
CONFIG_VM_EVENT (otherwise they become unreachable and violate MISRA
rule 2.1 when VM_EVENT=n):
- hvm_toggle_singlestep
- hvm_fast_singlestep
- hvm_emulate_one_vm_event
- hvmemul_write{,cmpxchg,rep_ins,rep_outs,rep_movs,rep_stos,read_io,write_io}_discard

Additionally, vm_event_check_ring() needs a stub to allow compilation when
VM_EVENT=n.
Signed-off-by: Penny Zheng <Penny.Zheng@amd.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Jason Andryuk <jason.andryuk@amd.com>
---
As the last commit of the series, please commit this either last, or together
with the prerequisite commits 8d708e98ad, 8b4147009f, dbfccb5918, ae931f63a0,
37ec0e2b75.
---
xen/arch/x86/Makefile | 2 +-
xen/arch/x86/hvm/Kconfig | 1 -
xen/arch/x86/hvm/Makefile | 2 +-
xen/arch/x86/hvm/emulate.c | 58 ++++++++++++++++++++------------------
xen/arch/x86/hvm/hvm.c | 2 ++
xen/common/Kconfig | 7 ++---
xen/include/xen/vm_event.h | 7 +++++
7 files changed, 44 insertions(+), 35 deletions(-)

diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
index d8b41cec16..5bf3578983 100644
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -69,7 +69,7 @@ obj-$(CONFIG_INTEL) += tsx.o
obj-y += x86_emulate.o
obj-$(CONFIG_TBOOT) += tboot.o
obj-y += hpet.o
-obj-y += vm_event.o
+obj-$(CONFIG_VM_EVENT) += vm_event.o
obj-y += xstate.o

ifneq ($(CONFIG_PV_SHIM_EXCLUSIVE),y)
diff --git a/xen/arch/x86/hvm/Kconfig b/xen/arch/x86/hvm/Kconfig
index c1a131d185..25eb3e374f 100644
--- a/xen/arch/x86/hvm/Kconfig
+++ b/xen/arch/x86/hvm/Kconfig
@@ -4,7 +4,6 @@ menuconfig HVM
default !PV_SHIM
select COMPAT
select IOREQ_SERVER
- select MEM_ACCESS_ALWAYS_ON
help
Interfaces to support HVM domains. HVM domains require hardware
virtualisation extensions (e.g. Intel VT-x, AMD SVM), but can boot
diff --git a/xen/arch/x86/hvm/Makefile b/xen/arch/x86/hvm/Makefile
index ee4b45a4ee..f34fb03934 100644
--- a/xen/arch/x86/hvm/Makefile
+++ b/xen/arch/x86/hvm/Makefile
@@ -26,7 +26,7 @@ obj-y += save.o
obj-y += stdvga.o
obj-y += vioapic.o
obj-y += vlapic.o
-obj-y += vm_event.o
+obj-$(CONFIG_VM_EVENT) += vm_event.o
obj-y += vmsi.o
obj-y += vpic.o
obj-y += vpt.o
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index fe75b0516d..d56ef02baf 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1615,6 +1615,7 @@ static int cf_check hvmemul_blk(
return rc;
}

+#ifdef CONFIG_VM_EVENT
static int cf_check hvmemul_write_discard(
enum x86_segment seg,
unsigned long offset,
@@ -1717,6 +1718,7 @@ static int cf_check hvmemul_cache_op_discard(
{
return X86EMUL_OKAY;
}
+#endif /* CONFIG_VM_EVENT */

static int cf_check hvmemul_cmpxchg(
enum x86_segment seg,
@@ -2750,33 +2752,6 @@ static const struct x86_emulate_ops hvm_emulate_ops = {
.vmfunc = hvmemul_vmfunc,
};

-static const struct x86_emulate_ops hvm_emulate_ops_no_write = {
- .read = hvmemul_read,
- .insn_fetch = hvmemul_insn_fetch,
- .write = hvmemul_write_discard,
- .cmpxchg = hvmemul_cmpxchg_discard,
- .rep_ins = hvmemul_rep_ins_discard,
- .rep_outs = hvmemul_rep_outs_discard,
- .rep_movs = hvmemul_rep_movs_discard,
- .rep_stos = hvmemul_rep_stos_discard,
- .read_segment = hvmemul_read_segment,
- .write_segment = hvmemul_write_segment,
- .read_io = hvmemul_read_io_discard,
- .write_io = hvmemul_write_io_discard,
- .read_cr = hvmemul_read_cr,
- .write_cr = hvmemul_write_cr,
- .read_xcr = hvmemul_read_xcr,
- .write_xcr = hvmemul_write_xcr,
- .read_msr = hvmemul_read_msr,
- .write_msr = hvmemul_write_msr_discard,
- .cache_op = hvmemul_cache_op_discard,
- .tlb_op = hvmemul_tlb_op,
- .cpuid = x86emul_cpuid,
- .get_fpu = hvmemul_get_fpu,
- .put_fpu = hvmemul_put_fpu,
- .vmfunc = hvmemul_vmfunc,
-};
-
/*
* Note that passing VIO_no_completion into this function serves as kind
* of (but not fully) an "auto select completion" indicator. When there's
@@ -2887,6 +2862,34 @@ int hvm_emulate_one(
return _hvm_emulate_one(hvmemul_ctxt, &hvm_emulate_ops, completion);
}

+#ifdef CONFIG_VM_EVENT
+static const struct x86_emulate_ops hvm_emulate_ops_no_write = {
+ .read = hvmemul_read,
+ .insn_fetch = hvmemul_insn_fetch,
+ .write = hvmemul_write_discard,
+ .cmpxchg = hvmemul_cmpxchg_discard,
+ .rep_ins = hvmemul_rep_ins_discard,
+ .rep_outs = hvmemul_rep_outs_discard,
+ .rep_movs = hvmemul_rep_movs_discard,
+ .rep_stos = hvmemul_rep_stos_discard,
+ .read_segment = hvmemul_read_segment,
+ .write_segment = hvmemul_write_segment,
+ .read_io = hvmemul_read_io_discard,
+ .write_io = hvmemul_write_io_discard,
+ .read_cr = hvmemul_read_cr,
+ .write_cr = hvmemul_write_cr,
+ .read_xcr = hvmemul_read_xcr,
+ .write_xcr = hvmemul_write_xcr,
+ .read_msr = hvmemul_read_msr,
+ .write_msr = hvmemul_write_msr_discard,
+ .cache_op = hvmemul_cache_op_discard,
+ .tlb_op = hvmemul_tlb_op,
+ .cpuid = x86emul_cpuid,
+ .get_fpu = hvmemul_get_fpu,
+ .put_fpu = hvmemul_put_fpu,
+ .vmfunc = hvmemul_vmfunc,
+};
+
void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,
unsigned int errcode)
{
@@ -2949,6 +2952,7 @@ void hvm_emulate_one_vm_event(enum emul_kind kind, unsigned int trapnr,

hvm_emulate_writeback(&ctx);
}
+#endif /* CONFIG_VM_EVENT */

void hvm_emulate_init_once(
struct hvm_emulate_ctxt *hvmemul_ctxt,
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index b34cd29629..4d37a93c57 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5250,6 +5250,7 @@ int hvm_debug_op(struct vcpu *v, int32_t op)
return rc;
}

+#ifdef CONFIG_VM_EVENT
void hvm_toggle_singlestep(struct vcpu *v)
{
ASSERT(atomic_read(&v->pause_count));
@@ -5276,6 +5277,7 @@ void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx)
v->arch.hvm.fast_single_step.p2midx = p2midx;
}
#endif
+#endif /* CONFIG_VM_EVENT */

/*
* Segment caches in VMCB/VMCS are inconsistent about which bits are checked,
diff --git a/xen/common/Kconfig b/xen/common/Kconfig
index 38320b248a..d7e79e752a 100644
--- a/xen/common/Kconfig
+++ b/xen/common/Kconfig
@@ -173,13 +173,10 @@ config HAS_VMAP
config LIBFDT
bool

-config MEM_ACCESS_ALWAYS_ON
- bool
-
config VM_EVENT
- def_bool MEM_ACCESS_ALWAYS_ON
- prompt "Memory Access and VM events" if !MEM_ACCESS_ALWAYS_ON
+ bool "Memory Access and VM events"
depends on HVM
+ default X86
help

Framework to configure memory access types for guests and receive
diff --git a/xen/include/xen/vm_event.h b/xen/include/xen/vm_event.h
index 27d0c74216..1b76ce632e 100644
--- a/xen/include/xen/vm_event.h
+++ b/xen/include/xen/vm_event.h
@@ -51,7 +51,14 @@ struct vm_event_domain
};

/* Returns whether a ring has been set up */
+#ifdef CONFIG_VM_EVENT
bool vm_event_check_ring(struct vm_event_domain *ved);
+#else
+static inline bool vm_event_check_ring(struct vm_event_domain *ved)
+{
+ return false;
+}
+#endif /* CONFIG_VM_EVENT */

/* Returns 0 on success, -ENOSYS if there is no ring, -EBUSY if there is no
* available space and the caller is a foreign domain. If the guest itself
--
2.34.1
Hi Tamas,
May I ask for a review of this commit?
Many thanks,
Penny Zheng
> -----Original Message-----
> From: Penny, Zheng <penny.zheng@amd.com>
> Sent: Thursday, January 15, 2026 5:29 PM
> To: xen-devel@lists.xenproject.org; Andryuk, Jason <Jason.Andryuk@amd.com>
> Cc: Huang, Ray <Ray.Huang@amd.com>; Penny, Zheng
> <penny.zheng@amd.com>; Jan Beulich <jbeulich@suse.com>; Andrew Cooper
> <andrew.cooper3@citrix.com>; Roger Pau Monné <roger.pau@citrix.com>;
> Anthony PERARD <anthony.perard@vates.tech>; Orzel, Michal
> <Michal.Orzel@amd.com>; Julien Grall <julien@xen.org>; Stefano Stabellini
> <sstabellini@kernel.org>; Tamas K Lengyel <tamas@tklengyel.com>; Alexandru
> Isaila <aisaila@bitdefender.com>; Petre Pircalabu <ppircalabu@bitdefender.com>
> Subject: [PATCH v4 6/6] xen/vm_event: consolidate CONFIG_VM_EVENT
>
On Thu, Jan 15, 2026 at 4:29 AM Penny Zheng <Penny.Zheng@amd.com> wrote:
> Signed-off-by: Penny Zheng <Penny.Zheng@amd.com>
> Acked-by: Jan Beulich <jbeulich@suse.com>
> Reviewed-by: Jason Andryuk <jason.andryuk@amd.com>
>
Acked-by: Tamas K Lengyel <tamas@tklengyel.com>
On 15.01.2026 10:28, Penny Zheng wrote:
> ---
> As the last commit of the series, please commit this either last, or together
> with the prerequisite commits 8d708e98ad, 8b4147009f, dbfccb5918, ae931f63a0,
> 37ec0e2b75.
What do these hashes refer to? Also (assuming these might be the hashes of the
commits in your private tree), as I'm pretty sure I said before, committing a
series in-order is the default thing to happen. It's patches that are
independent of earlier ones which may want to call out that fact, for them to
possibly go in early.
Jan