Function vm_event_is_enabled() is introduced to check whether vm_event is
enabled, and the check is made conditional upon CONFIG_VM_EVENT, which lets
the compiler DCE many calls/paths, such as hvm_monitor_io(), when
VM_EVENT=n. The in-place assertions of arch.vm_event become redundant and
are removed.
Signed-off-by: Penny Zheng <Penny.Zheng@amd.com>
---
xen/arch/x86/hvm/emulate.c | 6 ++---
xen/arch/x86/hvm/hvm.c | 41 +++++++++++++----------------
xen/arch/x86/hvm/svm/intr.c | 2 +-
xen/arch/x86/hvm/vmx/intr.c | 2 +-
xen/arch/x86/include/asm/vm_event.h | 9 +++++++
5 files changed, 33 insertions(+), 27 deletions(-)
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 2af4f30359..75567db403 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -105,7 +105,7 @@ static int set_context_data(void *buffer, unsigned int size)
{
struct vcpu *curr = current;
- if ( curr->arch.vm_event )
+ if ( vm_event_is_enabled(curr) )
{
unsigned int safe_size =
min(size, curr->arch.vm_event->emul.read.size);
@@ -771,7 +771,7 @@ static void *hvmemul_map_linear_addr(
ASSERT(p2mt == p2m_ram_logdirty || !p2m_is_readonly(p2mt));
}
- if ( unlikely(curr->arch.vm_event) &&
+ if ( unlikely(vm_event_is_enabled(curr)) &&
curr->arch.vm_event->send_event &&
hvm_monitor_check_p2m(addr, gfn, pfec, npfec_kind_with_gla) )
{
@@ -1870,7 +1870,7 @@ static int hvmemul_rep_outs_set_context(
int rc = X86EMUL_OKAY;
ASSERT(bytes_per_rep <= 4);
- if ( !ev )
+ if ( !vm_event_is_enabled(current) )
return X86EMUL_UNHANDLEABLE;
ptr = ev->emul.read.data;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 23bd7f078a..e3abd2849a 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -532,7 +532,7 @@ void hvm_do_resume(struct vcpu *v)
if ( !vcpu_ioreq_handle_completion(v) )
return;
- if ( unlikely(v->arch.vm_event) )
+ if ( unlikely(vm_event_is_enabled(v)) )
hvm_vm_event_do_resume(v);
/* Inject pending hw/sw event */
@@ -546,11 +546,12 @@ void hvm_do_resume(struct vcpu *v)
v->arch.hvm.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
}
- if ( unlikely(v->arch.vm_event) && v->arch.monitor.next_interrupt_enabled )
+ if ( unlikely(vm_event_is_enabled(v)) &&
+ v->arch.monitor.next_interrupt_enabled )
{
struct x86_event info;
- if ( hvm_get_pending_event(v, &info) )
+ if ( hvm_get_pending_event(v, &info) && vm_event_is_enabled(v) )
{
hvm_monitor_interrupt(info.vector, info.type, info.error_code,
info.cr2);
@@ -2088,7 +2089,7 @@ int hvm_handle_xsetbv(u32 index, u64 new_bv)
{
int rc;
- if ( index == 0 )
+ if ( index == 0 && vm_event_is_enabled(current) )
hvm_monitor_crX(XCR0, new_bv, current->arch.xcr0);
rc = x86emul_write_xcr(index, new_bv, NULL);
@@ -2337,9 +2338,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
monitor_ctrlreg_bitmask(VM_EVENT_X86_CR0)) )
{
- ASSERT(v->arch.vm_event);
-
- if ( hvm_monitor_crX(CR0, value, old_value) )
+ if ( vm_event_is_enabled(v) && hvm_monitor_crX(CR0, value, old_value) )
{
/* The actual write will occur in hvm_do_resume(), if permitted. */
v->arch.vm_event->write_data.do_write.cr0 = 1;
@@ -2462,9 +2461,8 @@ int hvm_set_cr3(unsigned long value, bool noflush, bool may_defer)
if ( may_defer && unlikely(currd->arch.monitor.write_ctrlreg_enabled &
monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3)) )
{
- ASSERT(curr->arch.vm_event);
-
- if ( hvm_monitor_crX(CR3, value, curr->arch.hvm.guest_cr[3]) )
+ if ( vm_event_is_enabled(curr) &&
+ hvm_monitor_crX(CR3, value, curr->arch.hvm.guest_cr[3]) )
{
/* The actual write will occur in hvm_do_resume(), if permitted. */
curr->arch.vm_event->write_data.do_write.cr3 = 1;
@@ -2544,9 +2542,7 @@ int hvm_set_cr4(unsigned long value, bool may_defer)
if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
monitor_ctrlreg_bitmask(VM_EVENT_X86_CR4)) )
{
- ASSERT(v->arch.vm_event);
-
- if ( hvm_monitor_crX(CR4, value, old_cr) )
+ if ( vm_event_is_enabled(v) && hvm_monitor_crX(CR4, value, old_cr) )
{
/* The actual write will occur in hvm_do_resume(), if permitted. */
v->arch.vm_event->write_data.do_write.cr4 = 1;
@@ -3407,7 +3403,7 @@ static enum hvm_translation_result __hvm_copy(
return HVMTRANS_bad_gfn_to_mfn;
}
- if ( unlikely(v->arch.vm_event) &&
+ if ( unlikely(vm_event_is_enabled(v)) &&
(flags & HVMCOPY_linear) &&
v->arch.vm_event->send_event &&
hvm_monitor_check_p2m(addr, gfn, pfec, npfec_kind_with_gla) )
@@ -3538,6 +3534,7 @@ int hvm_vmexit_cpuid(struct cpu_user_regs *regs, unsigned int inst_len)
struct vcpu *curr = current;
unsigned int leaf = regs->eax, subleaf = regs->ecx;
struct cpuid_leaf res;
+ int ret = 0;
if ( curr->arch.msrs->misc_features_enables.cpuid_faulting &&
hvm_get_cpl(curr) > 0 )
@@ -3554,7 +3551,10 @@ int hvm_vmexit_cpuid(struct cpu_user_regs *regs, unsigned int inst_len)
regs->rcx = res.c;
regs->rdx = res.d;
- return hvm_monitor_cpuid(inst_len, leaf, subleaf);
+ if ( vm_event_is_enabled(curr) )
+ ret = hvm_monitor_cpuid(inst_len, leaf, subleaf);
+
+ return ret;
}
void hvm_rdtsc_intercept(struct cpu_user_regs *regs)
@@ -3694,9 +3694,8 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
if ( ret != X86EMUL_OKAY )
return ret;
- ASSERT(v->arch.vm_event);
-
- if ( hvm_monitor_msr(msr, msr_content, msr_old_content) )
+ if ( vm_event_is_enabled(v) &&
+ hvm_monitor_msr(msr, msr_content, msr_old_content) )
{
/* The actual write will occur in hvm_do_resume(), if permitted. */
v->arch.vm_event->write_data.do_write.msr = 1;
@@ -3854,12 +3853,10 @@ int hvm_descriptor_access_intercept(uint64_t exit_info,
struct vcpu *curr = current;
struct domain *currd = curr->domain;
- if ( currd->arch.monitor.descriptor_access_enabled )
- {
- ASSERT(curr->arch.vm_event);
+ if ( currd->arch.monitor.descriptor_access_enabled &&
+ vm_event_is_enabled(curr) )
hvm_monitor_descriptor_access(exit_info, vmx_exit_qualification,
descriptor, is_write);
- }
else if ( !hvm_emulate_one_insn(is_sysdesc_access, "sysdesc access") )
domain_crash(currd);
diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
index 46186a1102..557ebc98d8 100644
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -130,7 +130,7 @@ void asmlinkage svm_intr_assist(void)
enum hvm_intblk intblk;
/* Block event injection while handling a sync vm_event. */
- if ( unlikely(v->arch.vm_event) && v->arch.vm_event->sync_event )
+ if ( unlikely(vm_event_is_enabled(v)) && v->arch.vm_event->sync_event )
return;
/* Crank the handle on interrupt state. */
diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
index b35dc8c586..a8ced95871 100644
--- a/xen/arch/x86/hvm/vmx/intr.c
+++ b/xen/arch/x86/hvm/vmx/intr.c
@@ -239,7 +239,7 @@ void asmlinkage vmx_intr_assist(void)
}
/* Block event injection while handling a sync vm_event. */
- if ( unlikely(v->arch.vm_event) && v->arch.vm_event->sync_event )
+ if ( unlikely(vm_event_is_enabled(v)) && v->arch.vm_event->sync_event )
return;
#ifdef CONFIG_MEM_SHARING
diff --git a/xen/arch/x86/include/asm/vm_event.h b/xen/arch/x86/include/asm/vm_event.h
index 46e77ed6d9..446d02c7d5 100644
--- a/xen/arch/x86/include/asm/vm_event.h
+++ b/xen/arch/x86/include/asm/vm_event.h
@@ -45,4 +45,13 @@ void vm_event_sync_event(struct vcpu *v, bool value);
void vm_event_reset_vmtrace(struct vcpu *v);
+static inline bool vm_event_is_enabled(struct vcpu *v)
+{
+#ifdef CONFIG_VM_EVENT
+ return v->arch.vm_event != NULL;
+#else
+ return false;
+#endif
+}
+
#endif /* __ASM_X86_VM_EVENT_H__ */
--
2.34.1
On 12.09.2025 06:52, Penny Zheng wrote:
> Function vm_event_is_enabled() is introduced to check whether vm_event is
> enabled, and the check is made conditional upon CONFIG_VM_EVENT, which lets
> the compiler DCE many calls/paths, such as hvm_monitor_io(), when
> VM_EVENT=n. The in-place assertions of arch.vm_event become redundant and
> are removed.
>
> Signed-off-by: Penny Zheng <Penny.Zheng@amd.com>
Why is this sent standalone, without even a reference to the domctl series?
Without the connection, this clearly wouldn't be valid to consider for 4.21.
Also you will want to Cc Oleksii on such past-the-deadline submissions.
> ---
> xen/arch/x86/hvm/emulate.c | 6 ++---
> xen/arch/x86/hvm/hvm.c | 41 +++++++++++++----------------
> xen/arch/x86/hvm/svm/intr.c | 2 +-
> xen/arch/x86/hvm/vmx/intr.c | 2 +-
> xen/arch/x86/include/asm/vm_event.h | 9 +++++++
> 5 files changed, 33 insertions(+), 27 deletions(-)
With this diffstat, I think the subject prefix is misleading (should perhaps
be x86/vm_event: or x86/hvm:).
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -105,7 +105,7 @@ static int set_context_data(void *buffer, unsigned int size)
> {
> struct vcpu *curr = current;
>
> - if ( curr->arch.vm_event )
> + if ( vm_event_is_enabled(curr) )
> {
> unsigned int safe_size =
> min(size, curr->arch.vm_event->emul.read.size);
> @@ -771,7 +771,7 @@ static void *hvmemul_map_linear_addr(
> ASSERT(p2mt == p2m_ram_logdirty || !p2m_is_readonly(p2mt));
> }
>
> - if ( unlikely(curr->arch.vm_event) &&
> + if ( unlikely(vm_event_is_enabled(curr)) &&
> curr->arch.vm_event->send_event &&
> hvm_monitor_check_p2m(addr, gfn, pfec, npfec_kind_with_gla) )
> {
> @@ -1870,7 +1870,7 @@ static int hvmemul_rep_outs_set_context(
> int rc = X86EMUL_OKAY;
>
> ASSERT(bytes_per_rep <= 4);
> - if ( !ev )
> + if ( !vm_event_is_enabled(current) )
> return X86EMUL_UNHANDLEABLE;
I wonder if in a case like this one the assignment (to ev) would better move
past the predicate check.
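Something along these lines, perhaps (just a sketch — the declaration's exact
type and placement are taken from context, so treat them as assumptions):

    const struct arch_vm_event *ev;

    ASSERT(bytes_per_rep <= 4);
    if ( !vm_event_is_enabled(current) )
        return X86EMUL_UNHANDLEABLE;

    /* Assign (and hence dereference) only after the predicate check. */
    ev = current->arch.vm_event;
    ptr = ev->emul.read.data;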
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -532,7 +532,7 @@ void hvm_do_resume(struct vcpu *v)
> if ( !vcpu_ioreq_handle_completion(v) )
> return;
>
> - if ( unlikely(v->arch.vm_event) )
> + if ( unlikely(vm_event_is_enabled(v)) )
> hvm_vm_event_do_resume(v);
>
> /* Inject pending hw/sw event */
> @@ -546,11 +546,12 @@ void hvm_do_resume(struct vcpu *v)
> v->arch.hvm.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
> }
>
> - if ( unlikely(v->arch.vm_event) && v->arch.monitor.next_interrupt_enabled )
> + if ( unlikely(vm_event_is_enabled(v)) &&
With this, ...
> + v->arch.monitor.next_interrupt_enabled )
> {
> struct x86_event info;
>
> - if ( hvm_get_pending_event(v, &info) )
> + if ( hvm_get_pending_event(v, &info) && vm_event_is_enabled(v) )
... why this?
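That is, with the outer condition in place, the inner re-check looks
redundant; presumably this could simply stay as:

    if ( unlikely(vm_event_is_enabled(v)) &&
         v->arch.monitor.next_interrupt_enabled )
    {
        struct x86_event info;

        /* vm_event_is_enabled(v) was already established above. */
        if ( hvm_get_pending_event(v, &info) )
            hvm_monitor_interrupt(info.vector, info.type, info.error_code,
                                  info.cr2);
    }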
> @@ -2088,7 +2089,7 @@ int hvm_handle_xsetbv(u32 index, u64 new_bv)
> {
> int rc;
>
> - if ( index == 0 )
> + if ( index == 0 && vm_event_is_enabled(current) )
> hvm_monitor_crX(XCR0, new_bv, current->arch.xcr0);
>
> rc = x86emul_write_xcr(index, new_bv, NULL);
> @@ -2337,9 +2338,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
> if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
> monitor_ctrlreg_bitmask(VM_EVENT_X86_CR0)) )
> {
> - ASSERT(v->arch.vm_event);
> -
> - if ( hvm_monitor_crX(CR0, value, old_value) )
> + if ( vm_event_is_enabled(v) && hvm_monitor_crX(CR0, value, old_value) )
> {
I don't think assertions (here and below) should be replaced like this.
Can't you e.g. force "may_defer" to false at the top of the function when
vm_event_is_enabled() returns false?
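I.e. along the lines of (sketch only, abridged to the CR0 path; the same
shape would apply to the CR3/CR4/MSR paths):

    /* Near the top of hvm_set_cr0(): */
    if ( !vm_event_is_enabled(v) )
        may_defer = false;    /* compile-time false when VM_EVENT=n */

    if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
                               monitor_ctrlreg_bitmask(VM_EVENT_X86_CR0)) )
    {
        ASSERT(v->arch.vm_event);    /* the assertion can then stay */

        if ( hvm_monitor_crX(CR0, value, old_value) )
        {
            /* The actual write will occur in hvm_do_resume(), if permitted. */
            v->arch.vm_event->write_data.do_write.cr0 = 1;
            /* (rest of the deferral path unchanged) */
        }
    }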
> @@ -2462,9 +2461,8 @@ int hvm_set_cr3(unsigned long value, bool noflush, bool may_defer)
> if ( may_defer && unlikely(currd->arch.monitor.write_ctrlreg_enabled &
> monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3)) )
> {
> - ASSERT(curr->arch.vm_event);
> -
> - if ( hvm_monitor_crX(CR3, value, curr->arch.hvm.guest_cr[3]) )
> + if ( vm_event_is_enabled(curr) &&
> + hvm_monitor_crX(CR3, value, curr->arch.hvm.guest_cr[3]) )
> {
> /* The actual write will occur in hvm_do_resume(), if permitted. */
> curr->arch.vm_event->write_data.do_write.cr3 = 1;
> @@ -2544,9 +2542,7 @@ int hvm_set_cr4(unsigned long value, bool may_defer)
> if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
> monitor_ctrlreg_bitmask(VM_EVENT_X86_CR4)) )
> {
> - ASSERT(v->arch.vm_event);
> -
> - if ( hvm_monitor_crX(CR4, value, old_cr) )
> + if ( vm_event_is_enabled(v) && hvm_monitor_crX(CR4, value, old_cr) )
> {
> /* The actual write will occur in hvm_do_resume(), if permitted. */
> v->arch.vm_event->write_data.do_write.cr4 = 1;
> @@ -3407,7 +3403,7 @@ static enum hvm_translation_result __hvm_copy(
> return HVMTRANS_bad_gfn_to_mfn;
> }
>
> - if ( unlikely(v->arch.vm_event) &&
> + if ( unlikely(vm_event_is_enabled(v)) &&
> (flags & HVMCOPY_linear) &&
> v->arch.vm_event->send_event &&
> hvm_monitor_check_p2m(addr, gfn, pfec, npfec_kind_with_gla) )
> @@ -3538,6 +3534,7 @@ int hvm_vmexit_cpuid(struct cpu_user_regs *regs, unsigned int inst_len)
> struct vcpu *curr = current;
> unsigned int leaf = regs->eax, subleaf = regs->ecx;
> struct cpuid_leaf res;
> + int ret = 0;
>
> if ( curr->arch.msrs->misc_features_enables.cpuid_faulting &&
> hvm_get_cpl(curr) > 0 )
> @@ -3554,7 +3551,10 @@ int hvm_vmexit_cpuid(struct cpu_user_regs *regs, unsigned int inst_len)
> regs->rcx = res.c;
> regs->rdx = res.d;
>
> - return hvm_monitor_cpuid(inst_len, leaf, subleaf);
> + if ( vm_event_is_enabled(curr) )
> + ret = hvm_monitor_cpuid(inst_len, leaf, subleaf);
> +
> + return ret;
> }
>
> void hvm_rdtsc_intercept(struct cpu_user_regs *regs)
> @@ -3694,9 +3694,8 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
> if ( ret != X86EMUL_OKAY )
> return ret;
>
> - ASSERT(v->arch.vm_event);
> -
> - if ( hvm_monitor_msr(msr, msr_content, msr_old_content) )
> + if ( vm_event_is_enabled(v) &&
> + hvm_monitor_msr(msr, msr_content, msr_old_content) )
> {
> /* The actual write will occur in hvm_do_resume(), if permitted. */
> v->arch.vm_event->write_data.do_write.msr = 1;
> @@ -3854,12 +3853,10 @@ int hvm_descriptor_access_intercept(uint64_t exit_info,
> struct vcpu *curr = current;
> struct domain *currd = curr->domain;
>
> - if ( currd->arch.monitor.descriptor_access_enabled )
> - {
> - ASSERT(curr->arch.vm_event);
> + if ( currd->arch.monitor.descriptor_access_enabled &&
> + vm_event_is_enabled(curr) )
> hvm_monitor_descriptor_access(exit_info, vmx_exit_qualification,
> descriptor, is_write);
> - }
> else if ( !hvm_emulate_one_insn(is_sysdesc_access, "sysdesc access") )
> domain_crash(currd);
Following "xen: consolidate CONFIG_VM_EVENT" this function is actually unreachable
when VM_EVENT=n, so no change should be needed here. It's instead the unreachability
which needs properly taking care of (to satisfy Misra requirements) there.
> --- a/xen/arch/x86/hvm/svm/intr.c
> +++ b/xen/arch/x86/hvm/svm/intr.c
> @@ -130,7 +130,7 @@ void asmlinkage svm_intr_assist(void)
> enum hvm_intblk intblk;
>
> /* Block event injection while handling a sync vm_event. */
> - if ( unlikely(v->arch.vm_event) && v->arch.vm_event->sync_event )
> + if ( unlikely(vm_event_is_enabled(v)) && v->arch.vm_event->sync_event )
> return;
>
> /* Crank the handle on interrupt state. */
> diff --git a/xen/arch/x86/hvm/vmx/intr.c b/xen/arch/x86/hvm/vmx/intr.c
> index b35dc8c586..a8ced95871 100644
> --- a/xen/arch/x86/hvm/vmx/intr.c
> +++ b/xen/arch/x86/hvm/vmx/intr.c
> @@ -239,7 +239,7 @@ void asmlinkage vmx_intr_assist(void)
> }
>
> /* Block event injection while handling a sync vm_event. */
> - if ( unlikely(v->arch.vm_event) && v->arch.vm_event->sync_event )
> + if ( unlikely(vm_event_is_enabled(v)) && v->arch.vm_event->sync_event )
> return;
>
> #ifdef CONFIG_MEM_SHARING
> diff --git a/xen/arch/x86/include/asm/vm_event.h b/xen/arch/x86/include/asm/vm_event.h
> index 46e77ed6d9..446d02c7d5 100644
> --- a/xen/arch/x86/include/asm/vm_event.h
> +++ b/xen/arch/x86/include/asm/vm_event.h
> @@ -45,4 +45,13 @@ void vm_event_sync_event(struct vcpu *v, bool value);
>
> void vm_event_reset_vmtrace(struct vcpu *v);
>
> +static inline bool vm_event_is_enabled(struct vcpu *v)
> +{
> +#ifdef CONFIG_VM_EVENT
> + return v->arch.vm_event != NULL;
Is "enabled" (in the function name) a good description of this condition, Tamas?
Jan
> -----Original Message-----
> From: Jan Beulich <jbeulich@suse.com>
> Sent: Friday, September 12, 2025 3:30 PM
> To: Penny, Zheng <penny.zheng@amd.com>; Tamas K Lengyel
> <tamas@tklengyel.com>
> Cc: Huang, Ray <Ray.Huang@amd.com>; Andrew Cooper
> <andrew.cooper3@citrix.com>; Roger Pau Monné <roger.pau@citrix.com>;
> Alexandru Isaila <aisaila@bitdefender.com>; Petre Pircalabu
> <ppircalabu@bitdefender.com>; xen-devel@lists.xenproject.org; Oleksii Kurochko
> <oleksii.kurochko@gmail.com>
> Subject: Re: [PATCH] xen/vm_event: introduce vm_event_is_enabled()
>
> On 12.09.2025 06:52, Penny Zheng wrote:
> > @@ -2462,9 +2461,8 @@ int hvm_set_cr3(unsigned long value, bool noflush,
> bool may_defer)
> > if ( may_defer && unlikely(currd->arch.monitor.write_ctrlreg_enabled &
> > monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3)) )
> > {
> > - ASSERT(curr->arch.vm_event);
> > -
> > - if ( hvm_monitor_crX(CR3, value, curr->arch.hvm.guest_cr[3]) )
> > + if ( vm_event_is_enabled(curr) &&
> > + hvm_monitor_crX(CR3, value, curr->arch.hvm.guest_cr[3])
> > + )
> > {
> > /* The actual write will occur in hvm_do_resume(), if permitted. */
> > curr->arch.vm_event->write_data.do_write.cr3 = 1; @@
> > -2544,9 +2542,7 @@ int hvm_set_cr4(unsigned long value, bool may_defer)
> > if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
> > monitor_ctrlreg_bitmask(VM_EVENT_X86_CR4)) )
> > {
> > - ASSERT(v->arch.vm_event);
> > -
> > - if ( hvm_monitor_crX(CR4, value, old_cr) )
> > + if ( vm_event_is_enabled(v) && hvm_monitor_crX(CR4, value,
> > + old_cr) )
> > {
> > /* The actual write will occur in hvm_do_resume(), if permitted. */
> > v->arch.vm_event->write_data.do_write.cr4 = 1; @@ -3407,7
> > +3403,7 @@ static enum hvm_translation_result __hvm_copy(
> > return HVMTRANS_bad_gfn_to_mfn;
> > }
> >
> > - if ( unlikely(v->arch.vm_event) &&
> > + if ( unlikely(vm_event_is_enabled(v)) &&
> > (flags & HVMCOPY_linear) &&
> > v->arch.vm_event->send_event &&
> > hvm_monitor_check_p2m(addr, gfn, pfec,
> > npfec_kind_with_gla) ) @@ -3538,6 +3534,7 @@ int hvm_vmexit_cpuid(struct
> cpu_user_regs *regs, unsigned int inst_len)
> > struct vcpu *curr = current;
> > unsigned int leaf = regs->eax, subleaf = regs->ecx;
> > struct cpuid_leaf res;
> > + int ret = 0;
> >
> > if ( curr->arch.msrs->misc_features_enables.cpuid_faulting &&
> > hvm_get_cpl(curr) > 0 )
> > @@ -3554,7 +3551,10 @@ int hvm_vmexit_cpuid(struct cpu_user_regs *regs,
> unsigned int inst_len)
> > regs->rcx = res.c;
> > regs->rdx = res.d;
> >
> > - return hvm_monitor_cpuid(inst_len, leaf, subleaf);
> > + if ( vm_event_is_enabled(curr) )
> > + ret = hvm_monitor_cpuid(inst_len, leaf, subleaf);
> > +
> > + return ret;
> > }
> >
> > void hvm_rdtsc_intercept(struct cpu_user_regs *regs) @@ -3694,9
> > +3694,8 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t
> msr_content,
> > if ( ret != X86EMUL_OKAY )
> > return ret;
> >
> > - ASSERT(v->arch.vm_event);
> > -
> > - if ( hvm_monitor_msr(msr, msr_content, msr_old_content) )
> > + if ( vm_event_is_enabled(v) &&
> > + hvm_monitor_msr(msr, msr_content, msr_old_content) )
> > {
> > /* The actual write will occur in hvm_do_resume(), if permitted. */
> > v->arch.vm_event->write_data.do_write.msr = 1; @@
> > -3854,12 +3853,10 @@ int hvm_descriptor_access_intercept(uint64_t exit_info,
> > struct vcpu *curr = current;
> > struct domain *currd = curr->domain;
> >
> > - if ( currd->arch.monitor.descriptor_access_enabled )
> > - {
> > - ASSERT(curr->arch.vm_event);
> > + if ( currd->arch.monitor.descriptor_access_enabled &&
> > + vm_event_is_enabled(curr) )
> > hvm_monitor_descriptor_access(exit_info, vmx_exit_qualification,
> > descriptor, is_write);
> > - }
> > else if ( !hvm_emulate_one_insn(is_sysdesc_access, "sysdesc access") )
> > domain_crash(currd);
>
> Following "xen: consolidate CONFIG_VM_EVENT" this function is actually
> unreachable when VM_EVENT=n, so no change should be needed here. It's instead
> the unreachability which needs properly taking care of (to satisfy Misra
> requirements) there.
>
I'm a bit confused and may not understand you correctly here.
It is exactly because hvm_monitor_descriptor_access() becomes unreachable code when VM_EVENT=n that, to avoid writing stubs, we added the vm_event_is_enabled() check here. Or maybe you want me to extend the description to say that the new check also helps compile out unreachable code?
>
> Jan
On 23.09.2025 10:19, Penny, Zheng wrote:
>
>> -----Original Message-----
>> From: Jan Beulich <jbeulich@suse.com>
>> Sent: Friday, September 12, 2025 3:30 PM
>> To: Penny, Zheng <penny.zheng@amd.com>; Tamas K Lengyel
>> <tamas@tklengyel.com>
>> Cc: Huang, Ray <Ray.Huang@amd.com>; Andrew Cooper
>> <andrew.cooper3@citrix.com>; Roger Pau Monné <roger.pau@citrix.com>;
>> Alexandru Isaila <aisaila@bitdefender.com>; Petre Pircalabu
>> <ppircalabu@bitdefender.com>; xen-devel@lists.xenproject.org; Oleksii Kurochko
>> <oleksii.kurochko@gmail.com>
>> Subject: Re: [PATCH] xen/vm_event: introduce vm_event_is_enabled()
>>
>> On 12.09.2025 06:52, Penny Zheng wrote:
>>> @@ -2462,9 +2461,8 @@ int hvm_set_cr3(unsigned long value, bool noflush,
>> bool may_defer)
>>> if ( may_defer && unlikely(currd->arch.monitor.write_ctrlreg_enabled &
>>> monitor_ctrlreg_bitmask(VM_EVENT_X86_CR3)) )
>>> {
>>> - ASSERT(curr->arch.vm_event);
>>> -
>>> - if ( hvm_monitor_crX(CR3, value, curr->arch.hvm.guest_cr[3]) )
>>> + if ( vm_event_is_enabled(curr) &&
>>> + hvm_monitor_crX(CR3, value, curr->arch.hvm.guest_cr[3])
>>> + )
>>> {
>>> /* The actual write will occur in hvm_do_resume(), if permitted. */
>>> curr->arch.vm_event->write_data.do_write.cr3 = 1; @@
>>> -2544,9 +2542,7 @@ int hvm_set_cr4(unsigned long value, bool may_defer)
>>> if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
>>> monitor_ctrlreg_bitmask(VM_EVENT_X86_CR4)) )
>>> {
>>> - ASSERT(v->arch.vm_event);
>>> -
>>> - if ( hvm_monitor_crX(CR4, value, old_cr) )
>>> + if ( vm_event_is_enabled(v) && hvm_monitor_crX(CR4, value,
>>> + old_cr) )
>>> {
>>> /* The actual write will occur in hvm_do_resume(), if permitted. */
>>> v->arch.vm_event->write_data.do_write.cr4 = 1; @@ -3407,7
>>> +3403,7 @@ static enum hvm_translation_result __hvm_copy(
>>> return HVMTRANS_bad_gfn_to_mfn;
>>> }
>>>
>>> - if ( unlikely(v->arch.vm_event) &&
>>> + if ( unlikely(vm_event_is_enabled(v)) &&
>>> (flags & HVMCOPY_linear) &&
>>> v->arch.vm_event->send_event &&
>>> hvm_monitor_check_p2m(addr, gfn, pfec,
>>> npfec_kind_with_gla) ) @@ -3538,6 +3534,7 @@ int hvm_vmexit_cpuid(struct
>> cpu_user_regs *regs, unsigned int inst_len)
>>> struct vcpu *curr = current;
>>> unsigned int leaf = regs->eax, subleaf = regs->ecx;
>>> struct cpuid_leaf res;
>>> + int ret = 0;
>>>
>>> if ( curr->arch.msrs->misc_features_enables.cpuid_faulting &&
>>> hvm_get_cpl(curr) > 0 )
>>> @@ -3554,7 +3551,10 @@ int hvm_vmexit_cpuid(struct cpu_user_regs *regs,
>> unsigned int inst_len)
>>> regs->rcx = res.c;
>>> regs->rdx = res.d;
>>>
>>> - return hvm_monitor_cpuid(inst_len, leaf, subleaf);
>>> + if ( vm_event_is_enabled(curr) )
>>> + ret = hvm_monitor_cpuid(inst_len, leaf, subleaf);
>>> +
>>> + return ret;
>>> }
>>>
>>> void hvm_rdtsc_intercept(struct cpu_user_regs *regs) @@ -3694,9
>>> +3694,8 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t
>> msr_content,
>>> if ( ret != X86EMUL_OKAY )
>>> return ret;
>>>
>>> - ASSERT(v->arch.vm_event);
>>> -
>>> - if ( hvm_monitor_msr(msr, msr_content, msr_old_content) )
>>> + if ( vm_event_is_enabled(v) &&
>>> + hvm_monitor_msr(msr, msr_content, msr_old_content) )
>>> {
>>> /* The actual write will occur in hvm_do_resume(), if permitted. */
>>> v->arch.vm_event->write_data.do_write.msr = 1; @@
>>> -3854,12 +3853,10 @@ int hvm_descriptor_access_intercept(uint64_t exit_info,
>>> struct vcpu *curr = current;
>>> struct domain *currd = curr->domain;
>>>
>>> - if ( currd->arch.monitor.descriptor_access_enabled )
>>> - {
>>> - ASSERT(curr->arch.vm_event);
>>> + if ( currd->arch.monitor.descriptor_access_enabled &&
>>> + vm_event_is_enabled(curr) )
>>> hvm_monitor_descriptor_access(exit_info, vmx_exit_qualification,
>>> descriptor, is_write);
>>> - }
>>> else if ( !hvm_emulate_one_insn(is_sysdesc_access, "sysdesc access") )
>>> domain_crash(currd);
>>
>> Following "xen: consolidate CONFIG_VM_EVENT" this function is actually
>> unreachable when VM_EVENT=n, so no change should be needed here. It's instead
>> the unreachability which needs properly taking care of (to satisfy Misra
>> requirements) there.
>>
>
> I'm a bit confused and may not understand you correctly here.
> It is exactly because hvm_monitor_descriptor_access() becomes unreachable code when VM_EVENT=n that, to avoid writing stubs, we added the vm_event_is_enabled() check here. Or maybe you want me to extend the description to say that the new check also helps compile out unreachable code?
If the function becomes unreachable, it's not its contents which need
altering. Instead, the unreachable function should be "removed" (by
#ifdef-ary) altogether in the respective configuration. Recall that
unreachability is a Misra violation (or a rule that iirc we accepted).
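I.e. something like (a sketch only — the full signature and the trailing
return are taken from context and may not be exact):

#ifdef CONFIG_VM_EVENT
int hvm_descriptor_access_intercept(uint64_t exit_info,
                                    uint64_t vmx_exit_qualification,
                                    unsigned int descriptor, bool is_write)
{
    struct vcpu *curr = current;
    struct domain *currd = curr->domain;

    if ( currd->arch.monitor.descriptor_access_enabled )
    {
        ASSERT(curr->arch.vm_event);
        hvm_monitor_descriptor_access(exit_info, vmx_exit_qualification,
                                      descriptor, is_write);
    }
    else if ( !hvm_emulate_one_insn(is_sysdesc_access, "sysdesc access") )
        domain_crash(currd);

    return X86EMUL_OKAY;
}
#endif /* CONFIG_VM_EVENT */

with the caller and the declaration guarded (or stubbed) accordingly.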
Jan
> > +static inline bool vm_event_is_enabled(struct vcpu *v)
> > +{
> > +#ifdef CONFIG_VM_EVENT
> > + return v->arch.vm_event != NULL;
>
> Is "enabled" (in the function name) a good description of this condition, Tamas?
Sure, sounds fine to me.
Tamas
On 14.09.2025 01:24, Tamas K Lengyel wrote:
>>> +static inline bool vm_event_is_enabled(struct vcpu *v)
>>> +{
>>> +#ifdef CONFIG_VM_EVENT
>>> + return v->arch.vm_event != NULL;
>>
>> Is "enabled" (in the function name) a good description of this condition, Tamas?
>
> Sure, sounds fine to me.
That is the pointer alone being non-NULL identifies "enabled"? And not
just e.g. "active" or "available" (can be enabled with further things
set up)?
Jan
On Sun, Sep 14, 2025 at 9:49 AM Jan Beulich <jbeulich@suse.com> wrote:
>
> On 14.09.2025 01:24, Tamas K Lengyel wrote:
> >>> +static inline bool vm_event_is_enabled(struct vcpu *v)
> >>> +{
> >>> +#ifdef CONFIG_VM_EVENT
> >>> + return v->arch.vm_event != NULL;
> >>
> >> Is "enabled" (in the function name) a good description of this condition, Tamas?
> >
> > Sure, sounds fine to me.
>
> That is the pointer alone being non-NULL identifies "enabled"? And not
> just e.g. "active" or "available" (can be enabled with further things
> set up)?
Nope, just that the struct is non-NULL means vm_event is enabled.
There is no meaningful distinction of enabled vs active vs available
in the contexts this is being checked.
Tamas