xen/arch/x86/hvm/hvm.c | 3 ++- xen/arch/x86/hvm/svm/svm.c | 2 +- xen/arch/x86/hvm/viridian/viridian.c | 2 +- xen/arch/x86/hvm/vmx/vmx.c | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-)
do_sched_op(SCHEDOP_yield) just calls vcpu_yield(). Remove the indirection
through the hypercall handler and use the function directly.
Perform the same for SCHEDOP_block.
Not a functional change.
Signed-off-by: Teddy Astie <teddy.astie@vates.tech>
---
xen/arch/x86/hvm/hvm.c | 3 ++-
xen/arch/x86/hvm/svm/svm.c | 2 +-
xen/arch/x86/hvm/viridian/viridian.c | 2 +-
xen/arch/x86/hvm/vmx/vmx.c | 2 +-
4 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 4cb2e13046..5c3e9ad72e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1736,7 +1736,8 @@ void hvm_hlt(unsigned int eflags)
if ( unlikely(!(eflags & X86_EFLAGS_IF)) )
return hvm_vcpu_down(curr);
- do_sched_op(SCHEDOP_block, guest_handle_from_ptr(NULL, void));
+ local_event_delivery_enable();
+ vcpu_block();
TRACE(TRC_HVM_HLT, /* pending = */ vcpu_runnable(curr));
}
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index e33a38c1e4..b0bcd4b1e7 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2159,7 +2159,7 @@ static void svm_vmexit_do_pause(struct cpu_user_regs *regs)
* Do something useful, like reschedule the guest
*/
perfc_incr(pauseloop_exits);
- do_sched_op(SCHEDOP_yield, guest_handle_from_ptr(NULL, void));
+ vcpu_yield();
}
static void
diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index 33d54e587e..7ea6c90168 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -959,7 +959,7 @@ int viridian_hypercall(struct cpu_user_regs *regs)
/*
* See section 14.5.1 of the specification.
*/
- do_sched_op(SCHEDOP_yield, guest_handle_from_ptr(NULL, void));
+ vcpu_yield();
break;
case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index d8879c304e..1b9fbc4f4e 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -4768,7 +4768,7 @@ void asmlinkage vmx_vmexit_handler(struct cpu_user_regs *regs)
case EXIT_REASON_PAUSE_INSTRUCTION:
perfc_incr(pauseloop_exits);
- do_sched_op(SCHEDOP_yield, guest_handle_from_ptr(NULL, void));
+ vcpu_yield();
break;
case EXIT_REASON_XSETBV:
--
2.50.1
Teddy Astie | Vates XCP-ng Developer
XCP-ng & Xen Orchestra - Vates solutions
web: https://vates.tech
On 2025-07-22 14:07, Teddy Astie wrote: > do_sched_op(SCHEDOP_yield) just calls vcpu_yield(). Remove the indirection > through the hypercall handler and use the function directly. > > Perform the same for SCHEDOP_block. > > Not a functional change. > > Signed-off-by: Teddy Astie <teddy.astie@vates.tech> > --- > xen/arch/x86/hvm/hvm.c | 3 ++- > xen/arch/x86/hvm/svm/svm.c | 2 +- > xen/arch/x86/hvm/viridian/viridian.c | 2 +- > xen/arch/x86/hvm/vmx/vmx.c | 2 +- > 4 files changed, 5 insertions(+), 4 deletions(-) > > diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c > index 4cb2e13046..5c3e9ad72e 100644 > --- a/xen/arch/x86/hvm/hvm.c > +++ b/xen/arch/x86/hvm/hvm.c > @@ -1736,7 +1736,8 @@ void hvm_hlt(unsigned int eflags) > if ( unlikely(!(eflags & X86_EFLAGS_IF)) ) > return hvm_vcpu_down(curr); > > - do_sched_op(SCHEDOP_block, guest_handle_from_ptr(NULL, void)); > + local_event_delivery_enable(); > + vcpu_block(); I think it would be better to export and call vcpu_block_enable_events(). This ensures they stay in-sync. The vcpu_yield() conversion looks good to me. Regards, Jason > > TRACE(TRC_HVM_HLT, /* pending = */ vcpu_runnable(curr)); > }
On 22/07/2025 1:05 pm, Jason Andryuk wrote: > On 2025-07-22 14:07, Teddy Astie wrote: >> do_sched_op(SCHEDOP_yield) just calls vcpu_yield(). Remove the >> indirection >> through the hypercall handler and use the function directly. >> >> Perform the same for SCHEDOP_block. >> >> Not a functional change. >> >> Signed-off-by: Teddy Astie <teddy.astie@vates.tech> >> --- >> xen/arch/x86/hvm/hvm.c | 3 ++- >> xen/arch/x86/hvm/svm/svm.c | 2 +- >> xen/arch/x86/hvm/viridian/viridian.c | 2 +- >> xen/arch/x86/hvm/vmx/vmx.c | 2 +- >> 4 files changed, 5 insertions(+), 4 deletions(-) >> >> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c >> index 4cb2e13046..5c3e9ad72e 100644 >> --- a/xen/arch/x86/hvm/hvm.c >> +++ b/xen/arch/x86/hvm/hvm.c >> @@ -1736,7 +1736,8 @@ void hvm_hlt(unsigned int eflags) >> if ( unlikely(!(eflags & X86_EFLAGS_IF)) ) >> return hvm_vcpu_down(curr); >> - do_sched_op(SCHEDOP_block, guest_handle_from_ptr(NULL, void)); >> + local_event_delivery_enable(); >> + vcpu_block(); > > I think it would be better to export and call > vcpu_block_enable_events(). This ensures they stay in-sync. > > The vcpu_yield() conversion looks good to me. Agreed on both points. ~Andrew
© 2016 - 2025 Red Hat, Inc.