cpu->exit_request does not use a load-acquire/store-release pair right now,
which means that cpu_exit() is not guaranteed to store it after any flags
that are read in the slow path.
Probably everything is protected one way or the other by the BQL, because
after reading cpu->exit_request the CPU thread often goes to sleep
(by waiting on the BQL-protected cpu->halt_cond), but it's not clear.
Use load-acquire/store-release consistently.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
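Note for reviewers: the pattern this patch adopts is the standard
release/acquire publication idiom. Below is a minimal standalone C11
sketch of the guarantee involved; it is not QEMU code, and the names
(exit_request, exit_reason, the thread setup) are invented for
illustration, assuming C11 <threads.h> is available:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <threads.h>

static atomic_bool exit_request;
static int exit_reason;  /* plain (non-atomic) data published by the release store */

static int requester(void *arg)
{
    (void)arg;
    exit_reason = 42;    /* write the "why" first... */
    /* ...then publish it; pairs with the load-acquire below. */
    atomic_store_explicit(&exit_request, true, memory_order_release);
    return 0;
}

static int vcpu_thread(void *arg)
{
    (void)arg;
    /* Once this load-acquire reads true, the earlier store to
     * exit_reason is guaranteed to be visible as well. */
    while (!atomic_load_explicit(&exit_request, memory_order_acquire)) {
        thrd_yield();
    }
    printf("exit reason: %d\n", exit_reason);  /* always prints 42 */
    return 0;
}

int main(void)
{
    thrd_t t1, t2;
    thrd_create(&t1, requester, NULL);
    thrd_create(&t2, vcpu_thread, NULL);
    thrd_join(t1, NULL);
    thrd_join(t2, NULL);
    return 0;
}

The store-release publishes every write that precedes it (here,
exit_reason), and a thread that observes the flag through a
load-acquire is guaranteed to see those writes too; that is exactly
the "flags that are read in the slow path" ordering the commit
message is after.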
 accel/kvm/kvm-all.c          | 19 +++++++++----------
 accel/tcg/cpu-exec.c         |  7 +++++--
 accel/tcg/tcg-accel-ops-rr.c |  2 +-
 hw/core/cpu-common.c         |  3 ++-
 target/i386/nvmm/nvmm-all.c  |  5 ++---
 target/i386/whpx/whpx-all.c  |  3 ++-
 6 files changed, 21 insertions(+), 18 deletions(-)
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 890d5ea9f86..57e35960125 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -3029,10 +3029,6 @@ static void kvm_eat_signals(CPUState *cpu)
 
     if (kvm_immediate_exit) {
         qatomic_set(&cpu->kvm_run->immediate_exit, 0);
-        /* Write kvm_run->immediate_exit before the cpu->exit_request
-         * write in kvm_cpu_exec.
-         */
-        smp_wmb();
         return;
     }
 
@@ -3187,7 +3183,8 @@ int kvm_cpu_exec(CPUState *cpu)
         }
 
         kvm_arch_pre_run(cpu, run);
-        if (qatomic_read(&cpu->exit_request)) {
+        /* Corresponding store-release is in cpu_exit. */
+        if (qatomic_load_acquire(&cpu->exit_request)) {
             trace_kvm_interrupt_exit_request();
             /*
              * KVM requires us to reenter the kernel after IO exits to complete
@@ -3197,13 +3194,15 @@ int kvm_cpu_exec(CPUState *cpu)
             kvm_cpu_kick_self();
         }
 
-        /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
-         * Matching barrier in kvm_eat_signals.
-         */
-        smp_rmb();
-
         run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
+        /*
+         * After writing cpu->exit_request, cpu_exit() sends a signal that
+         * writes run->immediate_exit. The signal always happens after the
+         * write to cpu->exit_request, so if KVM reads run->immediate_exit
+         * as true, cpu->exit_request will also read as true.
+         */
+
 
         attrs = kvm_arch_post_run(cpu, run);
 
 #ifdef KVM_HAVE_MCE_INJECTION
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 82867f456c1..4bd9ee01c19 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -849,8 +849,11 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         bql_unlock();
     }
 
-    /* Finally, check if we need to exit to the main loop. */
-    if (unlikely(qatomic_read(&cpu->exit_request)) || icount_exit_request(cpu)) {
+    /*
+     * Finally, check if we need to exit to the main loop.
+     * The corresponding store-release is in cpu_exit.
+     */
+    if (unlikely(qatomic_load_acquire(&cpu->exit_request)) || icount_exit_request(cpu)) {
         qatomic_set(&cpu->exit_request, 0);
         if (cpu->exception_index == -1) {
             cpu->exception_index = EXCP_INTERRUPT;
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index 6eec5c9eee9..1e551e92d6d 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -242,7 +242,7 @@ static void *rr_cpu_thread_fn(void *arg)
             cpu = first_cpu;
         }
 
-        while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
+        while (cpu && cpu_work_list_empty(cpu) && !qatomic_load_acquire(&cpu->exit_request)) {
             /* Store rr_current_cpu before evaluating cpu_can_run(). */
             qatomic_set_mb(&rr_current_cpu, cpu);
 
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index 39e674aca21..f189ce861c9 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -84,7 +84,8 @@ void cpu_reset_interrupt(CPUState *cpu, int mask)
 
 void cpu_exit(CPUState *cpu)
 {
-    qatomic_set(&cpu->exit_request, 1);
+    /* Ensure cpu_exec will see the reason why the exit request was set. */
+    qatomic_store_release(&cpu->exit_request, 1);
     /* Ensure cpu_exec will see the exit request after TCG has exited. */
     smp_wmb();
     qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c
index c1ac74c4f04..a5e3485c1f8 100644
--- a/target/i386/nvmm/nvmm-all.c
+++ b/target/i386/nvmm/nvmm-all.c
@@ -743,7 +743,8 @@ nvmm_vcpu_loop(CPUState *cpu)
 
             nvmm_vcpu_pre_run(cpu);
 
-            if (qatomic_read(&cpu->exit_request)) {
+            /* Corresponding store-release is in cpu_exit. */
+            if (qatomic_load_acquire(&cpu->exit_request)) {
 #if NVMM_USER_VERSION >= 2
                 nvmm_vcpu_stop(vcpu);
 #else
@@ -751,8 +752,6 @@ nvmm_vcpu_loop(CPUState *cpu)
 #endif
             }
 
-            /* Read exit_request before the kernel reads the immediate exit flag */
-            smp_rmb();
             ret = nvmm_vcpu_run(mach, vcpu);
             if (ret == -1) {
                 error_report("NVMM: Failed to exec a virtual processor,"
diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c
index 878cdd1668c..9f88e368d4d 100644
--- a/target/i386/whpx/whpx-all.c
+++ b/target/i386/whpx/whpx-all.c
@@ -1714,7 +1714,8 @@ static int whpx_vcpu_run(CPUState *cpu)
         if (exclusive_step_mode == WHPX_STEP_NONE) {
             whpx_vcpu_pre_run(cpu);
 
-            if (qatomic_read(&cpu->exit_request)) {
+            /* Corresponding store-release is in cpu_exit. */
+            if (qatomic_load_acquire(&cpu->exit_request)) {
                 whpx_vcpu_kick(cpu);
             }
         }
--
2.50.1
On Fri, Aug 08, 2025 at 08:58:58PM +0200, Paolo Bonzini wrote:
> cpu->exit_request does not use a load-acquire/store-release pair right now,
> which means that cpu_exit() is not guaranteed to store it after any flags
> that are read in the slow path.
>
> Probably everything is protected one way or the other by the BQL, because
> after reading cpu->exit_request the CPU thread often goes to sleep
> (by waiting on the BQL-protected cpu->halt_cond), but it's not clear.
> Use load-acquire/store-release consistently.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Reviewed-by: Peter Xu <peterx@redhat.com>

--
Peter Xu
On 8/9/25 04:58, Paolo Bonzini wrote:
> cpu->exit_request does not use a load-acquire/store-release pair right now,
> which means that cpu_exit() is not guaranteed to store it after any flags
> that are read in the slow path.
>
> Probably everything is protected one way or the other by the BQL, because
> after reading cpu->exit_request the CPU thread often goes to sleep
> (by waiting on the BQL-protected cpu->halt_cond), but it's not clear.
> Use load-acquire/store-release consistently.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  accel/kvm/kvm-all.c          | 19 +++++++++----------
>  accel/tcg/cpu-exec.c         |  7 +++++--
>  accel/tcg/tcg-accel-ops-rr.c |  2 +-
>  hw/core/cpu-common.c         |  3 ++-
>  target/i386/nvmm/nvmm-all.c  |  5 ++---
>  target/i386/whpx/whpx-all.c  |  3 ++-
>  6 files changed, 21 insertions(+), 18 deletions(-)

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>

> diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
> index 39e674aca21..f189ce861c9 100644
> --- a/hw/core/cpu-common.c
> +++ b/hw/core/cpu-common.c
> @@ -84,7 +84,8 @@ void cpu_reset_interrupt(CPUState *cpu, int mask)
>
>  void cpu_exit(CPUState *cpu)
>  {
> -    qatomic_set(&cpu->exit_request, 1);
> +    /* Ensure cpu_exec will see the reason why the exit request was set. */
> +    qatomic_store_release(&cpu->exit_request, 1);

While you're touching these lines: since exit_request is bool, let's use true
(and elsewhere in other patches, false).


r~
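Presumably the spelling Richard has in mind is something like:

    qatomic_store_release(&cpu->exit_request, true);

i.e. only the 1 -> true change, keeping the release semantics introduced
by this patch.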