There is no reason for some accelerators (NVMM and WHPX) to use
qemu_process_cpu_events_common(), which is split out of
qemu_process_cpu_events() only for the sake of round-robin TCG. They can
also check for pending events directly on the first pass through the vCPU
loop, rather than forcing that check by pre-setting cpu->exit_request to
true.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
accel/dummy-cpus.c | 2 +-
accel/hvf/hvf-accel-ops.c | 2 +-
accel/kvm/kvm-accel-ops.c | 3 ++-
accel/tcg/tcg-accel-ops-mttcg.c | 7 ++---
accel/tcg/tcg-accel-ops-rr.c | 43 ++++++++++++++-----------------
target/i386/nvmm/nvmm-accel-ops.c | 6 ++---
target/i386/whpx/whpx-accel-ops.c | 6 ++---
7 files changed, 30 insertions(+), 39 deletions(-)
diff --git a/accel/dummy-cpus.c b/accel/dummy-cpus.c
index 225a47c31fd..5752f6302c8 100644
--- a/accel/dummy-cpus.c
+++ b/accel/dummy-cpus.c
@@ -43,6 +43,7 @@ static void *dummy_cpu_thread_fn(void *arg)
qemu_guest_random_seed_thread_part2(cpu->random_seed);
do {
+ qemu_process_cpu_events(cpu);
bql_unlock();
#ifndef _WIN32
do {
@@ -57,7 +58,6 @@ static void *dummy_cpu_thread_fn(void *arg)
qemu_sem_wait(&cpu->sem);
#endif
bql_lock();
- qemu_process_cpu_events(cpu);
} while (!cpu->unplug);
bql_unlock();
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index 7a27bdadb4f..8b794c2d418 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -192,13 +192,13 @@ static void *hvf_cpu_thread_fn(void *arg)
qemu_guest_random_seed_thread_part2(cpu->random_seed);
do {
+ qemu_process_cpu_events(cpu);
if (cpu_can_run(cpu)) {
r = hvf_vcpu_exec(cpu);
if (r == EXCP_DEBUG) {
cpu_handle_guest_debug(cpu);
}
}
- qemu_process_cpu_events(cpu);
} while (!cpu->unplug || cpu_can_run(cpu));
hvf_vcpu_destroy(cpu);
diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c
index 65a7f76a69a..8ed6945c2f7 100644
--- a/accel/kvm/kvm-accel-ops.c
+++ b/accel/kvm/kvm-accel-ops.c
@@ -47,13 +47,14 @@ static void *kvm_vcpu_thread_fn(void *arg)
qemu_guest_random_seed_thread_part2(cpu->random_seed);
do {
+ qemu_process_cpu_events(cpu);
+
if (cpu_can_run(cpu)) {
r = kvm_cpu_exec(cpu);
if (r == EXCP_DEBUG) {
cpu_handle_guest_debug(cpu);
}
}
- qemu_process_cpu_events(cpu);
} while (!cpu->unplug || cpu_can_run(cpu));
kvm_destroy_vcpu(cpu);
diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
index 342917c5f6e..cf1ee7ac258 100644
--- a/accel/tcg/tcg-accel-ops-mttcg.c
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
@@ -84,10 +84,9 @@ static void *mttcg_cpu_thread_fn(void *arg)
cpu_thread_signal_created(cpu);
qemu_guest_random_seed_thread_part2(cpu->random_seed);
- /* process any pending work */
- qatomic_set(&cpu->exit_request, true);
-
do {
+ qemu_process_cpu_events(cpu);
+
if (cpu_can_run(cpu)) {
int r;
bql_unlock();
@@ -112,8 +111,6 @@ static void *mttcg_cpu_thread_fn(void *arg)
break;
}
}
-
- qemu_process_cpu_events(cpu);
} while (!cpu->unplug || cpu_can_run(cpu));
tcg_cpu_destroy(cpu);
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index 7dbdba7b514..2fb46439971 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -211,13 +211,30 @@ static void *rr_cpu_thread_fn(void *arg)
cpu = first_cpu;
- /* process any pending work */
- qatomic_set(&cpu->exit_request, true);
-
while (1) {
/* Only used for icount_enabled() */
int64_t cpu_budget = 0;
+ if (cpu) {
+ /*
+ * This could even reset exit_request for all CPUs, but in practice
+ * races between CPU exits and changes to "cpu" are so rare that
+ * there's no advantage in doing so.
+ */
+ qatomic_set(&cpu->exit_request, false);
+ }
+
+ if (icount_enabled() && all_cpu_threads_idle()) {
+ /*
+ * When all cpus are sleeping (e.g in WFI), to avoid a deadlock
+ * in the main_loop, wake it up in order to start the warp timer.
+ */
+ qemu_notify_event();
+ }
+
+ rr_wait_io_event();
+ rr_deal_with_unplugged_cpus();
+
bql_unlock();
replay_mutex_lock();
bql_lock();
@@ -292,26 +309,6 @@ static void *rr_cpu_thread_fn(void *arg)
/* Does not need a memory barrier because a spurious wakeup is okay. */
qatomic_set(&rr_current_cpu, NULL);
-
- if (cpu) {
- /*
- * This could even reset exit_request for all CPUs, but in practice
- * races between CPU exits and changes to "cpu" are so rare that
- * there's no advantage in doing so.
- */
- qatomic_set(&cpu->exit_request, false);
- }
-
- if (icount_enabled() && all_cpu_threads_idle()) {
- /*
- * When all cpus are sleeping (e.g in WFI), to avoid a deadlock
- * in the main_loop, wake it up in order to start the warp timer.
- */
- qemu_notify_event();
- }
-
- rr_wait_io_event();
- rr_deal_with_unplugged_cpus();
}
g_assert_not_reached();
diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c
index d066364b989..dd5d5428b1c 100644
--- a/target/i386/nvmm/nvmm-accel-ops.c
+++ b/target/i386/nvmm/nvmm-accel-ops.c
@@ -42,16 +42,14 @@ static void *qemu_nvmm_cpu_thread_fn(void *arg)
qemu_guest_random_seed_thread_part2(cpu->random_seed);
do {
+ qemu_process_cpu_events(cpu);
+
if (cpu_can_run(cpu)) {
r = nvmm_vcpu_exec(cpu);
if (r == EXCP_DEBUG) {
cpu_handle_guest_debug(cpu);
}
}
- while (cpu_thread_is_idle(cpu)) {
- qemu_cond_wait_bql(cpu->halt_cond);
- }
- qemu_process_cpu_events_common(cpu);
} while (!cpu->unplug || cpu_can_run(cpu));
nvmm_destroy_vcpu(cpu);
diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c
index 2ca4ee02636..f75886128d0 100644
--- a/target/i386/whpx/whpx-accel-ops.c
+++ b/target/i386/whpx/whpx-accel-ops.c
@@ -42,16 +42,14 @@ static void *whpx_cpu_thread_fn(void *arg)
qemu_guest_random_seed_thread_part2(cpu->random_seed);
do {
+ qemu_process_cpu_events(cpu);
+
if (cpu_can_run(cpu)) {
r = whpx_vcpu_exec(cpu);
if (r == EXCP_DEBUG) {
cpu_handle_guest_debug(cpu);
}
}
- while (cpu_thread_is_idle(cpu)) {
- qemu_cond_wait_bql(cpu->halt_cond);
- }
- qemu_process_cpu_events_common(cpu);
} while (!cpu->unplug || cpu_can_run(cpu));
whpx_destroy_vcpu(cpu);
--
2.51.0