A TCG vCPU doing a busy loop systematically hangs the QEMU monitor
if the user passes 'device_add' without argument. This is because
drain_cpu_all() which is called from qmp_device_add() cannot return
if readers don't exit read-side critical sections. That is typically
what busy-looping TCG vCPUs do:
int cpu_exec(CPUState *cpu)
{
[...]
rcu_read_lock();
[...]
while (!cpu_handle_exception(cpu, &ret)) {
// Busy loop keeps vCPU here
}
[...]
rcu_read_unlock();
return ret;
}
Have all vCPU threads register a force_rcu notifier that will kick them
out of the loop using async_run_on_cpu(). The notifier is called with the
rcu_registry_lock mutex held; using async_run_on_cpu() ensures there are
no deadlocks.
Note that when running in round-robin mode, this means that we register
only one notifier which corresponds to the first vCPU. This is okay
since calling async_run_on_cpu() on any vCPU is enough to kick any
other vCPU from execution.
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Fixes: 7bed89958bfb ("device_core: use drain_call_rcu in in qmp_device_add")
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/650
Signed-off-by: Greg Kurz <groug@kaod.org>
---
accel/tcg/tcg-accel-ops-mttcg.c | 26 ++++++++++++++++++++++++++
accel/tcg/tcg-accel-ops-rr.c | 18 ++++++++++++++++++
2 files changed, 44 insertions(+)
diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
index 847d2079d21f..29632bd4c0af 100644
--- a/accel/tcg/tcg-accel-ops-mttcg.c
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
@@ -28,6 +28,7 @@
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "qemu/main-loop.h"
+#include "qemu/notify.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"
#include "hw/boards.h"
@@ -35,6 +36,26 @@
#include "tcg-accel-ops.h"
#include "tcg-accel-ops-mttcg.h"
+typedef struct MttcgForceRcuNotifier {
+ Notifier notifier;
+ CPUState *cpu;
+} MttcgForceRcuNotifier;
+
+static void do_nothing(CPUState *cpu, run_on_cpu_data d)
+{
+}
+
+static void mttcg_force_rcu(Notifier *notify, void *data)
+{
+ CPUState *cpu = container_of(notify, MttcgForceRcuNotifier, notifier)->cpu;
+
+ /*
+ * Called with rcu_registry_lock held, using async_run_on_cpu() ensures
+ * that there are no deadlocks.
+ */
+ async_run_on_cpu(cpu, do_nothing, RUN_ON_CPU_NULL);
+}
+
/*
* In the multi-threaded case each vCPU has its own thread. The TLS
* variable current_cpu can be used deep in the code to find the
@@ -43,12 +64,16 @@
static void *mttcg_cpu_thread_fn(void *arg)
{
+ MttcgForceRcuNotifier force_rcu;
CPUState *cpu = arg;
assert(tcg_enabled());
g_assert(!icount_enabled());
rcu_register_thread();
+ force_rcu.notifier.notify = mttcg_force_rcu;
+ force_rcu.cpu = cpu;
+ rcu_add_force_rcu_notifier(&force_rcu.notifier);
tcg_register_thread();
qemu_mutex_lock_iothread();
@@ -100,6 +125,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
tcg_cpus_destroy(cpu);
qemu_mutex_unlock_iothread();
+ rcu_remove_force_rcu_notifier(&force_rcu.notifier);
rcu_unregister_thread();
return NULL;
}
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index a5fd26190e20..934ac21d79b5 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -28,6 +28,7 @@
#include "sysemu/tcg.h"
#include "sysemu/replay.h"
#include "qemu/main-loop.h"
+#include "qemu/notify.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"
@@ -133,6 +134,19 @@ static void rr_deal_with_unplugged_cpus(void)
}
}
+static void do_nothing(CPUState *cpu, run_on_cpu_data d)
+{
+}
+
+static void rr_force_rcu(Notifier *notify, void *data)
+{
+ /*
+ * Called with rcu_registry_lock held, using async_run_on_cpu() ensures
+ * that there are no deadlocks.
+ */
+ async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
+}
+
/*
* In the single-threaded case each vCPU is simulated in turn. If
* there is more than a single vCPU we create a simple timer to kick
@@ -143,10 +157,13 @@ static void rr_deal_with_unplugged_cpus(void)
static void *rr_cpu_thread_fn(void *arg)
{
+ Notifier force_rcu;
CPUState *cpu = arg;
assert(tcg_enabled());
rcu_register_thread();
+ force_rcu.notify = rr_force_rcu;
+ rcu_add_force_rcu_notifier(&force_rcu);
tcg_register_thread();
qemu_mutex_lock_iothread();
@@ -255,6 +272,7 @@ static void *rr_cpu_thread_fn(void *arg)
rr_deal_with_unplugged_cpus();
}
+ rcu_remove_force_rcu_notifier(&force_rcu);
rcu_unregister_thread();
return NULL;
}
--
2.31.1
On 11/8/21 12:33 PM, Greg Kurz wrote:
> +static void rr_force_rcu(Notifier *notify, void *data)
> +{
> + /*
> + * Called with rcu_registry_lock held, using async_run_on_cpu() ensures
> + * that there are no deadlocks.
> + */
> + async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
> +}
Should first_cpu really be rr_current_cpu?
It's not clear to me that this will work for -smp 2 -cpu thread=single.
r~
On 11/9/21 8:54 AM, Richard Henderson wrote:
> On 11/8/21 12:33 PM, Greg Kurz wrote:
>> +static void rr_force_rcu(Notifier *notify, void *data)
>> +{
>> + /*
>> + * Called with rcu_registry_lock held, using async_run_on_cpu() ensures
>> + * that there are no deadlocks.
>> + */
>> + async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
>> +}
>
> Should first_cpu really be rr_current_cpu?
> It's not clear to me that this will work for -smp 2 -cpu thread=single.
Alternately, no async_run_on_cpu at all, just rr_kick_next_cpu().
r~
On Tue, 9 Nov 2021 09:21:27 +0100
Richard Henderson <richard.henderson@linaro.org> wrote:
> On 11/9/21 8:54 AM, Richard Henderson wrote:
> > On 11/8/21 12:33 PM, Greg Kurz wrote:
> >> +static void rr_force_rcu(Notifier *notify, void *data)
> >> +{
> >> + /*
> >> + * Called with rcu_registry_lock held, using async_run_on_cpu() ensures
> >> + * that there are no deadlocks.
> >> + */
> >> + async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
> >> +}
> >
> > Should first_cpu really be rr_current_cpu?
> > It's not clear to me that this will work for -smp 2 -cpu thread=single.
>
Why wouldn't it work ? IIUC we always have a first_cpu and
async_run_on_cpu() will kick any vCPU currently run by the
RR thread... or am I missing something ?
Anyway, it seems more explicit to use rr_current_cpu.
> Alternately, no async_run_on_cpu at all, just rr_kick_next_cpu().
>
Heh, this looks even better ! I'll try this right away.
Thanks Richard !
--
Greg
>
> r~
>
On 11/9/21 18:24, Greg Kurz wrote:
> Anyway, it seems more explicit to use rr_current_cpu.
>
>> Alternately, no async_run_on_cpu at all, just rr_kick_next_cpu().
>>
>
> Heh, this looks even better ! I'll try this right away.
Once you've tested it I can queue the series with just a
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -141,10 +141,10 @@ static void do_nothing(CPUState *cpu, run_on_cpu_data d)
static void rr_force_rcu(Notifier *notify, void *data)
{
/*
- * Called with rcu_registry_lock held, using async_run_on_cpu() ensures
- * that there are no deadlocks.
+ * Called with rcu_registry_lock held. rr_kick_next_cpu() is
+ * asynchronous, so there cannot be deadlocks.
*/
- async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
+ rr_kick_next_cpu();
}
/*
squashed in.
Paolo
On Tue, 9 Nov 2021 19:03:56 +0100
Paolo Bonzini <pbonzini@redhat.com> wrote:
> On 11/9/21 18:24, Greg Kurz wrote:
> > Anyway, it seems more explicit to use rr_current_cpu.
> >
> >> Alternately, no async_run_on_cpu at all, just rr_kick_next_cpu().
> >>
> >
> > Heh, this looks even better ! I'll try this right away.
>
> Once you've tested it I can queue the series with just a
>
> --- a/accel/tcg/tcg-accel-ops-rr.c
> +++ b/accel/tcg/tcg-accel-ops-rr.c
> @@ -141,10 +141,10 @@ static void do_nothing(CPUState *cpu, run_on_cpu_data d)
> static void rr_force_rcu(Notifier *notify, void *data)
> {
> /*
> - * Called with rcu_registry_lock held, using async_run_on_cpu() ensures
> - * that there are no deadlocks.
> + * Called with rcu_registry_lock held. rr_kick_next_cpu() is
> + * asynchronous, so there cannot be deadlocks.
> */
> - async_run_on_cpu(first_cpu, do_nothing, RUN_ON_CPU_NULL);
> + rr_kick_next_cpu();
> }
>
> /*
>
> squashed in.
>
I've tested and it works just fine. I need to send a v4 anyway so that
the commit message is in sync with the code changes.
Cheers,
--
Greg
> Paolo
>
© 2016 - 2026 Red Hat, Inc.