This completes the conversion to cpu_mutex_lock/unlock in cpus-common.c.

The body of process_queued_cpu_work() is moved into a static
process_queued_cpu_work_locked() helper that is entered with the CPU
mutex held, its remaining qemu_mutex_lock/unlock calls on cpu->lock are
converted to cpu_mutex_lock/unlock, and process_queued_cpu_work() itself
becomes a thin wrapper that takes and releases the lock around the helper.
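For illustration, this is the locking discipline the split is meant to
enable (a sketch only, not part of the patch; since
process_queued_cpu_work_locked() is static, the _locked variant is only
reachable from code within cpus-common.c):

    /* Callers that do not hold the CPU mutex keep using the public
     * entry point, which now takes and drops the lock internally.
     */
    process_queued_cpu_work(cpu);

    /* File-local code that already holds the lock via cpu_mutex_lock()
     * can call the lock-held variant directly instead of dropping and
     * re-taking the mutex.
     */
    cpu_mutex_lock(cpu);
    process_queued_cpu_work_locked(cpu);
    cpu_mutex_unlock(cpu);
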
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
---
cpus-common.c | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/cpus-common.c b/cpus-common.c
index 3fccee5585..c2ad554d54 100644
--- a/cpus-common.c
+++ b/cpus-common.c
@@ -337,20 +337,19 @@ void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
     queue_work_on_cpu(cpu, wi);
 }
 
-void process_queued_cpu_work(CPUState *cpu)
+/* Called with the CPU's lock held */
+static void process_queued_cpu_work_locked(CPUState *cpu)
 {
     struct qemu_work_item *wi;
     bool has_bql = qemu_mutex_iothread_locked();
 
-    qemu_mutex_lock(&cpu->lock);
     if (QSIMPLEQ_EMPTY(&cpu->work_list)) {
-        qemu_mutex_unlock(&cpu->lock);
         return;
     }
     while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
         wi = QSIMPLEQ_FIRST(&cpu->work_list);
         QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
-        qemu_mutex_unlock(&cpu->lock);
+        cpu_mutex_unlock(cpu);
         if (wi->exclusive) {
             /* Running work items outside the BQL avoids the following deadlock:
              * 1) start_exclusive() is called with the BQL taken while another
@@ -376,13 +375,19 @@ void process_queued_cpu_work(CPUState *cpu)
                 qemu_mutex_unlock_iothread();
             }
         }
-        qemu_mutex_lock(&cpu->lock);
+        cpu_mutex_lock(cpu);
         if (wi->free) {
             g_free(wi);
         } else {
             atomic_mb_set(&wi->done, true);
         }
     }
-    qemu_mutex_unlock(&cpu->lock);
     qemu_cond_broadcast(&cpu->cond);
 }
+
+void process_queued_cpu_work(CPUState *cpu)
+{
+    cpu_mutex_lock(cpu);
+    process_queued_cpu_work_locked(cpu);
+    cpu_mutex_unlock(cpu);
+}
--
2.17.1