Aside from the round robin threads this is all common code. By
moving the halt_cond setup we also no longer need hacks to work around
the race between QOM object creation and thread creation.
It is a little ugly to free stuff up for the round robin thread but
better that it deal with its own special cases than making the other
accelerators jump through hoops.
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
include/hw/core/cpu.h | 4 ++++
accel/dummy-cpus.c | 3 ---
accel/hvf/hvf-accel-ops.c | 4 ----
accel/kvm/kvm-accel-ops.c | 3 ---
accel/tcg/tcg-accel-ops-mttcg.c | 4 ----
accel/tcg/tcg-accel-ops-rr.c | 14 +++++++-------
hw/core/cpu-common.c | 5 +++++
target/i386/nvmm/nvmm-accel-ops.c | 3 ---
target/i386/whpx/whpx-accel-ops.c | 3 ---
9 files changed, 16 insertions(+), 27 deletions(-)
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 35d345371b..a405119eda 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -404,10 +404,14 @@ struct qemu_work_item;
* @tcg_cflags: Pre-computed cflags for this cpu.
* @nr_cores: Number of cores within this CPU package.
* @nr_threads: Number of threads within this CPU core.
+ * @thread: Host thread details, only live once @created is #true
+ * @sem: WIN32-only semaphore, used only for qtest
+ * @thread_id: native thread id of vCPU, only live once @created is #true
* @running: #true if CPU is currently running (lockless).
* @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;
* valid under cpu_list_lock.
* @created: Indicates whether the CPU thread has been successfully created.
+ * @halt_cond: condition variable sleeping threads can wait on.
* @interrupt_request: Indicates a pending interrupt request.
* @halted: Nonzero if the CPU is in suspended state.
* @stop: Indicates a pending stop request.
diff --git a/accel/dummy-cpus.c b/accel/dummy-cpus.c
index 20519f1ea4..f32d8c8dc3 100644
--- a/accel/dummy-cpus.c
+++ b/accel/dummy-cpus.c
@@ -68,9 +68,6 @@ void dummy_start_vcpu_thread(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];
- cpu->thread = g_malloc0(sizeof(QemuThread));
- cpu->halt_cond = g_malloc0(sizeof(QemuCond));
- qemu_cond_init(cpu->halt_cond);
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
cpu->cpu_index);
qemu_thread_create(cpu->thread, thread_name, dummy_cpu_thread_fn, cpu,
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index 40d4187d9d..6f1e27ef46 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -463,10 +463,6 @@ static void hvf_start_vcpu_thread(CPUState *cpu)
*/
assert(hvf_enabled());
- cpu->thread = g_malloc0(sizeof(QemuThread));
- cpu->halt_cond = g_malloc0(sizeof(QemuCond));
- qemu_cond_init(cpu->halt_cond);
-
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
cpu->cpu_index);
qemu_thread_create(cpu->thread, thread_name, hvf_cpu_thread_fn,
diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c
index 94c828ac8d..c239dfc87a 100644
--- a/accel/kvm/kvm-accel-ops.c
+++ b/accel/kvm/kvm-accel-ops.c
@@ -66,9 +66,6 @@ static void kvm_start_vcpu_thread(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];
- cpu->thread = g_malloc0(sizeof(QemuThread));
- cpu->halt_cond = g_malloc0(sizeof(QemuCond));
- qemu_cond_init(cpu->halt_cond);
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
cpu->cpu_index);
qemu_thread_create(cpu->thread, thread_name, kvm_vcpu_thread_fn,
diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
index c552b45b8e..49814ec4af 100644
--- a/accel/tcg/tcg-accel-ops-mttcg.c
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
@@ -137,10 +137,6 @@ void mttcg_start_vcpu_thread(CPUState *cpu)
g_assert(tcg_enabled());
tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1);
- cpu->thread = g_new0(QemuThread, 1);
- cpu->halt_cond = g_malloc0(sizeof(QemuCond));
- qemu_cond_init(cpu->halt_cond);
-
/* create a thread per vCPU with TCG (MTTCG) */
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
cpu->cpu_index);
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index 894e73e52c..84c36c1450 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -317,22 +317,22 @@ void rr_start_vcpu_thread(CPUState *cpu)
tcg_cpu_init_cflags(cpu, false);
if (!single_tcg_cpu_thread) {
- cpu->thread = g_new0(QemuThread, 1);
- cpu->halt_cond = g_new0(QemuCond, 1);
- qemu_cond_init(cpu->halt_cond);
+ single_tcg_halt_cond = cpu->halt_cond;
+ single_tcg_cpu_thread = cpu->thread;
/* share a single thread for all cpus with TCG */
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
qemu_thread_create(cpu->thread, thread_name,
rr_cpu_thread_fn,
cpu, QEMU_THREAD_JOINABLE);
-
- single_tcg_halt_cond = cpu->halt_cond;
- single_tcg_cpu_thread = cpu->thread;
} else {
- /* we share the thread */
+ /* we share the thread, dump spare data */
+ g_free(cpu->thread);
+ qemu_cond_destroy(cpu->halt_cond);
cpu->thread = single_tcg_cpu_thread;
cpu->halt_cond = single_tcg_halt_cond;
+
+ /* copy the stuff done at start of rr_cpu_thread_fn */
cpu->thread_id = first_cpu->thread_id;
cpu->neg.can_do_io = 1;
cpu->created = true;
diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c
index 0f0a247f56..6cfc01593a 100644
--- a/hw/core/cpu-common.c
+++ b/hw/core/cpu-common.c
@@ -261,6 +261,11 @@ static void cpu_common_initfn(Object *obj)
cpu->nr_threads = 1;
cpu->cflags_next_tb = -1;
+ /* allocate storage for thread info, initialise condition variables */
+ cpu->thread = g_new0(QemuThread, 1);
+ cpu->halt_cond = g_new0(QemuCond, 1);
+ qemu_cond_init(cpu->halt_cond);
+
qemu_mutex_init(&cpu->work_mutex);
qemu_lockcnt_init(&cpu->in_ioctl_lock);
QSIMPLEQ_INIT(&cpu->work_list);
diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c
index 6b2bfd9b9c..0ba31201e2 100644
--- a/target/i386/nvmm/nvmm-accel-ops.c
+++ b/target/i386/nvmm/nvmm-accel-ops.c
@@ -64,9 +64,6 @@ static void nvmm_start_vcpu_thread(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];
- cpu->thread = g_new0(QemuThread, 1);
- cpu->halt_cond = g_new0(QemuCond, 1);
- qemu_cond_init(cpu->halt_cond);
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/NVMM",
cpu->cpu_index);
qemu_thread_create(cpu->thread, thread_name, qemu_nvmm_cpu_thread_fn,
diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c
index 189ae0f140..1a2b4e1c43 100644
--- a/target/i386/whpx/whpx-accel-ops.c
+++ b/target/i386/whpx/whpx-accel-ops.c
@@ -64,9 +64,6 @@ static void whpx_start_vcpu_thread(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];
- cpu->thread = g_new0(QemuThread, 1);
- cpu->halt_cond = g_new0(QemuCond, 1);
- qemu_cond_init(cpu->halt_cond);
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX",
cpu->cpu_index);
qemu_thread_create(cpu->thread, thread_name, whpx_cpu_thread_fn,
--
2.39.2
On 30/5/24 21:42, Alex Bennée wrote: > Aside from the round robin threads this is all common code. By > moving the halt_cond setup we also no longer need hacks to work around > the race between QOM object creation and thread creation. > > It is a little ugly to free stuff up for the round robin thread but > better it deal with its own specialises than making the other > accelerators jump through hoops. > > Signed-off-by: Alex Bennée <alex.bennee@linaro.org> > --- > include/hw/core/cpu.h | 4 ++++ > accel/dummy-cpus.c | 3 --- > accel/hvf/hvf-accel-ops.c | 4 ---- > accel/kvm/kvm-accel-ops.c | 3 --- > accel/tcg/tcg-accel-ops-mttcg.c | 4 ---- > accel/tcg/tcg-accel-ops-rr.c | 14 +++++++------- > hw/core/cpu-common.c | 5 +++++ > target/i386/nvmm/nvmm-accel-ops.c | 3 --- > target/i386/whpx/whpx-accel-ops.c | 3 --- > 9 files changed, 16 insertions(+), 27 deletions(-) > diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c > index 894e73e52c..84c36c1450 100644 > --- a/accel/tcg/tcg-accel-ops-rr.c > +++ b/accel/tcg/tcg-accel-ops-rr.c > @@ -317,22 +317,22 @@ void rr_start_vcpu_thread(CPUState *cpu) > tcg_cpu_init_cflags(cpu, false); > > if (!single_tcg_cpu_thread) { > - cpu->thread = g_new0(QemuThread, 1); > - cpu->halt_cond = g_new0(QemuCond, 1); > - qemu_cond_init(cpu->halt_cond); > + single_tcg_halt_cond = cpu->halt_cond; > + single_tcg_cpu_thread = cpu->thread; > > /* share a single thread for all cpus with TCG */ > snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG"); > qemu_thread_create(cpu->thread, thread_name, > rr_cpu_thread_fn, > cpu, QEMU_THREAD_JOINABLE); > - > - single_tcg_halt_cond = cpu->halt_cond; > - single_tcg_cpu_thread = cpu->thread; > } else { > - /* we share the thread */ > + /* we share the thread, dump spare data */ /* we share the thread, release allocations from cpu_common_initfn() */ > + g_free(cpu->thread); > + qemu_cond_destroy(cpu->halt_cond); > cpu->thread = single_tcg_cpu_thread; > cpu->halt_cond = 
single_tcg_halt_cond; > + > + /* copy the stuff done at start of rr_cpu_thread_fn */ > cpu->thread_id = first_cpu->thread_id; > cpu->neg.can_do_io = 1; > cpu->created = true; Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
On 5/30/24 12:42, Alex Bennée wrote: > Aside from the round robin threads this is all common code. By > moving the halt_cond setup we also no longer need hacks to work around > the race between QOM object creation and thread creation. > > It is a little ugly to free stuff up for the round robin thread but > better it deal with its own specialises than making the other > accelerators jump through hoops. > > Signed-off-by: Alex Bennée <alex.bennee@linaro.org> > --- > include/hw/core/cpu.h | 4 ++++ > accel/dummy-cpus.c | 3 --- > accel/hvf/hvf-accel-ops.c | 4 ---- > accel/kvm/kvm-accel-ops.c | 3 --- > accel/tcg/tcg-accel-ops-mttcg.c | 4 ---- > accel/tcg/tcg-accel-ops-rr.c | 14 +++++++------- > hw/core/cpu-common.c | 5 +++++ > target/i386/nvmm/nvmm-accel-ops.c | 3 --- > target/i386/whpx/whpx-accel-ops.c | 3 --- > 9 files changed, 16 insertions(+), 27 deletions(-) > > diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h > index 35d345371b..a405119eda 100644 > --- a/include/hw/core/cpu.h > +++ b/include/hw/core/cpu.h > @@ -404,10 +404,14 @@ struct qemu_work_item; > * @tcg_cflags: Pre-computed cflags for this cpu. > * @nr_cores: Number of cores within this CPU package. > * @nr_threads: Number of threads within this CPU core. > + * @thread: Host thread details, only live once @created is #true > + * @sem: WIN32 only semaphore used only for qtest > + * @thread_id: native thread id of vCPU, only live once @created is #true > * @running: #true if CPU is currently running (lockless). > * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end; > * valid under cpu_list_lock. > * @created: Indicates whether the CPU thread has been successfully created. > + * @halt_cond: condition variable sleeping threads can wait on. > * @interrupt_request: Indicates a pending interrupt request. > * @halted: Nonzero if the CPU is in suspended state. > * @stop: Indicates a pending stop request. 
> diff --git a/accel/dummy-cpus.c b/accel/dummy-cpus.c > index 20519f1ea4..f32d8c8dc3 100644 > --- a/accel/dummy-cpus.c > +++ b/accel/dummy-cpus.c > @@ -68,9 +68,6 @@ void dummy_start_vcpu_thread(CPUState *cpu) > { > char thread_name[VCPU_THREAD_NAME_SIZE]; > > - cpu->thread = g_malloc0(sizeof(QemuThread)); > - cpu->halt_cond = g_malloc0(sizeof(QemuCond)); > - qemu_cond_init(cpu->halt_cond); > snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY", > cpu->cpu_index); > qemu_thread_create(cpu->thread, thread_name, dummy_cpu_thread_fn, cpu, > diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c > index 40d4187d9d..6f1e27ef46 100644 > --- a/accel/hvf/hvf-accel-ops.c > +++ b/accel/hvf/hvf-accel-ops.c > @@ -463,10 +463,6 @@ static void hvf_start_vcpu_thread(CPUState *cpu) > */ > assert(hvf_enabled()); > > - cpu->thread = g_malloc0(sizeof(QemuThread)); > - cpu->halt_cond = g_malloc0(sizeof(QemuCond)); > - qemu_cond_init(cpu->halt_cond); > - > snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF", > cpu->cpu_index); > qemu_thread_create(cpu->thread, thread_name, hvf_cpu_thread_fn, > diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c > index 94c828ac8d..c239dfc87a 100644 > --- a/accel/kvm/kvm-accel-ops.c > +++ b/accel/kvm/kvm-accel-ops.c > @@ -66,9 +66,6 @@ static void kvm_start_vcpu_thread(CPUState *cpu) > { > char thread_name[VCPU_THREAD_NAME_SIZE]; > > - cpu->thread = g_malloc0(sizeof(QemuThread)); > - cpu->halt_cond = g_malloc0(sizeof(QemuCond)); > - qemu_cond_init(cpu->halt_cond); > snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM", > cpu->cpu_index); > qemu_thread_create(cpu->thread, thread_name, kvm_vcpu_thread_fn, > diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c > index c552b45b8e..49814ec4af 100644 > --- a/accel/tcg/tcg-accel-ops-mttcg.c > +++ b/accel/tcg/tcg-accel-ops-mttcg.c > @@ -137,10 +137,6 @@ void mttcg_start_vcpu_thread(CPUState *cpu) > g_assert(tcg_enabled()); > 
tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1); > > - cpu->thread = g_new0(QemuThread, 1); > - cpu->halt_cond = g_malloc0(sizeof(QemuCond)); > - qemu_cond_init(cpu->halt_cond); > - > /* create a thread per vCPU with TCG (MTTCG) */ > snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG", > cpu->cpu_index); > diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c > index 894e73e52c..84c36c1450 100644 > --- a/accel/tcg/tcg-accel-ops-rr.c > +++ b/accel/tcg/tcg-accel-ops-rr.c > @@ -317,22 +317,22 @@ void rr_start_vcpu_thread(CPUState *cpu) > tcg_cpu_init_cflags(cpu, false); > > if (!single_tcg_cpu_thread) { > - cpu->thread = g_new0(QemuThread, 1); > - cpu->halt_cond = g_new0(QemuCond, 1); > - qemu_cond_init(cpu->halt_cond); > + single_tcg_halt_cond = cpu->halt_cond; > + single_tcg_cpu_thread = cpu->thread; > > /* share a single thread for all cpus with TCG */ > snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG"); > qemu_thread_create(cpu->thread, thread_name, > rr_cpu_thread_fn, > cpu, QEMU_THREAD_JOINABLE); > - > - single_tcg_halt_cond = cpu->halt_cond; > - single_tcg_cpu_thread = cpu->thread; > } else { > - /* we share the thread */ > + /* we share the thread, dump spare data */ > + g_free(cpu->thread); > + qemu_cond_destroy(cpu->halt_cond); > cpu->thread = single_tcg_cpu_thread; > cpu->halt_cond = single_tcg_halt_cond; > + > + /* copy the stuff done at start of rr_cpu_thread_fn */ > cpu->thread_id = first_cpu->thread_id; > cpu->neg.can_do_io = 1; > cpu->created = true; > diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c > index 0f0a247f56..6cfc01593a 100644 > --- a/hw/core/cpu-common.c > +++ b/hw/core/cpu-common.c > @@ -261,6 +261,11 @@ static void cpu_common_initfn(Object *obj) > cpu->nr_threads = 1; > cpu->cflags_next_tb = -1; > > + /* allocate storage for thread info, initialise condition variables */ > + cpu->thread = g_new0(QemuThread, 1); > + cpu->halt_cond = g_new0(QemuCond, 1); > + 
qemu_cond_init(cpu->halt_cond); > + > qemu_mutex_init(&cpu->work_mutex); > qemu_lockcnt_init(&cpu->in_ioctl_lock); > QSIMPLEQ_INIT(&cpu->work_list); > diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c > index 6b2bfd9b9c..0ba31201e2 100644 > --- a/target/i386/nvmm/nvmm-accel-ops.c > +++ b/target/i386/nvmm/nvmm-accel-ops.c > @@ -64,9 +64,6 @@ static void nvmm_start_vcpu_thread(CPUState *cpu) > { > char thread_name[VCPU_THREAD_NAME_SIZE]; > > - cpu->thread = g_new0(QemuThread, 1); > - cpu->halt_cond = g_new0(QemuCond, 1); > - qemu_cond_init(cpu->halt_cond); > snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/NVMM", > cpu->cpu_index); > qemu_thread_create(cpu->thread, thread_name, qemu_nvmm_cpu_thread_fn, > diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c > index 189ae0f140..1a2b4e1c43 100644 > --- a/target/i386/whpx/whpx-accel-ops.c > +++ b/target/i386/whpx/whpx-accel-ops.c > @@ -64,9 +64,6 @@ static void whpx_start_vcpu_thread(CPUState *cpu) > { > char thread_name[VCPU_THREAD_NAME_SIZE]; > > - cpu->thread = g_new0(QemuThread, 1); > - cpu->halt_cond = g_new0(QemuCond, 1); > - qemu_cond_init(cpu->halt_cond); > snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/WHPX", > cpu->cpu_index); > qemu_thread_create(cpu->thread, thread_name, whpx_cpu_thread_fn, Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
On Thu, May 30, 2024 at 03:29:41PM -0700, Pierrick Bouvier wrote: > On 5/30/24 12:42, Alex Bennée wrote: > > Aside from the round robin threads this is all common code. By > > moving the halt_cond setup we also no longer need hacks to work around > > the race between QOM object creation and thread creation. > > > > It is a little ugly to free stuff up for the round robin thread but > > better it deal with its own specialises than making the other > > accelerators jump through hoops. > > > > Signed-off-by: Alex Bennée <alex.bennee@linaro.org> ... > > diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c > > index 6b2bfd9b9c..0ba31201e2 100644 > > --- a/target/i386/nvmm/nvmm-accel-ops.c > > +++ b/target/i386/nvmm/nvmm-accel-ops.c > > @@ -64,9 +64,6 @@ static void nvmm_start_vcpu_thread(CPUState *cpu) > > { > > char thread_name[VCPU_THREAD_NAME_SIZE]; > > - cpu->thread = g_new0(QemuThread, 1); > > - cpu->halt_cond = g_new0(QemuCond, 1); > > - qemu_cond_init(cpu->halt_cond); > > snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/NVMM", > > cpu->cpu_index); > > qemu_thread_create(cpu->thread, thread_name, qemu_nvmm_cpu_thread_fn, I haven't tested it since I don't have a recent qemu build but I doubt it will give issues as its main qemu stuff. Reinoud
© 2016 - 2024 Red Hat, Inc.