qemu_cpu_kick is used for a number of reasons, including to indicate
that there is work to be done. However, when thread=single the old
qemu_cpu_kick_rr_cpu only advanced the vCPU to the next executing one,
which can lead to a hang in the case that:

a) the kick is from outside the vCPUs (e.g. iothread)
b) the timers are paused (i.e. iothread calling run_on_cpu)

To avoid this, let's split qemu_cpu_kick_rr into two functions: one for
the timer, which continues to advance to the next timeslice, and
another for all other kicks.
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Cc: Doug Gale <doug16k@gmail.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Maydell <peter.maydell@linaro.org>
---
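
For reviewers, a minimal sketch of the failure mode being fixed.
do_work() and iothread_poke_vcpu() are hypothetical helpers invented
for illustration; run_on_cpu(), RUN_ON_CPU_NULL and CPUState are the
real APIs:

#include "qemu/osdep.h"
#include "hw/core/cpu.h"   /* CPUState, run_on_cpu(); the header path
                              is qom/cpu.h on older trees */

static void do_work(CPUState *cpu, run_on_cpu_data data)
{
    /* some state change that needs the target vCPU stopped */
}

static void iothread_poke_vcpu(CPUState *target)
{
    /*
     * run_on_cpu() queues the work item, kicks the target via
     * qemu_cpu_kick(), and blocks until do_work() has run.  With
     * thread=single, the old qemu_cpu_kick_rr_cpu() only advanced
     * the round-robin scheduler by one vCPU; if the kick timer is
     * also stopped (timers paused), the scheduler may never reach
     * @target and the iothread waits forever.  Kicking every RR
     * vCPU instead guarantees progress.
     */
    run_on_cpu(target, do_work, RUN_ON_CPU_NULL);
}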
cpus.c | 24 ++++++++++++++++++------
1 file changed, 18 insertions(+), 6 deletions(-)
diff --git a/cpus.c b/cpus.c
index d2c61ff155..bee7209134 100644
--- a/cpus.c
+++ b/cpus.c
@@ -949,8 +949,8 @@ static inline int64_t qemu_tcg_next_kick(void)
return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
}
-/* Kick the currently round-robin scheduled vCPU */
-static void qemu_cpu_kick_rr_cpu(void)
+/* Kick the currently round-robin scheduled vCPU to next */
+static void qemu_cpu_kick_rr_next_cpu(void)
{
CPUState *cpu;
do {
@@ -961,6 +961,16 @@ static void qemu_cpu_kick_rr_cpu(void)
} while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
}
+/* Kick all RR vCPUs */
+static void qemu_cpu_kick_rr_cpus(void)
+{
+ CPUState *cpu;
+
+ CPU_FOREACH(cpu) {
+ cpu_exit(cpu);
+ };
+}
+
static void do_nothing(CPUState *cpu, run_on_cpu_data unused)
{
}
@@ -993,7 +1003,7 @@ void qemu_timer_notify_cb(void *opaque, QEMUClockType type)
static void kick_tcg_thread(void *opaque)
{
timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
- qemu_cpu_kick_rr_cpu();
+ qemu_cpu_kick_rr_next_cpu();
}
static void start_tcg_kick_timer(void)
@@ -1828,9 +1838,11 @@ void qemu_cpu_kick(CPUState *cpu)
{
qemu_cond_broadcast(cpu->halt_cond);
if (tcg_enabled()) {
- cpu_exit(cpu);
- /* NOP unless doing single-thread RR */
- qemu_cpu_kick_rr_cpu();
+ if (qemu_tcg_mttcg_enabled()) {
+ cpu_exit(cpu);
+ } else {
+ qemu_cpu_kick_rr_cpus();
+ }
} else {
if (hax_enabled()) {
/*
--
2.20.1

On 01/10/19 18:04, Alex Bennée wrote:
> qemu_cpu_kick is used for a number of reasons, including to indicate
> that there is work to be done. However, when thread=single the old
> qemu_cpu_kick_rr_cpu only advanced the vCPU to the next executing one,
> which can lead to a hang in the case that:
>
> a) the kick is from outside the vCPUs (e.g. iothread)
> b) the timers are paused (i.e. iothread calling run_on_cpu)
>
> To avoid this, let's split qemu_cpu_kick_rr into two functions: one for
> the timer, which continues to advance to the next timeslice, and
> another for all other kicks.
Looks good to me. Single-threaded TCG is not going to have high vCPU
counts anyway.
Paolo
Paolo Bonzini <pbonzini@redhat.com> writes:
> Looks good to me. Single-threaded TCG is not going to have high vCPU
> counts anyway.
Are you going to take this via your queue?
--
Alex Bennée

On 01/10/19 19:40, Alex Bennée wrote:
>
> Paolo Bonzini <pbonzini@redhat.com> writes:
>
>> On 01/10/19 18:04, Alex Bennée wrote:
>>> qemu_cpu_kick is used for a number of reasons including to indicate
>>> there is work to be done. However when thread=single the old
>>> qemu_cpu_kick_rr_cpu only advanced the vCPU to the next executing one
>>> which can lead to a hang in the case that:
>>>
>>> a) the kick is from outside the vCPUs (e.g. iothread)
>>> b) the timers are paused (i.e. iothread calling run_on_cpu)
>>>
>>> To avoid this lets split qemu_cpu_kick_rr into two functions. One for
>>> the timer which continues to advance to the next timeslice and another
>>> for all other kicks.
>>>
>>> Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
>>> Cc: Doug Gale <doug16k@gmail.com>
>>> Cc: Paolo Bonzini <pbonzini@redhat.com>
>>> Cc: Peter Maydell <peter.maydell@linaro.org>
>>> ---
>>> cpus.c | 24 ++++++++++++++++++------
>>> 1 file changed, 18 insertions(+), 6 deletions(-)
>>
>> Looks good to me. Single-threaded TCG is not going to have high vCPU
>> counts anyway.
>
> Are you going to take this via your queue?
I wasn't, since we have had a proper TCG maintainer for a while. :)
Paolo

On 10/6/19 9:05 AM, Paolo Bonzini wrote:
> On 01/10/19 19:40, Alex Bennée wrote:
>> Are you going to take this via your queue?
>
> I wasn't, since we have had a proper TCG maintainer for a while. :)

Hah.  Point taken, and queued.  Would you care to go on the record with
something more than a LGTM?

r~

Richard Henderson <richard.henderson@linaro.org> writes:

> Hah.  Point taken, and queued.  Would you care to go on the record with
> something more than a LGTM?

Heh, I was confused because cpus.c is shown twice in MAINTAINERS as
Paolo is the main loop guy ;-)

--
Alex Bennée

On 07/10/19 16:00, Richard Henderson wrote:
>> I wasn't, since we have had a proper TCG maintainer for a while. :)
>
> Hah.  Point taken, and queued.  Would you care to go on the record with
> something more than a LGTM?

Sure,

Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>