[Xen-devel] [PATCH] xen/sched: rework and rename vcpu_force_reschedule()

Juergen Gross posted 1 patch 4 years, 6 months ago
Patches applied successfully (tree, apply log)
git fetch https://github.com/patchew-project/xen tags/patchew/20190913121400.23263-1-jgross@suse.com
There is a newer version of this series
xen/arch/x86/pv/shim.c  |  4 +---
xen/common/domain.c     |  6 ++----
xen/common/schedule.c   | 52 +++++++++++++++++++++++++++----------------------
xen/include/xen/sched.h |  3 ++-
4 files changed, 34 insertions(+), 31 deletions(-)
[Xen-devel] [PATCH] xen/sched: rework and rename vcpu_force_reschedule()
Posted by Juergen Gross 4 years, 6 months ago
vcpu_force_reschedule() is only used for modifying the periodic timer
of a vcpu. Forcing a vcpu to give up the physical cpu for that purpose
is kind of brutal.

So instead of doing the reschedule dance just operate on the timer
directly. By protecting periodic timer modifications against concurrent
timer activation via a per-vcpu lock it is no longer required to
bother the target vcpu at all for updating its timer.

Rename the function to vcpu_set_periodic_timer() as this now reflects
the functionality.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
- Carved out from my core scheduling series
- Reworked to avoid deadlock when 2 vcpus are trying to modify each
  other's periodic timers; this also addresses all comments by Jan
  Beulich.
---
 xen/arch/x86/pv/shim.c  |  4 +---
 xen/common/domain.c     |  6 ++----
 xen/common/schedule.c   | 52 +++++++++++++++++++++++++++----------------------
 xen/include/xen/sched.h |  3 ++-
 4 files changed, 34 insertions(+), 31 deletions(-)

diff --git a/xen/arch/x86/pv/shim.c b/xen/arch/x86/pv/shim.c
index 324ca27f93..5edbcd9ac5 100644
--- a/xen/arch/x86/pv/shim.c
+++ b/xen/arch/x86/pv/shim.c
@@ -410,7 +410,7 @@ int pv_shim_shutdown(uint8_t reason)
         unmap_vcpu_info(v);
 
         /* Reset the periodic timer to the default value. */
-        v->periodic_period = MILLISECS(10);
+        vcpu_set_periodic_timer(v, MILLISECS(10));
         /* Stop the singleshot timer. */
         stop_timer(&v->singleshot_timer);
 
@@ -419,8 +419,6 @@ int pv_shim_shutdown(uint8_t reason)
 
         if ( v != current )
             vcpu_unpause_by_systemcontroller(v);
-        else
-            vcpu_force_reschedule(v);
     }
 
     return 0;
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 9a48b2504b..0cff749bbe 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -1494,15 +1494,13 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( set.period_ns > STIME_DELTA_MAX )
             return -EINVAL;
 
-        v->periodic_period = set.period_ns;
-        vcpu_force_reschedule(v);
+        vcpu_set_periodic_timer(v, set.period_ns);
 
         break;
     }
 
     case VCPUOP_stop_periodic_timer:
-        v->periodic_period = 0;
-        vcpu_force_reschedule(v);
+        vcpu_set_periodic_timer(v, 0);
         break;
 
     case VCPUOP_set_singleshot_timer:
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index fdeec10c3b..6447662b85 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -312,6 +312,7 @@ int sched_init_vcpu(struct vcpu *v, unsigned int processor)
     v->processor = processor;
 
     /* Initialise the per-vcpu timers. */
+    spin_lock_init(&v->periodic_timer_lock);
     init_timer(&v->periodic_timer, vcpu_periodic_timer_fn,
                v, v->processor);
     init_timer(&v->singleshot_timer, vcpu_singleshot_timer_fn,
@@ -724,24 +725,6 @@ static void vcpu_migrate_finish(struct vcpu *v)
     vcpu_wake(v);
 }
 
-/*
- * Force a VCPU through a deschedule/reschedule path.
- * For example, using this when setting the periodic timer period means that
- * most periodic-timer state need only be touched from within the scheduler
- * which can thus be done without need for synchronisation.
- */
-void vcpu_force_reschedule(struct vcpu *v)
-{
-    spinlock_t *lock = vcpu_schedule_lock_irq(v);
-
-    if ( v->is_running )
-        vcpu_migrate_start(v);
-
-    vcpu_schedule_unlock_irq(lock, v);
-
-    vcpu_migrate_finish(v);
-}
-
 void restore_vcpu_affinity(struct domain *d)
 {
     unsigned int cpu = smp_processor_id();
@@ -1458,14 +1441,11 @@ long sched_adjust_global(struct xen_sysctl_scheduler_op *op)
     return rc;
 }
 
-static void vcpu_periodic_timer_work(struct vcpu *v)
+static void vcpu_periodic_timer_work_locked(struct vcpu *v)
 {
     s_time_t now;
     s_time_t periodic_next_event;
 
-    if ( v->periodic_period == 0 )
-        return;
-
     now = NOW();
     periodic_next_event = v->periodic_last_event + v->periodic_period;
 
@@ -1476,10 +1456,36 @@ static void vcpu_periodic_timer_work(struct vcpu *v)
         periodic_next_event = now + v->periodic_period;
     }
 
-    migrate_timer(&v->periodic_timer, smp_processor_id());
+    migrate_timer(&v->periodic_timer, v->processor);
     set_timer(&v->periodic_timer, periodic_next_event);
 }
 
+static void vcpu_periodic_timer_work(struct vcpu *v)
+{
+    if ( v->periodic_period == 0 )
+        return;
+
+    spin_lock(&v->periodic_timer_lock);
+    vcpu_periodic_timer_work_locked(v);
+    spin_unlock(&v->periodic_timer_lock);
+}
+
+/*
+ * Set the periodic timer of a vcpu.
+ */
+void vcpu_set_periodic_timer(struct vcpu *v, s_time_t value)
+{
+    spin_lock(&v->periodic_timer_lock);
+
+    stop_timer(&v->periodic_timer);
+
+    v->periodic_period = value;
+    if ( value )
+        vcpu_periodic_timer_work_locked(v);
+
+    spin_unlock(&v->periodic_timer_lock);
+}
+
 /*
  * The main function
  * - deschedule the current domain (scheduler independent).
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index e3601c1935..40097ff334 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -153,6 +153,7 @@ struct vcpu
 
     struct vcpu     *next_in_list;
 
+    spinlock_t       periodic_timer_lock;
     s_time_t         periodic_period;
     s_time_t         periodic_last_event;
     struct timer     periodic_timer;
@@ -864,7 +865,7 @@ struct scheduler *scheduler_get_default(void);
 struct scheduler *scheduler_alloc(unsigned int sched_id, int *perr);
 void scheduler_free(struct scheduler *sched);
 int schedule_cpu_switch(unsigned int cpu, struct cpupool *c);
-void vcpu_force_reschedule(struct vcpu *v);
+void vcpu_set_periodic_timer(struct vcpu *v, s_time_t value);
 int cpu_disable_scheduler(unsigned int cpu);
 /* We need it in dom0_setup_vcpu */
 void sched_set_affinity(struct vcpu *v, const cpumask_t *hard,
-- 
2.16.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
Re: [Xen-devel] [PATCH] xen/sched: rework and rename vcpu_force_reschedule()
Posted by Jan Beulich 4 years, 6 months ago
On 13.09.2019 14:14, Juergen Gross wrote:
> ---
> - Carved out from my core scheduling series
> - Reworked to avoid deadlock when 2 vcpus are trying to modify each
>   others periodic timers, leading to address all comments by Jan
>   Beulich.

Oh, indeed - a mutual vcpu_pause() can't end well.

> @@ -724,24 +725,6 @@ static void vcpu_migrate_finish(struct vcpu *v)
>      vcpu_wake(v);
>  }
>  
> -/*
> - * Force a VCPU through a deschedule/reschedule path.
> - * For example, using this when setting the periodic timer period means that
> - * most periodic-timer state need only be touched from within the scheduler
> - * which can thus be done without need for synchronisation.
> - */
> -void vcpu_force_reschedule(struct vcpu *v)
> -{
> -    spinlock_t *lock = vcpu_schedule_lock_irq(v);
> -
> -    if ( v->is_running )
> -        vcpu_migrate_start(v);
> -
> -    vcpu_schedule_unlock_irq(lock, v);
> -
> -    vcpu_migrate_finish(v);
> -}

So the comment specifically said this approach was chosen to
avoid the need for synchronization. You now introduce
synchronization. I'm not severely opposed, but I think there
wants to be justification why the added synchronization is not
a problem (and would perhaps never have been).

> @@ -1458,14 +1441,11 @@ long sched_adjust_global(struct xen_sysctl_scheduler_op *op)
>      return rc;
>  }
>  
> -static void vcpu_periodic_timer_work(struct vcpu *v)
> +static void vcpu_periodic_timer_work_locked(struct vcpu *v)
>  {
>      s_time_t now;
>      s_time_t periodic_next_event;
>  
> -    if ( v->periodic_period == 0 )
> -        return;

I'm afraid you can't pull this out of here, or ...

> @@ -1476,10 +1456,36 @@ static void vcpu_periodic_timer_work(struct vcpu *v)
>          periodic_next_event = now + v->periodic_period;
>      }
>  
> -    migrate_timer(&v->periodic_timer, smp_processor_id());
> +    migrate_timer(&v->periodic_timer, v->processor);
>      set_timer(&v->periodic_timer, periodic_next_event);
>  }
>  
> +static void vcpu_periodic_timer_work(struct vcpu *v)
> +{
> +    if ( v->periodic_period == 0 )
> +        return;
> +
> +    spin_lock(&v->periodic_timer_lock);

... the conditional here needs to move into the locked region.
Otherwise, despite having found non-zero above, by the time the
lock was acquired the value may have changed to zero.

As to the migrate_timer() change: Other than I feared, using
v->processor of a non-running vCPU does not look to have any
chance of acting on an offline CPU, thanks to
cpu_disable_scheduler() dealing with all vCPU-s (and not just
running ones), and thanks to CPU offlining happening in
stop-machine context.

Jan

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
Re: [Xen-devel] [PATCH] xen/sched: rework and rename vcpu_force_reschedule()
Posted by Juergen Gross 4 years, 6 months ago
On 13.09.19 16:42, Jan Beulich wrote:
> On 13.09.2019 14:14, Juergen Gross wrote:
>> ---
>> - Carved out from my core scheduling series
>> - Reworked to avoid deadlock when 2 vcpus are trying to modify each
>>    others periodic timers, leading to address all comments by Jan
>>    Beulich.
> 
> Oh, indeed - a mutual vcpu_pause() can't end  well.
> 
>> @@ -724,24 +725,6 @@ static void vcpu_migrate_finish(struct vcpu *v)
>>       vcpu_wake(v);
>>   }
>>   
>> -/*
>> - * Force a VCPU through a deschedule/reschedule path.
>> - * For example, using this when setting the periodic timer period means that
>> - * most periodic-timer state need only be touched from within the scheduler
>> - * which can thus be done without need for synchronisation.
>> - */
>> -void vcpu_force_reschedule(struct vcpu *v)
>> -{
>> -    spinlock_t *lock = vcpu_schedule_lock_irq(v);
>> -
>> -    if ( v->is_running )
>> -        vcpu_migrate_start(v);
>> -
>> -    vcpu_schedule_unlock_irq(lock, v);
>> -
>> -    vcpu_migrate_finish(v);
>> -}
> 
> So the comment specifically said this approach was chosen to
> avoid the need for synchronization. You now introduce
> synchronization. I'm not severely opposed, but I think there
> wants to be justification why the added synchronization is not
> a problem (and would perhaps never have been).

The comment doesn't say I'm avoiding synchronization, but
scheduling.

The (now needed) synchronization is very unlikely to cause any
blocking, and any blocking that does occur would be short.

> 
>> @@ -1458,14 +1441,11 @@ long sched_adjust_global(struct xen_sysctl_scheduler_op *op)
>>       return rc;
>>   }
>>   
>> -static void vcpu_periodic_timer_work(struct vcpu *v)
>> +static void vcpu_periodic_timer_work_locked(struct vcpu *v)
>>   {
>>       s_time_t now;
>>       s_time_t periodic_next_event;
>>   
>> -    if ( v->periodic_period == 0 )
>> -        return;
> 
> I'm afraid you can't pull this out of here, or ...
> 
>> @@ -1476,10 +1456,36 @@ static void vcpu_periodic_timer_work(struct vcpu *v)
>>           periodic_next_event = now + v->periodic_period;
>>       }
>>   
>> -    migrate_timer(&v->periodic_timer, smp_processor_id());
>> +    migrate_timer(&v->periodic_timer, v->processor);
>>       set_timer(&v->periodic_timer, periodic_next_event);
>>   }
>>   
>> +static void vcpu_periodic_timer_work(struct vcpu *v)
>> +{
>> +    if ( v->periodic_period == 0 )
>> +        return;
>> +
>> +    spin_lock(&v->periodic_timer_lock);
> 
> ... the conditional here needs to move into the locked region.
> Otherwise, despite having found non-zero above, by the time the
> lock was acquired the value may have changed to zero.

Yes, indeed. I think I'll leave the initial test in place in order
to avoid taking the lock in the (common) case where the periodic
timer is disabled. Adding another test after taking the lock
is needed, though.

> 
> As to the migrate_timer() change: Other than I feared, using
> v->processor of a non-running vCPU does not look to have any
> chance of acting on an offline CPU, thanks to
> cpu_disable_scheduler() dealing with all vCPU-s (and not just
> running ones), and thanks to CPU offlining happening in
> stop-machine context.

Correct. Without that the schedule lock wouldn't work at all, as
it is using v->processor to find the correct lock.


Juergen

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel