To fix the whole SCHED_EXT balance/pick mess, and to avoid complicating
it further, make the regular:
  p->pi_lock
    rq->lock
      dsq->lock
order work. Notably, sched_class::pick_task() is called with rq->lock
held and pick_task_scx() then takes dsq->lock, and the normal
sched_change pattern goes through dequeue/enqueue and thus also takes
dsq->lock; however, various other paths such as task_call_func() /
sched_setaffinity() do not necessarily do so.
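For reference, the p->pi_lock -> rq->lock -> dsq->lock nesting written
out against the locking primitives (illustrative sketch only; 'dsq'
stands for whichever shared DSQ the task sits on):

	raw_spin_lock_irq(&p->pi_lock);
	raw_spin_rq_lock(task_rq(p));		/* rq->lock */
	raw_spin_lock(&dsq->lock);		/* dsq->lock nests inside rq->lock */
	/* ... pick / dispatch / change ... */
	raw_spin_unlock(&dsq->lock);
	raw_spin_rq_unlock(task_rq(p));
	raw_spin_unlock_irq(&p->pi_lock);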
Therefore, add a per-task spinlock pointer that can be set to
reference the shared runqueue lock where appropriate, and teach
__task_rq_lock() to take this lock along with rq->lock.
This ensures all 'normal' scheduling operations serialize against the
shared lock.
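As an illustration of how the new field is meant to be used (a sketch
only -- the helper names and the scx_dispatch_q details below are
assumptions, not the actual kernel/sched/ext.c code): the SCHED_EXT
side would set p->srq_lock to the DSQ's lock when it parks the task on
a shared DSQ and clear it again when the task leaves, both under
rq->lock and dsq->lock, so a __task_rq_lock() caller sees either a
stable pointer or none at all:

	static void scx_dsq_publish(struct scx_dispatch_q *dsq, struct task_struct *p)
	{
		lockdep_assert_rq_held(task_rq(p));

		raw_spin_lock(&dsq->lock);
		/* ... queue @p on @dsq ... */
		/* From here on, __task_rq_lock(p) also acquires dsq->lock. */
		p->srq_lock = &dsq->lock;
		raw_spin_unlock(&dsq->lock);
	}

	static void scx_dsq_unpublish(struct scx_dispatch_q *dsq, struct task_struct *p)
	{
		lockdep_assert_rq_held(task_rq(p));

		raw_spin_lock(&dsq->lock);
		/* ... unlink @p from @dsq ... */
		/*
		 * Cleared under dsq->lock; __task_rq_lock() re-checks
		 * p->srq_lock after acquiring it and retries on a change.
		 */
		p->srq_lock = NULL;
		raw_spin_unlock(&dsq->lock);
	}

With that in place, task_call_func() / sched_setaffinity() style paths
pick up the DSQ lock for free through __task_rq_lock().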
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
include/linux/sched.h | 2 +-
kernel/sched/core.c | 27 ++++++++++++++++++++++-----
kernel/sched/sched.h | 10 ++++++----
kernel/sched/stats.h | 2 +-
4 files changed, 30 insertions(+), 11 deletions(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1225,8 +1225,8 @@ struct task_struct {
/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
spinlock_t alloc_lock;
- /* Protection of the PI data structures: */
raw_spinlock_t pi_lock;
+ raw_spinlock_t *srq_lock;
struct wake_q_node wake_q;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -703,17 +703,24 @@ void double_rq_lock(struct rq *rq1, stru
struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
__acquires(rq->lock)
{
+ raw_spinlock_t *slock;
struct rq *rq;
lockdep_assert_held(&p->pi_lock);
for (;;) {
rq = task_rq(p);
+ slock = p->srq_lock;
raw_spin_rq_lock(rq);
- if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
+ if (slock)
+ raw_spin_lock(slock);
+ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p) &&
+ (!slock || p->srq_lock == slock))) {
rq_pin_lock(rq, rf);
return rq;
}
+ if (slock)
+ raw_spin_unlock(slock);
raw_spin_rq_unlock(rq);
while (unlikely(task_on_rq_migrating(p)))
@@ -728,12 +735,16 @@ struct rq *task_rq_lock(struct task_stru
__acquires(p->pi_lock)
__acquires(rq->lock)
{
+ raw_spinlock_t *slock;
struct rq *rq;
for (;;) {
raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
rq = task_rq(p);
+ slock = p->srq_lock;
raw_spin_rq_lock(rq);
+ if (slock)
+ raw_spin_lock(slock);
/*
* move_queued_task() task_rq_lock()
*
@@ -751,10 +762,14 @@ struct rq *task_rq_lock(struct task_stru
* dependency headed by '[L] rq = task_rq()' and the acquire
* will pair with the WMB to ensure we then also see migrating.
*/
- if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
+ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p) &&
+ (!slock || p->srq_lock == slock))) {
rq_pin_lock(rq, rf);
return rq;
}
+
+ if (slock)
+ raw_spin_unlock(slock);
raw_spin_rq_unlock(rq);
raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
@@ -2617,7 +2632,8 @@ static int migration_cpu_stop(void *data
*/
WARN_ON_ONCE(!pending->stop_pending);
preempt_disable();
- task_rq_unlock(rq, p, &rf);
+ rq_unlock(rq, &rf);
+ raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
&pending->arg, &pending->stop_work);
preempt_enable();
@@ -2626,7 +2642,8 @@ static int migration_cpu_stop(void *data
out:
if (pending)
pending->stop_pending = false;
- task_rq_unlock(rq, p, &rf);
+ rq_unlock(rq, &rf);
+ raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
if (complete)
complete_all(&pending->done);
@@ -3743,7 +3760,7 @@ static int ttwu_runnable(struct task_str
ttwu_do_wakeup(p);
ret = 1;
}
- __task_rq_unlock(rq, &rf);
+ __task_rq_unlock(rq, p, &rf);
return ret;
}
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1800,10 +1800,13 @@ struct rq *task_rq_lock(struct task_stru
__acquires(p->pi_lock)
__acquires(rq->lock);
-static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
+static inline void
+__task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
__releases(rq->lock)
{
rq_unpin_lock(rq, rf);
+ if (p->srq_lock)
+ raw_spin_unlock(p->srq_lock);
raw_spin_rq_unlock(rq);
}
@@ -1812,8 +1815,7 @@ task_rq_unlock(struct rq *rq, struct tas
__releases(rq->lock)
__releases(p->pi_lock)
{
- rq_unpin_lock(rq, rf);
- raw_spin_rq_unlock(rq);
+ __task_rq_unlock(rq, p, rf);
raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
}
@@ -1824,7 +1826,7 @@ DEFINE_LOCK_GUARD_1(task_rq_lock, struct
DEFINE_LOCK_GUARD_1(__task_rq_lock, struct task_struct,
_T->rq = __task_rq_lock(_T->lock, &_T->rf),
- __task_rq_unlock(_T->rq, &_T->rf),
+ __task_rq_unlock(_T->rq, _T->lock, &_T->rf),
struct rq *rq; struct rq_flags rf)
static inline void rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -206,7 +206,7 @@ static inline void psi_ttwu_dequeue(stru
rq = __task_rq_lock(p, &rf);
psi_task_change(p, p->psi_flags, 0);
- __task_rq_unlock(rq, &rf);
+ __task_rq_unlock(rq, p, &rf);
}
}