Provide a LOCKED queue flag to indicate that the {en,de}queue()
operation runs in task_rq_lock() context.

Note: the sched_change scope in scx_bypass() is the only one that does
not use task_rq_lock(). If that were fixed, sched_change could imply
LOCKED.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
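Not part of the patch, but for reviewers a rough sketch of the intended
pattern. It only uses the task_rq_lock / sched_change guards already
present in kernel/sched; the function name is made up (compare
sched_setnuma() in the diff below):

static void example_locked_change(struct task_struct *p)
{
	/* task_rq_lock() takes p->pi_lock and the task's rq->lock ... */
	guard(task_rq_lock)(p);

	/* ... so this scope may, and should, pass the LOCKED flag */
	scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_LOCKED) {
		/* p is dequeued here if it was queued; adjust sched state */
	}
	/* sched_change_end() re-checks the locks under PROVE_LOCKING */
}

Conversely, the kind of mis-use the new assertions are meant to catch,
again only an illustrative sketch (made-up function name, assumes
rq == task_rq(p) and CONFIG_PROVE_LOCKING=y):

static void bad_locked_change(struct rq *rq, struct task_struct *p)
{
	guard(rq_lock)(rq);	/* rq->lock only, p->pi_lock not held */

	/* trips the new lockdep_assert_held(&p->pi_lock) in sched_change_begin() */
	scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_LOCKED)
		;
}
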
kernel/sched/core.c | 31 +++++++++++++++++++++++++------
kernel/sched/sched.h | 7 +++++++
kernel/sched/syscalls.c | 4 ++--
3 files changed, 34 insertions(+), 8 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2716,7 +2716,7 @@ void set_cpus_allowed_common(struct task
static void
do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
{
- u32 flags = DEQUEUE_SAVE | DEQUEUE_NOCLOCK;
+ u32 flags = DEQUEUE_SAVE | DEQUEUE_NOCLOCK | DEQUEUE_LOCKED;
scoped_guard (sched_change, p, flags) {
p->sched_class->set_cpus_allowed(p, ctx);
@@ -3749,7 +3749,7 @@ static int ttwu_runnable(struct task_str
if (task_on_rq_queued(p)) {
update_rq_clock(rq);
if (p->se.sched_delayed)
- enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED);
+ enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED | ENQUEUE_LOCKED);
if (!task_on_cpu(rq, p)) {
/*
* When on_rq && !on_cpu the task is preempted, see if
@@ -4816,7 +4816,7 @@ void wake_up_new_task(struct task_struct
update_rq_clock(rq);
post_init_entity_util_avg(p);
- activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
+ activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL | ENQUEUE_LOCKED);
trace_sched_wakeup_new(p);
wakeup_preempt(rq, p, wake_flags);
if (p->sched_class->task_woken) {
@@ -7310,7 +7310,7 @@ void rt_mutex_post_schedule(void)
void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
{
int prio, oldprio, queue_flag =
- DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
+ DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK | DEQUEUE_LOCKED;
const struct sched_class *prev_class, *next_class;
struct rq_flags rf;
struct rq *rq;
@@ -8056,7 +8056,7 @@ int migrate_task_to(struct task_struct *
void sched_setnuma(struct task_struct *p, int nid)
{
guard(task_rq_lock)(p);
- scoped_guard (sched_change, p, DEQUEUE_SAVE)
+ scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_LOCKED)
p->numa_preferred_nid = nid;
}
#endif /* CONFIG_NUMA_BALANCING */
@@ -9160,7 +9160,7 @@ static void sched_change_group(struct ta
void sched_move_task(struct task_struct *tsk, bool for_autogroup)
{
unsigned int queue_flags =
- DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
+ DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK | DEQUEUE_LOCKED;
bool resched = false;
struct rq *rq;
@@ -10841,6 +10841,13 @@ struct sched_change_ctx *sched_change_be
struct rq *rq = task_rq(p);
lockdep_assert_rq_held(rq);
+#ifdef CONFIG_PROVE_LOCKING
+ if (flags & DEQUEUE_LOCKED) {
+ lockdep_assert_held(&p->pi_lock);
+ if (p->srq_lock)
+ lockdep_assert_held(p->srq_lock);
+ }
+#endif
if (flags & DEQUEUE_CLASS) {
if (WARN_ON_ONCE(flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)))
@@ -10862,6 +10869,9 @@ struct sched_change_ctx *sched_change_be
.flags = flags,
.queued = task_on_rq_queued(p),
.running = task_current(rq, p),
+#ifdef CONFIG_PROVE_LOCKING
+ .srq_lock = p->srq_lock,
+#endif
};
if (!(flags & DEQUEUE_CLASS)) {
@@ -10888,6 +10898,15 @@ void sched_change_end(struct sched_chang
struct rq *rq = task_rq(p);
lockdep_assert_rq_held(rq);
+#ifdef CONFIG_PROVE_LOCKING
+ if (ctx->flags & ENQUEUE_LOCKED) {
+ lockdep_assert_held(&p->pi_lock);
+ if (p->srq_lock)
+ lockdep_assert_held(p->srq_lock);
+ if (ctx->srq_lock && ctx->srq_lock != p->srq_lock)
+ lockdep_assert_not_held(ctx->srq_lock);
+ }
+#endif
if ((ctx->flags & ENQUEUE_CLASS) && p->sched_class->switching_to)
p->sched_class->switching_to(rq, p);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2340,6 +2340,8 @@ extern const u32 sched_prio_to_wmult[40
* CLASS - going to update p->sched_class; makes sched_change call the
* various switch methods.
*
+ * LOCKED - task_rq_lock() context; p->pi_lock held, p->srq_lock too if non-NULL.
+ *
* ENQUEUE_HEAD - place at front of runqueue (tail if not specified)
* ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
* ENQUEUE_MIGRATED - the task was migrated during wakeup
@@ -2355,6 +2357,7 @@ extern const u32 sched_prio_to_wmult[40
#define DEQUEUE_MIGRATING 0x0010 /* Matches ENQUEUE_MIGRATING */
#define DEQUEUE_DELAYED 0x0020 /* Matches ENQUEUE_DELAYED */
#define DEQUEUE_CLASS 0x0040 /* Matches ENQUEUE_CLASS */
+#define DEQUEUE_LOCKED 0x0080 /* Matches ENQUEUE_LOCKED */
#define DEQUEUE_SPECIAL 0x00010000
#define DEQUEUE_THROTTLE 0x00020000
@@ -2367,6 +2370,7 @@ extern const u32 sched_prio_to_wmult[40
#define ENQUEUE_MIGRATING 0x0010
#define ENQUEUE_DELAYED 0x0020
#define ENQUEUE_CLASS 0x0040
+#define ENQUEUE_LOCKED 0x0080
#define ENQUEUE_HEAD 0x00010000
#define ENQUEUE_REPLENISH 0x00020000
@@ -3963,6 +3967,9 @@ extern void balance_callbacks(struct rq
struct sched_change_ctx {
u64 prio;
struct task_struct *p;
+#ifdef CONFIG_PROVE_LOCKING
+ raw_spinlock_t *srq_lock;
+#endif
int flags;
bool queued;
bool running;
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -89,7 +89,7 @@ void set_user_nice(struct task_struct *p
return;
}
- scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK) {
+ scoped_guard (sched_change, p, DEQUEUE_SAVE | DEQUEUE_NOCLOCK | DEQUEUE_LOCKED) {
p->static_prio = NICE_TO_PRIO(nice);
set_load_weight(p, true);
old_prio = p->prio;
@@ -503,7 +503,7 @@ int __sched_setscheduler(struct task_str
struct balance_callback *head;
struct rq_flags rf;
int reset_on_fork;
- int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK;
+ int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK | DEQUEUE_LOCKED;
struct rq *rq;
bool cpuset_locked = false;