Change the sched_class::put_prev_task() and ::set_next_task() methods
to take an int flags argument. The bool first argument of
set_next_task() is replaced by a new ENQUEUE_FIRST flag, and the core
callers now pass their (en/de)queue flags through to the class
methods.

No functional change intended.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
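Notes (not for the changelog): the per-class conversion is mechanical.
A minimal sketch of the expected pattern, using a hypothetical class
'foo' (the rt/dl/fair hunks below do exactly this):

	static void set_next_task_foo(struct rq *rq, struct task_struct *p,
				      int flags)
	{
		bool first = flags & ENQUEUE_FIRST;	/* was the bool argument */
		/* ... body unchanged ... */
	}

	static void put_prev_task_foo(struct rq *rq, struct task_struct *p,
				      struct task_struct *next, int flags)
	{
		/* 'flags' is not yet used by any in-tree class */
		/* ... body unchanged ... */
	}
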
kernel/sched/core.c | 4 ++--
kernel/sched/deadline.c | 6 ++++--
kernel/sched/ext.c | 4 ++--
kernel/sched/fair.c | 8 +++++---
kernel/sched/idle.c | 5 +++--
kernel/sched/rt.c | 6 ++++--
kernel/sched/sched.h | 18 ++++++++++--------
kernel/sched/stop_task.c | 5 +++--
8 files changed, 33 insertions(+), 23 deletions(-)
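
Also for review: the old 'first' semantics are preserved. Within this
patch only put_prev_set_next_task() sets ENQUEUE_FIRST, and the plain
set_next_task() callers forward (en/de)queue flags that never contain
it, so 'flags & ENQUEUE_FIRST' evaluates exactly as the old bool did:

	/* put_prev_set_next_task(): 'first' was hard-coded true */
	prev->sched_class->put_prev_task(rq, prev, next, 0);
	next->sched_class->set_next_task(rq, next, ENQUEUE_FIRST);

	/*
	 * set_next_task(): 'first' was hard-coded false; nothing in the
	 * forwarded flags sets ENQUEUE_FIRST, so 'first' stays false.
	 */
	next->sched_class->set_next_task(rq, next, flags);
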
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10857,7 +10857,7 @@ struct sched_change_ctx *sched_change_be
if (ctx->queued)
dequeue_task(rq, p, flags);
if (ctx->running)
- put_prev_task(rq, p);
+ put_prev_task(rq, p, flags);
if ((flags & DEQUEUE_CLASS) && p->sched_class->switched_from)
p->sched_class->switched_from(rq, p);
@@ -10878,7 +10878,7 @@ void sched_change_end(struct sched_chang
if (ctx->queued)
enqueue_task(rq, p, ctx->flags | ENQUEUE_NOCLOCK);
if (ctx->running)
- set_next_task(rq, p);
+ set_next_task(rq, p, ctx->flags);
if (ctx->flags & ENQUEUE_CLASS) {
if (p->sched_class->switched_to)
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2340,10 +2340,11 @@ static void start_hrtick_dl(struct rq *r
}
#endif /* !CONFIG_SCHED_HRTICK */
-static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
+static void set_next_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_dl_entity *dl_se = &p->dl;
struct dl_rq *dl_rq = &rq->dl;
+ bool first = flags & ENQUEUE_FIRST;
p->se.exec_start = rq_clock_task(rq);
if (on_dl_rq(&p->dl))
@@ -2413,7 +2414,8 @@ static struct task_struct *pick_task_dl(
return __pick_task_dl(rq);
}
-static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next)
+static void put_prev_task_dl(struct rq *rq, struct task_struct *p,
+ struct task_struct *next, int flags)
{
struct sched_dl_entity *dl_se = &p->dl;
struct dl_rq *dl_rq = &rq->dl;
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -3243,7 +3243,7 @@ static void process_ddsp_deferred_locals
}
}
-static void set_next_task_scx(struct rq *rq, struct task_struct *p, bool first)
+static void set_next_task_scx(struct rq *rq, struct task_struct *p, int flags)
{
struct scx_sched *sch = scx_root;
@@ -3346,7 +3346,7 @@ static void switch_class(struct rq *rq,
}
static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
- struct task_struct *next)
+ struct task_struct *next, int flags)
{
struct scx_sched *sch = scx_root;
update_curr_scx(rq);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8839,7 +8839,7 @@ static struct task_struct *pick_task_fai
}
static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first);
-static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first);
+static void set_next_task_fair(struct rq *rq, struct task_struct *p, int flags);
struct task_struct *
pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
@@ -8955,7 +8955,8 @@ void fair_server_init(struct rq *rq)
/*
* Account for a descheduled task:
*/
-static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct task_struct *next)
+static void put_prev_task_fair(struct rq *rq, struct task_struct *prev,
+ struct task_struct *next, int flags)
{
struct sched_entity *se = &prev->se;
struct cfs_rq *cfs_rq;
@@ -13286,9 +13287,10 @@ static void __set_next_task_fair(struct
* This routine is mostly called to set cfs_rq->curr field when a task
* migrates between groups/classes.
*/
-static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first)
+static void set_next_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_entity *se = &p->se;
+ bool first = flags & ENQUEUE_FIRST;
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -452,13 +452,14 @@ static void wakeup_preempt_idle(struct r
resched_curr(rq);
}
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct task_struct *next)
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev,
+ struct task_struct *next, int flags)
{
dl_server_update_idle_time(rq, prev);
scx_update_idle(rq, false, true);
}
-static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
+static void set_next_task_idle(struct rq *rq, struct task_struct *next, int flags)
{
update_idle_core(rq);
scx_update_idle(rq, true, true);
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1636,10 +1636,11 @@ static void wakeup_preempt_rt(struct rq
check_preempt_equal_prio(rq, p);
}
-static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
+static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_rt_entity *rt_se = &p->rt;
struct rt_rq *rt_rq = &rq->rt;
+ bool first = flags & ENQUEUE_FIRST;
p->se.exec_start = rq_clock_task(rq);
if (on_rt_rq(&p->rt))
@@ -1707,7 +1708,8 @@ static struct task_struct *pick_task_rt(
return p;
}
-static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_struct *next)
+static void put_prev_task_rt(struct rq *rq, struct task_struct *p,
+ struct task_struct *next, int flags)
{
struct sched_rt_entity *rt_se = &p->rt;
struct rt_rq *rt_rq = &rq->rt;
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2370,7 +2370,9 @@ extern const u32 sched_prio_to_wmult[40
#define ENQUEUE_REPLENISH 0x00020000
#define ENQUEUE_MIGRATED 0x00040000
#define ENQUEUE_INITIAL 0x00080000
+
#define ENQUEUE_RQ_SELECTED 0x00100000
+#define ENQUEUE_FIRST 0x00200000
#define RETRY_TASK ((void *)-1UL)
@@ -2448,8 +2450,8 @@ struct sched_class {
* sched_change:
* __schedule: rq->lock
*/
- void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct task_struct *next);
- void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
+ void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct task_struct *next, int flags);
+ void (*set_next_task)(struct rq *rq, struct task_struct *p, int flags);
/*
* select_task_rq: p->pi_lock
@@ -2544,15 +2546,15 @@ struct sched_class {
#endif
};
-static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
+static inline void put_prev_task(struct rq *rq, struct task_struct *prev, int flags)
{
WARN_ON_ONCE(rq->donor != prev);
- prev->sched_class->put_prev_task(rq, prev, NULL);
+ prev->sched_class->put_prev_task(rq, prev, NULL, flags);
}
-static inline void set_next_task(struct rq *rq, struct task_struct *next)
+static inline void set_next_task(struct rq *rq, struct task_struct *next, int flags)
{
- next->sched_class->set_next_task(rq, next, false);
+ next->sched_class->set_next_task(rq, next, flags);
}
static inline void
@@ -2576,8 +2578,8 @@ static inline void put_prev_set_next_tas
if (next == prev)
return;
- prev->sched_class->put_prev_task(rq, prev, next);
- next->sched_class->set_next_task(rq, next, true);
+ prev->sched_class->put_prev_task(rq, prev, next, 0);
+ next->sched_class->set_next_task(rq, next, ENQUEUE_FIRST);
}
/*
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -27,7 +27,7 @@ wakeup_preempt_stop(struct rq *rq, struc
/* we're never preempted */
}
-static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)
+static void set_next_task_stop(struct rq *rq, struct task_struct *stop, int flags)
{
stop->se.exec_start = rq_clock_task(rq);
}
@@ -58,7 +58,8 @@ static void yield_task_stop(struct rq *r
BUG(); /* the stop task should never yield, its pointless. */
}
-static void put_prev_task_stop(struct rq *rq, struct task_struct *prev, struct task_struct *next)
+static void put_prev_task_stop(struct rq *rq, struct task_struct *prev,
+ struct task_struct *next, int flags)
{
update_curr_common(rq);
}