[RFC][PATCH 2/3] sched: Add support to pick functions to take rf

Posted by Peter Zijlstra 4 months ago
From: Joel Fernandes <joelagnelf@nvidia.com>

Some pick functions like the internal pick_next_task_fair() already take
rf but some others don't. We need this for scx's server pick function.
Prepare for this by having pick functions accept it.

[peterz: - added RETRY_TASK handling
         - removed pick_next_task_fair indirection]
Signed-off-by: Joel Fernandes <joelagnelf@nvidia.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
SubmissionLink: https://lkml.kernel.org/r/20250809184800.129831-6-joelagnelf@nvidia.com
---
 include/linux/sched.h    |    7 ++-----
 kernel/sched/core.c      |   35 ++++++++++++++++++++++++++---------
 kernel/sched/deadline.c  |    8 ++++----
 kernel/sched/ext.c       |    2 +-
 kernel/sched/fair.c      |   16 ++++++----------
 kernel/sched/idle.c      |    2 +-
 kernel/sched/rt.c        |    2 +-
 kernel/sched/sched.h     |   10 ++++++----
 kernel/sched/stop_task.c |    2 +-
 9 files changed, 48 insertions(+), 36 deletions(-)

--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -637,8 +637,8 @@ struct sched_rt_entity {
 #endif
 } __randomize_layout;
 
-typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *);
-typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *);
+struct rq_flags;
+typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *, struct rq_flags *rf);
 
 struct sched_dl_entity {
 	struct rb_node			rb_node;
@@ -730,9 +730,6 @@ struct sched_dl_entity {
 	 * dl_server_update().
 	 *
 	 * @rq the runqueue this server is for
-	 *
-	 * @server_has_tasks() returns true if @server_pick return a
-	 * runnable task.
 	 */
 	struct rq			*rq;
 	dl_server_pick_f		server_pick_task;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5901,7 +5901,7 @@ __pick_next_task(struct rq *rq, struct t
 
 		/* Assume the next prioritized class is idle_sched_class */
 		if (!p) {
-			p = pick_task_idle(rq);
+			p = pick_task_idle(rq, rf);
 			put_prev_set_next_task(rq, prev, p);
 		}
 
@@ -5913,11 +5913,15 @@ __pick_next_task(struct rq *rq, struct t
 
 	for_each_active_class(class) {
 		if (class->pick_next_task) {
-			p = class->pick_next_task(rq, prev);
+			p = class->pick_next_task(rq, prev, rf);
+			if (unlikely(p == RETRY_TASK))
+				goto restart;
 			if (p)
 				return p;
 		} else {
-			p = class->pick_task(rq);
+			p = class->pick_task(rq, rf);
+			if (unlikely(p == RETRY_TASK))
+				goto restart;
 			if (p) {
 				put_prev_set_next_task(rq, prev, p);
 				return p;
@@ -5947,7 +5951,11 @@ static inline bool cookie_match(struct t
 	return a->core_cookie == b->core_cookie;
 }
 
-static inline struct task_struct *pick_task(struct rq *rq)
+/*
+ * Careful; this can return RETRY_TASK, it does not include the retry-loop
+ * itself due to the whole SMT pick retry thing below.
+ */
+static inline struct task_struct *pick_task(struct rq *rq, struct rq_flags *rf)
 {
 	const struct sched_class *class;
 	struct task_struct *p;
@@ -5955,7 +5963,7 @@ static inline struct task_struct *pick_t
 	rq->dl_server = NULL;
 
 	for_each_active_class(class) {
-		p = class->pick_task(rq);
+		p = class->pick_task(rq, rf);
 		if (p)
 			return p;
 	}
@@ -5970,7 +5978,7 @@ static void queue_core_balance(struct rq
 static struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
-	struct task_struct *next, *p, *max = NULL;
+	struct task_struct *next, *p, *max;
 	const struct cpumask *smt_mask;
 	bool fi_before = false;
 	bool core_clock_updated = (rq == rq->core);
@@ -6055,7 +6063,10 @@ pick_next_task(struct rq *rq, struct tas
 	 * and there are no cookied tasks running on siblings.
 	 */
 	if (!need_sync) {
-		next = pick_task(rq);
+restart_single:
+		next = pick_task(rq, rf);
+		if (unlikely(next == RETRY_TASK))
+			goto restart_single;
 		if (!next->core_cookie) {
 			rq->core_pick = NULL;
 			rq->core_dl_server = NULL;
@@ -6075,6 +6086,8 @@ pick_next_task(struct rq *rq, struct tas
 	 *
 	 * Tie-break prio towards the current CPU
 	 */
+restart_multi:
+	max = NULL;
 	for_each_cpu_wrap(i, smt_mask, cpu) {
 		rq_i = cpu_rq(i);
 
@@ -6086,7 +6099,11 @@ pick_next_task(struct rq *rq, struct tas
 		if (i != cpu && (rq_i != rq->core || !core_clock_updated))
 			update_rq_clock(rq_i);
 
-		rq_i->core_pick = p = pick_task(rq_i);
+		p = pick_task(rq_i, rf);
+		if (unlikely(p == RETRY_TASK))
+			goto restart_multi;
+
+		rq_i->core_pick = p;
 		rq_i->core_dl_server = rq_i->dl_server;
 
 		if (!max || prio_less(max, p, fi_before))
@@ -6108,7 +6125,7 @@ pick_next_task(struct rq *rq, struct tas
 			if (cookie)
 				p = sched_core_find(rq_i, cookie);
 			if (!p)
-				p = idle_sched_class.pick_task(rq_i);
+				p = idle_sched_class.pick_task(rq_i, rf);
 		}
 
 		rq_i->core_pick = p;
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2352,7 +2352,7 @@ static struct sched_dl_entity *pick_next
  * __pick_next_task_dl - Helper to pick the next -deadline task to run.
  * @rq: The runqueue to pick the next task from.
  */
-static struct task_struct *__pick_task_dl(struct rq *rq)
+static struct task_struct *__pick_task_dl(struct rq *rq, struct rq_flags *rf)
 {
 	struct sched_dl_entity *dl_se;
 	struct dl_rq *dl_rq = &rq->dl;
@@ -2366,7 +2366,7 @@ static struct task_struct *__pick_task_d
 	WARN_ON_ONCE(!dl_se);
 
 	if (dl_server(dl_se)) {
-		p = dl_se->server_pick_task(dl_se);
+		p = dl_se->server_pick_task(dl_se, rf);
 		if (!p) {
 			dl_server_stop(dl_se);
 			goto again;
@@ -2379,9 +2379,9 @@ static struct task_struct *__pick_task_d
 	return p;
 }
 
-static struct task_struct *pick_task_dl(struct rq *rq)
+static struct task_struct *pick_task_dl(struct rq *rq, struct rq_flags *rf)
 {
-	return __pick_task_dl(rq);
+	return __pick_task_dl(rq, rf);
 }
 
 static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next)
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -2332,7 +2332,7 @@ static struct task_struct *first_local_t
 					struct task_struct, scx.dsq_list.node);
 }
 
-static struct task_struct *pick_task_scx(struct rq *rq)
+static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf)
 {
 	struct task_struct *prev = rq->curr;
 	struct task_struct *p;
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8822,7 +8822,7 @@ static void check_preempt_wakeup_fair(st
 	resched_curr_lazy(rq);
 }
 
-static struct task_struct *pick_task_fair(struct rq *rq)
+static struct task_struct *pick_task_fair(struct rq *rq, struct rq_flags *rf)
 {
 	struct sched_entity *se;
 	struct cfs_rq *cfs_rq;
@@ -8866,7 +8866,7 @@ pick_next_task_fair(struct rq *rq, struc
 	int new_tasks;
 
 again:
-	p = pick_task_fair(rq);
+	p = pick_task_fair(rq, rf);
 	if (!p)
 		goto idle;
 	se = &p->se;
@@ -8945,14 +8945,10 @@ pick_next_task_fair(struct rq *rq, struc
 	return NULL;
 }
 
-static struct task_struct *__pick_next_task_fair(struct rq *rq, struct task_struct *prev)
+static struct task_struct *
+fair_server_pick_task(struct sched_dl_entity *dl_se, struct rq_flags *rf)
 {
-	return pick_next_task_fair(rq, prev, NULL);
-}
-
-static struct task_struct *fair_server_pick_task(struct sched_dl_entity *dl_se)
-{
-	return pick_task_fair(dl_se->rq);
+	return pick_task_fair(dl_se->rq, rf);
 }
 
 void fair_server_init(struct rq *rq)
@@ -13632,7 +13628,7 @@ DEFINE_SCHED_CLASS(fair) = {
 	.wakeup_preempt		= check_preempt_wakeup_fair,
 
 	.pick_task		= pick_task_fair,
-	.pick_next_task		= __pick_next_task_fair,
+	.pick_next_task		= pick_next_task_fair,
 	.put_prev_task		= put_prev_task_fair,
 	.set_next_task          = set_next_task_fair,
 
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -466,7 +466,7 @@ static void set_next_task_idle(struct rq
 	next->se.exec_start = rq_clock_task(rq);
 }
 
-struct task_struct *pick_task_idle(struct rq *rq)
+struct task_struct *pick_task_idle(struct rq *rq, struct rq_flags *rf)
 {
 	scx_update_idle(rq, true, false);
 	return rq->idle;
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1695,7 +1695,7 @@ static struct task_struct *_pick_next_ta
 	return rt_task_of(rt_se);
 }
 
-static struct task_struct *pick_task_rt(struct rq *rq)
+static struct task_struct *pick_task_rt(struct rq *rq, struct rq_flags *rf)
 {
 	struct task_struct *p;
 
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2467,7 +2467,7 @@ struct sched_class {
 	/*
 	 * schedule/pick_next_task: rq->lock
 	 */
-	struct task_struct *(*pick_task)(struct rq *rq);
+	struct task_struct *(*pick_task)(struct rq *rq, struct rq_flags *rf);
 	/*
 	 * Optional! When implemented pick_next_task() should be equivalent to:
 	 *
@@ -2477,7 +2477,8 @@ struct sched_class {
 	 *       set_next_task_first(next);
 	 *   }
 	 */
-	struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev);
+	struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev,
+					      struct rq_flags *rf);
 
 	/*
 	 * sched_change:
@@ -2690,8 +2691,9 @@ static inline bool sched_fair_runnable(s
 	return rq->cfs.nr_queued > 0;
 }
 
-extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
-extern struct task_struct *pick_task_idle(struct rq *rq);
+extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev,
+					       struct rq_flags *rf);
+extern struct task_struct *pick_task_idle(struct rq *rq, struct rq_flags *rf);
 
 #define SCA_CHECK		0x01
 #define SCA_MIGRATE_DISABLE	0x02
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -32,7 +32,7 @@ static void set_next_task_stop(struct rq
 	stop->se.exec_start = rq_clock_task(rq);
 }
 
-static struct task_struct *pick_task_stop(struct rq *rq)
+static struct task_struct *pick_task_stop(struct rq *rq, struct rq_flags *rf)
 {
 	if (!sched_stop_runnable(rq))
 		return NULL;
Re: [RFC][PATCH 2/3] sched: Add support to pick functions to take rf
Posted by Vincent Guittot 4 months ago
On Mon, 6 Oct 2025 at 12:57, Peter Zijlstra <peterz@infradead.org> wrote:
>
> From: Joel Fernandes <joelagnelf@nvidia.com>
>
> Some pick functions like the internal pick_next_task_fair() already take
> rf but some others don't. We need this for scx's server pick function.
> Prepare for this by having pick functions accept it.
>
> [peterz: - added RETRY_TASK handling
>          - removed pick_next_task_fair indirection]
> Signed-off-by: Joel Fernandes <joelagnelf@nvidia.com>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> ---
> SubmissionLink: https://lkml.kernel.org/r/20250809184800.129831-6-joelagnelf@nvidia.com
> ---
>  include/linux/sched.h    |    7 ++-----
>  kernel/sched/core.c      |   35 ++++++++++++++++++++++++++---------
>  kernel/sched/deadline.c  |    8 ++++----
>  kernel/sched/ext.c       |    2 +-
>  kernel/sched/fair.c      |   16 ++++++----------
>  kernel/sched/idle.c      |    2 +-
>  kernel/sched/rt.c        |    2 +-
>  kernel/sched/sched.h     |   10 ++++++----
>  kernel/sched/stop_task.c |    2 +-
>  9 files changed, 48 insertions(+), 36 deletions(-)
>
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -637,8 +637,8 @@ struct sched_rt_entity {
>  #endif
>  } __randomize_layout;
>
> -typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *);
> -typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *);
> +struct rq_flags;
> +typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *, struct rq_flags *rf);
>
>  struct sched_dl_entity {
>         struct rb_node                  rb_node;
> @@ -730,9 +730,6 @@ struct sched_dl_entity {
>          * dl_server_update().
>          *
>          * @rq the runqueue this server is for
> -        *
> -        * @server_has_tasks() returns true if @server_pick return a
> -        * runnable task.
>          */
>         struct rq                       *rq;
>         dl_server_pick_f                server_pick_task;
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -5901,7 +5901,7 @@ __pick_next_task(struct rq *rq, struct t
>
>                 /* Assume the next prioritized class is idle_sched_class */
>                 if (!p) {
> -                       p = pick_task_idle(rq);
> +                       p = pick_task_idle(rq, rf);
>                         put_prev_set_next_task(rq, prev, p);
>                 }
>
> @@ -5913,11 +5913,15 @@ __pick_next_task(struct rq *rq, struct t
>
>         for_each_active_class(class) {
>                 if (class->pick_next_task) {
> -                       p = class->pick_next_task(rq, prev);
> +                       p = class->pick_next_task(rq, prev, rf);
> +                       if (unlikely(p == RETRY_TASK))
> +                               goto restart;
>                         if (p)
>                                 return p;
>                 } else {
> -                       p = class->pick_task(rq);
> +                       p = class->pick_task(rq, rf);
> +                       if (unlikely(p == RETRY_TASK))
> +                               goto restart;
>                         if (p) {
>                                 put_prev_set_next_task(rq, prev, p);
>                                 return p;
> @@ -5947,7 +5951,11 @@ static inline bool cookie_match(struct t
>         return a->core_cookie == b->core_cookie;
>  }
>
> -static inline struct task_struct *pick_task(struct rq *rq)
> +/*
> + * Careful; this can return RETRY_TASK, it does not include the retry-loop
> + * itself due to the whole SMT pick retry thing below.
> + */
> +static inline struct task_struct *pick_task(struct rq *rq, struct rq_flags *rf)
>  {
>         const struct sched_class *class;
>         struct task_struct *p;
> @@ -5955,7 +5963,7 @@ static inline struct task_struct *pick_t
>         rq->dl_server = NULL;
>
>         for_each_active_class(class) {
> -               p = class->pick_task(rq);
> +               p = class->pick_task(rq, rf);
>                 if (p)
>                         return p;
>         }
> @@ -5970,7 +5978,7 @@ static void queue_core_balance(struct rq
>  static struct task_struct *
>  pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
>  {
> -       struct task_struct *next, *p, *max = NULL;
> +       struct task_struct *next, *p, *max;
>         const struct cpumask *smt_mask;
>         bool fi_before = false;
>         bool core_clock_updated = (rq == rq->core);
> @@ -6055,7 +6063,10 @@ pick_next_task(struct rq *rq, struct tas
>          * and there are no cookied tasks running on siblings.
>          */
>         if (!need_sync) {
> -               next = pick_task(rq);
> +restart_single:
> +               next = pick_task(rq, rf);
> +               if (unlikely(next == RETRY_TASK))
> +                       goto restart_single;
>                 if (!next->core_cookie) {
>                         rq->core_pick = NULL;
>                         rq->core_dl_server = NULL;
> @@ -6075,6 +6086,8 @@ pick_next_task(struct rq *rq, struct tas
>          *
>          * Tie-break prio towards the current CPU
>          */
> +restart_multi:
> +       max = NULL;
>         for_each_cpu_wrap(i, smt_mask, cpu) {
>                 rq_i = cpu_rq(i);
>
> @@ -6086,7 +6099,11 @@ pick_next_task(struct rq *rq, struct tas
>                 if (i != cpu && (rq_i != rq->core || !core_clock_updated))
>                         update_rq_clock(rq_i);
>
> -               rq_i->core_pick = p = pick_task(rq_i);
> +               p = pick_task(rq_i, rf);
> +               if (unlikely(p == RETRY_TASK))
> +                       goto restart_multi;
> +
> +               rq_i->core_pick = p;
>                 rq_i->core_dl_server = rq_i->dl_server;
>
>                 if (!max || prio_less(max, p, fi_before))
> @@ -6108,7 +6125,7 @@ pick_next_task(struct rq *rq, struct tas
>                         if (cookie)
>                                 p = sched_core_find(rq_i, cookie);
>                         if (!p)
> -                               p = idle_sched_class.pick_task(rq_i);
> +                               p = idle_sched_class.pick_task(rq_i, rf);
>                 }
>
>                 rq_i->core_pick = p;
> --- a/kernel/sched/deadline.c
> +++ b/kernel/sched/deadline.c
> @@ -2352,7 +2352,7 @@ static struct sched_dl_entity *pick_next
>   * __pick_next_task_dl - Helper to pick the next -deadline task to run.
>   * @rq: The runqueue to pick the next task from.
>   */
> -static struct task_struct *__pick_task_dl(struct rq *rq)
> +static struct task_struct *__pick_task_dl(struct rq *rq, struct rq_flags *rf)
>  {
>         struct sched_dl_entity *dl_se;
>         struct dl_rq *dl_rq = &rq->dl;
> @@ -2366,7 +2366,7 @@ static struct task_struct *__pick_task_d
>         WARN_ON_ONCE(!dl_se);
>
>         if (dl_server(dl_se)) {
> -               p = dl_se->server_pick_task(dl_se);
> +               p = dl_se->server_pick_task(dl_se, rf);
>                 if (!p) {
>                         dl_server_stop(dl_se);
>                         goto again;
> @@ -2379,9 +2379,9 @@ static struct task_struct *__pick_task_d
>         return p;
>  }
>
> -static struct task_struct *pick_task_dl(struct rq *rq)
> +static struct task_struct *pick_task_dl(struct rq *rq, struct rq_flags *rf)
>  {
> -       return __pick_task_dl(rq);
> +       return __pick_task_dl(rq, rf);
>  }
>
>  static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next)
> --- a/kernel/sched/ext.c
> +++ b/kernel/sched/ext.c
> @@ -2332,7 +2332,7 @@ static struct task_struct *first_local_t
>                                         struct task_struct, scx.dsq_list.node);
>  }
>
> -static struct task_struct *pick_task_scx(struct rq *rq)
> +static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf)
>  {
>         struct task_struct *prev = rq->curr;
>         struct task_struct *p;
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -8822,7 +8822,7 @@ static void check_preempt_wakeup_fair(st
>         resched_curr_lazy(rq);
>  }
>
> -static struct task_struct *pick_task_fair(struct rq *rq)
> +static struct task_struct *pick_task_fair(struct rq *rq, struct rq_flags *rf)
>  {
>         struct sched_entity *se;
>         struct cfs_rq *cfs_rq;
> @@ -8866,7 +8866,7 @@ pick_next_task_fair(struct rq *rq, struc
>         int new_tasks;
>
>  again:
> -       p = pick_task_fair(rq);
> +       p = pick_task_fair(rq, rf);
>         if (!p)
>                 goto idle;
>         se = &p->se;
> @@ -8945,14 +8945,10 @@ pick_next_task_fair(struct rq *rq, struc
>         return NULL;
>  }
>
> -static struct task_struct *__pick_next_task_fair(struct rq *rq, struct task_struct *prev)
> +static struct task_struct *
> +fair_server_pick_task(struct sched_dl_entity *dl_se, struct rq_flags *rf)
>  {
> -       return pick_next_task_fair(rq, prev, NULL);

The special case of a NULL rf pointer is used to skip
sched_balance_newidle() at the end of pick_next_task_fair() in the
pick_next_task() slow path, when prev_balance() has already called it. This means
that it will be called twice if prev is not a fair task.

While reviewing this series, I also noticed an older issue that we
have with the check for PELT lost idle time [1]
[1] https://lore.kernel.org/all/20251008131214.3759798-1-vincent.guittot@linaro.org/

We can't use rf in pick_next_task_fair() to decide whether to skip
sched_balance_newidle(); we need a dedicated parameter for that.
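
For reference, the idle tail of pick_next_task_fair() currently looks
roughly like this (paraphrased sketch, not the exact code); the NULL rf
passed by __pick_next_task_fair() is what made the slow path bail out here
before the newidle balance:

idle:
        if (!rf)
                return NULL;

        new_tasks = sched_balance_newidle(rq, rf);

        /*
         * sched_balance_newidle() drops and re-acquires rq->lock, so a
         * higher-priority task may have appeared; in that case the pick
         * must be restarted.
         */
        if (new_tasks < 0)
                return RETRY_TASK;

        if (new_tasks > 0)
                goto again;

        /* rq is about to go idle; account the PELT lost idle time. */
        update_idle_rq_clock_pelt(rq);

        return NULL;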


> -}
> -
> -static struct task_struct *fair_server_pick_task(struct sched_dl_entity *dl_se)
> -{
> -       return pick_task_fair(dl_se->rq);
> +       return pick_task_fair(dl_se->rq, rf);
>  }
>
>  void fair_server_init(struct rq *rq)
> @@ -13632,7 +13628,7 @@ DEFINE_SCHED_CLASS(fair) = {
>         .wakeup_preempt         = check_preempt_wakeup_fair,
>
>         .pick_task              = pick_task_fair,
> -       .pick_next_task         = __pick_next_task_fair,
> +       .pick_next_task         = pick_next_task_fair,
>         .put_prev_task          = put_prev_task_fair,
>         .set_next_task          = set_next_task_fair,
>
> --- a/kernel/sched/idle.c
> +++ b/kernel/sched/idle.c
> @@ -466,7 +466,7 @@ static void set_next_task_idle(struct rq
>         next->se.exec_start = rq_clock_task(rq);
>  }
>
> -struct task_struct *pick_task_idle(struct rq *rq)
> +struct task_struct *pick_task_idle(struct rq *rq, struct rq_flags *rf)
>  {
>         scx_update_idle(rq, true, false);
>         return rq->idle;
> --- a/kernel/sched/rt.c
> +++ b/kernel/sched/rt.c
> @@ -1695,7 +1695,7 @@ static struct task_struct *_pick_next_ta
>         return rt_task_of(rt_se);
>  }
>
> -static struct task_struct *pick_task_rt(struct rq *rq)
> +static struct task_struct *pick_task_rt(struct rq *rq, struct rq_flags *rf)
>  {
>         struct task_struct *p;
>
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -2467,7 +2467,7 @@ struct sched_class {
>         /*
>          * schedule/pick_next_task: rq->lock
>          */
> -       struct task_struct *(*pick_task)(struct rq *rq);
> +       struct task_struct *(*pick_task)(struct rq *rq, struct rq_flags *rf);
>         /*
>          * Optional! When implemented pick_next_task() should be equivalent to:
>          *
> @@ -2477,7 +2477,8 @@ struct sched_class {
>          *       set_next_task_first(next);
>          *   }
>          */
> -       struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev);
> +       struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev,
> +                                             struct rq_flags *rf);
>
>         /*
>          * sched_change:
> @@ -2690,8 +2691,9 @@ static inline bool sched_fair_runnable(s
>         return rq->cfs.nr_queued > 0;
>  }
>
> -extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
> -extern struct task_struct *pick_task_idle(struct rq *rq);
> +extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev,
> +                                              struct rq_flags *rf);
> +extern struct task_struct *pick_task_idle(struct rq *rq, struct rq_flags *rf);
>
>  #define SCA_CHECK              0x01
>  #define SCA_MIGRATE_DISABLE    0x02
> --- a/kernel/sched/stop_task.c
> +++ b/kernel/sched/stop_task.c
> @@ -32,7 +32,7 @@ static void set_next_task_stop(struct rq
>         stop->se.exec_start = rq_clock_task(rq);
>  }
>
> -static struct task_struct *pick_task_stop(struct rq *rq)
> +static struct task_struct *pick_task_stop(struct rq *rq, struct rq_flags *rf)
>  {
>         if (!sched_stop_runnable(rq))
>                 return NULL;
>
>
Re: [RFC][PATCH 2/3] sched: Add support to pick functions to take rf
Posted by Peter Zijlstra 4 months ago
On Wed, Oct 08, 2025 at 03:16:58PM +0200, Vincent Guittot wrote:

> > +static struct task_struct *
> > +fair_server_pick_task(struct sched_dl_entity *dl_se, struct rq_flags *rf)
> >  {
> > -       return pick_next_task_fair(rq, prev, NULL);
> 
> The special case of a NULL rf pointer is used to skip
> sched_balance_newidle() at the end of pick_next_task_fair() in the
> pick_next_task() slow path, when prev_balance() has already called it. This means
> that it will be called twice if prev is not a fair task.

Oh right. I suppose we can simply remove balance_fair.
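
For reference, balance_fair() is currently just:

static int
balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
        if (sched_fair_runnable(rq))
                return 1;

        return sched_balance_newidle(rq, rf) != 0;
}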

> While reviewing this series, I also noticed an older issue that we
> have with the check for PELT lost idle time [1]
> [1] https://lore.kernel.org/all/20251008131214.3759798-1-vincent.guittot@linaro.org/

Let me go have a look.
Re: [RFC][PATCH 2/3] sched: Add support to pick functions to take rf
Posted by Vincent Guittot 4 months ago
On Wed, 8 Oct 2025 at 15:58, Peter Zijlstra <peterz@infradead.org> wrote:
>
> On Wed, Oct 08, 2025 at 03:16:58PM +0200, Vincent Guittot wrote:
>
> > > +static struct task_struct *
> > > +fair_server_pick_task(struct sched_dl_entity *dl_se, struct rq_flags *rf)
> > >  {
> > > -       return pick_next_task_fair(rq, prev, NULL);
> >
> > The special case of a NULL rf pointer is used to skip
> > sched_balance_newidle() at the end of pick_next_task_fair() in the
> > pick_next_task() slow path, when prev_balance() has already called it. This means
> > that it will be called twice if prev is not a fair task.
>
> Oh right. I suppose we can simply remove balance_fair.

That was the option that I also had in mind, but this will change the
current behavior and I'm afraid that sched_ext people will complain.
Currently, if prev is sched_ext, we don't call the higher classes' balance(),
which includes the fair class's balance_fair() -> sched_balance_newidle(). If
we now always call sched_balance_newidle() at the end of
pick_next_task_fair(), we will try to pull a fair task at each
schedule between sched_ext tasks.

>
> > While reviewing this series, I also noticed an older issue that we
> > have with the check for PELT lost idle time [1]
> > [1] https://lore.kernel.org/all/20251008131214.3759798-1-vincent.guittot@linaro.org/
>
> Let me go have a look.
Re: [RFC][PATCH 2/3] sched: Add support to pick functions to take rf
Posted by Tejun Heo 4 months ago
Hello,

On Wed, Oct 08, 2025 at 05:22:42PM +0200, Vincent Guittot wrote:
> On Wed, 8 Oct 2025 at 15:58, Peter Zijlstra <peterz@infradead.org> wrote:
> > On Wed, Oct 08, 2025 at 03:16:58PM +0200, Vincent Guittot wrote:
> >
> > > > +static struct task_struct *
> > > > +fair_server_pick_task(struct sched_dl_entity *dl_se, struct rq_flags *rf)
> > > >  {
> > > > -       return pick_next_task_fair(rq, prev, NULL);
> > >
> > > The special case of a NULL rf pointer is used to skip
> > > sched_balance_newidle() at the end of pick_next_task_fair() in the
> > > pick_next_task() slow path, when prev_balance() has already called it. This means
> > > that it will be called twice if prev is not a fair task.
> >
> > Oh right. I suppose we can simply remove balance_fair.
> 
> That was the option that I also had in mind, but this will change the
> current behavior and I'm afraid that sched_ext people will complain.
> Currently, if prev is sched_ext, we don't call the higher classes' balance(),
> which includes the fair class's balance_fair() -> sched_balance_newidle(). If
> we now always call sched_balance_newidle() at the end of
> pick_next_task_fair(), we will try to pull a fair task at each
> schedule between sched_ext tasks.

If we pass in @prev into pick(), can't pick() decide whether to newidle
balance or not based on that?

Thanks.

-- 
tejun
Re: [RFC][PATCH 2/3] sched: Add support to pick functions to take rf
Posted by Vincent Guittot 4 months ago
On Wed, 8 Oct 2025 at 22:34, Tejun Heo <tj@kernel.org> wrote:
>
> Hello,
>
> On Wed, Oct 08, 2025 at 05:22:42PM +0200, Vincent Guittot wrote:
> > On Wed, 8 Oct 2025 at 15:58, Peter Zijlstra <peterz@infradead.org> wrote:
> > > On Wed, Oct 08, 2025 at 03:16:58PM +0200, Vincent Guittot wrote:
> > >
> > > > > +static struct task_struct *
> > > > > +fair_server_pick_task(struct sched_dl_entity *dl_se, struct rq_flags *rf)
> > > > >  {
> > > > > -       return pick_next_task_fair(rq, prev, NULL);
> > > >
> > > > The special case of a NULL rf pointer is used to skip
> > > > sched_balance_newidle() at the end of pick_next_task_fair() in the
> > > > pick_next_task() slow path, when prev_balance() has already called it. This means
> > > > that it will be called twice if prev is not a fair task.
> > >
> > > Oh right. I suppose we can simply remove balance_fair.
> >
> > That was the option that I also had in mind, but this will change the
> > current behavior and I'm afraid that sched_ext people will complain.
> > Currently, if prev is sched_ext, we don't call the higher classes' balance(),
> > which includes the fair class's balance_fair() -> sched_balance_newidle(). If
> > we now always call sched_balance_newidle() at the end of
> > pick_next_task_fair(), we will try to pull a fair task at each
> > schedule between sched_ext tasks.
>
> If we pass in @prev into pick(), can't pick() decide whether to newidle
> balance or not based on that?

The problem is that with dl_server, you can have a prev of a lower prio
but still want to run a newidle balance:
- cfs task preempted by dl server
- cfs task migrates to another cpu
- we want to run newidle balance when the cpu becomes idle

>
> Thanks.
>
> --
> tejun
Re: [RFC][PATCH 2/3] sched: Add support to pick functions to take rf
Posted by Peter Zijlstra 3 months, 4 weeks ago
On Thu, Oct 09, 2025 at 09:17:44AM +0200, Vincent Guittot wrote:
> On Wed, 8 Oct 2025 at 22:34, Tejun Heo <tj@kernel.org> wrote:
> >
> > Hello,
> >
> > On Wed, Oct 08, 2025 at 05:22:42PM +0200, Vincent Guittot wrote:
> > > On Wed, 8 Oct 2025 at 15:58, Peter Zijlstra <peterz@infradead.org> wrote:
> > > > On Wed, Oct 08, 2025 at 03:16:58PM +0200, Vincent Guittot wrote:
> > > >
> > > > > > +static struct task_struct *
> > > > > > +fair_server_pick_task(struct sched_dl_entity *dl_se, struct rq_flags *rf)
> > > > > >  {
> > > > > > -       return pick_next_task_fair(rq, prev, NULL);
> > > > >
> > > > > The special case of a NULL rf pointer is used to skip
> > > > > sched_balance_newidle() at the end of pick_next_task_fair() in the
> > > > > pick_next_task() slow path, when prev_balance() has already called it. This means
> > > > > that it will be called twice if prev is not a fair task.
> > > >
> > > > Oh right. I suppose we can simply remove balance_fair.
> > >
> > > That was the option that I also had in mind, but this will change the
> > > current behavior and I'm afraid that sched_ext people will complain.
> > > Currently, if prev is sched_ext, we don't call the higher classes' balance(),
> > > which includes the fair class's balance_fair() -> sched_balance_newidle(). If
> > > we now always call sched_balance_newidle() at the end of
> > > pick_next_task_fair(), we will try to pull a fair task at each
> > > schedule between sched_ext tasks.
> >
> > If we pass in @prev into pick(), can't pick() decide whether to newidle
> > balance or not based on that?
> 
> The problem is that with dl_server, you can have a prev of a lower prio
> but still want to run a newidle balance:
> - cfs task preempted by dl server
> - cfs task migrates to another cpu
> - we want to run newidle balance when the cpu becomes idle

Bah; so yeah, this new behaviour is better for indeed always calling
newidle when it is needed, but you're also right that in case of ext
this might not be ideal.

So I have a pile of newidle hacks here:

  https://lkml.kernel.org/r/20251010170937.GG4067720@noisy.programming.kicks-ass.net

and while I don't particularly like NI_SPARE (the has_spare_tasks thing
is fickle); the idea seems to have some merit for this situation --
where we know we'll not be having fair tasks at all.

I mean, we can always do something like this to sched_balance_newidle():

	if (scx_switched_all())
		return 0;

Not pretty, but should do the job.
Re: [RFC][PATCH 2/3] sched: Add support to pick functions to take rf
Posted by Peter Zijlstra 3 months, 4 weeks ago
On Mon, Oct 13, 2025 at 01:04:49PM +0200, Peter Zijlstra wrote:

> Bah; so yeah, this new behaviour is better for indeed always calling
> newidle when it is needed, but you're also right that in case of ext
> this might not be ideal.
> 
> So I have a pile of newidle hacks here:
> 
>   https://lkml.kernel.org/r/20251010170937.GG4067720@noisy.programming.kicks-ass.net
> 
> and while I don't particularly like NI_SPARE (the has_spare_tasks thing
> is fickle); the idea seems to have some merit for this situation --
> where we know we'll not be having fair tasks at all.
> 
> I mean, we can always do something like this to sched_balance_newidle():
> 
> 	if (scx_switched_all())
> 		return 0;
> 
> Not pretty, but should do the job.

Oh, never mind, none of this is needed.

__pick_next_task()

	if (scx_enabled())
	  goto restart;

	...
restart:
	for_each_active_class(class) {
		...
	}


And then we have next_active_class() skip fair_sched_class entirely when
scx_switched_all().
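
Roughly, the existing helper (sketch, not the exact kernel/sched/sched.h code):

static inline const struct sched_class *next_active_class(const struct sched_class *class)
{
        class++;
#ifdef CONFIG_SCHED_CLASS_EXT
        /* With every task on sched_ext, fair never has anything to pick. */
        if (scx_switched_all() && class == &fair_sched_class)
                class++;
        /* Without an scx scheduler loaded, the ext class is never active. */
        if (!scx_enabled() && class == &ext_sched_class)
                class++;
#endif
        return class;
}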

So in the common ext case, we'll not hit pick_next_task_fair() at all.
Re: [RFC][PATCH 2/3] sched: Add support to pick functions to take rf
Posted by Vincent Guittot 3 months, 4 weeks ago
On Mon, 13 Oct 2025 at 13:09, Peter Zijlstra <peterz@infradead.org> wrote:
>
> On Mon, Oct 13, 2025 at 01:04:49PM +0200, Peter Zijlstra wrote:
>
> > Bah; so yeah, this new behaviour is better for indeed always calling
> > newidle when it is needed, but you're also right that in case of ext
> > this might not be ideal.
> >
> > So I have a pile of newidle hacks here:
> >
> >   https://lkml.kernel.org/r/20251010170937.GG4067720@noisy.programming.kicks-ass.net
> >
> > and while I don't particularly like NI_SPARE (the has_spare_tasks thing
> > is fickle); the idea seems to have some merit for this situation --
> > where we know we'll not be having fair tasks at all.
> >
> > I mean, we can always do something like this to sched_balance_newidle():
> >
> >       if (scx_switched_all())
> >               return 0;
> >
> > Not pretty, but should do the job.
>
> Oh, never mind, none of this is needed.
>
> __pick_next_task()
>
>         if (scx_enabled())
>           goto restart;
>
>         ...
> restart:
>         for_each_active_class(class) {
>                 ...
>         }
>
>
> And then we have next_active_class() skip fair_sched_class entirely when
> scx_switched_all().

Ah yes you're right. fair is not called in case of scx_switched_all()

>
> So in the common ext case, we'll not hit pick_next_task_fair() at all.
Re: [RFC][PATCH 2/3] sched: Add support to pick functions to take rf
Posted by Tejun Heo 3 months, 4 weeks ago
On Mon, Oct 13, 2025 at 03:06:33PM +0200, Vincent Guittot wrote:
...
> > And then we have next_active_class() skip fair_sched_class entirely when
> > scx_switched_all().
> 
> Ah yes you're right. fair is not called in case of scx_switched_all()

Yeah, when switched_all this isn't an issue at all. In partial mode, the extra
newidle balance may be noticeable, but I don't think many people are using partial
mode, so we can worry about it when it becomes a problem.

Thanks.

-- 
tejun
[tip: sched/core] sched: Add support to pick functions to take rf
Posted by tip-bot2 for Joel Fernandes 3 months, 3 weeks ago
The following commit has been merged into the sched/core branch of tip:

Commit-ID:     50653216e4ff7a74c95b2ee9ec439916875556ec
Gitweb:        https://git.kernel.org/tip/50653216e4ff7a74c95b2ee9ec439916875556ec
Author:        Joel Fernandes <joelagnelf@nvidia.com>
AuthorDate:    Sat, 09 Aug 2025 14:47:50 -04:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Thu, 16 Oct 2025 11:13:55 +02:00

sched: Add support to pick functions to take rf

Some pick functions like the internal pick_next_task_fair() already take
rf but some others don't. We need this for scx's server pick function.
Prepare for this by having pick functions accept it.

[peterz: - added RETRY_TASK handling
         - removed pick_next_task_fair indirection]
Signed-off-by: Joel Fernandes <joelagnelf@nvidia.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Tejun Heo <tj@kernel.org>
---
 include/linux/sched.h    |  7 ++-----
 kernel/sched/core.c      | 35 ++++++++++++++++++++++++++---------
 kernel/sched/deadline.c  |  8 ++++----
 kernel/sched/ext.c       |  2 +-
 kernel/sched/fair.c      | 26 ++++++--------------------
 kernel/sched/idle.c      |  2 +-
 kernel/sched/rt.c        |  2 +-
 kernel/sched/sched.h     | 10 ++++++----
 kernel/sched/stop_task.c |  2 +-
 9 files changed, 48 insertions(+), 46 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 77426c3..0757647 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -637,8 +637,8 @@ struct sched_rt_entity {
 #endif
 } __randomize_layout;
 
-typedef bool (*dl_server_has_tasks_f)(struct sched_dl_entity *);
-typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *);
+struct rq_flags;
+typedef struct task_struct *(*dl_server_pick_f)(struct sched_dl_entity *, struct rq_flags *rf);
 
 struct sched_dl_entity {
 	struct rb_node			rb_node;
@@ -730,9 +730,6 @@ struct sched_dl_entity {
 	 * dl_server_update().
 	 *
 	 * @rq the runqueue this server is for
-	 *
-	 * @server_has_tasks() returns true if @server_pick return a
-	 * runnable task.
 	 */
 	struct rq			*rq;
 	dl_server_pick_f		server_pick_task;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9fc990f..a75d456 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5901,7 +5901,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
 		/* Assume the next prioritized class is idle_sched_class */
 		if (!p) {
-			p = pick_task_idle(rq);
+			p = pick_task_idle(rq, rf);
 			put_prev_set_next_task(rq, prev, p);
 		}
 
@@ -5913,11 +5913,15 @@ restart:
 
 	for_each_active_class(class) {
 		if (class->pick_next_task) {
-			p = class->pick_next_task(rq, prev);
+			p = class->pick_next_task(rq, prev, rf);
+			if (unlikely(p == RETRY_TASK))
+				goto restart;
 			if (p)
 				return p;
 		} else {
-			p = class->pick_task(rq);
+			p = class->pick_task(rq, rf);
+			if (unlikely(p == RETRY_TASK))
+				goto restart;
 			if (p) {
 				put_prev_set_next_task(rq, prev, p);
 				return p;
@@ -5947,7 +5951,11 @@ static inline bool cookie_match(struct task_struct *a, struct task_struct *b)
 	return a->core_cookie == b->core_cookie;
 }
 
-static inline struct task_struct *pick_task(struct rq *rq)
+/*
+ * Careful; this can return RETRY_TASK, it does not include the retry-loop
+ * itself due to the whole SMT pick retry thing below.
+ */
+static inline struct task_struct *pick_task(struct rq *rq, struct rq_flags *rf)
 {
 	const struct sched_class *class;
 	struct task_struct *p;
@@ -5955,7 +5963,7 @@ static inline struct task_struct *pick_task(struct rq *rq)
 	rq->dl_server = NULL;
 
 	for_each_active_class(class) {
-		p = class->pick_task(rq);
+		p = class->pick_task(rq, rf);
 		if (p)
 			return p;
 	}
@@ -5970,7 +5978,7 @@ static void queue_core_balance(struct rq *rq);
 static struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
-	struct task_struct *next, *p, *max = NULL;
+	struct task_struct *next, *p, *max;
 	const struct cpumask *smt_mask;
 	bool fi_before = false;
 	bool core_clock_updated = (rq == rq->core);
@@ -6055,7 +6063,10 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	 * and there are no cookied tasks running on siblings.
 	 */
 	if (!need_sync) {
-		next = pick_task(rq);
+restart_single:
+		next = pick_task(rq, rf);
+		if (unlikely(next == RETRY_TASK))
+			goto restart_single;
 		if (!next->core_cookie) {
 			rq->core_pick = NULL;
 			rq->core_dl_server = NULL;
@@ -6075,6 +6086,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	 *
 	 * Tie-break prio towards the current CPU
 	 */
+restart_multi:
+	max = NULL;
 	for_each_cpu_wrap(i, smt_mask, cpu) {
 		rq_i = cpu_rq(i);
 
@@ -6086,7 +6099,11 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		if (i != cpu && (rq_i != rq->core || !core_clock_updated))
 			update_rq_clock(rq_i);
 
-		rq_i->core_pick = p = pick_task(rq_i);
+		p = pick_task(rq_i, rf);
+		if (unlikely(p == RETRY_TASK))
+			goto restart_multi;
+
+		rq_i->core_pick = p;
 		rq_i->core_dl_server = rq_i->dl_server;
 
 		if (!max || prio_less(max, p, fi_before))
@@ -6108,7 +6125,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 			if (cookie)
 				p = sched_core_find(rq_i, cookie);
 			if (!p)
-				p = idle_sched_class.pick_task(rq_i);
+				p = idle_sched_class.pick_task(rq_i, rf);
 		}
 
 		rq_i->core_pick = p;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 83e6175..48357d4 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2352,7 +2352,7 @@ static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
  * __pick_next_task_dl - Helper to pick the next -deadline task to run.
  * @rq: The runqueue to pick the next task from.
  */
-static struct task_struct *__pick_task_dl(struct rq *rq)
+static struct task_struct *__pick_task_dl(struct rq *rq, struct rq_flags *rf)
 {
 	struct sched_dl_entity *dl_se;
 	struct dl_rq *dl_rq = &rq->dl;
@@ -2366,7 +2366,7 @@ again:
 	WARN_ON_ONCE(!dl_se);
 
 	if (dl_server(dl_se)) {
-		p = dl_se->server_pick_task(dl_se);
+		p = dl_se->server_pick_task(dl_se, rf);
 		if (!p) {
 			dl_server_stop(dl_se);
 			goto again;
@@ -2379,9 +2379,9 @@ again:
 	return p;
 }
 
-static struct task_struct *pick_task_dl(struct rq *rq)
+static struct task_struct *pick_task_dl(struct rq *rq, struct rq_flags *rf)
 {
-	return __pick_task_dl(rq);
+	return __pick_task_dl(rq, rf);
 }
 
 static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 949c3a6..dc743ca 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -2332,7 +2332,7 @@ static struct task_struct *first_local_task(struct rq *rq)
 					struct task_struct, scx.dsq_list.node);
 }
 
-static struct task_struct *pick_task_scx(struct rq *rq)
+static struct task_struct *pick_task_scx(struct rq *rq, struct rq_flags *rf)
 {
 	struct task_struct *prev = rq->curr;
 	struct task_struct *p;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 23ac05c..2554055 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8705,15 +8705,6 @@ static void set_cpus_allowed_fair(struct task_struct *p, struct affinity_context
 	set_task_max_allowed_capacity(p);
 }
 
-static int
-balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
-{
-	if (sched_fair_runnable(rq))
-		return 1;
-
-	return sched_balance_newidle(rq, rf) != 0;
-}
-
 static void set_next_buddy(struct sched_entity *se)
 {
 	for_each_sched_entity(se) {
@@ -8822,7 +8813,7 @@ preempt:
 	resched_curr_lazy(rq);
 }
 
-static struct task_struct *pick_task_fair(struct rq *rq)
+static struct task_struct *pick_task_fair(struct rq *rq, struct rq_flags *rf)
 {
 	struct sched_entity *se;
 	struct cfs_rq *cfs_rq;
@@ -8866,7 +8857,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
 	int new_tasks;
 
 again:
-	p = pick_task_fair(rq);
+	p = pick_task_fair(rq, rf);
 	if (!p)
 		goto idle;
 	se = &p->se;
@@ -8945,14 +8936,10 @@ idle:
 	return NULL;
 }
 
-static struct task_struct *__pick_next_task_fair(struct rq *rq, struct task_struct *prev)
-{
-	return pick_next_task_fair(rq, prev, NULL);
-}
-
-static struct task_struct *fair_server_pick_task(struct sched_dl_entity *dl_se)
+static struct task_struct *
+fair_server_pick_task(struct sched_dl_entity *dl_se, struct rq_flags *rf)
 {
-	return pick_task_fair(dl_se->rq);
+	return pick_task_fair(dl_se->rq, rf);
 }
 
 void fair_server_init(struct rq *rq)
@@ -13644,11 +13631,10 @@ DEFINE_SCHED_CLASS(fair) = {
 	.wakeup_preempt		= check_preempt_wakeup_fair,
 
 	.pick_task		= pick_task_fair,
-	.pick_next_task		= __pick_next_task_fair,
+	.pick_next_task		= pick_next_task_fair,
 	.put_prev_task		= put_prev_task_fair,
 	.set_next_task          = set_next_task_fair,
 
-	.balance		= balance_fair,
 	.select_task_rq		= select_task_rq_fair,
 	.migrate_task_rq	= migrate_task_rq_fair,
 
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 055b0dd..7fa0b59 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -466,7 +466,7 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir
 	next->se.exec_start = rq_clock_task(rq);
 }
 
-struct task_struct *pick_task_idle(struct rq *rq)
+struct task_struct *pick_task_idle(struct rq *rq, struct rq_flags *rf)
 {
 	scx_update_idle(rq, true, false);
 	return rq->idle;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 9bc828d..1fd97f2 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1695,7 +1695,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
 	return rt_task_of(rt_se);
 }
 
-static struct task_struct *pick_task_rt(struct rq *rq)
+static struct task_struct *pick_task_rt(struct rq *rq, struct rq_flags *rf)
 {
 	struct task_struct *p;
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f4a3230..8946294 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2470,7 +2470,7 @@ struct sched_class {
 	/*
 	 * schedule/pick_next_task: rq->lock
 	 */
-	struct task_struct *(*pick_task)(struct rq *rq);
+	struct task_struct *(*pick_task)(struct rq *rq, struct rq_flags *rf);
 	/*
 	 * Optional! When implemented pick_next_task() should be equivalent to:
 	 *
@@ -2480,7 +2480,8 @@ struct sched_class {
 	 *       set_next_task_first(next);
 	 *   }
 	 */
-	struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev);
+	struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev,
+					      struct rq_flags *rf);
 
 	/*
 	 * sched_change:
@@ -2707,8 +2708,9 @@ static inline bool sched_fair_runnable(struct rq *rq)
 	return rq->cfs.nr_queued > 0;
 }
 
-extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
-extern struct task_struct *pick_task_idle(struct rq *rq);
+extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev,
+					       struct rq_flags *rf);
+extern struct task_struct *pick_task_idle(struct rq *rq, struct rq_flags *rf);
 
 #define SCA_CHECK		0x01
 #define SCA_MIGRATE_DISABLE	0x02
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index d98c453..4f9192b 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -32,7 +32,7 @@ static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool fir
 	stop->se.exec_start = rq_clock_task(rq);
 }
 
-static struct task_struct *pick_task_stop(struct rq *rq)
+static struct task_struct *pick_task_stop(struct rq *rq, struct rq_flags *rf)
 {
 	if (!sched_stop_runnable(rq))
 		return NULL;