This commit updates references to balance_scx() in the comments to balance_one().
Signed-off-by: Zqiang <qiang.zhang@linux.dev>
---
kernel/sched/ext.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 7a53d1cf8e82..5ebf8a740847 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1577,7 +1577,7 @@ static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags
*
* @p may go through multiple stopping <-> running transitions between
* here and put_prev_task_scx() if task attribute changes occur while
- * balance_scx() leaves @rq unlocked. However, they don't contain any
+ * balance_one() leaves @rq unlocked. However, they don't contain any
* information meaningful to the BPF scheduler and can be suppressed by
* skipping the callbacks if the task is !QUEUED.
*/
@@ -2372,7 +2372,7 @@ static void switch_class(struct rq *rq, struct task_struct *next)
* preempted, and it regaining control of the CPU.
*
* ->cpu_release() complements ->cpu_acquire(), which is emitted the
- * next time that balance_scx() is invoked.
+ * next time that balance_one() is invoked.
*/
if (!rq->scx.cpu_released) {
if (SCX_HAS_OP(sch, cpu_release)) {
@@ -2478,7 +2478,7 @@ do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx)
}
/*
- * If balance_scx() is telling us to keep running @prev, replenish slice
+ * If balance_one() is telling us to keep running @prev, replenish slice
* if necessary and keep running @prev. Otherwise, pop the first one
* from the local DSQ.
*/
@@ -4025,7 +4025,7 @@ static DEFINE_TIMER(scx_bypass_lb_timer, scx_bypass_lb_timerfn);
*
* - ops.dispatch() is ignored.
*
- * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
+ * - balance_one() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
* can't be trusted. Whenever a tick triggers, the running task is rotated to
* the tail of the queue with core_sched_at touched.
*
@@ -6069,7 +6069,7 @@ __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
/*
* A successfully consumed task can be dequeued before it starts
* running while the CPU is trying to migrate other dispatched
- * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
+ * tasks. Bump nr_tasks to tell balance_one() to retry on empty
* local DSQ.
*/
dspc->nr_tasks++;
--
2.17.1
On Fri, Dec 19, 2025 at 05:34:04PM +0800, Zqiang wrote:
> This commit updates references to balance_scx() in the comments to balance_one().
>
> Signed-off-by: Zqiang <qiang.zhang@linux.dev>
Maybe we can mention that balance_scx() was removed by commit 4c95380701f58
("sched/ext: Fold balance_scx() into pick_task_scx()"). Now balance_one()
is called directly from do_pick_task_scx() and does all the work that used
to be handled by balance_scx(), so the renames are correct.
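Just as an illustration (a simplified, self-contained sketch with hypothetical
names, not the actual kernel code), the fold boils down to the pick path
calling the core balance helper directly instead of going through a wrapper:

/*
 * Hypothetical stand-ins: balance_core() plays the role of balance_one(),
 * pick_task() the role of do_pick_task_scx(); the old wrapper (analogous
 * to balance_scx()) is gone, so comments now name the core helper.
 */
#include <stdbool.h>
#include <stdio.h>

struct runqueue { int nr_queued; };

/* Core helper: reports whether there is anything runnable to pick. */
static bool balance_core(struct runqueue *rq)
{
	return rq->nr_queued > 0;
}

/* Pick path: calls the core helper directly, no wrapper in between. */
static bool pick_task(struct runqueue *rq)
{
	if (!balance_core(rq))
		return false;
	rq->nr_queued--;
	return true;
}

int main(void)
{
	struct runqueue rq = { .nr_queued = 1 };

	printf("first pick:  %s\n", pick_task(&rq) ? "task" : "(none)");
	printf("second pick: %s\n", pick_task(&rq) ? "task" : "(none)");
	return 0;
}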
In any case, looks good to me, thanks for the cleanup.
Reviewed-by: Andrea Righi <arighi@nvidia.com>
-Andrea
> ---
> kernel/sched/ext.c | 10 +++++-----
> 1 file changed, 5 insertions(+), 5 deletions(-)
>
> diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
> index 7a53d1cf8e82..5ebf8a740847 100644
> --- a/kernel/sched/ext.c
> +++ b/kernel/sched/ext.c
> @@ -1577,7 +1577,7 @@ static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags
> *
> * @p may go through multiple stopping <-> running transitions between
> * here and put_prev_task_scx() if task attribute changes occur while
> - * balance_scx() leaves @rq unlocked. However, they don't contain any
> + * balance_one() leaves @rq unlocked. However, they don't contain any
> * information meaningful to the BPF scheduler and can be suppressed by
> * skipping the callbacks if the task is !QUEUED.
> */
> @@ -2372,7 +2372,7 @@ static void switch_class(struct rq *rq, struct task_struct *next)
> * preempted, and it regaining control of the CPU.
> *
> * ->cpu_release() complements ->cpu_acquire(), which is emitted the
> - * next time that balance_scx() is invoked.
> + * next time that balance_one() is invoked.
> */
> if (!rq->scx.cpu_released) {
> if (SCX_HAS_OP(sch, cpu_release)) {
> @@ -2478,7 +2478,7 @@ do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx)
> }
>
> /*
> - * If balance_scx() is telling us to keep running @prev, replenish slice
> + * If balance_one() is telling us to keep running @prev, replenish slice
> * if necessary and keep running @prev. Otherwise, pop the first one
> * from the local DSQ.
> */
> @@ -4025,7 +4025,7 @@ static DEFINE_TIMER(scx_bypass_lb_timer, scx_bypass_lb_timerfn);
> *
> * - ops.dispatch() is ignored.
> *
> - * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
> + * - balance_one() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
> * can't be trusted. Whenever a tick triggers, the running task is rotated to
> * the tail of the queue with core_sched_at touched.
> *
> @@ -6069,7 +6069,7 @@ __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
> /*
> * A successfully consumed task can be dequeued before it starts
> * running while the CPU is trying to migrate other dispatched
> - * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
> + * tasks. Bump nr_tasks to tell balance_one() to retry on empty
> * local DSQ.
> */
> dspc->nr_tasks++;
> --
> 2.17.1
>