We should not (re)set slice protection in the sched_change pattern
which calls put_prev_task() / set_next_task().
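As a rough illustration, the pattern in question looks like this (call sites paraphrased from kernel/sched/core.c; flags, error handling and the exact helper names are elided and may differ):

	bool queued = task_on_rq_queued(p);
	bool running = task_current(rq, p);

	if (queued)
		dequeue_task(rq, p, queue_flags);
	if (running)
		put_prev_task(rq, p);

	/* ... change p's scheduling attributes ... */

	if (queued)
		enqueue_task(rq, p, queue_flags);
	if (running)
		set_next_task(rq, p);

Here p was already current and is only being re-installed, not freshly picked, so it should not be handed a new protected slice.
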
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/sched/fair.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)

--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5420,7 +5420,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
}

static void
-set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, bool first)
{
clear_buddies(cfs_rq, se);

@@ -5435,7 +5435,8 @@ set_next_entity(struct cfs_rq *cfs_rq, s
__dequeue_entity(cfs_rq, se);
update_load_avg(cfs_rq, se, UPDATE_TG);

- set_protect_slice(cfs_rq, se);
+ if (first)
+ set_protect_slice(cfs_rq, se);
}

update_stats_curr_start(cfs_rq, se);
@@ -8958,13 +8959,13 @@ pick_next_task_fair(struct rq *rq, struc
pse = parent_entity(pse);
}
if (se_depth >= pse_depth) {
- set_next_entity(cfs_rq_of(se), se);
+ set_next_entity(cfs_rq_of(se), se, true);
se = parent_entity(se);
}
}

put_prev_entity(cfs_rq, pse);
- set_next_entity(cfs_rq, se);
+ set_next_entity(cfs_rq, se, true);

__set_next_task_fair(rq, p, true);
}
@@ -13578,7 +13579,7 @@ static void set_next_task_fair(struct rq
for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);

- set_next_entity(cfs_rq, se);
+ set_next_entity(cfs_rq, se, first);
/* ensure bandwidth has been allocated on our new cfs_rq */
account_cfs_rq_runtime(cfs_rq, 0);
}
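
For reference, the decision is keyed off the 'first' argument of the sched_class::set_next_task() hook: the pick path passes true, while the generic set_next_task() wrapper used by the change pattern passes false, roughly as below (paraphrased from kernel/sched/sched.h; the exact code there may differ):

	static inline void set_next_task(struct rq *rq, struct task_struct *next)
	{
		/* re-install a task that was already current; not a fresh pick */
		next->sched_class->set_next_task(rq, next, false);
	}

So after this change only a genuine pick (re)arms slice protection for the entity.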
On Fri, 30 Jan 2026 at 10:47, Peter Zijlstra <peterz@infradead.org> wrote:
>
> We should not (re)set slice protection in the sched_change pattern
> which calls put_prev_task() / set_next_task().
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>