In reweight_task(), there are two situations:

1. The task was on_rq. In this case the task's load_avg is accurate,
because __sched_setscheduler()/set_user_nice() dequeue an on_rq task
before doing the reweight, so the task's load_avg has been synchronized
with its cfs_rq through update_load_avg() in dequeue_task().

2. The task is sleeping. Its load_avg might not have been updated for
some time, so dequeue_load_avg() in reweight_entity() can subtract a
stale value.

Solve this by calling sync_entity_load_avg() to synchronize the se's
load_avg with its cfs_rq before dequeue_load_avg() runs in
reweight_entity(). For tasks that were on_rq, load_avg was already
brought up to date in dequeue_task(), so the extra synchronization only
covers the short interval between the two updates and has no other
effect.
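
As a concrete illustration, here is a toy, standalone model of the
problem. This is not kernel code: toy_decay(), sub_pos() and all the
numbers are invented for the example; real PELT decays load with y^n
where y^32 = 0.5, and the kernel clamps with its sub_positive() macro.

#include <stdio.h>

/* Toy decay: halve the contribution per period slept. Real PELT
 * decays with y^n where y^32 = 0.5; only the shape matters here. */
static unsigned long toy_decay(unsigned long load, int periods)
{
	return load >> periods;
}

/* Clamp like the kernel's sub_positive() macro: never underflow. */
static unsigned long sub_pos(unsigned long a, unsigned long b)
{
	return a > b ? a - b : 0;
}

int main(void)
{
	unsigned long se_load = 1024; /* se->avg.load_avg when the task slept */
	int slept = 3;                /* decay periods since the last update  */
	unsigned long others = 500;   /* load_avg of all other entities       */

	/* The cfs_rq kept decaying the blocked task's contribution ... */
	unsigned long cfs_load = others + toy_decay(se_load, slept); /* 628 */

	/* ... but se->avg.load_avg itself went stale, so subtracting it
	 * directly, as a stale dequeue_load_avg() would, removes too much: */
	unsigned long stale = sub_pos(cfs_load, se_load);             /* 0 */

	/* Decaying the se up to the cfs_rq's clock first, which is what
	 * sync_entity_load_avg() does, subtracts the right amount: */
	unsigned long synced = sub_pos(cfs_load, toy_decay(se_load, slept)); /* 500 */

	printf("stale dequeue leaves %lu, synced dequeue leaves %lu\n",
	       stale, synced);
	return 0;
}

The example prints 0 for the stale path and 500 for the synced one; in
the patch below the catch-up is __update_load_avg_blocked_se() run
against cfs_rq_last_update_time().
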
Suggested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Chuyi Zhou <zhouchuyi@bytedance.com>
Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
Reviewed-by: Vishal Chourasia <vishalc@linux.ibm.com>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
---
Changes in v4:
- Fix the 'if else' code style issue. (Dietmar)
- Add a description of __sched_setscheduler()/set_user_nice() in the commit
  log. (Dietmar)
- Add a comment before calling sync_entity_load_avg(). (Qais)
Changes in v3:
- Use sync_entity_load_avg() rather than update_load_avg() to sync the
  sleeping task with its cfs_rq, as suggested by Dietmar.
- Link to v2: https://lore.kernel.org/lkml/20240720051248.59608-1-zhouchuyi@bytedance.com/
Changes in v2:
- Change the description in the commit log.
- Use update_load_avg() in reweight_task() rather than in reweight_entity(),
  as suggested by Chengming.
- Link to v1: https://lore.kernel.org/lkml/20240716150840.23061-1-zhouchuyi@bytedance.com/
---
kernel/sched/fair.c | 46 +++++++++++++++++++++++++++------------------
1 file changed, 28 insertions(+), 18 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9057584ec06d..1e3c7c582541 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3669,11 +3669,32 @@ dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
}
+
+static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
+{
+ return u64_u32_load_copy(cfs_rq->avg.last_update_time,
+ cfs_rq->last_update_time_copy);
+}
+
+/*
+ * Synchronize entity load avg of dequeued entity without locking
+ * the previous rq.
+ */
+static void sync_entity_load_avg(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ u64 last_update_time;
+
+ last_update_time = cfs_rq_last_update_time(cfs_rq);
+ __update_load_avg_blocked_se(last_update_time, se);
+}
+
#else
static inline void
enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
static inline void
dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
+static void sync_entity_load_avg(struct sched_entity *se) { }
#endif

static void reweight_eevdf(struct sched_entity *se, u64 avruntime,
@@ -3795,7 +3816,14 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
if (!curr)
__dequeue_entity(cfs_rq, se);
update_load_sub(&cfs_rq->load, se->load.weight);
+ } else if (entity_is_task(se)) {
+ /*
+ * If the task is sleeping, we need to synchronize entity load avg
+ * before dequeue_load_avg().
+ */
+ sync_entity_load_avg(se);
}
+
dequeue_load_avg(cfs_rq, se);

if (se->on_rq) {
@@ -4034,11 +4062,6 @@ static inline bool load_avg_is_decayed(struct sched_avg *sa)
return true;
}

-static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
-{
- return u64_u32_load_copy(cfs_rq->avg.last_update_time,
- cfs_rq->last_update_time_copy);
-}
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
* Because list_add_leaf_cfs_rq always places a child cfs_rq on the list
@@ -4773,19 +4796,6 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
}
}

-/*
- * Synchronize entity load avg of dequeued entity without locking
- * the previous rq.
- */
-static void sync_entity_load_avg(struct sched_entity *se)
-{
- struct cfs_rq *cfs_rq = cfs_rq_of(se);
- u64 last_update_time;
-
- last_update_time = cfs_rq_last_update_time(cfs_rq);
- __update_load_avg_blocked_se(last_update_time, se);
-}
-
/*
* Task first catches up with cfs_rq, and then subtract
* itself from the cfs_rq (task must be off the queue now).
--
2.20.1
On Mon, 29 Jul 2024 at 12:47, Chuyi Zhou <zhouchuyi@bytedance.com> wrote:
>
> In reweight_task(), there are two situations:
>
> [...]
>
> Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Friendly Ping.
On 2024/8/5 21:41, Vincent Guittot wrote:
> On Mon, 29 Jul 2024 at 12:47, Chuyi Zhou <zhouchuyi@bytedance.com> wrote:
>>
>> In reweight_task(), there are two situations:
>>
>> [...]
>
> Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>