Subject: sched/balancing: Rename update_blocked_averages() => sched_balance_update_blocked_averages()

Standardize scheduler load-balancing function names on the
sched_balance_() prefix.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Shrikanth Hegde <sshegde@linux.ibm.com>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
---
kernel/sched/fair.c | 8 ++++----
kernel/sched/pelt.c | 2 +-
2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 96a81b2fa281..95f7092043f3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9411,7 +9411,7 @@ static unsigned long task_h_load(struct task_struct *p)
 }
 #endif
 
-static void update_blocked_averages(int cpu)
+static void sched_balance_update_blocked_averages(int cpu)
 {
 	bool decayed = false, done = true;
 	struct rq *rq = cpu_rq(cpu);
@@ -12079,7 +12079,7 @@ static bool update_nohz_stats(struct rq *rq)
 	if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
 		return true;
 
-	update_blocked_averages(cpu);
+	sched_balance_update_blocked_averages(cpu);
 
 	return rq->has_blocked_load;
 }
@@ -12339,7 +12339,7 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	raw_spin_rq_unlock(this_rq);
 
 	t0 = sched_clock_cpu(this_cpu);
-	update_blocked_averages(this_cpu);
+	sched_balance_update_blocked_averages(this_cpu);
 
 	rcu_read_lock();
 	for_each_domain(this_cpu, sd) {
@@ -12431,7 +12431,7 @@ static __latent_entropy void sched_balance_softirq(struct softirq_action *h)
 		return;
 
 	/* normal load balance */
-	update_blocked_averages(this_rq->cpu);
+	sched_balance_update_blocked_averages(this_rq->cpu);
 	sched_balance_domains(this_rq, idle);
 }
 
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 63b6cf898220..f80955ecdce6 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -209,7 +209,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
 	 * This means that weight will be 0 but not running for a sched_entity
 	 * but also for a cfs_rq if the latter becomes idle. As an example,
 	 * this happens during idle_balance() which calls
-	 * update_blocked_averages().
+	 * sched_balance_update_blocked_averages().
 	 *
 	 * Also see the comment in accumulate_sum().
 	 */
--
2.40.1
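
A side note on the update_nohz_stats() hunk above: the time_after(jiffies, ...)
test is the usual jiffies-based rate limiter - it skips the relatively
expensive blocked-load update unless enough ticks have passed since
rq->last_blocked_load_update_tick, and time_after() is used instead of a
plain comparison so the check stays correct across jiffies wraparound. A
minimal user-space analogue of the pattern (illustrative names only, not
kernel API):

	#include <stdbool.h>
	#include <time.h>

	/* Plays the role of rq->last_blocked_load_update_tick above. */
	static time_t last_update;

	/*
	 * Rate limiter: return true (and record the timestamp) only when
	 * at least min_interval seconds have passed since the previous
	 * refresh; otherwise the caller keeps its cached statistics.
	 */
	static bool stats_update_due(time_t min_interval)
	{
		time_t now = time(NULL);

		if (now - last_update < min_interval)
			return false;	/* too soon: reuse cached values */

		last_update = now;
		return true;		/* caller recomputes blocked load */
	}
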
On 2024/3/8 19:18, Ingo Molnar wrote:
> Standardize scheduler load-balancing function names on the
> sched_balance_() prefix.
>
> Signed-off-by: Ingo Molnar <mingo@kernel.org>
> Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
> Cc: Linus Torvalds <torvalds@linux-foundation.org>
> Cc: Peter Zijlstra <peterz@infradead.org>
> Cc: Shrikanth Hegde <sshegde@linux.ibm.com>
> Cc: Valentin Schneider <vschneid@redhat.com>
> Cc: Vincent Guittot <vincent.guittot@linaro.org>
> ---
> kernel/sched/fair.c | 8 ++++----
> kernel/sched/pelt.c | 2 +-
> 2 files changed, 5 insertions(+), 5 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 96a81b2fa281..95f7092043f3 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -9411,7 +9411,7 @@ static unsigned long task_h_load(struct task_struct *p)
>  }
>  #endif
> 
> -static void update_blocked_averages(int cpu)
> +static void sched_balance_update_blocked_averages(int cpu)
>  {
>  	bool decayed = false, done = true;
>  	struct rq *rq = cpu_rq(cpu);
> @@ -12079,7 +12079,7 @@ static bool update_nohz_stats(struct rq *rq)
>  	if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
>  		return true;
> 
> -	update_blocked_averages(cpu);
> +	sched_balance_update_blocked_averages(cpu);
> 
>  	return rq->has_blocked_load;
>  }
> @@ -12339,7 +12339,7 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
>  	raw_spin_rq_unlock(this_rq);
> 
>  	t0 = sched_clock_cpu(this_cpu);
> -	update_blocked_averages(this_cpu);
> +	sched_balance_update_blocked_averages(this_cpu);
> 
>  	rcu_read_lock();
>  	for_each_domain(this_cpu, sd) {
> @@ -12431,7 +12431,7 @@ static __latent_entropy void sched_balance_softirq(struct softirq_action *h)
>  		return;
> 
>  	/* normal load balance */
> -	update_blocked_averages(this_rq->cpu);
> +	sched_balance_update_blocked_averages(this_rq->cpu);
>  	sched_balance_domains(this_rq, idle);
>  }
> 
> diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
> index 63b6cf898220..f80955ecdce6 100644
> --- a/kernel/sched/pelt.c
> +++ b/kernel/sched/pelt.c
> @@ -209,7 +209,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
>  	 * This means that weight will be 0 but not running for a sched_entity
>  	 * but also for a cfs_rq if the latter becomes idle. As an example,
>  	 * this happens during idle_balance() which calls

Could we also fix this ghost idle_balance() in this series (maybe in
patch 10)?

Honglei

> -	 * update_blocked_averages().
> +	 * sched_balance_update_blocked_averages().
>  	 *
>  	 * Also see the comment in accumulate_sum().
>  	 */
* Honglei Wang <jameshongleiwang@126.com> wrote:

> > --- a/kernel/sched/pelt.c
> > +++ b/kernel/sched/pelt.c
> > @@ -209,7 +209,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
> >  	 * This means that weight will be 0 but not running for a sched_entity
> >  	 * but also for a cfs_rq if the latter becomes idle. As an example,
> >  	 * this happens during idle_balance() which calls
> 
> Could we also fix this ghost idle_balance() in this series (maybe in
> patch 10)?

Good point - I've added the patch below.

Thanks,

	Ingo

===================>
From: Ingo Molnar <mingo@kernel.org>
Date: Tue, 12 Mar 2024 11:33:50 +0100
Subject: [PATCH] sched/balancing: Fix a couple of outdated function names in comments

The 'idle_balance()' function hasn't existed for years, and there's no
load_balance_newidle() either - both are sched_balance_newidle() today.

Reported-by: Honglei Wang <jameshongleiwang@126.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
 kernel/sched/fair.c | 2 +-
 kernel/sched/pelt.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 54177ff96e4b..c35452109c76 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6866,7 +6866,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 #ifdef CONFIG_SMP
 
-/* Working cpumask for: sched_balance_rq, load_balance_newidle. */
+/* Working cpumask for: sched_balance_rq(), sched_balance_newidle(). */
 static DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
 static DEFINE_PER_CPU(cpumask_var_t, select_rq_mask);
 static DEFINE_PER_CPU(cpumask_var_t, should_we_balance_tmpmask);
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index f80955ecdce6..3a96da25b67c 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -208,7 +208,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
 	 * se has been already dequeued but cfs_rq->curr still points to it.
 	 * This means that weight will be 0 but not running for a sched_entity
 	 * but also for a cfs_rq if the latter becomes idle. As an example,
-	 * this happens during idle_balance() which calls
+	 * this happens during sched_balance_newidle() which calls
 	 * sched_balance_update_blocked_averages().
 	 *
 	 * Also see the comment in accumulate_sum().
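
For context on the comment under discussion: the situation it describes - a
sched_entity (or an idle cfs_rq) whose weight is already 0 while
cfs_rq->curr still points at it - is handled right below the comment in
___update_load_sum(), which zeroes the runnable/running contributions
whenever the load input is zero. A heavily abbreviated sketch (the
surrounding accumulation logic is elided):

	/*
	 * Zero weight must imply zero runnable/running contribution,
	 * so a just-dequeued entity (or a cfs_rq that went idle) only
	 * decays its blocked load instead of accruing new time.
	 */
	if (!load)
		runnable = running = 0;
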
The following commit has been merged into the sched/core branch of tip:
Commit-ID: 391b7a5335c45b2bafe535cb440836ccd17515aa
Gitweb: https://git.kernel.org/tip/391b7a5335c45b2bafe535cb440836ccd17515aa
Author: Ingo Molnar <mingo@kernel.org>
AuthorDate: Fri, 08 Mar 2024 12:18:15 +01:00
Committer: Ingo Molnar <mingo@kernel.org>
CommitterDate: Tue, 12 Mar 2024 12:00:00 +01:00
sched/balancing: Rename update_blocked_averages() => sched_balance_update_blocked_averages()
Standardize scheduler load-balancing function names on the
sched_balance_() prefix.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Link: https://lore.kernel.org/r/20240308111819.1101550-10-mingo@kernel.org
---
kernel/sched/fair.c | 8 ++++----
kernel/sched/pelt.c | 2 +-
2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 96a81b2..95f7092 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9411,7 +9411,7 @@ static unsigned long task_h_load(struct task_struct *p)
 }
 #endif
 
-static void update_blocked_averages(int cpu)
+static void sched_balance_update_blocked_averages(int cpu)
 {
 	bool decayed = false, done = true;
 	struct rq *rq = cpu_rq(cpu);
@@ -12079,7 +12079,7 @@ static bool update_nohz_stats(struct rq *rq)
 	if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
 		return true;
 
-	update_blocked_averages(cpu);
+	sched_balance_update_blocked_averages(cpu);
 
 	return rq->has_blocked_load;
 }
@@ -12339,7 +12339,7 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	raw_spin_rq_unlock(this_rq);
 
 	t0 = sched_clock_cpu(this_cpu);
-	update_blocked_averages(this_cpu);
+	sched_balance_update_blocked_averages(this_cpu);
 
 	rcu_read_lock();
 	for_each_domain(this_cpu, sd) {
@@ -12431,7 +12431,7 @@ static __latent_entropy void sched_balance_softirq(struct softirq_action *h)
 		return;
 
 	/* normal load balance */
-	update_blocked_averages(this_rq->cpu);
+	sched_balance_update_blocked_averages(this_rq->cpu);
 	sched_balance_domains(this_rq, idle);
 }
 
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 63b6cf8..f80955e 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -209,7 +209,7 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
 	 * This means that weight will be 0 but not running for a sched_entity
 	 * but also for a cfs_rq if the latter becomes idle. As an example,
 	 * this happens during idle_balance() which calls
-	 * update_blocked_averages().
+	 * sched_balance_update_blocked_averages().
 	 *
 	 * Also see the comment in accumulate_sum().
 	 */
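
Taken together, the fair.c hunks in this thread give the periodic balancing
entry point the following shape (a condensed sketch pieced together from
the context lines above plus the surrounding mainline code; the NOHZ guard
and the idle classification are reconstructed and abbreviated, not quoted
from the patch):

	static __latent_entropy void sched_balance_softirq(struct softirq_action *h)
	{
		struct rq *this_rq = this_rq();
		enum cpu_idle_type idle = this_rq->idle_balance ?
							CPU_IDLE : CPU_NOT_IDLE;

		/* NOHZ balancing on behalf of stopped-tick idle CPUs, if kicked: */
		if (nohz_idle_balance(this_rq, idle))
			return;

		/* normal load balance */
		sched_balance_update_blocked_averages(this_rq->cpu);
		sched_balance_domains(this_rq, idle);
	}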