[PATCH sched_ext/for-6.12] sched: Move update_other_load_avgs() to kernel/sched/pelt.c

Tejun Heo posted 1 patch 2 months, 2 weeks ago
kernel/sched/pelt.c     |   20 ++++++++++++++++++++
kernel/sched/pelt.h     |    1 +
kernel/sched/sched.h    |    2 --
kernel/sched/syscalls.c |   22 ----------------------
4 files changed, 21 insertions(+), 24 deletions(-)
[PATCH sched_ext/for-6.12] sched: Move update_other_load_avgs() to kernel/sched/pelt.c
Posted by Tejun Heo 2 months, 2 weeks ago
96fd6c65efc6 ("sched: Factor out update_other_load_avgs() from
__update_blocked_others()") added update_other_load_avgs() in
kernel/sched/syscalls.c right above effective_cpu_util(). This location
didn't fit that well in the first place, and with 5d871a63997f ("sched/fair:
Move effective_cpu_util() and effective_cpu_util() in fair.c") moving
effective_cpu_util() to kernel/sched/fair.c, it looks even more out of
place.

Relocate the function to kernel/sched/pelt.c where all its callees are.

No functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
---
 kernel/sched/pelt.c     |   20 ++++++++++++++++++++
 kernel/sched/pelt.h     |    1 +
 kernel/sched/sched.h    |    2 --
 kernel/sched/syscalls.c |   22 ----------------------
 4 files changed, 21 insertions(+), 24 deletions(-)

diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index fa52906a4478..a9c65d97b3ca 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -467,3 +467,23 @@ int update_irq_load_avg(struct rq *rq, u64 running)
 	return ret;
 }
 #endif
+
+/*
+ * Load avg and utilization metrics need to be updated periodically and before
+ * consumption. This function updates the metrics for all subsystems except for
+ * the fair class. @rq must be locked and have its clock updated.
+ */
+bool update_other_load_avgs(struct rq *rq)
+{
+	u64 now = rq_clock_pelt(rq);
+	const struct sched_class *curr_class = rq->curr->sched_class;
+	unsigned long hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
+
+	lockdep_assert_rq_held(rq);
+
+	/* hw_pressure doesn't care about invariance */
+	return update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
+		update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
+		update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure) |
+		update_irq_load_avg(rq, 0);
+}
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index 2150062949d4..f4f6a0875c66 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -6,6 +6,7 @@ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se
 int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
+bool update_other_load_avgs(struct rq *rq);
 
 #ifdef CONFIG_SCHED_HW_PRESSURE
 int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 07a2f523e2eb..8063db62b027 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3245,8 +3245,6 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) { }
 
 #ifdef CONFIG_SMP
 
-bool update_other_load_avgs(struct rq *rq);
-
 unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
 				 unsigned long *min,
 				 unsigned long *max);
diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index b621e0050e42..ddf6e6c82f02 100644
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -258,28 +258,6 @@ int sched_core_idle_cpu(int cpu)
 
 #endif
 
-#ifdef CONFIG_SMP
-/*
- * Load avg and utiliztion metrics need to be updated periodically and before
- * consumption. This function updates the metrics for all subsystems except for
- * the fair class. @rq must be locked and have its clock updated.
- */
-bool update_other_load_avgs(struct rq *rq)
-{
-	u64 now = rq_clock_pelt(rq);
-	const struct sched_class *curr_class = rq->curr->sched_class;
-	unsigned long hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
-
-	lockdep_assert_rq_held(rq);
-
-	/* hw_pressure doesn't care about invariance */
-	return update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
-		update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
-		update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure) |
-		update_irq_load_avg(rq, 0);
-}
-#endif /* CONFIG_SMP */
-
 /**
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
Re: [PATCH sched_ext/for-6.12] sched: Move update_other_load_avgs() to kernel/sched/pelt.c
Posted by Tejun Heo 2 months, 2 weeks ago
On Wed, Sep 11, 2024 at 09:36:43AM -1000, Tejun Heo wrote:
> 96fd6c65efc6 ("sched: Factor out update_other_load_avgs() from
> __update_blocked_others()") added update_other_load_avgs() in
> kernel/sched/syscalls.c right above effective_cpu_util(). This location
> didn't fit that well in the first place, and with 5d871a63997f ("sched/fair:
> Move effective_cpu_util() and effective_cpu_util() in fair.c") moving
> effective_cpu_util() to kernel/sched/fair.c, it looks even more out of
> place.
> 
> Relocate the function to kernel/sched/pelt.c where all its callees are.
> 
> No functional changes.
> 
> Signed-off-by: Tejun Heo <tj@kernel.org>
> Cc: Peter Zijlstra <peterz@infradead.org>
> Cc: Ingo Molnar <mingo@redhat.com>

Applied to sched_ext/for-6.12.

Thanks.

-- 
tejun