Traversing the Perf Domains requires rcu_read_lock() to be held and is
conditional on sched_energy_enabled(). Ensure the right protections are
applied.

Also skip capacity inversion detection for our own pd, which was an
error.

Fixes: 44c7b80bffc3 ("sched/fair: Detect capacity inversion")
Reported-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Qais Yousef (Google) <qyousef@layalina.io>
---
kernel/sched/fair.c | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5a8e75d4a17b..34239d3118f0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8992,16 +8992,23 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 	 *   * Thermal pressure will impact all cpus in this perf domain
 	 *     equally.
 	 */
-	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+	if (sched_energy_enabled()) {
 		unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
-		struct perf_domain *pd = rcu_dereference(rq->rd->pd);
+		struct perf_domain *pd;
+
+		rcu_read_lock();
 
+		pd = rcu_dereference(rq->rd->pd);
 		rq->cpu_capacity_inverted = 0;
 
 		for (; pd; pd = pd->next) {
 			struct cpumask *pd_span = perf_domain_span(pd);
 			unsigned long pd_cap_orig, pd_cap;
 
+			/* We can't be inverted against our own pd */
+			if (cpumask_test_cpu(cpu_of(rq), pd_span))
+				continue;
+
 			cpu = cpumask_any(pd_span);
 			pd_cap_orig = arch_scale_cpu_capacity(cpu);
 
@@ -9026,6 +9033,8 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 				break;
 			}
 		}
+
+		rcu_read_unlock();
 	}
 
 	trace_sched_cpu_capacity_tp(rq);

--
2.25.1
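
The rule the fix enforces is standard RCU discipline: the
rcu_dereference() and the whole traversal it begins must sit inside one
read-side critical section. As a minimal illustration, here is a
userspace sketch of the same pattern built on liburcu; this
"struct perf_domain" and walk_domains() are hypothetical stand-ins for
the kernel objects, not kernel API. Build with: gcc rcu_walk.c -lurcu

/* Minimal userspace sketch (liburcu) of the pattern the patch enforces. */
#include <stdio.h>
#include <urcu.h>

struct perf_domain {			/* hypothetical stand-in, not the kernel struct */
	int id;
	struct perf_domain *next;
};

/* RCU-protected list head; a writer would publish it with rcu_assign_pointer() */
static struct perf_domain *pd_list;

static void walk_domains(int self_id)
{
	struct perf_domain *pd;

	rcu_read_lock();		/* protect the dereference AND the walk */
	for (pd = rcu_dereference(pd_list); pd; pd = rcu_dereference(pd->next)) {
		if (pd->id == self_id)	/* we can't be inverted against our own pd */
			continue;
		printf("inspecting pd %d\n", pd->id);
	}
	rcu_read_unlock();
}

int main(void)
{
	struct perf_domain b = { .id = 1, .next = NULL };
	struct perf_domain a = { .id = 0, .next = &b };

	rcu_register_thread();		/* required by the default urcu flavor */
	rcu_assign_pointer(pd_list, &a);
	walk_domains(0);		/* visits pd 1, skips our own pd 0 */
	rcu_unregister_thread();
	return 0;
}

In the hunks above, the same two rules appear as the
rcu_read_lock()/rcu_read_unlock() pair bracketing the walk of rq->rd->pd
and the cpumask_test_cpu() check that skips the CPU's own domain.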
On Thu, 12 Jan 2023 at 13:27, Qais Yousef <qyousef@layalina.io> wrote:
>
> Traversing the Perf Domains requires rcu_read_lock() to be held and is
> conditional on sched_energy_enabled(). Ensure the right protections are
> applied.
>
> Also skip capacity inversion detection for our own pd, which was an
> error.
>
> Fixes: 44c7b80bffc3 ("sched/fair: Detect capacity inversion")
> Reported-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
> Signed-off-by: Qais Yousef (Google) <qyousef@layalina.io>

Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>

> [snip]
The following commit has been merged into the sched/urgent branch of tip:

Commit-ID:     da07d2f9c153e457e845d4dcfdd13568d71d18a4
Gitweb:        https://git.kernel.org/tip/da07d2f9c153e457e845d4dcfdd13568d71d18a4
Author:        Qais Yousef <qyousef@layalina.io>
AuthorDate:    Thu, 12 Jan 2023 12:27:08
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Fri, 13 Jan 2023 11:40:21 +01:00

sched/fair: Fixes for capacity inversion detection

Traversing the Perf Domains requires rcu_read_lock() to be held and is
conditional on sched_energy_enabled(). Ensure the right protections are
applied.

Also skip capacity inversion detection for our own pd, which was an
error.

Fixes: 44c7b80bffc3 ("sched/fair: Detect capacity inversion")
Reported-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Qais Yousef (Google) <qyousef@layalina.io>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20230112122708.330667-3-qyousef@layalina.io
---
kernel/sched/fair.c | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index be43731..0f87369 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8868,16 +8868,23 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 	 *   * Thermal pressure will impact all cpus in this perf domain
 	 *     equally.
 	 */
-	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+	if (sched_energy_enabled()) {
 		unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
-		struct perf_domain *pd = rcu_dereference(rq->rd->pd);
+		struct perf_domain *pd;
+
+		rcu_read_lock();
 
+		pd = rcu_dereference(rq->rd->pd);
 		rq->cpu_capacity_inverted = 0;
 
 		for (; pd; pd = pd->next) {
 			struct cpumask *pd_span = perf_domain_span(pd);
 			unsigned long pd_cap_orig, pd_cap;
 
+			/* We can't be inverted against our own pd */
+			if (cpumask_test_cpu(cpu_of(rq), pd_span))
+				continue;
+
 			cpu = cpumask_any(pd_span);
 			pd_cap_orig = arch_scale_cpu_capacity(cpu);
 
@@ -8902,6 +8909,8 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 				break;
 			}
 		}
+
+		rcu_read_unlock();
 	}
 
 	trace_sched_cpu_capacity_tp(rq);
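
For intuition about what the loop detects, here is a worked example with
invented numbers; the check is a distillation of the surrounding logic in
update_cpu_capacity(), not a verbatim excerpt:

/* Standalone illustration of capacity inversion; all numbers invented. */
#include <stdio.h>

int main(void)
{
	unsigned long capacity_orig = 1024;	/* this CPU at full capability */
	unsigned long thermal_pressure = 300;	/* assumed thermal_load_avg() value */
	unsigned long inv_cap = capacity_orig - thermal_pressure;	/* 724 */
	unsigned long pd_cap_orig = 768;	/* a nominally smaller perf domain */

	/*
	 * Inversion: a domain with lower original capacity now outperforms
	 * this thermally throttled CPU (768 > 724).
	 */
	if (pd_cap_orig < capacity_orig && pd_cap_orig > inv_cap)
		printf("capacity inverted: inv_cap=%lu\n", inv_cap);

	return 0;
}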