select_idle_capacity() builds a temporary cpumask holding the intersection
of the sched domain span and the task's affinity, but never modifies that
mask afterwards. Since the intersection is only iterated over, switch to
the for_each_cpu_and_wrap() helper, which walks the intersection directly
and removes the need for the temporary cpumask.
No functional changes intended.
Cc: Yury Norov <yury.norov@gmail.com>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com>
---
kernel/sched/fair.c | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 20198aa64882..5abe0ccecef3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7704,16 +7704,12 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
unsigned long task_util, util_min, util_max, best_cap = 0;
int fits, best_fits = 0;
int cpu, best_cpu = -1;
- struct cpumask *cpus;
-
- cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
- cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
task_util = task_util_est(p);
util_min = uclamp_eff_value(p, UCLAMP_MIN);
util_max = uclamp_eff_value(p, UCLAMP_MAX);
- for_each_cpu_wrap(cpu, cpus, target) {
+ for_each_cpu_and_wrap(cpu, sched_domain_span(sd), p->cpus_ptr, target) {
unsigned long cpu_cap = capacity_of(cpu);
if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
--
2.43.0