[PATCH RFC v2 8/8] sched/fair: Uncache asym_prefer_cpu and find it during update_sd_lb_stats()
On AMD processors supporting dynamic preferred core ranking, the
asym_prefer_cpu cached in sched_group can change at runtime. Since
asym_prefer_cpu is cached when the sched domain hierarchy is built,
updating the cached value across the system would require rebuilding
the sched domain hierarchy, which is prohibitively expensive.

All the asym_prefer_cpu comparisons in the load balancing path are
carried out only after the sched group stats have been updated by
iterating over all the CPUs in the group. Uncache asym_prefer_cpu and
compute it as part of sg_lb_stats while the sched group statistics are
being updated.

Fixes: f3a052391822 ("cpufreq: amd-pstate: Enable amd-pstate preferred core support")
Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com>
---
v1..v2:

o Modified the layout of "struct sg_lb_stats" to keep all the
  SD_ASYM_PACKING fields together (Srikanth)
---
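
For reviewers: the selection done by update_sg_pick_asym_prefer() below is
just a running max over per-CPU asym priorities across the group. A minimal
userspace sketch of the same pattern, purely illustrative and not kernel
code, with a made-up cpu_priority[] standing in for arch_asym_cpu_priority():

/*
 * Illustrative only: pick the "group" CPU with the highest asym priority
 * using a running max, the same pattern update_sg_pick_asym_prefer()
 * applies per CPU while update_sg_lb_stats() iterates the group.
 */
#include <limits.h>
#include <stdio.h>

static const int cpu_priority[] = { 10, 40, 40, 25 };	/* hypothetical */

int main(void)
{
	int asym_prefer_cpu = -1, highest_asym_prio = INT_MIN;

	for (int cpu = 0; cpu < 4; cpu++) {
		if (cpu_priority[cpu] > highest_asym_prio) {
			asym_prefer_cpu = cpu;
			highest_asym_prio = cpu_priority[cpu];
		}
	}

	/* Prints 1: the first CPU seen at the top priority wins ties. */
	printf("asym_prefer_cpu = %d\n", asym_prefer_cpu);
	return 0;
}
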
 kernel/sched/fair.c     | 21 +++++++++++++++++++--
 kernel/sched/sched.h    |  1 -
 kernel/sched/topology.c | 15 +--------------
 3 files changed, 20 insertions(+), 17 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f27d7d36a78d..f088dd4a527c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9920,6 +9920,8 @@ struct sg_lb_stats {
 	unsigned int group_weight;
 	enum group_type group_type;
 	unsigned int group_asym_packing;	/* Tasks should be moved to preferred CPU */
+	unsigned int asym_prefer_cpu;		/* Group CPU with highest asym priority */
+	int highest_asym_prio;			/* Asym priority of asym_prefer_cpu */
 	unsigned int group_smt_balance;		/* Task on busy SMT be moved */
 	unsigned long group_misfit_task_load;	/* A CPU has a task too big for its capacity */
 #ifdef CONFIG_NUMA_BALANCING
@@ -10249,7 +10251,7 @@ sched_group_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group
 	    (sgs->group_weight - sgs->idle_cpus != 1))
 		return false;
 
-	return sched_asym(env->sd, env->dst_cpu, group->asym_prefer_cpu);
+	return sched_asym(env->sd, env->dst_cpu, sgs->asym_prefer_cpu);
 }
 
 /* One group has more than one SMT CPU while the other group does not */
@@ -10330,6 +10332,17 @@ sched_reduced_capacity(struct rq *rq, struct sched_domain *sd)
 	return check_cpu_capacity(rq, sd);
 }
 
+static inline void
+update_sg_pick_asym_prefer(struct sg_lb_stats *sgs, int cpu)
+{
+	int asym_prio = arch_asym_cpu_priority(cpu);
+
+	if (asym_prio > sgs->highest_asym_prio) {
+		sgs->asym_prefer_cpu = cpu;
+		sgs->highest_asym_prio = asym_prio;
+	}
+}
+
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
  * @env: The load balancing environment.
@@ -10352,6 +10365,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	memset(sgs, 0, sizeof(*sgs));
 
 	local_group = group == sds->local;
+	sgs->highest_asym_prio = INT_MIN;
 
 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
@@ -10365,6 +10379,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		nr_running = rq->nr_running;
 		sgs->sum_nr_running += nr_running;
 
+		if (sd_flags & SD_ASYM_PACKING)
+			update_sg_pick_asym_prefer(sgs, i);
+
 		if (cpu_overutilized(i))
 			*sg_overutilized = 1;
 
@@ -10486,7 +10503,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 
 	case group_asym_packing:
 		/* Prefer to move from lowest priority CPU's work */
-		return sched_asym_prefer(sds->busiest->asym_prefer_cpu, sg->asym_prefer_cpu);
+		return sched_asym_prefer(busiest->asym_prefer_cpu, sgs->asym_prefer_cpu);
 
 	case group_misfit_task:
 		/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index aef716c41edb..a3f0d326bd11 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2047,7 +2047,6 @@ struct sched_group {
 	unsigned int		group_weight;
 	unsigned int		cores;
 	struct sched_group_capacity *sgc;
-	int			asym_prefer_cpu;	/* CPU of highest priority in group */
 	int			flags;
 
 	/*
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 9c405f0e7b26..20aa087710f0 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1302,7 +1302,7 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 	WARN_ON(!sg);
 
 	do {
-		int cpu, cores = 0, max_cpu = -1;
+		int cpu, cores = 0;
 
 		sg->group_weight = cpumask_weight(sched_group_span(sg));
 
@@ -1314,19 +1314,6 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 #endif
 		}
 		sg->cores = cores;
-
-		if (!(sd->flags & SD_ASYM_PACKING))
-			goto next;
-
-		for_each_cpu(cpu, sched_group_span(sg)) {
-			if (max_cpu < 0)
-				max_cpu = cpu;
-			else if (sched_asym_prefer(cpu, max_cpu))
-				max_cpu = cpu;
-		}
-		sg->asym_prefer_cpu = max_cpu;
-
-next:
 		sg = sg->next;
 	} while (sg != sd->groups);
 
-- 
2.43.0