Introduce a new migration type, migrate_llc_task, to support
cache-aware load balancing.
After identifying the busiest sched_group (the one with the most tasks
preferring the destination LLC), mark migrations from that group with
this type.
During load balancing, each runqueue in the busiest sched_group is
examined, and the runqueue with the highest number of tasks preferring
the destination CPU's LLC is selected as the busiest runqueue.
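To make the selection rule concrete, below is a minimal, illustration-only
userspace sketch of "pick the source runqueue holding the most tasks that
prefer the destination CPU's LLC". struct rq_stub, NR_LLCS, pick_src_rq()
and the sample counts are invented for this example and are not the kernel
data structures touched by this patch:

#include <stdio.h>

#define NR_LLCS 2

struct rq_stub {
	int cpu;
	unsigned int nr_pref_llc[NR_LLCS];	/* tasks preferring each LLC */
};

/* Return the runqueue holding the most tasks that prefer dst_llc. */
static struct rq_stub *pick_src_rq(struct rq_stub *rqs, int nr, int dst_llc)
{
	struct rq_stub *busiest = NULL;
	unsigned int busiest_pref_llc = 0;
	int i;

	for (i = 0; i < nr; i++) {
		if (busiest_pref_llc < rqs[i].nr_pref_llc[dst_llc]) {
			busiest_pref_llc = rqs[i].nr_pref_llc[dst_llc];
			busiest = &rqs[i];
		}
	}
	return busiest;
}

int main(void)
{
	struct rq_stub rqs[] = {
		{ .cpu = 8,  .nr_pref_llc = { 1, 3 } },
		{ .cpu = 9,  .nr_pref_llc = { 0, 5 } },
		{ .cpu = 10, .nr_pref_llc = { 2, 2 } },
	};
	int dst_llc = 1;	/* LLC id of the destination CPU */
	struct rq_stub *src = pick_src_rq(rqs, 3, dst_llc);

	if (src)
		printf("pull from CPU %d (%u tasks prefer LLC %d)\n",
		       src->cpu, src->nr_pref_llc[dst_llc], dst_llc);
	return 0;
}

With the sample data above the sketch picks CPU 9, which holds five tasks
preferring LLC 1.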
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
---
Notes:
v1->v2: Remove unnecessary cpus_share_cache() check in
sched_balance_find_src_rq() (K Prateek Nayak)
kernel/sched/fair.c | 32 +++++++++++++++++++++++++++++++-
1 file changed, 31 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index db555c11b5b8..529adf342ce0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9547,7 +9547,8 @@ enum migration_type {
 	migrate_load = 0,
 	migrate_util,
 	migrate_task,
-	migrate_misfit
+	migrate_misfit,
+	migrate_llc_task
 };
 
 #define LBF_ALL_PINNED	0x01
@@ -10134,6 +10135,10 @@ static int detach_tasks(struct lb_env *env)
 			env->imbalance -= util;
 			break;
 
+		case migrate_llc_task:
+			env->imbalance--;
+			break;
+
 		case migrate_task:
 			env->imbalance--;
 			break;
@@ -11766,6 +11771,15 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 		return;
 	}
 
+#ifdef CONFIG_SCHED_CACHE
+	if (busiest->group_type == group_llc_balance) {
+		/* Move a task that prefers the local LLC */
+		env->migration_type = migrate_llc_task;
+		env->imbalance = 1;
+		return;
+	}
+#endif
+
 	if (busiest->group_type == group_imbalanced) {
 		/*
 		 * In the group_imb case we cannot rely on group-wide averages
@@ -12073,6 +12087,10 @@ static struct rq *sched_balance_find_src_rq(struct lb_env *env,
 	struct rq *busiest = NULL, *rq;
 	unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1;
 	unsigned int busiest_nr = 0;
+#ifdef CONFIG_SCHED_CACHE
+	unsigned int busiest_pref_llc = 0;
+	int dst_llc;
+#endif
 	int i;
 
 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
@@ -12181,6 +12199,16 @@ static struct rq *sched_balance_find_src_rq(struct lb_env *env,
 			}
 			break;
 
+		case migrate_llc_task:
+#ifdef CONFIG_SCHED_CACHE
+			dst_llc = llc_id(env->dst_cpu);
+			if (dst_llc >= 0 &&
+			    busiest_pref_llc < rq->nr_pref_llc[dst_llc]) {
+				busiest_pref_llc = rq->nr_pref_llc[dst_llc];
+				busiest = rq;
+			}
+#endif
+			break;
 		case migrate_task:
 			if (busiest_nr < nr_running) {
 				busiest_nr = nr_running;
@@ -12363,6 +12391,8 @@ static void update_lb_imbalance_stat(struct lb_env *env, struct sched_domain *sd,
 	case migrate_misfit:
 		__schedstat_add(sd->lb_imbalance_misfit[idle], env->imbalance);
 		break;
+	case migrate_llc_task:
+		break;
 	}
 }
--
2.32.0