From: Yicong Yang
Subject: [PATCH v7 1/2] sched: Add per_cpu cluster domain info and cpus_share_lowest_cache API
Date: Mon, 22 Aug 2022 15:36:09 +0800
Message-ID: <20220822073610.27205-2-yangyicong@huawei.com>
In-Reply-To: <20220822073610.27205-1-yangyicong@huawei.com>

From: Barry Song

Add per-cpu cluster domain info and a cpus_share_lowest_cache() API.
This is preparation for optimizing select_idle_cpu() on platforms with
a cluster scheduler level.
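As an illustration of the intended semantics (a minimal sketch, not part
of this patch; pick_wake_cpu() is a hypothetical helper, the real caller
is added to select_idle_sibling() in patch 2/2):

    /*
     * Illustration only: prefer a wake-up target that shares the
     * lowest-level cache with @prev.  On cluster machines this means
     * the same cluster (LLC tag or L2); on machines without clusters
     * it is equivalent to cpus_share_cache().
     */
    static int pick_wake_cpu(int target, int prev)
    {
            if (cpus_share_lowest_cache(prev, target))
                    return prev;    /* migration stays cache-hot */

            return target;
    }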
Tested-by: K Prateek Nayak
Signed-off-by: Barry Song
Signed-off-by: Yicong Yang
Reviewed-by: Gautham R. Shenoy
Reviewed-by: Tim Chen
Reviewed-by: Vincent Guittot
---
 include/linux/sched/sd_flags.h |  7 +++++++
 include/linux/sched/topology.h |  8 +++++++-
 kernel/sched/core.c            | 12 ++++++++++++
 kernel/sched/sched.h           |  2 ++
 kernel/sched/topology.c        | 15 +++++++++++++++
 5 files changed, 43 insertions(+), 1 deletion(-)

diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h
index 57bde66d95f7..42ed454e8b18 100644
--- a/include/linux/sched/sd_flags.h
+++ b/include/linux/sched/sd_flags.h
@@ -109,6 +109,13 @@ SD_FLAG(SD_ASYM_CPUCAPACITY_FULL, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
  */
 SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
 
+/*
+ * Domain members share CPU cluster resources (LLC tags or L2 cache)
+ *
+ * NEEDS_GROUPS: Clusters are shared between groups.
+ */
+SD_FLAG(SD_CLUSTER, SDF_NEEDS_GROUPS)
+
 /*
  * Domain members share CPU package resources (i.e. caches)
  *
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 816df6cc444e..c0d21667ddf3 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -45,7 +45,7 @@ static inline int cpu_smt_flags(void)
 #ifdef CONFIG_SCHED_CLUSTER
 static inline int cpu_cluster_flags(void)
 {
-	return SD_SHARE_PKG_RESOURCES;
+	return SD_CLUSTER | SD_SHARE_PKG_RESOURCES;
 }
 #endif
 
@@ -179,6 +179,7 @@ cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
 
 bool cpus_share_cache(int this_cpu, int that_cpu);
+bool cpus_share_lowest_cache(int this_cpu, int that_cpu);
 
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 typedef int (*sched_domain_flags_f)(void);
@@ -232,6 +233,11 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
 	return true;
 }
 
+static inline bool cpus_share_lowest_cache(int this_cpu, int that_cpu)
+{
+	return true;
+}
+
 #endif /* !CONFIG_SMP */
 
 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ee28253c9ac0..2d647598d26c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3802,6 +3802,18 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 }
 
+/*
+ * Whether CPUs share the lowest cache, which means the LLC on non-cluster
+ * machines and the LLC tag or L2 on machines with clusters.
+ */
+bool cpus_share_lowest_cache(int this_cpu, int that_cpu)
+{
+	if (this_cpu == that_cpu)
+		return true;
+
+	return per_cpu(sd_lowest_cache_id, this_cpu) == per_cpu(sd_lowest_cache_id, that_cpu);
+}
+
 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
 {
 	/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e26688d387ae..e9f0935605e2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1809,7 +1809,9 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_size);
 DECLARE_PER_CPU(int, sd_llc_id);
+DECLARE_PER_CPU(int, sd_lowest_cache_id);
 DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_cluster);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 8739c2a5a54e..8ab27c0d6d1f 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -664,6 +664,8 @@ static void destroy_sched_domains(struct sched_domain *sd)
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
 DEFINE_PER_CPU(int, sd_llc_size);
 DEFINE_PER_CPU(int, sd_llc_id);
+DEFINE_PER_CPU(int, sd_lowest_cache_id);
+DEFINE_PER_CPU(struct sched_domain __rcu *, sd_cluster);
 DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
@@ -689,6 +691,18 @@ static void update_top_cache_domain(int cpu)
 	per_cpu(sd_llc_id, cpu) = id;
 	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
 
+	sd = lowest_flag_domain(cpu, SD_CLUSTER);
+	if (sd)
+		id = cpumask_first(sched_domain_span(sd));
+	rcu_assign_pointer(per_cpu(sd_cluster, cpu), sd);
+
+	/*
+	 * This assignment should be placed after the sd_llc_id assignment
+	 * as we want this id to equal the cluster id on cluster machines
+	 * but the LLC id on non-cluster machines.
+	 */
+	per_cpu(sd_lowest_cache_id, cpu) = id;
+
 	sd = lowest_flag_domain(cpu, SD_NUMA);
 	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
 
@@ -1532,6 +1546,7 @@ static struct cpumask ***sched_domains_numa_masks;
  */
 #define TOPOLOGY_SD_FLAGS		\
 	(SD_SHARE_CPUCAPACITY	|	\
+	 SD_CLUSTER		|	\
 	 SD_SHARE_PKG_RESOURCES |	\
 	 SD_NUMA		|	\
 	 SD_ASYM_PACKING)
-- 
2.24.0
From: Yicong Yang
Subject: [PATCH v7 2/2] sched/fair: Scan cluster before scanning LLC in wake-up path
Date: Mon, 22 Aug 2022 15:36:10 +0800
Message-ID: <20220822073610.27205-3-yangyicong@huawei.com>
In-Reply-To: <20220822073610.27205-1-yangyicong@huawei.com>

From: Barry Song

For platforms with clusters, such as Kunpeng920, CPUs within the same
cluster have lower latency when synchronizing and accessing shared
resources such as the cache. This patch therefore tries to find an idle
CPU within the cluster of the target CPU before scanning the whole LLC,
to gain lower latency.
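In outline, the wake-up scan order becomes the following (simplified
sketch for illustration only; is_idle() stands in for the real
__select_idle_cpu()/select_idle_core() checks, and the has_idle_core
and nr-throttling details appear only in the actual hunk below):

    /* Sketch of the new scan order in select_idle_cpu(): */
    if (static_branch_unlikely(&sched_cluster_active)) {
            /* 1. Scan the target's cluster first: wake-ups stay L2-hot. */
            for_each_cpu_wrap(cpu, sched_domain_span(sdc), target + 1)
                    if (is_idle(cpu))
                            return cpu;
            /* 2. Drop the already-scanned cluster CPUs from the mask... */
            cpumask_andnot(cpus, cpus, sched_domain_span(sdc));
    }
    /* 3. ...then fall back to scanning the rest of the LLC as before. */
    for_each_cpu_wrap(cpu, cpus, target + 1)
            if (is_idle(cpu))
                    return cpu;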
Testing has been done on Kunpeng920 by pinning tasks to one NUMA node
and to two NUMA nodes. On Kunpeng920, each NUMA node has 8 clusters and
each cluster has 4 CPUs. With this patch, we observed an improvement on
tbench both within one NUMA node and across two NUMA nodes.

On numa 0:
                           6.0-rc1                patched
Hmean     1        351.20 (   0.00%)      396.45 *  12.88%*
Hmean     2        700.43 (   0.00%)      793.76 *  13.32%*
Hmean     4       1404.42 (   0.00%)     1583.62 *  12.76%*
Hmean     8       2833.31 (   0.00%)     3147.85 *  11.10%*
Hmean     16      5501.90 (   0.00%)     6089.89 *  10.69%*
Hmean     32     10428.59 (   0.00%)    10619.63 *   1.83%*
Hmean     64      8223.39 (   0.00%)     8306.93 *   1.02%*
Hmean     128     7042.88 (   0.00%)     7068.03 *   0.36%*

On numa 0-1:
                           6.0-rc1                patched
Hmean     1        363.06 (   0.00%)      397.13 *   9.38%*
Hmean     2        721.68 (   0.00%)      789.84 *   9.44%*
Hmean     4       1435.15 (   0.00%)     1566.01 *   9.12%*
Hmean     8       2776.17 (   0.00%)     3007.05 *   8.32%*
Hmean     16      5471.71 (   0.00%)     6103.91 *  11.55%*
Hmean     32     10164.98 (   0.00%)    11531.81 *  13.45%*
Hmean     64     17143.28 (   0.00%)    20078.68 *  17.12%*
Hmean     128    14552.70 (   0.00%)    15156.41 *   4.15%*
Hmean     256    12827.37 (   0.00%)    13326.86 *   3.89%*

Note that neither Kunpeng920 nor x86 Jacobsville supports SMT, so the
SMT branch in the code has not been tested, but it is supposed to work.

Suggested-by: Peter Zijlstra [https://lore.kernel.org/lkml/Ytfjs+m1kUs0ScSn@worktop.programming.kicks-ass.net]
Tested-by: Yicong Yang
Signed-off-by: Barry Song
Signed-off-by: Yicong Yang
Reviewed-by: Tim Chen
Reviewed-by: Chen Yu
Tested-by: K Prateek Nayak
---
 kernel/sched/fair.c     | 30 +++++++++++++++++++++++++++---
 kernel/sched/sched.h    |  2 ++
 kernel/sched/topology.c | 10 ++++++++++
 3 files changed, 39 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 914096c5b1ae..6fa77610d0f5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6437,6 +6437,30 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
 		}
 	}
 
+	if (static_branch_unlikely(&sched_cluster_active)) {
+		struct sched_domain *sdc = rcu_dereference(per_cpu(sd_cluster, target));
+
+		if (sdc) {
+			for_each_cpu_wrap(cpu, sched_domain_span(sdc), target + 1) {
+				if (!cpumask_test_cpu(cpu, cpus))
+					continue;
+
+				if (has_idle_core) {
+					i = select_idle_core(p, cpu, cpus, &idle_cpu);
+					if ((unsigned int)i < nr_cpumask_bits)
+						return i;
+				} else {
+					if (--nr <= 0)
+						return -1;
+					idle_cpu = __select_idle_cpu(cpu, p);
+					if ((unsigned int)idle_cpu < nr_cpumask_bits)
+						return idle_cpu;
+				}
+			}
+			cpumask_andnot(cpus, cpus, sched_domain_span(sdc));
+		}
+	}
+
 	for_each_cpu_wrap(cpu, cpus, target + 1) {
 		if (has_idle_core) {
 			i = select_idle_core(p, cpu, cpus, &idle_cpu);
@@ -6444,7 +6468,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
 				return i;
 
 		} else {
-			if (!--nr)
+			if (--nr <= 0)
 				return -1;
 			idle_cpu = __select_idle_cpu(cpu, p);
 			if ((unsigned int)idle_cpu < nr_cpumask_bits)
@@ -6543,7 +6567,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	/*
 	 * If the previous CPU is cache affine and idle, don't be stupid:
 	 */
-	if (prev != target && cpus_share_cache(prev, target) &&
+	if (prev != target && cpus_share_lowest_cache(prev, target) &&
 	    (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
 	    asym_fits_capacity(task_util, prev))
 		return prev;
@@ -6569,7 +6593,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 		p->recent_used_cpu = prev;
 		if (recent_used_cpu != prev &&
 		    recent_used_cpu != target &&
-		    cpus_share_cache(recent_used_cpu, target) &&
+		    cpus_share_lowest_cache(recent_used_cpu, target) &&
 		    (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
 		    cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) &&
 		    asym_fits_capacity(task_util, recent_used_cpu)) {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e9f0935605e2..60e8a91e29d1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1815,7 +1815,9 @@ DECLARE_PER_CPU(struct sched_domain __rcu *, sd_cluster);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
+
 extern struct static_key_false sched_asym_cpucapacity;
+extern struct static_key_false sched_cluster_active;
 
 struct sched_group_capacity {
 	atomic_t		ref;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 8ab27c0d6d1f..04ead3227201 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -670,7 +670,9 @@ DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
+
 DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
+DEFINE_STATIC_KEY_FALSE(sched_cluster_active);
 
 static void update_top_cache_domain(int cpu)
 {
@@ -2268,6 +2270,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
 	struct rq *rq = NULL;
 	int i, ret = -ENOMEM;
 	bool has_asym = false;
+	bool has_cluster = false;
 
 	if (WARN_ON(cpumask_empty(cpu_map)))
 		goto error;
@@ -2289,6 +2292,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
 			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
 
 			has_asym |= sd->flags & SD_ASYM_CPUCAPACITY;
+			has_cluster |= sd->flags & SD_CLUSTER;
 
 			if (tl == sched_domain_topology)
 				*per_cpu_ptr(d.sd, i) = sd;
@@ -2399,6 +2403,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
 	if (has_asym)
 		static_branch_inc_cpuslocked(&sched_asym_cpucapacity);
 
+	if (has_cluster)
+		static_branch_inc_cpuslocked(&sched_cluster_active);
+
 	if (rq && sched_debug_verbose) {
 		pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
 			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
@@ -2498,6 +2505,9 @@ static void detach_destroy_domains(const struct cpumask *cpu_map)
 	if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
 		static_branch_dec_cpuslocked(&sched_asym_cpucapacity);
 
+	if (rcu_access_pointer(per_cpu(sd_cluster, cpu)))
+		static_branch_dec_cpuslocked(&sched_cluster_active);
+
 	rcu_read_lock();
 	for_each_cpu(i, cpu_map)
 		cpu_attach_domain(NULL, &def_root_domain, i);
-- 
2.24.0