From: Yicong Yang
Subject: [PATCH v6 1/2] sched: Add per_cpu cluster domain info and cpus_share_lowest_cache API
Date: Tue, 26 Jul 2022 15:47:57 +0800
Message-ID: <20220726074758.46686-2-yangyicong@huawei.com>
In-Reply-To: <20220726074758.46686-1-yangyicong@huawei.com>
References: <20220726074758.46686-1-yangyicong@huawei.com>

From: Barry Song

Add per-cpu cluster domain info and a cpus_share_lowest_cache() API.
This prepares for optimizing select_idle_cpu() on platforms with a
cluster scheduler level.
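
To illustrate the intended semantics, a minimal userspace C model
follows (an illustrative sketch only, not the kernel code: the
topology is made up -- 8 CPUs, clusters of 4 sharing L2, one LLC of
8 -- and the plain array stands in for the per-cpu
sd_lowest_cache_id variable):

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_CPUS		8
	#define CLUSTER_SIZE	4	/* assumed cluster width */
	#define LLC_SIZE	8	/* assumed LLC width */

	static int sd_lowest_cache_id[NR_CPUS];

	/*
	 * Model of update_top_cache_domain(): record the first CPU of
	 * the lowest shared-cache span -- the cluster span when a
	 * cluster level exists, the LLC span otherwise.
	 */
	static void update_lowest_cache_id(int cpu, bool has_cluster)
	{
		int span = has_cluster ? CLUSTER_SIZE : LLC_SIZE;

		sd_lowest_cache_id[cpu] = cpu / span * span;
	}

	static bool cpus_share_lowest_cache(int this_cpu, int that_cpu)
	{
		if (this_cpu == that_cpu)
			return true;

		return sd_lowest_cache_id[this_cpu] == sd_lowest_cache_id[that_cpu];
	}

	int main(void)
	{
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			update_lowest_cache_id(cpu, true);

		/* CPUs 0 and 3 share a cluster; 0 and 4 only share the LLC */
		printf("0,3: %d\n", cpus_share_lowest_cache(0, 3));	/* prints 1 */
		printf("0,4: %d\n", cpus_share_lowest_cache(0, 4));	/* prints 0 */
		return 0;
	}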

Tested-by: K Prateek Nayak
Signed-off-by: Barry Song
Signed-off-by: Yicong Yang
Reviewed-by: Gautham R. Shenoy
Reviewed-by: Tim Chen
Reviewed-by: Vincent Guittot
---
 include/linux/sched/sd_flags.h |  7 +++++++
 include/linux/sched/topology.h |  8 +++++++-
 kernel/sched/core.c            | 12 ++++++++++++
 kernel/sched/sched.h           |  2 ++
 kernel/sched/topology.c        | 15 +++++++++++++++
 5 files changed, 43 insertions(+), 1 deletion(-)

diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h
index 57bde66d95f7..42ed454e8b18 100644
--- a/include/linux/sched/sd_flags.h
+++ b/include/linux/sched/sd_flags.h
@@ -109,6 +109,13 @@ SD_FLAG(SD_ASYM_CPUCAPACITY_FULL, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
  */
 SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
 
+/*
+ * Domain members share CPU cluster (LLC tags or L2 cache)
+ *
+ * NEEDS_GROUPS: Clusters are shared between groups.
+ */
+SD_FLAG(SD_CLUSTER, SDF_NEEDS_GROUPS)
+
 /*
  * Domain members share CPU package resources (i.e. caches)
  *
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 816df6cc444e..c0d21667ddf3 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -45,7 +45,7 @@ static inline int cpu_smt_flags(void)
 #ifdef CONFIG_SCHED_CLUSTER
 static inline int cpu_cluster_flags(void)
 {
-	return SD_SHARE_PKG_RESOURCES;
+	return SD_CLUSTER | SD_SHARE_PKG_RESOURCES;
 }
 #endif
 
@@ -179,6 +179,7 @@ cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
 void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
 
 bool cpus_share_cache(int this_cpu, int that_cpu);
+bool cpus_share_lowest_cache(int this_cpu, int that_cpu);
 
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 typedef int (*sched_domain_flags_f)(void);
@@ -232,6 +233,11 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
 	return true;
 }
 
+static inline bool cpus_share_lowest_cache(int this_cpu, int that_cpu)
+{
+	return true;
+}
+
 #endif /* !CONFIG_SMP */
 
 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a463dbc92fcd..96109ad82694 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3802,6 +3802,18 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 }
 
+/*
+ * Whether the CPUs share the lowest cache, meaning the LLC on
+ * non-cluster machines and the LLC tag or L2 on machines with clusters.
+ */
+bool cpus_share_lowest_cache(int this_cpu, int that_cpu)
+{
+	if (this_cpu == that_cpu)
+		return true;
+
+	return per_cpu(sd_lowest_cache_id, this_cpu) == per_cpu(sd_lowest_cache_id, that_cpu);
+}
+
 static inline bool ttwu_queue_cond(int cpu)
 {
 	/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 73ae32898f25..845cd029d572 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1802,7 +1802,9 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
 DECLARE_PER_CPU(int, sd_llc_size);
 DECLARE_PER_CPU(int, sd_llc_id);
+DECLARE_PER_CPU(int, sd_lowest_cache_id);
 DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
+DECLARE_PER_CPU(struct sched_domain __rcu *, sd_cluster);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 8739c2a5a54e..8ab27c0d6d1f 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -664,6 +664,8 @@ static void destroy_sched_domains(struct sched_domain *sd)
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
 DEFINE_PER_CPU(int, sd_llc_size);
 DEFINE_PER_CPU(int, sd_llc_id);
+DEFINE_PER_CPU(int, sd_lowest_cache_id);
+DEFINE_PER_CPU(struct sched_domain __rcu *, sd_cluster);
 DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
@@ -689,6 +691,18 @@ static void update_top_cache_domain(int cpu)
 	per_cpu(sd_llc_id, cpu) = id;
 	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
 
+	sd = lowest_flag_domain(cpu, SD_CLUSTER);
+	if (sd)
+		id = cpumask_first(sched_domain_span(sd));
+	rcu_assign_pointer(per_cpu(sd_cluster, cpu), sd);
+
+	/*
+	 * This assignment must come after the sd_llc_id one: we want
+	 * this id to equal the cluster id on cluster machines but the
+	 * LLC id on non-cluster machines.
+	 */
+	per_cpu(sd_lowest_cache_id, cpu) = id;
+
 	sd = lowest_flag_domain(cpu, SD_NUMA);
 	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
 
@@ -1532,6 +1546,7 @@ static struct cpumask ***sched_domains_numa_masks;
  */
 #define TOPOLOGY_SD_FLAGS		\
 	(SD_SHARE_CPUCAPACITY	|	\
+	 SD_CLUSTER		|	\
 	 SD_SHARE_PKG_RESOURCES |	\
 	 SD_NUMA		|	\
 	 SD_ASYM_PACKING)
-- 
2.24.0
From: Yicong Yang
Subject: [PATCH v6 2/2] sched/fair: Scan cluster before scanning LLC in wake-up path
Date: Tue, 26 Jul 2022 15:47:58 +0800
Message-ID: <20220726074758.46686-3-yangyicong@huawei.com>
In-Reply-To: <20220726074758.46686-1-yangyicong@huawei.com>
References: <20220726074758.46686-1-yangyicong@huawei.com>

From: Barry Song

For platforms with clusters, such as Kunpeng920, CPUs within the same
cluster have lower latency when synchronizing and accessing shared
resources such as the cache. This patch therefore tries to find an
idle CPU within the cluster of the target CPU before scanning the
whole LLC, to reduce wake-up latency.
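
The scan order can be sketched in plain C (a userspace model under an
assumed topology -- 8 CPUs in one LLC, clusters of 4; scan_span() and
the idle[] array are inventions of this sketch, not kernel APIs):

	#include <stdbool.h>
	#include <stdio.h>

	#define LLC_SIZE	8
	#define CLUSTER_SIZE	4

	static bool idle[LLC_SIZE];

	/* scan @span_size CPUs from @span_start, wrapping from target + 1 */
	static int scan_span(int target, int span_start, int span_size,
			     const bool *skip)
	{
		for (int i = 1; i <= span_size; i++) {
			int cpu = span_start + (target - span_start + i) % span_size;

			if (skip && skip[cpu])
				continue;
			if (idle[cpu])
				return cpu;
		}
		return -1;
	}

	static int select_idle_cpu(int target)
	{
		int cluster_start = target / CLUSTER_SIZE * CLUSTER_SIZE;
		bool in_cluster[LLC_SIZE] = { false };
		int cpu;

		/* pass 1: the target's cluster, the cheapest CPUs to wake on */
		cpu = scan_span(target, cluster_start, CLUSTER_SIZE, NULL);
		if (cpu >= 0)
			return cpu;

		/* pass 2: the rest of the LLC, minus the cluster already
		 * scanned -- the analogue of the cpumask_andnot() below */
		for (int i = 0; i < CLUSTER_SIZE; i++)
			in_cluster[cluster_start + i] = true;

		return scan_span(target, 0, LLC_SIZE, in_cluster);
	}

	int main(void)
	{
		idle[6] = true;		/* only CPU 6 is idle */
		printf("picked CPU %d for target 1\n", select_idle_cpu(1));
		return 0;
	}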

Testing has been done on Kunpeng920 by pinning tasks to one NUMA node
and to two NUMA nodes. On Kunpeng920, each NUMA node has 8 clusters,
and each cluster has 4 CPUs. With this patch, we see tbench gains
both within one NUMA node and across two NUMA nodes.

On numa 0:
                            tip/sched/core             patched
Hmean     1        345.89 (   0.00%)      398.43 *  15.19%*
Hmean     2        697.77 (   0.00%)      794.40 *  13.85%*
Hmean     4       1392.51 (   0.00%)     1577.60 *  13.29%*
Hmean     8       2800.61 (   0.00%)     3118.38 *  11.35%*
Hmean     16      5514.27 (   0.00%)     6124.51 *  11.07%*
Hmean     32     10869.81 (   0.00%)    10690.97 *  -1.65%*
Hmean     64      8315.22 (   0.00%)     8520.73 *   2.47%*
Hmean     128     6324.47 (   0.00%)     7253.65 *  14.69%*

On numa 0-1:
                            tip/sched/core             patched
Hmean     1        348.68 (   0.00%)      397.74 *  14.07%*
Hmean     2        693.57 (   0.00%)      795.54 *  14.70%*
Hmean     4       1369.26 (   0.00%)     1548.72 *  13.11%*
Hmean     8       2772.99 (   0.00%)     3055.54 *  10.19%*
Hmean     16      4825.83 (   0.00%)     5936.64 *  23.02%*
Hmean     32     10250.32 (   0.00%)    11780.59 *  14.93%*
Hmean     64     16309.51 (   0.00%)    19864.38 *  21.80%*
Hmean     128    13022.32 (   0.00%)    16365.43 *  25.67%*
Hmean     256    11335.79 (   0.00%)    13991.33 *  23.43%*

Note that neither Kunpeng920 nor x86 Jacobsville supports SMT, so the
SMT branch in the code has not been tested, but it is supposed to work.
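
The cluster scan is gated by the new sched_cluster_active static key:
build_sched_domains() increments it once per root domain that has a
SD_CLUSTER level, and detach_destroy_domains() decrements it when such
a domain is torn down. A plain-counter model of that gating (userspace
sketch only; the real static key patches the branch in the instruction
stream rather than testing a counter):

	#include <stdbool.h>
	#include <stdio.h>

	static int sched_cluster_active;	/* static key analogue */

	static void build_sched_domains(bool has_cluster)
	{
		if (has_cluster)
			sched_cluster_active++;	/* static_branch_inc_cpuslocked() */
	}

	static void detach_destroy_domains(bool had_cluster)
	{
		if (had_cluster)
			sched_cluster_active--;	/* static_branch_dec_cpuslocked() */
	}

	static bool cluster_scan_enabled(void)
	{
		/* static_branch_unlikely(&sched_cluster_active) analogue:
		 * enabled while any root domain has a cluster level */
		return sched_cluster_active > 0;
	}

	int main(void)
	{
		build_sched_domains(true);	/* one root domain with clusters */
		printf("enabled: %d\n", cluster_scan_enabled());	/* 1 */
		detach_destroy_domains(true);
		printf("enabled: %d\n", cluster_scan_enabled());	/* 0 */
		return 0;
	}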

Suggested-by: Peter Zijlstra [https://lore.kernel.org/lkml/Ytfjs+m1kUs0ScSn@worktop.programming.kicks-ass.net]
Tested-by: Yicong Yang
Signed-off-by: Barry Song
Signed-off-by: Yicong Yang
Reviewed-by: Tim Chen
---
 kernel/sched/fair.c     | 31 ++++++++++++++++++++++++++++---
 kernel/sched/sched.h    |  2 ++
 kernel/sched/topology.c | 10 ++++++++++
 3 files changed, 40 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2fc47257ae91..1100fbf7baaf 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6437,6 +6437,31 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
 		}
 	}
 
+	if (IS_ENABLED(CONFIG_SCHED_CLUSTER) &&
+	    static_branch_unlikely(&sched_cluster_active)) {
+		struct sched_domain *sdc = rcu_dereference(per_cpu(sd_cluster, target));
+
+		if (sdc) {
+			for_each_cpu_wrap(cpu, sched_domain_span(sdc), target + 1) {
+				if (!cpumask_test_cpu(cpu, cpus))
+					continue;
+
+				if (has_idle_core) {
+					i = select_idle_core(p, cpu, cpus, &idle_cpu);
+					if ((unsigned int)i < nr_cpumask_bits)
+						return i;
+				} else {
+					if (--nr <= 0)
+						return -1;
+					idle_cpu = __select_idle_cpu(cpu, p);
+					if ((unsigned int)idle_cpu < nr_cpumask_bits)
+						return idle_cpu;
+				}
+			}
+			cpumask_andnot(cpus, cpus, sched_domain_span(sdc));
+		}
+	}
+
 	for_each_cpu_wrap(cpu, cpus, target + 1) {
 		if (has_idle_core) {
 			i = select_idle_core(p, cpu, cpus, &idle_cpu);
@@ -6444,7 +6469,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
 			return i;
 
 		} else {
-			if (!--nr)
+			if (--nr <= 0)
 				return -1;
 			idle_cpu = __select_idle_cpu(cpu, p);
 			if ((unsigned int)idle_cpu < nr_cpumask_bits)
@@ -6543,7 +6568,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 	/*
 	 * If the previous CPU is cache affine and idle, don't be stupid:
 	 */
-	if (prev != target && cpus_share_cache(prev, target) &&
+	if (prev != target && cpus_share_lowest_cache(prev, target) &&
 	    (available_idle_cpu(prev) || sched_idle_cpu(prev)) &&
 	    asym_fits_capacity(task_util, prev))
 		return prev;
@@ -6569,7 +6594,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
 		p->recent_used_cpu = prev;
 		if (recent_used_cpu != prev &&
 		    recent_used_cpu != target &&
-		    cpus_share_cache(recent_used_cpu, target) &&
+		    cpus_share_lowest_cache(recent_used_cpu, target) &&
 		    (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) &&
 		    cpumask_test_cpu(p->recent_used_cpu, p->cpus_ptr) &&
 		    asym_fits_capacity(task_util, recent_used_cpu)) {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 845cd029d572..4f0d3a74a532 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1808,7 +1808,9 @@ DECLARE_PER_CPU(struct sched_domain __rcu *, sd_cluster);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
 DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
+
 extern struct static_key_false sched_asym_cpucapacity;
+extern struct static_key_false sched_cluster_active;
 
 struct sched_group_capacity {
 	atomic_t ref;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 8ab27c0d6d1f..04ead3227201 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -670,7 +670,9 @@ DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
 DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
+
 DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
+DEFINE_STATIC_KEY_FALSE(sched_cluster_active);
 
 static void update_top_cache_domain(int cpu)
 {
@@ -2268,6 +2270,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
 	struct rq *rq = NULL;
 	int i, ret = -ENOMEM;
 	bool has_asym = false;
+	bool has_cluster = false;
 
 	if (WARN_ON(cpumask_empty(cpu_map)))
 		goto error;
@@ -2289,6 +2292,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
 		sd = build_sched_domain(tl, cpu_map, attr, sd, i);
 
 		has_asym |= sd->flags & SD_ASYM_CPUCAPACITY;
+		has_cluster |= sd->flags & SD_CLUSTER;
 
 		if (tl == sched_domain_topology)
 			*per_cpu_ptr(d.sd, i) = sd;
@@ -2399,6 +2403,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
 	if (has_asym)
 		static_branch_inc_cpuslocked(&sched_asym_cpucapacity);
 
+	if (has_cluster)
+		static_branch_inc_cpuslocked(&sched_cluster_active);
+
 	if (rq && sched_debug_verbose) {
 		pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
 			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
@@ -2498,6 +2505,9 @@ static void detach_destroy_domains(const struct cpumask *cpu_map)
 	if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
 		static_branch_dec_cpuslocked(&sched_asym_cpucapacity);
 
+	if (rcu_access_pointer(per_cpu(sd_cluster, cpu)))
+		static_branch_dec_cpuslocked(&sched_cluster_active);
+
 	rcu_read_lock();
 	for_each_cpu(i, cpu_map)
 		cpu_attach_domain(NULL, &def_root_domain, i);
-- 
2.24.0