From: Chen Ridong <chenridong@huawei.com>
Commit 6e1d31ce495c ("cpuset: separate generate_sched_domains for v1
and v2") carried code into the v1 variant that was only needed for
cpuset v2 partition domain generation. In
cpuset1_generate_sched_domains(), the single-domain special case
returns early when the root cpuset has load balancing enabled, so
root_load_balance can never be true at the later check. Drop the
cached variable and the dead check, and call
is_sched_load_balance(&top_cpuset) directly in the one place it is
still needed.
Fixes: 6e1d31ce495c ("cpuset: separate generate_sched_domains for v1 and v2")
Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
Closes: https://lore.kernel.org/cgroups/9a442808-ed53-4657-988b-882cc0014c0d@huaweicloud.com/T/
Signed-off-by: Chen Ridong <chenridong@huawei.com>
---
kernel/cgroup/cpuset-v1.c | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
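
A note for reviewers, not part of the commit message: below is a
minimal, compilable C sketch of the control flow in question. The
names are hypothetical (sched_load_balance_enabled() stands in for
is_sched_load_balance(&top_cpuset)); it only illustrates why the
second root_load_balance test could never fire in the v1 path.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for is_sched_load_balance(&top_cpuset). */
static bool sched_load_balance_enabled(void)
{
	return true;
}

static int generate_domains(void)
{
	/* Special case: one full sched domain, then bail out early. */
	if (sched_load_balance_enabled())
		return 1;

	/*
	 * Only reached when the condition above was false, so testing
	 * it again can never succeed -- this mirrors the dead
	 * root_load_balance check the patch removes.
	 */
	if (sched_load_balance_enabled())
		printf("unreachable in the v1 path\n");

	return 0;
}

int main(void)
{
	printf("%d domain(s)\n", generate_domains());
	return 0;
}
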
diff --git a/kernel/cgroup/cpuset-v1.c b/kernel/cgroup/cpuset-v1.c
index 7303315fdba7..ecfea7800f0d 100644
--- a/kernel/cgroup/cpuset-v1.c
+++ b/kernel/cgroup/cpuset-v1.c
@@ -605,7 +605,6 @@ int cpuset1_generate_sched_domains(cpumask_var_t **domains,
int ndoms = 0; /* number of sched domains in result */
int nslot; /* next empty doms[] struct cpumask slot */
struct cgroup_subsys_state *pos_css;
- bool root_load_balance = is_sched_load_balance(&top_cpuset);
 	int nslot_update;
 
 	lockdep_assert_cpuset_lock_held();
@@ -615,7 +614,7 @@ int cpuset1_generate_sched_domains(cpumask_var_t **domains,
 	csa = NULL;
 
 	/* Special case for the 99% of systems with one, full, sched domain */
- if (root_load_balance) {
+ if (is_sched_load_balance(&top_cpuset)) {
ndoms = 1;
doms = alloc_sched_domains(ndoms);
if (!doms)
@@ -638,8 +637,6 @@ int cpuset1_generate_sched_domains(cpumask_var_t **domains,
 	csn = 0;
 
 	rcu_read_lock();
- if (root_load_balance)
- csa[csn++] = &top_cpuset;
cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
if (cp == &top_cpuset)
continue;
--
2.34.1
On 12/20/25 5:15 AM, Chen Ridong wrote:
> [...]

Reviewed-by: Waiman Long <longman@redhat.com>