From: Chen Ridong <chenridong@huawei.com>

Currently, free_cpumasks() can free both a tmpmasks structure and the
cpumasks embedded in a cpuset (cs). However, the two operations are not
logically coupled. To improve code clarity:
1. Move the freeing of the cpuset's cpumasks into free_cpuset()
2. Rename free_cpumasks() to free_tmpmasks()

This change enforces the single responsibility principle.

Signed-off-by: Chen Ridong <chenridong@huawei.com>
---
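A minimal sketch of the intended split, for reference only (not part of the
diff; the example_update_path() wrapper is hypothetical, and the
alloc_cpumasks(NULL, &tmp) / -ENOMEM convention is assumed to follow the
existing callers):

	/* Update paths allocate and free only the scratch tmpmasks. */
	static int example_update_path(void)
	{
		struct tmpmasks tmp;

		if (alloc_cpumasks(NULL, &tmp))
			return -ENOMEM;

		/* ... work with tmp.new_cpus / tmp.addmask / tmp.delmask ... */

		free_tmpmasks(&tmp);	/* frees only the temporary masks */
		return 0;
	}

The cpuset's own masks (cpus_allowed, effective_cpus, effective_xcpus,
exclusive_cpus) are now freed only in free_cpuset(), immediately before the
cpuset itself is kfree()d.
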
kernel/cgroup/cpuset.c | 32 +++++++++++++-------------------
1 file changed, 13 insertions(+), 19 deletions(-)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 3466ebbf1016..aebda14cc67f 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -459,23 +459,14 @@ static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
}
/**
- * free_cpumasks - free cpumasks in a tmpmasks structure
- * @cs: the cpuset that have cpumasks to be free.
+ * free_tmpmasks - free cpumasks in a tmpmasks structure
* @tmp: the tmpmasks structure pointer
*/
-static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
+static inline void free_tmpmasks(struct tmpmasks *tmp)
{
- if (cs) {
- free_cpumask_var(cs->cpus_allowed);
- free_cpumask_var(cs->effective_cpus);
- free_cpumask_var(cs->effective_xcpus);
- free_cpumask_var(cs->exclusive_cpus);
- }
- if (tmp) {
- free_cpumask_var(tmp->new_cpus);
- free_cpumask_var(tmp->addmask);
- free_cpumask_var(tmp->delmask);
- }
+ free_cpumask_var(tmp->new_cpus);
+ free_cpumask_var(tmp->addmask);
+ free_cpumask_var(tmp->delmask);
}
/**
@@ -508,7 +499,10 @@ static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
*/
static inline void free_cpuset(struct cpuset *cs)
{
- free_cpumasks(cs, NULL);
+ free_cpumask_var(cs->cpus_allowed);
+ free_cpumask_var(cs->effective_cpus);
+ free_cpumask_var(cs->effective_xcpus);
+ free_cpumask_var(cs->exclusive_cpus);
kfree(cs);
}
@@ -2427,7 +2421,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
if (cs->partition_root_state)
update_partition_sd_lb(cs, old_prs);
out_free:
- free_cpumasks(NULL, &tmp);
+ free_tmpmasks(&tmp);
return retval;
}
@@ -2530,7 +2524,7 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
if (cs->partition_root_state)
update_partition_sd_lb(cs, old_prs);
- free_cpumasks(NULL, &tmp);
+ free_tmpmasks(&tmp);
return 0;
}
@@ -2983,7 +2977,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
notify_partition_change(cs, old_prs);
if (force_sd_rebuild)
rebuild_sched_domains_locked();
- free_cpumasks(NULL, &tmpmask);
+ free_tmpmasks(&tmpmask);
return 0;
}
@@ -4006,7 +4000,7 @@ static void cpuset_handle_hotplug(void)
if (force_sd_rebuild)
rebuild_sched_domains_cpuslocked();
- free_cpumasks(NULL, ptmp);
+ free_tmpmasks(ptmp);
}
void cpuset_update_active_cpus(void)
--
2.34.1
On 8/18/25 2:41 AM, Chen Ridong wrote:
> From: Chen Ridong <chenridong@huawei.com>
>
> Currently, free_cpumasks() can free both a tmpmasks structure and the
> cpumasks embedded in a cpuset (cs). However, the two operations are not
> logically coupled. To improve code clarity:
> 1. Move the freeing of the cpuset's cpumasks into free_cpuset()
> 2. Rename free_cpumasks() to free_tmpmasks()
>
> This change enforces the single responsibility principle.
>
> Signed-off-by: Chen Ridong <chenridong@huawei.com>

Reviewed-by: Waiman Long <longman@redhat.com>