From: Gabriele Monaco <gmonaco@redhat.com>

update_unbound_workqueue_cpumask() updates unbound workqueue settings
when there's a change in isolated CPUs, but it can also be used by other
subsystems that require updates when isolated CPUs change.

Generalise the name to update_isolation_cpumasks() to prepare for other
functions unrelated to workqueues to be called in that spot.

[longman: Change the function name to update_isolation_cpumasks()]

Acked-by: Frederic Weisbecker <frederic@kernel.org>
Acked-by: Waiman Long <longman@redhat.com>
Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
Signed-off-by: Waiman Long <longman@redhat.com>
---
 kernel/cgroup/cpuset.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 7aef59ea9627..da770dac955e 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1393,7 +1393,7 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
 	return isolcpus_updated;
 }
 
-static void update_unbound_workqueue_cpumask(bool isolcpus_updated)
+static void update_isolation_cpumasks(bool isolcpus_updated)
 {
 	int ret;
 
@@ -1557,7 +1557,7 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
 	list_add(&cs->remote_sibling, &remote_children);
 	cpumask_copy(cs->effective_xcpus, tmp->new_cpus);
 	spin_unlock_irq(&callback_lock);
-	update_unbound_workqueue_cpumask(isolcpus_updated);
+	update_isolation_cpumasks(isolcpus_updated);
 	cpuset_force_rebuild();
 	cs->prs_err = 0;
 
@@ -1598,7 +1598,7 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
 	compute_excpus(cs, cs->effective_xcpus);
 	reset_partition_data(cs);
 	spin_unlock_irq(&callback_lock);
-	update_unbound_workqueue_cpumask(isolcpus_updated);
+	update_isolation_cpumasks(isolcpus_updated);
 	cpuset_force_rebuild();
 
 	/*
@@ -1667,7 +1667,7 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
 	if (xcpus)
 		cpumask_copy(cs->exclusive_cpus, xcpus);
 	spin_unlock_irq(&callback_lock);
-	update_unbound_workqueue_cpumask(isolcpus_updated);
+	update_isolation_cpumasks(isolcpus_updated);
 	if (adding || deleting)
 		cpuset_force_rebuild();
 
@@ -2011,7 +2011,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
 					       tmp->delmask);
 
 	spin_unlock_irq(&callback_lock);
-	update_unbound_workqueue_cpumask(isolcpus_updated);
+	update_isolation_cpumasks(isolcpus_updated);
 
 	if ((old_prs != new_prs) && (cmd == partcmd_update))
 		update_partition_exclusive_flag(cs, new_prs);
@@ -3029,7 +3029,7 @@ static int update_prstate(struct cpuset *cs, int new_prs)
 	else if (isolcpus_updated)
 		isolated_cpus_update(old_prs, new_prs, cs->effective_xcpus);
 	spin_unlock_irq(&callback_lock);
-	update_unbound_workqueue_cpumask(isolcpus_updated);
+	update_isolation_cpumasks(isolcpus_updated);
 
 	/* Force update if switching back to member & update effective_xcpus */
 	update_cpumasks_hier(cs, &tmpmask, !new_prs);
-- 
2.51.1
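
For context, the renamed helper remains a thin wrapper that pushes the
updated isolated_cpus mask to the workqueue subsystem; only its name
changes in this patch. A minimal sketch, assuming the body matches the
current upstream helper; the commented-out call at the end is a
hypothetical placeholder for the kind of non-workqueue update the new
name makes room for, not part of this patch:

static void update_isolation_cpumasks(bool isolcpus_updated)
{
	int ret;

	lockdep_assert_cpus_held();

	/* Nothing to do unless the set of isolated CPUs actually changed. */
	if (!isolcpus_updated)
		return;

	/* Exclude the isolated CPUs from unbound workqueues. */
	ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
	WARN_ON_ONCE(ret < 0);

	/*
	 * Hypothetical future hook (not in this patch), e.g.:
	 * some_other_subsystem_isolation_update(isolated_cpus);
	 */
}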
On 2025/11/3 9:34, Waiman Long wrote:
> [...]
LGTM

Reviewed-by: Chen Ridong <chenridong@huawei.com>
--
Best regards,
Ridong