Currently the user can set up isolated cpus via cpuset and nohz_full in
such a way that leaves no housekeeping CPU (i.e. no CPU that is neither
domain isolated nor nohz full). This can be a problem for other
subsystems (e.g. the timer wheel migration).
Prevent this configuration by blocking any assignment that would cause
the union of domain isolated CPUs and nohz_full to cover all CPUs.
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
---
kernel/cgroup/cpuset.c | 63 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 63 insertions(+)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 81a9239053a7..3cedc3580373 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1275,6 +1275,19 @@ static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus
cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
}
+/*
+ * isolated_cpus_should_update - Returns if the isolated_cpus mask needs update
+ * @prs: new or old partition_root_state
+ * @parent: parent cpuset
+ * Return: true if isolated_cpus needs modification, false otherwise
+ */
+static bool isolated_cpus_should_update(int prs, struct cpuset *parent)
+{
+ if (!parent)
+ parent = &top_cpuset;
+ return prs != parent->partition_root_state;
+}
+
/*
* partition_xcpus_add - Add new exclusive CPUs to partition
* @new_prs: new partition_root_state
@@ -1339,6 +1352,42 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
return isolcpus_updated;
}
+/*
+ * isolated_cpus_can_update - check for isolated & nohz_full conflicts
+ * @add_cpus: cpu mask for cpus that are going to be isolated
+ * @del_cpus: cpu mask for cpus that are no longer isolated, can be NULL
+ * Return: false if there is conflict, true otherwise
+ *
+ * If nohz_full is enabled and we have isolated CPUs, their combination must
+ * still leave housekeeping CPUs.
+ */
+static bool isolated_cpus_can_update(struct cpumask *add_cpus,
+ struct cpumask *del_cpus)
+{
+ cpumask_var_t full_hk_cpus;
+ int res = true;
+
+ if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
+ return true;
+
+ if (del_cpus && cpumask_weight_and(del_cpus,
+ housekeeping_cpumask(HK_TYPE_KERNEL_NOISE)))
+ return true;
+
+ if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL))
+ return false;
+
+ cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
+ housekeeping_cpumask(HK_TYPE_DOMAIN));
+ cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus);
+ cpumask_and(full_hk_cpus, full_hk_cpus, cpu_active_mask);
+ if (!cpumask_weight_andnot(full_hk_cpus, add_cpus))
+ res = false;
+
+ free_cpumask_var(full_hk_cpus);
+ return res;
+}
+
static void update_exclusion_cpumasks(bool isolcpus_updated)
{
int ret;
@@ -1464,6 +1513,9 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) ||
cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
return PERR_INVCPUS;
+ if (isolated_cpus_should_update(new_prs, NULL) &&
+ !isolated_cpus_can_update(tmp->new_cpus, NULL))
+ return PERR_HKEEPING;
spin_lock_irq(&callback_lock);
isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
@@ -1563,6 +1615,9 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
cs->prs_err = PERR_NOCPUS;
+ else if (isolated_cpus_should_update(prs, NULL) &&
+ !isolated_cpus_can_update(tmp->addmask, tmp->delmask))
+ cs->prs_err = PERR_HKEEPING;
if (cs->prs_err)
goto invalidate;
}
@@ -1914,6 +1969,12 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
return err;
}
+ if (deleting && isolated_cpus_should_update(new_prs, parent) &&
+ !isolated_cpus_can_update(tmp->delmask, tmp->addmask)) {
+ cs->prs_err = PERR_HKEEPING;
+ return PERR_HKEEPING;
+ }
+
/*
* Change the parent's effective_cpus & effective_xcpus (top cpuset
* only).
@@ -2934,6 +2995,8 @@ static int update_prstate(struct cpuset *cs, int new_prs)
* Need to update isolated_cpus.
*/
isolcpus_updated = true;
+ if (!isolated_cpus_can_update(cs->effective_xcpus, NULL))
+ err = PERR_HKEEPING;
} else {
/*
* Switching back to member is always allowed even if it
--
2.51.0
On 2025/9/15 22:59, Gabriele Monaco wrote: > Currently the user can set up isolated cpus via cpuset and nohz_full in > such a way that leaves no housekeeping CPU (i.e. no CPU that is neither > domain isolated nor nohz full). This can be a problem for other > subsystems (e.g. the timer wheel imgration). > > Prevent this configuration by blocking any assignation that would cause > the union of domain isolated cpus and nohz_full to covers all CPUs. > > Acked-by: Frederic Weisbecker <frederic@kernel.org> > Signed-off-by: Gabriele Monaco <gmonaco@redhat.com> > --- > kernel/cgroup/cpuset.c | 63 ++++++++++++++++++++++++++++++++++++++++++ > 1 file changed, 63 insertions(+) > > diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c > index 81a9239053a7..3cedc3580373 100644 > --- a/kernel/cgroup/cpuset.c > +++ b/kernel/cgroup/cpuset.c > @@ -1275,6 +1275,19 @@ static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus > cpumask_andnot(isolated_cpus, isolated_cpus, xcpus); > } > > +/* > + * isolated_cpus_should_update - Returns if the isolated_cpus mask needs update > + * @prs: new or old partition_root_state > + * @parent: parent cpuset > + * Return: true if isolated_cpus needs modification, false otherwise > + */ > +static bool isolated_cpus_should_update(int prs, struct cpuset *parent) > +{ > + if (!parent) > + parent = &top_cpuset; > + return prs != parent->partition_root_state; > +} > + Hi all, I'm a bit confused about the logic for updating isolated CPUs. As I understand it, the isolated_cpus set should be updated in two scenarios: 1. When changing to an isolated partition. 2. When a valid isolated partition becomes invalid or changes its membership. However, I find the current approach of comparing the parent's partition_root_state with prs to determine whether to update the isolated CPUs somewhat difficult to follow. Wouldn't a more straightforward approach be something like this? 
static bool isolated_cpus_should_update(int old_prs, int new_prs) { if (old_prs == new_prs) return false; /* Changing to an isolated partition */ if (new_prs == PRS_ISOLATED) return true; /* Isolated partition changing to another state */ if (old_prs == PRS_ISOLATED) return true; return false; } I'd greatly appreciate it if someone could help clarify this. Thank you. > /* > * partition_xcpus_add - Add new exclusive CPUs to partition > * @new_prs: new partition_root_state > @@ -1339,6 +1352,42 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent, > return isolcpus_updated; > } > > +/* > + * isolated_cpus_can_update - check for isolated & nohz_full conflicts > + * @add_cpus: cpu mask for cpus that are going to be isolated > + * @del_cpus: cpu mask for cpus that are no longer isolated, can be NULL > + * Return: false if there is conflict, true otherwise > + * > + * If nohz_full is enabled and we have isolated CPUs, their combination must > + * still leave housekeeping CPUs. 
> + */ > +static bool isolated_cpus_can_update(struct cpumask *add_cpus, > + struct cpumask *del_cpus) > +{ > + cpumask_var_t full_hk_cpus; > + int res = true; > + > + if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE)) > + return true; > + > + if (del_cpus && cpumask_weight_and(del_cpus, > + housekeeping_cpumask(HK_TYPE_KERNEL_NOISE))) > + return true; > + > + if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL)) > + return false; > + > + cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE), > + housekeeping_cpumask(HK_TYPE_DOMAIN)); > + cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus); > + cpumask_and(full_hk_cpus, full_hk_cpus, cpu_active_mask); > + if (!cpumask_weight_andnot(full_hk_cpus, add_cpus)) > + res = false; > + > + free_cpumask_var(full_hk_cpus); > + return res; > +} > + > static void update_exclusion_cpumasks(bool isolcpus_updated) > { > int ret; > @@ -1464,6 +1513,9 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs, > if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) || > cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus)) > return PERR_INVCPUS; > + if (isolated_cpus_should_update(new_prs, NULL) && > + !isolated_cpus_can_update(tmp->new_cpus, NULL)) > + return PERR_HKEEPING; > > spin_lock_irq(&callback_lock); > isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus); > @@ -1563,6 +1615,9 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus, > else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) || > cpumask_subset(top_cpuset.effective_cpus, tmp->addmask)) > cs->prs_err = PERR_NOCPUS; > + else if (isolated_cpus_should_update(prs, NULL) && > + !isolated_cpus_can_update(tmp->addmask, tmp->delmask)) > + cs->prs_err = PERR_HKEEPING; > if (cs->prs_err) > goto invalidate; > } > @@ -1914,6 +1969,12 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd, > return err; > } > > + if (deleting && isolated_cpus_should_update(new_prs, parent) && > + 
!isolated_cpus_can_update(tmp->delmask, tmp->addmask)) { > + cs->prs_err = PERR_HKEEPING; > + return PERR_HKEEPING; > + } > + > /* > * Change the parent's effective_cpus & effective_xcpus (top cpuset > * only). > @@ -2934,6 +2995,8 @@ static int update_prstate(struct cpuset *cs, int new_prs) > * Need to update isolated_cpus. > */ > isolcpus_updated = true; > + if (!isolated_cpus_can_update(cs->effective_xcpus, NULL)) > + err = PERR_HKEEPING; > } else { > /* > * Switching back to member is always allowed even if it -- Best regards, Ridong
On 9/15/25 10:59 AM, Gabriele Monaco wrote: > Currently the user can set up isolated cpus via cpuset and nohz_full in > such a way that leaves no housekeeping CPU (i.e. no CPU that is neither > domain isolated nor nohz full). This can be a problem for other > subsystems (e.g. the timer wheel imgration). > > Prevent this configuration by blocking any assignation that would cause > the union of domain isolated cpus and nohz_full to covers all CPUs. > > Acked-by: Frederic Weisbecker <frederic@kernel.org> > Signed-off-by: Gabriele Monaco <gmonaco@redhat.com> > --- > kernel/cgroup/cpuset.c | 63 ++++++++++++++++++++++++++++++++++++++++++ > 1 file changed, 63 insertions(+) > > diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c > index 81a9239053a7..3cedc3580373 100644 > --- a/kernel/cgroup/cpuset.c > +++ b/kernel/cgroup/cpuset.c > @@ -1275,6 +1275,19 @@ static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus > cpumask_andnot(isolated_cpus, isolated_cpus, xcpus); > } > > +/* > + * isolated_cpus_should_update - Returns if the isolated_cpus mask needs update > + * @prs: new or old partition_root_state > + * @parent: parent cpuset > + * Return: true if isolated_cpus needs modification, false otherwise > + */ > +static bool isolated_cpus_should_update(int prs, struct cpuset *parent) > +{ > + if (!parent) > + parent = &top_cpuset; > + return prs != parent->partition_root_state; > +} > + > /* > * partition_xcpus_add - Add new exclusive CPUs to partition > * @new_prs: new partition_root_state > @@ -1339,6 +1352,42 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent, > return isolcpus_updated; > } > > +/* > + * isolated_cpus_can_update - check for isolated & nohz_full conflicts > + * @add_cpus: cpu mask for cpus that are going to be isolated > + * @del_cpus: cpu mask for cpus that are no longer isolated, can be NULL > + * Return: false if there is conflict, true otherwise > + * > + * If nohz_full is enabled and we have isolated 
CPUs, their combination must > + * still leave housekeeping CPUs. > + */ > +static bool isolated_cpus_can_update(struct cpumask *add_cpus, > + struct cpumask *del_cpus) > +{ > + cpumask_var_t full_hk_cpus; > + int res = true; > + > + if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE)) > + return true; > + > + if (del_cpus && cpumask_weight_and(del_cpus, > + housekeeping_cpumask(HK_TYPE_KERNEL_NOISE))) > + return true; > + > + if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL)) > + return false; > + > + cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE), > + housekeeping_cpumask(HK_TYPE_DOMAIN)); > + cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus); > + cpumask_and(full_hk_cpus, full_hk_cpus, cpu_active_mask); > + if (!cpumask_weight_andnot(full_hk_cpus, add_cpus)) > + res = false; > + > + free_cpumask_var(full_hk_cpus); > + return res; > +} > + > static void update_exclusion_cpumasks(bool isolcpus_updated) > { > int ret; > @@ -1464,6 +1513,9 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs, > if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) || > cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus)) > return PERR_INVCPUS; > + if (isolated_cpus_should_update(new_prs, NULL) && > + !isolated_cpus_can_update(tmp->new_cpus, NULL)) > + return PERR_HKEEPING; > > spin_lock_irq(&callback_lock); > isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus); > @@ -1563,6 +1615,9 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus, > else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) || > cpumask_subset(top_cpuset.effective_cpus, tmp->addmask)) > cs->prs_err = PERR_NOCPUS; > + else if (isolated_cpus_should_update(prs, NULL) && > + !isolated_cpus_can_update(tmp->addmask, tmp->delmask)) > + cs->prs_err = PERR_HKEEPING; > if (cs->prs_err) > goto invalidate; > } > @@ -1914,6 +1969,12 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd, > return err; > } > > + if 
(deleting && isolated_cpus_should_update(new_prs, parent) && > + !isolated_cpus_can_update(tmp->delmask, tmp->addmask)) { > + cs->prs_err = PERR_HKEEPING; > + return PERR_HKEEPING; > + } > + > /* > * Change the parent's effective_cpus & effective_xcpus (top cpuset > * only). > @@ -2934,6 +2995,8 @@ static int update_prstate(struct cpuset *cs, int new_prs) > * Need to update isolated_cpus. > */ > isolcpus_updated = true; > + if (!isolated_cpus_can_update(cs->effective_xcpus, NULL)) > + err = PERR_HKEEPING; > } else { > /* > * Switching back to member is always allowed even if it Reviewed-by: Waiman Long <longman@redhat.com>
© 2016 - 2025 Red Hat, Inc.