Currently the user can set up isolated CPUs via cpuset and nohz_full in
such a way that leaves no housekeeping CPU (i.e. no CPU that is neither
domain isolated nor nohz_full). This can be a problem for other
subsystems (e.g. the timer wheel migration).

Prevent this configuration by blocking any assignment that would cause
the union of domain isolated CPUs and nohz_full to cover all CPUs.
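
For example (CPU numbers are illustrative only): on an 8-CPU system
booted with nohz_full=4-7, turning CPUs 0-3 into an isolated cpuset
partition would leave no housekeeping CPU; with this change such a
request is blocked and PERR_HKEEPING is reported.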
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Gabriele Monaco <gmonaco@redhat.com>
---
kernel/cgroup/cpuset.c | 57 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 57 insertions(+)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 6e3f44ffaa21..7b66ccedbc53 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1275,6 +1275,19 @@ static void isolated_cpus_update(int old_prs, int new_prs, struct cpumask *xcpus
cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
}
+/*
+ * isolated_cpus_should_update - Returns if the isolated_cpus mask needs update
+ * @prs: new or old partition_root_state
+ * @parent: parent cpuset
+ * Return: true if isolated_cpus needs modification, false otherwise
+ */
+static bool isolated_cpus_should_update(int prs, struct cpuset *parent)
+{
+ if (!parent)
+ parent = &top_cpuset;
+ return prs != parent->partition_root_state;
+}
+
/*
* partition_xcpus_add - Add new exclusive CPUs to partition
* @new_prs: new partition_root_state
@@ -1339,6 +1352,36 @@ static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
return isolcpus_updated;
}
+/*
+ * isolcpus_nohz_conflict - check for isolated & nohz_full conflicts
+ * @new_cpus: cpu mask for cpus that are going to be isolated
+ * Return: true if there is conflict, false otherwise
+ *
+ * If nohz_full is enabled and we have isolated CPUs, their combination must
+ * still leave housekeeping CPUs.
+ */
+static bool isolcpus_nohz_conflict(struct cpumask *new_cpus)
+{
+ cpumask_var_t full_hk_cpus;
+ int res = false;
+
+ if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
+ return false;
+
+ if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL))
+ return true;
+
+ cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
+ housekeeping_cpumask(HK_TYPE_DOMAIN));
+ cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus);
+ cpumask_and(full_hk_cpus, full_hk_cpus, cpu_online_mask);
+ if (!cpumask_weight_andnot(full_hk_cpus, new_cpus))
+ res = true;
+
+ free_cpumask_var(full_hk_cpus);
+ return res;
+}
+
static void update_exclusion_cpumasks(bool isolcpus_updated)
{
int ret;
@@ -1464,6 +1507,9 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
if (!cpumask_intersects(tmp->new_cpus, cpu_active_mask) ||
cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
return PERR_INVCPUS;
+ if (isolated_cpus_should_update(new_prs, NULL) &&
+ isolcpus_nohz_conflict(tmp->new_cpus))
+ return PERR_HKEEPING;
spin_lock_irq(&callback_lock);
isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
@@ -1563,6 +1609,9 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *xcpus,
else if (cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
cpumask_subset(top_cpuset.effective_cpus, tmp->addmask))
cs->prs_err = PERR_NOCPUS;
+ else if (isolated_cpus_should_update(prs, NULL) &&
+ isolcpus_nohz_conflict(tmp->addmask))
+ cs->prs_err = PERR_HKEEPING;
if (cs->prs_err)
goto invalidate;
}
@@ -1914,6 +1963,12 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
return err;
}
+ if (deleting && isolated_cpus_should_update(new_prs, parent) &&
+ isolcpus_nohz_conflict(tmp->delmask)) {
+ cs->prs_err = PERR_HKEEPING;
+ return PERR_HKEEPING;
+ }
+
/*
* Change the parent's effective_cpus & effective_xcpus (top cpuset
* only).
@@ -2934,6 +2989,8 @@ static int update_prstate(struct cpuset *cs, int new_prs)
* Need to update isolated_cpus.
*/
isolcpus_updated = true;
+ if (isolcpus_nohz_conflict(cs->effective_xcpus))
+ err = PERR_HKEEPING;
} else {
/*
* Switching back to member is always allowed even if it
--
2.50.1
On 8/8/25 12:01 PM, Gabriele Monaco wrote:
[...]
> +	cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
> +		    housekeeping_cpumask(HK_TYPE_DOMAIN));
> +	cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus);
> +	cpumask_and(full_hk_cpus, full_hk_cpus, cpu_online_mask);

BTW, could you replace cpu_online_mask with cpu_active_mask in case you
need to update this patch series again?

Thanks,
Longman
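
For illustration, a minimal sketch of isolcpus_nohz_conflict() with that
suggestion applied; the cpu_active_mask line is the only assumed change,
everything else is copied from the posted patch:

static bool isolcpus_nohz_conflict(struct cpumask *new_cpus)
{
	cpumask_var_t full_hk_cpus;
	int res = false;

	/* No nohz_full CPUs configured: nothing to conflict with. */
	if (!housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
		return false;

	/* Fail safe: treat allocation failure as a conflict. */
	if (!alloc_cpumask_var(&full_hk_cpus, GFP_KERNEL))
		return true;

	/* CPUs doing both kernel-noise and domain housekeeping ... */
	cpumask_and(full_hk_cpus, housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
		    housekeeping_cpumask(HK_TYPE_DOMAIN));
	/* ... minus those already isolated via cpuset ... */
	cpumask_andnot(full_hk_cpus, full_hk_cpus, isolated_cpus);
	/* ... restricted to active CPUs (was cpu_online_mask in the patch). */
	cpumask_and(full_hk_cpus, full_hk_cpus, cpu_active_mask);
	/* Conflict if the new isolation would consume all remaining CPUs. */
	if (!cpumask_weight_andnot(full_hk_cpus, new_cpus))
		res = true;

	free_cpumask_var(full_hk_cpus);
	return res;
}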
On 8/8/25 12:01 PM, Gabriele Monaco wrote:
[...]
> +static bool isolcpus_nohz_conflict(struct cpumask *new_cpus)
> +{
[...]
> +	free_cpumask_var(full_hk_cpus);
> +	return res;
> +}
> +

The check could probably be further optimized. This check will need to
be updated when the relationship between isolated CPUs and nohz_full
CPUs changes in the future. It does get the job done for now.

[...]

Acked-by: Waiman Long <longman@redhat.com>