From: Chen Ridong <chenridong@huawei.com>
cpuset: add helpers for cpus_read_lock and cpuset_mutex locks.
Replace repetitive locking patterns with new helpers:
- cpuset_full_lock()
- cpuset_full_unlock()
This makes the code cleaner and ensures consistent lock ordering.
Signed-off-by: Chen Ridong <chenridong@huawei.com>
Reviewed-by: Waiman Long <longman@redhat.com>
---
kernel/cgroup/cpuset-internal.h | 2 ++
kernel/cgroup/cpuset-v1.c | 12 +++----
kernel/cgroup/cpuset.c | 60 +++++++++++++++++++--------------
3 files changed, 40 insertions(+), 34 deletions(-)
diff --git a/kernel/cgroup/cpuset-internal.h b/kernel/cgroup/cpuset-internal.h
index 75b3aef39231..337608f408ce 100644
--- a/kernel/cgroup/cpuset-internal.h
+++ b/kernel/cgroup/cpuset-internal.h
@@ -276,6 +276,8 @@ int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs, int turning_on)
ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off);
int cpuset_common_seq_show(struct seq_file *sf, void *v);
+void cpuset_full_lock(void);
+void cpuset_full_unlock(void);
/*
* cpuset-v1.c
diff --git a/kernel/cgroup/cpuset-v1.c b/kernel/cgroup/cpuset-v1.c
index b69a7db67090..12e76774c75b 100644
--- a/kernel/cgroup/cpuset-v1.c
+++ b/kernel/cgroup/cpuset-v1.c
@@ -169,8 +169,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
cpuset_filetype_t type = cft->private;
int retval = -ENODEV;
- cpus_read_lock();
- cpuset_lock();
+ cpuset_full_lock();
if (!is_cpuset_online(cs))
goto out_unlock;
@@ -184,8 +183,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
break;
}
out_unlock:
- cpuset_unlock();
- cpus_read_unlock();
+ cpuset_full_unlock();
return retval;
}
@@ -454,8 +452,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
cpuset_filetype_t type = cft->private;
int retval = 0;
- cpus_read_lock();
- cpuset_lock();
+ cpuset_full_lock();
if (!is_cpuset_online(cs)) {
retval = -ENODEV;
goto out_unlock;
@@ -498,8 +495,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
break;
}
out_unlock:
- cpuset_unlock();
- cpus_read_unlock();
+ cpuset_full_unlock();
return retval;
}
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 7b0b81c835bf..a78ccd11ce9b 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -250,6 +250,12 @@ static struct cpuset top_cpuset = {
static DEFINE_MUTEX(cpuset_mutex);
+/**
+ * cpuset_lock - Acquire the global cpuset mutex
+ *
+ * This locks the global cpuset mutex to prevent modifications to cpuset
+ * hierarchy and configurations. This helper alone is not sufficient to make modifications.
+ */
void cpuset_lock(void)
{
mutex_lock(&cpuset_mutex);
@@ -260,6 +266,24 @@ void cpuset_unlock(void)
mutex_unlock(&cpuset_mutex);
}
+/**
+ * cpuset_full_lock - Acquire full protection for cpuset modification
+ *
+ * Takes both CPU hotplug read lock (cpus_read_lock()) and cpuset mutex
+ * to safely modify cpuset data.
+ */
+void cpuset_full_lock(void)
+{
+ cpus_read_lock();
+ mutex_lock(&cpuset_mutex);
+}
+
+void cpuset_full_unlock(void)
+{
+ mutex_unlock(&cpuset_mutex);
+ cpus_read_unlock();
+}
+
static DEFINE_SPINLOCK(callback_lock);
void cpuset_callback_lock_irq(void)
@@ -3234,8 +3258,7 @@ ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
int retval = -ENODEV;
buf = strstrip(buf);
- cpus_read_lock();
- mutex_lock(&cpuset_mutex);
+ cpuset_full_lock();
if (!is_cpuset_online(cs))
goto out_unlock;
@@ -3264,8 +3287,7 @@ ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
if (force_sd_rebuild)
rebuild_sched_domains_locked();
out_unlock:
- mutex_unlock(&cpuset_mutex);
- cpus_read_unlock();
+ cpuset_full_unlock();
flush_workqueue(cpuset_migrate_mm_wq);
return retval ?: nbytes;
}
@@ -3368,12 +3390,10 @@ static ssize_t cpuset_partition_write(struct kernfs_open_file *of, char *buf,
else
return -EINVAL;
- cpus_read_lock();
- mutex_lock(&cpuset_mutex);
+ cpuset_full_lock();
if (is_cpuset_online(cs))
retval = update_prstate(cs, val);
- mutex_unlock(&cpuset_mutex);
- cpus_read_unlock();
+ cpuset_full_unlock();
return retval ?: nbytes;
}
@@ -3498,9 +3518,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
if (!parent)
return 0;
- cpus_read_lock();
- mutex_lock(&cpuset_mutex);
-
+ cpuset_full_lock();
if (is_spread_page(parent))
set_bit(CS_SPREAD_PAGE, &cs->flags);
if (is_spread_slab(parent))
@@ -3552,8 +3570,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
spin_unlock_irq(&callback_lock);
out_unlock:
- mutex_unlock(&cpuset_mutex);
- cpus_read_unlock();
+ cpuset_full_unlock();
return 0;
}
@@ -3568,16 +3585,12 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
{
struct cpuset *cs = css_cs(css);
- cpus_read_lock();
- mutex_lock(&cpuset_mutex);
-
+ cpuset_full_lock();
if (!cpuset_v2() && is_sched_load_balance(cs))
cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
cpuset_dec();
-
- mutex_unlock(&cpuset_mutex);
- cpus_read_unlock();
+ cpuset_full_unlock();
}
/*
@@ -3589,16 +3602,11 @@ static void cpuset_css_killed(struct cgroup_subsys_state *css)
{
struct cpuset *cs = css_cs(css);
- cpus_read_lock();
- mutex_lock(&cpuset_mutex);
-
+ cpuset_full_lock();
/* Reset valid partition back to member */
if (is_partition_valid(cs))
update_prstate(cs, PRS_MEMBER);
-
- mutex_unlock(&cpuset_mutex);
- cpus_read_unlock();
-
+ cpuset_full_unlock();
}
static void cpuset_css_free(struct cgroup_subsys_state *css)
--
2.34.1
(I wrote this yesterday before merging but I'm still sending it to give my opinion ;-)) On Mon, Aug 25, 2025 at 03:23:52AM +0000, Chen Ridong <chenridong@huaweicloud.com> wrote: > From: Chen Ridong <chenridong@huawei.com> > > cpuset: add helpers for cpus_read_lock and cpuset_mutex locks. > > Replace repetitive locking patterns with new helpers: > - cpuset_full_lock() > - cpuset_full_unlock() I don't see many precedents elsewhere in the kernel for such naming (like _lock and _full_lock()). Wouldn't it be more illustrative to have cpuset_read_lock() and cpuset_write_lock()? (As I'm looking at current users and your accompanying comments which could be substituted with the more conventional naming.) (Also if you decide going this direction, please mention commit 111cd11bbc548 ("sched/cpuset: Bring back cpuset_mutex") in the message so that it doesn't tempt to do further changes.) > This makes the code cleaner and ensures consistent lock ordering. Lock guards anyone? (When you're touching this and seeking clean code.) Thanks, Michal
On 8/26/25 10:23 AM, Michal Koutný wrote: > (I wrote this yesterday before merging but I'm still sending it to give > my opinion ;-)) > > On Mon, Aug 25, 2025 at 03:23:52AM +0000, Chen Ridong <chenridong@huaweicloud.com> wrote: >> From: Chen Ridong <chenridong@huawei.com> >> >> cpuset: add helpers for cpus_read_lock and cpuset_mutex locks. >> >> Replace repetitive locking patterns with new helpers: >> - cpuset_full_lock() >> - cpuset_full_unlock() > I don't see many precedents elsewhere in the kernel for such naming > (like _lock and _full_lock()). Wouldn't it be more illustrative to have > cpuset_read_lock() and cpuset_write_lock()? (As I'm looking at current > users and your accompanying comments which could be substituted with > the more conventional naming.) Good naming is always an issue. Using cpuset_read_lock/cpuset_write_lock will be more confusing as the current locking scheme is exclusive. > (Also if you decide going this direction, please mention commit > 111cd11bbc548 ("sched/cpuset: Bring back cpuset_mutex") in the message > so that it doesn't tempt to do further changes.) > > >> This makes the code cleaner and ensures consistent lock ordering. > Lock guards anyone? (When you're touching this and seeking clean code.) Yes, I guess we can use lock guards here. You are welcome to send a patch to do that. Cheers, Longman > > Thanks, > Michal
On 2025/8/26 22:43, Waiman Long wrote: > > On 8/26/25 10:23 AM, Michal Koutný wrote: >> (I wrote this yesterday before merging but I'm still sending it to give >> my opinion ;-)) >> >> On Mon, Aug 25, 2025 at 03:23:52AM +0000, Chen Ridong <chenridong@huaweicloud.com> wrote: >>> From: Chen Ridong <chenridong@huawei.com> >>> >>> cpuset: add helpers for cpus_read_lock and cpuset_mutex locks. >>> >>> Replace repetitive locking patterns with new helpers: >>> - cpuset_full_lock() >>> - cpuset_full_unlock() >> I don't see many precedents elsewhere in the kernel for such naming >> (like _lock and _full_lock()). Wouldn't it be more illustrative to have >> cpuset_read_lock() and cpuset_write_lock()? (As I'm looking at current >> users and your accompanying comments which could be substituted with >> the more conventional naming.) > Good naming is always an issue. Using cpuset_read_lock/cpuset_write_lock will be more confusing as > the current locking scheme is exclusive. >> (Also if you decide going this direction, please mention commit >> 111cd11bbc548 ("sched/cpuset: Bring back cpuset_mutex") in the message >> so that it doesn't tempt to do further changes.) >> >> >>> This makes the code cleaner and ensures consistent lock ordering. >> Lock guards anyone? (When you're touching this and seeking clean code.) > > Yes, I guess we can use lock guards here. You are welcome to send a patch to do that. > I attempted to define the cpuset_full_lock() macro, but the initial implementation was inconsistent with our coding conventions. Initial version: #define cpuset_full_lock() \ guard(cpus_read_lock)(); \ guard(mutex)(&cpuset_mutex); It was suggested to use a do-while construct for proper scoping. but it could not work if we define as: #define cpuset_full_lock() \ do { \ guard(cpus_read_lock)(); \ guard(mutex)(&cpuset_mutex); \ } while(0) So I sent this patch version. -- Best regards, Ridong
On Wed, Aug 27, 2025 at 02:23:17PM +0800, Chen Ridong <chenridong@huaweicloud.com> wrote: > It was suggested to use a do-while construct for proper scoping. but it could not work if we define as: Perhaps like this: DEFINE_LOCK_GUARD_0(cpuset_full, cpuset_full_lock(), cpuset_full_unlock()) > So I sent this patch version. No probs, it's a minor issue. Michal
© 2016 - 2025 Red Hat, Inc.