From 0dc6cbb0bd06fe2c29999b7c6b3c2206b612d1fa Mon Sep 17 00:00:00 2001
From: Jemmy Wong <jemmywong512@gmail.com>
Date: Thu, 19 Jun 2025 03:28:50 +0800
Subject: [PATCH v0 00/13] [RFC] sched: Add more Scope-based Resource Management Support

Hi,

I greatly admire the Scope-based Resource Management infrastructure:
it aligns elegantly with the Resource Acquisition Is Initialization (RAII)
idiom and improves code safety and maintainability.

I am interested in driving a comprehensive conversion of the traditional
manual lock/unlock patterns to guard()/scoped_guard(),
starting with the sched code.
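
For illustration, the shape of the conversion is roughly as follows.
This is a minimal, hypothetical sketch using guard()/scoped_guard()
from <linux/cleanup.h>; struct foo and the helpers below are made up
for the example and are not part of this series:

struct foo {
        raw_spinlock_t lock;
        int count;
};

/* Before: manual lock/unlock, easy to get wrong on early returns. */
static void foo_inc_manual(struct foo *f)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&f->lock, flags);
        f->count++;
        raw_spin_unlock_irqrestore(&f->lock, flags);
}

/* After: the guard drops the lock when it goes out of scope. */
static void foo_inc_guarded(struct foo *f)
{
        guard(raw_spinlock_irqsave)(&f->lock);
        f->count++;
}

/* scoped_guard() bounds the critical section to an explicit block. */
static void foo_reset(struct foo *f)
{
        scoped_guard(raw_spinlock_irqsave, &f->lock)
                f->count = 0;
}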

Before proceeding, I’d like to confirm whether you consider
this effort valuable and whether you’d support such a conversion.

Best,
Jemmy

Jemmy Wong (13):
  sched/completion: Scope-based Resource Management Support
  sched/deadline: Scope-based Resource Management Support
  sched/psi: Scope-based Resource Management Support
  sched/cpuacct: Scope-based Resource Management Support
  sched/syscalls: Scope-based Resource Management Support
  sched/core_sched: Scope-based Resource Management Support
  sched/cpudeadline: Scope-based Resource Management Support
  sched/cpufreq_schedutil: Scope-based Resource Management Support
  sched/cputime: Scope-based Resource Management Support
  sched/stats: Scope-based Resource Management Support
  sched/wait: Scope-based Resource Management Support
  sched/swait: Scope-based Resource Management Support
  sched/ext_idle: Scope-based Resource Management Support

 include/linux/completion.h       |   4 ++
 include/linux/rcupdate.h         |   4 ++
 kernel/sched/completion.c        |  32 +++------
 kernel/sched/core_sched.c        |  30 ++++----
 kernel/sched/cpuacct.c           |  12 +---
 kernel/sched/cpudeadline.c       |   9 +--
 kernel/sched/cpufreq_schedutil.c |  41 +++++------
 kernel/sched/cputime.c           |  39 +++++------
 kernel/sched/deadline.c          | 114 +++++++++++++------------------
 kernel/sched/ext_idle.c          |  29 ++++----
 kernel/sched/psi.c               |  71 ++++++-------------
 kernel/sched/sched.h             |  14 +---
 kernel/sched/stats.c             |   3 +-
 kernel/sched/swait.c             |  29 +++-----
 kernel/sched/syscalls.c          |   4 +-
 kernel/sched/wait.c              |  43 +++---------
 16 files changed, 171 insertions(+), 307 deletions(-)

--
2.43.0
From 44681a9c8dea41c8b765158e850ef08d7e6236ef Mon Sep 17 00:00:00 2001
From: Jemmy Wong <jemmywong512@gmail.com>
Date: Sat, 14 Jun 2025 15:25:30 +0800
Subject: [PATCH v0 01/13] sched/completion: Scope-based Resource Management
 Support

This change replaces manual lock acquisition and release with lock guards
to improve code robustness and reduce the risk of lock mismanagement.
It also adds a 'complete' lock guard, wrapping complete_acquire() and
complete_release(), to <linux/completion.h> for use in __wait_for_common().

Signed-off-by: Jemmy Wong <jemmywong512@gmail.com>

---
 include/linux/completion.h |  4 ++++
 kernel/sched/completion.c  | 32 ++++++++++----------------------
 2 files changed, 14 insertions(+), 22 deletions(-)

diff --git a/include/linux/completion.h b/include/linux/completion.h
index fb2915676574..fcd987b56fa0 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -32,6 +32,10 @@ struct completion {
 static inline void complete_acquire(struct completion *x) {}
 static inline void complete_release(struct completion *x) {}

+DEFINE_LOCK_GUARD_1(complete, struct completion,
+	complete_acquire(_T->lock),
+	complete_release(_T->lock))
+
 #define COMPLETION_INITIALIZER(work) \
 	{ 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }

diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c
index 3561ab533dd4..9d4efdce9d23 100644
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -15,14 +15,11 @@

 static void complete_with_flags(struct completion *x, int wake_flags)
 {
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&x->wait.lock, flags);
+	guard(raw_spinlock_irqsave)(&x->wait.lock);

 	if (x->done != UINT_MAX)
 		x->done++;
 	swake_up_locked(&x->wait, wake_flags);
-	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
 }

 void complete_on_current_cpu(struct completion *x)
@@ -66,14 +63,11 @@ EXPORT_SYMBOL(complete);
  */
 void complete_all(struct completion *x)
 {
-	unsigned long flags;
-
 	lockdep_assert_RT_in_threaded_ctx();

-	raw_spin_lock_irqsave(&x->wait.lock, flags);
+	guard(raw_spinlock_irqsave)(&x->wait.lock);
 	x->done = UINT_MAX;
 	swake_up_all_locked(&x->wait);
-	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
 }
 EXPORT_SYMBOL(complete_all);

@@ -110,13 +104,10 @@ __wait_for_common(struct completion *x,
 {
 	might_sleep();

-	complete_acquire(x);
-
-	raw_spin_lock_irq(&x->wait.lock);
-	timeout = do_wait_for_common(x, action, timeout, state);
-	raw_spin_unlock_irq(&x->wait.lock);
-
-	complete_release(x);
+	guard(complete)(x);
+	scoped_guard(raw_spinlock_irq, &x->wait.lock) {
+		timeout = do_wait_for_common(x, action, timeout, state);
+	}

 	return timeout;
 }
@@ -303,7 +294,6 @@ EXPORT_SYMBOL(wait_for_completion_killable_timeout);
  */
 bool try_wait_for_completion(struct completion *x)
 {
-	unsigned long flags;
 	bool ret = true;

 	/*
@@ -315,12 +305,12 @@ bool try_wait_for_completion(struct completion *x)
 	if (!READ_ONCE(x->done))
 		return false;

-	raw_spin_lock_irqsave(&x->wait.lock, flags);
+	guard(raw_spinlock_irqsave)(&x->wait.lock);
 	if (!x->done)
 		ret = false;
 	else if (x->done != UINT_MAX)
 		x->done--;
-	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
+
 	return ret;
 }
 EXPORT_SYMBOL(try_wait_for_completion);
@@ -336,8 +326,6 @@ EXPORT_SYMBOL(try_wait_for_completion);
  */
 bool completion_done(struct completion *x)
 {
-	unsigned long flags;
-
 	if (!READ_ONCE(x->done))
 		return false;

@@ -346,8 +334,8 @@ bool completion_done(struct completion *x)
 	 * otherwise we can end up freeing the completion before complete()
 	 * is done referencing it.
 	 */
-	raw_spin_lock_irqsave(&x->wait.lock, flags);
-	raw_spin_unlock_irqrestore(&x->wait.lock, flags);
+	guard(raw_spinlock_irqsave)(&x->wait.lock);
+
 	return true;
 }
 EXPORT_SYMBOL(completion_done);
--
2.43.0
From 35cdf70828f8b9f9eee819c7b19846e67fe12100 Mon Sep 17 00:00:00 2001
From: Jemmy Wong <jemmywong512@gmail.com>
Date: Sat, 14 Jun 2025 15:26:00 +0800
Subject: [PATCH v0 02/13] sched/deadline: Scope-based Resource Management
 Support

This change replaces manual lock acquisition and release with lock guards
to improve code robustness and reduce the risk of lock mismanagement.
It also adds an rcu_read_lock_sched lock guard, wrapping
rcu_read_lock_sched() and rcu_read_unlock_sched(), to <linux/rcupdate.h>.

Signed-off-by: Jemmy Wong <jemmywong512@gmail.com>

---
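Note (usage of the new rcu_read_lock_sched guard, for reference): a guard
defined with DEFINE_LOCK_GUARD_0() takes no argument and is released at
end of scope. A hypothetical sketch, not part of the patch:

static void rcu_sched_reader(void)
{
        guard(rcu_read_lock_sched)();   /* rcu_read_lock_sched() */
        /* ... sched-RCU read-side critical section ... */
}                                       /* rcu_read_unlock_sched() */

static void rcu_sched_reader_scoped(void)
{
        scoped_guard(rcu_read_lock_sched) {
                /* ... sched-RCU read-side critical section ... */
        }
        /* unlocked again here */
}
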
 include/linux/rcupdate.h |   4 ++
 kernel/sched/deadline.c  | 114 ++++++++++++++++-----------------------
 2 files changed, 50 insertions(+), 68 deletions(-)

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 120536f4c6eb..b1aacdb89886 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -1166,4 +1166,8 @@ DEFINE_LOCK_GUARD_0(rcu,
 	} while (0),
 	rcu_read_unlock())

+DEFINE_LOCK_GUARD_0(rcu_read_lock_sched,
+	rcu_read_lock_sched(),
+	rcu_read_unlock_sched())
+
 #endif /* __LINUX_RCUPDATE_H */
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index ad45a8fea245..21c0f32726c2 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -478,9 +478,9 @@ static void task_non_contending(struct sched_dl_entity *dl_se)

 				if (READ_ONCE(p->__state) == TASK_DEAD)
 					sub_rq_bw(dl_se, &rq->dl);
-				raw_spin_lock(&dl_b->lock);
-				__dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p)));
-				raw_spin_unlock(&dl_b->lock);
+				scoped_guard(raw_spinlock, &dl_b->lock) {
+					__dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p)));
+				}
 				__dl_clear_params(dl_se);
 			}
 		}
@@ -738,14 +738,14 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
 	 * domain.
 	 */
 	dl_b = &rq->rd->dl_bw;
-	raw_spin_lock(&dl_b->lock);
-	__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
-	raw_spin_unlock(&dl_b->lock);
+	scoped_guard(raw_spinlock, &dl_b->lock) {
+		__dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
+	}

 	dl_b = &later_rq->rd->dl_bw;
-	raw_spin_lock(&dl_b->lock);
-	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
-	raw_spin_unlock(&dl_b->lock);
+	scoped_guard(raw_spinlock, &dl_b->lock) {
+		__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
+	}

 	set_task_cpu(p, later_rq->cpu);
 	double_unlock_balance(later_rq, rq);
@@ -1588,7 +1588,7 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
 	if (rt_bandwidth_enabled()) {
 		struct rt_rq *rt_rq = &rq->rt;

-		raw_spin_lock(&rt_rq->rt_runtime_lock);
+		guard(raw_spinlock)(&rt_rq->rt_runtime_lock);
 		/*
 		 * We'll let actual RT tasks worry about the overflow here, we
 		 * have our own CBS to keep us inline; only account when RT
@@ -1596,7 +1596,6 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
 		 */
 		if (sched_rt_bandwidth_account(rt_rq))
 			rt_rq->rt_time += delta_exec;
-		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 #endif
 }
@@ -1808,9 +1807,9 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
 			dl_se->dl_non_contending = 0;
 		}

-		raw_spin_lock(&dl_b->lock);
-		__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
-		raw_spin_unlock(&dl_b->lock);
+		scoped_guard(raw_spinlock, &dl_b->lock) {
+			__dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
+		}
 		__dl_clear_params(dl_se);

 		goto unlock;
@@ -2234,11 +2233,12 @@ select_task_rq_dl(struct task_struct *p, int cpu, int flags)
 	struct rq *rq;

 	if (!(flags & WF_TTWU))
-		goto out;
+		return cpu;

 	rq = cpu_rq(cpu);

-	rcu_read_lock();
+	guard(rcu)();
+
 	curr = READ_ONCE(rq->curr); /* unlocked access */
 	donor = READ_ONCE(rq->donor);

@@ -2270,15 +2270,12 @@ select_task_rq_dl(struct task_struct *p, int cpu, int flags)
 		    dl_task_is_earliest_deadline(p, cpu_rq(target)))
 			cpu = target;
 	}
-	rcu_read_unlock();

-out:
 	return cpu;
 }

 static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
 {
-	struct rq_flags rf;
 	struct rq *rq;

 	if (READ_ONCE(p->__state) != TASK_WAKING)
@@ -2290,7 +2287,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
 	 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
 	 * rq->lock is not... So, lock it
 	 */
-	rq_lock(rq, &rf);
+	guard(rq_lock)(rq);
 	if (p->dl.dl_non_contending) {
 		update_rq_clock(rq);
 		sub_running_bw(&p->dl, &rq->dl);
@@ -2305,7 +2302,6 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
 		cancel_inactive_timer(&p->dl);
 	}
 	sub_rq_bw(&p->dl, &rq->dl);
-	rq_unlock(rq, &rf);
 }

 static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
@@ -2574,7 +2570,8 @@ static int find_later_rq(struct task_struct *task)
 	if (!cpumask_test_cpu(this_cpu, later_mask))
 		this_cpu = -1;

-	rcu_read_lock();
+	guard(rcu)();
+
 	for_each_domain(cpu, sd) {
 		if (sd->flags & SD_WAKE_AFFINE) {
 			int best_cpu;
@@ -2585,7 +2582,6 @@ static int find_later_rq(struct task_struct *task)
 			 */
 			if (this_cpu != -1 &&
 			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
-				rcu_read_unlock();
 				return this_cpu;
 			}

@@ -2598,12 +2594,10 @@ static int find_later_rq(struct task_struct *task)
 			 * already under consideration through later_mask.
 			 */
 			if (best_cpu < nr_cpu_ids) {
-				rcu_read_unlock();
 				return best_cpu;
 			}
 		}
 	}
-	rcu_read_unlock();

 	/*
 	 * At this point, all our guesses failed, we just return
@@ -2909,9 +2903,8 @@ static void set_cpus_allowed_dl(struct task_struct *p,
 		 * off. In the worst case, sched_setattr() may temporary fail
 		 * until we complete the update.
 		 */
-		raw_spin_lock(&src_dl_b->lock);
+		guard(raw_spinlock)(&src_dl_b->lock);
 		__dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
-		raw_spin_unlock(&src_dl_b->lock);
 	}

 	set_cpus_allowed_common(p, ctx);
@@ -2962,11 +2955,9 @@ void dl_add_task_root_domain(struct task_struct *p)
 	rq = __task_rq_lock(p, &rf);

 	dl_b = &rq->rd->dl_bw;
-	raw_spin_lock(&dl_b->lock);
-
-	__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
-
-	raw_spin_unlock(&dl_b->lock);
+	scoped_guard(raw_spinlock, &dl_b->lock) {
+		__dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
+	}

 	task_rq_unlock(rq, p, &rf);
 }
@@ -3187,7 +3178,6 @@ int sched_dl_global_validate(void)
 	u64 cookie = ++dl_cookie;
 	struct dl_bw *dl_b;
 	int cpu, cpus, ret = 0;
-	unsigned long flags;

 	/*
 	 * Here we want to check the bandwidth not being set to some
@@ -3195,24 +3185,20 @@ int sched_dl_global_validate(void)
 	 * any of the root_domains.
 	 */
 	for_each_online_cpu(cpu) {
-		rcu_read_lock_sched();
+		if (ret)
+			break;
+
+		guard(rcu_read_lock_sched)();

 		if (dl_bw_visited(cpu, cookie))
-			goto next;
+			continue;

 		dl_b = dl_bw_of(cpu);
 		cpus = dl_bw_cpus(cpu);

-		raw_spin_lock_irqsave(&dl_b->lock, flags);
+		guard(raw_spinlock_irqsave)(&dl_b->lock);
 		if (new_bw * cpus < dl_b->total_bw)
 			ret = -EBUSY;
-		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-
-next:
-		rcu_read_unlock_sched();
-
-		if (ret)
-			break;
 	}

 	return ret;
@@ -3237,26 +3223,21 @@ void sched_dl_do_global(void)
 	u64 cookie = ++dl_cookie;
 	struct dl_bw *dl_b;
 	int cpu;
-	unsigned long flags;

 	if (global_rt_runtime() != RUNTIME_INF)
 		new_bw = to_ratio(global_rt_period(), global_rt_runtime());

 	for_each_possible_cpu(cpu) {
-		rcu_read_lock_sched();
-
-		if (dl_bw_visited(cpu, cookie)) {
-			rcu_read_unlock_sched();
-			continue;
-		}
+		scoped_guard(rcu_read_lock_sched) {
+			if (dl_bw_visited(cpu, cookie))
+				continue;

-		dl_b = dl_bw_of(cpu);
+			dl_b = dl_bw_of(cpu);

-		raw_spin_lock_irqsave(&dl_b->lock, flags);
-		dl_b->bw = new_bw;
-		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+			guard(raw_spinlock_irqsave)(&dl_b->lock);
+			dl_b->bw = new_bw;
+		}

-		rcu_read_unlock_sched();
 		init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
 	}
 }
@@ -3291,7 +3272,8 @@ int sched_dl_overflow(struct task_struct *p, int policy,
 	 * its parameters, we may need to update accordingly the total
 	 * allocated bandwidth of the container.
 	 */
-	raw_spin_lock(&dl_b->lock);
+	guard(raw_spinlock)(&dl_b->lock);
+
 	cpus = dl_bw_cpus(cpu);
 	cap = dl_bw_capacity(cpu);

@@ -3322,7 +3304,6 @@ int sched_dl_overflow(struct task_struct *p, int policy,
 		 */
 		err = 0;
 	}
-	raw_spin_unlock(&dl_b->lock);

 	return err;
 }
@@ -3462,18 +3443,17 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
 int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
 				 const struct cpumask *trial)
 {
-	unsigned long flags, cap;
+	unsigned long cap;
 	struct dl_bw *cur_dl_b;
 	int ret = 1;

-	rcu_read_lock_sched();
+	guard(rcu_read_lock_sched)();
 	cur_dl_b = dl_bw_of(cpumask_any(cur));
 	cap = __dl_bw_capacity(trial);
-	raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
+
+	guard(raw_spinlock_irqsave)(&cur_dl_b->lock);
 	if (__dl_overflow(cur_dl_b, cap, 0, 0))
 		ret = 0;
-	raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
-	rcu_read_unlock_sched();

 	return ret;
 }
@@ -3486,14 +3466,15 @@ enum dl_bw_request {

 static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
 {
-	unsigned long flags, cap;
+	unsigned long cap;
 	struct dl_bw *dl_b;
 	bool overflow = 0;
 	u64 fair_server_bw = 0;

-	rcu_read_lock_sched();
+	guard(rcu_read_lock_sched)();
+
 	dl_b = dl_bw_of(cpu);
-	raw_spin_lock_irqsave(&dl_b->lock, flags);
+	guard(raw_spinlock_irqsave)(&dl_b->lock);

 	cap = dl_bw_capacity(cpu);
 	switch (req) {
@@ -3550,9 +3531,6 @@ static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
 		break;
 	}

-	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-	rcu_read_unlock_sched();
-
 	return overflow ? -EBUSY : 0;
 }

--
2.43.0
From 314d093c8e6cc7e761092e1b5f038ed587e19a04 Mon Sep 17 00:00:00 2001
From: Jemmy Wong <jemmywong512@gmail.com>
Date: Sat, 14 Jun 2025 15:26:52 +0800
Subject: [PATCH v0 03/13] sched/psi: Scope-based Resource Management Support

This change replaces manual lock acquisition and release with lock guards
to improve code robustness and reduce the risk of lock mismanagement.
It also replaces the this_rq_lock_irq() helper in kernel/sched/sched.h
with a raw_spin_rq_lock_irq lock guard.

Signed-off-by: Jemmy Wong <jemmywong512@gmail.com>

---
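Note (for reference): where the guarded primitive also produces a value,
such as task_rq_lock() returning the runqueue, the CLASS() form is used
so the result can still be named, as in cgroup_move_task() below. A
hypothetical sketch, not part of the patch:

static void inspect_task_rq(struct task_struct *p)
{
        CLASS(task_rq_lock, rq_guard)(p);
        struct rq *rq = rq_guard.rq;

        /* p's pi_lock and rq->lock are held until end of scope. */
        update_rq_clock(rq);
}
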
 kernel/sched/psi.c   | 71 +++++++++++++-------------------------------
 kernel/sched/sched.h | 14 ++-------
 2 files changed, 23 insertions(+), 62 deletions(-)

diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index ad04a5c3162a..6b2a8f403d65 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -563,7 +563,7 @@ static void psi_avgs_work(struct work_struct *work)
 	dwork = to_delayed_work(work);
 	group = container_of(dwork, struct psi_group, avgs_work);

-	mutex_lock(&group->avgs_lock);
+	guard(mutex)(&group->avgs_lock);

 	now = sched_clock();

@@ -584,8 +584,6 @@ static void psi_avgs_work(struct work_struct *work)
 		schedule_delayed_work(dwork, nsecs_to_jiffies(
 				group->avg_next_update - now) + 1);
 	}
-
-	mutex_unlock(&group->avgs_lock);
 }

 static void init_rtpoll_triggers(struct psi_group *group, u64 now)
@@ -613,7 +611,7 @@ static void psi_schedule_rtpoll_work(struct psi_group *group, unsigned long dela
 	if (atomic_xchg(&group->rtpoll_scheduled, 1) && !force)
 		return;

-	rcu_read_lock();
+	guard(rcu)();

 	task = rcu_dereference(group->rtpoll_task);
 	/*
@@ -624,8 +622,6 @@ static void psi_schedule_rtpoll_work(struct psi_group *group, unsigned long dela
 		mod_timer(&group->rtpoll_timer, jiffies + delay);
 	else
 		atomic_set(&group->rtpoll_scheduled, 0);
-
-	rcu_read_unlock();
 }

 static void psi_rtpoll_work(struct psi_group *group)
@@ -634,7 +630,7 @@ static void psi_rtpoll_work(struct psi_group *group)
 	u32 changed_states;
 	u64 now;

-	mutex_lock(&group->rtpoll_trigger_lock);
+	guard(mutex)(&group->rtpoll_trigger_lock);

 	now = sched_clock();

@@ -693,7 +689,7 @@ static void psi_rtpoll_work(struct psi_group *group)

 	if (now > group->rtpoll_until) {
 		group->rtpoll_next_update = ULLONG_MAX;
-		goto out;
+		return;
 	}

 	if (now >= group->rtpoll_next_update) {
@@ -708,9 +704,6 @@ static void psi_rtpoll_work(struct psi_group *group)
 	psi_schedule_rtpoll_work(group,
 		nsecs_to_jiffies(group->rtpoll_next_update - now) + 1,
 		force_reschedule);
-
-out:
-	mutex_unlock(&group->rtpoll_trigger_lock);
 }

 static int psi_rtpoll_worker(void *data)
@@ -1046,9 +1039,6 @@ void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_st
  */
 void psi_memstall_enter(unsigned long *flags)
 {
-	struct rq_flags rf;
-	struct rq *rq;
-
 	if (static_branch_likely(&psi_disabled))
 		return;

@@ -1060,12 +1050,10 @@ void psi_memstall_enter(unsigned long *flags)
 	 * changes to the task's scheduling state, otherwise we can
 	 * race with CPU migration.
 	 */
-	rq = this_rq_lock_irq(&rf);
+	guard(rq_lock_irq)(this_rq());

 	current->in_memstall = 1;
 	psi_task_change(current, 0, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);
-
-	rq_unlock_irq(rq, &rf);
 }
 EXPORT_SYMBOL_GPL(psi_memstall_enter);

@@ -1077,9 +1065,6 @@ EXPORT_SYMBOL_GPL(psi_memstall_enter);
  */
 void psi_memstall_leave(unsigned long *flags)
 {
-	struct rq_flags rf;
-	struct rq *rq;
-
 	if (static_branch_likely(&psi_disabled))
 		return;

@@ -1090,12 +1075,10 @@ void psi_memstall_leave(unsigned long *flags)
 	 * changes to the task's scheduling state, otherwise we could
 	 * race with CPU migration.
 	 */
-	rq = this_rq_lock_irq(&rf);
+	guard(rq_lock_irq)(this_rq());

 	current->in_memstall = 0;
 	psi_task_change(current, TSK_MEMSTALL | TSK_MEMSTALL_RUNNING, 0);
-
-	rq_unlock_irq(rq, &rf);
 }
 EXPORT_SYMBOL_GPL(psi_memstall_leave);

@@ -1146,7 +1129,6 @@ void psi_cgroup_free(struct cgroup *cgroup)
 void cgroup_move_task(struct task_struct *task, struct css_set *to)
 {
 	unsigned int task_flags;
-	struct rq_flags rf;
 	struct rq *rq;

 	if (!static_branch_likely(&psi_cgroups_enabled)) {
@@ -1158,7 +1140,8 @@ void cgroup_move_task(struct task_struct *task, struct css_set *to)
 		return;
 	}

-	rq = task_rq_lock(task, &rf);
+	CLASS(task_rq_lock, rq_guard)(task);
+	rq = rq_guard.rq;

 	/*
 	 * We may race with schedule() dropping the rq lock between
@@ -1194,8 +1177,6 @@ void cgroup_move_task(struct task_struct *task, struct css_set *to)

 	if (task_flags)
 		psi_task_change(task, 0, task_flags);
-
-	task_rq_unlock(rq, task, &rf);
 }

 void psi_cgroup_restart(struct psi_group *group)
@@ -1222,11 +1203,9 @@ void psi_cgroup_restart(struct psi_group *group)

 	for_each_possible_cpu(cpu) {
 		struct rq *rq = cpu_rq(cpu);
-		struct rq_flags rf;

-		rq_lock_irq(rq, &rf);
+		guard(rq_lock_irq)(rq);
 		psi_group_change(group, cpu, 0, 0, true);
-		rq_unlock_irq(rq, &rf);
 	}
 }
 #endif /* CONFIG_CGROUPS */
@@ -1246,12 +1225,12 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
 #endif

 	/* Update averages before reporting them */
-	mutex_lock(&group->avgs_lock);
-	now = sched_clock();
-	collect_percpu_times(group, PSI_AVGS, NULL);
-	if (now >= group->avg_next_update)
-		group->avg_next_update = update_averages(group, now);
-	mutex_unlock(&group->avgs_lock);
+	scoped_guard(mutex, &group->avgs_lock) {
+		now = sched_clock();
+		collect_percpu_times(group, PSI_AVGS, NULL);
+		if (now >= group->avg_next_update)
+			group->avg_next_update = update_averages(group, now);
+	}

 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 	only_full = res == PSI_IRQ;
@@ -1349,7 +1328,7 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
 	t->aggregator = privileged ? PSI_POLL : PSI_AVGS;

 	if (privileged) {
-		mutex_lock(&group->rtpoll_trigger_lock);
+		guard(mutex)(&group->rtpoll_trigger_lock);

 		if (!rcu_access_pointer(group->rtpoll_task)) {
 			struct task_struct *task;
@@ -1357,7 +1336,6 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
 			task = kthread_create(psi_rtpoll_worker, group, "psimon");
 			if (IS_ERR(task)) {
 				kfree(t);
-				mutex_unlock(&group->rtpoll_trigger_lock);
 				return ERR_CAST(task);
 			}
 			atomic_set(&group->rtpoll_wakeup, 0);
@@ -1370,15 +1348,11 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group, char *buf,
 			div_u64(t->win.size, UPDATES_PER_WINDOW));
 		group->rtpoll_nr_triggers[t->state]++;
 		group->rtpoll_states |= (1 << t->state);
-
-		mutex_unlock(&group->rtpoll_trigger_lock);
 	} else {
-		mutex_lock(&group->avgs_lock);
+		guard(mutex)(&group->avgs_lock);

 		list_add(&t->node, &group->avg_triggers);
 		group->avg_nr_triggers[t->state]++;
-
-		mutex_unlock(&group->avgs_lock);
 	}
 	return t;
 }
@@ -1407,14 +1381,13 @@ void psi_trigger_destroy(struct psi_trigger *t)
 		wake_up_interruptible(&t->event_wait);

 	if (t->aggregator == PSI_AVGS) {
-		mutex_lock(&group->avgs_lock);
+		guard(mutex)(&group->avgs_lock);
 		if (!list_empty(&t->node)) {
 			list_del(&t->node);
 			group->avg_nr_triggers[t->state]--;
 		}
-		mutex_unlock(&group->avgs_lock);
 	} else {
-		mutex_lock(&group->rtpoll_trigger_lock);
+		guard(mutex)(&group->rtpoll_trigger_lock);
 		if (!list_empty(&t->node)) {
 			struct psi_trigger *tmp;
 			u64 period = ULLONG_MAX;
@@ -1443,7 +1416,6 @@ void psi_trigger_destroy(struct psi_trigger *t)
 				timer_delete(&group->rtpoll_timer);
 			}
 		}
-		mutex_unlock(&group->rtpoll_trigger_lock);
 	}

 	/*
@@ -1546,22 +1518,19 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf,
 	seq = file->private_data;

 	/* Take seq->lock to protect seq->private from concurrent writes */
-	mutex_lock(&seq->lock);
+	guard(mutex)(&seq->lock);

 	/* Allow only one trigger per file descriptor */
 	if (seq->private) {
-		mutex_unlock(&seq->lock);
 		return -EBUSY;
 	}

 	new = psi_trigger_create(&psi_system, buf, res, file, NULL);
 	if (IS_ERR(new)) {
-		mutex_unlock(&seq->lock);
 		return PTR_ERR(new);
 	}

 	smp_store_release(&seq->private, new);
-	mutex_unlock(&seq->lock);

 	return nbytes;
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 475bb5998295..c2929f6be25f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1893,17 +1893,9 @@ DEFINE_LOCK_GUARD_1(rq_lock_irqsave, struct rq,
 		    rq_unlock_irqrestore(_T->lock, &_T->rf),
 		    struct rq_flags rf)

-static inline struct rq *this_rq_lock_irq(struct rq_flags *rf)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	local_irq_disable();
-	rq = this_rq();
-	rq_lock(rq, rf);
-
-	return rq;
-}
+DEFINE_LOCK_GUARD_1(raw_spin_rq_lock_irq, struct rq,
+		    raw_spin_rq_lock_irq(_T->lock),
+		    raw_spin_rq_unlock_irq(_T->lock))

 #ifdef CONFIG_NUMA

--
2.43.0
From 409c82ef25583f35ec53d0f574484d3c3c4fb5f1 Mon Sep 17 00:00:00 2001
From: Jemmy Wong <jemmywong512@gmail.com>
Date: Sat, 14 Jun 2025 15:27:13 +0800
Subject: [PATCH v0 04/13] sched/cpuacct: Scope-based Resource Management
 Support

This change replaces manual lock acquisition and release with lock guards
to improve code robustness and reduce the risk of lock mismanagement.

Signed-off-by: Jemmy Wong <jemmywong512@gmail.com>

---
 kernel/sched/cpuacct.c | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
index 0de9dda09949..c6ba078ae4f6 100644
--- a/kernel/sched/cpuacct.c
+++ b/kernel/sched/cpuacct.c
@@ -109,7 +109,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
 	/*
 	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
 	 */
-	raw_spin_rq_lock_irq(cpu_rq(cpu));
+	guard(raw_spin_rq_lock_irq)(cpu_rq(cpu));
 #endif

 	switch (index) {
@@ -125,10 +125,6 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
 		break;
 	}

-#ifndef CONFIG_64BIT
-	raw_spin_rq_unlock_irq(cpu_rq(cpu));
-#endif
-
 	return data;
 }

@@ -145,16 +141,12 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu)
 	/*
 	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
 	 */
-	raw_spin_rq_lock_irq(cpu_rq(cpu));
+	guard(raw_spin_rq_lock_irq)(cpu_rq(cpu));
 #endif
 	*cpuusage = 0;
 	cpustat[CPUTIME_USER] = cpustat[CPUTIME_NICE] = 0;
 	cpustat[CPUTIME_SYSTEM] = cpustat[CPUTIME_IRQ] = 0;
 	cpustat[CPUTIME_SOFTIRQ] = 0;
-
-#ifndef CONFIG_64BIT
-	raw_spin_rq_unlock_irq(cpu_rq(cpu));
-#endif
 }

 /* Return total CPU usage (in nanoseconds) of a group */
--
2.43.0
From 903c2521f9a1a8020033614c7f6733c8cafc1968 Mon Sep 17 00:00:00 2001
From: Jemmy Wong <jemmywong512@gmail.com>
Date: Sat, 14 Jun 2025 15:27:31 +0800
Subject: [PATCH v0 05/13] sched/syscalls: Scope-based Resource Management
 Support

This change replaces manual lock acquisition and release with lock guards
to improve code robustness and reduce the risk of lock mismanagement.

Signed-off-by: Jemmy Wong <jemmywong512@gmail.com>

---
 kernel/sched/syscalls.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index 547c1f05b667..cc677f5845c2 100644
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -1353,9 +1353,9 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 static void do_sched_yield(void)
 {
 	struct rq_flags rf;
-	struct rq *rq;
+	struct rq *rq = this_rq();

-	rq = this_rq_lock_irq(&rf);
+	rq_lock_irq(rq, &rf);

 	schedstat_inc(rq->yld_count);
 	current->sched_class->yield_task(rq);
--
2.43.0
From b9859f2d49c79f25702406e28504aac8603050dc Mon Sep 17 00:00:00 2001
From: Jemmy Wong <jemmywong512@gmail.com>
Date: Sat, 14 Jun 2025 15:27:59 +0800
Subject: [PATCH v0 06/13] sched/core_sched: Scope-based Resource Management
 Support

This change replaces manual lock acquisition and release with lock guards
to improve code robustness and reduce the risk of lock mismanagement.

Signed-off-by: Jemmy Wong <jemmywong512@gmail.com>

---
 kernel/sched/core_sched.c | 30 ++++++++++++------------------
 1 file changed, 12 insertions(+), 18 deletions(-)

diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c
index c4606ca89210..4dc6fdc8bae0 100644
--- a/kernel/sched/core_sched.c
+++ b/kernel/sched/core_sched.c
@@ -54,11 +54,10 @@ static unsigned long sched_core_update_cookie(struct task_struct *p,
 					      unsigned long cookie)
 {
 	unsigned long old_cookie;
-	struct rq_flags rf;
 	struct rq *rq;

-	rq = task_rq_lock(p, &rf);
-
+	CLASS(task_rq_lock, guard)(p);
+	rq = guard.rq;
 	/*
 	 * Since creating a cookie implies sched_core_get(), and we cannot set
 	 * a cookie until after we've created it, similarly, we cannot destroy
@@ -91,18 +90,15 @@ static unsigned long sched_core_update_cookie(struct task_struct *p,
 	if (task_on_cpu(rq, p))
 		resched_curr(rq);

-	task_rq_unlock(rq, p, &rf);
-
 	return old_cookie;
 }

 static unsigned long sched_core_clone_cookie(struct task_struct *p)
 {
-	unsigned long cookie, flags;
+	unsigned long cookie;

-	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	guard(raw_spinlock_irqsave)(&p->pi_lock);
 	cookie = sched_core_get_cookie(p->core_cookie);
-	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

 	return cookie;
 }
@@ -145,18 +141,16 @@ int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
 	    (cmd != PR_SCHED_CORE_GET && uaddr))
 		return -EINVAL;

-	rcu_read_lock();
-	if (pid == 0) {
-		task = current;
-	} else {
-		task = find_task_by_vpid(pid);
-		if (!task) {
-			rcu_read_unlock();
-			return -ESRCH;
+	scoped_guard(rcu) {
+		if (pid == 0) {
+			task = current;
+		} else {
+			task = find_task_by_vpid(pid);
+			if (!task)
+				return -ESRCH;
 		}
+		get_task_struct(task);
 	}
-	get_task_struct(task);
-	rcu_read_unlock();

 	/*
 	 * Check if this process has the right to modify the specified
--
2.43.0
From bd17d15ad082a4288e45be1679f52cba691a8e0d Mon Sep 17 00:00:00 2001
From: Jemmy Wong <jemmywong512@gmail.com>
Date: Sat, 14 Jun 2025 15:28:15 +0800
Subject: [PATCH v0 07/13] sched/cpudeadline: Scope-based Resource Management
 Support

This change replaces manual lock acquisition and release with lock guards
to improve code robustness and reduce the risk of lock mismanagement.

Signed-off-by: Jemmy Wong <jemmywong512@gmail.com>

---
 kernel/sched/cpudeadline.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 95baa12a1029..7959c9383e72 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -173,11 +173,10 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 void cpudl_clear(struct cpudl *cp, int cpu)
 {
 	int old_idx, new_cpu;
-	unsigned long flags;

 	WARN_ON(!cpu_present(cpu));

-	raw_spin_lock_irqsave(&cp->lock, flags);
+	guard(raw_spinlock_irqsave)(&cp->lock);

 	old_idx = cp->elements[cpu].idx;
 	if (old_idx == IDX_INVALID) {
@@ -197,7 +196,6 @@ void cpudl_clear(struct cpudl *cp, int cpu)

 		cpumask_set_cpu(cpu, cp->free_cpus);
 	}
-	raw_spin_unlock_irqrestore(&cp->lock, flags);
 }

 /*
@@ -213,11 +211,10 @@ void cpudl_clear(struct cpudl *cp, int cpu)
 void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
 {
 	int old_idx;
-	unsigned long flags;

 	WARN_ON(!cpu_present(cpu));

-	raw_spin_lock_irqsave(&cp->lock, flags);
+	guard(raw_spinlock_irqsave)(&cp->lock);

 	old_idx = cp->elements[cpu].idx;
 	if (old_idx == IDX_INVALID) {
@@ -232,8 +229,6 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
 		cp->elements[old_idx].dl = dl;
 		cpudl_heapify(cp, old_idx);
 	}
-
-	raw_spin_unlock_irqrestore(&cp->lock, flags);
 }

 /*
--
2.43.0
From 072805c802590a9e0e76cd6a0cc214fe10d7ba93 Mon Sep 17 00:00:00 2001
From: Jemmy Wong <jemmywong512@gmail.com>
Date: Sat, 14 Jun 2025 15:28:34 +0800
Subject: [PATCH v0 08/13] sched/cpufreq_schedutil: Scope-based Resource
 Management Support

This change replaces manual lock acquisition and release with lock guards
to improve code robustness and reduce the risk of lock mismanagement.

Signed-off-by: Jemmy Wong <jemmywong512@gmail.com>

---
 kernel/sched/cpufreq_schedutil.c | 41 +++++++++++++-------------------
 1 file changed, 17 insertions(+), 24 deletions(-)

diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 461242ec958a..72bad0d98177 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -449,9 +449,8 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
 	if (sg_policy->policy->fast_switch_enabled) {
 		cpufreq_driver_fast_switch(sg_policy->policy, next_f);
 	} else {
-		raw_spin_lock(&sg_policy->update_lock);
+		guard(raw_spinlock)(&sg_policy->update_lock);
 		sugov_deferred_update(sg_policy);
-		raw_spin_unlock(&sg_policy->update_lock);
 	}
 }

@@ -515,7 +514,7 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
 	unsigned int next_f;

-	raw_spin_lock(&sg_policy->update_lock);
+	guard(raw_spinlock)(&sg_policy->update_lock);

 	sugov_iowait_boost(sg_cpu, time, flags);
 	sg_cpu->last_update = time;
@@ -526,22 +525,19 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
 		next_f = sugov_next_freq_shared(sg_cpu, time);

 		if (!sugov_update_next_freq(sg_policy, time, next_f))
-			goto unlock;
+			return;

 		if (sg_policy->policy->fast_switch_enabled)
 			cpufreq_driver_fast_switch(sg_policy->policy, next_f);
 		else
 			sugov_deferred_update(sg_policy);
 	}
-unlock:
-	raw_spin_unlock(&sg_policy->update_lock);
 }

 static void sugov_work(struct kthread_work *work)
 {
 	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
 	unsigned int freq;
-	unsigned long flags;

 	/*
 	 * Hold sg_policy->update_lock shortly to handle the case where:
@@ -553,14 +549,14 @@ static void sugov_work(struct kthread_work *work)
 	 * sugov_work() will just be called again by kthread_work code; and the
 	 * request will be proceed before the sugov thread sleeps.
 	 */
-	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
-	freq = sg_policy->next_freq;
-	sg_policy->work_in_progress = false;
-	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
+	scoped_guard(raw_spinlock_irqsave, &sg_policy->update_lock) {
+		freq = sg_policy->next_freq;
+		sg_policy->work_in_progress = false;
+	}

-	mutex_lock(&sg_policy->work_lock);
-	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
-	mutex_unlock(&sg_policy->work_lock);
+	scoped_guard(mutex, &sg_policy->work_lock) {
+		__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
+	}
 }

 static void sugov_irq_work(struct irq_work *irq_work)
@@ -822,14 +818,12 @@ static void sugov_exit(struct cpufreq_policy *policy)
 	struct sugov_tunables *tunables = sg_policy->tunables;
 	unsigned int count;

-	mutex_lock(&global_tunables_lock);
-
-	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
-	policy->governor_data = NULL;
-	if (!count)
-		sugov_clear_global_tunables();
-
-	mutex_unlock(&global_tunables_lock);
+	scoped_guard(mutex, &global_tunables_lock) {
+		count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
+		policy->governor_data = NULL;
+		if (!count)
+			sugov_clear_global_tunables();
+	}

 	sugov_kthread_stop(sg_policy);
 	sugov_policy_free(sg_policy);
@@ -892,9 +886,8 @@ static void sugov_limits(struct cpufreq_policy *policy)
 	struct sugov_policy *sg_policy = policy->governor_data;

 	if (!policy->fast_switch_enabled) {
-		mutex_lock(&sg_policy->work_lock);
+		guard(mutex)(&sg_policy->work_lock);
 		cpufreq_policy_apply_limits(policy);
-		mutex_unlock(&sg_policy->work_lock);
 	}

 	/*
--
2.43.0
From 507782856b661027f8b25fe77fdd77986857295a Mon Sep 17 00:00:00 2001
From: Jemmy Wong <jemmywong512@gmail.com>
Date: Sat, 14 Jun 2025 15:28:47 +0800
Subject: [PATCH v0 09/13] sched/cputime: Scope-based Resource Management
 Support

This change replaces manual lock acquisition and release with lock guards
to improve code robustness and reduce the risk of lock mismanagement.

Signed-off-by: Jemmy Wong <jemmywong512@gmail.com>

---
 kernel/sched/cputime.c | 39 +++++++++++++++++----------------------
 1 file changed, 17 insertions(+), 22 deletions(-)

diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 6dab4854c6c0..1bff83476183 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -292,12 +292,10 @@ static inline u64 read_sum_exec_runtime(struct task_struct *t)
 static u64 read_sum_exec_runtime(struct task_struct *t)
 {
 	u64 ns;
-	struct rq_flags rf;
 	struct rq *rq;

-	rq = task_rq_lock(t, &rf);
+	guard(task_rq_lock)(t);
 	ns = t->se.sum_exec_runtime;
-	task_rq_unlock(rq, t, &rf);

 	return ns;
 }
@@ -326,7 +324,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 	if (same_thread_group(current, tsk))
 		(void) task_sched_runtime(current);

-	rcu_read_lock();
+	guard(rcu)();
 	/* Attempt a lockless read on the first round. */
 	nextseq = 0;
 	do {
@@ -346,7 +344,6 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 		nextseq = 1;
 	} while (need_seqretry(&sig->stats_lock, seq));
 	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
-	rcu_read_unlock();
 }

 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -984,15 +981,14 @@ u64 kcpustat_field(struct kernel_cpustat *kcpustat,
 	for (;;) {
 		struct task_struct *curr;

-		rcu_read_lock();
-		curr = rcu_dereference(rq->curr);
-		if (WARN_ON_ONCE(!curr)) {
-			rcu_read_unlock();
-			return cpustat[usage];
-		}
+		scoped_guard(rcu) {
+			curr = rcu_dereference(rq->curr);
+			if (WARN_ON_ONCE(!curr))
+				return cpustat[usage];

-		err = kcpustat_field_vtime(cpustat, curr, usage, cpu, &val);
-		rcu_read_unlock();
+			err = kcpustat_field_vtime(cpustat, curr,
+				usage, cpu, &val);
+		}

 		if (!err)
 			return val;
@@ -1071,16 +1067,15 @@ void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
 	for (;;) {
 		struct task_struct *curr;

-		rcu_read_lock();
-		curr = rcu_dereference(rq->curr);
-		if (WARN_ON_ONCE(!curr)) {
-			rcu_read_unlock();
-			*dst = *src;
-			return;
-		}
+		scoped_guard(rcu) {
+			curr = rcu_dereference(rq->curr);
+			if (WARN_ON_ONCE(!curr)) {
+				*dst = *src;
+				return;
+			}

-		err = kcpustat_cpu_fetch_vtime(dst, src, curr, cpu);
-		rcu_read_unlock();
+			err = kcpustat_cpu_fetch_vtime(dst, src, curr, cpu);
+		}

 		if (!err)
 			return;
--
2.43.0
From ee28e9e123bc4557008a38652d78de5019c41be0 Mon Sep 17 00:00:00 2001
From: Jemmy Wong <jemmywong512@gmail.com>
Date: Sat, 14 Jun 2025 15:29:04 +0800
Subject: [PATCH v0 10/13] sched/stats: Scope-based Resource Management Support

This change replaces manual lock acquisition and release with lock guards
to improve code robustness and reduce the risk of lock mismanagement.

Signed-off-by: Jemmy Wong <jemmywong512@gmail.com>

---
 kernel/sched/stats.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index 4346fd81c31f..1497a244eb40 100644
--- a/kernel/sched/stats.c
+++ b/kernel/sched/stats.c
@@ -134,7 +134,7 @@ static int show_schedstat(struct seq_file *seq, void *v)

 #ifdef CONFIG_SMP
 		/* domain-specific stats */
-		rcu_read_lock();
+		guard(rcu)();
 		for_each_domain(cpu, sd) {
 			enum cpu_idle_type itype;

@@ -162,7 +162,6 @@ static int show_schedstat(struct seq_file *seq, void *v)
 			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
 			    sd->ttwu_move_balance);
 		}
-		rcu_read_unlock();
 #endif
 	}
 	return 0;
--
2.43.0
From 1cea7c63e8d44b4ec5c8a3e7d7911200a4458047 Mon Sep 17 00:00:00 2001
From: Jemmy Wong <jemmywong512@gmail.com>
Date: Sat, 14 Jun 2025 15:29:20 +0800
Subject: [PATCH v0 11/13] sched/wait: Scope-based Resource Management Support

This change replaces manual lock acquisition and release with lock guards
to improve code robustness and reduce the risk of lock mismanagement.

Signed-off-by: Jemmy Wong <jemmywong512@gmail.com>

---
 kernel/sched/wait.c | 43 ++++++++++---------------------------------
 1 file changed, 10 insertions(+), 33 deletions(-)

diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 51e38f5f4701..8c5dba8fc312 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -16,44 +16,32 @@ EXPORT_SYMBOL(__init_waitqueue_head);

 void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
-	unsigned long flags;
-
 	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
-	spin_lock_irqsave(&wq_head->lock, flags);
+	guard(spinlock_irqsave)(&wq_head->lock);
 	__add_wait_queue(wq_head, wq_entry);
-	spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(add_wait_queue);

 void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
-	unsigned long flags;
-
 	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
-	spin_lock_irqsave(&wq_head->lock, flags);
+	guard(spinlock_irqsave)(&wq_head->lock);
 	__add_wait_queue_entry_tail(wq_head, wq_entry);
-	spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(add_wait_queue_exclusive);

 void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
-	unsigned long flags;
-
 	wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY;
-	spin_lock_irqsave(&wq_head->lock, flags);
+	guard(spinlock_irqsave)(&wq_head->lock);
 	__add_wait_queue(wq_head, wq_entry);
-	spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL_GPL(add_wait_queue_priority);

 void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&wq_head->lock, flags);
+	guard(spinlock_irqsave)(&wq_head->lock);
 	__remove_wait_queue(wq_head, wq_entry);
-	spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(remove_wait_queue);

@@ -99,13 +87,11 @@ static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
 static int __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
 			int nr_exclusive, int wake_flags, void *key)
 {
-	unsigned long flags;
 	int remaining;

-	spin_lock_irqsave(&wq_head->lock, flags);
+	guard(spinlock_irqsave)(&wq_head->lock);
 	remaining = __wake_up_common(wq_head, mode, nr_exclusive, wake_flags,
 			key);
-	spin_unlock_irqrestore(&wq_head->lock, flags);

 	return nr_exclusive - remaining;
 }
@@ -228,14 +214,11 @@ void __wake_up_pollfree(struct wait_queue_head *wq_head)
 void
 prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
 {
-	unsigned long flags;
-
 	wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
-	spin_lock_irqsave(&wq_head->lock, flags);
+	guard(spinlock_irqsave)(&wq_head->lock);
 	if (list_empty(&wq_entry->entry))
 		__add_wait_queue(wq_head, wq_entry);
 	set_current_state(state);
-	spin_unlock_irqrestore(&wq_head->lock, flags);
 }
 EXPORT_SYMBOL(prepare_to_wait);

@@ -243,17 +226,16 @@ EXPORT_SYMBOL(prepare_to_wait);
 bool
 prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
 {
-	unsigned long flags;
 	bool was_empty = false;

 	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
-	spin_lock_irqsave(&wq_head->lock, flags);
+	guard(spinlock_irqsave)(&wq_head->lock);
 	if (list_empty(&wq_entry->entry)) {
 		was_empty = list_empty(&wq_head->head);
 		__add_wait_queue_entry_tail(wq_head, wq_entry);
 	}
 	set_current_state(state);
-	spin_unlock_irqrestore(&wq_head->lock, flags);
+
 	return was_empty;
 }
 EXPORT_SYMBOL(prepare_to_wait_exclusive);
@@ -269,10 +251,9 @@ EXPORT_SYMBOL(init_wait_entry);

 long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state)
 {
-	unsigned long flags;
 	long ret = 0;

-	spin_lock_irqsave(&wq_head->lock, flags);
+	guard(spinlock_irqsave)(&wq_head->lock);
 	if (signal_pending_state(state, current)) {
 		/*
 		 * Exclusive waiter must not fail if it was selected by wakeup,
@@ -297,7 +278,6 @@ long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_en
 		}
 		set_current_state(state);
 	}
-	spin_unlock_irqrestore(&wq_head->lock, flags);

 	return ret;
 }
@@ -355,8 +335,6 @@ EXPORT_SYMBOL(do_wait_intr_irq);
  */
 void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
-	unsigned long flags;
-
 	__set_current_state(TASK_RUNNING);
 	/*
 	 * We can check for list emptiness outside the lock
@@ -372,9 +350,8 @@ void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_en
 	 *    the list).
 	 */
 	if (!list_empty_careful(&wq_entry->entry)) {
-		spin_lock_irqsave(&wq_head->lock, flags);
+		guard(spinlock_irqsave)(&wq_head->lock);
 		list_del_init(&wq_entry->entry);
-		spin_unlock_irqrestore(&wq_head->lock, flags);
 	}
 }
 EXPORT_SYMBOL(finish_wait);
--
2.43.0
From c70921420ddedfa08fed87d08bcb7130e63dcd7d Mon Sep 17 00:00:00 2001
From: Jemmy Wong <jemmywong512@gmail.com>
Date: Sat, 14 Jun 2025 15:29:35 +0800
Subject: [PATCH v0 12/13] sched/swait: Scope-based Resource Management Support

This change replaces manual lock acquisition and release with lock guards
to improve code robustness and reduce the risk of lock mismanagement.

Signed-off-by: Jemmy Wong <jemmywong512@gmail.com>

---
 kernel/sched/swait.c | 29 +++++++++--------------------
 1 file changed, 9 insertions(+), 20 deletions(-)

diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
index 72505cd3b60a..cf4eda166cf0 100644
--- a/kernel/sched/swait.c
+++ b/kernel/sched/swait.c
@@ -46,11 +46,9 @@ void swake_up_all_locked(struct swait_queue_head *q)

 void swake_up_one(struct swait_queue_head *q)
 {
-	unsigned long flags;

-	raw_spin_lock_irqsave(&q->lock, flags);
+	guard(raw_spinlock_irqsave)(&q->lock);
 	swake_up_locked(q, 0);
-	raw_spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL(swake_up_one);

@@ -63,9 +61,12 @@ void swake_up_all(struct swait_queue_head *q)
 	struct swait_queue *curr;
 	LIST_HEAD(tmp);

-	raw_spin_lock_irq(&q->lock);
-	list_splice_init(&q->task_list, &tmp);
+	scoped_guard(raw_spinlock_irq, &q->lock) {
+		list_splice_init(&q->task_list, &tmp);
+	}
 	while (!list_empty(&tmp)) {
+		guard(raw_spinlock_irq)(&q->lock);
+
 		curr = list_first_entry(&tmp, typeof(*curr), task_list);

 		wake_up_state(curr->task, TASK_NORMAL);
@@ -73,11 +74,7 @@ void swake_up_all(struct swait_queue_head *q)

 		if (list_empty(&tmp))
 			break;
-
-		raw_spin_unlock_irq(&q->lock);
-		raw_spin_lock_irq(&q->lock);
 	}
-	raw_spin_unlock_irq(&q->lock);
 }
 EXPORT_SYMBOL(swake_up_all);

@@ -90,21 +87,17 @@ void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)

 void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
 {
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&q->lock, flags);
+	guard(raw_spinlock_irqsave)(&q->lock);
 	__prepare_to_swait(q, wait);
 	set_current_state(state);
-	raw_spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL(prepare_to_swait_exclusive);

 long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
 {
-	unsigned long flags;
 	long ret = 0;

-	raw_spin_lock_irqsave(&q->lock, flags);
+	guard(raw_spinlock_irqsave)(&q->lock);
 	if (signal_pending_state(state, current)) {
 		/*
 		 * See prepare_to_wait_event(). TL;DR, subsequent swake_up_one()
@@ -116,7 +109,6 @@ long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait
 		__prepare_to_swait(q, wait);
 		set_current_state(state);
 	}
-	raw_spin_unlock_irqrestore(&q->lock, flags);

 	return ret;
 }
@@ -131,14 +123,11 @@ void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)

 void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
 {
-	unsigned long flags;
-
 	__set_current_state(TASK_RUNNING);

 	if (!list_empty_careful(&wait->task_list)) {
-		raw_spin_lock_irqsave(&q->lock, flags);
+		guard(raw_spinlock_irqsave)(&q->lock);
 		list_del_init(&wait->task_list);
-		raw_spin_unlock_irqrestore(&q->lock, flags);
 	}
 }
 EXPORT_SYMBOL(finish_swait);
--
2.43.0
From 0dc6cbb0bd06fe2c29999b7c6b3c2206b612d1fa Mon Sep 17 00:00:00 2001
From: Jemmy Wong <jemmywong512@gmail.com>
Date: Sat, 14 Jun 2025 15:30:26 +0800
Subject: [PATCH v0 13/13] sched/ext_idle: Scope-based Resource Management
 Support

This change replaces manual lock acquisition and release with lock guards
to improve code robustness and reduce the risk of lock mismanagement.

Signed-off-by: Jemmy Wong <jemmywong512@gmail.com>

---
 kernel/sched/ext_idle.c | 29 ++++++++++++-----------------
 1 file changed, 12 insertions(+), 17 deletions(-)

diff --git a/kernel/sched/ext_idle.c b/kernel/sched/ext_idle.c
index 6d29d3cbc670..0d280ab06b6d 100644
--- a/kernel/sched/ext_idle.c
+++ b/kernel/sched/ext_idle.c
@@ -458,7 +458,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 	bool is_prev_allowed;
 	s32 cpu;

-	preempt_disable();
+	guard(preempt)();

 	/*
 	 * Check whether @prev_cpu is still within the allowed set. If not,
@@ -485,7 +485,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 	/*
 	 * This is necessary to protect llc_cpus.
 	 */
-	rcu_read_lock();
+	guard(rcu)();

 	/*
 	 * Determine the subset of CPUs that the task can use in its
@@ -528,7 +528,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 		if (is_prev_allowed && cpus_share_cache(cpu, prev_cpu) &&
 		    scx_idle_test_and_clear_cpu(prev_cpu)) {
 			cpu = prev_cpu;
-			goto out_unlock;
+			return cpu;
 		}

 		/*
@@ -550,7 +550,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 		    (!(flags & SCX_PICK_IDLE_IN_NODE) || (waker_node == node)) &&
 		    !cpumask_empty(idle_cpumask(waker_node)->cpu)) {
 			if (cpumask_test_cpu(cpu, allowed))
-				goto out_unlock;
+				return cpu;
 		}
 	}

@@ -566,7 +566,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 		    cpumask_test_cpu(prev_cpu, idle_cpumask(node)->smt) &&
 		    scx_idle_test_and_clear_cpu(prev_cpu)) {
 			cpu = prev_cpu;
-			goto out_unlock;
+			return cpu;
 		}

 		/*
@@ -575,7 +575,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 		if (llc_cpus) {
 			cpu = pick_idle_cpu_in_node(llc_cpus, node, SCX_PICK_IDLE_CORE);
 			if (cpu >= 0)
-				goto out_unlock;
+				return cpu;
 		}

 		/*
@@ -584,7 +584,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 		if (numa_cpus) {
 			cpu = pick_idle_cpu_in_node(numa_cpus, node, SCX_PICK_IDLE_CORE);
 			if (cpu >= 0)
-				goto out_unlock;
+				return cpu;
 		}

 		/*
@@ -597,7 +597,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 		 */
 		cpu = scx_pick_idle_cpu(allowed, node, flags | SCX_PICK_IDLE_CORE);
 		if (cpu >= 0)
-			goto out_unlock;
+			return cpu;

 		/*
 		 * Give up if we're strictly looking for a full-idle SMT
@@ -605,7 +605,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 		 */
 		if (flags & SCX_PICK_IDLE_CORE) {
 			cpu = -EBUSY;
-			goto out_unlock;
+			return cpu;
 		}
 	}

@@ -614,7 +614,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 	 */
 	if (is_prev_allowed && scx_idle_test_and_clear_cpu(prev_cpu)) {
 		cpu = prev_cpu;
-		goto out_unlock;
+		return cpu;
 	}

 	/*
@@ -623,7 +623,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 	if (llc_cpus) {
 		cpu = pick_idle_cpu_in_node(llc_cpus, node, 0);
 		if (cpu >= 0)
-			goto out_unlock;
+			return cpu;
 	}

 	/*
@@ -632,7 +632,7 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 	if (numa_cpus) {
 		cpu = pick_idle_cpu_in_node(numa_cpus, node, 0);
 		if (cpu >= 0)
-			goto out_unlock;
+			return cpu;
 	}

 	/*
@@ -645,11 +645,6 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
 	 */
 	cpu = scx_pick_idle_cpu(allowed, node, flags);

-out_unlock:
-	rcu_read_unlock();
-out_enable:
-	preempt_enable();
-
 	return cpu;
 }

--
2.43.0