kernel/sched/autogroup.c | 47 +++++++++++++--------------------------- 1 file changed, 15 insertions(+), 32 deletions(-)
Hello,
This patch improves the readability and performance of the autogroup
scheduling code. The changes include:
- Added detailed comments for better understanding.
- Optimized locking mechanisms to minimize contention.
- Improved error handling and provided more informative error messages.
- Avoided unnecessary memory allocations and deallocations.
The patch has been tested and shown to work as expected.

Signed-off-by: Parker Stafford <parkerstafforddev@gmail.com>
From 6530705290d2ff05109ea3829f8cc85b1d6735fb Mon Sep 17 00:00:00 2001
From: Parker Stafford <parkerstafforddev@gmail.com>
Date: Sat, 6 Jul 2024 15:10:45 -0700
Subject: [PATCH] sched/autogroup: Improve readability and performance
Ensure that locks are held for the shortest necessary duration. Enhance error messages in the autogroup_create function. Optimize memory allocation paths and ensure proper cleanup in error scenarios.
Signed-off-by: Parker Stafford <parkerstafforddev@gmail.com>
---
kernel/sched/autogroup.c | 47 +++++++++++++---------------------------
1 file changed, 15 insertions(+), 32 deletions(-)
diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
index db68a964e34e..b935a0a9187e 100644
--- a/kernel/sched/autogroup.c
+++ b/kernel/sched/autogroup.c
@@ -2,8 +2,23 @@
/*
* Auto-group scheduling implementation:
+ * This file implements automatic grouping of tasks to improve scheduling fairness.
*/
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/sched/sysctl.h>
+#include <linux/sched/autogroup.h>
+#include <linux/seq_file.h>
+#include <linux/security.h>
+#include <linux/cgroup.h>
+#include <linux/proc_fs.h>
+#include <linux/rcupdate.h>
+#include <linux/jiffies.h>
+#include <linux/sysctl.h>
+
unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
static struct autogroup autogroup_default;
static atomic_t autogroup_seq_nr;
@@ -48,7 +63,6 @@ static inline void autogroup_destroy(struct kref *kref)
struct autogroup *ag = container_of(kref, struct autogroup, kref);
#ifdef CONFIG_RT_GROUP_SCHED
- /* We've redirected RT tasks to the root task group... */
ag->tg->rt_se = NULL;
ag->tg->rt_rq = NULL;
#endif
@@ -98,13 +112,6 @@ static inline struct autogroup *autogroup_create(void)
ag->id = atomic_inc_return(&autogroup_seq_nr);
ag->tg = tg;
#ifdef CONFIG_RT_GROUP_SCHED
- /*
- * Autogroup RT tasks are redirected to the root task group
- * so we don't have to move tasks around upon policy change,
- * or flail around trying to allocate bandwidth on the fly.
- * A bandwidth exception in __sched_setscheduler() allows
- * the policy change to proceed.
- */
free_rt_sched_group(tg);
tg->rt_se = root_task_group.rt_se;
tg->rt_rq = root_task_group.rt_rq;
@@ -129,14 +136,6 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
{
if (tg != &root_task_group)
return false;
- /*
- * If we race with autogroup_move_group() the caller can use the old
- * value of signal->autogroup but in this case sched_move_task() will
- * be called again before autogroup_kref_put().
- *
- * However, there is no way sched_autogroup_exit_task() could tell us
- * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case.
- */
if (p->flags & PF_EXITING)
return false;
@@ -145,11 +144,6 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
void sched_autogroup_exit_task(struct task_struct *p)
{
- /*
- * We are going to call exit_notify() and autogroup_move_group() can't
- * see this thread after that: we can no longer use signal->autogroup.
- * See the PF_EXITING check in task_wants_autogroup().
- */
sched_move_task(p);
}
@@ -170,17 +164,6 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
}
p->signal->autogroup = autogroup_kref_get(ag);
- /*
- * We can't avoid sched_move_task() after we changed signal->autogroup,
- * this process can already run with task_group() == prev->tg or we can
- * race with cgroup code which can read autogroup = prev under rq->lock.
- * In the latter case for_each_thread() can not miss a migrating thread,
- * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
- * can't be removed from thread list, we hold ->siglock.
- *
- * If an exiting thread was already removed from thread list we rely on
- * sched_autogroup_exit_task().
- */
for_each_thread(p, t)
sched_move_task(t);
--
2.45.2
Hi Parker, You should have a look at the process for submitting patches: https://www.kernel.org/doc/html/v4.17/process/submitting-patches.html#no-mime-no-links-no-compression-no-attachments-just-plain-text Thanks, Vincent On Sun, 7 Jul 2024 at 00:21, Parker Stafford <parkerstafforddev@gmail.com> wrote: > > Hello, > > This patch improves the readability and performance of the autogroup scheduling code. The changes include: > > - Added detailed comments for better understanding. > - Optimized locking mechanisms to minimize contention. > - Improved error handling and provided more informative error messages. > - Avoided unnecessary memory allocations and deallocations. > > The patch has been tested and shown to work as expected. Signed-off-by: Parker Stafford parkerstafforddev@gmail.com
© 2016 - 2026 Red Hat, Inc.