[PATCH v6 7/9] sched/fair: allow changing cgroup of new forked task

Posted by Chengming Zhou 3 years, 7 months ago
commit 7dc603c9028e ("sched/fair: Fix PELT integrity for new tasks")
introduced a TASK_NEW state and an unnecessary limitation that made
changing the cgroup of a newly forked task fail.

At that time we could not handle task_change_group_fair() for a newly
forked fair task that had not yet been woken up by wake_up_new_task():
moving it would have detached a sched_avg that was never attached.

Delete this unnecessary limitation by checking for TASK_NEW before
doing the detach or attach in task_change_group_fair().

With that, cpu_cgrp_subsys.can_attach() has nothing left to do for fair
tasks, so only define it under #ifdef CONFIG_RT_GROUP_SCHED.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
---
 kernel/sched/core.c | 25 +++++--------------------
 kernel/sched/fair.c |  7 +++++++
 2 files changed, 12 insertions(+), 20 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8e3f1c3f0b2c..14819bd66021 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10290,36 +10290,19 @@ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
 	sched_unregister_group(tg);
 }
 
+#ifdef CONFIG_RT_GROUP_SCHED
 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 	struct cgroup_subsys_state *css;
-	int ret = 0;
 
 	cgroup_taskset_for_each(task, css, tset) {
-#ifdef CONFIG_RT_GROUP_SCHED
 		if (!sched_rt_can_attach(css_tg(css), task))
 			return -EINVAL;
-#endif
-		/*
-		 * Serialize against wake_up_new_task() such that if it's
-		 * running, we're sure to observe its full state.
-		 */
-		raw_spin_lock_irq(&task->pi_lock);
-		/*
-		 * Avoid calling sched_move_task() before wake_up_new_task()
-		 * has happened. This would lead to problems with PELT, due to
-		 * move wanting to detach+attach while we're not attached yet.
-		 */
-		if (READ_ONCE(task->__state) == TASK_NEW)
-			ret = -EINVAL;
-		raw_spin_unlock_irq(&task->pi_lock);
-
-		if (ret)
-			break;
 	}
-	return ret;
+	return 0;
 }
+#endif
 
 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
 {
@@ -11155,7 +11138,9 @@ struct cgroup_subsys cpu_cgrp_subsys = {
 	.css_released	= cpu_cgroup_css_released,
 	.css_free	= cpu_cgroup_css_free,
 	.css_extra_stat_show = cpu_extra_stat_show,
+#ifdef CONFIG_RT_GROUP_SCHED
 	.can_attach	= cpu_cgroup_can_attach,
+#endif
 	.attach		= cpu_cgroup_attach,
 	.legacy_cftypes	= cpu_legacy_files,
 	.dfl_cftypes	= cpu_files,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index eba8a64f905a..c319b0bd2bc1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11840,6 +11840,13 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void task_change_group_fair(struct task_struct *p)
 {
+	/*
+	 * We couldn't detach or attach a forked task which
+	 * hasn't been woken up by wake_up_new_task().
+	 */
+	if (READ_ONCE(p->__state) == TASK_NEW)
+		return;
+
 	detach_task_cfs_rq(p);
 
 #ifdef CONFIG_SMP
-- 
2.37.2
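
For context, the user-visible operation this patch enables is writing a
just-forked task's PID into a CPU cgroup before the task has ever run.
Below is a minimal userspace sketch of that operation (not part of the
patch); the /sys/fs/cgroup/demo path is an assumption for illustration.
Note that wake_up_new_task() runs before fork() returns in the parent,
so this single process cannot itself hit the TASK_NEW window; only an
independent writer, such as a cgroup manager racing against a fork,
could, and that is exactly the case the old can_attach() check rejected
with -EINVAL.

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	pid_t pid = fork();

	if (pid < 0) {
		perror("fork");
		return 1;
	}
	if (pid == 0) {
		sleep(1);	/* child: stay alive briefly, then exit */
		_exit(0);
	}

	/* Parent: move the child into the (pre-created) demo cgroup. */
	FILE *f = fopen("/sys/fs/cgroup/demo/cgroup.procs", "w");
	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fprintf(f, "%d\n", (int)pid) < 0 || fclose(f) == EOF)
		perror("cgroup.procs write");
	else
		printf("moved %d into demo cgroup\n", (int)pid);

	waitpid(pid, NULL, 0);
	return 0;
}

Build with "gcc -o demo demo.c" and run as root after
"mkdir /sys/fs/cgroup/demo" (assuming a cgroup v2 mount with the cpu
controller enabled); both the path and the cgroup name are illustrative.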
Re: [PATCH v6 7/9] sched/fair: allow changing cgroup of new forked task
Posted by Vincent Guittot 3 years, 7 months ago
On Thu, 18 Aug 2022 at 14:48, Chengming Zhou
<zhouchengming@bytedance.com> wrote:
>
> commit 7dc603c9028e ("sched/fair: Fix PELT integrity for new tasks")
> introduced a TASK_NEW state and an unnecessary limitation that made
> changing the cgroup of a newly forked task fail.
>
> At that time we could not handle task_change_group_fair() for a newly
> forked fair task that had not yet been woken up by wake_up_new_task():
> moving it would have detached a sched_avg that was never attached.
>
> Delete this unnecessary limitation by checking for TASK_NEW before
> doing the detach or attach in task_change_group_fair().
>
> With that, cpu_cgrp_subsys.can_attach() has nothing left to do for fair
> tasks, so only define it under #ifdef CONFIG_RT_GROUP_SCHED.
>
> Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>

Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>

> ---
>  kernel/sched/core.c | 25 +++++--------------------
>  kernel/sched/fair.c |  7 +++++++
>  2 files changed, 12 insertions(+), 20 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 8e3f1c3f0b2c..14819bd66021 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -10290,36 +10290,19 @@ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
>         sched_unregister_group(tg);
>  }
>
> +#ifdef CONFIG_RT_GROUP_SCHED
>  static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
>  {
>         struct task_struct *task;
>         struct cgroup_subsys_state *css;
> -       int ret = 0;
>
>         cgroup_taskset_for_each(task, css, tset) {
> -#ifdef CONFIG_RT_GROUP_SCHED
>                 if (!sched_rt_can_attach(css_tg(css), task))
>                         return -EINVAL;
> -#endif
> -               /*
> -                * Serialize against wake_up_new_task() such that if it's
> -                * running, we're sure to observe its full state.
> -                */
> -               raw_spin_lock_irq(&task->pi_lock);
> -               /*
> -                * Avoid calling sched_move_task() before wake_up_new_task()
> -                * has happened. This would lead to problems with PELT, due to
> -                * move wanting to detach+attach while we're not attached yet.
> -                */
> -               if (READ_ONCE(task->__state) == TASK_NEW)
> -                       ret = -EINVAL;
> -               raw_spin_unlock_irq(&task->pi_lock);
> -
> -               if (ret)
> -                       break;
>         }
> -       return ret;
> +       return 0;
>  }
> +#endif
>
>  static void cpu_cgroup_attach(struct cgroup_taskset *tset)
>  {
> @@ -11155,7 +11138,9 @@ struct cgroup_subsys cpu_cgrp_subsys = {
>         .css_released   = cpu_cgroup_css_released,
>         .css_free       = cpu_cgroup_css_free,
>         .css_extra_stat_show = cpu_extra_stat_show,
> +#ifdef CONFIG_RT_GROUP_SCHED
>         .can_attach     = cpu_cgroup_can_attach,
> +#endif
>         .attach         = cpu_cgroup_attach,
>         .legacy_cftypes = cpu_legacy_files,
>         .dfl_cftypes    = cpu_files,
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index eba8a64f905a..c319b0bd2bc1 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -11840,6 +11840,13 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
>  #ifdef CONFIG_FAIR_GROUP_SCHED
>  static void task_change_group_fair(struct task_struct *p)
>  {
> +       /*
> +        * We couldn't detach or attach a forked task which
> +        * hasn't been woken up by wake_up_new_task().
> +        */
> +       if (READ_ONCE(p->__state) == TASK_NEW)
> +               return;
> +
>         detach_task_cfs_rq(p);
>
>  #ifdef CONFIG_SMP
> --
> 2.37.2
>
[tip: sched/core] sched/fair: Allow changing cgroup of new forked task
Posted by tip-bot2 for Chengming Zhou 3 years, 7 months ago
The following commit has been merged into the sched/core branch of tip:

Commit-ID:     df16b71c686cb096774e30153c9ce6756450796c
Gitweb:        https://git.kernel.org/tip/df16b71c686cb096774e30153c9ce6756450796c
Author:        Chengming Zhou <zhouchengming@bytedance.com>
AuthorDate:    Thu, 18 Aug 2022 20:48:03 +08:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Tue, 23 Aug 2022 11:01:19 +02:00

sched/fair: Allow changing cgroup of new forked task

commit 7dc603c9028e ("sched/fair: Fix PELT integrity for new tasks")
introduced a TASK_NEW state and an unnecessary limitation that made
changing the cgroup of a newly forked task fail.

At that time we could not handle task_change_group_fair() for a newly
forked fair task that had not yet been woken up by wake_up_new_task():
moving it would have detached a sched_avg that was never attached.

Delete this unnecessary limitation by checking for TASK_NEW before
doing the detach or attach in task_change_group_fair().

With that, cpu_cgrp_subsys.can_attach() has nothing left to do for fair
tasks, so only define it under #ifdef CONFIG_RT_GROUP_SCHED.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20220818124805.601-8-zhouchengming@bytedance.com
---
 kernel/sched/core.c | 25 +++++--------------------
 kernel/sched/fair.c |  7 +++++++
 2 files changed, 12 insertions(+), 20 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e74e79f..603a80e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10238,36 +10238,19 @@ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
 	sched_unregister_group(tg);
 }
 
+#ifdef CONFIG_RT_GROUP_SCHED
 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 	struct cgroup_subsys_state *css;
-	int ret = 0;
 
 	cgroup_taskset_for_each(task, css, tset) {
-#ifdef CONFIG_RT_GROUP_SCHED
 		if (!sched_rt_can_attach(css_tg(css), task))
 			return -EINVAL;
-#endif
-		/*
-		 * Serialize against wake_up_new_task() such that if it's
-		 * running, we're sure to observe its full state.
-		 */
-		raw_spin_lock_irq(&task->pi_lock);
-		/*
-		 * Avoid calling sched_move_task() before wake_up_new_task()
-		 * has happened. This would lead to problems with PELT, due to
-		 * move wanting to detach+attach while we're not attached yet.
-		 */
-		if (READ_ONCE(task->__state) == TASK_NEW)
-			ret = -EINVAL;
-		raw_spin_unlock_irq(&task->pi_lock);
-
-		if (ret)
-			break;
 	}
-	return ret;
+	return 0;
 }
+#endif
 
 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
 {
@@ -11103,7 +11086,9 @@ struct cgroup_subsys cpu_cgrp_subsys = {
 	.css_released	= cpu_cgroup_css_released,
 	.css_free	= cpu_cgroup_css_free,
 	.css_extra_stat_show = cpu_extra_stat_show,
+#ifdef CONFIG_RT_GROUP_SCHED
 	.can_attach	= cpu_cgroup_can_attach,
+#endif
 	.attach		= cpu_cgroup_attach,
 	.legacy_cftypes	= cpu_legacy_files,
 	.dfl_cftypes	= cpu_files,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e92bc05..fd1aa4c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -11676,6 +11676,13 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void task_change_group_fair(struct task_struct *p)
 {
+	/*
+	 * We couldn't detach or attach a forked task which
+	 * hasn't been woken up by wake_up_new_task().
+	 */
+	if (READ_ONCE(p->__state) == TASK_NEW)
+		return;
+
 	detach_task_cfs_rq(p);
 
 #ifdef CONFIG_SMP
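
To see why the early return in task_change_group_fair() is sufficient,
here is a compilable userspace model of the guard. It is purely
illustrative: the names mirror the kernel's, but the types and the
stubbed PELT operations are simplifications, not kernel code. The point
it demonstrates is that a TASK_NEW task has never had its sched_avg
attached, so a group change must skip the detach/attach pair and leave
the first attach to wake_up_new_task().

#include <stdio.h>

enum task_state { TASK_NEW, TASK_RUNNING };

struct task {
	enum task_state state;
	int attached;		/* has the sched_avg been attached? */
};

static void detach_task_cfs_rq(struct task *p)
{
	/* Detaching an unattached sched_avg is the bug being avoided. */
	if (!p->attached)
		printf("BUG: detach of unattached sched_avg\n");
	p->attached = 0;
}

static void attach_task_cfs_rq(struct task *p)
{
	p->attached = 1;
}

static void task_change_group_fair(struct task *p)
{
	/* The new guard: a TASK_NEW task was never attached. */
	if (p->state == TASK_NEW)
		return;

	detach_task_cfs_rq(p);
	/* ... the task would switch to its new cfs_rq here ... */
	attach_task_cfs_rq(p);
}

static void wake_up_new_task(struct task *p)
{
	/* The first attach happens at wakeup, in the current group. */
	attach_task_cfs_rq(p);
	p->state = TASK_RUNNING;
}

int main(void)
{
	struct task p = { .state = TASK_NEW, .attached = 0 };

	task_change_group_fair(&p);	/* early return, no bogus detach */
	wake_up_new_task(&p);		/* sched_avg attached exactly once */
	task_change_group_fair(&p);	/* normal detach + attach */
	printf("guard held: no detach before the first attach\n");
	return 0;
}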