[PATCH v7 18/23] sched: Add push_task_chain helper

Posted by John Stultz on 20 December 2023, as part of a 23-patch series.
From: Connor O'Brien <connoro@google.com>

Switch the logic that deactivates a task, sets the
task's cpu, and reactivates it on a different rq to
use a helper that will later be extended to push
entire blocked task chains.

This patch was broken out from a larger chain migration
patch originally by Connor O'Brien.

Cc: Joel Fernandes <joelaf@google.com>
Cc: Qais Yousef <qyousef@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Zimuzo Ezeozue <zezeozue@google.com>
Cc: Youssef Esmat <youssefesmat@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
Cc: Will Deacon <will@kernel.org>
Cc: Waiman Long <longman@redhat.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Metin Kaya <Metin.Kaya@arm.com>
Cc: Xuewen Yan <xuewen.yan94@gmail.com>
Cc: K Prateek Nayak <kprateek.nayak@amd.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kernel-team@android.com
Signed-off-by: Connor O'Brien <connoro@google.com>
[jstultz: split out from larger chain migration patch]
Signed-off-by: John Stultz <jstultz@google.com>
---
 kernel/sched/core.c     | 4 +---
 kernel/sched/deadline.c | 8 ++------
 kernel/sched/rt.c       | 8 ++------
 kernel/sched/sched.h    | 9 +++++++++
 4 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0cd63bd0bdcd..0c212dcd4b7a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2721,9 +2721,7 @@ int push_cpu_stop(void *arg)
 
 	// XXX validate p is still the highest prio task
 	if (task_rq(p) == rq) {
-		deactivate_task(rq, p, 0);
-		set_task_cpu(p, lowest_rq->cpu);
-		activate_task(lowest_rq, p, 0);
+		push_task_chain(rq, lowest_rq, p);
 		resched_curr(lowest_rq);
 	}
 
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 4f998549ea74..def1eb23318b 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2313,9 +2313,7 @@ static int push_dl_task(struct rq *rq)
 		goto retry;
 	}
 
-	deactivate_task(rq, next_task, 0);
-	set_task_cpu(next_task, later_rq->cpu);
-	activate_task(later_rq, next_task, 0);
+	push_task_chain(rq, later_rq, next_task);
 	ret = 1;
 
 	resched_curr(later_rq);
@@ -2401,9 +2399,7 @@ static void pull_dl_task(struct rq *this_rq)
 			if (is_migration_disabled(p)) {
 				push_task = get_push_task(src_rq);
 			} else {
-				deactivate_task(src_rq, p, 0);
-				set_task_cpu(p, this_cpu);
-				activate_task(this_rq, p, 0);
+				push_task_chain(src_rq, this_rq, p);
 				dmin = p->dl.deadline;
 				resched = true;
 			}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a7b51a021111..cf0eb4aac613 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2128,9 +2128,7 @@ static int push_rt_task(struct rq *rq, bool pull)
 		goto retry;
 	}
 
-	deactivate_task(rq, next_task, 0);
-	set_task_cpu(next_task, lowest_rq->cpu);
-	activate_task(lowest_rq, next_task, 0);
+	push_task_chain(rq, lowest_rq, next_task);
 	resched_curr(lowest_rq);
 	ret = 1;
 
@@ -2401,9 +2399,7 @@ static void pull_rt_task(struct rq *this_rq)
 			if (is_migration_disabled(p)) {
 				push_task = get_push_task(src_rq);
 			} else {
-				deactivate_task(src_rq, p, 0);
-				set_task_cpu(p, this_cpu);
-				activate_task(this_rq, p, 0);
+				push_task_chain(src_rq, this_rq, p);
 				resched = true;
 			}
 			/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 765ba10661de..19afe532771f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3546,5 +3546,14 @@ static inline void init_sched_mm_cid(struct task_struct *t) { }
 
 extern u64 avg_vruntime(struct cfs_rq *cfs_rq);
 extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se);
+#ifdef CONFIG_SMP
+static inline
+void push_task_chain(struct rq *rq, struct rq *dst_rq, struct task_struct *task)
+{
+	deactivate_task(rq, task, 0);
+	set_task_cpu(task, dst_rq->cpu);
+	activate_task(dst_rq, task, 0);
+}
+#endif
 
 #endif /* _KERNEL_SCHED_SCHED_H */
-- 
2.43.0.472.g3155946c3a-goog
Re: [PATCH v7 18/23] sched: Add push_task_chain helper
Posted by Metin Kaya
On 20/12/2023 12:18 am, John Stultz wrote:
> From: Connor O'Brien <connoro@google.com>
> 
> Switch the logic that deactivates a task, sets the
> task's cpu, and reactivates it on a different rq to
> use a helper that will later be extended to push
> entire blocked task chains.
> 
> This patch was broken out from a larger chain migration
> patch originally by Connor O'Brien.

I think patches #18, #19 and #22 can be upstreamed independently of the
other Proxy Execution patches.

> [...]