[PATCH v2 21/35] sched: prepare for lazy rescheduling in resched_curr()

Ankur Arora posted 35 patches 1 year, 8 months ago
[PATCH v2 21/35] sched: prepare for lazy rescheduling in resched_curr()
Posted by Ankur Arora 1 year, 8 months ago
Handle RESCHED_LAZY in resched_curr() by registering an intent to
reschedule at exit-to-user.
Given that the rescheduling is not imminent, skip the preempt folding
and the resched IPI.

Also, update set_nr_and_not_polling() to handle RESCHED_LAZY. Note that
there are no changes to set_nr_if_polling(), since lazy rescheduling
is not meaningful for the idle task.

And finally, now that there are two need-resched bits, enforce a
priority order while setting them.

Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Originally-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/lkml/87jzshhexi.ffs@tglx/
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 kernel/sched/core.c | 35 +++++++++++++++++++++++------------
 1 file changed, 23 insertions(+), 12 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index df8e333f2d8b..27b908cc9134 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -899,14 +899,14 @@ static inline void hrtick_rq_init(struct rq *rq)
 
 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
 /*
- * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
+ * Atomically set TIF_NEED_RESCHED[_LAZY] and test for TIF_POLLING_NRFLAG,
  * this avoids any races wrt polling state changes and thereby avoids
  * spurious IPIs.
  */
-static inline bool set_nr_and_not_polling(struct task_struct *p)
+static inline bool set_nr_and_not_polling(struct task_struct *p, resched_t rs)
 {
 	struct thread_info *ti = task_thread_info(p);
-	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
+	return !(fetch_or(&ti->flags, _tif_resched(rs)) & _TIF_POLLING_NRFLAG);
 }
 
 /*
@@ -931,9 +931,9 @@ static bool set_nr_if_polling(struct task_struct *p)
 }
 
 #else
-static inline bool set_nr_and_not_polling(struct task_struct *p)
+static inline bool set_nr_and_not_polling(struct task_struct *p, resched_t rs)
 {
-	__set_tsk_need_resched(p, RESCHED_NOW);
+	__set_tsk_need_resched(p, rs);
 	return true;
 }
 
@@ -1041,25 +1041,34 @@ void wake_up_q(struct wake_q_head *head)
 void resched_curr(struct rq *rq)
 {
 	struct task_struct *curr = rq->curr;
+	resched_t rs = RESCHED_NOW;
 	int cpu;
 
 	lockdep_assert_rq_held(rq);
 
-	if (__test_tsk_need_resched(curr, RESCHED_NOW))
+	/*
+	 * TIF_NEED_RESCHED is the higher priority bit, so if it is already
+	 * set, nothing more to be done.
+	 */
+	if (__test_tsk_need_resched(curr, RESCHED_NOW) ||
+	    (rs == RESCHED_LAZY && __test_tsk_need_resched(curr, RESCHED_LAZY)))
 		return;
 
 	cpu = cpu_of(rq);
 
 	if (cpu == smp_processor_id()) {
-		__set_tsk_need_resched(curr, RESCHED_NOW);
-		set_preempt_need_resched();
+		__set_tsk_need_resched(curr, rs);
+		if (rs == RESCHED_NOW)
+			set_preempt_need_resched();
 		return;
 	}
 
-	if (set_nr_and_not_polling(curr))
-		smp_send_reschedule(cpu);
-	else
+	if (set_nr_and_not_polling(curr, rs)) {
+		if (rs == RESCHED_NOW)
+			smp_send_reschedule(cpu);
+	} else {
 		trace_sched_wake_idle_without_ipi(cpu);
+	}
 }
 
 void resched_cpu(int cpu)
@@ -1154,7 +1163,7 @@ static void wake_up_idle_cpu(int cpu)
 	 * and testing of the above solutions didn't appear to report
 	 * much benefits.
 	 */
-	if (set_nr_and_not_polling(rq->idle))
+	if (set_nr_and_not_polling(rq->idle, RESCHED_NOW))
 		smp_send_reschedule(cpu);
 	else
 		trace_sched_wake_idle_without_ipi(cpu);
@@ -6704,6 +6713,8 @@ static void __sched notrace __schedule(unsigned int sched_mode)
 	}
 
 	next = pick_next_task(rq, prev, &rf);
+
+	/* Clear both TIF_NEED_RESCHED, TIF_NEED_RESCHED_LAZY */
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
 #ifdef CONFIG_SCHED_DEBUG
-- 
2.31.1
Re: [PATCH v2 21/35] sched: prepare for lazy rescheduling in resched_curr()
Posted by Peter Zijlstra 1 year, 8 months ago
On Mon, May 27, 2024 at 05:35:07PM -0700, Ankur Arora wrote:

> @@ -1041,25 +1041,34 @@ void wake_up_q(struct wake_q_head *head)
>  void resched_curr(struct rq *rq)
>  {
>  	struct task_struct *curr = rq->curr;
> +	resched_t rs = RESCHED_NOW;
>  	int cpu;
>  
>  	lockdep_assert_rq_held(rq);
>  
> -	if (__test_tsk_need_resched(curr, RESCHED_NOW))
> +	/*
> +	 * TIF_NEED_RESCHED is the higher priority bit, so if it is already
> +	 * set, nothing more to be done.
> +	 */
> +	if (__test_tsk_need_resched(curr, RESCHED_NOW) ||
> +	    (rs == RESCHED_LAZY && __test_tsk_need_resched(curr, RESCHED_LAZY)))
>  		return;
>  
>  	cpu = cpu_of(rq);
>  
>  	if (cpu == smp_processor_id()) {
> -		__set_tsk_need_resched(curr, RESCHED_NOW);
> -		set_preempt_need_resched();
> +		__set_tsk_need_resched(curr, rs);
> +		if (rs == RESCHED_NOW)
> +			set_preempt_need_resched();
>  		return;
>  	}
>  
> -	if (set_nr_and_not_polling(curr))
> -		smp_send_reschedule(cpu);
> -	else
> +	if (set_nr_and_not_polling(curr, rs)) {
> +		if (rs == RESCHED_NOW)
> +			smp_send_reschedule(cpu);

I'm thinking this wants at least something like:

		WARN_ON_ONCE(rs == RESCHED_LAZY && is_idle_task(curr));


> +	} else {
>  		trace_sched_wake_idle_without_ipi(cpu);
> +	}
>  }
>  
>  void resched_cpu(int cpu)