[PATCH v2 06/35] thread_info: define __tif_need_resched(resched_t)

Define __tif_need_resched(), which takes a resched_t parameter to decide
the immediacy of the need-resched being checked.

Update need_resched() and should_resched() so they both check for
__tif_need_resched(RESCHED_NOW), which keeps the current semantics. Also
add need_resched_lazy(), which checks for the deferred variant,
__tif_need_resched(RESCHED_LAZY).

Non-scheduling code -- which only cares about preemption that is needed
immediately -- can continue unchanged, since the commonly used interfaces
(need_resched(), should_resched(), tif_need_resched()) stay the same.

This also allows lazy preemption to just be a scheduler detail.
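
For reference, the resched_t type and the tif_resched() helper used below
are introduced earlier in this series. A rough sketch, shown here only for
context (the lazy flag name, TIF_NEED_RESCHED_LAZY, is assumed; see the
preceding patches for the actual definitions):

	typedef enum {
		RESCHED_NOW  = 0,	/* reschedule at the first opportunity */
		RESCHED_LAZY = 1,	/* deferred; folded in by the scheduler */
	} resched_t;

	static __always_inline int tif_resched(resched_t rs)
	{
		/* RESCHED_NOW selects TIF_NEED_RESCHED, RESCHED_LAZY the lazy flag */
		return rs == RESCHED_NOW ? TIF_NEED_RESCHED : TIF_NEED_RESCHED_LAZY;
	}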

Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Originally-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/lkml/87jzshhexi.ffs@tglx/
Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 include/linux/preempt.h     |  2 +-
 include/linux/sched.h       |  7 ++++++-
 include/linux/thread_info.h | 34 ++++++++++++++++++++++++++++------
 kernel/trace/trace.c        |  2 +-
 4 files changed, 36 insertions(+), 9 deletions(-)

diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index ce76f1a45722..d453f5e34390 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -312,7 +312,7 @@ do { \
 } while (0)
 #define preempt_fold_need_resched() \
 do { \
-	if (tif_need_resched()) \
+	if (__tif_need_resched(RESCHED_NOW)) \
 		set_preempt_need_resched(); \
 } while (0)
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4808e5dd4f69..37a51115b691 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2062,7 +2062,12 @@ extern int __cond_resched_rwlock_write(rwlock_t *lock);
 
 static __always_inline bool need_resched(void)
 {
-	return unlikely(tif_need_resched());
+	return unlikely(__tif_need_resched(RESCHED_NOW));
+}
+
+static __always_inline bool need_resched_lazy(void)
+{
+	return unlikely(__tif_need_resched(RESCHED_LAZY));
 }
 
 /*
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 65e5beedc915..e246b01553a5 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -216,22 +216,44 @@ static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti
 
 #ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
 
-static __always_inline bool tif_need_resched(void)
+static __always_inline bool __tif_need_resched_bitop(int nr_flag)
 {
-	return arch_test_bit(TIF_NEED_RESCHED,
-			     (unsigned long *)(&current_thread_info()->flags));
+	return arch_test_bit(nr_flag,
+		     (unsigned long *)(&current_thread_info()->flags));
 }
 
 #else
 
-static __always_inline bool tif_need_resched(void)
+static __always_inline bool __tif_need_resched_bitop(int nr_flag)
 {
-	return test_bit(TIF_NEED_RESCHED,
-			(unsigned long *)(&current_thread_info()->flags));
+	return test_bit(nr_flag,
+		(unsigned long *)(&current_thread_info()->flags));
 }
 
 #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
 
+static __always_inline bool __tif_need_resched(resched_t rs)
+{
+	/*
+	 * With !PREEMPT_AUTO, this check is only meaningful if we
+	 * are checking if tif_resched(RESCHED_NOW) is set.
+	 */
+	if (IS_ENABLED(CONFIG_PREEMPT_AUTO) || rs == RESCHED_NOW)
+		return __tif_need_resched_bitop(tif_resched(rs));
+	else
+		return false;
+}
+
+static __always_inline bool tif_need_resched(void)
+{
+	return __tif_need_resched(RESCHED_NOW);
+}
+
+static __always_inline bool tif_need_resched_lazy(void)
+{
+	return __tif_need_resched(RESCHED_LAZY);
+}
+
 #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
 static inline int arch_within_stack_frames(const void * const stack,
 					   const void * const stackend,
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 233d1af39fff..ed229527be05 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2511,7 +2511,7 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
 	if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
 		trace_flags |= TRACE_FLAG_BH_OFF;
 
-	if (tif_need_resched())
+	if (__tif_need_resched(RESCHED_NOW))
 		trace_flags |= TRACE_FLAG_NEED_RESCHED;
 	if (test_preempt_need_resched())
 		trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
-- 
2.31.1
Re: [PATCH v2 06/35] thread_info: define __tif_need_resched(resched_t)
Posted by Peter Zijlstra
On Mon, May 27, 2024 at 05:34:52PM -0700, Ankur Arora wrote:

> diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
> index 65e5beedc915..e246b01553a5 100644
> --- a/include/linux/thread_info.h
> +++ b/include/linux/thread_info.h
> @@ -216,22 +216,44 @@ static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti
>  
>  #ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
>  
> -static __always_inline bool tif_need_resched(void)
> +static __always_inline bool __tif_need_resched_bitop(int nr_flag)
>  {
> -	return arch_test_bit(TIF_NEED_RESCHED,
> -			     (unsigned long *)(&current_thread_info()->flags));
> +	return arch_test_bit(nr_flag,
> +		     (unsigned long *)(&current_thread_info()->flags));
>  }
>  
>  #else
>  
> -static __always_inline bool tif_need_resched(void)
> +static __always_inline bool __tif_need_resched_bitop(int nr_flag)
>  {
> -	return test_bit(TIF_NEED_RESCHED,
> -			(unsigned long *)(&current_thread_info()->flags));
> +	return test_bit(nr_flag,
> +		(unsigned long *)(&current_thread_info()->flags));
>  }

:se cino=(0:0

That is, you're wrecking the indentation here.

>  
>  #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
>  
> +static __always_inline bool __tif_need_resched(resched_t rs)
> +{
> +	/*
> +	 * With !PREEMPT_AUTO, this check is only meaningful if we
> +	 * are checking if tif_resched(RESCHED_NOW) is set.
> +	 */
> +	if (IS_ENABLED(CONFIG_PREEMPT_AUTO) || rs == RESCHED_NOW)
> +		return __tif_need_resched_bitop(tif_resched(rs));
> +	else
> +		return false;
> +}

	if (!IS_ENABLED(CONFIG_PREEMPT_AUTO) && rs == RESCHED_LAZY)
		return false;

	return __tif_need_resched_bitop(tif_resched(rs));


> +
> +static __always_inline bool tif_need_resched(void)
> +{
> +	return __tif_need_resched(RESCHED_NOW);
> +}
> +
> +static __always_inline bool tif_need_resched_lazy(void)
> +{
> +	return __tif_need_resched(RESCHED_LAZY);
> +}
> +
>  #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
>  static inline int arch_within_stack_frames(const void * const stack,
>  					   const void * const stackend,
> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
> index 233d1af39fff..ed229527be05 100644
> --- a/kernel/trace/trace.c
> +++ b/kernel/trace/trace.c
> @@ -2511,7 +2511,7 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
>  	if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
>  		trace_flags |= TRACE_FLAG_BH_OFF;
>  
> -	if (tif_need_resched())
> +	if (__tif_need_resched(RESCHED_NOW))
>  		trace_flags |= TRACE_FLAG_NEED_RESCHED;

Per the above this is a NO-OP.
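
That is, tif_need_resched() after this patch is already:

	static __always_inline bool tif_need_resched(void)
	{
		return __tif_need_resched(RESCHED_NOW);
	}

so open-coding __tif_need_resched(RESCHED_NOW) here changes nothing.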