[RFC PATCH v2 4/5] softirq: Unify should_wakeup_ksoftirqd()

Posted by K Prateek Nayak 1 year, 3 months ago
Define softirq_ctrl::cnt for !PREEMPT_RT kernels too and unify
should_wake_ksoftirqd() to decide based on softirq_ctrl::cnt. Since
this per-CPU count can change quite frequently when running
interrupt-heavy benchmarks, declare softirq_ctrl as cacheline aligned.

No functional change intended: !PREEMPT_RT kernels do not increment
softirq_ctrl::cnt (yet), so should_wake_ksoftirqd() always returns
true there, mimicking the current behavior.
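
For reference, a heavily simplified sketch of the existing PREEMPT_RT
path that feeds softirq_ctrl::cnt (see __local_bh_disable_ip() in
kernel/softirq.c; locking corner cases, RCU and lockdep accounting
are elided here):

	void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
	{
		/* First entry into a BH-disabled section? */
		if (!current->softirq_disable_cnt && preemptible())
			local_lock(&softirq_ctrl.lock);

		/*
		 * Track the BH-disabled depth in the per-CPU counter so
		 * that should_wake_ksoftirqd() can key off it.
		 */
		current->softirq_disable_cnt =
			__this_cpu_add_return(softirq_ctrl.cnt, cnt);
	}

With that bookkeeping, !this_cpu_read(softirq_ctrl.cnt) holds exactly
when no BH-disabled section is active on the local CPU, and on
!PREEMPT_RT, where the count is never raised, it degenerates to the
old unconditional "return true".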

Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com>
---
v1..v2:

- New patch. Broken off from the approach discussed in
  https://lore.kernel.org/lkml/880f13fd-753d-2c5a-488a-d75c99e8dfa3@amd.com/

- Cacheline align softirq_ctrl since a benchmark stressing the IPI path
  showed sensitivity to softirq_ctrl being aligned vs unaligned. This
  was also observed with v1 and is elaborated in
  https://lore.kernel.org/lkml/20240710090210.41856-4-kprateek.nayak@amd.com/
---
 kernel/softirq.c | 55 ++++++++++++++++++++++++------------------------
 1 file changed, 27 insertions(+), 28 deletions(-)

diff --git a/kernel/softirq.c b/kernel/softirq.c
index e70a51d737ee..d8902fbcdebf 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -88,23 +88,6 @@ EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
 EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
 #endif
 
-/*
- * SOFTIRQ_OFFSET usage:
- *
- * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
- * to a per CPU counter and to task::softirqs_disabled_cnt.
- *
- * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
- *   processing.
- *
- * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
- *   on local_bh_disable or local_bh_enable.
- *
- * This lets us distinguish between whether we are currently processing
- * softirq and whether we just have bh disabled.
- */
-#ifdef CONFIG_PREEMPT_RT
-
 /*
  * RT accounts for BH disabled sections in task::softirqs_disabled_cnt and
  * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
@@ -118,14 +101,40 @@ EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
  * the task which is in a softirq disabled section is preempted or blocks.
  */
 struct softirq_ctrl {
+#ifdef CONFIG_PREEMPT_RT
 	local_lock_t	lock;
+#endif
 	int		cnt;
 };
 
-static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
+static DEFINE_PER_CPU_ALIGNED(struct softirq_ctrl, softirq_ctrl) = {
+#ifdef CONFIG_PREEMPT_RT
 	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
+#endif
 };
 
+static inline bool should_wake_ksoftirqd(void)
+{
+	return !this_cpu_read(softirq_ctrl.cnt);
+}
+
+/*
+ * SOFTIRQ_OFFSET usage:
+ *
+ * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
+ * to a per CPU counter and to task::softirqs_disabled_cnt.
+ *
+ * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
+ *   processing.
+ *
+ * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
+ *   on local_bh_disable or local_bh_enable.
+ *
+ * This lets us distinguish between whether we are currently processing
+ * softirq and whether we just have bh disabled.
+ */
+#ifdef CONFIG_PREEMPT_RT
+
 /**
  * local_bh_blocked() - Check for idle whether BH processing is blocked
  *
@@ -270,11 +279,6 @@ static inline void ksoftirqd_run_end(void)
 static inline void softirq_handle_begin(void) { }
 static inline void softirq_handle_end(void) { }
 
-static inline bool should_wake_ksoftirqd(void)
-{
-	return !this_cpu_read(softirq_ctrl.cnt);
-}
-
 static inline void invoke_softirq(void)
 {
 	if (should_wake_ksoftirqd())
@@ -419,11 +423,6 @@ static inline void ksoftirqd_run_end(void)
 	local_irq_enable();
 }
 
-static inline bool should_wake_ksoftirqd(void)
-{
-	return true;
-}
-
 static inline void invoke_softirq(void)
 {
 	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
-- 
2.34.1
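
As an aside, the SOFTIRQ_OFFSET comment moved in the hunk above can be
illustrated with a small sketch (not part of the patch; it only uses
the standard softirq_count() and in_serving_softirq() helpers):

	#include <linux/bottom_half.h>
	#include <linux/bug.h>
	#include <linux/preempt.h>

	static void bh_disabled_vs_serving_softirq(void)
	{
		local_bh_disable();	/* count += SOFTIRQ_DISABLE_OFFSET */

		/* A BH-disabled section is active ... */
		WARN_ON_ONCE(!softirq_count());
		/* ... but no softirq is currently being processed. */
		WARN_ON_ONCE(in_serving_softirq());

		local_bh_enable();	/* count -= SOFTIRQ_DISABLE_OFFSET */
	}

Only softirq processing itself adds SOFTIRQ_OFFSET, which is what
makes in_serving_softirq() true and lets the two states be told apart.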
Re: [RFC PATCH v2 4/5] softirq: Unify should_wakeup_ksoftirqd()
Posted by Peter Zijlstra 1 year, 3 months ago
On Wed, Sep 04, 2024 at 11:12:22AM +0000, K Prateek Nayak wrote:

> @@ -118,14 +101,40 @@ EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
>   * the task which is in a softirq disabled section is preempted or blocks.
>   */
>  struct softirq_ctrl {
> +#ifdef CONFIG_PREEMPT_RT
>  	local_lock_t	lock;
> +#endif
>  	int		cnt;
>  };
>  
> -static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
> +static DEFINE_PER_CPU_ALIGNED(struct softirq_ctrl, softirq_ctrl) = {
> +#ifdef CONFIG_PREEMPT_RT
>  	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
> +#endif
>  };

With the exception of CONFIG_DEBUG_LOCK_ALLOC (part of LOCKDEP),
local_lock_t is an empty structure when PREEMPT_RT=n.

That is to say, you can probably get by without those extra #ifdefs.
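
Concretely, that would amount to something like this sketch (relying
on local_lock_t being zero-sized and INIT_LOCAL_LOCK() expanding to an
empty initializer on !PREEMPT_RT without CONFIG_DEBUG_LOCK_ALLOC):

	struct softirq_ctrl {
		local_lock_t	lock;	/* zero-sized on !RT && !LOCKDEP */
		int		cnt;
	};

	static DEFINE_PER_CPU_ALIGNED(struct softirq_ctrl, softirq_ctrl) = {
		.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
	};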
Re: [RFC PATCH v2 4/5] softirq: Unify should_wakeup_ksoftirqd()
Posted by K Prateek Nayak 1 year, 3 months ago
Hello Peter,

On 9/4/2024 5:45 PM, Peter Zijlstra wrote:
> On Wed, Sep 04, 2024 at 11:12:22AM +0000, K Prateek Nayak wrote:
> 
>> @@ -118,14 +101,40 @@ EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
>>    * the task which is in a softirq disabled section is preempted or blocks.
>>    */
>>   struct softirq_ctrl {
>> +#ifdef CONFIG_PREEMPT_RT
>>   	local_lock_t	lock;
>> +#endif
>>   	int		cnt;
>>   };
>>   
>> -static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
>> +static DEFINE_PER_CPU_ALIGNED(struct softirq_ctrl, softirq_ctrl) = {
>> +#ifdef CONFIG_PREEMPT_RT
>>   	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
>> +#endif
>>   };
> 
> With the exception of CONFIG_DEBUG_LOCK_ALLOC (part of LOCKDEP)
> local_lock_t is an empty structure when PREEMPT_RT=n.
> 
> That is to say, you can probably get by without those extra #ifdefs.

Thank you for the suggestion. I'll drop those extra #ifdefs in the next
version.
-- 
Thanks and Regards,
Prateek