[PATCH rcu 5/5] rcu: Fix rcu_read_unlock() deadloop due to IRQ work

Posted by neeraj.upadhyay@kernel.org 3 months ago
From: Joel Fernandes <joelagnelf@nvidia.com>

If rcu_read_unlock_special() is invoked during irq_exit() and issues an IPI,
the system can lock up: the IPI itself triggers the irq_exit() path again,
causing a recursive lockup.

This is precisely what Xiongfeng found when invoking a BPF program on the
trace_tick_stop() tracepoint, as shown in the trace below. Fix by managing
the irq_work state correctly: clear the pending state only once the deferred
quiescent state has actually been reported, rather than as soon as the IRQ
work handler runs, so that rcu_read_unlock_special() does not immediately
requeue the work and re-IPI the CPU.

irq_exit()
  __irq_exit_rcu()
    /* in_hardirq() returns false after this */
    preempt_count_sub(HARDIRQ_OFFSET)
    tick_irq_exit()
      tick_nohz_irq_exit()
	    tick_nohz_stop_sched_tick()
	      trace_tick_stop()  /* a bpf prog is hooked on this trace point */
		   __bpf_trace_tick_stop()
		      bpf_trace_run2()
			    rcu_read_unlock_special()
                              /* will send an IPI to itself */
			      irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);

A simple reproducer can also be obtained by doing the following in
tick_irq_exit(). It will hang on boot without the patch:

  static inline void tick_irq_exit(void)
  {
 +	rcu_read_lock();
 +	WRITE_ONCE(current->rcu_read_unlock_special.b.need_qs, true);
 +	rcu_read_unlock();
 +

Reported-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
Closes: https://lore.kernel.org/all/9acd5f9f-6732-7701-6880-4b51190aa070@huawei.com/
Tested-by: Qi Xi <xiqi2@huawei.com>
Signed-off-by: Joel Fernandes <joelagnelf@nvidia.com>
Reviewed-by: "Paul E. McKenney" <paulmck@kernel.org>
Signed-off-by: Neeraj Upadhyay (AMD) <neeraj.upadhyay@kernel.org>
---
 kernel/rcu/tree.h        | 11 ++++++++++-
 kernel/rcu/tree_plugin.h | 23 +++++++++++++++++++----
 2 files changed, 29 insertions(+), 5 deletions(-)

diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 3830c19cf2f6..f8f612269e6e 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -174,6 +174,15 @@ struct rcu_snap_record {
 	unsigned long   jiffies;	/* Track jiffies value */
 };
 
+/*
+ * The IRQ work (defer_qs_iw) is used by RCU to get the scheduler's attention.
+ * It can be in one of the following states:
+ * - DEFER_QS_IDLE: An IRQ work was never scheduled.
+ * - DEFER_QS_PENDING: An IRQ work was scheduled but never run.
+ */
+#define DEFER_QS_IDLE		0
+#define DEFER_QS_PENDING	1
+
 /* Per-CPU data for read-copy update. */
 struct rcu_data {
 	/* 1) quiescent-state and grace-period handling : */
@@ -192,7 +201,7 @@ struct rcu_data {
 					/*  during and after the last grace */
 					/* period it is aware of. */
 	struct irq_work defer_qs_iw;	/* Obtain later scheduler attention. */
-	bool defer_qs_iw_pending;	/* Scheduler attention pending? */
+	int defer_qs_iw_pending;	/* Scheduler attention pending? */
 	struct work_struct strict_work;	/* Schedule readers for strict GPs. */
 
 	/* 2) batch handling */
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index a91b2322a0cd..aec584812574 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -486,13 +486,16 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 	struct rcu_node *rnp;
 	union rcu_special special;
 
+	rdp = this_cpu_ptr(&rcu_data);
+	if (rdp->defer_qs_iw_pending == DEFER_QS_PENDING)
+		rdp->defer_qs_iw_pending = DEFER_QS_IDLE;
+
 	/*
 	 * If RCU core is waiting for this CPU to exit its critical section,
 	 * report the fact that it has exited.  Because irqs are disabled,
 	 * t->rcu_read_unlock_special cannot change.
 	 */
 	special = t->rcu_read_unlock_special;
-	rdp = this_cpu_ptr(&rcu_data);
 	if (!special.s && !rdp->cpu_no_qs.b.exp) {
 		local_irq_restore(flags);
 		return;
@@ -629,7 +632,18 @@ static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
 
 	rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
 	local_irq_save(flags);
-	rdp->defer_qs_iw_pending = false;
+
+	/*
+	 * Requeue the IRQ work on next unlock in following situation:
+	 * 1. rcu_read_unlock() queues IRQ work (state -> DEFER_QS_PENDING)
+	 * 2. CPU enters new rcu_read_lock()
+	 * 3. IRQ work runs but cannot report QS due to rcu_preempt_depth() > 0
+	 * 4. rcu_read_unlock() does not re-queue work (state still PENDING)
+	 * 5. Deferred QS reporting does not happen.
+	 */
+	if (rcu_preempt_depth() > 0)
+		WRITE_ONCE(rdp->defer_qs_iw_pending, DEFER_QS_IDLE);
+
 	local_irq_restore(flags);
 }
 
@@ -676,7 +690,8 @@ static void rcu_read_unlock_special(struct task_struct *t)
 			set_tsk_need_resched(current);
 			set_preempt_need_resched();
 			if (IS_ENABLED(CONFIG_IRQ_WORK) && irqs_were_disabled &&
-			    expboost && !rdp->defer_qs_iw_pending && cpu_online(rdp->cpu)) {
+			    expboost && rdp->defer_qs_iw_pending != DEFER_QS_PENDING &&
+			    cpu_online(rdp->cpu)) {
 				// Get scheduler to re-evaluate and call hooks.
 				// If !IRQ_WORK, FQS scan will eventually IPI.
 				if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) &&
@@ -686,7 +701,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
 				else
 					init_irq_work(&rdp->defer_qs_iw,
 						      rcu_preempt_deferred_qs_handler);
-				rdp->defer_qs_iw_pending = true;
+				rdp->defer_qs_iw_pending = DEFER_QS_PENDING;
 				irq_work_queue_on(&rdp->defer_qs_iw, rdp->cpu);
 			}
 		}
-- 
2.40.1
Re: [PATCH rcu 5/5] rcu: Fix rcu_read_unlock() deadloop due to IRQ work
Posted by Frederic Weisbecker 3 months ago
On Wed, Jul 09, 2025 at 04:11:18PM +0530, neeraj.upadhyay@kernel.org wrote:
> [...]
> @@ -174,6 +174,15 @@ struct rcu_snap_record {
>  	unsigned long   jiffies;	/* Track jiffies value */
>  };
>  
> +/*
> + * The IRQ work (defer_qs_iw) is used by RCU to get the scheduler's attention.
> + * It can be in one of the following states:
> + * - DEFER_QS_IDLE: An IRQ work was never scheduled.
> + * - DEFER_QS_PENDING: An IRQ work was scheduled but never run.

Never as in "never ever"? :-)

I'm not a native speaker, so you guys tell me, but isn't it less
ambiguous:

- DEFER_QS_IDLE: The IRQ work isn't pending
- DEFER_QS_PENDING: The IRQ work is pending but hasn't run yet

But then the names are already self-explanatory. And keeping
it as a boolean should be enough too. Why do we need these two
states?

> [...]
> @@ -629,7 +632,18 @@ static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
>  
>  	rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
>  	local_irq_save(flags);
> -	rdp->defer_qs_iw_pending = false;
> +
> +	/*
> +	 * Requeue the IRQ work on next unlock in following situation:

s/in/to avoid/

> +	 * 1. rcu_read_unlock() queues IRQ work (state -> DEFER_QS_PENDING)
> +	 * 2. CPU enters new rcu_read_lock()
> +	 * 3. IRQ work runs but cannot report QS due to rcu_preempt_depth() > 0
> +	 * 4. rcu_read_unlock() does not re-queue work (state still PENDING)
> +	 * 5. Deferred QS reporting does not happen.
> +	 */
> +	if (rcu_preempt_depth() > 0)
> +		WRITE_ONCE(rdp->defer_qs_iw_pending, DEFER_QS_IDLE);

Why WRITE_ONCE()? Also, this lacks an explanation of why it's not
unconditionally set back to DEFER_QS_IDLE (i.e. just a few words about that
irq_work() recursion thing), because I'm sure my short memory will suggest
making it unconditional for simplification within two years (being optimistic) :-)

Thanks.

-- 
Frederic Weisbecker
SUSE Labs
Re: [PATCH rcu 5/5] rcu: Fix rcu_read_unlock() deadloop due to IRQ work
Posted by Joel Fernandes 2 months, 4 weeks ago

On 7/9/2025 8:48 AM, Frederic Weisbecker wrote:
> On Wed, Jul 09, 2025 at 04:11:18PM +0530, neeraj.upadhyay@kernel.org wrote:
>> [...]
>> @@ -174,6 +174,15 @@ struct rcu_snap_record {
>>  	unsigned long   jiffies;	/* Track jiffies value */
>>  };
>>  
>> +/*
>> + * The IRQ work (defer_qs_iw) is used by RCU to get the scheduler's attention.
>> + * It can be in one of the following states:
>> + * - DEFER_QS_IDLE: An IRQ work was never scheduled.
>> + * - DEFER_QS_PENDING: An IRQ work was scheduled but never run.
> 
> Never as in "never ever"? :-)

You're right, this comment needs an update. It should be "An IRQ work was
scheduled, but a deferred QS hasn't been reported yet".

> 
> I'm not a native speaker, so you guys tell me, but isn't it less
> ambiguous:
> 
> - DEFER_QS_IDLE: The IRQ work isn't pending
> - DEFER_QS_PENDING: The IRQ work is pending but hasn't run yet

It actually could have run but we could have been in an RCU critical section at
the time.

> But then the names are already self-explanatory. And keeping
> it as a boolean should be enough too. Why do we need these two
> states?

It's just more readable, IMO. That's why I kept it like that.
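
For example, at the rcu_read_unlock_special() call site in this patch,

	rdp->defer_qs_iw_pending != DEFER_QS_PENDING

names the state being tested, where the old

	!rdp->defer_qs_iw_pending

does not.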

>> [...]
>> @@ -629,7 +632,18 @@ static void rcu_preempt_deferred_qs_handler(struct irq_work *iwp)
>>  
>>  	rdp = container_of(iwp, struct rcu_data, defer_qs_iw);
>>  	local_irq_save(flags);
>> -	rdp->defer_qs_iw_pending = false;
>> +
>> +	/*
>> +	 * Requeue the IRQ work on next unlock in following situation:
> 
> s/in/to avoid/

Sure.

>> +	 * 1. rcu_read_unlock() queues IRQ work (state -> DEFER_QS_PENDING)
>> +	 * 2. CPU enters new rcu_read_lock()
>> +	 * 3. IRQ work runs but cannot report QS due to rcu_preempt_depth() > 0
>> +	 * 4. rcu_read_unlock() does not re-queue work (state still PENDING)
>> +	 * 5. Deferred QS reporting does not happen.
>> +	 */
>> +	if (rcu_preempt_depth() > 0)
>> +		WRITE_ONCE(rdp->defer_qs_iw_pending, DEFER_QS_IDLE);
> 
> Why WRITE_ONCE()? Also, this lacks an explanation of why it's not
> unconditionally set back to DEFER_QS_IDLE (i.e. just a few words about that
> irq_work() recursion thing), because I'm sure my short memory will suggest
> making it unconditional for simplification within two years (being optimistic) :-)

The previous code was unconditionally setting it back, so we would recurse
before the deferred QS report happened. I can add more comments about that.
But unfortunately, there is some hang that Neeraj and Paul are reporting, so
I'll go work on that first.
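
To make the recursion concrete, here is a minimal userspace sketch of the old
versus patched handler logic (hypothetical code, nothing kernel-specific). It
treats the self-IPI as an immediate recursive call, which is effectively what
the irq_exit() path in the changelog amounts to. Note that in the kernel the
PENDING state is eventually cleared in rcu_preempt_deferred_qs_irqrestore()
once the deferred QS is actually reported:

  #include <stdio.h>

  #define DEFER_QS_IDLE		0
  #define DEFER_QS_PENDING	1

  static int pending;	/* models rdp->defer_qs_iw_pending */
  static int rcu_depth;	/* models rcu_preempt_depth(); 0 in this scenario */
  static int ipis;
  static int patched;

  static void unlock_special(void);

  /* Models rcu_preempt_deferred_qs_handler() running from the self-IPI. */
  static void handler(void)
  {
  	if (patched) {
  		/* Patched: rearm only if a QS could not be reported. */
  		if (rcu_depth > 0)
  			pending = DEFER_QS_IDLE;
  	} else {
  		pending = DEFER_QS_IDLE;	/* old code: unconditional */
  	}
  	/* The IPI's own irq_exit() runs the traced tick path again. */
  	unlock_special();
  }

  /* Models rcu_read_unlock_special() reached from irq_exit(). */
  static void unlock_special(void)
  {
  	if (pending != DEFER_QS_PENDING) {
  		if (++ipis > 5)	/* cap the demo instead of hanging */
  			return;
  		pending = DEFER_QS_PENDING;
  		handler();	/* self-IPI, delivered immediately here */
  	}
  }

  int main(void)
  {
  	/* Prints "old: 6 IPI(s)" (capped deadloop), then "patched: 1 IPI(s)". */
  	for (patched = 0; patched <= 1; patched++) {
  		pending = DEFER_QS_IDLE;
  		ipis = 0;
  		unlock_special();
  		printf("%s: %d IPI(s)\n", patched ? "patched" : "old", ipis);
  	}
  	return 0;
  }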

thanks for the review,

 - Joel